text stringlengths 4 1.02M | meta dict |
|---|---|
import random
from decimal import Decimal
from shop.models.productmodel import Product # is the overridden CategoryProduct (project.models.product.Product)
from shop_categories.models.categorymodel import Category # is the overridden Category (project.models.category.Category)
from django.test.testcases import TestCase
from django.template.defaultfilters import slugify
def make_category_tree():
    """Create the category fixture shared by the tests.

    Builds two separate MPTT trees:
        Top category / Level1 first / Level2 first / Level2 first sub
                      / Level1 first / Level2 second
                      / Level1 second
        Top category two / Level1 two first
                         / Level1 two second / Level1 two second sub
    and rebuilds the tree fields afterwards.
    """
    def _create(name, parent=None):
        # Mirror the original construction exactly: slug derived from the
        # name, category active, parent kwarg only passed when present.
        kwargs = {'name': name, 'slug': slugify(name), 'active': True}
        if parent is not None:
            kwargs['parent'] = parent
        node = Category(**kwargs)
        node.save()
        return node

    top = _create('Top category')
    level1_first = _create('Level1 first', top)
    _create('Level1 second', top)
    level2_first = _create('Level2 first', level1_first)
    _create('Level2 first sub', level2_first)
    _create('Level2 second', level1_first)
    top_two = _create('Top category two')
    _create('Level1 two first', top_two)
    level1_two_second = _create('Level1 two second', top_two)
    _create('Level1 two second sub', level1_two_second)
    # Recalculate the MPTT tree columns after the bulk creation above.
    Category.objects.rebuild()
class CategoryTestCase(TestCase):
    """Tests for Category display, tree paths and absolute URLs."""

    def setUp(self):
        # Every test runs against the same category fixture.
        make_category_tree()

    def test_category_unicode(self):
        category = Category.objects.get(slug='level1-first')
        self.assertEqual(unicode(category), 'Top category> Level1 first')

    def test_category_short_title(self):
        category = Category.objects.get(slug='level1-first')
        self.assertEqual(category.short_title(), 'Level1 first')

    def test_category_save(self):
        # Re-saving an existing category must not raise.
        Category.objects.get(slug='level1-first').save()

    def test_category_count(self):
        self.assertEqual(Category.objects.count(), 10)

    def test_category_leaf_path(self):
        leaf = Category.objects.get(slug='level2-first-sub')
        self.assertEqual(leaf.path, 'top-category/level1-first/level2-first/level2-first-sub')

    def test_category_leaf_url(self):
        leaf = Category.objects.get(slug='level2-first-sub')
        self.assertEqual(leaf.get_absolute_url(), '/shop/catalog/top-category/level1-first/level2-first/level2-first-sub/')
class CategoryProductTestCase(TestCase):
    """Tests for products placed in the category tree (URLs and listings)."""

    def setUp(self):
        # Build the category fixture and four products spread across
        # different branches — product 4 lives in the second MPTT tree.
        make_category_tree()
        Product(
            name='Product 1',
            slug=slugify('Product 1'),
            active=True,
            unit_price=Decimal(random.randint(50, 1000)),
            main_category=Category.objects.get(slug='level2-first-sub')
        ).save()
        Product(
            name='Product 2',
            slug=slugify('Product 2'),
            active=True,
            unit_price=Decimal(random.randint(50, 1000)),
            main_category=Category.objects.get(slug='level1-first')
        ).save()
        Product(
            name='Product 3',
            slug=slugify('Product 3'),
            active=True,
            unit_price=Decimal(random.randint(50, 1000)),
            main_category=Category.objects.get(slug='level1-second')
        ).save()
        Product(
            name='Product 4 with other treeid',
            slug=slugify('Product 4'),
            active=True,
            unit_price=Decimal(random.randint(50, 1000)),
            main_category=Category.objects.get(slug='level1-two-second')
        ).save()

    def test_product_adds_additional_categories(self):
        # Saving a product with a main_category should mirror that category
        # into additional_categories (behaviour of the overridden Product).
        p = Product(
            name='Product 5',
            slug=slugify('Product 5'),
            active=True,
            unit_price=Decimal(random.randint(50, 1000)),
            main_category=Category.objects.get(slug='level1-second')
        )
        p.save()
        self.assertEqual(p.additional_categories.all()[0].slug, 'level1-second')

    def test_product_absolute_url(self):
        # The product URL embeds the full category path of its main category.
        self.assertEqual(Product.objects.get(slug='product-1').get_absolute_url(),
            '/shop/catalog/top-category/level1-first/level2-first/level2-first-sub/product/product-1/')

    def test_product_detail(self):
        product_url = Product.objects.get(slug='product-1').get_absolute_url()
        response = self.client.get(product_url)
        self.assertContains(response, '/shop/catalog/top-category/level1-first/level2-first/level2-first-sub/product/product-1/')

    def test_list_products_in_category(self):
        # A category listing includes products from its own subtree only.
        category = Category.objects.get(slug='level1-first')
        response = self.client.get(category.get_absolute_url())
        self.assertContains(response, '/shop/catalog/top-category/level1-first/level2-first/level2-first-sub/product/product-1/')
        self.assertContains(response, '/shop/catalog/top-category/level1-first/product/product-2/')
        self.assertNotContains(response, '/shop/catalog/top-category/level1-second/product/product-3/')
        category = Category.objects.get(slug='level1-second')
        response = self.client.get(category.get_absolute_url())
        self.assertNotContains(response, '/shop/catalog/top-category/level1-first/level2-first/level2-first-sub/product/product-1/')
        self.assertNotContains(response, '/shop/catalog/top-category/level1-first/product/product-2/')
        self.assertContains(response, '/shop/catalog/top-category/level1-second/product/product-3/')

    def test_list_products_in_category_with_tree_id(self):
        # Products from a different MPTT tree must not leak into listings.
        product = Product.objects.get(slug='product-4')
        category = Category.objects.get(slug='top-category')
        response = self.client.get(category.get_absolute_url())
        self.assertNotContains(response, product.get_absolute_url())
        category = Category.objects.get(slug='top-category-two')
        response = self.client.get(category.get_absolute_url())
        self.assertContains(response, product.get_absolute_url())
"content_hash": "5978fc2ba20746f1da43418f8b47ccb2",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 164,
"avg_line_length": 49.47692307692308,
"alnum_prop": 0.6629353233830846,
"repo_name": "fivethreeo/django-shop-categories",
"id": "27c706b788372aa0a3a08b62fa16c192339efde8",
"size": "6456",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "shop_categories/tests/product_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1701"
},
{
"name": "Python",
"bytes": "17969"
}
],
"symlink_target": ""
} |
from django.db import models
from bluebottle.bb_tasks.models import BaseTask, BaseSkill, BaseTaskFile, BaseTaskMember
from django.utils.translation import ugettext as _
class Task(BaseTask):
    """
    Extended Task model for 1%Club.

    All behaviour is inherited from BaseTask; no fields are added here.
    """
    pass
class Skill(BaseSkill):
    # Concrete site-level model; all behaviour comes from BaseSkill.
    pass
class TaskMember(BaseTaskMember):
    # Concrete site-level model; all behaviour comes from BaseTaskMember.
    pass
class TaskFile(BaseTaskFile):
    # Concrete site-level model; all behaviour comes from BaseTaskFile.
    pass
| {
"content_hash": "2d66326349e9815263eb38185ef162d4",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 88,
"avg_line_length": 17,
"alnum_prop": 0.7299465240641712,
"repo_name": "onepercentclub/onepercentclub-site",
"id": "b2f5628d34a97b25ab7f3df0b4afa35067c102c9",
"size": "374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/tasks/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "13896"
},
{
"name": "CSS",
"bytes": "351343"
},
{
"name": "HTML",
"bytes": "898027"
},
{
"name": "Handlebars",
"bytes": "246489"
},
{
"name": "JavaScript",
"bytes": "168884"
},
{
"name": "Python",
"bytes": "1511371"
},
{
"name": "Ruby",
"bytes": "1050"
},
{
"name": "Shell",
"bytes": "74046"
}
],
"symlink_target": ""
} |
import os
import gzip
import urllib
# `import urllib` alone does not import the submodules used below
# (urllib.parse.urljoin / urllib.request.urlretrieve); import them explicitly.
import urllib.parse
import urllib.request

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings")

from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
download_folder = settings.GEOIP_PATH
class Command(BaseCommand):
    """Download the free MaxMind GeoIP databases into settings.GEOIP_PATH."""

    help = 'Updates GeoIP data in {}'.format(download_folder)
    base_url = 'http://www.maxmind.com/download/geoip/database/'
    files = ['GeoLiteCity.dat.gz', 'GeoLiteCountry/GeoIP.dat.gz']

    def handle(self, *args, **options):
        """Fetch each gzipped database, extract it and remove the archive.

        Raises:
            CommandError: if a downloaded file does not carry the expected
                '.gz' extension (decompression would be meaningless).
        """
        for path in self.files:
            # Only the file name matters locally; the directory part of the
            # remote path (e.g. 'GeoLiteCountry/') is discarded.
            _, filename = os.path.split(path)
            download_path = os.path.join(download_folder, filename)
            download_url = urllib.parse.urljoin(self.base_url, path)
            self.stdout.write('Downloading {} to {}\n'.format(download_url, download_path))
            urllib.request.urlretrieve(download_url, download_path)
            outfilepath, ext = os.path.splitext(download_path)
            if ext != '.gz':
                raise CommandError('Something went wrong while decompressing {}'.format(download_path))
            self.stdout.write('Extracting {} to {}\n'.format(download_path, outfilepath))
            with gzip.open(download_path, 'rb') as infile, open(outfilepath, 'wb') as outfile:
                outfile.writelines(infile)
            # The extracted .dat is all we need; drop the archive.
            self.stdout.write('Deleting {}\n'.format(download_path))
            os.remove(download_path)
            self.stdout.write('Done with {}\n'.format(path))
        # The original string had no placeholder, so the trailing
        # '.format(path)' was a no-op and has been removed.
        self.stdout.write('\nDownload the paid db from MaxMind for more precise results. Ask Dale for the credentials')
| {
"content_hash": "5acc4f746adc63a2ced7523e312c48e9",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 136,
"avg_line_length": 45.6,
"alnum_prop": 0.6641604010025063,
"repo_name": "wevote/WebAppPublic",
"id": "bc4711ed96b95976f4cb5505aab093ed85f293e9",
"size": "1596",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "geoip/management/commands/update_geoip_data.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8022"
},
{
"name": "HTML",
"bytes": "131153"
},
{
"name": "JavaScript",
"bytes": "296860"
},
{
"name": "Python",
"bytes": "1700558"
},
{
"name": "Shell",
"bytes": "252"
}
],
"symlink_target": ""
} |
from django import db
from django.db import transaction
from django.core.management.base import NoArgsCommand
from django.core.exceptions import MultipleObjectsReturned
from data.models import State, Msn, ElectricEmissionsStateRaw, ElectricEmissionsState
# National Priorities Project Data Repository
# load_electric_emissions_state.py
# Created 4/20/2012
# Populates Electric Emissions model used by the API
# source model(s): EnergyProductionStateRaw, State, Msn
# source load command(s): import_electric_emissions
# destination model: ElectricEmissionsState
# HOWTO:
# 1) Ensure that State is loaded and up to date
# 2) Run as Django management command from your project path "python manage.py load_electric_emissions_state"
# Safe to rerun: yes
class Command(NoArgsCommand):
@transaction.commit_on_success
def handle_noargs(self, **options):
state_abbr = ''
insert_count = 0
update_count = 0
unchanged_count = 0
raw = ElectricEmissionsStateRaw.objects.all().order_by('state')
for r in raw:
if r.state != state_abbr:
try:
state_ref_current = State.objects.get(state_abbr=r.state)
except:
print 'Skipping record. Unable to find state: ' + r.state
continue
state_abbr = r.state
try:
record = ElectricEmissionsState.objects.get(
state=state_ref_current,
year = r.year,
producer_type = r.producer_type,
energy_source = r.energy_source)
if record.co2 <> r.co2 or record.so2 <> r.so2 or record.nox <> r.nox:
record.co2 = r.co2
record.so2 = r.so2
record.nox = r.nox
record.save()
db.reset_queries()
update_count = update_count + 1
else:
unchanged_count = unchanged_count + 1
except MultipleObjectsReturned:
print 'error: multiple records exist for %s %s %s %s' % (
r.year, r.state, r.producer_type, r.energy_source)
continue
except:
record = ElectricEmissionsState(year=r.year,
state=state_ref_current,
producer_type = r.producer_type,
energy_source = r.energy_source,
co2 = r.co2,
so2 = r.so2,
nox = r.nox)
record.save()
db.reset_queries()
insert_count = insert_count + 1
print 'EnergyProductionState load complete. %s records updated, %s inserted, %s unchanged' % (
update_count, insert_count, unchanged_count)
| {
"content_hash": "c30ce7abf879c2a3c7488c2bab177750",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 109,
"avg_line_length": 38.39473684210526,
"alnum_prop": 0.5534612748457848,
"repo_name": "npp/npp-api",
"id": "3480c060533780701acc1b2a3fb9f853552af86b",
"size": "2918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/management/commands/load_electric_emissions_state.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5982539"
},
{
"name": "Shell",
"bytes": "36"
}
],
"symlink_target": ""
} |
import os
import argparse
# Command-line interface: compare one design's regression results against a
# stored benchmark CSV and append a PASSED/FAILED line to the output report.
parser = argparse.ArgumentParser(
    description="compare one design from a regression result to a benchmark result")
parser.add_argument('--benchmark', '-b', action='store', required=True,
                    help="The csv file from which to extract the benchmark results")
parser.add_argument('--regression_results', '-r', action='store', required=True,
                    help="The csv file to be tested")
parser.add_argument('--design', '-d', action='store', required=True,
                    help="The design to compare for between the two scripts. Same as -design in flow.tcl")
parser.add_argument('--run_path', '-rp', action='store', required=True,
                    help="The run path, will be used to search for any missing files.")
parser.add_argument('--output_report', '-o', action='store', required=True,
                    help="The file to print the final report in")

args = parser.parse_args()
benchmark_file = args.benchmark
regression_results_file = args.regression_results
output_report_file = args.output_report
design = args.design
run_path = args.run_path

# Allowed regression per statistic; 'general_tolerance' is the fallback.
# A value of "-1" in either CSV means the producing stage was not reached.
tolerance = {'general_tolerance':1, 'tritonRoute_violations':2, 'Magic_violations':10, 'antenna_violations':10, 'lvs_total_errors':0}
# Statistics that must stay within tolerance for the test to pass.
critical_statistics = ['tritonRoute_violations','Magic_violations', 'antenna_violations','lvs_total_errors']
# Result files expected under <run_path>/results/magic/.
magic_file_extensions = ['gds','mag','lef','spice']
def compare_vals(benchmark_value, regression_value, param):
    """Return True when the regression value is acceptable vs the benchmark.

    "-1" means "stage not reached": a benchmark of -1 always passes, a
    regression of -1 always fails. Otherwise the regression may exceed the
    benchmark by at most the tolerance configured for *param*.
    """
    if str(benchmark_value) == "-1":
        return True
    if str(regression_value) == "-1":
        return False
    # Tolerances are stored positive; the acceptable floor is its negation.
    allowed_drop = -tolerance.get(param, tolerance['general_tolerance'])
    return float(benchmark_value) - float(regression_value) >= allowed_drop
def findIdx(header, label):
    """Return the index of *label* in *header*, or -1 when absent."""
    for position, column in enumerate(header):
        if column == label:
            return position
    return -1
def parseCSV(csv_file):
    """Extract the row for the target *design* from a results CSV.

    Returns a dict mapping column name -> value for the matching design, or
    an empty dict when the design (or the 'design' column) is absent.
    """
    design_out = dict()
    # FIX: the file handle was previously opened and never closed.
    with open(csv_file, 'r') as csvOpener:
        csvData = csvOpener.read().split("\n")
    headerInfo = csvData[0].split(",")
    designPathIdx = findIdx(headerInfo, "design")
    if designPathIdx == -1:
        print("invalid report. No design paths.")
        # FIX: previously fell through and indexed entry[-1]; without a
        # 'design' column there is nothing meaningful to match against.
        return design_out
    for i in range(1, len(csvData)):
        if len(csvData[i]):
            entry = csvData[i].split(",")
            designPath = entry[designPathIdx]
            if designPath == design:
                # Copy every column except the design path itself.
                for idx in range(len(headerInfo)):
                    if idx != designPathIdx:
                        design_out[headerInfo[idx]] = entry[idx]
                break
    return design_out
def criticalMistmatch(benchmark, regression_result):
    """Compare the critical statistics of a run against the benchmark.

    Returns (test_failed, reason).
    """
    # BUG FIX: the original condition was inverted ('if len(benchmark):'),
    # which reported every benchmarked design as "not benchmarked" and
    # raised KeyError below for genuinely missing benchmarks. The sibling
    # compareStatus() uses the correct empty check.
    if len(benchmark) == 0:
        return False, "The design is not benchmarked"
    for stat in critical_statistics:
        if compare_vals(benchmark[stat], regression_result[stat], stat):
            continue
        # "-1" marks a stage that never ran; distinguish it from a real
        # numeric regression in the failure reason.
        if str(regression_result[stat]) == "-1":
            return True, "The test didn't pass the stage responsible for " + stat
        return True, "The results of " + stat + " mismatched with the benchmark"
    return False, "The test passed"
def compareStatus(benchmark, regression_result):
    """Compare the overall flow status of a run against the benchmark.

    Returns (test_failed, reason); only a failure the benchmark has not
    also seen counts as a test failure.
    """
    if not benchmark:
        return False, "The design is not benchmarked"
    if "fail" not in str(regression_result["flow_status"]):
        return False, "The test passed"
    if "fail" in str(benchmark["flow_status"]):
        return False, "The OpenLane flow failed, but the benchmark never saw it succeed"
    return True, "The OpenLane flow failed outright, check the logs"
def missingResultingFiles(design):
    """Verify that every expected magic output file exists for *design*.

    Returns (test_failed, reason).
    """
    base = str(run_path) + '/results/magic/' + str(design['design_name'])
    for extension in magic_file_extensions:
        candidate = base + '.' + str(extension)
        if not os.path.isfile(candidate):
            return True, "File " + candidate + " is missing from the results directory"
    return False, "The test passed"
# Main flow: parse both CSVs, then apply the checks in order of severity --
# flow status first, then critical statistics, then resulting files.
benchmark = parseCSV(benchmark_file)
regression_result = parseCSV(regression_results_file)

testFail, reasonWhy = compareStatus(benchmark, regression_result)
report = str(design)
if testFail:
    report += ",FAILED,"+reasonWhy+"\n"
else:
    testFail, reasonWhy = criticalMistmatch(benchmark, regression_result)
    if testFail:
        report += ",FAILED,"+reasonWhy+"\n"
    else:
        testFail, reasonWhy = missingResultingFiles(regression_result)
        if testFail:
            report += ",FAILED,"+reasonWhy+"\n"
        else:
            report += ",PASSED,"+reasonWhy+"\n"

# Opened in append mode so several designs can share one report file.
outputReportOpener = open(output_report_file, 'a+')
outputReportOpener.write(report)
outputReportOpener.close()
"content_hash": "6a8e53a0e3babe9ed5a1fbe4af629f96",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 133,
"avg_line_length": 36.315384615384616,
"alnum_prop": 0.6418131751747511,
"repo_name": "efabless/openlane",
"id": "6e7b60349954c4b082aee9381474db9f25af1ee2",
"size": "5307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/compare_regression_design.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Coq",
"bytes": "128005"
},
{
"name": "Dockerfile",
"bytes": "52446"
},
{
"name": "HTML",
"bytes": "218625"
},
{
"name": "Logos",
"bytes": "26097"
},
{
"name": "Makefile",
"bytes": "2470"
},
{
"name": "Perl",
"bytes": "5589"
},
{
"name": "Python",
"bytes": "246219"
},
{
"name": "Shell",
"bytes": "21834"
},
{
"name": "Tcl",
"bytes": "168706"
},
{
"name": "Verilog",
"bytes": "4684813"
}
],
"symlink_target": ""
} |
import logging
from typing import List, Optional
from flask_appbuilder.security.sqla.models import User
from superset.annotation_layers.commands.exceptions import (
AnnotationLayerBulkDeleteFailedError,
AnnotationLayerBulkDeleteIntegrityError,
AnnotationLayerNotFoundError,
)
from superset.annotation_layers.dao import AnnotationLayerDAO
from superset.commands.base import BaseCommand
from superset.dao.exceptions import DAODeleteFailedError
from superset.models.annotations import AnnotationLayer
logger = logging.getLogger(__name__)
class BulkDeleteAnnotationLayerCommand(BaseCommand):
    """Delete a batch of annotation layers in a single operation."""

    def __init__(self, user: User, model_ids: List[int]):
        self._actor = user
        self._model_ids = model_ids
        # Populated by validate(); run() calls validate() before deleting.
        self._models: Optional[List[AnnotationLayer]] = None

    def run(self) -> None:
        """Validate and delete the layers.

        Raises:
            AnnotationLayerNotFoundError: if any id does not exist.
            AnnotationLayerBulkDeleteIntegrityError: if a layer still has
                annotations attached.
            AnnotationLayerBulkDeleteFailedError: if the DAO delete fails.
        """
        self.validate()
        try:
            AnnotationLayerDAO.bulk_delete(self._models)
        except DAODeleteFailedError as ex:
            logger.exception(ex.exception)
            # Chain the underlying DAO error so the cause is preserved.
            raise AnnotationLayerBulkDeleteFailedError() from ex

    def validate(self) -> None:
        # Validate/populate model exists
        self._models = AnnotationLayerDAO.find_by_ids(self._model_ids)
        if not self._models or len(self._models) != len(self._model_ids):
            raise AnnotationLayerNotFoundError()
        if AnnotationLayerDAO.has_annotations(self._model_ids):
            raise AnnotationLayerBulkDeleteIntegrityError()
| {
"content_hash": "a02818d75a76eb7b10b05d96a0348f54",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 73,
"avg_line_length": 36.7,
"alnum_prop": 0.7213896457765667,
"repo_name": "mistercrunch/panoramix",
"id": "148bdd0bd2d61270e6290e620b5132435e359b4d",
"size": "2253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "superset/annotation_layers/commands/bulk_delete.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "46750"
},
{
"name": "HTML",
"bytes": "34140"
},
{
"name": "JavaScript",
"bytes": "81606"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "240195"
},
{
"name": "Shell",
"bytes": "213"
}
],
"symlink_target": ""
} |
import os
import sys

# sys.path.insert(0, os.path.abspath('.'))

# Make the example Django project importable and initialise Django so that
# autodoc can import hordak modules without a configured-settings error.
hordak_dir = os.path.abspath("..")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example_project.settings")
sys.path.insert(0, hordak_dir)

import django

django.setup()

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon"]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"

# The master toctree document.
master_doc = "index"

# General information about the project.
project = "Django Hordak"
copyright = "2016, Adam Charnock"
author = "Adam Charnock"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme

html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = "DjangoHordakdoc"

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, "DjangoHordak.tex", "Django Hordak Documentation", "Adam Charnock", "manual")
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "djangohordak", "Django Hordak Documentation", [author], 1)]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "DjangoHordak",
        "Django Hordak Documentation",
        author,
        "DjangoHordak",
        "One line description of project.",
        "Miscellaneous",
    )
]

# Keep autodoc members in the order they appear in the source files.
autodoc_member_order = "bysource"
| {
"content_hash": "a90059f4d788816107a03d2d99beba6b",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 94,
"avg_line_length": 30.292517006802722,
"alnum_prop": 0.6644958454974175,
"repo_name": "waldocollective/django-hordak",
"id": "93c0b021139c9805a55b8a7085f764c7a24e05ea",
"size": "5142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62305"
}
],
"symlink_target": ""
} |
'''
OVERVIEW: Python module for generating plots for processed data automatically.
'''
import os, sys
import matplotlib.pyplot as plt
from pylab import *
class AutoPlot():
    """Generates plots automatically for a processed dataset.

    All output is written under <workingdir>/plots; this process's
    stdout/stderr are redirected to the listener log files.
    """

    def __init__(self, datasetID, workingdir):
        # Initialize output directories
        self.datasetID = datasetID
        self.workingdir = os.path.join(workingdir, 'plots')
        self.logfiledir = '/home/ubuntu/logs'
        sys.stdout = open(self.logfiledir + '/listener_stdout.log', 'w')
        sys.stderr = open(self.logfiledir + '/listener_stderr.log', 'w')
        # os.makedirs replaces the previous os.system('mkdir ...'): no shell
        # invocation, and intermediate directories are created as needed.
        if not os.path.isdir(self.workingdir):
            os.makedirs(self.workingdir)

    def plotKEGGabundances(self, filepath, level):
        """Plot KEGG module abundances grouped to *level* (1, 2 or 3)."""
        # NOTE(review): filepath/level are interpolated into a shell command
        # for the external QIIME script; callers must not pass untrusted
        # values here.
        cmd_lvl = 'summarize_taxa_through_plots.py -i ' + filepath + ' -p /home/ubuntu/qiime_params/qiime_params_L' + str(level) + '.txt -o ' + self.workingdir + '/picrust/plots_at_level' + str(level)
        os.system(cmd_lvl)

    def plotAlphaDiversities(self, alphaDiversityFile, figure_filename):
        """Read a whitespace-delimited alpha diversity file (header line,
        then "<sampleID> <value>" rows) and save a box plot of the values."""
        with open(alphaDiversityFile, 'r') as fid:
            all_lines = fid.readlines()
        alpha_diversities = [float(line.split()[1]) for line in all_lines[1:]]
        figure()
        plt.boxplot(alpha_diversities)
        plt.xlabel('Sample category')
        plt.ylabel('Alpha diversity')
        plt.savefig(figure_filename)
| {
"content_hash": "77213490352ca145b16a8469cfc2cf89",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 200,
"avg_line_length": 39.30769230769231,
"alnum_prop": 0.6373124592302675,
"repo_name": "thomasgurry/amplicon_sequencing_pipeline",
"id": "7b47a1d05a83752dd662a294d6593449b14eee88",
"size": "1533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/AutoPlot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "207811"
}
],
"symlink_target": ""
} |
from ..base import BitbucketCloudBase
from .projects import Projects
from ..repositories import WorkspaceRepositories
class Workspaces(BitbucketCloudBase):
    """Interface to the Bitbucket Cloud workspaces collection endpoint."""

    def __init__(self, url, *args, **kwargs):
        super(Workspaces, self).__init__(url, *args, **kwargs)

    def __get_object(self, data):
        # Wrap a raw API payload in a Workspace bound to the same session.
        return Workspace(data, **self._new_session_args)

    def each(self, role=None, q=None, sort=None):
        """
        Get all workspaces matching the criteria.

        :param role: string (default is None): Filters the workspaces based on the authenticated user's role on each workspace.
            * member: returns a list of all the workspaces which the caller is a member of
              at least one workspace group or repository
            * collaborator: returns a list of workspaces which the caller has write access
              to at least one repository in the workspace
            * owner: returns a list of workspaces which the caller has administrator access
        :param q: string (default is None): Query string to narrow down the response.
            See https://developer.atlassian.com/bitbucket/api/2/reference/meta/filtering for details.
        :param sort: string (default is None): Name of a response property to sort results.
            See https://developer.atlassian.com/bitbucket/api/2/reference/meta/filtering for details.

        :return: A generator for the Workspace objects
        """
        params = {}
        if role is not None:
            params["role"] = role
        if q is not None:
            params["q"] = q
        if sort is not None:
            params["sort"] = sort
        for workspace in self._get_paged(None, params):
            yield self.__get_object(workspace)

        return

    def get(self, workspace):
        """
        Returns the requested workspace

        :param workspace: string: This can either be the workspace ID (slug) or the workspace UUID
            surrounded by curly-braces, for example: {workspace UUID}.

        :return: The requested Workspace objects
        """
        return self.__get_object(super(Workspaces, self).get(workspace))
class Workspace(BitbucketCloudBase):
    """A single Bitbucket Cloud workspace with its projects and repositories."""

    def __init__(self, data, *args, **kwargs):
        super(Workspace, self).__init__(None, *args, data=data, expected_type="workspace", **kwargs)
        # Sub-resources are bound to the links returned in the payload.
        self.__projects = Projects(self.get_link("projects"), **self._new_session_args)
        self.__repositories = WorkspaceRepositories(self.get_link("repositories"), **self._new_session_args)

    @property
    def name(self):
        """ The workspace name """
        return self.get_data("name")

    @name.setter
    def name(self, name):
        """ Setter for the workspace name """
        return self.update(name=name)

    @property
    def slug(self):
        """ The workspace slug """
        return self.get_data("slug")

    @property
    def uuid(self):
        """ The workspace uuid """
        return self.get_data("uuid")

    @property
    def is_private(self):
        """ The workspace private flag """
        return self.get_data("is_private")

    @property
    def created_on(self):
        """ The workspace creation time """
        return self.get_data("created_on")

    @property
    def updated_on(self):
        """ The workspace last update time """
        return self.get_data("updated_on", "never updated")

    def get_avatar(self):
        """ Fetch and return the workspace avatar via its avatar link """
        return self.get(self.get_link("avatar"), absolute=True)

    @property
    def projects(self):
        """ The workspace projects """
        return self.__projects

    @property
    def repositories(self):
        """ The workspace repositories """
        return self.__repositories
| {
"content_hash": "83c62d76956586fbb7b3a61f30fecc0c",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 136,
"avg_line_length": 37.138888888888886,
"alnum_prop": 0.5801545749189728,
"repo_name": "MattAgile/atlassian-python-api",
"id": "75a78df39357cd254bdae8903cdfa251e951a79f",
"size": "4027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "atlassian/bitbucket/cloud/workspaces/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "35317"
}
],
"symlink_target": ""
} |
"""
Views for managing instances.
"""
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.utils import filters
from openstack_dashboard.dashboards.project.access_and_security.\
security_groups import forms as project_forms
from openstack_dashboard.dashboards.project.access_and_security.\
security_groups import tables as project_tables
class DetailView(tables.DataTableView):
    """Table view listing the rules of one security group."""
    table_class = project_tables.RulesTable
    template_name = 'project/access_and_security/security_groups/detail.html'

    @memoized.memoized_method
    def _get_data(self):
        # Memoized so the group is fetched once per request even though both
        # get_data() and get_context_data() need it.
        sg_id = filters.get_int_or_uuid(self.kwargs['security_group_id'])
        try:
            return api.network.security_group_get(self.request, sg_id)
        except Exception:
            redirect = reverse('horizon:project:access_and_security:index')
            exceptions.handle(self.request,
                              _('Unable to retrieve security group.'),
                              redirect=redirect)

    def get_data(self):
        return self._get_data().rules

    def get_context_data(self, **kwargs):
        context = super(DetailView, self).get_context_data(**kwargs)
        context["security_group"] = self._get_data()
        return context
class UpdateView(forms.ModalFormView):
    """Modal form for editing a security group's name and description."""
    form_class = project_forms.UpdateGroup
    template_name = 'project/access_and_security/security_groups/update.html'
    success_url = reverse_lazy('horizon:project:access_and_security:index')

    @memoized.memoized_method
    def get_object(self):
        # Memoized: both get_context_data() and get_initial() need the group
        # within a single request.
        sg_id = filters.get_int_or_uuid(self.kwargs['security_group_id'])
        try:
            return api.network.security_group_get(self.request, sg_id)
        except Exception:
            msg = _('Unable to retrieve security group.')
            url = reverse('horizon:project:access_and_security:index')
            exceptions.handle(self.request, msg, redirect=url)

    def get_context_data(self, **kwargs):
        context = super(UpdateView, self).get_context_data(**kwargs)
        context["security_group"] = self.get_object()
        return context

    def get_initial(self):
        security_group = self.get_object()
        return {'id': self.kwargs['security_group_id'],
                'name': security_group.name,
                'description': security_group.description}
class AddRuleView(forms.ModalFormView):
    """Modal form for adding a rule to an existing security group."""
    form_class = project_forms.AddRule
    template_name = 'project/access_and_security/security_groups/add_rule.html'

    def get_success_url(self):
        # Return to the detail page of the group the rule was added to.
        return reverse("horizon:project:access_and_security:"
                       "security_groups:detail",
                       args=[self.kwargs['security_group_id']])

    def get_context_data(self, **kwargs):
        context = super(AddRuleView, self).get_context_data(**kwargs)
        context["security_group_id"] = self.kwargs['security_group_id']
        return context

    def get_initial(self):
        return {'id': self.kwargs['security_group_id']}

    def get_form_kwargs(self):
        kwargs = super(AddRuleView, self).get_form_kwargs()
        try:
            groups = api.network.security_group_list(self.request)
        except Exception:
            groups = []
            exceptions.handle(self.request,
                              _("Unable to retrieve security groups."))
        current_id = filters.get_int_or_uuid(self.kwargs['security_group_id'])
        security_groups = []
        for group in groups:
            # Mark the group the rule is being added to as "(current)".
            label = (_("%s (current)") % group.name
                     if group.id == current_id else group.name)
            security_groups.append((group.id, label))
        kwargs['sg_list'] = security_groups
        return kwargs
class CreateView(forms.ModalFormView):
    """Modal form for creating a new security group.

    Declarative only: behavior comes entirely from ModalFormView plus the
    CreateGroup form; success returns to the access & security index.
    """
    form_class = project_forms.CreateGroup
    template_name = 'project/access_and_security/security_groups/create.html'
    success_url = reverse_lazy('horizon:project:access_and_security:index')
| {
"content_hash": "f65f19d6b42a33319dde318de01f542f",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 79,
"avg_line_length": 37.26086956521739,
"alnum_prop": 0.6469078179696616,
"repo_name": "hep-gc/glint-horizon",
"id": "f061464dcb3c96384717c6cbee3f8a10ca42621f",
"size": "5094",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/images/security_groups/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "227769"
},
{
"name": "HTML",
"bytes": "329304"
},
{
"name": "JavaScript",
"bytes": "707335"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "3247421"
},
{
"name": "Shell",
"bytes": "17821"
}
],
"symlink_target": ""
} |
"""
***Calculator button class***
Calculator by Ariel Haviv (ariel.haviv@gmail.com)
Instructors: Anatoly Peymer, Zehava Lavi
"""
from Tkinter import *
class CalcButton(Button):
    """A Tkinter Button that splits its constructor keywords in two.

    Keys recognized by the underlying ``Button`` widget (``self.keys()``)
    are applied via ``configure``; every remaining keyword is stored in
    ``self.args`` for the application to consume later.
    """
    def __init__(self, root, **args):
        self.root = root
        Button.__init__(self, root)
        # Build the valid-option set once: O(1) membership tests instead of
        # scanning self.keys() for every argument, and dict comprehensions
        # instead of dict([...]) (the non-idiomatic original form).
        valid = set(self.keys())
        b_args = {k: v for k, v in args.items() if k in valid}
        self.configure(b_args)
        # Everything the Button widget does not understand.
        self.args = {k: v for k, v in args.items() if k not in valid}
| {
"content_hash": "8727cd563d63882c5a5c8035408c48c8",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 74,
"avg_line_length": 27.761904761904763,
"alnum_prop": 0.6792452830188679,
"repo_name": "ArielCabib/python-tkinter-calculator",
"id": "c74794ffad6269216ed1e0e5c17f67bd1043bb13",
"size": "2346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Calculator/Widgets/Buttons.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "74760"
}
],
"symlink_target": ""
} |
import unittest
import os
import time
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer
from azure.cli.testsdk import JMESPathCheck as JMESPathCheckV2
from azure.cli.testsdk.vcr_test_base import (ResourceGroupVCRTestBase,
JMESPathCheck, NoneCheck)
TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
# pylint: disable=line-too-long
class WebappBasicE2ETest(ResourceGroupVCRTestBase):
    """End-to-end (recorded/VCR) test of the basic webapp workflow:
    plan create/list/show, webapp create/list/show, local-git deployment
    configuration, diagnostics logging, publishing profiles, and stop/start.
    """
    def __init__(self, test_method):
        super(WebappBasicE2ETest, self).__init__(__file__, test_method, resource_group='azurecli-webapp-e2e')
    def test_webapp_e2e(self):
        # Entry point required by the VCR test base; real work is in body().
        self.execute()
    def body(self):
        webapp_name = 'webapp-e2e'
        plan = 'webapp-e2e-plan'
        # Create a plan and verify it via scoped list, unscoped list, and show.
        self.cmd('appservice plan create -g {} -n {}'.format(self.resource_group, plan))
        self.cmd('appservice plan list -g {}'.format(self.resource_group), checks=[
            JMESPathCheck('length(@)', 1),
            JMESPathCheck('[0].name', plan),
            JMESPathCheck('[0].sku.tier', 'Basic'),
            JMESPathCheck('[0].sku.name', 'B1')
        ])
        self.cmd('appservice plan list', checks=[
            JMESPathCheck("length([?name=='{}' && resourceGroup=='{}'])".format(plan, self.resource_group), 1)
        ])
        self.cmd('appservice plan show -g {} -n {}'.format(self.resource_group, plan), checks=[
            JMESPathCheck('name', plan)
        ])
        # Create the webapp and verify state and hostname through create/list/show.
        self.cmd('webapp create -g {} -n {} --plan {}'.format(self.resource_group, webapp_name, plan), checks=[
            JMESPathCheck('state', 'Running'),
            JMESPathCheck('name', webapp_name),
            JMESPathCheck('hostNames[0]', webapp_name + '.azurewebsites.net')
        ])
        self.cmd('webapp list -g {}'.format(self.resource_group), checks=[
            JMESPathCheck('length(@)', 1),
            JMESPathCheck('[0].name', webapp_name),
            JMESPathCheck('[0].hostNames[0]', webapp_name + '.azurewebsites.net')
        ])
        self.cmd('webapp show -g {} -n {}'.format(self.resource_group, webapp_name), checks=[
            JMESPathCheck('name', webapp_name),
            JMESPathCheck('hostNames[0]', webapp_name + '.azurewebsites.net')
        ])
        # Configure local-git deployment and check the reported repo URL.
        result = self.cmd('webapp deployment source config-local-git -g {} -n {}'.format(self.resource_group, webapp_name))
        self.assertTrue(result['url'].endswith(webapp_name + '.git'))
        self.cmd('webapp deployment source show -g {} -n {}'.format(self.resource_group, webapp_name), checks=[
            JMESPathCheck('repoUrl', 'https://{}.scm.azurewebsites.net'.format(webapp_name))
        ])
        # turn on diagnostics
        test_cmd = ('webapp log config -g {} -n {} --level verbose'.format(self.resource_group, webapp_name) + ' '
                    '--application-logging true --detailed-error-messages true --failed-request-tracing true --web-server-logging filesystem')
        self.cmd(test_cmd)
        self.cmd('webapp log show -g {} -n {}'.format(self.resource_group, webapp_name), checks=[
            JMESPathCheck('detailedErrorMessages.enabled', True),
            JMESPathCheck('failedRequestsTracing.enabled', True)
        ])
        self.cmd('webapp config show -g {} -n {}'.format(self.resource_group, webapp_name), checks=[
            JMESPathCheck('detailedErrorLoggingEnabled', True),
            JMESPathCheck('httpLoggingEnabled', True),
            JMESPathCheck('scmType', 'LocalGit'),
            JMESPathCheck('requestTracingEnabled', True)
            # TODO: contact webapp team for where to retrieve 'level'
        ])
        # show publish profile info
        result = self.cmd('webapp deployment list-publishing-profiles -g {} -n {}'.format(self.resource_group, webapp_name))
        self.assertTrue(result[1]['publishUrl'].startswith('ftp://'))
        # Stop and restart the site, verifying the reported state each time.
        self.cmd('webapp stop -g {} -n {}'.format(self.resource_group, webapp_name))
        self.cmd('webapp show -g {} -n {}'.format(self.resource_group, webapp_name), checks=[
            JMESPathCheck('state', 'Stopped'),
            JMESPathCheck('name', webapp_name)
        ])
        self.cmd('webapp start -g {} -n {}'.format(self.resource_group, webapp_name))
        self.cmd('webapp show -g {} -n {}'.format(self.resource_group, webapp_name), checks=[
            JMESPathCheck('state', 'Running'),
            JMESPathCheck('name', webapp_name)
        ])
class WebappQuickCreateTest(ScenarioTest):
    """Quick-create scenarios: Windows and Linux webapps, continuous
    deployment from a public git repo, and creating a webapp whose plan
    lives in a different resource group.
    """
    @ResourceGroupPreparer()
    def test_win_webapp_quick_create(self, resource_group):
        webapp_name = 'webapp-quick'
        plan = 'plan-quick'
        self.cmd('appservice plan create -g {} -n {}'.format(resource_group, plan))
        r = self.cmd('webapp create -g {} -n {} --plan {} --deployment-local-git -r "node|6.1"'.format(resource_group, webapp_name, plan)).get_output_in_json()
        self.assertTrue(r['ftpPublishingUrl'].startswith('ftp://'))
        # BUG FIX: 'checks' was previously passed to str.format() (where it is
        # a silently-ignored keyword) instead of to self.cmd(), so these
        # assertions never executed.
        self.cmd('webapp config appsettings list -g {} -n {}'.format(resource_group, webapp_name), checks=[
            JMESPathCheckV2('[0].name', 'WEBSITE_NODE_DEFAULT_VERSION'),
            JMESPathCheckV2('[0].value', '6.1.0'),
        ])
    @ResourceGroupPreparer()
    def test_win_webapp_quick_create_cd(self, resource_group):
        webapp_name = 'webapp-quick-cd'
        plan = 'plan-quick'
        self.cmd('appservice plan create -g {} -n {}'.format(resource_group, plan))
        self.cmd('webapp create -g {} -n {} --plan {} --deployment-source-url https://github.com/yugangw-msft/azure-site-test.git -r "node|6.1"'.format(resource_group, webapp_name, plan))
        # 'time' is imported at module level; the redundant local import was removed.
        time.sleep(30)  # 30 seconds should be enough for the deployment finished(Skipped under playback mode)
        import requests
        r = requests.get('http://{}.azurewebsites.net'.format(webapp_name))
        # verify the web page
        self.assertTrue('Hello world' in str(r.content))
    @ResourceGroupPreparer(location='japaneast')
    def test_linux_webapp_quick_create(self, resource_group):
        webapp_name = 'webapp-quick-linux'
        plan = 'plan-quick-linux'
        self.cmd('appservice plan create -g {} -n {} --is-linux'.format(resource_group, plan))
        self.cmd('webapp create -g {} -n {} --plan {} -i naziml/ruby-hello'.format(resource_group, webapp_name, plan))
        import requests
        r = requests.get('http://{}.azurewebsites.net'.format(webapp_name), timeout=240)
        # verify the web page
        self.assertTrue('Ruby on Rails in Web Apps on Linux' in str(r.content))
        # verify app settings
        # BUG FIX: same misplaced 'checks' keyword as in test_win_webapp_quick_create.
        self.cmd('webapp config appsettings list -g {} -n {}'.format(resource_group, webapp_name), checks=[
            JMESPathCheckV2('[0].name', 'WEBSITES_ENABLE_APP_SERVICE_STORAGE'),
            JMESPathCheckV2('[0].value', 'false'),
        ])
    @ResourceGroupPreparer(location='westus')
    def test_linux_webapp_quick_create_cd(self, resource_group):
        webapp_name = 'webapp-quick-linux-cd'
        plan = 'plan-quick-linux-cd'
        self.cmd('appservice plan create -g {} -n {} --is-linux'.format(resource_group, plan))
        self.cmd('webapp create -g {} -n {} --plan {} -u https://github.com/yugangw-msft/azure-site-test.git -r "node|6.10"'.format(resource_group, webapp_name, plan))
        import requests
        r = requests.get('http://{}.azurewebsites.net'.format(webapp_name), timeout=240)
        # verify the web page
        self.assertTrue('Hello world' in str(r.content))
    @ResourceGroupPreparer(parameter_name='resource_group', parameter_name_for_location='resource_group_location')
    @ResourceGroupPreparer(parameter_name='resource_group2', parameter_name_for_location='resource_group_location2')
    def test_create_in_different_group(self, resource_group, resource_group_location, resource_group2, resource_group_location2):
        """A webapp may reference a plan (by id) from another resource group."""
        plan = 'planInOneRG'
        self.cmd('group create -n {} -l {}'.format(resource_group2, resource_group_location))
        plan_id = self.cmd('appservice plan create -g {} -n {}'.format(resource_group, plan)).get_output_in_json()['id']
        self.cmd('webapp create -g {} -n webInOtherRG --plan {}'.format(resource_group2, plan_id), checks=[
            JMESPathCheckV2('name', 'webInOtherRG')
        ])
class AppServicePlanSceanrioTest(ScenarioTest):
    """Plan lifecycle scenarios around webapp deletion.

    NOTE(review): 'Sceanrio' is a typo, but the class name is kept as-is to
    avoid breaking test discovery or external references.
    """
    @ResourceGroupPreparer()
    def test_retain_plan(self, resource_group):
        """'webapp delete --keep-empty-plan' must leave the plan behind."""
        webapp_name = 'webapp-quick'
        plan = 'plan-quick'
        self.cmd('appservice plan create -g {} -n {}'.format(resource_group, plan))
        self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan))
        self.cmd('webapp delete -g {} -n {} --keep-dns-registration --keep-empty-plan --keep-metrics'.format(resource_group, webapp_name))
        self.cmd('appservice plan list -g {}'.format(resource_group), checks=[
            JMESPathCheckV2('[0].name', plan)
        ])
    @ResourceGroupPreparer()
    def test_auto_delete_plan(self, resource_group):
        """Deleting the last webapp in a plan removes the now-empty plan."""
        webapp_name = 'webapp-delete2'
        plan = 'webapp-delete-plan2'
        self.cmd('appservice plan create -g {} -n {} -l westus'.format(resource_group, plan))
        self.cmd('appservice plan update -g {} -n {} --sku S1'.format(resource_group, plan), checks=[
            JMESPathCheckV2('name', plan),
            JMESPathCheckV2('sku.tier', 'Standard'),
            JMESPathCheckV2('sku.name', 'S1')
        ])
        self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan))
        self.cmd('webapp delete -g {} -n {}'.format(resource_group, webapp_name))
        # test empty service plan should be automatically deleted.
        self.cmd('appservice plan list -g {}'.format(resource_group), checks=[
            JMESPathCheckV2('length(@)', 0)
        ])
class WebappConfigureTest(ResourceGroupVCRTestBase):
    """Recorded test covering site config, app settings, hostnames,
    connection strings, and the deployment user.
    """
    def __init__(self, test_method):
        super(WebappConfigureTest, self).__init__(__file__, test_method, resource_group='azurecli-webapp-config')
        self.webapp_name = 'webapp-config-test'
    def test_webapp_config(self):
        self.execute()
    def set_up(self):
        """Provision the plan and webapp the body() steps operate on."""
        super(WebappConfigureTest, self).set_up()
        plan = 'webapp-config-plan'
        plan_result = self.cmd('appservice plan create -g {} -n {} --sku S1'.format(self.resource_group, plan))
        self.cmd('webapp create -g {} -n {} --plan {}'.format(self.resource_group, self.webapp_name, plan_result['id']))
    def body(self):
        # site config testing
        # verify the baseline (dead 'result =' assignment removed; the value
        # was never read before being overwritten below)
        self.cmd('webapp config show -g {} -n {}'.format(self.resource_group, self.webapp_name), checks=[
            JMESPathCheck('alwaysOn', False),
            JMESPathCheck('autoHealEnabled', False),
            JMESPathCheck('phpVersion', '5.6'),
            JMESPathCheck('netFrameworkVersion', 'v4.0'),
            JMESPathCheck('pythonVersion', ''),
            JMESPathCheck('use32BitWorkerProcess', True),
            JMESPathCheck('webSocketsEnabled', False)
        ])
        # update and verify
        # NOTE(review): the command below sets --net-framework-version v3.5
        # while the check expects 'v3.0' — presumably service-side
        # normalization; confirm before changing either side.
        checks = [
            JMESPathCheck('alwaysOn', True),
            JMESPathCheck('autoHealEnabled', True),
            JMESPathCheck('phpVersion', '7.0'),
            JMESPathCheck('netFrameworkVersion', 'v3.0'),
            JMESPathCheck('pythonVersion', '3.4'),
            JMESPathCheck('use32BitWorkerProcess', False),
            JMESPathCheck('webSocketsEnabled', True)
        ]
        self.cmd('webapp config set -g {} -n {} --always-on true --auto-heal-enabled true --php-version 7.0 --net-framework-version v3.5 --python-version 3.4 --use-32bit-worker-process=false --web-sockets-enabled=true'.format(
            self.resource_group, self.webapp_name), checks=checks)
        self.cmd('webapp config show -g {} -n {}'.format(self.resource_group, self.webapp_name), checks=checks)
        # site appsettings testing
        # update
        # BUG FIX: the last value check used to repeat 'foo', so the value of
        # s3 ('bar2') was never verified; it now checks 'bar2'.
        self.cmd('webapp config appsettings set -g {} -n {} --settings s1=foo s2=bar s3=bar2'.format(self.resource_group, self.webapp_name), checks=[
            JMESPathCheck("length([?name=='s1'])", 1),
            JMESPathCheck("length([?name=='s2'])", 1),
            JMESPathCheck("length([?name=='s3'])", 1),
            JMESPathCheck("length([?value=='foo'])", 1),
            JMESPathCheck("length([?value=='bar'])", 1),
            JMESPathCheck("length([?value=='bar2'])", 1)
        ])
        # show
        result = self.cmd('webapp config appsettings list -g {} -n {}'.format(self.resource_group, self.webapp_name))
        s2 = next((x for x in result if x['name'] == 's2'))
        self.assertEqual(s2['name'], 's2')
        self.assertEqual(s2['slotSetting'], False)
        self.assertEqual(s2['value'], 'bar')
        self.assertEqual(set([x['name'] for x in result]), set(['s1', 's2', 's3', 'WEBSITE_NODE_DEFAULT_VERSION']))
        # delete
        self.cmd('webapp config appsettings delete -g {} -n {} --setting-names s1 s2'.format(self.resource_group, self.webapp_name), checks=[
            JMESPathCheck("length([?name=='s3'])", 1)
        ])
        result = self.cmd('webapp config appsettings list -g {} -n {}'.format(self.resource_group, self.webapp_name))
        self.assertEqual(set([x['name'] for x in result]), set(['s3', 'WEBSITE_NODE_DEFAULT_VERSION']))
        # hostnames
        self.cmd('webapp config hostname list -g {} --webapp-name {}'.format(self.resource_group, self.webapp_name), checks=[
            JMESPathCheck('length(@)', 1),
            JMESPathCheck('[0].name', '{0}.azurewebsites.net'.format(self.webapp_name))
        ])
        # site connection string tests
        self.cmd('webapp config connection-string set -t mysql -g {} -n {} --settings c1="conn1" c2=conn2 --slot-settings c3=conn3'.format(self.resource_group, self.webapp_name))
        result = self.cmd('webapp config connection-string list -g {} -n {}'.format(self.resource_group, self.webapp_name), checks=[
            JMESPathCheck('length([])', 3),
            JMESPathCheck("[?name=='c1']|[0].slotSetting", False),
            JMESPathCheck("[?name=='c1']|[0].value.type", 'MySql'),
            JMESPathCheck("[?name=='c1']|[0].value.value", 'conn1'),
            JMESPathCheck("[?name=='c2']|[0].slotSetting", False),
            JMESPathCheck("[?name=='c3']|[0].slotSetting", True),
        ])
        self.cmd('webapp config connection-string delete -g {} -n {} --setting-names c1 c3'.format(self.resource_group, self.webapp_name))
        result = self.cmd('webapp config connection-string list -g {} -n {}'.format(self.resource_group, self.webapp_name), checks=[
            JMESPathCheck('length([])', 1),
            JMESPathCheck('[0].slotSetting', False),
            JMESPathCheck('[0].name', 'c2')
        ])
        # see deployment user
        result = self.cmd('webapp deployment user show')
        self.assertTrue(result['type'])  # just make sure the command does return something
class WebappScaleTest(ResourceGroupVCRTestBase):
    """Recorded test of plan scaling: shared SKU baseline, scale up, scale
    down, and scale out (worker count).
    """
    def __init__(self, test_method):
        super(WebappScaleTest, self).__init__(__file__, test_method, resource_group='azurecli-webapp-scale')
    def test_webapp_scale(self):
        self.execute()
    def body(self):
        plan = 'webapp-scale-plan'
        # start with shared sku
        self.cmd('appservice plan create -g {} -n {} --sku SHARED'.format(self.resource_group, plan), checks=[
            JMESPathCheck('sku.name', 'D1'),
            JMESPathCheck('sku.tier', 'Shared'),
            JMESPathCheck('sku.size', 'D1'),
            JMESPathCheck('sku.family', 'D'),
            JMESPathCheck('sku.capacity', 0)  # 0 means the default value: 1 instance
        ])
        # scale up
        self.cmd('appservice plan update -g {} -n {} --sku S2'.format(self.resource_group, plan), checks=[
            JMESPathCheck('sku.name', 'S2'),
            JMESPathCheck('sku.tier', 'Standard'),
            JMESPathCheck('sku.size', 'S2'),
            JMESPathCheck('sku.family', 'S')
        ])
        # scale down
        self.cmd('appservice plan update -g {} -n {} --sku B1'.format(self.resource_group, plan), checks=[
            JMESPathCheck('sku.name', 'B1'),
            JMESPathCheck('sku.tier', 'Basic'),
            JMESPathCheck('sku.size', 'B1'),
            JMESPathCheck('sku.family', 'B')
        ])
        # scale out
        self.cmd('appservice plan update -g {} -n {} --number-of-workers 2'.format(self.resource_group, plan), checks=[
            JMESPathCheck('sku.name', 'B1'),
            JMESPathCheck('sku.tier', 'Basic'),
            JMESPathCheck('sku.size', 'B1'),
            JMESPathCheck('sku.family', 'B'),
            JMESPathCheck('sku.capacity', 2)
        ])
class AppServiceBadErrorPolishTest(ResourceGroupVCRTestBase):
    """Verifies that creating a webapp whose name already exists (in a
    different resource group) surfaces a polished, user-friendly error.
    """
    def __init__(self, test_method):
        super(AppServiceBadErrorPolishTest, self).__init__(__file__, test_method, resource_group='clitest-error')
        # A second resource group is managed manually in set_up/tear_down.
        self.resource_group2 = 'clitest-error2'
        self.webapp_name = 'webapp-error-test123'
        self.plan = 'webapp-error-plan'
    def test_appservice_error_polish(self):
        self.execute()
    def set_up(self):
        """Create the webapp in group 1 and a same-named plan in group 2."""
        super(AppServiceBadErrorPolishTest, self).set_up()
        self.cmd('group create -n {} -l westus'.format(self.resource_group2))
        self.cmd('appservice plan create -g {} -n {} --sku b1'.format(self.resource_group, self.plan))
        self.cmd('webapp create -g {} -n {} --plan {}'.format(self.resource_group, self.webapp_name, self.plan))
        self.cmd('appservice plan create -g {} -n {} --sku b1'.format(self.resource_group2, self.plan))
    def tear_down(self):
        # The manually-created second group must be removed explicitly.
        super(AppServiceBadErrorPolishTest, self).tear_down()
        self.cmd('group delete -n {} --yes'.format(self.resource_group2))
    def body(self):
        # we will try to produce an error by try creating 2 webapp with same name in different groups
        self.cmd('webapp create -g {} -n {} --plan {}'.format(self.resource_group2, self.webapp_name, self.plan),
                 allowed_exceptions='Website with given name {} already exists'.format(self.webapp_name))
# this test doesn't contain the ultimate verification which you need to manually load the frontpage in a browser
class LinuxWebappSceanrioTest(ScenarioTest):
    """Linux webapp scenarios: runtime-based create, startup file config,
    container CI/CD hook, and custom container registry settings.

    NOTE(review): 'Sceanrio' is a typo in the class name, kept for
    compatibility with any external references.
    """
    @ResourceGroupPreparer()
    def test_linux_webapp(self, resource_group):
        runtime = 'node|6.4'
        plan = 'webapp-linux-plan2'
        webapp = 'webapp-linux2'
        self.cmd('appservice plan create -g {} -n {} --sku S1 --is-linux' .format(resource_group, plan), checks=[
            JMESPathCheckV2('reserved', True),  # this weird field means it is a linux
            JMESPathCheckV2('sku.name', 'S1'),
        ])
        self.cmd('webapp create -g {} -n {} --plan {} --runtime {}'.format(resource_group, webapp, plan, runtime), checks=[
            JMESPathCheckV2('name', webapp),
        ])
        self.cmd('webapp list -g {}'.format(resource_group), checks=[
            JMESPathCheckV2('length([])', 1),
            JMESPathCheckV2('[0].name', webapp)
        ])
        self.cmd('webapp config set -g {} -n {} --startup-file {}'.format(resource_group, webapp, 'process.json'), checks=[
            JMESPathCheckV2('appCommandLine', 'process.json')
        ])
        # Enabling CD should yield an scm-hosted docker webhook URL.
        result = self.cmd('webapp deployment container config -g {} -n {} --enable-cd true'.format(resource_group, webapp)).get_output_in_json()
        self.assertTrue(result['CI_CD_URL'].startswith('https://'))
        self.assertTrue(result['CI_CD_URL'].endswith('.scm.azurewebsites.net/docker/hook'))
        result = self.cmd('webapp config container set -g {} -n {} --docker-custom-image-name {} --docker-registry-server-password {} --docker-registry-server-user {} --docker-registry-server-url {}'.format(
            resource_group, webapp, 'foo-image', 'foo-password', 'foo-user', 'foo-url')).get_output_in_json()
        self.assertEqual(set(x['value'] for x in result if x['name'] == 'DOCKER_REGISTRY_SERVER_PASSWORD'), set([None]))  # we mask the password
        result = self.cmd('webapp config container show -g {} -n {} '.format(resource_group, webapp)).get_output_in_json()
        self.assertEqual(set(x['name'] for x in result), set(['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME', 'DOCKER_CUSTOM_IMAGE_NAME', 'DOCKER_REGISTRY_SERVER_PASSWORD']))
        self.assertEqual(set(x['value'] for x in result if x['name'] == 'DOCKER_REGISTRY_SERVER_PASSWORD'), set([None]))  # we mask the password
        sample = next((x for x in result if x['name'] == 'DOCKER_REGISTRY_SERVER_URL'))
        self.assertEqual(sample, {'name': 'DOCKER_REGISTRY_SERVER_URL', 'slotSetting': False, 'value': 'foo-url'})
        # Deleting the container config should leave no container settings.
        self.cmd('webapp config container delete -g {} -n {}'.format(resource_group, webapp))
        result2 = self.cmd('webapp config container show -g {} -n {} '.format(resource_group, webapp)).get_output_in_json()
        self.assertEqual(result2, [])
class WebappACRSceanrioTest(ScenarioTest):
    """Verifies that pointing a Linux webapp at an admin-enabled ACR registry
    auto-populates the registry username from the ACR credentials.

    NOTE(review): 'Sceanrio' is a typo in the class name, kept as-is.
    """
    @ResourceGroupPreparer()
    def test_acr_integration(self, resource_group):
        plan = 'plan11'
        webapp = 'webappacrtest11'
        runtime = 'node|6.4'
        # Registry name intentionally matches the webapp name.
        acr_registry_name = webapp
        self.cmd('acr create --admin-enabled -g {} -n {} --sku Basic'.format(resource_group, acr_registry_name))
        self.cmd('appservice plan create -g {} -n {} --sku S1 --is-linux' .format(resource_group, plan))
        self.cmd('webapp create -g {} -n {} --plan {} --runtime {}'.format(resource_group, webapp, plan, runtime))
        creds = self.cmd('acr credential show -n {}'.format(acr_registry_name)).get_output_in_json()
        self.cmd('webapp config container set -g {0} -n {1} --docker-custom-image-name {2}.azurecr.io/image-name:latest --docker-registry-server-url https://{2}.azurecr.io'.format(
            resource_group, webapp, acr_registry_name), checks=[
                JMESPathCheckV2("[?name=='DOCKER_REGISTRY_SERVER_USERNAME']|[0].value", creds['username'])
        ])
class WebappGitScenarioTest(ResourceGroupVCRTestBase):
    """Recorded test of git-based deployment source config/show/delete."""
    def __init__(self, test_method):
        super(WebappGitScenarioTest, self).__init__(__file__, test_method, resource_group='cli-webapp-git4')
    def test_webapp_git(self):
        self.execute()
    def body(self):
        plan = 'webapp-git-plan5'
        webapp = 'web-git-test2'
        # You can create and use any repros with the 3 files under "./sample_web"
        test_git_repo = 'https://github.com/yugangw-msft/azure-site-test'
        self.cmd('appservice plan create -g {} -n {} --sku S1'.format(self.resource_group, plan))
        self.cmd('webapp create -g {} -n {} --plan {}'.format(self.resource_group, webapp, plan))
        # Configure the source and verify both the config output and 'show'.
        self.cmd('webapp deployment source config -g {} -n {} --repo-url {} --branch {} --manual-integration'.format(self.resource_group, webapp, test_git_repo, 'master'), checks=[
            JMESPathCheck('repoUrl', test_git_repo),
            JMESPathCheck('isMercurial', False),
            JMESPathCheck('branch', 'master')
        ])
        self.cmd('webapp deployment source show -g {} -n {}'.format(self.resource_group, webapp), checks=[
            JMESPathCheck('repoUrl', test_git_repo),
            JMESPathCheck('isMercurial', False),
            JMESPathCheck('branch', 'master')
        ])
        # Deleting the source should clear the repo URL.
        self.cmd('webapp deployment source delete -g {} -n {}'.format(self.resource_group, webapp), checks=[
            JMESPathCheck('repoUrl', None)
        ])
class WebappSlotScenarioTest(ResourceGroupVCRTestBase):
    """Recorded test of deployment slots: creation, git config per slot,
    swapping with production and between slots, cloning configuration, and
    slot/sticky app settings and connection strings.
    """
    def __init__(self, test_method):
        super(WebappSlotScenarioTest, self).__init__(__file__, test_method, resource_group='cli-webapp-slot')
        self.plan = 'webapp-slot-test2-plan'
        self.webapp = 'web-slot-test2'
    def test_webapp_slot(self):
        self.execute()
    def body(self):
        plan_result = self.cmd('appservice plan create -g {} -n {} --sku S1'.format(self.resource_group, self.plan))
        self.cmd('webapp create -g {} -n {} --plan {}'.format(self.resource_group, self.webapp, plan_result['id']))
        # You can create and use any repros with the 3 files under "./sample_web" and with a 'staging 'branch
        slot = 'staging'
        slot2 = 'dev'
        test_git_repo = 'https://github.com/yugangw-msft/azure-site-test'
        test_php_version = '5.6'
        # create a few app-settings to test they can be cloned
        self.cmd('webapp config appsettings set -g {} -n {} --settings s1=v1 --slot-settings s2=v2'.format(self.resource_group, self.webapp))
        # create an empty slot
        self.cmd('webapp deployment slot create -g {} -n {} --slot {}'.format(self.resource_group, self.webapp, slot), checks=[
            JMESPathCheck('name', slot)
        ])
        # The slot tracks the git branch of the same name ('staging').
        self.cmd('webapp deployment source config -g {} -n {} --repo-url {} --branch {} -s {} --manual-integration'.format(self.resource_group, self.webapp, test_git_repo, slot, slot), checks=[
            JMESPathCheck('repoUrl', test_git_repo),
            JMESPathCheck('branch', slot)
        ])
        # swap with prod and verify the git branch also switched
        self.cmd('webapp deployment slot swap -g {} -n {} -s {}'.format(self.resource_group, self.webapp, slot))
        result = self.cmd('webapp config appsettings list -g {} -n {} -s {}'.format(self.resource_group, self.webapp, slot))
        self.assertEqual(set([x['name'] for x in result]), set(['s1']))
        # create a new slot by cloning from prod slot
        self.cmd('webapp config set -g {} -n {} --php-version {}'.format(self.resource_group, self.webapp, test_php_version))
        self.cmd('webapp deployment slot create -g {} -n {} --slot {} --configuration-source {}'.format(self.resource_group, self.webapp, slot2, self.webapp))
        self.cmd('webapp config show -g {} -n {} --slot {}'.format(self.resource_group, self.webapp, slot2), checks=[
            JMESPathCheck("phpVersion", test_php_version),
        ])
        # Slot settings are sticky (slotSetting True); plain settings are not.
        self.cmd('webapp config appsettings set -g {} -n {} --slot {} --settings s3=v3 --slot-settings s4=v4'.format(self.resource_group, self.webapp, slot2), checks=[
            JMESPathCheck("[?name=='s4']|[0].slotSetting", True),
            JMESPathCheck("[?name=='s3']|[0].slotSetting", False),
        ])
        self.cmd('webapp config connection-string set -g {} -n {} -t mysql --slot {} --settings c1=connection1 --slot-settings c2=connection2'.format(self.resource_group, self.webapp, slot2))
        # verify we can swap with non production slot
        self.cmd('webapp deployment slot swap -g {} -n {} --slot {} --target-slot {}'.format(self.resource_group, self.webapp, slot, slot2), checks=NoneCheck())
        # After the swap, sticky values stay with their slot; others travel.
        result = self.cmd('webapp config appsettings list -g {} -n {} --slot {}'.format(self.resource_group, self.webapp, slot2))
        self.assertEqual(set([x['name'] for x in result]), set(['s1', 's4']))
        result = self.cmd('webapp config connection-string list -g {} -n {} --slot {}'.format(self.resource_group, self.webapp, slot2))
        self.assertEqual(set([x['name'] for x in result]), set(['c2']))
        result = self.cmd('webapp config appsettings list -g {} -n {} --slot {}'.format(self.resource_group, self.webapp, slot))
        self.assertTrue(set(['s3']).issubset(set([x['name'] for x in result])))
        result = self.cmd('webapp config connection-string list -g {} -n {} --slot {}'.format(self.resource_group, self.webapp, slot))
        self.assertEqual(set([x['name'] for x in result]), set(['c1']))
        self.cmd('webapp deployment slot list -g {} -n {}'.format(self.resource_group, self.webapp), checks=[
            JMESPathCheck("length([])", 2),
            JMESPathCheck("length([?name=='{}'])".format(slot2), 1),
            JMESPathCheck("length([?name=='{}'])".format(slot), 1),
        ])
        self.cmd('webapp deployment slot delete -g {} -n {} --slot {}'.format(self.resource_group, self.webapp, slot), checks=NoneCheck())
class WebappSlotTrafficRouting(ScenarioTest):
    """Traffic-routing scenarios: route a percentage of traffic to a slot,
    inspect the routing rules, and clear them.
    """
    @ResourceGroupPreparer()
    def test_traffic_routing(self, resource_group):
        webapp = 'clitestwebtraffic'
        plan_result = self.cmd('appservice plan create -g {} -n {} --sku S1'.format(resource_group, 'clitesttrafficplan')).get_output_in_json()
        self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group, webapp, plan_result['id']))
        # You can create and use any repros with the 3 files under "./sample_web" and with a 'staging 'branch
        slot = 'staging'
        # create an empty slot
        self.cmd('webapp deployment slot create -g {} -n {} --slot {}'.format(resource_group, webapp, slot))
        # Route 15% of traffic to the slot and verify via set and show.
        self.cmd('webapp traffic-routing set -g {} -n {} -d {}=15'.format(resource_group, webapp, slot), checks=[
            JMESPathCheckV2("[0].actionHostName", slot + '.azurewebsites.net'),
            JMESPathCheckV2("[0].reroutePercentage", 15.0)
        ])
        self.cmd('webapp traffic-routing show -g {} -n {}'.format(resource_group, webapp), checks=[
            JMESPathCheckV2("[0].actionHostName", slot + '.azurewebsites.net'),
            JMESPathCheckV2("[0].reroutePercentage", 15.0)
        ])
        self.cmd('webapp traffic-routing clear -g {} -n {}'.format(resource_group, webapp))
class WebappSlotSwapScenarioTest(ScenarioTest):
    """Slot-swap lifecycle: preview (applies target's non-sticky settings),
    complete, and reset, verified via the sticky setting 's1'.
    """
    @ResourceGroupPreparer()
    def test_webapp_slot_swap(self, resource_group):
        plan = 'slot-swap-plan2'
        webapp = 'slot-swap-web2'
        plan_result = self.cmd('appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan)).get_output_in_json()
        self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group, webapp, plan_result['id']))
        # You can create and use any repros with the 3 files under "./sample_web" and with a 'staging 'branch
        slot = 'staging'
        self.cmd('webapp config appsettings set -g {} -n {} --slot-settings s1=prod'.format(resource_group, webapp))
        # create an empty slot
        self.cmd('webapp deployment slot create -g {} -n {} --slot {}'.format(resource_group, webapp, slot))
        self.cmd('webapp config appsettings set -g {} -n {} --slot-settings s1=slot --slot {}'.format(resource_group, webapp, slot))
        # swap with preview
        self.cmd('webapp deployment slot swap -g {} -n {} -s {} --action preview'.format(resource_group, webapp, slot))
        self.cmd('webapp config appsettings list -g {} -n {} --slot {}'.format(resource_group, webapp, slot), checks=[
            JMESPathCheckV2("[?name=='s1']|[0].value", 'prod')
        ])
        # complete the swap
        self.cmd('webapp deployment slot swap -g {} -n {} -s {}'.format(resource_group, webapp, slot))
        self.cmd('webapp config appsettings list -g {} -n {} --slot {}'.format(resource_group, webapp, slot), checks=[
            JMESPathCheckV2("[?name=='s1']|[0].value", 'slot')
        ])
        # reset
        self.cmd('webapp deployment slot swap -g {} -n {} -s {} --action reset'.format(resource_group, webapp, slot))
        self.cmd('webapp config appsettings list -g {} -n {} --slot {}'.format(resource_group, webapp, slot), checks=[
            JMESPathCheckV2("[?name=='s1']|[0]", None)
        ])
class WebappSSLCertTest(ResourceGroupVCRTestBase):
    """Recorded test of SSL certificate upload, SNI bind, unbind, and delete
    against a pre-generated PFX checked in next to this test file.
    """
    def __init__(self, test_method):
        super(WebappSSLCertTest, self).__init__(__file__, test_method, resource_group='test_cli_webapp_ssl')
        self.webapp_name = 'webapp-ssl-test123'
    def test_webapp_ssl(self):
        self.execute()
    def body(self):
        plan = 'webapp-ssl-test-plan'
        # Cert Generated using
        # https://docs.microsoft.com/en-us/azure/app-service-web/web-sites-configure-ssl-certificate#bkmk_ssopenssl
        pfx_file = os.path.join(TEST_DIR, 'server.pfx')
        cert_password = 'test'
        cert_thumbprint = 'DB2BA6898D0B330A93E7F69FF505C61EF39921B6'
        self.cmd('appservice plan create -g {} -n {} --sku B1'.format(self.resource_group, plan))
        # BUG FIX: a stray 'self.location' argument was passed to format()
        # with no matching placeholder; it was dead and has been removed.
        self.cmd('webapp create -g {} -n {} --plan {}'.format(self.resource_group, self.webapp_name, plan))
        # Upload the certificate and verify the reported thumbprint.
        self.cmd('webapp config ssl upload -g {} -n {} --certificate-file "{}" --certificate-password {}'.format(self.resource_group, self.webapp_name, pfx_file, cert_password), checks=[
            JMESPathCheck('thumbprint', cert_thumbprint)
        ])
        # Bind with SNI, then unbind, checking the per-hostname SSL state.
        self.cmd('webapp config ssl bind -g {} -n {} --certificate-thumbprint {} --ssl-type {}'.format(self.resource_group, self.webapp_name, cert_thumbprint, 'SNI'), checks=[
            JMESPathCheck("hostNameSslStates|[?name=='{}.azurewebsites.net']|[0].sslState".format(self.webapp_name), 'SniEnabled'),
            JMESPathCheck("hostNameSslStates|[?name=='{}.azurewebsites.net']|[0].thumbprint".format(self.webapp_name), cert_thumbprint)
        ])
        self.cmd('webapp config ssl unbind -g {} -n {} --certificate-thumbprint {}'.format(self.resource_group, self.webapp_name, cert_thumbprint), checks=[
            JMESPathCheck("hostNameSslStates|[?name=='{}.azurewebsites.net']|[0].sslState".format(self.webapp_name), 'Disabled'),
        ])
        self.cmd('webapp config ssl delete -g {} --certificate-thumbprint {}'.format(self.resource_group, cert_thumbprint))
        self.cmd('webapp delete -g {} -n {}'.format(self.resource_group, self.webapp_name))
class WebappBackupConfigScenarioTest(ResourceGroupVCRTestBase):
    """Scenario test for `webapp config backup update/show`: schedule and database settings."""

    def __init__(self, test_method):
        super(WebappBackupConfigScenarioTest, self).__init__(__file__, test_method, resource_group='cli-webapp-backup')
        self.webapp_name = 'azurecli-webapp-backupconfigtest'

    def test_webapp_backup_config(self):
        # Entry point: run the recorded scenario through the VCR harness.
        self.execute()

    def set_up(self):
        # The backup feature requires a Standard (S1) plan.
        super(WebappBackupConfigScenarioTest, self).set_up()
        plan = 'webapp-backup-plan'
        plan_result = self.cmd('appservice plan create -g {} -n {} --sku S1'.format(self.resource_group, plan))
        self.cmd('webapp create -g {} -n {} --plan {} -l {}'.format(self.resource_group, self.webapp_name, plan_result['id'], self.location))

    def body(self):
        # Expired SAS URL / connection string: inert fixtures, replayed from
        # the VCR cassette rather than hitting live storage.
        sas_url = 'https://azureclistore.blob.core.windows.net/sitebackups?sv=2015-04-05&sr=c&sig=%2FjH1lEtbm3uFqtMI%2BfFYwgrntOs1qhGnpGv9uRibJ7A%3D&se=2017-02-14T04%3A53%3A28Z&sp=rwdl'
        frequency = '1d'
        db_conn_str = 'Server=tcp:cli-backup.database.windows.net,1433;Initial Catalog=cli-db;Persist Security Info=False;User ID=cliuser;Password=cli!password1;MultipleActiveResultSets=False;Encrypt=True;TrustServerCertificate=False;Connection Timeout=30;'
        retention_period = 5
        # set without databases
        self.cmd('webapp config backup update -g {} --webapp-name {} --frequency {} --container-url {} --retain-one true --retention {}'
                 .format(self.resource_group, self.webapp_name, frequency, sas_url, retention_period), checks=NoneCheck())
        checks = [
            JMESPathCheck('backupSchedule.frequencyInterval', 1),
            JMESPathCheck('backupSchedule.frequencyUnit', 'Day'),
            JMESPathCheck('backupSchedule.keepAtLeastOneBackup', True),
            JMESPathCheck('backupSchedule.retentionPeriodInDays', retention_period)
        ]
        self.cmd('webapp config backup show -g {} --webapp-name {}'.format(self.resource_group, self.webapp_name), checks=checks)
        # update with databases
        database_name = 'cli-db'
        database_type = 'SqlAzure'
        self.cmd('webapp config backup update -g {} --webapp-name {} --db-connection-string "{}" --db-name {} --db-type {} --retain-one true'
                 .format(self.resource_group, self.webapp_name, db_conn_str, database_name, database_type), checks=NoneCheck())
        checks = [
            JMESPathCheck('backupSchedule.frequencyInterval', 1),
            JMESPathCheck('backupSchedule.frequencyUnit', 'Day'),
            JMESPathCheck('backupSchedule.keepAtLeastOneBackup', True),
            JMESPathCheck('backupSchedule.retentionPeriodInDays', retention_period),
            JMESPathCheck('databases[0].connectionString', db_conn_str),
            JMESPathCheck('databases[0].databaseType', database_type),
            JMESPathCheck('databases[0].name', database_name)
        ]
        self.cmd('webapp config backup show -g {} --webapp-name {}'.format(self.resource_group, self.webapp_name), checks=checks)
        # update frequency and retention only
        frequency = '18h'
        retention_period = 7
        self.cmd('webapp config backup update -g {} --webapp-name {} --frequency {} --retain-one false --retention {}'
                 .format(self.resource_group, self.webapp_name, frequency, retention_period), checks=NoneCheck())
        checks = [
            JMESPathCheck('backupSchedule.frequencyInterval', 18),
            JMESPathCheck('backupSchedule.frequencyUnit', 'Hour'),
            JMESPathCheck('backupSchedule.keepAtLeastOneBackup', False),
            JMESPathCheck('backupSchedule.retentionPeriodInDays', retention_period),
            JMESPathCheck('databases[0].connectionString', db_conn_str),
            JMESPathCheck('databases[0].databaseType', database_type),
            JMESPathCheck('databases[0].name', database_name)
        ]
        self.cmd('webapp config backup show -g {} --webapp-name {}'.format(self.resource_group, self.webapp_name), checks=checks)
class WebappBackupRestoreScenarioTest(ResourceGroupVCRTestBase):
    """Scenario test for `webapp config backup create/list/restore`."""

    def __init__(self, test_method):
        super(WebappBackupRestoreScenarioTest, self).__init__(__file__, test_method, resource_group='cli-webapp-backup')
        self.webapp_name = 'azurecli-webapp-backuptest3'

    def test_webapp_backup_restore(self):
        # Entry point: run the recorded scenario through the VCR harness.
        self.execute()

    def set_up(self):
        # The backup feature requires a Standard (S1) plan.
        super(WebappBackupRestoreScenarioTest, self).set_up()
        plan = 'webapp-backup-plan'
        plan_result = self.cmd('appservice plan create -g {} -n {} --sku S1'.format(self.resource_group, plan))
        self.cmd('webapp create -g {} -n {} --plan {} -l {}'.format(self.resource_group, self.webapp_name, plan_result['id'], self.location))

    def body(self):
        # Expired SAS URL / connection string: inert fixtures replayed from
        # the VCR cassette.
        sas_url = 'https://azureclistore.blob.core.windows.net/sitebackups?sv=2015-04-05&sr=c&sig=PJpE6swgZ6oZNFTlUz0GOIl87KKdvvgX7Ap8YXKHRp8%3D&se=2017-03-10T23%3A40%3A24Z&sp=rwdl'
        db_conn_str = 'Server=tcp:cli-backup.database.windows.net,1433;Initial Catalog=cli-db;Persist Security Info=False;User ID=cliuser;Password=cli!password1;MultipleActiveResultSets=False;Encrypt=True;TrustServerCertificate=False;Connection Timeout=30;'
        database_name = 'cli-db'
        database_type = 'SqlAzure'
        backup_name = 'mybackup'
        create_checks = [
            JMESPathCheck('backupItemName', backup_name),
            JMESPathCheck('storageAccountUrl', sas_url),
            JMESPathCheck('databases[0].connectionString', db_conn_str),
            JMESPathCheck('databases[0].databaseType', database_type),
            JMESPathCheck('databases[0].name', database_name)
        ]
        self.cmd('webapp config backup create -g {} --webapp-name {} --container-url {} --db-connection-string "{}" --db-name {} --db-type {} --backup-name {}'
                 .format(self.resource_group, self.webapp_name, sas_url, db_conn_str, database_name, database_type, backup_name), checks=create_checks)
        list_checks = [
            JMESPathCheck('[-1].backupItemName', backup_name),
            JMESPathCheck('[-1].storageAccountUrl', sas_url),
            JMESPathCheck('[-1].databases[0].connectionString', db_conn_str),
            JMESPathCheck('[-1].databases[0].databaseType', database_type),
            JMESPathCheck('[-1].databases[0].name', database_name)
        ]
        self.cmd('webapp config backup list -g {} --webapp-name {}'.format(self.resource_group, self.webapp_name), checks=list_checks)
        import time
        time.sleep(300)  # Allow plenty of time for a backup to finish -- database backup takes a while (skipped in playback)
        self.cmd('webapp config backup restore -g {} --webapp-name {} --container-url {} --backup-name {} --db-connection-string "{}" --db-name {} --db-type {} --ignore-hostname-conflict --overwrite'
                 .format(self.resource_group, self.webapp_name, sas_url, backup_name, db_conn_str, database_name, database_type), checks=JMESPathCheck('name', self.webapp_name))
class FunctionAppWithPlanE2ETest(ResourceGroupVCRTestBase):
    """End-to-end test for a function app hosted on a dedicated App Service plan."""

    def __init__(self, test_method):
        super(FunctionAppWithPlanE2ETest, self).__init__(__file__, test_method, resource_group='azurecli-functionapp-e2e')

    def test_functionapp_asp_e2e(self):
        self.execute()

    def body(self):
        rg = self.resource_group
        functionapp_name = 'functionapp-e2e3'
        plan = 'functionapp-e2e-plan'
        storage = 'functionappplanstorage'
        # Provision the hosting plan and the backing storage account first.
        self.cmd('appservice plan create -g {} -n {}'.format(rg, plan))
        self.cmd('appservice plan list -g {}'.format(rg))
        self.cmd('storage account create --name {} -g {} -l westus --sku Standard_LRS'.format(storage, rg))
        expected = [
            JMESPathCheck('state', 'Running'),
            JMESPathCheck('name', functionapp_name),
            JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')
        ]
        self.cmd('functionapp create -g {} -n {} -p {} -s {}'.format(rg, functionapp_name, plan, storage), checks=expected)
        self.cmd('functionapp delete -g {} -n {}'.format(rg, functionapp_name))
class FunctionAppWithConsumptionPlanE2ETest(ResourceGroupVCRTestBase):
    """End-to-end test for a function app on a consumption (serverless) plan."""

    def __init__(self, test_method):
        super(FunctionAppWithConsumptionPlanE2ETest, self).__init__(__file__, test_method, resource_group='azurecli-functionapp-c-e2e')

    def test_functionapp_consumption_e2e(self):
        self.execute()

    def body(self):
        rg = self.resource_group
        functionapp_name = 'functionappconsumption'
        location = 'westus'
        storage = 'functionaconstorage'
        # The storage account must exist before the consumption-plan app.
        self.cmd('storage account create --name {} -g {} -l {} --sku Standard_LRS'.format(storage, rg, location))
        expected = [
            JMESPathCheck('state', 'Running'),
            JMESPathCheck('name', functionapp_name),
            JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')
        ]
        self.cmd('functionapp create -g {} -n {} -c {} -s {}'.format(rg, functionapp_name, location, storage), checks=expected)
        self.cmd('functionapp delete -g {} -n {}'.format(rg, functionapp_name))
# Allow the suite to be run directly as a script.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "2bced1ea1ee3e1ab1721b7707e300d03",
"timestamp": "",
"source": "github",
"line_count": 732,
"max_line_length": 257,
"avg_line_length": 58.05874316939891,
"alnum_prop": 0.6278735970258124,
"repo_name": "samedder/azure-cli",
"id": "297e4014a3ab27e9dcc4bfb47eeb6bc95be597c0",
"size": "42844",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/command_modules/azure-cli-appservice/azure/cli/command_modules/appservice/tests/test_webapp_commands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11279"
},
{
"name": "C++",
"bytes": "275"
},
{
"name": "JavaScript",
"bytes": "380"
},
{
"name": "Python",
"bytes": "5627973"
},
{
"name": "Shell",
"bytes": "25031"
}
],
"symlink_target": ""
} |
from tensorflow.python import debug as tf_debug
import tensorflow as tf
from tensorflow.python.client import timeline
import numpy as np

# Demo script: build a TF v1 graph that fits y = 0.1 * x + 0.3 on synthetic
# data, with the session wrapped in the local CLI debugger.
tf.logging.set_verbosity(tf.logging.INFO)

# Synthetic training data: y = 0.1 * x + 0.3 plus small Gaussian noise.
x_train = np.random.rand(100000).astype(np.float32)
print(x_train)
noise = np.random.normal(scale=0.01, size=len(x_train))
y_train = x_train * 0.1 + 0.3 + noise
print(y_train)

# Held-out test data drawn the same way.
x_test = np.random.rand(len(x_train)).astype(np.float32)
print(x_test)
noise = np.random.normal(scale=0.01, size=len(x_train))
y_test = x_test * 0.1 + 0.3 + noise
print(y_test)

# Scalar model parameters; no explicit initializer or dtype is given, so the
# tf.get_variable defaults apply.
W = tf.get_variable(shape=[], name='weights')
print(W)
b = tf.get_variable(shape=[], name='bias')
print(b)

# Linear model: y_pred = W * x + b over a batch of observed x values.
x_observed = tf.placeholder(shape=[None], dtype=tf.float32, name='x_observed')
print(x_observed)
y_pred = W * x_observed + b
print (y_pred)

init_op = tf.global_variables_initializer()
print(init_op)

config = tf.ConfigProto(
    log_device_placement=True,
)
sess = tf.Session(config=config)
# Wrap the session so every run() call drops into the CLI debugger.
sess = tf_debug.LocalCLIDebugWrapperSession(sess)
with tf.device("/device:cpu:0"):
    sess.run(init_op)

def test(x, y):
    # NOTE(review): loss_op and y_observed are defined later in this script;
    # this only works if test() is not called before they exist -- confirm.
    return sess.run(loss_op, feed_dict={x_observed: x, y_observed: y})

y_observed = tf.placeholder(shape=[None], dtype=tf.float32, name='y_observed')
print(y_observed)

# Mean-squared-error loss and a plain SGD optimizer (learning rate 0.5).
loss_op = tf.reduce_mean(tf.square(y_pred - y_observed))
optimizer_op = tf.train.GradientDescentOptimizer(0.5)
train_op = optimizer_op.minimize(loss_op)
print("loss:", loss_op)
print("optimizer:", optimizer_op)
print("train:", train_op)
# Print the (randomly initialized) parameter values.
print("W: %f" % sess.run(W))
print("b: %f" % sess.run(b))
| {
"content_hash": "35f1146b4ad3a90f026e0f2cc184c2cd",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 78,
"avg_line_length": 22.83582089552239,
"alnum_prop": 0.703921568627451,
"repo_name": "shareactorIO/pipeline",
"id": "612ae489a3f11badd4996983b17d20c35faa1e6a",
"size": "1530",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "gpu.ml/src/main/python/debug/debug_model_cpu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "36325"
},
{
"name": "Batchfile",
"bytes": "63654"
},
{
"name": "C",
"bytes": "1759"
},
{
"name": "C++",
"bytes": "50538"
},
{
"name": "CSS",
"bytes": "548116"
},
{
"name": "Cuda",
"bytes": "12823"
},
{
"name": "Go",
"bytes": "9555"
},
{
"name": "Groovy",
"bytes": "24769"
},
{
"name": "HTML",
"bytes": "146027580"
},
{
"name": "Java",
"bytes": "109991"
},
{
"name": "JavaScript",
"bytes": "644060"
},
{
"name": "Jupyter Notebook",
"bytes": "17753504"
},
{
"name": "Makefile",
"bytes": "357"
},
{
"name": "PLSQL",
"bytes": "2470"
},
{
"name": "PLpgSQL",
"bytes": "3657"
},
{
"name": "Protocol Buffer",
"bytes": "692822"
},
{
"name": "Python",
"bytes": "844350"
},
{
"name": "Scala",
"bytes": "228848"
},
{
"name": "Shell",
"bytes": "176444"
},
{
"name": "XSLT",
"bytes": "80778"
}
],
"symlink_target": ""
} |
"""
A two dimensional table having row and column headers.
"""
import copy
import h2o
from frame import _is_list_of_lists
class H2OTwoDimTable(object):
    """
    A class representing a 2D table (for pretty printing output).

    Note: this is Python 2 code (print statements, xrange, list-returning zip).
    """

    def __init__(self, row_header=None, col_header=None, col_types=None,
                 table_header=None, raw_cell_values=None,
                 col_formats=None, cell_values=None, table_description=None):
        # row_header / col_header: row and column labels.
        # col_types: per-column type names ('integer', 'double', ...) used by
        #   _parse_values to coerce raw string values.
        # cell_values: pre-parsed row-major values; when falsy,
        #   raw_cell_values is parsed (and transposed) via _parse_values.
        self.row_header = row_header
        self.col_header = col_header
        self.col_types = col_types
        self.table_header = table_header
        self.cell_values = cell_values if cell_values else self._parse_values(raw_cell_values, col_types)
        self.col_formats = col_formats
        self.table_description = table_description

    def show(self, header=True):
        # Pretty-print the table; tables longer than 20 rows are truncated to
        # the first and last 5 rows separated by a "---" row.
        #if h2o.can_use_pandas():
        #  import pandas
        #  pandas.options.display.max_rows = 20
        #  print pandas.DataFrame(self.cell_values,columns=self.col_header)
        #  return
        print
        if header:
            print self.table_header + ":",
            if self.table_description: print self.table_description
        print
        # Deep copy so truncation never mutates the stored cell values.
        table = copy.deepcopy(self.cell_values)
        nr = 0
        if _is_list_of_lists(table): nr = len(table)  # only set if we truly have multiple rows... not just one long row :)
        if nr > 20:  # create a truncated view of the table, first/last 5 rows
            trunc_table = []
            trunc_table += [v for v in table[:5]]
            trunc_table.append(["---"] * len(table[0]))
            trunc_table += [v for v in table[(nr - 5):]]
            table = trunc_table
        h2o.H2ODisplay(table, self.col_header, numalign="left", stralign="left")

    def __repr__(self):
        # Render via show() for interactive sessions; the repr text itself is
        # intentionally empty.
        self.show()
        return ""

    def _parse_values(self, values, types):
        # Coerce raw column-major string values according to *types*, then
        # return the transposed (row-major) result.
        # NOTE: mutates *values* in place; in Python 2 zip returns a list.
        if self.col_header[0] is None:
            # Drop a leading unnamed column before parsing.
            self.col_header = self.col_header[1:]
            types = types[1:]
            values = values[1:]
        for col_index, column in enumerate(values):
            for row_index, row_value in enumerate(column):
                if types[col_index] == 'integer':
                    values[col_index][row_index] = "" if row_value is None else int(float(row_value))
                elif types[col_index] in ['double', 'float', 'long']:
                    values[col_index][row_index] = "" if row_value is None else float(row_value)
                else:  # string?
                    continue
        return zip(*values)  # transpose the values! <3 splat ops

    def __getitem__(self, item):
        if item in self.col_header:  # single col selection returns list
            return list(zip(*self.cell_values)[self.col_header.index(item)])
        elif isinstance(item, slice):  # row selection if item is slice returns H2OTwoDimTable
            # NOTE(review): slicing mutates self.cell_values and returns self
            # rather than producing a new table -- repeated slicing compounds.
            self.cell_values = [self.cell_values[ii] for ii in xrange(*item.indices(len(self.cell_values)))]
            return self
        elif isinstance(item, list) and set(item).issubset(self.col_header):  # multiple col selection returns list of cols
            return [list(zip(*self.cell_values)[self.col_header.index(i)]) for i in item]
        else:
            raise TypeError('can not support getting item for ' + str(item))
def __setitem__(self, key, value):
cols = zip(*self.cell_values)
if len(cols[0]) != len(value): raise ValueError('value must be same length as columns')
if key not in self.col_header:
self.col_header.append(key)
cols.append(tuple(value))
else:
cols[self.col_header.index(key)] = value
self.cell_values = [list(x) for x in zip(*cols)] | {
"content_hash": "11af5d4832fa1c993c1327ce2bd8aa9c",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 119,
"avg_line_length": 38.29545454545455,
"alnum_prop": 0.6498516320474778,
"repo_name": "madmax983/h2o-3",
"id": "53a013fb72b35448f41798d58dfbd239a788bfe2",
"size": "3370",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "h2o-py/h2o/two_dim_table.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "CSS",
"bytes": "162402"
},
{
"name": "CoffeeScript",
"bytes": "262107"
},
{
"name": "Emacs Lisp",
"bytes": "8927"
},
{
"name": "HTML",
"bytes": "139398"
},
{
"name": "Java",
"bytes": "5770492"
},
{
"name": "JavaScript",
"bytes": "38932"
},
{
"name": "Makefile",
"bytes": "34048"
},
{
"name": "Python",
"bytes": "2721983"
},
{
"name": "R",
"bytes": "1611237"
},
{
"name": "Rebol",
"bytes": "7059"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "22834"
},
{
"name": "Shell",
"bytes": "46382"
},
{
"name": "TeX",
"bytes": "535732"
}
],
"symlink_target": ""
} |
"""Tests for aggregations."""
from __future__ import unicode_literals
import mock
from timesketch.lib.analyzers.similarity_scorer import SimilarityScorer
from timesketch.lib.analyzers.similarity_scorer import SimilarityScorerConfig
from timesketch.lib.testlib import BaseTest
from timesketch.lib.testlib import MockDataStore
class TestSimilarityScorerConfig(BaseTest):
    """Tests for the functionality of the config object."""

    def test_config(self):
        """Test that the config exposes the expected attribute values."""
        data_type = "test:test"
        index = "test_index"
        config = SimilarityScorerConfig(data_type=data_type, index_name=index)
        # Expected attribute values. The redundant "{0}".format(...) wrappers
        # around plain strings were removed -- they produced identical values.
        compare_config = {
            "index_name": index,
            "data_type": data_type,
            "query": 'data_type:"{0}"'.format(data_type),
            "field": "message",
            "delimiters": [" ", "-", "/"],
            "threshold": config.DEFAULT_THRESHOLD,
            "num_perm": config.DEFAULT_PERMUTATIONS,
        }
        self.assertIsInstance(config, SimilarityScorerConfig)
        for k, v in compare_config.items():
            self.assertEqual(v, getattr(config, k))
class TestSimilarityScorer(BaseTest):
    """Tests for the functionality of the scorer object."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Fixture values shared by the tests below.
        self.test_data_type = "test:test"
        self.test_index = "test_index"
        self.test_text = "This is a test text-with tests/test"

    @mock.patch("timesketch.lib.analyzers.interface.OpenSearchDataStore", MockDataStore)
    def test_scorer(self):
        """Test scorer object."""
        # The datastore is mocked out, so no live OpenSearch is needed.
        scorer = SimilarityScorer(
            index_name=self.test_index, sketch_id=1, data_type=self.test_data_type
        )
        self.assertIsInstance(scorer, SimilarityScorer)
| {
"content_hash": "2e1ab53fd43e897110430e4bdf5bfcfe",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 88,
"avg_line_length": 36.56,
"alnum_prop": 0.6422319474835886,
"repo_name": "google/timesketch",
"id": "255094457f8c701f7aadbe06f214fad0b712face",
"size": "2424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "timesketch/lib/analyzers/similarity_scorer_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "245"
},
{
"name": "Dockerfile",
"bytes": "3735"
},
{
"name": "HTML",
"bytes": "8718"
},
{
"name": "JavaScript",
"bytes": "97456"
},
{
"name": "Jupyter Notebook",
"bytes": "340247"
},
{
"name": "Makefile",
"bytes": "593"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "PowerShell",
"bytes": "7120"
},
{
"name": "Python",
"bytes": "1859758"
},
{
"name": "SCSS",
"bytes": "17377"
},
{
"name": "Shell",
"bytes": "22830"
},
{
"name": "Vue",
"bytes": "584825"
}
],
"symlink_target": ""
} |
import requests
import threading
from bs4 import BeautifulSoup
import Queue
from sys import argv
def formUrl(user_id, page):
    """Build the AJAX URL for one page of a user's beer ratings."""
    template = 'https://www.ratebeer.com/ajax/user/{0}/beer-ratings/{1}/5/'
    return template.format(user_id, page)
def getPagnination(soup):
    """Return the number of listing pages advertised by *soup*.

    The count is read from the last pagination link's href; a page without a
    usable pagination block is treated as a single page.
    """
    try:
        pages = int(soup.ul.findChildren()[-1]['href'].split('/')[-3])
    except (AttributeError, IndexError, KeyError, TypeError, ValueError):
        # Was a bare ``except:`` -- narrowed to the failures a missing or
        # malformed pagination block can actually produce, so unrelated bugs
        # and KeyboardInterrupt are no longer swallowed.
        pages = 1
    return pages
def reviewPages(user_id):
    """Return the URLs of every ratings-listing page for the given user."""
    first_page = formUrl(user_id, 1)
    soup = BeautifulSoup(requests.get(first_page).text)
    page_count = getPagnination(soup)
    return [formUrl(user_id, page) for page in xrange(1, page_count + 1)]
def getReview(soup):
    """Extract the integer OVERALL score from a review page's soup."""
    sections = soup.findAll('div', class_='curvy')[-1].text.split('/')
    for part in sections:
        if u'OVERALL' in part:
            return int(part[-2:].strip())
def fetchreqs(url, out, get_id=False):
    """Fetch *url* and append the response to the shared list *out*.

    When *get_id* is true, append a (response, beer_id) pair instead, with the
    beer id parsed from the URL. Designed as a threading.Thread target, so
    results are communicated through *out* rather than a return value.
    """
    req = requests.get(url)
    # Idiomatic truthiness test (was: ``if get_id == True:``).
    if get_id:
        out.append((req, getId(url)))
    else:
        out.append(req)
def threadedReqs(url_list, get_id=False):
    """Fetch every URL concurrently via fetchreqs worker threads.

    Results are appended to a shared list (list.append is thread-safe in
    CPython), so the returned order follows completion order, not input order.
    *get_id* is forwarded to fetchreqs to request (response, beer_id) pairs.
    """
    out = []
    # Forward get_id through args instead of duplicating the whole
    # thread-construction branch (was an ``if get_id == False`` if/else).
    extra = (True,) if get_id else ()
    threads = [threading.Thread(target=fetchreqs, args=(url, out) + extra)
               for url in url_list]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    return out
def getSoup(reqs, get_id=False):
    """Parse responses (optionally paired with beer ids) into soup objects."""
    if get_id == False:
        return [req.text for req in ()] if False else [BeautifulSoup(req.text) for req in reqs]
    parsed = []
    for req, beer_id in reqs:
        parsed.append((BeautifulSoup(req.text), beer_id))
    return parsed
def reviewlinks(soup):
    """Collect absolute URLs of the beer-review links in a listing page."""
    return [u'http://www.ratebeer.com' + a['href']
            for a in soup.tbody.findAll('a')
            if a['href'][:6] == u'/beer/']
def gatherReviewUrls(soup_list):
    """Flatten the review links found across all listing-page soups."""
    return [url for soup in soup_list for url in reviewlinks(soup)]
def gatherReviews(soup_beerid_list):
    """Pair each parsed review score with its beer id."""
    return [(getReview(soup), beer_id) for soup, beer_id in soup_beerid_list]
def getId(url):
    """Extract the numeric beer id embedded in a review URL."""
    parts = url.split('/')
    return int(parts[-3])
def scrapeProfile(user_id):
    """Collect (overall_score, beer_id) pairs for every review by *user_id*."""
    # request all pages containing list of reviews beers
    listing_reqs = threadedReqs(reviewPages(user_id))
    # create beautiful soup objects and parse for links to user reviews
    review_urls = gatherReviewUrls(getSoup(listing_reqs))
    # request all pages containing reviews and assemble list of review - beer id pairs
    review_soups = getSoup(threadedReqs(review_urls, get_id=True), get_id=True)
    return gatherReviews(review_soups)
if __name__ == '__main__':
    # Fix: was ``scrapProfile`` (a typo causing a NameError at runtime);
    # call the actual entry point defined above.
    scrapeProfile(argv[1])
| {
"content_hash": "e6191e6f558bfaf9b5f8a0e08723fb22",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 93,
"avg_line_length": 25.00943396226415,
"alnum_prop": 0.6589966050546964,
"repo_name": "gevurtz/beer-recommender",
"id": "63d729d33891c0cb1736aa7f7b428d4b751e6cb9",
"size": "2651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webapp/profile_scraper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3896"
},
{
"name": "HTML",
"bytes": "7157"
},
{
"name": "JavaScript",
"bytes": "96670"
},
{
"name": "Jupyter Notebook",
"bytes": "353232"
},
{
"name": "Python",
"bytes": "18218"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import math
import unittest
from nose.tools import *
from py_stringmatching.similarity_measure.soundex import Soundex
class SoundexTestCases(unittest.TestCase):
    """Unit tests for the Soundex phonetic similarity measure."""

    def setUp(self):
        # Fresh Soundex instance for every test.
        self.sdx = Soundex()
def test_valid_input_raw_score(self):
self.assertEqual(self.sdx.get_raw_score('Robert', 'Rupert'), 1)
self.assertEqual(self.sdx.get_raw_score('Sue', 'S'), 1)
self.assertEqual(self.sdx.get_raw_score('robert', 'rupert'), 1)
self.assertEqual(self.sdx.get_raw_score('Gough', 'goff'), 0)
self.assertEqual(self.sdx.get_raw_score('gough', 'Goff'), 0)
self.assertEqual(self.sdx.get_raw_score('ali', 'a,,,li'), 1)
self.assertEqual(self.sdx.get_raw_score('Jawornicki', 'Yavornitzky'), 0)
self.assertEqual(self.sdx.get_raw_score('Robert', 'Robert'), 1)
self.assertEqual(self.sdx.get_raw_score('Ris..h.ab', 'Ris;hab.'), 1)
self.assertEqual(self.sdx.get_raw_score('gough', 'G2'), 1)
self.assertEqual(self.sdx.get_raw_score('robert', 'R1:6:3'), 1)
def test_valid_input_sim_score(self):
self.assertEqual(self.sdx.get_sim_score('Robert', 'Rupert'), 1)
self.assertEqual(self.sdx.get_sim_score('Sue', 'S'), 1)
self.assertEqual(self.sdx.get_sim_score('robert', 'rupert'), 1)
self.assertEqual(self.sdx.get_sim_score('Gough', 'goff'), 0)
self.assertEqual(self.sdx.get_sim_score('gough', 'Goff'), 0)
self.assertEqual(self.sdx.get_sim_score('ali', 'a,,,li'), 1)
self.assertEqual(self.sdx.get_sim_score('Jawornicki', 'Yavornitzky'), 0)
self.assertEqual(self.sdx.get_sim_score('Robert', 'Robert'), 1)
self.assertEqual(self.sdx.get_raw_score('Ris..h.ab', 'Ris;hab.'), 1)
self.assertEqual(self.sdx.get_sim_score('Gough', 'G2'), 1)
self.assertEqual(self.sdx.get_sim_score('gough', 'G2'), 1)
self.assertEqual(self.sdx.get_sim_score('robert', 'R1:6:3'), 1)
    # Invalid-input cases (nose @raises): None or non-string arguments raise
    # TypeError; empty or all-delimiter strings raise ValueError.

    @raises(TypeError)
    def test_invalid_input1_raw_score(self):
        self.sdx.get_raw_score('a', None)

    @raises(TypeError)
    def test_invalid_input2_raw_score(self):
        self.sdx.get_raw_score(None, 'b')

    @raises(TypeError)
    def test_invalid_input3_raw_score(self):
        self.sdx.get_raw_score(None, None)

    @raises(ValueError)
    def test_invalid_input4_raw_score(self):
        self.sdx.get_raw_score('a', '')

    @raises(ValueError)
    def test_invalid_input5_raw_score(self):
        self.sdx.get_raw_score('', 'This is a long string')

    @raises(TypeError)
    def test_invalid_input7_raw_score(self):
        self.sdx.get_raw_score('xyz', [''])

    @raises(TypeError)
    def test_invalid_input1_sim_score(self):
        self.sdx.get_sim_score('a', None)

    @raises(TypeError)
    def test_invalid_input2_sim_score(self):
        self.sdx.get_sim_score(None, 'b')

    @raises(TypeError)
    def test_invalid_input3_sim_score(self):
        self.sdx.get_sim_score(None, None)

    @raises(ValueError)
    def test_invalid_input4_sim_score(self):
        self.sdx.get_sim_score('a', '')

    @raises(ValueError)
    def test_invalid_input5_sim_score(self):
        self.sdx.get_sim_score('', 'This is a long string')

    @raises(TypeError)
    def test_invalid_input7_sim_score(self):
        self.sdx.get_sim_score('xyz', [''])

    @raises(ValueError)
    def test_invalid_input8_sim_score(self):
        self.sdx.get_sim_score('..,', '..abc.')

    @raises(ValueError)
    def test_invalid_input9_sim_score(self):
        self.sdx.get_sim_score('..', '')

    @raises(ValueError)
    def test_invalid_input10_sim_score(self):
        self.sdx.get_sim_score('.', '..abc,,')
@raises(TypeError)
def test_invalid_input11_sim_score(self):
self.sdx.get_sim_score('abc', 123) | {
"content_hash": "3398f7c876bd946a6cbf419d44a21163",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 80,
"avg_line_length": 36.980582524271846,
"alnum_prop": 0.6321869257022841,
"repo_name": "anhaidgroup/py_stringmatching",
"id": "e834aef8e2700f77af1a078611409bfefae0d4ed",
"size": "3825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py_stringmatching/tests/test_sim_Soundex.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3600"
},
{
"name": "PowerShell",
"bytes": "3045"
},
{
"name": "Python",
"bytes": "302520"
}
],
"symlink_target": ""
} |
from dateutil.parser import parse
import datetime
from pytz import timezone, UTC
import urllib
import urllib2
import cookielib
PROXY_FORMAT = '%Y-%m-%dT%H:%M:%S.%f%z'
def get_time_now():
    """
    Return the current moment as a timezone-aware UTC datetime.
    """
    now = datetime.datetime.utcnow()
    return now.replace(tzinfo=UTC)
def parse_time(date_str):
    """Parse *date_str* into a datetime; falsy input yields None."""
    return parse(date_str) if date_str else None
def auth(login_url, username, password):
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
params = {'login_url': '',
'login_email': username,
'login_password': password,
'login_remember_me': 'on',
'offeredNotListedPromotionFlag': ''}
login_data = urllib.urlencode(params)
resp = opener.open(login_url, login_data)
print resp.read()
return opener | {
"content_hash": "81a8a5d6f4d1cb28bc58d935f4980e89",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 70,
"avg_line_length": 25.11111111111111,
"alnum_prop": 0.6482300884955752,
"repo_name": "champs/lendingclub",
"id": "57dc47688c8afeb9fd2cec6b5d7db436c0530c73",
"size": "904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lendingclub/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "212"
},
{
"name": "Python",
"bytes": "21311"
}
],
"symlink_target": ""
} |
import copy
import json
import uuid
from django.conf import settings
from django.db import transaction
from django.db.models import Exists
from django.db.models import F
from django.db.models import OuterRef
from django.db.models import Q
from django.db.models import Subquery
from django_filters.rest_framework import CharFilter
from django_filters.rest_framework import DjangoFilterBackend
from le_utils.constants import content_kinds
from le_utils.constants import roles
from rest_framework.permissions import IsAuthenticated
from rest_framework.serializers import empty
from rest_framework.serializers import PrimaryKeyRelatedField
from rest_framework.serializers import ValidationError
from contentcuration.models import Channel
from contentcuration.models import ContentNode
from contentcuration.models import ContentTag
from contentcuration.models import File
from contentcuration.models import generate_storage_url
from contentcuration.models import PrerequisiteContentRelationship
from contentcuration.models import User
from contentcuration.viewsets.base import BulkListSerializer
from contentcuration.viewsets.base import BulkModelSerializer
from contentcuration.viewsets.base import BulkUpdateMixin
from contentcuration.viewsets.base import CopyMixin
from contentcuration.viewsets.base import RequiredFilterSet
from contentcuration.viewsets.base import ValuesViewset
from contentcuration.viewsets.common import NotNullArrayAgg
from contentcuration.viewsets.common import SQCount
from contentcuration.viewsets.common import UUIDInFilter
from contentcuration.viewsets.sync.constants import CONTENTNODE
from contentcuration.viewsets.sync.constants import DELETED
from contentcuration.viewsets.sync.constants import UPDATED
# Lazily-evaluated subquery yielding the tree_id of the orphanage root node;
# usable inside other querysets to scope work to (or away from) orphans.
orphan_tree_id_subquery = ContentNode.objects.filter(
    pk=settings.ORPHANAGE_ROOT_ID
).values_list("tree_id", flat=True)[:1]
class ContentNodeFilter(RequiredFilterSet):
    """FilterSet for ContentNode list endpoints (at least one filter required)."""

    # NOTE(review): ``name=`` is the pre-2.0 django-filter keyword (renamed to
    # ``field_name=`` later) -- this code targets the older API; confirm.
    id__in = UUIDInFilter(name="id")
    channel_root = CharFilter(method="filter_channel_root")

    class Meta:
        model = ContentNode
        fields = ("parent", "id__in", "kind", "channel_root")

    def filter_channel_root(self, queryset, name, value):
        # Restrict to direct children of the channel's main_tree root node.
        return queryset.filter(
            parent=Channel.objects.filter(pk=value).values_list(
                "main_tree__id", flat=True
            )
        )
class ContentNodeListSerializer(BulkListSerializer):
    """Bulk list serializer that also maintains prerequisite relationships."""

    def gather_prerequisites(self, validated_data, add_empty=True):
        # Pop "prerequisite" off each item and map node id -> prerequisite ids.
        # When add_empty is True, an entry is recorded even for an empty list,
        # which lets update() clear a node's existing prerequisites.
        prerequisite_ids_by_id = {}
        for obj in validated_data:
            try:
                prerequisites = obj.pop("prerequisite")
                prerequisite_ids = [prereq.id for prereq in prerequisites]
            except KeyError:
                # Item did not include the field; leave its prereqs untouched.
                pass
            else:
                if add_empty or prerequisite_ids:
                    prerequisite_ids_by_id[obj["id"]] = prerequisite_ids
        return prerequisite_ids_by_id

    def set_prerequisites(self, prerequisite_ids_by_id):
        # Diff the requested prerequisites against the current through-rows,
        # then bulk-delete the removed ones and bulk-create the new ones.
        prereqs_to_create = []
        prereqs_to_delete = []
        current_prereqs = PrerequisiteContentRelationship.objects.filter(
            target_node__in=prerequisite_ids_by_id.keys()
        )
        # Group the existing relationship rows by their target node id.
        current_prereqs_by_id = {}
        for prereq in current_prereqs:
            if prereq.target_node.id not in current_prereqs_by_id:
                current_prereqs_by_id[prereq.target_node.id] = []
            current_prereqs_by_id[prereq.target_node.id].append(prereq)
        for target_node_id, prereq_ids in prerequisite_ids_by_id.items():
            current = current_prereqs_by_id.get(target_node_id, [])
            ids_set = set(prereq_ids)
            current_set = set()
            for prereq in current:
                if prereq.prerequisite.id not in ids_set:
                    # No longer requested: schedule for deletion.
                    prereqs_to_delete.append(prereq)
                else:
                    current_set.add(prereq.prerequisite)
            # NOTE(review): ids_set holds ids while current_set holds model
            # instances, so ``ids_set - current_set`` removes nothing unless
            # instances compare equal to ids -- existing relations may be
            # re-created here; confirm against the unique constraint.
            prereqs_to_create.extend(
                [
                    PrerequisiteContentRelationship(
                        target_node_id=target_node_id, prerequisite_id=prereq_id
                    )
                    for prereq_id in ids_set - current_set
                ]
            )
        PrerequisiteContentRelationship.objects.filter(
            id__in=[p.id for p in prereqs_to_delete]
        ).delete()
        # Can simplify this in Django 2.2 by using bulk_create with ignore_conflicts
        # and just setting all required objects.
        PrerequisiteContentRelationship.objects.bulk_create(prereqs_to_create)

    def update(self, queryset, all_validated_data):
        # Standard bulk update, then synchronise prerequisite relations.
        prereqs = self.gather_prerequisites(all_validated_data)
        all_objects = super(ContentNodeListSerializer, self).update(
            queryset, all_validated_data
        )
        self.set_prerequisites(prereqs)
        return all_objects
class ContentNodeSerializer(BulkModelSerializer):
    """
    This is a write only serializer - we leverage it to do create and update
    operations, but read operations are handled by the Viewset.
    """

    # Prerequisite nodes; popped off in create() and materialised manually as
    # PrerequisiteContentRelationship rows in post_save_create().
    prerequisite = PrimaryKeyRelatedField(
        many=True, queryset=ContentNode.objects.all(), required=False
    )

    class Meta:
        model = ContentNode
        fields = (
            "id",
            "title",
            "description",
            "prerequisite",
            "kind",
            "language",
            "license",
            "license_description",
            "copyright_holder",
            "author",
            "role_visibility",
            "aggregator",
            "provider",
            "extra_fields",
            "thumbnail_encoding",
        )
        list_serializer_class = ContentNodeListSerializer

    def create(self, validated_data):
        # Stash prerequisite ids on the serializer for post_save_create().
        # Creating a new node, by default put it in the orphanage on initial creation.
        if "parent" not in validated_data:
            validated_data["parent_id"] = settings.ORPHANAGE_ROOT_ID
        prerequisites = validated_data.pop("prerequisite", [])
        self.prerequisite_ids = [prereq.id for prereq in prerequisites]
        return super(ContentNodeSerializer, self).create(validated_data)

    def post_save_create(self, instance, many_to_many=None):
        # After the node is saved, bulk-create the prerequisite through-rows
        # gathered by create().
        prerequisite_ids = getattr(self, "prerequisite_ids", [])
        super(ContentNodeSerializer, self).post_save_create(
            instance, many_to_many=many_to_many
        )
        if prerequisite_ids:
            prereqs_to_create = [
                PrerequisiteContentRelationship(
                    target_node_id=instance.id, prerequisite_id=prereq_id
                )
                for prereq_id in prerequisite_ids
            ]
            PrerequisiteContentRelationship.objects.bulk_create(prereqs_to_create)
def retrieve_thumbail_src(item):
    """ Get either the encoding or the url to use as the <img> src attribute """
    # Prefer the inline base64 encoding when it parses; fall back to the
    # storage URL built from the thumbnail file's checksum/extension.
    encoding = item.get("thumbnail_encoding")
    if encoding:
        try:
            return json.loads(encoding).get("base64")
        except ValueError:
            # Malformed JSON: ignore and fall through to the file URL.
            pass
    checksum = item["thumbnail_checksum"]
    extension = item["thumbnail_extension"]
    if checksum is not None and extension is not None:
        return generate_storage_url("{}.{}".format(checksum, extension))
    return None
def clean_content_tags(item):
    """Remove null entries from the row's aggregated content_tags."""
    return filter(lambda tag: tag is not None, item.pop("content_tags"))
def get_title(item):
    """Root nodes report the (original) channel name; others keep their title."""
    if item["parent_id"]:
        return item["title"]
    return item["original_channel_name"]
def copy_tags(from_node, to_channel_id, to_node):
    """Copy ``from_node``'s tags onto ``to_node`` in channel ``to_channel_id``.

    Creates any tag names missing from the destination channel, then links
    the destination node to the destination-channel tags matching the source
    node's tag names.

    Fixes vs. the original:
    - ``from_query``/``to_query`` filtered on the swapped channel ids, so the
      difference/link steps operated on the wrong channels.
    - Iterating ``.values("tag_name")`` yields dicts, which were passed as
      ``tag_name``; ``values_list(..., flat=True)`` yields the names/pks.
    """
    from_channel_id = from_node.get_channel().id
    from_query = ContentTag.objects.filter(channel_id=from_channel_id)
    to_query = ContentTag.objects.filter(channel_id=to_channel_id)
    # Tag names present in the source channel but missing in the destination.
    create_query = from_query.values_list("tag_name", flat=True).difference(
        to_query.values_list("tag_name", flat=True)
    )
    new_tags = [
        ContentTag(channel_id=to_channel_id, tag_name=tag_name)
        for tag_name in create_query
    ]
    ContentTag.objects.bulk_create(new_tags)
    # Querysets are lazy, so this re-query sees the rows created above.
    tag_ids = to_query.filter(
        tag_name__in=from_node.tags.values_list("tag_name", flat=True)
    ).values_list("id", flat=True)
    new_throughs = [
        ContentNode.tags.through(contentnode_id=to_node.id, contenttag_id=tag_id)
        for tag_id in tag_ids
    ]
    ContentNode.tags.through.objects.bulk_create(new_throughs)
# Read-only/derived keys that must never be applied as mods during a copy.
copy_ignore_fields = {
    "tags",
    "total_count",
    "resource_count",
    "coach_count",
    "error_count",
    "has_files",
    "invalid_exercise",
}

# Every tree attribute a channel can expose a node through.
channel_trees = (
    "main_tree",
    "chef_tree",
    "trash_tree",
    "staging_tree",
    "previous_tree",
)

# OR together one Q per channel tree: matches when the outer node's tree_id
# belongs to a channel the user can edit (edit_filter) or view (view_filter).
edit_filter = Q()
view_filter = Q()
for tree_name in channel_trees:
    edit_filter |= Q(
        **{"editable_channels__{}__tree_id".format(tree_name): OuterRef("tree_id")}
    )
    view_filter |= Q(
        **{"view_only_channels__{}__tree_id".format(tree_name): OuterRef("tree_id")}
    )
# Apply mixin first to override ValuesViewset
class ContentNodeViewSet(BulkUpdateMixin, CopyMixin, ValuesViewset):
    """Read/write viewset for ContentNode.

    Reads are served from flat ``values()`` rows shaped by ``values`` and
    ``field_map``; writes go through ``ContentNodeSerializer``. ``copy``
    clones an existing node into the orphanage inside a transaction.
    """

    queryset = ContentNode.objects.all()
    serializer_class = ContentNodeSerializer
    permission_classes = [IsAuthenticated]
    filter_backends = (DjangoFilterBackend,)
    filter_class = ContentNodeFilter
    # Columns/annotations fetched from the database for read operations.
    values = (
        "id",
        "content_id",
        "title",
        "description",
        "author",
        "assessment_items_ids",
        "prerequisite_ids",
        "provider",
        "aggregator",
        "content_tags",
        "role_visibility",
        "kind__kind",
        "language_id",
        "license_id",
        "license_description",
        "copyright_holder",
        "extra_fields",
        "node_id",
        "original_source_node_id",
        "original_channel_id",
        "original_channel_name",
        "original_node_id",
        "original_parent_id",
        "total_count",
        "resource_count",
        "error_count",
        "coach_count",
        "thumbnail_checksum",
        "thumbnail_extension",
        "thumbnail_encoding",
        "published",
        "modified",
        "has_children",
        "parent_id",
        "complete",
    )
    # Maps an output key to either a values() key (string) or a callable
    # applied to each row dict.
    field_map = {
        "language": "language_id",
        "license": "license_id",
        "tags": clean_content_tags,
        "kind": "kind__kind",
        "prerequisite": "prerequisite_ids",
        "assessment_items": "assessment_items_ids",
        "thumbnail_src": retrieve_thumbail_src,
        "title": get_title,
    }

    def get_queryset(self):
        """Nodes the user may see: editable, view-only, public, or orphaned.

        The orphanage root itself is always excluded.
        """
        # is_anonymous() is called here (pre-1.10 Django style); for anonymous
        # users user_id is False, so user_queryset is empty and the Exists()
        # annotations below evaluate to False.
        user_id = not self.request.user.is_anonymous() and self.request.user.id
        user_queryset = User.objects.filter(id=user_id)
        queryset = ContentNode.objects.annotate(
            edit=Exists(user_queryset.filter(edit_filter)),
            view=Exists(user_queryset.filter(view_filter)),
            public=Exists(
                Channel.objects.filter(
                    public=True, main_tree__tree_id=OuterRef("tree_id")
                )
            ),
        )
        # orphan_tree_id_subquery is defined elsewhere in this module.
        queryset = queryset.filter(
            Q(view=True)
            | Q(edit=True)
            | Q(public=True)
            | Q(tree_id=orphan_tree_id_subquery)
        )
        return queryset.exclude(pk=settings.ORPHANAGE_ROOT_ID)

    def get_edit_queryset(self):
        """Nodes the user may modify: editable channels plus the orphanage tree."""
        user_id = not self.request.user.is_anonymous() and self.request.user.id
        user_queryset = User.objects.filter(id=user_id)
        queryset = ContentNode.objects.annotate(
            edit=Exists(user_queryset.filter(edit_filter)),
        )
        queryset = queryset.filter(Q(edit=True) | Q(tree_id=orphan_tree_id_subquery))
        return queryset.exclude(pk=settings.ORPHANAGE_ROOT_ID)

    def annotate_queryset(self, queryset):
        """Attach the computed read-only values listed in ``values``.

        Adds MPTT-derived descendant counts, thumbnail info, original-node
        lookups, and aggregated relation-id arrays.
        """
        # Total descendant count from the MPTT interval: (rght - lft - 1) / 2.
        queryset = queryset.annotate(total_count=(F("rght") - F("lft") - 1) / 2)
        # Non-topic descendants, selected by MPTT interval containment.
        descendant_resources = (
            ContentNode.objects.filter(
                tree_id=OuterRef("tree_id"),
                lft__gt=OuterRef("lft"),
                rght__lt=OuterRef("rght"),
            )
            .exclude(kind_id=content_kinds.TOPIC)
            .order_by("id")
            .distinct("id")
            .values_list("id", flat=True)
        )
        # Get count of descendant nodes with errors
        descendant_errors = (
            ContentNode.objects.filter(
                tree_id=OuterRef("tree_id"),
                lft__gt=OuterRef("lft"),
                rght__lt=OuterRef("rght"),
            )
            .filter(complete=False)
            .order_by("id")
            .distinct("id")
            .values_list("id", flat=True)
        )
        thumbnails = File.objects.filter(
            contentnode=OuterRef("id"), preset__thumbnail=True
        )
        original_channel = Channel.objects.filter(
            Q(pk=OuterRef("original_channel_id"))
            | Q(main_tree__tree_id=OuterRef("tree_id"))
        )
        original_node = ContentNode.objects.filter(
            node_id=OuterRef("original_source_node_id")
        ).filter(node_id=F("original_source_node_id"))
        queryset = queryset.annotate(
            resource_count=SQCount(descendant_resources, field="id"),
            coach_count=SQCount(
                descendant_resources.filter(role_visibility=roles.COACH),
                field="id",
            ),
            error_count=SQCount(descendant_errors, field="id"),
            thumbnail_checksum=Subquery(thumbnails.values("checksum")[:1]),
            thumbnail_extension=Subquery(
                thumbnails.values("file_format__extension")[:1]
            ),
            original_channel_name=Subquery(original_channel.values("name")[:1]),
            original_parent_id=Subquery(original_node.values("parent_id")[:1]),
            original_node_id=Subquery(original_node.values("pk")[:1]),
            has_children=Exists(ContentNode.objects.filter(parent=OuterRef("id"))),
        )
        # Aggregate related ids/names into (NULL-stripped) arrays.
        queryset = queryset.annotate(content_tags=NotNullArrayAgg("tags__tag_name"))
        queryset = queryset.annotate(file_ids=NotNullArrayAgg("files__id"))
        queryset = queryset.annotate(
            prerequisite_ids=NotNullArrayAgg("prerequisite__id")
        )
        queryset = queryset.annotate(
            assessment_items_ids=NotNullArrayAgg("assessment_items__id")
        )
        return queryset

    def copy(self, pk, from_key=None, **mods):
        """Clone node ``from_key`` into a new node with primary key ``pk``.

        Returns ``(errors, changes)``: on success errors is None and changes
        carries an UPDATED event with the applied mods; if the source is
        missing, a DELETED event for ``pk`` is returned so the client can
        roll back its optimistic create.
        """
        delete_response = [
            dict(key=pk, table=CONTENTNODE, type=DELETED,),
        ]
        try:
            with transaction.atomic():
                try:
                    source = ContentNode.objects.get(pk=from_key)
                except ContentNode.DoesNotExist:
                    error = ValidationError("Copy source node does not exist")
                    return str(error), delete_response
                if ContentNode.objects.filter(pk=pk).exists():
                    raise ValidationError("Copy pk already exists")
                # clone the model (in-memory) and update the fields on the cloned model
                new_node = copy.copy(source)
                new_node.pk = pk
                new_node.published = False
                new_node.changed = True
                new_node.cloned_source = source
                new_node.node_id = uuid.uuid4().hex
                new_node.source_node_id = source.node_id
                # Freeze authoring data unless the user can edit the original channel.
                new_node.freeze_authoring_data = not Channel.objects.filter(
                    pk=source.original_channel_id, editors=self.request.user
                ).exists()
                # Creating a new node, by default put it in the orphanage on initial creation.
                new_node.parent_id = settings.ORPHANAGE_ROOT_ID
                # There might be some legacy nodes that don't have these, so ensure they are added
                if (
                    not new_node.original_channel_id
                    or not new_node.original_source_node_id
                ):
                    original_node = source.get_original_node()
                    original_channel = original_node.get_channel()
                    new_node.original_channel_id = (
                        original_channel.id if original_channel else None
                    )
                    new_node.original_source_node_id = original_node.node_id
                new_node.source_channel_id = mods.pop("source_channel_id", None)
                if not new_node.source_channel_id:
                    source_channel = source.get_channel()
                    new_node.source_channel_id = (
                        source_channel.id if source_channel else None
                    )
                new_node.save(force_insert=True)
                # because we don't know the tree yet, and tag data model currently uses channel,
                # we can't copy them unless we were given the new channel
                channel_id = mods.pop("channel_id", None)
                if channel_id:
                    copy_tags(source, channel_id, new_node)
                # Remove these because we do not want to define any mod operations on them during copy
                def clean_copy_data(data):
                    # Replace ignored keys with `empty` so the serializer skips them.
                    return {
                        key: empty if key in copy_ignore_fields else value
                        for key, value in data.items()
                    }
                serializer = ContentNodeSerializer(
                    instance=new_node, data=clean_copy_data(mods), partial=True
                )
                serializer.is_valid(raise_exception=True)
                node = serializer.save()
                node.save()
                return (
                    None,
                    [
                        dict(
                            key=pk,
                            table=CONTENTNODE,
                            type=UPDATED,
                            mods=clean_copy_data(serializer.validated_data),
                        ),
                    ],
                )
        except ValidationError as e:
            return e.detail, None
| {
"content_hash": "3a458271089da931fa70e294a3e065c0",
"timestamp": "",
"source": "github",
"line_count": 494,
"max_line_length": 102,
"avg_line_length": 36.502024291497975,
"alnum_prop": 0.5944986690328306,
"repo_name": "DXCanas/content-curation",
"id": "53febd45740631432e69db071e370cc6f6a3c36d",
"size": "18032",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "contentcuration/contentcuration/viewsets/contentnode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "173955"
},
{
"name": "Dockerfile",
"bytes": "2215"
},
{
"name": "HTML",
"bytes": "503467"
},
{
"name": "JavaScript",
"bytes": "601189"
},
{
"name": "Makefile",
"bytes": "3409"
},
{
"name": "Python",
"bytes": "813881"
},
{
"name": "Shell",
"bytes": "6970"
},
{
"name": "Smarty",
"bytes": "6584"
},
{
"name": "Vue",
"bytes": "21539"
}
],
"symlink_target": ""
} |
# Print the greeting. The parenthesized form produces identical output and
# is valid under both Python 2 and Python 3 (the original bare `print`
# statement is a SyntaxError on Python 3).
print("Hello, World!")
| {
"content_hash": "21f3ef1b61e3df6600f4f5c474cf1509",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 21,
"avg_line_length": 22,
"alnum_prop": 0.6818181818181818,
"repo_name": "chicio/HackerRank",
"id": "687f563f030f613a6aa60405a0aff551e79c4cac",
"size": "169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/SayHelloWorldWithPython.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "104"
},
{
"name": "C++",
"bytes": "177102"
},
{
"name": "Java",
"bytes": "55888"
},
{
"name": "JavaScript",
"bytes": "20539"
},
{
"name": "Objective-C",
"bytes": "41192"
},
{
"name": "Python",
"bytes": "9303"
},
{
"name": "Scala",
"bytes": "1520"
},
{
"name": "Swift",
"bytes": "32093"
}
],
"symlink_target": ""
} |
from short.backend.redis_backend import RedisBackend
# Registry of available storage backends, keyed by their public name.
backends = {
    "redis": RedisBackend,
}


def get_backend(name):
    """Return the backend class registered under ``name``.

    Raises KeyError for an unknown backend name.
    """
    return backends[name]
| {
"content_hash": "97cb16666ee0a4db9474f3b66cf10b63",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 52,
"avg_line_length": 16.22222222222222,
"alnum_prop": 0.726027397260274,
"repo_name": "JuhaniImberg/short-piesome",
"id": "0a281f83969b011966fb82b505d57d9f79455abb",
"size": "146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "short/backend/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2262"
},
{
"name": "Python",
"bytes": "8119"
}
],
"symlink_target": ""
} |
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
def get_search_object():
    """Build a Search over the "blog" index using a default Elasticsearch client."""
    return Search(using=Elasticsearch(), index="blog")
def get_id_from_response(response):
    """Collect the document id of every hit in a search response."""
    return [hit.meta.id for hit in response]
def get_suggested_id_from_response(suggest_obj):
    """Search every suggested term and gather the matching document ids.

    Returns a list of id-lists, one per suggested option across all entries.
    """
    approximate_ids = []
    for entry in suggest_obj:
        for option in entry['options']:
            option_hits = search_keyword(option['text'])
            approximate_ids.append(get_id_from_response(option_hits))
    return approximate_ids
def search_keyword(keyword):
    """Match ``keyword`` against post titles, attaching a term suggester on title."""
    query = (
        get_search_object()
        .query("match", title=keyword)
        .suggest("post_suggester", keyword, term={'field': 'title'})
    )
    return query.execute()
| {
"content_hash": "b165eb2ee854a874468632b57253c6f5",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 70,
"avg_line_length": 28.1,
"alnum_prop": 0.6654804270462633,
"repo_name": "Prakash2403/Blog",
"id": "dbb6283a1b0b94afb2bffe902f27d5d0557368a2",
"size": "843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39153"
},
{
"name": "HTML",
"bytes": "26479"
},
{
"name": "JavaScript",
"bytes": "87652"
},
{
"name": "Python",
"bytes": "22970"
}
],
"symlink_target": ""
} |
'''
Copyright (C) 2014 Parrot SA
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Parrot nor the names
of its contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
'''
from ARFuncs import *
from Common_BuildConfigureLibrary import *
from Common_RunAntScript import *
from Android_CreateFiles import *
from Common_HandlePrebuiltDep import *
import shutil
def Android_BuildLibrary(target, lib, clean=False, debug=False, nodeps=False, inhouse=False):
    """Build (or clean) an AR SDK library for Android.

    Builds prebuilt and library dependencies first (unless ``nodeps``), runs
    the optional autotools pass once per supported ABI, then produces the
    Java jar via one of three routes: a full Android ant project, a JNI
    project built with ndk-build + javac, or a .so-only jar for pure
    autotools libraries.

    NOTE: log/command strings are formatted with ``% locals()`` throughout,
    so local variable names in this function are load-bearing — do not
    rename them.

    :param target: build target descriptor (tracks tried/built libraries)
    :param lib: library descriptor (deps, pbdeps, path, ext, soLibs, ...)
    :param clean: remove build outputs instead of building
    :param debug: build the debug variant (``_dbg`` suffixed artifacts)
    :param nodeps: skip building dependencies
    :param inhouse: passed through to the configure/Android.mk generators
    :return: EndDumpArgs(...) result — truthy on success
    """
    # Captured before any other local is defined, so `args` holds exactly the
    # call arguments for the Start/EndDumpArgs bookkeeping.
    args = dict(locals())
    StartDumpArgs(**args)
    ANDROID_SDK_VERSION = os.environ.get('AR_ANDROID_API_VERSION')
    # Check that ANDROID_SDK_PATH and ANDROID_NDK_PATH environment variables are set
    if not os.environ.get('ANDROID_SDK_PATH') or not os.environ.get('ANDROID_NDK_PATH'):
        ARLog('ANDROID_SDK_PATH and ANDROID_NDK_PATH environment variabled must be set to build a library for %(target)s' % locals())
        return EndDumpArgs(res=False, **args)
    # Sanity check : is library valid for this target
    if not lib.isAvailableForTarget(target):
        ARLog('lib%(lib)s does not need to be built for %(target)s' % locals())
        return EndDumpArgs(res=True, **args)
    # One entry per supported ABI: NDK arch, output eabi dir name, and the
    # standalone-toolchain host triplet used to locate the cross compiler.
    KnownArchs = ({ 'arch' : 'arm', 'eabi' : 'armeabi', 'host' : 'arm-linux-androideabi' },
                  { 'arch' : 'arm', 'eabi' : 'armeabi-v7a', 'host' : 'arm-linux-androideabi' },
                  { 'arch' : 'mips', 'eabi' : 'mips', 'host' : 'mipsel-linux-android' },
                  { 'arch' : 'x86', 'eabi' : 'x86', 'host' : 'i686-linux-android' },
                  )
    # First thing : build deps
    if not nodeps:
        for pb in lib.pbdeps:
            abis = [arch['eabi'] for arch in KnownArchs]
            if not Common_HandlePrebuiltDep(target, pb, outputSuffixes=abis):
                ARLog('Error while handling prebuilt library %(pb)s' % locals())
                return EndDumpArgs(res=False, **args)
        for dep in lib.deps:
            ARLog('Building lib%(dep)s (dependancy of lib%(lib)s)' % locals())
            if target.hasAlreadyBuilt(dep):
                ARLog('Dependancy lib%(dep)s already built for %(target)s' % locals())
            elif not dep.isAvailableForTarget(target):
                ARLog('Dependancy lib%(dep)s does not need to be built for %(target)s' % locals())
            # NOTE(review): recursive builds do not forward nodeps/inhouse —
            # confirm this is intentional.
            elif Android_BuildLibrary(target, dep, clean, debug):
                ARLog('Dependancy lib%(dep)s built' % locals())
            else:
                ARLog('Error while building dependancy lib%(dep)s' %locals())
                return EndDumpArgs(res=False, **args)
    else:
        ARLog('Skipping deps building for %(lib)s' % locals())
    target.addTriedLibrary(lib)
    res = True
    libLower = lib.name.lower()
    suffix = '_dbg' if debug else ''
    libPrefix = 'lib' if not libLower.startswith('lib') else ''
    hasNative = False
    # 1> Autotools part (Optionnal)
    if Common_IsConfigureLibrary(lib):
        hasNative = True
        # Configure/build once per ABI.
        for archInfos in KnownArchs:
            # Read archInfos
            arch = archInfos['arch']
            eabi = archInfos['eabi']
            host = archInfos['host']
            # Check that the compiler is in the path
            compilerTestName = '%(host)s-gcc' % locals()
            if not ARExistsInPath(compilerTestName):
                ARLog('%(compilerTestName)s is not in your path' % locals())
                ARLog('You need to install it as a standalone toolchain to use this build script')
                ARLog('(See NDK Doc)')
                return EndDumpArgs(res=False, **args)
            # Add extra configure flags
            ExtraConfFlags = ['--host=%(host)s' % locals(),
                              '--disable-static',
                              '--enable-shared']
            LdFlagsArr=['-llog -lz']
            # External libs get extra libs via LIBS; internal ones via LDFLAGS.
            if not lib.ext:
                LdFlagsString = 'LDFLAGS=" ' + ARListAsBashArg(LdFlagsArr) + '"'
            else:
                LdFlagsString = 'LIBS=" ' + ARListAsBashArg(LdFlagsArr) + '"'
            ExtraConfFlags.append(LdFlagsString)
            if not lib.ext:
                ExtraConfFlags.append('--disable-so-version')
            if eabi == 'armeabi':
                ExtraConfFlags.append('CFLAGS=" -march=armv5te"')
            elif eabi == 'armeabi-v7a':
                ExtraConfFlags.append('CFLAGS=" -march=armv7-a"')
            # Call configure/make/make install
            stripVersionNumber = lib.ext and not clean
            # Pre-seed autoconf cache answers that cannot be probed when
            # cross-compiling; unset afterwards only if we set them here.
            forcedMalloc = ARSetEnvIfEmpty('ac_cv_func_malloc_0_nonnull', 'yes')
            forcedRealloc = ARSetEnvIfEmpty('ac_cv_func_realloc_0_nonnull', 'yes')
            retStatus = Common_BuildConfigureLibrary(target, lib, extraArgs=ExtraConfFlags, clean=clean, debug=debug, confdirSuffix=eabi, installSubDir=eabi, stripVersionNumber=stripVersionNumber, inhouse=inhouse)
            if forcedMalloc:
                ARUnsetEnv('ac_cv_func_malloc_0_nonnull')
            if forcedRealloc:
                ARUnsetEnv('ac_cv_func_realloc_0_nonnull')
            if not retStatus:
                return EndDumpArgs(res=False, **args)
    # 2 Java part (Pure Java or Java + JNI), mandatory
    # Declare path
    JniPath = lib.path + '/JNI'
    AndroidPath = lib.path + '/Android'
    JavaBuildDir = ARPathFromHere('Targets/%(target)s/Build/%(libPrefix)s%(lib)s_Java' % locals())
    JavaBuildDirDbg = ARPathFromHere('Targets/%(target)s/Build/%(libPrefix)s%(lib)s_Java_dbg' % locals())
    OutputJarDir = ARPathFromHere('Targets/%(target)s/Install/jars/release/' % locals())
    OutputJar = '%(OutputJarDir)s/%(libPrefix)s%(lib)s.jar' % locals()
    OutputJarDirDbg = ARPathFromHere('Targets/%(target)s/Install/jars/debug/' % locals())
    OutputJarDbg = '%(OutputJarDirDbg)s/%(libPrefix)s%(lib)s_dbg.jar' % locals()
    AndroidSoLib = '%(libPrefix)s%(libLower)s_android.' % locals() + target.soext
    AndroidSoLibDbg = '%(libPrefix)s%(libLower)s_android_dbg.' % locals() + target.soext
    # Select build path depending on debug flag
    ActualJavaBuildDir = JavaBuildDir if not debug else JavaBuildDirDbg
    ActualOutputJarDir = OutputJarDir if not debug else OutputJarDirDbg
    ActualOutputJar = OutputJar if not debug else OutputJarDbg
    ActualAndroidSoLib = AndroidSoLib if not debug else AndroidSoLibDbg
    # Check for full java Android projects
    if os.path.exists(AndroidPath):
        BuildXmlFile = '%(AndroidPath)s/build.xml' % locals()
        if not os.path.exists(BuildXmlFile):
            ARLog('Unable to build %(libPrefix)s%(lib)s -> Missing build.xml script' % locals())
            return EndDumpArgs(res=False, **args)
        ClassPath=os.environ.get('ANDROID_SDK_PATH') + '/platforms/android-%(ANDROID_SDK_VERSION)s/android.jar' % locals()
        for dep in lib.deps:
            ClassPath += ':%(ActualOutputJarDir)s/lib%(dep)s%(suffix)s.jar' % locals()
        for pbdep in lib.pbdeps:
            ClassPath += ':%(OutputJarDir)s/%(pbdep)s.jar' % locals()
        if not os.path.exists(ActualJavaBuildDir):
            os.makedirs(ActualJavaBuildDir)
        # clean wipes both release and debug outputs; otherwise build the
        # requested variant only.
        if clean:
            if not ARExecute('ant -f %(BuildXmlFile)s -Ddist.dir=%(OutputJarDir)s -Dbuild.dir=%(JavaBuildDir)s -Dproject.classpath=%(ClassPath)s clean' % locals()):
                return EndDumpArgs(res=False, **args)
            if not ARExecute('ant -f %(BuildXmlFile)s -Ddist.dir=%(OutputJarDirDbg)s -Dbuild.dir=%(JavaBuildDirDbg)s -Dproject.classpath=%(ClassPath)s clean' % locals()):
                return EndDumpArgs(res=False, **args)
        elif debug:
            if not ARExecute('ant -f %(BuildXmlFile)s -Ddist.dir=%(ActualOutputJarDir)s -Dbuild.dir=%(ActualJavaBuildDir)s -Dproject.classpath=%(ClassPath)s debug' % locals()):
                return EndDumpArgs(res=False, **args)
        else:
            if not ARExecute('ant -f %(BuildXmlFile)s -Ddist.dir=%(ActualOutputJarDir)s -Dbuild.dir=%(ActualJavaBuildDir)s -Dproject.classpath=%(ClassPath)s release' % locals()):
                return EndDumpArgs(res=False, **args)
    # Else, search for JNI subprojects
    elif os.path.exists(JniPath):
        mustRunAnt = False
        # Declare dirs
        JniJavaDir = '%(JniPath)s/java' % locals()
        JniCDir = '%(JniPath)s/c' % locals()
        BuildSrcDir = '%(ActualJavaBuildDir)s/src' % locals()
        BuildJniDir = '%(ActualJavaBuildDir)s/jni' % locals()
        if not clean:
            # Copy files from JNI Dirs to Build Dir
            if not os.path.exists(ActualJavaBuildDir):
                os.makedirs(ActualJavaBuildDir)
            ARCopyAndReplace(JniJavaDir, BuildSrcDir, deletePrevious=True)
            ARCopyAndReplace(JniCDir, BuildJniDir, deletePrevious=True)
            # Create Android.mk / Application.mk / AndroidManifest.xml
            Android_CreateApplicationMk(ActualJavaBuildDir)
            Android_CreateAndroidManifest(ActualJavaBuildDir, lib)
            Android_CreateAndroidMk(target, ActualJavaBuildDir, ARPathFromHere('Targets/%(target)s/Install' % locals()), lib, debug, hasNative, inhouse=inhouse)
            # Call ndk-build
            buildDir = Chdir(ActualJavaBuildDir)
            ndk_debug = ''
            if debug:
                ndk_debug = 'NDK_DEBUG=1'
            res = ARExecute(os.environ.get('ANDROID_NDK_PATH') + '/ndk-build ' + ndk_debug)
            buildDir.exit()
            if not res:
                ARLog('Error while running ndk-build')
                return EndDumpArgs(res=False, **args)
            # Call java build (+ make jar)
            classpath = ' -cp ' + os.environ.get('ANDROID_SDK_PATH') + '/platforms/android-%(ANDROID_SDK_VERSION)s/android.jar' % locals()
            if lib.deps or lib.pbdeps:
                classpath += ':"%(ActualOutputJarDir)s/*"' % locals()
            JavaFilesDir = '%(BuildSrcDir)s/com/parrot/arsdk/%(libLower)s/' % locals()
            JavaFiles = ARExecuteGetStdout(['find', JavaFilesDir, '-name', '*.java']).replace('\n', ' ')
            if not ARExecute('javac -source 1.6 -target 1.6 -sourcepath %(BuildSrcDir)s %(JavaFiles)s %(classpath)s' % locals()):
                ARLog('Error while building java sources')
                return EndDumpArgs(res=False, **args)
            if not os.path.exists(ActualOutputJarDir):
                os.makedirs(ActualOutputJarDir)
            # Move good files in a ./lib directory (instead of ./libs)
            for archInfos in KnownArchs:
                eabi = archInfos['eabi']
                JarLibDir = '%(ActualJavaBuildDir)s/lib/%(eabi)s' % locals()
                if not os.path.exists(JarLibDir):
                    os.makedirs(JarLibDir)
                for baseDir, directories, files in os.walk('%(ActualJavaBuildDir)s/libs/%(eabi)s' % locals()):
                    for _file in files:
                        if _file == '%(libPrefix)s%(libLower)s%(suffix)s.' % locals() + target.soext or _file == ActualAndroidSoLib:
                            shutil.copy2(os.path.join(baseDir, _file), os.path.join(JarLibDir, _file))
            # Create JAR File
            if not ARExecute('jar cf %(ActualOutputJar)s -C %(ActualJavaBuildDir)s ./lib -C %(BuildSrcDir)s .' % locals()):
                ARLog('Error while creating jar file')
                return EndDumpArgs(res=False, **args)
            # Copy output so libraries into target dir
            for archInfos in KnownArchs:
                eabi = archInfos['eabi']
                shutil.copy2('%(ActualJavaBuildDir)s/libs/%(eabi)s/%(ActualAndroidSoLib)s' % locals(),
                             ARPathFromHere('Targets/%(target)s/Install/%(eabi)s/lib/%(ActualAndroidSoLib)s' % locals()))
        else:
            # clean: delete jars, build dirs, and installed .so files.
            ARDeleteIfExists(OutputJarDbg, OutputJar, JavaBuildDir, JavaBuildDirDbg)
            for archInfos in KnownArchs:
                eabi = archInfos['eabi']
                LibRelease = ARPathFromHere('Targets/%(target)s/Install/%(eabi)s/lib/%(AndroidSoLib)s' % locals())
                LibDebug = ARPathFromHere('Targets/%(target)s/Install/%(eabi)s/lib/%(AndroidSoLibDbg)s' % locals())
                ARDeleteIfExists(LibRelease, LibDebug)
    # For autotools only library, just make a jar containing the .so file
    elif Common_IsConfigureLibrary(lib):
        if not clean:
            if not os.path.exists(ActualOutputJarDir):
                os.makedirs(ActualOutputJarDir)
            LibsDir = '%(ActualJavaBuildDir)s/lib' % locals()
            ARDeleteIfExists (LibsDir)
            os.makedirs(LibsDir)
            for archInfos in KnownArchs:
                eabi = archInfos['eabi']
                eabiDir = '%(LibsDir)s/%(eabi)s' % locals()
                if not os.path.exists(eabiDir):
                    os.makedirs(eabiDir)
                for soname in lib.soLibs:
                    shutil.copy2(ARPathFromHere('Targets/%(target)s/Install/%(eabi)s/lib/%(soname)s' % locals()), '%(eabiDir)s/%(soname)s' % locals())
            if not ARExecute('jar cf %(ActualOutputJar)s -C %(ActualJavaBuildDir)s ./lib' % locals()):
                ARLog('Error while creating jar file')
                return EndDumpArgs(res=False, **args)
        else:
            ARDeleteIfExists(OutputJarDbg, OutputJar)
    # Mark library as built if all went good
    if res:
        target.addBuiltLibrary(lib)
    return EndDumpArgs(res, **args)
| {
"content_hash": "334d90e04c7dc61d8822019ecc03b477",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 213,
"avg_line_length": 53.802919708029194,
"alnum_prop": 0.6163342830009497,
"repo_name": "149393437/ARSDKBuildUtils",
"id": "7df73ac33286e7733aa15ff40249cb23487390fb",
"size": "14742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Utils/Python/Android_BuildLibrary.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "198185"
},
{
"name": "Shell",
"bytes": "21687"
}
],
"symlink_target": ""
} |
from django.template import Library
from ..forms import ContactForm
register = Library()


@register.assignment_tag(takes_context=True)
def get_contact_form(context):
    """Template tag: build a ContactForm bound to the current request."""
    request = context.get('request')
    return ContactForm(request=request)
| {
"content_hash": "61e2b8bc8e4723ea68230b27ea5bf7c8",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 54,
"avg_line_length": 20.454545454545453,
"alnum_prop": 0.7733333333333333,
"repo_name": "caioariede/openimob",
"id": "bdc4550efdf6f9ebaed94fab5979884e3840ffcc",
"size": "225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/templatetags/contact_form_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "57546"
},
{
"name": "HTML",
"bytes": "52533"
},
{
"name": "JavaScript",
"bytes": "33743"
},
{
"name": "Python",
"bytes": "52176"
}
],
"symlink_target": ""
} |
from builtins import str
import jaydebeapi
from airflow.hooks.dbapi_hook import DbApiHook
class JdbcHook(DbApiHook):
    """
    General hook for jdbc db access.
    JDBC URL, username and password will be taken from the predefined connection.
    Note that the whole JDBC URL must be specified in the "host" field in the DB.
    Raises an airflow error if the given connection id doesn't exist.
    """

    conn_name_attr = 'jdbc_conn_id'
    default_conn_name = 'jdbc_default'
    supports_autocommit = True

    def get_conn(self):
        """Open a JDBC connection via jaydebeapi.

        Credentials and the full JDBC URL come from the configured Airflow
        connection; driver class name and jar path(s) come from its extras.
        """
        airflow_conn = self.get_connection(getattr(self, self.conn_name_attr))
        extras = airflow_conn.extra_dejson
        driver_path = extras.get('extra__jdbc__drv_path')
        driver_class = extras.get('extra__jdbc__drv_clsname')
        return jaydebeapi.connect(
            jclassname=driver_class,
            url=str(airflow_conn.host),
            driver_args=[str(airflow_conn.login), str(airflow_conn.password)],
            jars=driver_path.split(","),
        )

    def set_autocommit(self, conn, autocommit):
        """
        Enable or disable autocommit for the given connection.
        :param conn: The connection
        :return:
        """
        conn.jconn.setAutoCommit(autocommit)
| {
"content_hash": "d1d338aa5dda237e409f9089676f99c1",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 81,
"avg_line_length": 32.07142857142857,
"alnum_prop": 0.6109873793615441,
"repo_name": "RealImpactAnalytics/airflow",
"id": "8f0cd67c0c71cdd6b583fc3b2f50bd34e153ed74",
"size": "2161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/hooks/jdbc_hook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109698"
},
{
"name": "HTML",
"bytes": "270710"
},
{
"name": "JavaScript",
"bytes": "1988427"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "3765458"
},
{
"name": "Shell",
"bytes": "46923"
}
],
"symlink_target": ""
} |
from distutils.core import setup
# Package metadata. NOTE: the license classifier previously declared LGPL,
# contradicting the `license` field (Apache 2.0); it now matches.
setup(
    name = 'richinput',
    packages = ['richinput'],
    version = '1.0.0',
    description = 'Nonblocking text and password reader',
    author = 'Riccardo Attilio Galli',
    author_email = 'riccardo@sideralis.org',
    url = 'https://github.com/riquito/richinput',
    download_url = 'https://github.com/riquito/richinput/tarball/1.0.0',
    keywords = ['input', 'terminal', 'nonblocking', 'password'],
    license = 'Apache 2.0',
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development :: Testing'
    ],
)
| {
"content_hash": "d34a884bb5654d065d30722f4643cacd",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 89,
"avg_line_length": 37.04545454545455,
"alnum_prop": 0.6441717791411042,
"repo_name": "riquito/richinput",
"id": "3bb154b7013c44a4398b5ce56101b9ba44da75bb",
"size": "815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "71395"
}
],
"symlink_target": ""
} |
from django.db import models
from djes.models import Indexable
from elasticsearch_dsl import field
class ColorField(field.Object):
    """Just a sample custom field. In this case, a CharField that's formatted
    something like "#FFDDEE"."""

    def __init__(self, *args, **kwargs):
        super(ColorField, self).__init__(*args, **kwargs)
        # One string sub-field per colour channel (insertion order matters
        # only for readability of the generated mapping).
        for channel in ("red", "green", "blue"):
            self.properties[channel] = field.construct_field("string")

    def to_es(self, data):
        """Split a "#RRGGBB" string into its channel components for indexing."""
        return {
            "red": data[1:3],
            "green": data[3:5],
            "blue": data[5:7],
        }

    def to_python(self, data):
        """Reassemble the channel dict back into a "#RRGGBB" string."""
        return "#{red}{green}{blue}".format(**data)
class PolyParentField(field.Object):
    """Indexes a polymorphic parent by resolving it to its most-derived child."""

    def to_es(self, data):
        return self.get_poly_object(data).to_dict()

    def get_poly_object(self, data):
        """Find the concrete subclass row for ``data``, or return ``data`` itself."""
        base_cls = data.get_base_class()
        for subclass in base_cls.__subclasses__():
            # Multi-table inheritance exposes the child via an attribute
            # named after the subclass's model name.
            concrete = getattr(data, subclass._meta.model_name, None)
            if concrete:
                return concrete
        return data
class SimpleObject(Indexable):
    # Minimal indexable model exercising the basic field types.
    foo = models.IntegerField()
    bar = models.CharField(max_length=255)
    baz = models.SlugField()
    published = models.DateTimeField(null=True, blank=True)
class ManualMappingObject(SimpleObject):
    # Extends SimpleObject, exercising manual mapping overrides.
    qux = models.URLField()
    garbage = models.IntegerField()

    @property
    def status(self):
        # Constant property, indexed via the explicit mapping field below.
        return "final"

    class Mapping:
        class Meta:
            # Override the derived doc_type name.
            doc_type = "super_manual_mapping"
            # "garbage" stays on the model but is kept out of the index.
            excludes = ("garbage",)

        bar = field.String(fields={"raw": field.String(index="not_analyzed")})
        status = field.String(index="not_analyzed")
class PolyParent(Indexable):
    """Base of the polymorphic hierarchy used by the poly-mapping tests."""

    text = models.CharField(max_length=255)

    @classmethod
    def get_merged_mapping_properties(cls):
        """Merge mapping properties of this class and all its descendants.

        Pre-order depth-first, so deeper subclasses override shallower ones
        on key collisions.
        """
        merged = {}

        def visit(klass):
            merged.update(klass.search_objects.mapping.to_dict())
            for child in klass.__subclasses__():
                visit(child)

        visit(cls)
        return merged
class PolyChildA(PolyParent):
    slug = models.CharField(max_length=255)
    number = models.IntegerField()

    @property
    def slug_number(self):
        """Combined "<slug>-<number>" identifier.

        The original returned the bare format string "%s-%s" without
        interpolating the fields; interpolation is now applied.
        """
        return "%s-%s" % (self.slug, self.number)
class PolyChildB(PolyParent):
    album = models.CharField(max_length=255)
    band_name = models.CharField(max_length=255)

    @property
    def full_title(self):
        """Human-readable "<band_name> - <album>" title.

        The original returned ``self.full_title``, which recursed infinitely
        (RecursionError on any access); build the title from the model
        fields instead.
        """
        return "%s - %s" % (self.band_name, self.album)
class PolyOrphan(PolyParent):
    # Subclass deliberately left out of the polymorphic index.
    garbage = models.CharField(max_length=256)

    class Mapping:
        class Meta:
            # Marked orphaned so it does not participate in the parent mapping.
            orphaned = True
class PolyRelationship(Indexable):
    poly_parent = models.ForeignKey(PolyParent)

    class Mapping:
        # Index the parent via its concrete subclass (see PolyParentField).
        poly_parent = PolyParentField()

        class Meta:
            # Disable dynamic mapping so only declared fields are indexed.
            dynamic = False
class ChildObject(SimpleObject):
    trash = models.TextField()

    class Mapping:
        # Full-text field indexed with stemming.
        trash = field.String(analyzer="snowball")
class GrandchildObject(ChildObject):
    # Third level of the inheritance chain; adds one more field.
    qux = models.URLField()
class CustomFieldObject(Indexable):
    # Stores "#RRGGBB" strings, indexed through the custom ColorField.
    color = models.CharField(max_length=7)

    class Mapping:
        color = ColorField()
class RelatedSimpleObject(models.Model):
    # Plain (non-indexable) model used as an FK target.
    datums = models.TextField()
class RelatedNestedObject(Indexable):
    # Indexable FK target, nested into RelatableObject's document.
    denormalized_datums = models.TextField()
class RelatableObject(Indexable):
    # Exercises FKs to both a plain model and an indexable one.
    name = models.CharField(max_length=255)
    simple = models.ForeignKey(RelatedSimpleObject)
    nested = models.ForeignKey(RelatedNestedObject)
class Tag(Indexable):
    # Indexable many-to-many target.
    name = models.CharField(max_length=255)
class DumbTag(models.Model):
    # Plain (non-indexable) many-to-many target.
    name = models.CharField(max_length=255)
class RelationsTestObject(Indexable):
    """Indexable with M2M relations to both indexable and plain models."""
    data = models.CharField(max_length=255)
    # NOTE(review): related_name="tag" (singular) is the reverse accessor on Tag;
    # looks like a typo for "tags", but renaming would require a migration.
    tags = models.ManyToManyField(Tag, related_name="tag")
    dumb_tags = models.ManyToManyField(DumbTag, related_name="dumb_tags")
class SelfRelation(Indexable):
    """Indexable with a self-referencing FK that is excluded from the mapping."""
    name = models.CharField(max_length=255)
    # NOTE(review): no null=True, so every row must reference another row.
    related = models.ForeignKey("self")
    class Mapping:
        class Meta:
            # Keep the self-FK out of the index mapping.
            excludes = ("related",)
class ReverseRelationsParentObject(Indexable):
    """Indexable that pulls the reverse FK accessor "children" into its mapping."""
    name = models.CharField(max_length=255)
    class Mapping:
        class Meta:
            # "children" is the related_name on ReverseRelationsChildObject.parent.
            includes = ("children",)
class ReverseRelationsChildObject(Indexable):
    """Child side of the reverse relation; hides the FK from its own mapping."""
    name = models.CharField(max_length=255)
    parent = models.ForeignKey(ReverseRelationsParentObject, related_name="children")
    class Mapping:
        class Meta:
            # The parent is indexed from the other side (see includes above).
            excludes = ("parent",)
| {
"content_hash": "9d2f9ccc258940f295749d15d74ac4bc",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 85,
"avg_line_length": 22.36842105263158,
"alnum_prop": 0.6440641711229946,
"repo_name": "theonion/djes",
"id": "1352d3741f6da72104261d2ae0cdc4303f3d9513",
"size": "4675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/app/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "74133"
}
],
"symlink_target": ""
} |
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: widens ConceptLink.relationship choices.

    NOTE: generated migrations should not be edited by hand once applied.
    """
    dependencies = [
        ('ct', '0002_auto_20141110_1820'),
    ]
    operations = [
        migrations.AlterField(
            model_name='conceptlink',
            name='relationship',
            field=models.CharField(default='defines', max_length=10, choices=[('is', 'Represents (unique ID for)'), ('defines', 'Defines'), ('informal', 'Intuitive statement of'), ('formaldef', 'Formal definition for'), ('tests', 'Tests understanding of'), ('derives', 'Derives'), ('proves', 'Proves'), ('assumes', 'Assumes'), ('motiv', 'Motivates'), ('illust', 'Illustrates'), ('intro', 'Introduces'), ('comment', 'Comments on'), ('warns', 'Warning about')]),
        ),
    ]
| {
"content_hash": "e642ce7cd5fafce3e6bf2f933265c6ce",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 460,
"avg_line_length": 47.0625,
"alnum_prop": 0.6069057104913679,
"repo_name": "cjlee112/socraticqs2",
"id": "84d0216fae9b5eb2cdae9ad2c7d9d12c213d9190",
"size": "753",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/ct/migrations/0003_auto_20141110_2153.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "138226"
},
{
"name": "Dockerfile",
"bytes": "3865"
},
{
"name": "Gherkin",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "467395"
},
{
"name": "JavaScript",
"bytes": "234788"
},
{
"name": "Makefile",
"bytes": "4696"
},
{
"name": "Python",
"bytes": "1785754"
},
{
"name": "Shell",
"bytes": "2889"
}
],
"symlink_target": ""
} |
class AbstractLogger:
    """Interface for loggers.

    Subclasses implement log() plus the per-severity helpers; ``log_level``
    is the minimum severity a message must have to be emitted.
    """

    def __init__(self, log_level):
        self.log_level = log_level

    def log(self, message, severity):
        """Emit message if severity is at or above self.log_level."""
        raise NotImplementedError()

    def debug(self, message):
        raise NotImplementedError()

    def info(self, message):
        raise NotImplementedError()

    def warn(self, message):
        raise NotImplementedError()

    def error(self, message):
        # Fixed parameter-name typo ("messsage").
        raise NotImplementedError()

    def fatal(self, message):
        raise NotImplementedError()
class DefaultLogger(AbstractLogger):
    """Logger that prints severity-prefixed messages to stdout.

    Messages with severity below ``log_level`` are suppressed.
    """

    def __init__(self, log_level=2):
        AbstractLogger.__init__(self, log_level)
        # Severity constants, lowest to highest.
        self.DEBUG = 0
        self.INFO = 1
        self.WARN = 2
        self.ERROR = 3
        self.FATAL = 4
        self.NAME_MAP = {
            self.DEBUG: 'DEBUG',
            self.INFO: 'INFO',
            self.WARN: 'WARN',
            self.ERROR: 'ERROR',
            self.FATAL: 'FATAL'
        }

    def log(self, message, severity):
        # Clamp severity into [DEBUG, FATAL]: previously only bounded above,
        # so a severity below DEBUG raised a KeyError in NAME_MAP.
        severity = max(self.DEBUG, min(severity, self.FATAL))
        if severity >= self.log_level:
            # Call form of print: identical output on Python 2 for a single
            # argument, and fixes the py3 SyntaxError of `print x`.
            print(self.NAME_MAP[severity] + ': ' + message)

    def debug(self, message):
        self.log(message, self.DEBUG)

    def info(self, message):
        self.log(message, self.INFO)

    def warn(self, message):
        self.log(message, self.WARN)

    def error(self, message):
        self.log(message, self.ERROR)

    def fatal(self, message):
        self.log(message, self.FATAL)
| {
"content_hash": "66e62091c2617966aeda46e95b7add3d",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 49,
"avg_line_length": 16.125,
"alnum_prop": 0.6666666666666666,
"repo_name": "sgibbons/longingly",
"id": "67e6be8166303ad4a3b1d522b2f4f30c600e9bae",
"size": "1291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "longingly/logging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16705"
}
],
"symlink_target": ""
} |
"""Utility functions supporting FAUCET/Gauge config parsing."""
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import logging
import os
# pytype: disable=pyi-error
import yaml
from yaml.constructor import ConstructorError
# handle libyaml-dev not installed
try:
from yaml import CLoader as Loader # type: ignore
except ImportError:
from yaml import Loader
CONFIG_HASH_FUNC = 'sha256'
class UniqueKeyLoader(Loader):  # pylint: disable=too-many-ancestors
    """YAML loader that will reject duplicate/overwriting keys."""

    def construct_mapping(self, node, deep=False):
        """Check for duplicate YAML keys."""
        # First materialize every (key, value) pair; a malformed node
        # (non-iterable pairs) surfaces here as a TypeError.
        key_value_pairs = []
        try:
            for key_node, value_node in node.value:
                pair = (self.construct_object(key_node, deep=deep),
                        self.construct_object(value_node, deep=deep))
                key_value_pairs.append(pair)
        except TypeError as err:
            raise ConstructorError('invalid key type: %s' % err) from err
        mapping = {}
        for key, value in key_value_pairs:
            # Membership test raises TypeError for unhashable keys.
            try:
                duplicate = key in mapping
            except TypeError as type_error:
                raise ConstructorError('unhashable key: %s' % key) from type_error
            if duplicate:
                raise ConstructorError('duplicate key: %s' % key)
            mapping[key] = value
        return mapping
# Install the duplicate-key check on SafeLoader's default mapping tag, so
# yaml.safe_load() (used by read_config below) rejects duplicate keys.
yaml.SafeLoader.add_constructor(
    yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
    UniqueKeyLoader.construct_mapping)
def get_logger(logname):
    """Return the child logger used for config-parsing messages."""
    config_logname = logname + '.config'
    return logging.getLogger(config_logname)
def read_config(config_file, logname):
    """Return a parsed YAML config file or None."""
    logger = get_logger(logname)
    conf_txt = None
    conf = None
    try:
        # Read the raw text first so callers also get the file contents.
        with open(config_file, 'r') as stream:
            conf_txt = stream.read()
        conf = yaml.safe_load(conf_txt)
    except FileNotFoundError as err:  # pytype: disable=name-error
        logger.error('Could not find requested file: %s (%s)', config_file, str(err))
    except (yaml.YAMLError, UnicodeDecodeError,
            PermissionError, ValueError) as err:  # pytype: disable=name-error
        logger.error('Error in file %s (%s)', config_file, str(err))
    return conf, conf_txt
def config_hash_content(content):
    """Return hash of config file content."""
    hash_func = getattr(hashlib, CONFIG_HASH_FUNC)
    hasher = hash_func(content.encode('utf-8'))
    return hasher.hexdigest()
def config_file_hash(config_file_name):
    """Return hash of YAML config file contents."""
    with open(config_file_name) as config_file:
        content = config_file.read()
    return config_hash_content(content)
def dp_config_path(config_file, parent_file=None):
    """Return full path to config file."""
    # Relative include paths are resolved against the including file's directory.
    if not os.path.isabs(config_file) and parent_file:
        parent_dir = os.path.dirname(parent_file)
        config_file = os.path.join(parent_dir, config_file)
    return os.path.realpath(config_file)
def dp_include(config_hashes, config_contents, config_file, logname,  # pylint: disable=too-many-locals
               top_confs):
    """Handles including additional config files

    Recursively loads config_file and the files named by its 'include'
    (required) and 'include-optional' directives, merging their top level
    items into top_confs.

    Args:
        config_hashes (dict): updated with a content hash per loaded file.
        config_contents (dict): updated with raw content per loaded file.
        config_file (str): path of the config file to load.
        logname (str): logger name used for error reporting.
        top_confs (dict): top level config items, merged in place on success.
    Returns:
        bool: True if this file and all required includes loaded cleanly.
    """
    logger = get_logger(logname)
    if not os.path.isfile(config_file):
        logger.warning('not a regular file or does not exist: %s', config_file)
        return False
    conf, config_content = read_config(config_file, logname)
    if not conf:
        logger.warning('error loading config from file: %s', config_file)
        return False
    valid_conf_keys = set(top_confs.keys()).union({'include', 'include-optional', 'version'})
    unknown_top_confs = set(conf.keys()) - valid_conf_keys
    if unknown_top_confs:
        logger.error('unknown top level config items: %s', unknown_top_confs)
        return False
    # Add the SHA256 hash for this configuration file, so FAUCET can determine
    # whether or not this configuration file should be reloaded upon receiving
    # a HUP signal.
    new_config_hashes = config_hashes.copy()
    new_config_hashes[config_file] = config_hash_content(config_content)
    new_config_contents = config_contents.copy()
    new_config_contents[config_file] = config_content
    # Save the updated configuration state in separate dicts,
    # so if an error is found, the changes can simply be thrown away.
    new_top_confs = {}
    for conf_name, curr_conf in top_confs.items():
        new_top_confs[conf_name] = curr_conf.copy()
        try:
            new_top_confs[conf_name].update(conf.pop(conf_name, {}))
        except (TypeError, ValueError):
            logger.error('Invalid config for "%s"', conf_name)
            return False
    for include_directive, file_required in (
            ('include', True),
            ('include-optional', False)):
        include_values = conf.pop(include_directive, [])
        if not isinstance(include_values, list):
            logger.error('Include directive is not in a valid format')
            return False
        for include_file in include_values:
            if not isinstance(include_file, str):
                include_file = str(include_file)
            include_path = dp_config_path(include_file, parent_file=config_file)
            logger.info('including file: %s', include_path)
            if include_path in config_hashes:
                logger.error(
                    'include file %s already loaded, include loop found in file: %s',
                    include_path, config_file,)
                return False
            # Fixed: recurse with new_config_contents (was config_contents),
            # so contents gathered by a failed include are discarded,
            # consistent with how new_config_hashes is handled.
            if not dp_include(
                    new_config_hashes, new_config_contents, include_path, logname, new_top_confs):
                if file_required:
                    logger.error('unable to load required include file: %s', include_path)
                    return False
                new_config_hashes[include_path] = None
                logger.warning('skipping optional include file: %s', include_path)
    # Actually update the configuration data structures,
    # now that this file has been successfully loaded.
    config_hashes.update(new_config_hashes)
    config_contents.update(new_config_contents)
    for conf_name, new_conf in new_top_confs.items():
        top_confs[conf_name].update(new_conf)
    return True
def config_changed(top_config_file, new_top_config_file, config_hashes):
    """Return True if configuration has changed.

    Args:
        top_config_file (str): name of FAUCET config file
        new_top_config_file (str): name, possibly new, of FAUCET config file.
        config_hashes (dict): map of config file/includes and hashes of contents.
    Returns:
        bool: True if the file, or any file it includes, has changed.
    """
    # A different top-level file name always counts as a change.
    if top_config_file != new_top_config_file:
        return True
    if config_hashes is None or new_top_config_file is None:
        return False
    for config_file, old_hash in config_hashes.items():
        file_exists = os.path.isfile(config_file)
        # Config file not loaded but exists = reload.
        if old_hash is None and file_exists:
            return True
        # Config file loaded but no longer exists = reload.
        if old_hash and not file_exists:
            return True
        # Config file hash has changed = reload.
        if file_exists and config_file_hash(config_file) != old_hash:
            return True
    return False
| {
"content_hash": "69bbc23e602c3198c20f588dc084bf12",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 103,
"avg_line_length": 39.78431372549019,
"alnum_prop": 0.6521685559388861,
"repo_name": "mwutzke/faucet",
"id": "7ec0bb00c970bfe523473a28aab942b5597ceeb6",
"size": "8116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "faucet/config_parser_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2538"
},
{
"name": "Python",
"bytes": "1615277"
},
{
"name": "Shell",
"bytes": "26601"
}
],
"symlink_target": ""
} |
import os
from gunicorn.http import wsgi
# Used to configure gunicorn settings.
def build_header(name, value):
    """
    Takes a header name and value and constructs a valid string to add to the headers list.
    """
    # Normalize: no trailing colon on the name, no leading spaces or
    # trailing CR/LF on the value; terminate with CRLF.
    clean_name = name.rstrip(":")
    clean_value = value.lstrip(" ").rstrip("\r\n").rstrip("\n")
    return f"{clean_name}: {clean_value}\r\n"
class Response(wsgi.Response):
    """gunicorn Response that appends a CSP header and drops the Server header."""

    def default_headers(self, *args, **kwargs):
        headers = super(Response, self).default_headers(*args, **kwargs)
        # Double quotes would break the header value; swap them for singles.
        csp = os.getenv("CONTENT_SECURITY_POLICY", "").replace('"', "'")
        if csp:
            headers.append(build_header("Content-Security-Policy", csp))
        # Strip the Server: header to avoid advertising the server software.
        return [header for header in headers if not header.startswith("Server:")]
# Monkey-patch gunicorn's wsgi module so all responses use the subclass above.
wsgi.Response = Response
| {
"content_hash": "001dcb432ccfc6161581cb8749febbe7",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 92,
"avg_line_length": 33.23076923076923,
"alnum_prop": 0.6712962962962963,
"repo_name": "terranodo/eventkit-cloud",
"id": "715f552cce772eb09654bf70f4e1eacbeb4bde91",
"size": "864",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "eventkit_cloud/gunicorn.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "72684"
},
{
"name": "HTML",
"bytes": "87673"
},
{
"name": "JavaScript",
"bytes": "3699859"
},
{
"name": "Python",
"bytes": "634218"
},
{
"name": "Shell",
"bytes": "15117"
}
],
"symlink_target": ""
} |
import datetime
import re
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas.compat import is_platform_windows
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
_testing as tm,
bdate_range,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_store,
)
from pandas.util import _test_decorators as td
# Compression library used by the roundtrip helpers when compression=True.
_default_compressor = "blosc"
# Run this module on a single CPU worker (pytest.mark.single_cpu).
pytestmark = pytest.mark.single_cpu
def test_conv_read_write():
    """Roundtrip Series/DataFrames through to_hdf/read_hdf, plus a where query."""
    with tm.ensure_clean() as path:
        def roundtrip(key, obj, **kwargs):
            # Write under `key`, then read back for comparison.
            obj.to_hdf(path, key, **kwargs)
            return read_hdf(path, key)
        o = tm.makeTimeSeries()
        tm.assert_series_equal(o, roundtrip("series", o))
        o = tm.makeStringSeries()
        tm.assert_series_equal(o, roundtrip("string_series", o))
        o = tm.makeDataFrame()
        tm.assert_frame_equal(o, roundtrip("frame", o))
        # table
        df = DataFrame({"A": range(5), "B": range(5)})
        df.to_hdf(path, "table", append=True)
        result = read_hdf(path, "table", where=["index>2"])
        tm.assert_frame_equal(df[df.index > 2], result)
def test_long_strings(setup_path):
    """100-char strings in values and index survive a table roundtrip (GH6166)."""
    # GH6166
    df = DataFrame(
        {"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
    )
    with ensure_clean_store(setup_path) as store:
        store.append("df", df, data_columns=["a"])
        result = store.select("df")
        tm.assert_frame_equal(df, result)
def test_api(tmp_path, setup_path):
    """to_hdf accepts append AND format arguments together (GH4584)."""
    # GH4584
    # API issue when to_hdf doesn't accept append AND format args
    path = tmp_path / setup_path
    df = tm.makeDataFrame()
    df.iloc[:10].to_hdf(path, "df", append=True, format="table")
    df.iloc[10:].to_hdf(path, "df", append=True, format="table")
    tm.assert_frame_equal(read_hdf(path, "df"), df)
    # append to False
    df.iloc[:10].to_hdf(path, "df", append=False, format="table")
    df.iloc[10:].to_hdf(path, "df", append=True, format="table")
    tm.assert_frame_equal(read_hdf(path, "df"), df)
def test_api_append(tmp_path, setup_path):
    """append=True without an explicit format still creates/extends a table."""
    path = tmp_path / setup_path
    df = tm.makeDataFrame()
    df.iloc[:10].to_hdf(path, "df", append=True)
    df.iloc[10:].to_hdf(path, "df", append=True, format="table")
    tm.assert_frame_equal(read_hdf(path, "df"), df)
    # append to False
    df.iloc[:10].to_hdf(path, "df", append=False, format="table")
    df.iloc[10:].to_hdf(path, "df", append=True)
    tm.assert_frame_equal(read_hdf(path, "df"), df)
def test_api_2(tmp_path, setup_path):
    """format handling: 'fixed', its alias 'f', the default, and table appends."""
    path = tmp_path / setup_path
    df = tm.makeDataFrame()
    df.to_hdf(path, "df", append=False, format="fixed")
    tm.assert_frame_equal(read_hdf(path, "df"), df)
    df.to_hdf(path, "df", append=False, format="f")
    tm.assert_frame_equal(read_hdf(path, "df"), df)
    df.to_hdf(path, "df", append=False)
    tm.assert_frame_equal(read_hdf(path, "df"), df)
    df.to_hdf(path, "df")
    tm.assert_frame_equal(read_hdf(path, "df"), df)
    with ensure_clean_store(setup_path) as store:
        df = tm.makeDataFrame()
        _maybe_remove(store, "df")
        store.append("df", df.iloc[:10], append=True, format="table")
        store.append("df", df.iloc[10:], append=True, format="table")
        tm.assert_frame_equal(store.select("df"), df)
        # append to False
        _maybe_remove(store, "df")
        store.append("df", df.iloc[:10], append=False, format="table")
        store.append("df", df.iloc[10:], append=True, format="table")
        tm.assert_frame_equal(store.select("df"), df)
        # formats
        _maybe_remove(store, "df")
        store.append("df", df.iloc[:10], append=False, format="table")
        store.append("df", df.iloc[10:], append=True, format="table")
        tm.assert_frame_equal(store.select("df"), df)
        _maybe_remove(store, "df")
        store.append("df", df.iloc[:10], append=False, format="table")
        store.append("df", df.iloc[10:], append=True, format=None)
        tm.assert_frame_equal(store.select("df"), df)
def test_api_invalid(tmp_path, setup_path):
    """Invalid append/format combinations raise; a missing file raises."""
    path = tmp_path / setup_path
    # Invalid.
    df = tm.makeDataFrame()
    msg = "Can only append to Tables"
    with pytest.raises(ValueError, match=msg):
        df.to_hdf(path, "df", append=True, format="f")
    with pytest.raises(ValueError, match=msg):
        df.to_hdf(path, "df", append=True, format="fixed")
    msg = r"invalid HDFStore format specified \[foo\]"
    with pytest.raises(TypeError, match=msg):
        df.to_hdf(path, "df", append=True, format="foo")
    with pytest.raises(TypeError, match=msg):
        df.to_hdf(path, "df", append=False, format="foo")
    # File path doesn't exist
    path = ""
    msg = f"File {path} does not exist"
    with pytest.raises(FileNotFoundError, match=msg):
        read_hdf(path, "df")
def test_get(setup_path):
    """store.get and __getitem__ agree, with and without a leading '/'."""
    with ensure_clean_store(setup_path) as store:
        store["a"] = tm.makeTimeSeries()
        left = store.get("a")
        right = store["a"]
        tm.assert_series_equal(left, right)
        left = store.get("/a")
        right = store["/a"]
        tm.assert_series_equal(left, right)
        with pytest.raises(KeyError, match="'No object named b in the file'"):
            store.get("b")
def test_put_integer(setup_path):
    """Roundtrip a frame whose index is neither dates nor strings."""
    # non-date, non-string index
    frame = DataFrame(np.random.randn(50, 100))
    _check_roundtrip(frame, tm.assert_frame_equal, setup_path)
def test_table_values_dtypes_roundtrip(setup_path):
    """Column dtypes survive table append/select; appending a new dtype raises."""
    with ensure_clean_store(setup_path) as store:
        df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
        store.append("df_f8", df1)
        tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
        df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
        store.append("df_i8", df2)
        tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
        # incompatible dtype
        msg = re.escape(
            "invalid combination of [values_axes] on appending data "
            "[name->values_block_0,cname->values_block_0,"
            "dtype->float64,kind->float,shape->(1, 3)] vs "
            "current table [name->values_block_0,"
            "cname->values_block_0,dtype->int64,kind->integer,"
            "shape->None]"
        )
        with pytest.raises(ValueError, match=msg):
            store.append("df_i8", df1)
        # check creation/storage/retrieval of float32 (a bit hacky to
        # actually create them thought)
        df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
        store.append("df_f4", df1)
        tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
        assert df1.dtypes[0] == "float32"
        # check with mixed dtypes
        df1 = DataFrame(
            {
                c: Series(np.random.randint(5), dtype=c)
                for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
            }
        )
        df1["string"] = "foo"
        df1["float322"] = 1.0
        df1["float322"] = df1["float322"].astype("float32")
        df1["bool"] = df1["float32"] > 0
        df1["time1"] = Timestamp("20130101")
        df1["time2"] = Timestamp("20130102")
        store.append("df_mixed_dtypes1", df1)
        result = store.select("df_mixed_dtypes1").dtypes.value_counts()
        result.index = [str(i) for i in result.index]
        expected = Series(
            {
                "float32": 2,
                "float64": 1,
                "int32": 1,
                "bool": 1,
                "int16": 1,
                "int8": 1,
                "int64": 1,
                "object": 1,
                "datetime64[ns]": 2,
            }
        )
        result = result.sort_index()
        expected = expected.sort_index()
        tm.assert_series_equal(result, expected)
def test_series(setup_path):
    """Roundtrip string, time, and object-index Series."""
    s = tm.makeStringSeries()
    _check_roundtrip(s, tm.assert_series_equal, path=setup_path)
    ts = tm.makeTimeSeries()
    _check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
    ts2 = Series(ts.index, Index(ts.index, dtype=object))
    _check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
    ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
    _check_roundtrip(
        ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
    )
def test_float_index(setup_path):
    """Roundtrip a Series with a float index (GH #454)."""
    # GH #454
    float_index = np.random.randn(10)
    series = Series(np.random.randn(10), index=float_index)
    _check_roundtrip(series, tm.assert_series_equal, path=setup_path)
def test_tuple_index(setup_path):
    """Roundtrip a frame with a tuple index (GH #492); PerformanceWarning ignored."""
    # GH #492
    col = np.arange(10)
    idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
    data = np.random.randn(30).reshape((3, 10))
    DF = DataFrame(data, index=idx, columns=col)
    with catch_warnings(record=True):
        simplefilter("ignore", pd.errors.PerformanceWarning)
        _check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(setup_path):
    """Series roundtrip across many index types (mixed, dates, floats, ints)."""
    with catch_warnings(record=True):
        values = np.random.randn(2)
        func = lambda l, r: tm.assert_series_equal(l, r, check_index_type=True)
    with catch_warnings(record=True):
        ser = Series(values, [0, "y"])
        _check_roundtrip(ser, func, path=setup_path)
    with catch_warnings(record=True):
        ser = Series(values, [datetime.datetime.today(), 0])
        _check_roundtrip(ser, func, path=setup_path)
    with catch_warnings(record=True):
        ser = Series(values, ["y", 0])
        _check_roundtrip(ser, func, path=setup_path)
    with catch_warnings(record=True):
        ser = Series(values, [datetime.date.today(), "a"])
        _check_roundtrip(ser, func, path=setup_path)
    with catch_warnings(record=True):
        ser = Series(values, [0, "y"])
        _check_roundtrip(ser, func, path=setup_path)
        ser = Series(values, [datetime.datetime.today(), 0])
        _check_roundtrip(ser, func, path=setup_path)
        ser = Series(values, ["y", 0])
        _check_roundtrip(ser, func, path=setup_path)
        ser = Series(values, [datetime.date.today(), "a"])
        _check_roundtrip(ser, func, path=setup_path)
        ser = Series(values, [1.23, "b"])
        _check_roundtrip(ser, func, path=setup_path)
        ser = Series(values, [1, 1.53])
        _check_roundtrip(ser, func, path=setup_path)
        ser = Series(values, [1, 5])
        _check_roundtrip(ser, func, path=setup_path)
        ser = Series(
            values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
        )
        _check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(setup_path):
    """Roundtrip a time series with pre-1970 dates; xfail on Windows overflow."""
    dr = bdate_range("1/1/1940", "1/1/1960")
    ts = Series(np.random.randn(len(dr)), index=dr)
    try:
        _check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
    except OverflowError:
        if is_platform_windows():
            pytest.xfail("known failure on some windows platforms")
        else:
            raise
@pytest.mark.parametrize(
    "compression", [False, pytest.param(True, marks=td.skip_if_windows)]
)
def test_frame(compression, setup_path):
    """Frame roundtrips (fixed and table, with NaNs), plus consolidation check."""
    df = tm.makeDataFrame()
    # put in some random NAs
    df.values[0, 0] = np.nan
    df.values[5, 3] = np.nan
    _check_roundtrip_table(
        df, tm.assert_frame_equal, path=setup_path, compression=compression
    )
    _check_roundtrip(
        df, tm.assert_frame_equal, path=setup_path, compression=compression
    )
    tdf = tm.makeTimeDataFrame()
    _check_roundtrip(
        tdf, tm.assert_frame_equal, path=setup_path, compression=compression
    )
    with ensure_clean_store(setup_path) as store:
        # not consolidated
        df["foo"] = np.random.randn(len(df))
        store["df"] = df
        recons = store["df"]
        assert recons._mgr.is_consolidated()
    # empty
    _check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
def test_empty_series_frame(setup_path):
    """Empty Series (named/unnamed) and empty frames roundtrip unchanged."""
    s0 = Series(dtype=object)
    s1 = Series(name="myseries", dtype=object)
    df0 = DataFrame()
    df1 = DataFrame(index=["a", "b", "c"])
    df2 = DataFrame(columns=["d", "e", "f"])
    _check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
    _check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
    _check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
    _check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
    _check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@pytest.mark.parametrize("dtype", [np.int64, np.float64, object, "m8[ns]", "M8[ns]"])
def test_empty_series(dtype, setup_path):
    """Empty Series of various dtypes roundtrip unchanged."""
    s = Series(dtype=dtype)
    _check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(setup_path):
    """Roundtrip a frame indexed by datetime.date objects."""
    dates = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
    frame = DataFrame(np.random.randn(len(dates), 4), index=dates)
    _check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(setup_path, multiindex_dataframe_random_data):
    """MultiIndex frames (and transpose/column) roundtrip; index names kept."""
    frame = multiindex_dataframe_random_data
    _check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
    _check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
    _check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
    # check that the names are stored
    with ensure_clean_store(setup_path) as store:
        store["frame"] = frame
        recons = store["frame"]
        tm.assert_frame_equal(recons, frame)
@pytest.mark.parametrize(
    "compression", [False, pytest.param(True, marks=td.skip_if_windows)]
)
def test_store_mixed(compression, setup_path):
    """Mixed-dtype frames and their columns roundtrip, with optional compression."""
    def _make_one():
        df = tm.makeDataFrame()
        df["obj1"] = "foo"
        df["obj2"] = "bar"
        df["bool1"] = df["A"] > 0
        df["bool2"] = df["B"] > 0
        df["int1"] = 1
        df["int2"] = 2
        return df._consolidate()
    df1 = _make_one()
    df2 = _make_one()
    _check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
    _check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
    with ensure_clean_store(setup_path) as store:
        store["obj"] = df1
        tm.assert_frame_equal(store["obj"], df1)
        store["obj"] = df2
        tm.assert_frame_equal(store["obj"], df2)
        # check that can store Series of all of these types
        _check_roundtrip(
            df1["obj1"],
            tm.assert_series_equal,
            path=setup_path,
            compression=compression,
        )
        _check_roundtrip(
            df1["bool1"],
            tm.assert_series_equal,
            path=setup_path,
            compression=compression,
        )
        _check_roundtrip(
            df1["int1"],
            tm.assert_series_equal,
            path=setup_path,
            compression=compression,
        )
def _check_roundtrip(obj, comparator, path, compression=False, **kwargs):
    """Write obj to a fresh store, read it back, and compare via comparator."""
    options = {"complib": _default_compressor} if compression else {}
    with ensure_clean_store(path, "w", **options) as store:
        store["obj"] = obj
        retrieved = store["obj"]
        comparator(retrieved, obj, **kwargs)
def _check_roundtrip_table(obj, comparator, path, compression=False):
    """Write obj in table format to a fresh store, read it back, and compare."""
    options = {"complib": _default_compressor} if compression else {}
    with ensure_clean_store(path, "w", **options) as store:
        store.put("obj", obj, format="table")
        retrieved = store["obj"]
        comparator(retrieved, obj)
def test_unicode_index(setup_path):
    """Series with Greek-letter (unicode) index values roundtrips."""
    unicode_values = ["\u03c3", "\u03c3\u03c3"]
    # PerformanceWarning
    with catch_warnings(record=True):
        simplefilter("ignore", pd.errors.PerformanceWarning)
        s = Series(np.random.randn(len(unicode_values)), unicode_values)
        _check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_unicode_longer_encoded(setup_path):
    """Strings whose utf-8 encoding is wider than one byte roundtrip (GH 11234)."""
    # GH 11234
    char = "\u0394"
    df = DataFrame({"A": [char]})
    with ensure_clean_store(setup_path) as store:
        store.put("df", df, format="table", encoding="utf-8")
        result = store.get("df")
        tm.assert_frame_equal(result, df)
    df = DataFrame({"A": ["a", char], "B": ["b", "b"]})
    with ensure_clean_store(setup_path) as store:
        store.put("df", df, format="table", encoding="utf-8")
        result = store.get("df")
        tm.assert_frame_equal(result, df)
def test_store_datetime_mixed(setup_path):
    """Roundtrip a frame mixing int, float, str, and datetime columns."""
    frame = DataFrame({"a": [1, 2, 3], "b": [1.0, 2.0, 3.0], "c": ["a", "b", "c"]})
    ts = tm.makeTimeSeries()
    frame["d"] = ts.index[:3]
    _check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_round_trip_equals(tmp_path, setup_path):
    """DataFrame.equals holds both ways after a table roundtrip (GH 9330)."""
    # GH 9330
    df = DataFrame({"B": [1, 2], "A": ["x", "y"]})
    path = tmp_path / setup_path
    df.to_hdf(path, "df", format="table")
    other = read_hdf(path, "df")
    tm.assert_frame_equal(df, other)
    assert df.equals(other)
    assert other.equals(df)
| {
"content_hash": "7866e22378845b639b638d9dd1dbde3d",
"timestamp": "",
"source": "github",
"line_count": 555,
"max_line_length": 85,
"avg_line_length": 30.688288288288287,
"alnum_prop": 0.6031000469704086,
"repo_name": "pandas-dev/pandas",
"id": "ce71e9e99036423bbff108863bd48afd47b0bc23",
"size": "17032",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pandas/tests/io/pytables/test_round_trip.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "512"
},
{
"name": "C",
"bytes": "366145"
},
{
"name": "CSS",
"bytes": "1800"
},
{
"name": "Cython",
"bytes": "1186787"
},
{
"name": "Dockerfile",
"bytes": "1411"
},
{
"name": "HTML",
"bytes": "456531"
},
{
"name": "Python",
"bytes": "18778786"
},
{
"name": "Shell",
"bytes": "10369"
},
{
"name": "Smarty",
"bytes": "8486"
},
{
"name": "XSLT",
"bytes": "1196"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
from subprocess import PIPE, Popen
from textwrap import dedent
from pants.fs.archive import ZIP
from pants.util.contextutil import temporary_dir
class JvmPlatformIntegrationMixin(object):
"""Mixin providing lots of JvmPlatform-related integration tests to java compilers (eg, zinc)."""
  def get_pants_compile_args(self):
    """List of arguments to pants that determine what compiler to use.
    The compiling task must be the last argument (eg, compile.zinc).
    """
    # Abstract hook: each concrete integration test supplies its compiler flags.
    raise NotImplementedError
  def determine_version(self, path):
    """Given the filepath to a class file, invokes the 'file' commandline to find its java version.
    :param str path: filepath (eg, tempdir/Foo.class)
    :return: A java version string (eg, '1.6').
    """
    # Map of target version numbers to their equivalent class file versions, which are different.
    version_map = {
      '50.0': '1.6',
      '51.0': '1.7',
      '52.0': '1.8',
    }
    p = Popen(['file', path], stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    self.assertEqual(0, p.returncode, 'Failed to run file on {}.'.format(path))
    # Parse the classfile version out of `file`'s description; classfile
    # versions outside 50.0-52.0 would raise KeyError below.
    match = re.search(r'version (\d+[.]\d+)', out)
    self.assertTrue(match is not None, 'Could not determine version for {}'.format(path))
    return version_map[match.group(1)]
  def _get_jar_class_versions(self, jarname):
    """Extract dist/<jarname> and map each .class file to its java version.

    :param str jarname: jar filename under dist/.
    :return: dict of relative classfile path -> version string (eg, '1.6').
    """
    path = os.path.join('dist', jarname)
    self.assertTrue(os.path.exists(path), '{} does not exist.'.format(path))
    class_to_version = {}
    with temporary_dir() as tempdir:
      ZIP.extract(path, tempdir, filter_func=lambda f: f.endswith('.class'))
      for root, dirs, files in os.walk(tempdir):
        for name in files:
          path = os.path.abspath(os.path.join(root, name))
          class_to_version[os.path.relpath(path, tempdir)] = self.determine_version(path)
    return class_to_version
  def _get_compiled_class_versions(self, spec, more_args=None):
    """Run `pants binary` on spec and return the class-version map of the built jar.

    :param str spec: pants target spec; its basename determines the jar name.
    :param list more_args: extra pants command-line args.
    """
    more_args = more_args or []
    jar_name = os.path.basename(spec)
    # Strip trailing colons, then keep the target name after any remaining colon.
    while jar_name.endswith(':'):
      jar_name = jar_name[:-1]
    if ':' in jar_name:
      jar_name = jar_name[jar_name.find(':') + 1:]
    with temporary_dir() as cache_dir:
      config = {'cache.compile.zinc': {'write_to': [cache_dir]}}
      with self.temporary_workdir() as workdir:
        pants_run = self.run_pants_with_workdir(
          ['binary'] + self.get_pants_compile_args()
          + ['compile.checkstyle', '--skip', spec]
          + more_args,
          workdir, config)
        self.assert_success(pants_run)
        return self._get_jar_class_versions('{}.jar'.format(jar_name))
  def assert_class_versions(self, expected, received):
    """Assert two {classfile: version} dicts match, with a readable failure diff."""
    def format_dict(d):
      return ''.join('\n  {} = {}'.format(key, val) for key, val in sorted(d.items()))
    self.assertEqual(expected, received,
                     'Compiled class versions differed.\n  expected: {}\n  received: {}'
                     .format(format_dict(expected), format_dict(received)))
  def test_compile_java6(self):
    """A java6-platform target compiles to 1.6 classfiles."""
    target_spec = 'testprojects/src/java/org/pantsbuild/testproject/targetlevels/java6'
    self.assert_class_versions({
      'org/pantsbuild/testproject/targetlevels/java6/Six.class': '1.6',
    }, self._get_compiled_class_versions(target_spec))
  def test_compile_java7(self):
    """A java7-platform target compiles to 1.7 classfiles."""
    target_spec = 'testprojects/src/java/org/pantsbuild/testproject/targetlevels/java7'
    self.assert_class_versions({
      'org/pantsbuild/testproject/targetlevels/java7/Seven.class': '1.7',
    }, self._get_compiled_class_versions(target_spec))
def test_compile_java7on6(self):
target_spec = 'testprojects/src/java/org/pantsbuild/testproject/targetlevels/java7on6'
self.assert_class_versions({
'org/pantsbuild/testproject/targetlevels/java7on6/SevenOnSix.class': '1.7',
'org/pantsbuild/testproject/targetlevels/java6/Six.class': '1.6',
}, self._get_compiled_class_versions(target_spec))
def test_compile_target_coercion(self):
target_spec = 'testprojects/src/java/org/pantsbuild/testproject/targetlevels/unspecified'
self.assert_class_versions({
'org/pantsbuild/testproject/targetlevels/unspecified/Unspecified.class': '1.7',
'org/pantsbuild/testproject/targetlevels/unspecified/Six.class': '1.6',
}, self._get_compiled_class_versions(target_spec, more_args=[
'--jvm-platform-validate-check=warn',
'--jvm-platform-default-platform=java7',
]))
def _test_compile(self, target_level, class_name, source_contents, platform_args=None):
with temporary_dir(root_dir=os.path.abspath('.')) as tmpdir:
with open(os.path.join(tmpdir, 'BUILD'), 'w') as f:
f.write(dedent('''
java_library(name='{target_name}',
sources=['{class_name}.java'],
platform='{target_level}',
)
'''.format(target_name=os.path.basename(tmpdir),
class_name=class_name,
target_level=target_level)))
with open(os.path.join(tmpdir, '{}.java'.format(class_name)), 'w') as f:
f.write(source_contents)
platforms = str({
str(target_level): {
'source': str(target_level),
'target': str(target_level),
'args': platform_args or [],
}
})
command = []
command.extend(['--jvm-platform-platforms={}'.format(platforms),
'--jvm-platform-default-platform={}'.format(target_level)])
command.extend(self.get_pants_compile_args())
command.extend([tmpdir])
pants_run = self.run_pants(command)
return pants_run
def test_compile_diamond_operator_java7_works(self):
pants_run = self._test_compile('1.7', 'Diamond', dedent('''
public class Diamond<T> {
public static void main(String[] args) {
Diamond<String> diamond = new Diamond<>();
}
}
'''))
self.assert_success(pants_run)
def test_compile_diamond_operator_java6_fails(self):
pants_run = self._test_compile('1.6', 'Diamond', dedent('''
public class Diamond<T> {
public static void main(String[] args) {
Diamond<String> diamond = new Diamond<>();
}
}
'''))
self.assert_failure(pants_run)
def test_compile_with_javac_args(self):
pants_run = self._test_compile('1.7', 'LintyDiamond', dedent('''
public class LintyDiamond<T> {
public static void main(String[] args) {
LintyDiamond<String> diamond = new LintyDiamond<>();
}
}
'''), platform_args=['-C-Xlint:cast'])
self.assert_success(pants_run)
  def test_compile_stale_platform_settings(self):
    """Changing a target's platform must invalidate its previous compile."""
    # Tests that targets are properly re-compiled when their source/target levels change.
    with temporary_dir(root_dir=os.path.abspath('.')) as tmpdir:
      # Synthesize a target with NO explicit platform, so the default
      # platform flag below controls its source level.
      with open(os.path.join(tmpdir, 'BUILD'), 'w') as f:
        f.write(dedent('''
        java_library(name='diamond',
          sources=['Diamond.java'],
        )
        '''))
      with open(os.path.join(tmpdir, 'Diamond.java'), 'w') as f:
        f.write(dedent('''
        public class Diamond<T> {
          public static void main(String[] args) {
            // The diamond operator <> for generics was introduced in jdk7.
            Diamond<String> shinyDiamond = new Diamond<>();
          }
        }
        '''))
      platforms = {
        'java6': {'source': '6'},
        'java7': {'source': '7'},
      }
      # We run these all in the same working directory, because we're testing caching behavior.
      with self.temporary_workdir() as workdir:
        def compile_diamond(platform):
          # Compile the synthesized target with *platform* as the default.
          return self.run_pants_with_workdir(['--jvm-platform-platforms={}'.format(platforms),
                                              '--jvm-platform-default-platform={}'.format(platform),
                                              '-ldebug',
                                              'compile'] + self.get_pants_compile_args() +
                                             ['{}:diamond'.format(tmpdir)], workdir=workdir)
        # We shouldn't be able to compile this with -source=6.
        self.assert_failure(compile_diamond('java6'), 'Diamond.java was compiled successfully with '
                                                      'java6 starting from a fresh workdir, but '
                                                      'that should not be possible.')
        # We should be able to compile this with -source=7.
        self.assert_success(compile_diamond('java7'), 'Diamond.java failed to compile in java7, '
                                                      'which it should be able to.')
        # We still shouldn't be able to compile this with -source=6. If the below passes, it means
        # that we saved the cached run from java7 and didn't recompile, which is an error.
        self.assert_failure(compile_diamond('java6'), 'Diamond.java erroneously compiled in java6,'
                                                      ' which means that either compilation was'
                                                      ' skipped due to bad fingerprinting/caching,'
                                                      ' or the compiler failed to clean up the'
                                                      ' previous class from the java7'
                                                      ' compile.')
| {
"content_hash": "afbe6f2ff525089d3d40b10eac279018",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 100,
"avg_line_length": 43.78341013824885,
"alnum_prop": 0.5970950426270919,
"repo_name": "kwlzn/pants",
"id": "770d1e71e262044a66d10c88f4b4a8492e527d73",
"size": "9648",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/backend/jvm/tasks/jvm_compile/java/jvm_platform_integration_mixin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Cucumber",
"bytes": "919"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1746"
},
{
"name": "HTML",
"bytes": "79866"
},
{
"name": "Java",
"bytes": "450840"
},
{
"name": "JavaScript",
"bytes": "29992"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "5255934"
},
{
"name": "Scala",
"bytes": "85210"
},
{
"name": "Shell",
"bytes": "58882"
},
{
"name": "Thrift",
"bytes": "1966"
}
],
"symlink_target": ""
} |
import gnomekeyring as gkey
import argparse
# Command line interface: `repo` selects the keyring entry; giving `username`
# as well switches to store mode, while -u/-p print a single credential field.
parser = argparse.ArgumentParser(description='GNome Keyring Credentials Proxy')
# `store_true` is the idiomatic spelling of
# action='store_const', const=True, default=False.
parser.add_argument('-u',
                    dest='get_username',
                    action='store_true',
                    help='return the username')
parser.add_argument('-p',
                    dest='get_password',
                    action='store_true',
                    help='return the password')
parser.add_argument('repo',
                    nargs=1,  # delivered as a one-element list
                    type=str,
                    help='repository to use')
parser.add_argument('username',
                    nargs='?',  # optional; presence triggers store mode
                    type=str,
                    help='username to store')
def set_credentials(repo, user, pw):
    """Store password `pw` for (repo, user) in the default GNOME keyring."""
    KEYRING_NAME = "offlineimap"
    # argparse delivers positional args as one-element lists; unwrap them.
    if isinstance(repo, list):
        repo = repo[0]
    if isinstance(user, list):
        user = user[0]
    attrs = { "repo": repo, "user": user }
    keyring = gkey.get_default_keyring_sync()
    print "Set password in keyring [{}] using {}"\
        .format(KEYRING_NAME, attrs)
    # Final True = update_if_exists: overwrite any previous entry with the
    # same attributes instead of creating a duplicate.
    gkey.item_create_sync(keyring, gkey.ITEM_NETWORK_PASSWORD,
                          KEYRING_NAME, attrs, pw, True)
def get_credentials(repo):
    """Return the (username, password) pair stored for `repo`."""
    # Keyring lookup kept for parity with set_credentials; find_items_sync
    # searches across keyrings by attributes.
    keyring = gkey.get_default_keyring_sync()
    matches = gkey.find_items_sync(gkey.ITEM_NETWORK_PASSWORD, {"repo": repo})
    first = matches[0]
    return (first.attributes["user"], first.secret)
def get_username(repo):
    """Return only the stored username for `repo`."""
    user, _ = get_credentials(repo)
    return user
def get_password(repo):
    """Return only the stored password for `repo`."""
    _, secret = get_credentials(repo)
    return secret
# NOTE: the content of the keyring can be inspected and manipulated using the
# "seahorse" GUI under GNOME
if __name__ == "__main__":
    import sys
    import os
    import getpass
    # Parse command line args
    args = parser.parse_args()
    # Store credentials: repo plus username switches to store mode, prompting
    # twice for the password before writing it to the keyring.
    if args.repo and args.username:
        password = getpass.getpass("Enter password for user '%s': " % args.username)
        password_confirmation = getpass.getpass("Confirm password: ")
        if password != password_confirmation:
            print "Error: password confirmation does not match"
            sys.exit(1)
        set_credentials(args.repo, args.username, password)
        sys.exit(0)
    # Get username only (-u)
    if args.repo and args.get_username:
        print get_username(args.repo[0])
        sys.exit(0)
    # Get password only (-p)
    if args.repo and args.get_password:
        print get_password(args.repo[0])
        sys.exit(0)
    # By default return complete credentials as "user password" on one line
    credentials = get_credentials(args.repo[0])
    print credentials[0], credentials[1]
| {
"content_hash": "6a49108bea6e8ad6e2f8ff6634df9e75",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 84,
"avg_line_length": 32.6144578313253,
"alnum_prop": 0.5895825637236793,
"repo_name": "derkling/dotfiles",
"id": "e39a916441df3b31a63e2e4598fbd4f33b30719a",
"size": "2731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/gnome_keyring.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AppleScript",
"bytes": "2048"
},
{
"name": "Dockerfile",
"bytes": "2597"
},
{
"name": "Emacs Lisp",
"bytes": "26669"
},
{
"name": "Lua",
"bytes": "244630"
},
{
"name": "Makefile",
"bytes": "735"
},
{
"name": "Perl",
"bytes": "209535"
},
{
"name": "Python",
"bytes": "30709"
},
{
"name": "Ruby",
"bytes": "14970"
},
{
"name": "Shell",
"bytes": "59549"
},
{
"name": "Vim script",
"bytes": "5652"
}
],
"symlink_target": ""
} |
from graphene.utils.str_converters import to_snake_case
from graphql import DirectiveLocation, GraphQLDirective
from ..registry import get_global_registry
class BaseExtraGraphQLDirective(GraphQLDirective):
    """Base class for extra GraphQL directives.

    Subclasses inherit a constructor that derives the directive name from the
    class name, applies it to fields and fragments, and registers the
    instance in the global registry. The subclass docstring becomes the
    directive's description.
    """

    def __init__(self):
        registry = get_global_registry()
        directive_name = self.get_name()
        super(BaseExtraGraphQLDirective, self).__init__(
            name=directive_name,
            description=self.__doc__,
            args=self.get_args(),
            locations=[
                DirectiveLocation.FIELD,
                DirectiveLocation.FRAGMENT_SPREAD,
                DirectiveLocation.INLINE_FRAGMENT,
            ],
        )
        # Register after successful construction so the registry only ever
        # holds fully-initialized directives.
        registry.register_directive(directive_name, self)

    @classmethod
    def get_name(cls):
        """Directive name: class name minus the suffix, snake_cased."""
        return to_snake_case(cls.__name__.replace("GraphQLDirective", ""))

    @staticmethod
    def get_args():
        """Directive arguments; subclasses override to declare any."""
        return {}
| {
"content_hash": "f54a7ea03a392f36358c461945cfbee9",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 74,
"avg_line_length": 30.75,
"alnum_prop": 0.6178861788617886,
"repo_name": "eamigo86/graphene-django-extras",
"id": "6798999950489c381ce37c61911b6bd6e7af2eaa",
"size": "885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphene_django_extras/directives/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "148342"
}
],
"symlink_target": ""
} |
import requests
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import PaypalProvider
class PaypalOAuth2Adapter(OAuth2Adapter):
    """OAuth2 adapter for "Log In with PayPal" (openidconnect endpoints)."""
    provider_id = PaypalProvider.id
    supports_state = False

    def _get_endpoint(self):
        # MODE == 'live' selects production; anything else uses the sandbox.
        settings = self.get_provider().get_settings()
        if settings.get('MODE') == 'live':
            return 'paypal.com'
        return 'sandbox.paypal.com'

    @property
    def authorize_url(self):
        path = 'webapps/auth/protocol/openidconnect/v1/authorize'
        return 'https://www.{0}/{1}'.format(self._get_endpoint(), path)

    @property
    def access_token_url(self):
        path = 'v1/identity/openidconnect/tokenservice'
        return 'https://api.{0}/{1}'.format(self._get_endpoint(), path)

    @property
    def profile_url(self):
        path = 'v1/identity/openidconnect/userinfo'
        return 'https://api.{0}/{1}'.format(self._get_endpoint(), path)

    def complete_login(self, request, app, token, **kwargs):
        """Fetch the PayPal user profile and build a social login from it."""
        query = {'schema': 'openid',
                 'access_token': token}
        profile_response = requests.post(self.profile_url, params=query)
        return self.get_provider().sociallogin_from_response(
            request, profile_response.json())
# Wire the adapter into allauth's generic OAuth2 login/callback views.
oauth2_login = OAuth2LoginView.adapter_view(PaypalOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(PaypalOAuth2Adapter)
| {
"content_hash": "737bf4b420e141bec5180c7b82434e04",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 71,
"avg_line_length": 30.6734693877551,
"alnum_prop": 0.6453759148369926,
"repo_name": "spool/django-allauth",
"id": "5e44e84c005d94067c2baa79610441f796c1c7e1",
"size": "1503",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "allauth/socialaccount/providers/paypal/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "42116"
},
{
"name": "JavaScript",
"bytes": "3388"
},
{
"name": "Makefile",
"bytes": "694"
},
{
"name": "Python",
"bytes": "623679"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import os
import tempfile
import unittest
import json
import six
from telemetry import project_config
from telemetry import decorators
from telemetry.core import util
from telemetry.internal.util import binary_manager
from telemetry.testing import run_tests
from telemetry.testing import unittest_runner
class MockArgs():
  """Minimal stand-in for the argparse namespace typ's runner consumes."""

  def __init__(self):
    # Defaults mirror a run with no filtering or skipping requested.
    self.positional_args = []
    self.skip = []
    self.test_filter = ''
    self.exact_test_filter = True
    self.run_disabled_tests = False
class MockPossibleBrowser():
  """Fake PossibleBrowser exposing just what the test classifier consults."""

  def __init__(self, browser_type, os_name, os_version_name,
               supports_tab_control):
    self.browser_type = browser_type
    self.supports_tab_control = supports_tab_control
    self.platform = MockPlatform(os_name, os_version_name)

  def GetTypExpectationsTags(self):
    # No expectations tags apply in these tests.
    return []
class MockPlatform():
  """Fake platform reporting a fixed OS name and version."""

  def __init__(self, os_name, os_version_name):
    self.os_name = os_name
    self.os_version_name = os_version_name

  def GetOSName(self):
    return self.os_name

  def GetOSVersionName(self):
    return self.os_version_name

  def GetOSVersionDetailString(self):
    # Detail string is irrelevant for test classification; always empty.
    return ''
def _MakeTestFilter(tests):
return '::'.join(tests)
class RunTestsUnitTest(unittest.TestCase):
  """Tests for telemetry's test runner.

  Covers extraction of typ's full-results JSON, test filtering and skipping,
  expectations-file handling, and platform-based test selection.
  """

  def setUp(self):
    # Parsed --write-full-results-to JSON of the most recent runner invocation.
    self._test_result = {}

  def _ExtractTestResults(self, test_result):
    """Flatten typ's trie-shaped 'tests' dict into outcome sets.

    Returns (successes, failures, skips) as sets of full test names joined
    with the results' path delimiter.
    """
    delimiter = test_result['path_delimiter']
    failures = []
    successes = []
    skips = []

    def _IsLeafNode(node):
      # Leaf nodes carry an 'expected' results string; interior nodes map
      # path components to child nodes.
      test_dict = node[1]
      return ('expected' in test_dict and
              isinstance(test_dict['expected'], six.string_types))

    node_queues = []
    for t in test_result['tests']:
      node_queues.append((t, test_result['tests'][t]))
    while node_queues:
      node = node_queues.pop()
      full_test_name, test_dict = node
      if _IsLeafNode(node):
        # Failure means none of the actual results were among the expected.
        if all(res not in test_dict['expected'].split() for res in
               test_dict['actual'].split()):
          failures.append(full_test_name)
        elif test_dict['actual'] == 'SKIP':
          skips.append(full_test_name)
        else:
          successes.append(full_test_name)
      else:
        # Interior node: descend, joining path segments with the delimiter.
        for k in test_dict:
          node_queues.append(
              ('%s%s%s' % (full_test_name, delimiter, k),
               test_dict[k]))
    return set(successes), set(failures), set(skips)

  def _RunTest(
      self, expected_failures, expected_successes, expected_skips,
      expected_return_code=0, test_name='', extra_args=None, no_browser=True):
    """Run the example browser_tests through the real runner.

    Asserts the return code and the exact sets of failed / succeeded /
    skipped tests, then returns the actual (failures, successes, skips).
    """
    extra_args = extra_args or []
    config = project_config.ProjectConfig(
        top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
        client_configs=[],
        benchmark_dirs=[
            os.path.join(util.GetTelemetryDir(), 'examples', 'browser_tests')]
    )
    # Results JSON destination; created closed so the runner can write it.
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    temp_file.close()
    temp_file_name = temp_file.name
    try:
      passed_args = [
          # We don't want the underlying tests to report their results to
          # ResultDB.
          '--disable-resultsink',
          # These tests currently rely on some information sticking around
          # between tests, so we need to use the older global process pool
          # approach instead of having different pools scoped for
          # parallel/serial execution.
          '--use-global-pool',
      ]
      if test_name:
        passed_args.append(test_name)
      if no_browser:
        passed_args.append('--no-browser')
      passed_args.append('--write-full-results-to=%s' % temp_file_name)
      args = unittest_runner.ProcessConfig(config, passed_args + extra_args)
      test_runner = run_tests.RunTestsCommand()
      with binary_manager.TemporarilyReplaceBinaryManager(None):
        ret = test_runner.main(args=args)
      assert ret == expected_return_code, (
          'actual return code %d, does not equal the expected return code %d' %
          (ret, expected_return_code))
      with open(temp_file_name) as f:
        self._test_result = json.load(f)
      (actual_successes,
       actual_failures,
       actual_skips) = self._ExtractTestResults(self._test_result)
      # leave asserts below because we may miss tests
      # that are running when they are not supposed to
      self.assertEqual(set(actual_failures), set(expected_failures))
      self.assertEqual(set(actual_successes), set(expected_successes))
      self.assertEqual(set(actual_skips), set(expected_skips))
    finally:
      os.remove(temp_file_name)
    return actual_failures, actual_successes, actual_skips

  def _RunTestsWithExpectationsFile(
      self, full_test_name, expectations, test_tags='foo', extra_args=None,
      expected_exit_code=0):
    """Run one test under a synthesized expectations file.

    Writes a temporary expectations file declaring *expectations* for
    *full_test_name* under *test_tags*, runs the test with those tags
    active, and returns the parsed full-results JSON.
    """
    extra_args = extra_args or []
    test_expectations = (('# tags: [ foo bar mac ]\n'
                          '# results: [ {expectations} ]\n'
                          'crbug.com/123 [ {tags} ] {test} [ {expectations} ]')
                         .format(expectations=expectations, tags=test_tags,
                                 test=full_test_name))
    expectations_file = tempfile.NamedTemporaryFile(delete=False, mode='w+')
    expectations_file.write(test_expectations)
    results = tempfile.NamedTemporaryFile(delete=False)
    results.close()
    expectations_file.close()
    config = project_config.ProjectConfig(
        top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
        client_configs=[],
        expectations_files=[expectations_file.name],
        benchmark_dirs=[
            os.path.join(util.GetTelemetryDir(), 'examples', 'browser_tests')]
    )
    try:
      passed_args = ([full_test_name, '--no-browser',
                      ('--write-full-results-to=%s' % results.name)] +
                     ['--tag=%s' % tag for tag in test_tags.split()])
      args = unittest_runner.ProcessConfig(config, passed_args + extra_args)
      test_runner = run_tests.RunTestsCommand()
      with binary_manager.TemporarilyReplaceBinaryManager(None):
        ret = test_runner.main(args=args)
      self.assertEqual(ret, expected_exit_code)
      with open(results.name) as f:
        self._test_result = json.load(f)
    finally:
      os.remove(expectations_file.name)
      os.remove(results.name)
    return self._test_result

  @decorators.Disabled('chromeos') # crbug.com/696553
  def testIncludeGlobsInTestFilterListWithOutBrowser(self):
    """Globs in --test-filter select tests, and --skip still applies."""
    test_prefix = 'unit_tests_test.ExampleTests.%s'
    expected_failure = test_prefix % 'test_fail'
    expected_success = test_prefix % 'test_pass'
    expected_skip = test_prefix % 'test_skip'
    test_filter = _MakeTestFilter(
        [expected_failure, test_prefix % 'test_sk*', expected_success])
    self._RunTest(
        [expected_failure], [expected_success], [expected_skip],
        expected_return_code=1, extra_args=['--skip=*skip',
                                            '--test-filter=%s' % test_filter])

  @decorators.Disabled('chromeos') # crbug.com/696553
  def testIncludeGlobsInTestFilterListWithBrowser(self):
    """Same glob filtering, exercised through typ's Runner directly."""
    test_prefix = 'unit_tests_test.ExampleTests.%s'
    expected_failure = test_prefix % 'test_fail'
    expected_success = test_prefix % 'test_pass'
    expected_skip = test_prefix % 'test_skip'
    runner = run_tests.typ.Runner()
    runner.args.test_filter = _MakeTestFilter(
        [expected_failure, test_prefix % 'test_sk*', expected_success])
    runner.top_level_dirs = [os.path.join(util.GetTelemetryDir(), 'examples')]
    possible_browser = MockPossibleBrowser(
        'system', 'mac', 'mavericks', True)
    runner.classifier = run_tests.GetClassifier(runner, possible_browser)
    _, test_set = runner.find_tests(runner.args)
    self.assertEqual(len(test_set.parallel_tests), 3)
    test_names_found = [test.name for test in test_set.parallel_tests]
    self.assertIn(expected_failure, test_names_found)
    self.assertIn(expected_success, test_names_found)
    self.assertIn(expected_skip, test_names_found)

  @decorators.Disabled('chromeos') # crbug.com/696553
  def testSkipOnlyWhenTestMatchesTestFilterWithBrowser(self):
    """--skip globs only affect tests that the test filter selects."""
    test_name = 'unit_tests_test.ExampleTests.test_also_fail'
    runner = run_tests.typ.Runner()
    runner.args.test_filter = test_name
    runner.args.skip.append('*fail')
    runner.top_level_dirs = [os.path.join(util.GetTelemetryDir(), 'examples')]
    possible_browser = MockPossibleBrowser(
        'system', 'mac', 'mavericks', True)
    runner.classifier = run_tests.GetClassifier(runner, possible_browser)
    _, test_set = runner.find_tests(runner.args)
    self.assertEqual(len(test_set.tests_to_skip), 1)
    self.assertEqual(test_set.tests_to_skip[0].name, test_name)

  @decorators.Disabled('chromeos') # crbug.com/696553
  def testSkipOnlyWhenTestMatchesTestFilterWithoutBrowser(self):
    test_name = 'unit_tests_test.ExampleTests.test_also_fail'
    _, _, actual_skips = self._RunTest(
        [], [], [test_name],
        test_name=test_name,
        extra_args=['--skip=*fail'])
    self.assertEqual(actual_skips, {test_name})

  @decorators.Disabled('chromeos') # crbug.com/696553
  def testTestFailsAllRetryOnFailureRetriesAndIsNotARegression(self):
    """An expected failure retried to exhaustion is not an unexpected result."""
    self._RunTestsWithExpectationsFile(
        'unit_tests_test.ExampleTests.test_fail', 'RetryOnFailure Failure',
        extra_args=['--retry-limit=3', '--retry-only-retry-on-failure-tests'],
        expected_exit_code=0)
    results = (self._test_result['tests']['unit_tests_test']
               ['ExampleTests']['test_fail'])
    # 1 initial attempt + 3 retries, all failing as expected.
    self.assertEqual(results['actual'], 'FAIL FAIL FAIL FAIL')
    self.assertEqual(results['expected'], 'FAIL')
    self.assertNotIn('is_unexpected', results)
    self.assertNotIn('is_regression', results)

  @decorators.Disabled('chromeos') # crbug.com/696553
  def testDoNotRetryExpectedFailure(self):
    """Without RetryOnFailure, an expected failure runs exactly once."""
    self._RunTestsWithExpectationsFile(
        'unit_tests_test.ExampleTests.test_fail', 'Failure',
        extra_args=['--retry-limit=3'])
    test_result = (self._test_result['tests']['unit_tests_test']['ExampleTests']
                   ['test_fail'])
    self.assertEqual(test_result['actual'], 'FAIL')
    self.assertEqual(test_result['expected'], 'FAIL')
    self.assertNotIn('is_unexpected', test_result)
    self.assertNotIn('is_regression', test_result)

  @decorators.Disabled('chromeos') # crbug.com/696553
  def testRetryOnFailureExpectationWithPassingTest(self):
    """RetryOnFailure on a passing test leaves its result untouched."""
    self._RunTestsWithExpectationsFile(
        'unit_tests_test.ExampleTests.test_pass', 'RetryOnFailure')
    test_result = (self._test_result['tests']['unit_tests_test']['ExampleTests']
                   ['test_pass'])
    self.assertEqual(test_result['actual'], 'PASS')
    self.assertEqual(test_result['expected'], 'PASS')
    self.assertNotIn('is_unexpected', test_result)
    self.assertNotIn('is_regression', test_result)

  @decorators.Disabled('chromeos') # crbug.com/696553
  def testSkipTestCmdArgNoExpectationsFile(self):
    """--skip marks a test as an expected SKIP without an expectations file."""
    test_name = 'unit_tests_test.ExampleTests.test_pass'
    _, _, actual_skips = self._RunTest(
        [], [], ['unit_tests_test.ExampleTests.test_pass'], test_name=test_name,
        extra_args=['--skip=*test_pass'])
    test_result = (self._test_result['tests']['unit_tests_test']
                   ['ExampleTests']['test_pass'])
    self.assertEqual(actual_skips, {test_name})
    self.assertEqual(test_result['expected'], 'SKIP')
    self.assertEqual(test_result['actual'], 'SKIP')
    self.assertNotIn('is_unexpected', test_result)
    self.assertNotIn('is_regression', test_result)

  @decorators.Disabled('chromeos') # crbug.com/696553
  def testSkipTestNoExpectationsFile(self):
    """A test that skips itself reports an expected SKIP."""
    test_name = 'unit_tests_test.ExampleTests.test_skip'
    _, _, actual_skips = self._RunTest(
        [], [], [test_name], test_name=test_name)
    result = (self._test_result['tests']['unit_tests_test']
              ['ExampleTests']['test_skip'])
    self.assertEqual(actual_skips, {test_name})
    self.assertEqual(result['actual'], 'SKIP')
    self.assertEqual(result['expected'], 'SKIP')
    self.assertNotIn('is_unexpected', result)
    self.assertNotIn('is_regression', result)

  @decorators.Disabled('chromeos') # crbug.com/696553
  def testSkipTestWithExpectationsFileWithSkipExpectation(self):
    """A Skip expectation wins over other expected results in the file."""
    self._RunTestsWithExpectationsFile(
        'unit_tests_test.ExampleTests.test_pass', 'Skip Failure Crash')
    test_result = (self._test_result['tests']['unit_tests_test']['ExampleTests']
                   ['test_pass'])
    self.assertEqual(test_result['actual'], 'SKIP')
    self.assertEqual(test_result['expected'], 'SKIP')
    self.assertNotIn('is_unexpected', test_result)
    self.assertNotIn('is_regression', test_result)

  @decorators.Disabled('chromeos') # crbug.com/696553
  def testSkipTestCmdArgsWithExpectationsFile(self):
    """--skip on the command line overrides the expectations file."""
    self._RunTestsWithExpectationsFile(
        'unit_tests_test.ExampleTests.test_pass', 'Crash Failure',
        extra_args=['--skip=*test_pass'])
    test_result = (self._test_result['tests']['unit_tests_test']['ExampleTests']
                   ['test_pass'])
    self.assertEqual(test_result['actual'], 'SKIP')
    self.assertEqual(test_result['expected'], 'SKIP')
    self.assertNotIn('is_unexpected', test_result)
    self.assertNotIn('is_regression', test_result)

  def _GetEnabledTests(self, browser_type, os_name, os_version_name,
                       supports_tab_control, runner=None):
    """Return the short names of disabled_cases.py tests that would run on
    the given (mocked) browser/platform combination."""
    if not runner:
      runner = run_tests.typ.Runner()
    host = runner.host
    runner.top_level_dirs = [util.GetTelemetryDir()]
    runner.args.tests = [
        host.join(util.GetTelemetryDir(), 'telemetry', 'testing',
                  'disabled_cases.py')
    ]
    possible_browser = MockPossibleBrowser(
        browser_type, os_name, os_version_name, supports_tab_control)
    runner.classifier = run_tests.GetClassifier(runner, possible_browser)
    _, test_set = runner.find_tests(runner.args)
    # Only the method name (last dotted component) is of interest here.
    return set(test.name.split('.')[-1] for test in test_set.parallel_tests)

  def testSystemMacMavericks(self):
    self.assertEqual(
        {
            'testAllEnabled', 'testAllEnabledVersion2', 'testMacOnly',
            'testMavericksOnly', 'testNoChromeOS', 'testNoWinLinux',
            'testSystemOnly', 'testHasTabs'
        }, self._GetEnabledTests('system', 'mac', 'mavericks', True))

  def testSystemMacLion(self):
    self.assertEqual(
        {
            'testAllEnabled', 'testAllEnabledVersion2', 'testMacOnly',
            'testNoChromeOS', 'testNoMavericks', 'testNoWinLinux',
            'testSystemOnly', 'testHasTabs'
        }, self._GetEnabledTests('system', 'mac', 'lion', True))

  def testCrosGuestChromeOS(self):
    self.assertEqual(
        {
            'testAllEnabled', 'testAllEnabledVersion2', 'testChromeOSOnly',
            'testNoMac', 'testNoMavericks', 'testNoSystem', 'testNoWinLinux',
            'testHasTabs'
        }, self._GetEnabledTests('cros-guest', 'chromeos', '', True))

  def testCanaryWindowsWin7(self):
    self.assertEqual(
        {
            'testAllEnabled', 'testAllEnabledVersion2', 'testNoChromeOS',
            'testNoMac', 'testNoMavericks', 'testNoSystem',
            'testWinOrLinuxOnly', 'testHasTabs'
        }, self._GetEnabledTests('canary', 'win', 'win7', True))

  def testDoesntHaveTabs(self):
    # supports_tab_control=False drops tests that require tab control.
    self.assertEqual(
        {
            'testAllEnabled', 'testAllEnabledVersion2', 'testNoChromeOS',
            'testNoMac', 'testNoMavericks', 'testNoSystem', 'testWinOrLinuxOnly'
        }, self._GetEnabledTests('canary', 'win', 'win7', False))

  def testSkip(self):
    """Skip patterns accept both globs and fully-qualified test names."""
    runner = run_tests.typ.Runner()
    runner.args.skip = [
        'telemetry.*testNoMac', '*NoMavericks',
        'telemetry.testing.disabled_cases.DisabledCases.testNoSystem']
    self.assertEqual(
        {
            'testAllEnabled', 'testAllEnabledVersion2', 'testNoChromeOS',
            'testWinOrLinuxOnly', 'testHasTabs'
        }, self._GetEnabledTests('canary', 'win', 'win7', True, runner))

  def testtPostionalArgsTestFiltering(self):
    runner = run_tests.typ.Runner()
    runner.args.partial_match_filter = ['testAllEnabled']
    self.assertEqual({'testAllEnabled', 'testAllEnabledVersion2'},
                     self._GetEnabledTests('system', 'win', 'win7', True,
                                           runner))

  def testPostionalArgsTestFiltering(self):
    runner = run_tests.typ.Runner()
    runner.args.test_filter = (
        'telemetry.testing.disabled_cases.DisabledCases.testAllEnabled::'
        'telemetry.testing.disabled_cases.DisabledCases.testNoMavericks::'
        'testAllEnabledVersion2') # Partial test name won't match
    self.assertEqual({'testAllEnabled', 'testNoMavericks'},
                     self._GetEnabledTests('system', 'win', 'win7', True,
                                           runner))
| {
"content_hash": "8e9437b188a61158c69eedf156d52cd4",
"timestamp": "",
"source": "github",
"line_count": 400,
"max_line_length": 80,
"avg_line_length": 41.955,
"alnum_prop": 0.6519485162674293,
"repo_name": "catapult-project/catapult",
"id": "c6c7360a47360684100b8d4f3966accdfe48f2cf",
"size": "16945",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "telemetry/telemetry/testing/run_tests_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1324"
},
{
"name": "C++",
"bytes": "46069"
},
{
"name": "CSS",
"bytes": "23376"
},
{
"name": "Dockerfile",
"bytes": "1541"
},
{
"name": "Go",
"bytes": "114396"
},
{
"name": "HTML",
"bytes": "12394298"
},
{
"name": "JavaScript",
"bytes": "1559584"
},
{
"name": "Makefile",
"bytes": "1774"
},
{
"name": "Python",
"bytes": "6778695"
},
{
"name": "Shell",
"bytes": "2288"
}
],
"symlink_target": ""
} |
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.standard import ProjectReport, ProjectReportParametersMixin, DatespanMixin
def div(num, denom, percent=False):
    """Format num/denom to two decimal places.

    With percent=True the ratio is scaled by 100 and suffixed with '%'.
    A zero denominator yields 0 rather than raising ZeroDivisionError.
    """
    scale = 100.0 if percent else 1.0
    if denom != 0:
        quotient = num * scale / denom
    else:
        quotient = 0
    suffix = "%" if percent else ""
    return "%.2f" % quotient + suffix
class CommConnectReport(GenericTabularReport, ProjectReport, ProjectReportParametersMixin, DatespanMixin):
    """Base tabular report over CommConnect messages within the datespan."""
    is_cacheable = True
    emailable = True

    @property
    def base_query(self):
        """Elasticsearch bool query scoped to this domain and date range,
        narrowed further to any selected recipients."""
        must_clauses = [
            {"match": {"domain.exact": self.domain}},
            {"range": {
                'date': {
                    "from": self.datespan.startdate_param_utc,
                    "to": self.datespan.enddate_param_utc,
                    "include_upper": False}}},
        ]
        query = {"query": {"bool": {"must": must_clauses}}}
        return self.add_recipients_to_query(query)

    def add_recipients_to_query(self, q):
        """Append recipient filters for selected user groups / case groups."""
        must_clauses = q["query"]["bool"]["must"]
        if self.users_by_group:
            must_clauses.append({"in": {"couch_recipient": self.combined_user_ids}})
        if self.cases_by_case_group:
            must_clauses.append({"in": {"couch_recipient": self.cases_by_case_group}})
        return q
"content_hash": "1f0862ad9bd8effaa020919b3f391111",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 106,
"avg_line_length": 40.63636363636363,
"alnum_prop": 0.5540641312453393,
"repo_name": "SEL-Columbia/commcare-hq",
"id": "07a87afb76dd532225eb47846b99993f54d9b243",
"size": "1341",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "corehq/apps/reports/commconnect/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "768322"
},
{
"name": "JavaScript",
"bytes": "2647080"
},
{
"name": "Python",
"bytes": "7806659"
},
{
"name": "Shell",
"bytes": "28569"
}
],
"symlink_target": ""
} |
import sys
import pytest
import wx
from mock import MagicMock
from gooey.gui.lang import i18n
from gooey.gui.model import MyModel
from gooey.gui.presenter import Presenter
from gooey.gui.util.freeze import get_resource_path
from gooey.python_bindings import config_generator
@pytest.fixture
def build_spec(complete_parser):
  """Gooey build spec generated from the full-featured example parser."""
  return config_generator.create_from_parser(complete_parser, sys.argv[0])
@pytest.fixture
def build_spec_subparser(subparser):
  """Gooey build spec generated from the subparser-based example parser."""
  return config_generator.create_from_parser(subparser, sys.argv[0])
@pytest.fixture
def presentation_model(build_spec):
  """Presenter wired to the complete-parser spec, with a mocked-out view."""
  app = wx.App(False)  # a wx.App must exist before any widgets are created
  i18n.load(get_resource_path('languages'), build_spec['language'])
  mocked_view = MagicMock()
  presentation = Presenter(mocked_view, MyModel(build_spec))
  return presentation
@pytest.fixture
def subparser_presentation_model(build_spec_subparser):
  """Presenter wired to the subparser spec, with a mocked-out view."""
  app = wx.App(False)  # a wx.App must exist before any widgets are created
  i18n.load(get_resource_path('languages'), 'english')
  mocked_view = MagicMock()
  presentation = Presenter(mocked_view, MyModel(build_spec_subparser))
  return presentation
# ----------------------------
# Tests #
# ----------------------------
def test_presentation_init(presentation_model):
  """After initialize_view, headings mirror the model and both argument
  sections get populated; the list view stays untouched off column mode."""
  presentation = presentation_model
  presentation.initialize_view()
  view, model = presentation.view, presentation.model
  assert view.heading_title == model.heading_title
  assert view.heading_subtitle == model.heading_subtitle
  assert view.required_section.populate.called
  assert view.optional_section.populate.called
  # should not call when not running in column format
  assert not view.set_list_contents.called
def test_subparser_presentation_init_sets_sidebar(subparser_presentation_model):
  """Subparser specs run in column mode, so the sidebar gets populated."""
  presentation = subparser_presentation_model
  presentation.initialize_view()
  # should be called to initialize the sidebar
  assert presentation.view.set_list_contents.called
def test_on_start_shows_err_dlg_if_missing_args(presentation_model):
  """on_start pops the missing-args dialog until required args are filled."""
  presentation = presentation_model
  presentation.initialize_view()
  presentation.on_start()
  assert presentation.view.show_missing_args_dialog.called
  presentation.view.show_missing_args_dialog.reset_mock()

  # Now satisfy every required argument and try again.
  for required_arg in presentation.model.required_args:
    required_arg.value = 'foo'
  # Stub out the pieces that would actually launch the client program.
  presentation.client_runner = MagicMock()
  presentation.update_model = MagicMock()
  presentation.model.build_command_line_string = MagicMock()
  presentation.on_start()
  # With nothing missing, the dialog must not reappear.
  assert not presentation.view.show_missing_args_dialog.called
| {
"content_hash": "d7b65f66ec1fb0597634d23dac708374",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 82,
"avg_line_length": 31.258426966292134,
"alnum_prop": 0.7286125089863408,
"repo_name": "jschultz/Gooey",
"id": "1bf17e7baadae4350f854be421815805a71acc9a",
"size": "2782",
"binary": false,
"copies": "2",
"ref": "refs/heads/latest",
"path": "gooey/tests/test_presentation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "102598"
}
],
"symlink_target": ""
} |
import pyexiv2
import os
import Image
import time
import webbrowser
import sys
import gdata.gauth
import gdata.photos.service
import gdata.media
import gdata.geo
from picasaclient import PicasaClient
from pictureuploader import PictureUploader, FileWithCallback
import utils
# OAuth scope for the Picasa Web Albums / Google Photos data API.
SCOPES = "https://picasaweb.google.com/data/"
# User-agent string identifying this client to the API.
USER_AGENT = "personal-photo/video-uploader"
# Keys used when persisting auth/user data through the data helper.
TOKEN_KEY = "gphotos-token"
NICKNAME_KEY = "nickname"
ALBUM_KEY = "albumid"
# Maximum accepted video upload size in bytes (100 MiB).
MAX_VIDEO_SIZE = 104857600
class GoogleUploader(PictureUploader):
    """Uploads photos/videos to a Google (Picasa Web Albums) "InstantUpload" album.

    NOTE(review): legacy Python 2 code (print statements, raw_input, unicode)
    targeting the long-deprecated Picasa Web API -- presumably kept for archival
    or migration purposes.
    """

    def __init__(self, client_id, client_secret, user=None):
        PictureUploader.__init__(self)
        self._client_id = client_id
        self._client_secret = client_secret
        self._gd_client = PicasaClient()
        self._autobackup_album = None
        self._set_service_name("gphotos")
        # When False, images wider/taller than 2048 px are downscaled first.
        self.original_size = False
        self._user_name = user or ""
        # Extend the inherited image extensions with video formats.
        self._allowed_file_exts += [".mov", ".mp4", ".avi", ".mpg", ".mpeg", ".3gp", ".3gpp"]
        self._user_data = None
        # Per-user storage key so multiple accounts can keep separate tokens.
        self._token_key = TOKEN_KEY
        if user:
            self._token_key += "-" + user

    def get_user_feed_data(self):
        """Fetch (once) and cache the user's nickname and InstantUpload album id.

        Falls back to the "default" album id when no InstantUpload album exists.
        """
        if self._user_data is None:
            albumid = "default"
            feed = self._gd_client.GetUserFeed()
            user_name = feed.nickname.text
            for album in feed.entry:
                if album.name.text == "InstantUpload":
                    albumid = album.gphoto_id.text
                    break
            self._user_data = {NICKNAME_KEY: user_name, ALBUM_KEY: albumid}
            # NOTE(review): writes self.user_name, while __init__ sets
            # self._user_name -- possibly an inconsistency; confirm which
            # attribute callers actually read.
            self.user_name = self._user_data[NICKNAME_KEY]
        return self._user_data

    def get_autobackup_album_url(self):
        """Return (and cache) the API feed path of the auto-backup album."""
        if self._autobackup_album is None:
            data = self.get_user_feed_data()
            self._autobackup_album = "/data/feed/api/user/default/albumid/" + data[ALBUM_KEY]
        return self._autobackup_album

    def resize_image(self, org_file_name, rez_file_name, max_width=2048):
        """Downscale org_file_name so its longest side is at most max_width px.

        Writes the result to rez_file_name, copies EXIF metadata over, and
        stamps the file time from the original's date. Returns True when a
        resized file was written, False when the image already fits (nothing
        is written in that case).
        """
        img = Image.open(org_file_name)
        w, h = img.size
        if w <= max_width and h <= max_width:
            return False
        if w > h:
            # Landscape: clamp width, scale height proportionally.
            ratio = (max_width / float(w))
            h = int((float(h) * float(ratio)))
            w = max_width
        elif w < h:
            # Portrait: clamp height, scale width proportionally.
            ratio = (max_width / float(h))
            w = int((float(w) * float(ratio)))
            h = max_width
        else:
            # Square: both sides shrink to max_width.
            w = max_width
            h = max_width
        img = img.resize((w, h), Image.ANTIALIAS)
        img.save(rez_file_name, None, quality=88)
        # Copy EXIF/IPTC/XMP metadata from the original into the resized file.
        org_exif_data = pyexiv2.ImageMetadata(org_file_name)
        org_exif_data.read()
        rez_exif_data = pyexiv2.ImageMetadata(rez_file_name)
        rez_exif_data.read()
        org_exif_data.copy(rez_exif_data, True, True, True, True)
        rez_exif_data.write()
        # Preserve the original capture date as the resized file's atime/mtime.
        datetime = utils.get_date_from_file_date(org_file_name)
        filetime = time.mktime(datetime.timetuple())
        os.utime(rez_file_name, (filetime, filetime))
        return True

    def upload_file(self, file_name, md5sum=None):
        """Upload one photo or video to the auto-backup album.

        Returns the new photo id on success, 0 on failure. Images may be
        resized through a temporary file first; videos over MAX_VIDEO_SIZE are
        rejected. `md5sum` is accepted for interface compatibility but unused.
        """
        album_url = self.get_autobackup_album_url()
        fname = os.path.basename(file_name)
        temp_file_name = None
        photo_id = 0
        try:
            content = utils.file_name_to_mimetype(file_name)
            if content is None:
                sys.stderr.write("Can't determine mime type for file " + file_name + "\n")
                return 0
            if content.startswith("image") and not self.original_size:
                # Resize into a hidden temp file next to the original.
                path, name = os.path.split(file_name)
                temp_file_name = os.path.join(path, ".resizing___" + name)
                if self.resize_image(file_name, temp_file_name):
                    file_name = temp_file_name
                else:
                    # Image already small enough; no temp file was written.
                    temp_file_name = None
            f = FileWithCallback(file_name)
            if content.startswith("video") and f.len > MAX_VIDEO_SIZE:
                sys.stderr.write("File " + file_name + " is bigger than " + utils.sizeof_fmt(MAX_VIDEO_SIZE) + "\n")
                return 0
            photo = self._gd_client.InsertPhotoSimple(album_url, fname, "", f, content)
            photo_id = photo.gphoto_id.text
        except Exception as e:
            print "Failed to upload file", fname + ":", e
        if temp_file_name is not None:
            # Clean up the temporary resized file regardless of outcome.
            os.remove(temp_file_name)
        return photo_id

    def _load_token(self):
        """Load the stored OAuth token; returns a token object or None."""
        try:
            token_data = self._dataHelper.get_secure_data(self._token_key)
            if token_data is not None:
                self._user_data = token_data
                return gdata.gauth.token_from_blob(token_data[TOKEN_KEY])
        except :
            # Best effort: missing/corrupt stored data simply yields None.
            pass
        return None

    def _save_token(self, token):
        """Persist the token alongside user feed data, or clear it when None."""
        if token is None:
            self._dataHelper.save_secure_data(self._token_key, None)
        else:
            tokenb = gdata.gauth.token_to_blob(token)
            user_feed = self.get_user_feed_data()
            user_feed[TOKEN_KEY] = tokenb
            self._dataHelper.save_secure_data(self._token_key, user_feed)

    def refresh_token(self, token):
        """Force-refresh the access token and persist the refreshed blob."""
        # Hack to fix possible bug in Google SDK (I have no idea what I'm doing)
        token._refresh(self._gd_client.http_client.request)
        self._save_token(token)

    def authenticate(self):
        """Authenticate with the API, reusing a stored token when possible.

        Falls back to the interactive OAuth2 browser flow (prints the authorize
        URL, opens a browser tab, reads the verifier code from stdin).
        Returns True on success, False otherwise.
        """
        try:
            token = self._load_token()
            if token is not None:
                self.refresh_token(token)
                if not token.invalid:
                    token.authorize(self._gd_client)
                    return True
            token = gdata.gauth.OAuth2Token(
                client_id=self._client_id, client_secret=self._client_secret, scope=SCOPES,
                user_agent=USER_AGENT)
            authorize_url = token.generate_authorize_url()
            print "Authorize URL:", authorize_url
            webbrowser.open_new_tab(authorize_url)
            token.get_access_token(unicode(raw_input('Verifier code: ')))
            token.authorize(self._gd_client)
            self._save_token(token)
            return True
        except gdata.gauth.OAuth2AccessTokenError as e:
            # Stored token no longer valid -- wipe it so the next run re-authorizes.
            self._save_token(None)
            sys.stderr.write(str(e) + "\n")
        except Exception as e:
            sys.stderr.write(str(e) + "\n")
        return False
"content_hash": "4933169b97dab94924f9f06b0e39b5a0",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 116,
"avg_line_length": 33.82539682539682,
"alnum_prop": 0.5631159080244017,
"repo_name": "hmrs-cr/PyArcPics",
"id": "cf02f5e1e8c03e8b628b46fa171c91cf3edec91f",
"size": "6426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gphotosuploader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "58533"
},
{
"name": "Shell",
"bytes": "166"
}
],
"symlink_target": ""
} |
"""Tests for `models.py` (model cloning, mainly)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import metrics
from tensorflow.python.keras import models
from tensorflow.python.keras import optimizer_v1
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
class TestModel(keras.Model):
  """A model subclass."""

  def __init__(self, n_outputs=4, trainable=True):
    """A test class with one dense layer and number of outputs as a variable."""
    super(TestModel, self).__init__()
    self.layer1 = keras.layers.Dense(n_outputs)
    # Tracked variable recording the output width; `trainable` controls whether
    # it shows up in trainable_variables. NOTE(review): attribute assignment
    # order affects TF object tracking/checkpoint naming -- do not reorder.
    self.n_outputs = variables.Variable(n_outputs, trainable=trainable)

  def call(self, x):
    """Forward pass: apply the single dense layer to `x`."""
    return self.layer1(x)
def _get_layers(input_shape=(4,), add_input_layer=False):
  """Build the standard test layer stack.

  The head varies with the arguments (explicit InputLayer, Dense with
  input_shape, or a bare Dense); the tail is always
  BatchNormalization -> Dropout(0.5) -> Dense(4).
  """
  if add_input_layer:
    head = [keras.layers.InputLayer(input_shape=input_shape),
            keras.layers.Dense(4)]
  elif input_shape:
    head = [keras.layers.Dense(4, input_shape=input_shape)]
  else:
    head = [keras.layers.Dense(4)]
  tail = [keras.layers.BatchNormalization(),
          keras.layers.Dropout(0.5),
          keras.layers.Dense(4)]
  return head + tail
def _get_model(input_shape=(4,)):
  """Build a test model of the currently-parameterized model type."""
  stack = _get_layers(input_shape=None, add_input_layer=False)
  return testing_utils.get_model_from_layers(stack, input_shape=input_shape)
class TestModelCloning(keras_parameterized.TestCase):
  """Tests for clone_model / _clone_sequential_model / _clone_functional_model."""

  @keras_parameterized.run_all_keras_modes
  @parameterized.named_parameters([
      {'testcase_name': 'has_input_layer',
       'input_shape': (4,),
       'add_input_layer': True,
       'share_weights': False},
      {'testcase_name': 'no_input_layer',
       'input_shape': None,
       'add_input_layer': False,
       'share_weights': False},
      {'testcase_name': 'has_input_layer_share_weights',
       'input_shape': (4,),
       'add_input_layer': True,
       'share_weights': True},
      {'testcase_name': 'no_input_layer_share_weights',
       'input_shape': None,
       'add_input_layer': False,
       'share_weights': True},
  ])
  def test_clone_sequential_model(
      self, input_shape, add_input_layer, share_weights):
    """Sequential clones preserve InputLayer presence and graph-network-ness."""
    if share_weights:
      clone_fn = functools.partial(
          keras.models._clone_sequential_model, layer_fn=models.share_weights)
    else:
      clone_fn = keras.models.clone_model
    val_a = np.random.random((10, 4))
    model = models.Sequential(_get_layers(input_shape, add_input_layer))
    # Sanity check
    self.assertEqual(
        isinstance(
            list(model._flatten_layers(include_self=False, recursive=False))[0],
            keras.layers.InputLayer), add_input_layer)
    self.assertEqual(model._is_graph_network, add_input_layer)
    # With placeholder creation -- clone model should have an InputLayer
    # if the original model has one.
    new_model = clone_fn(model)
    self.assertEqual(
        isinstance(
            list(
                new_model._flatten_layers(include_self=False,
                                          recursive=False))[0],
            keras.layers.InputLayer), add_input_layer)
    self.assertEqual(new_model._is_graph_network, model._is_graph_network)
    if input_shape and not ops.executing_eagerly_outside_functions():
      # update ops from batch norm needs to be included
      self.assertGreaterEqual(len(new_model.updates), 2)
    # On top of new tensor -- clone model should always have an InputLayer.
    input_a = keras.Input(shape=(4,))
    new_model = clone_fn(model, input_tensors=input_a)
    self.assertIsInstance(
        list(new_model._flatten_layers(include_self=False, recursive=False))[0],
        keras.layers.InputLayer)
    self.assertTrue(new_model._is_graph_network)
    # On top of new, non-Keras tensor -- clone model should always have an
    # InputLayer.
    if not context.executing_eagerly():
      # TODO(b/121277734):Skip Eager contexts, as Input() layers raise an error
      # saying they should not be used with EagerTensors
      input_a = keras.backend.variable(val_a)
      new_model = clone_fn(model, input_tensors=input_a)
      self.assertIsInstance(
          list(new_model._flatten_layers(include_self=False,
                                         recursive=False))[0],
          keras.layers.InputLayer)
      self.assertTrue(new_model._is_graph_network)

  @keras_parameterized.run_all_keras_modes
  @parameterized.named_parameters([
      {'testcase_name': 'clone_weights', 'share_weights': False},
      {'testcase_name': 'share_weights', 'share_weights': True},
  ])
  def test_clone_functional_model(self, share_weights):
    """Functional clones train from placeholders, Keras tensors, and variables."""
    if share_weights:
      clone_fn = functools.partial(
          keras.models._clone_functional_model, layer_fn=models.share_weights)
    else:
      clone_fn = keras.models.clone_model
    val_a = np.random.random((10, 4))
    val_b = np.random.random((10, 4))
    val_out = np.random.random((10, 4))
    input_a = keras.Input(shape=(4,))
    input_b = keras.Input(shape=(4,))
    # dense_1 is shared between the two branches; dense_2 is not.
    dense_1 = keras.layers.Dense(4,)
    dense_2 = keras.layers.Dense(4,)
    x_a = dense_1(input_a)
    x_a = keras.layers.Dropout(0.5)(x_a)
    x_a = keras.layers.BatchNormalization()(x_a)
    x_b = dense_1(input_b)
    x_a = dense_2(x_a)
    outputs = keras.layers.add([x_a, x_b])
    model = keras.models.Model([input_a, input_b], outputs)
    # With placeholder creation
    new_model = clone_fn(model)
    if not ops.executing_eagerly_outside_functions():
      self.assertGreaterEqual(len(new_model.updates), 2)
    new_model.compile(
        testing_utils.get_v2_optimizer('rmsprop'),
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    new_model.train_on_batch([val_a, val_b], val_out)
    # On top of new tensors
    input_a = keras.Input(shape=(4,), name='a')
    input_b = keras.Input(shape=(4,), name='b')
    new_model = keras.models.clone_model(
        model, input_tensors=[input_a, input_b])
    if not ops.executing_eagerly_outside_functions():
      self.assertLen(new_model.updates, 2)
    new_model.compile(
        testing_utils.get_v2_optimizer('rmsprop'),
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    new_model.train_on_batch([val_a, val_b], val_out)
    # On top of new, non-Keras tensors
    if not context.executing_eagerly():
      # TODO(b/121277734):Skip Eager contexts, as Input() layers raise an error
      # saying they should not be used with EagerTensors
      input_a = keras.backend.variable(val_a)
      input_b = keras.backend.variable(val_b)
      new_model = clone_fn(model, input_tensors=[input_a, input_b])
      self.assertGreaterEqual(len(new_model.updates), 2)
      new_model.compile(
          testing_utils.get_v2_optimizer('rmsprop'),
          'mse',
          run_eagerly=testing_utils.should_run_eagerly())
      new_model.train_on_batch(None, val_out)

  @keras_parameterized.run_all_keras_modes
  @parameterized.named_parameters([
      {'testcase_name': 'clone_weights', 'share_weights': False},
      {'testcase_name': 'share_weights', 'share_weights': True},
  ])
  def test_clone_functional_with_masking(self, share_weights):
    """Clones of masking models still train to zero loss on masked input."""
    if share_weights:
      clone_fn = functools.partial(
          keras.models._clone_functional_model, layer_fn=models.share_weights)
    else:
      clone_fn = keras.models.clone_model
    x = np.array([[[1.], [1.]], [[0.], [0.]]])
    inputs = keras.Input((2, 1))
    outputs = keras.layers.Masking(mask_value=0)(inputs)
    outputs = keras.layers.TimeDistributed(
        keras.layers.Dense(1, kernel_initializer='one'))(outputs)
    model = keras.Model(inputs, outputs)
    model = clone_fn(model)
    model.compile(
        loss='mse',
        optimizer=testing_utils.get_v2_optimizer('adam'),
        run_eagerly=testing_utils.should_run_eagerly())
    y = np.array([[[1], [1]], [[1], [1]]])
    loss = model.train_on_batch(x, y)
    # With the second sample fully masked, the ones-initialized Dense maps the
    # remaining sample exactly onto the target, so the loss is exactly zero.
    self.assertEqual(float(loss), 0.)

  def test_clone_rnn(self):
    # Test cloning a model with multiple cells in an RNN. This exercises a
    # few "fancier" features such as the `Bidrectional` wrapper and
    # `StackedRNNCells` under the hood.
    inputs = keras.Input(shape=(3, 3))
    cells = [
        keras.layers.LSTMCell(
            units=32,
            enable_caching_device=True,
            implementation=2,
            activation='relu')]
    rnn = keras.layers.RNN(cells, return_sequences=True)
    outputs = keras.layers.Bidirectional(rnn)(inputs)
    outputs = keras.layers.Dense(
        12, activation='softmax', name='scores')(outputs)
    model = keras.Model(inputs=inputs, outputs=outputs)
    model.compile(
        loss=keras.losses.CategoricalCrossentropy(),
        optimizer=keras.optimizer_v2.rmsprop.RMSprop(lr=0.01),
        metrics=['accuracy'])
    # Only checks that cloning does not raise.
    keras.models.clone_model(model)

  def test_model_cloning_invalid_use_cases(self):
    """The private clone helpers raise ValueError on mismatched model kinds."""
    seq_model = keras.models.Sequential()
    seq_model.add(keras.layers.Dense(4, input_shape=(4,)))
    x = keras.Input((4,))
    y = keras.layers.Dense(4)(x)
    fn_model = keras.models.Model(x, y)
    with self.assertRaises(ValueError):
      keras.models._clone_functional_model(seq_model)
    with self.assertRaises(ValueError):
      keras.models._clone_functional_model(None)
    with self.assertRaises(ValueError):
      keras.models._clone_sequential_model(fn_model)
    with self.assertRaises(ValueError):
      # Wrong number of input tensors for a single-input model.
      keras.models._clone_sequential_model(seq_model, input_tensors=[x, x])
    with self.assertRaises(ValueError):
      keras.models._clone_sequential_model(seq_model, input_tensors=y)

  def test_functional_cloning_does_not_create_unnecessary_placeholders(self):
    """Cloning onto concrete tensors must not leave Placeholder ops behind."""
    with ops.Graph().as_default():
      x = keras.Input((4,))
      y = keras.layers.Dense(4)(x)
      model = keras.models.Model(x, y)
    graph = ops.Graph()
    with graph.as_default():
      x = array_ops.ones((10, 4))
      _ = keras.models.clone_model(model, input_tensors=[x])
      has_placeholder = _has_placeholder(graph)
      self.assertFalse(has_placeholder)

  def test_sequential_cloning_does_not_create_unnecessary_placeholders(self):
    """Sequential cloning onto concrete tensors must not create Placeholders."""
    with ops.Graph().as_default():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(4, input_shape=(4,)))
    graph = ops.Graph()
    with graph.as_default():
      x = array_ops.ones((10, 4))
      _ = keras.models.clone_model(model, input_tensors=[x])
      has_placeholder = _has_placeholder(graph)
      self.assertFalse(has_placeholder)

  @keras_parameterized.run_all_keras_modes
  @parameterized.named_parameters([
      {'testcase_name': 'clone_weights', 'share_weights': False},
      {'testcase_name': 'share_weights', 'share_weights': True},
  ])
  def test_functional_cloning_with_tensor_kwarg(self, share_weights):
    """Test that cloning works with models that use Tensor kwargs."""
    if share_weights:
      clone_fn = functools.partial(
          keras.models.clone_model, clone_function=models.share_weights)
    else:
      clone_fn = keras.models.clone_model

    class LayerWithTensorKwarg(keras.layers.Layer):

      def call(self, inputs, tensor=None):
        if tensor is not None:
          return inputs * math_ops.cast(tensor, dtypes.float32)
        else:
          return inputs

    inputs = keras.layers.Input(shape=(3))
    t = array_ops.sequence_mask(array_ops.shape(inputs)[1])
    model = keras.models.Model(inputs, LayerWithTensorKwarg()(inputs, t))
    model.add_loss(math_ops.reduce_sum(model.outputs))
    input_arr = np.random.random((1, 3)).astype(np.float32)
    clone = clone_fn(model)
    if context.executing_eagerly():
      clone(input_arr)
      loss = clone.losses[0]
    else:
      with self.session() as sess:
        clone(input_arr)
        if share_weights:
          self.skipTest('Weight sharing with inputs in call **kwargs does '
                        'not work correctly in v1')
        else:
          feed_dict = {clone.input: input_arr}
          loss = sess.run(clone.losses[0], feed_dict=feed_dict)
    # The mask multiplies by 1, so the added loss is just the input sum.
    self.assertAllClose(np.sum(input_arr), loss)
def _has_placeholder(graph):
ops_types = [op.type for op in graph.get_operations()]
return any('Placeholder' in s for s in ops_types)
class CheckpointingTests(keras_parameterized.TestCase):
  """Checks that model checkpoints include optimizer state."""

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_optimizer_dependency(self):
    """save_weights/load_weights must round-trip optimizer slot variables."""
    model = _get_model()
    opt = adam.AdamOptimizer(.01)
    model.compile(
        optimizer=opt,
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.fit(
        x=np.array([[1., 2., 3., 4.]]),
        y=np.array([[1., 1., 1., 1.]]),
        epochs=2)
    save_prefix = os.path.join(self.get_temp_dir(), 'ckpt')
    beta1_power, _ = opt._get_beta_accumulators()
    # Pin the accumulator to a known value, save, clobber it, then restore --
    # the saved value must come back.
    self.evaluate(beta1_power.assign(12.))
    model.save_weights(save_prefix)
    self.evaluate(beta1_power.assign(13.))
    model.load_weights(save_prefix)
    self.assertEqual(12., self.evaluate(beta1_power))
@keras_parameterized.run_all_keras_modes
class TestModelBackend(keras_parameterized.TestCase):
  """Backend floatx interaction tests."""

  def test_model_backend_float64_use_cases(self):
    """Building and compiling a model under float64 floatx must not raise."""
    # Test case for GitHub issue 19318
    floatx = keras.backend.floatx()
    keras.backend.set_floatx('float64')
    x = keras.Input((5,))
    y = keras.layers.Dense(1)(x)
    model = keras.models.Model(x, y)
    model.compile(
        testing_utils.get_v2_optimizer('rmsprop'),
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    # Restore the global floatx so other tests are unaffected.
    keras.backend.set_floatx(floatx)
class TestCloneAndBuildModel(keras_parameterized.TestCase):
  """Tests for models.clone_and_build_model."""

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_clone_and_build_non_compiled_model(self):
    """Cloning an uncompiled model: compile_clone=True must fail, and the
    clone cannot train or evaluate until it is compiled."""
    inp = np.random.random((10, 4))
    out = np.random.random((10, 4))
    model = _get_model()
    with self.assertRaisesRegex(ValueError, 'has not been compiled'):
      models.clone_and_build_model(model, compile_clone=True)
    is_subclassed = (testing_utils.get_model_type() == 'subclass')
    # With placeholder creation
    new_model = models.clone_and_build_model(
        model, compile_clone=False, in_place_reset=is_subclassed)
    with self.assertRaisesRegex(RuntimeError, 'must compile'):
      new_model.evaluate(inp, out)
    with self.assertRaisesRegex(RuntimeError, 'must compile'):
      new_model.train_on_batch(inp, out)
    new_model.compile(
        testing_utils.get_v2_optimizer('rmsprop'),
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    new_model.train_on_batch(inp, out)
    # Create new tensors for inputs.
    input_a = keras.Input(shape=(4,))
    new_model = models.clone_and_build_model(
        model,
        input_tensors=input_a,
        compile_clone=False,
        in_place_reset=is_subclassed)
    with self.assertRaisesRegex(RuntimeError, 'must compile'):
      new_model.evaluate(inp, out)
    with self.assertRaisesRegex(RuntimeError, 'must compile'):
      new_model.train_on_batch(inp, out)
    new_model.compile(
        testing_utils.get_v2_optimizer('rmsprop'),
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    new_model.train_on_batch(inp, out)

  def _assert_same_compile_params(self, model):
    """Assert that two models have the same compile parameters."""
    self.assertEqual('mse', model.loss)
    self.assertIsInstance(
        model.optimizer,
        (optimizer_v1.RMSprop, keras.optimizer_v2.rmsprop.RMSprop))

  def _clone_and_build_test_helper(self, model, model_type):
    """Clone `model` three ways (placeholders, new input tensors, explicit
    target_tensors=None) and verify each clone compiles, trains, evaluates."""
    inp = np.random.random((10, 4))
    out = np.random.random((10, 4))
    is_subclassed = (model_type == 'subclass')
    # With placeholder creation
    new_model = models.clone_and_build_model(
        model, compile_clone=True, in_place_reset=is_subclassed)
    self._assert_same_compile_params(new_model)
    new_model.train_on_batch(inp, out)
    new_model.evaluate(inp, out)
    # Create new tensors for inputs.
    input_a = keras.Input(shape=(4,), name='a')
    new_model = models.clone_and_build_model(
        model, input_tensors=input_a, compile_clone=True,
        in_place_reset=is_subclassed)
    self._assert_same_compile_params(new_model)
    new_model.train_on_batch(inp, out)
    new_model.evaluate(inp, out)
    new_model = models.clone_and_build_model(
        model,
        input_tensors=input_a,
        target_tensors=None,
        compile_clone=True,
        in_place_reset=is_subclassed)
    self._assert_same_compile_params(new_model)
    new_model.train_on_batch(inp, out)
    new_model.evaluate(inp, out)

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_clone_and_build_compiled(self):
    """Compiled models clone with their compile parameters intact."""
    model = _get_model()
    model.compile(
        testing_utils.get_v2_optimizer('rmsprop'),
        'mse',
        metrics=['acc', metrics.categorical_accuracy],
        run_eagerly=testing_utils.should_run_eagerly())
    self._clone_and_build_test_helper(model, testing_utils.get_model_type())

  @keras_parameterized.run_all_keras_modes
  def test_clone_and_build_sequential_without_inputs_defined(self):
    """Sequential models clone both before and after being built by training."""
    model = models.Sequential(_get_layers(input_shape=None))
    model.compile(
        testing_utils.get_v2_optimizer('rmsprop'),
        'mse',
        metrics=['acc', metrics.categorical_accuracy],
        run_eagerly=testing_utils.should_run_eagerly())
    self._clone_and_build_test_helper(model, 'sequential')
    inp = np.random.random((10, 4))
    out = np.random.random((10, 4))
    model.train_on_batch(inp, out)
    self._clone_and_build_test_helper(model, 'sequential')

  def assert_optimizer_iterations_increases(self, optimizer):
    """Clone with an explicit optimizer_iterations variable; a single train
    step must advance it by one (123 -> 124)."""
    model = _get_model()
    model.compile(
        optimizer,
        'mse',
        metrics=['acc', metrics.categorical_accuracy],
        run_eagerly=testing_utils.should_run_eagerly())
    global_step = keras.backend.variable(123, dtype=dtypes.int64)
    clone_model = models.clone_and_build_model(
        model, compile_clone=True, optimizer_iterations=global_step,
        in_place_reset=(testing_utils.get_model_type() == 'subclass'))
    inp = np.random.random((10, 4))
    out = np.random.random((10, 4))
    clone_model.train_on_batch(inp, out)
    self.assertEqual(K.eval(global_step), 124)

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_replace_tf_optimizer_iterations_variable(self):
    """Same as above but with a v1 TF optimizer (graph mode only)."""
    if context.executing_eagerly():
      self.skipTest('v1 optimizers not supported with eager.')
    self.assert_optimizer_iterations_increases(adam.AdamOptimizer(0.01))

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_replace_keras_optimizer_iterations_variable(self):
    """Same as above but with a Keras optimizer given by string name."""
    self.assert_optimizer_iterations_increases('adam')

  def test_clone_optimizer_in_different_graph(self):
    """Cloning across graphs fails with the live optimizer, but succeeds when
    an optimizer_config dict is supplied instead."""
    with ops.Graph().as_default():
      with self.session():
        model = testing_utils.get_small_sequential_mlp(3, 4)
        optimizer = keras.optimizer_v2.adam.Adam()
        model.compile(
            optimizer, 'mse', metrics=['acc', metrics.categorical_accuracy],
            )
        model.fit(
            x=np.array([[1., 2., 3., 4.]]),
            y=np.array([[1., 1., 1., 1.]]),
            epochs=1)
        optimizer_config = optimizer.get_config()
    with ops.Graph().as_default():
      with self.session():
        with self.assertRaisesRegex(ValueError, 'Cannot use the given session'):
          models.clone_and_build_model(model, compile_clone=True)
        # The optimizer_config object allows the model to be cloned in a
        # different graph.
        models.clone_and_build_model(model, compile_clone=True,
                                     optimizer_config=optimizer_config)
# Run all test cases via the TF test runner when executed directly.
if __name__ == '__main__':
  test.main()
| {
"content_hash": "0dc605c7f61d6bb465587af2e87087fc",
"timestamp": "",
"source": "github",
"line_count": 560,
"max_line_length": 80,
"avg_line_length": 36.49107142857143,
"alnum_prop": 0.6601908490335209,
"repo_name": "petewarden/tensorflow",
"id": "12d1c39f100a65ecb7b879bdb6e8ec97449096e3",
"size": "21124",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/models_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "31796"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "895451"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "82100676"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "112853"
},
{
"name": "Go",
"bytes": "1867248"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "984477"
},
{
"name": "Jupyter Notebook",
"bytes": "550862"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1982867"
},
{
"name": "Makefile",
"bytes": "66496"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "317461"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "20422"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37425809"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "8992"
},
{
"name": "Shell",
"bytes": "700106"
},
{
"name": "Smarty",
"bytes": "35725"
},
{
"name": "Starlark",
"bytes": "3613406"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: newrelic_deployment
version_added: "1.2"
author: "Matt Coddington (@mcodd)"
short_description: Notify newrelic about app deployments
description:
- Notify newrelic about app deployments (see https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/deployment-notifications#api)
options:
token:
description:
- API token, to place in the x-api-key header.
required: true
app_name:
description:
- (one of app_name or application_id are required) The value of app_name in the newrelic.yml file used by the application
required: false
application_id:
description:
- (one of app_name or application_id are required) The application id, found in the URL when viewing the application in RPM
required: false
changelog:
description:
- A list of changes for this deployment
required: false
description:
description:
- Text annotation for the deployment - notes for you
required: false
revision:
description:
- A revision number (e.g., git commit SHA)
required: false
user:
description:
- The name of the user/process that triggered this deployment
required: false
appname:
description:
- Name of the application
required: false
environment:
description:
- The environment for this deployment
required: false
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
type: bool
version_added: 1.5.1
requirements: []
'''
EXAMPLES = '''
- newrelic_deployment:
token: AAAAAA
app_name: myapp
user: ansible deployment
revision: '1.0'
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.six.moves.urllib.parse import urlencode
# ===========================================
# Module execution.
#
def main():
    """Ansible module entry point: validate parameters and POST a deployment
    notification to the New Relic deployments API.

    Exits via module.exit_json (changed=True) on success or check mode, and
    via module.fail_json on parameter conflicts or HTTP failure.
    """
    module = AnsibleModule(
        argument_spec=dict(
            token=dict(required=True, no_log=True),
            app_name=dict(required=False),
            application_id=dict(required=False),
            changelog=dict(required=False),
            description=dict(required=False),
            revision=dict(required=False),
            user=dict(required=False),
            appname=dict(required=False),
            environment=dict(required=False),
            validate_certs=dict(default='yes', type='bool'),
        ),
        required_one_of=[['app_name', 'application_id']],
        supports_check_mode=True
    )

    # Exactly one of app_name / application_id may be provided.
    if module.params["app_name"] and module.params["application_id"]:
        module.fail_json(msg="only one of 'app_name' or 'application_id' can be set")

    params = {}
    if module.params["app_name"]:
        params["app_name"] = module.params["app_name"]
    elif module.params["application_id"]:
        params["application_id"] = module.params["application_id"]
    else:
        # Unreachable in practice: required_one_of already enforces this.
        module.fail_json(msg="you must set one of 'app_name' or 'application_id'")

    # Copy any optional fields the caller supplied.
    optional_keys = ("changelog", "description", "revision", "user",
                     "appname", "environment")
    params.update(
        (key, module.params[key]) for key in optional_keys if module.params[key])

    # If we're in check mode, just exit pretending like we succeeded
    if module.check_mode:
        module.exit_json(changed=True)

    # Send the data to NewRelic
    headers = {
        'x-api-key': module.params["token"],
    }
    response, info = fetch_url(module,
                               "https://rpm.newrelic.com/deployments.xml",
                               data=urlencode(params),
                               headers=headers)
    if info['status'] in (200, 201):
        module.exit_json(changed=True)
    else:
        module.fail_json(msg="unable to update newrelic: %s" % info['msg'])
# Module entry point when executed by Ansible.
if __name__ == '__main__':
    main()
| {
"content_hash": "4bd286a84b9e67182ab077d926be5f43",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 138,
"avg_line_length": 30.401459854014597,
"alnum_prop": 0.6417767106842737,
"repo_name": "SergeyCherepanov/ansible",
"id": "4467bb7f0f54a687416bda0148f6a8759d6c19cd",
"size": "4358",
"binary": false,
"copies": "92",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/monitoring/newrelic_deployment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
} |
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
import Autodesk
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.GeometryConversion)
# Dynamo node body: for each input Revit room/space, collect its boundary
# elements and boundary curves. IN[0] is expected to be a list of wrapped
# Revit spatial elements (rooms/spaces) -- TODO confirm against the node's
# input wiring.
items = UnwrapElement(IN[0])
elementlist = list()
curvelist = list()
for item in items:
    doc = item.Document
    calculator = SpatialElementGeometryCalculator(doc)
    options = Autodesk.Revit.DB.SpatialElementBoundaryOptions()
    # get boundary location from area computation settings
    boundloc = Autodesk.Revit.DB.AreaVolumeSettings.GetAreaVolumeSettings(doc).GetSpatialElementBoundaryLocation(SpatialElementType.Room)
    options.SpatialElementBoundaryLocation = boundloc
    #method #1 - get boundary segments
    blist = list()
    clist = list()
    try:
        for boundarylist in item.GetBoundarySegments(options):
            for boundary in boundarylist:
                blist.append(doc.GetElement(boundary.ElementId))
                clist.append(boundary.Curve.ToProtoType())
    except:
        # Best effort: elements without boundary segments are skipped.
        pass
    #method #2 - spatial element geometry calculator
    try:
        results = calculator.CalculateSpatialElementGeometry(item)
        for face in results.GetGeometry().Faces:
            for bface in results.GetBoundaryFaceInfo(face):
                # Note: only elements are collected here; no curves are added
                # by this method, so blist and clist can differ in length.
                blist.append(doc.GetElement(bface.SpatialBoundaryElement.HostElementId))
    except:
        pass
    # write results to list
    elementlist.append(blist)
    curvelist.append(clist)
# Dynamo outputs: (boundary elements per input item, boundary curves per input item).
OUT = (elementlist,curvelist)
"content_hash": "40686b7c0e5e3c65f983702aa53c40db",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 134,
"avg_line_length": 31.833333333333332,
"alnum_prop": 0.7943156320119671,
"repo_name": "CAAD-RWTH/ClockworkForDynamo",
"id": "a02785ec9876ce4828e2811dd6e73ce362a9452b",
"size": "1337",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nodes/0.9.x/python/Room.Boundaries.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "316146"
}
],
"symlink_target": ""
} |
"""Test node responses to invalid transactions.
In this test we connect to one node over p2p, and test tx requests."""
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import (
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
)
from test_framework.p2p import P2PDataStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
from data import invalid_txs
class InvalidTxRequestTest(BitcoinTestFramework):
    def set_test_params(self):
        """Configure one node on a fresh chain that accepts non-standard txs."""
        self.num_nodes = 1
        self.extra_args = [[
            "-acceptnonstdtxn=1",
        ]]
        self.setup_clean_chain = True
    def bootstrap_p2p(self, *, num_connections=1):
        """Add a P2P connection to the node.

        Helper to connect and wait for version handshake.
        `num_connections` P2PDataStore peers are attached to node 0."""
        for _ in range(num_connections):
            self.nodes[0].add_p2p_connection(P2PDataStore())
    def reconnect_p2p(self, **kwargs):
        """Tear down and bootstrap the P2P connection to the node.

        The node gets disconnected several times in this test. This helper
        method reconnects the p2p and restarts the network thread.
        Extra kwargs are forwarded to bootstrap_p2p."""
        self.nodes[0].disconnect_p2ps()
        self.bootstrap_p2p(**kwargs)
def run_test(self):
node = self.nodes[0] # convenience reference to the node
self.bootstrap_p2p() # Add one p2p connection to the node
best_block = self.nodes[0].getbestblockhash()
tip = int(best_block, 16)
best_block_time = self.nodes[0].getblock(best_block)['time']
block_time = best_block_time + 1
self.log.info("Create a new block with an anyone-can-spend coinbase.")
height = 1
block = create_block(tip, create_coinbase(height), block_time)
block.solve()
# Save the coinbase for later
block1 = block
tip = block.sha256
node.p2ps[0].send_blocks_and_test([block], node, success=True)
self.log.info("Mature the block.")
self.nodes[0].generatetoaddress(100, self.nodes[0].get_deterministic_priv_key().address)
# Iterate through a list of known invalid transaction types, ensuring each is
# rejected. Some are consensus invalid and some just violate policy.
for BadTxTemplate in invalid_txs.iter_all_templates():
self.log.info("Testing invalid transaction: %s", BadTxTemplate.__name__)
template = BadTxTemplate(spend_block=block1)
tx = template.get_tx()
node.p2ps[0].send_txs_and_test(
[tx], node, success=False,
expect_disconnect=template.expect_disconnect,
reject_reason=template.reject_reason,
)
if template.expect_disconnect:
self.log.info("Reconnecting to peer")
self.reconnect_p2p()
# Make two p2p connections to provide the node with orphans
# * p2ps[0] will send valid orphan txs (one with low fee)
# * p2ps[1] will send an invalid orphan tx (and is later disconnected for that)
self.reconnect_p2p(num_connections=2)
self.log.info('Test orphan transaction handling ... ')
# Create a root transaction that we withhold until all dependent transactions
# are sent out and in the orphan cache
SCRIPT_PUB_KEY_OP_TRUE = b'\x51\x75' * 15 + b'\x51'
tx_withhold = CTransaction()
tx_withhold.vin.append(CTxIn(outpoint=COutPoint(block1.vtx[0].sha256, 0)))
tx_withhold.vout.append(CTxOut(nValue=50 * COIN - 12000, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
tx_withhold.vout.append(CTxOut(12000)) # fee
tx_withhold.calc_sha256()
# Our first orphan tx with some outputs to create further orphan txs
tx_orphan_1 = CTransaction()
tx_orphan_1.vin.append(CTxIn(outpoint=COutPoint(tx_withhold.sha256, 0)))
tx_orphan_1.vout = [CTxOut(nValue=10 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE)] * 3
tx_orphan_1.vout.append(CTxOut(20 * COIN - 12000)) # fee
tx_orphan_1.calc_sha256()
# A valid transaction with low fee
tx_orphan_2_no_fee = CTransaction()
tx_orphan_2_no_fee.vin.append(CTxIn(outpoint=COutPoint(tx_orphan_1.sha256, 0)))
tx_orphan_2_no_fee.vout.append(CTxOut(nValue=10 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
# A valid transaction with sufficient fee
tx_orphan_2_valid = CTransaction()
tx_orphan_2_valid.vin.append(CTxIn(outpoint=COutPoint(tx_orphan_1.sha256, 1)))
tx_orphan_2_valid.vout.append(CTxOut(nValue=10 * COIN - 12000, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
tx_orphan_2_valid.vout.append(CTxOut(12000)) # fee
tx_orphan_2_valid.calc_sha256()
# An invalid transaction with negative fee
tx_orphan_2_invalid = CTransaction()
tx_orphan_2_invalid.vin.append(CTxIn(outpoint=COutPoint(tx_orphan_1.sha256, 2)))
tx_orphan_2_invalid.vout.append(CTxOut(nValue=11 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
tx_orphan_2_invalid.calc_sha256()
self.log.info('Send the orphans ... ')
# Send valid orphan txs from p2ps[0]
node.p2ps[0].send_txs_and_test([tx_orphan_1, tx_orphan_2_no_fee, tx_orphan_2_valid], node, success=False)
# Send invalid tx from p2ps[1]
node.p2ps[1].send_txs_and_test([tx_orphan_2_invalid], node, success=False)
assert_equal(0, node.getmempoolinfo()['size']) # Mempool should be empty
assert_equal(2, len(node.getpeerinfo())) # p2ps[1] is still connected
self.log.info('Send the withhold tx ... ')
with node.assert_debug_log(expected_msgs=["bad-txns-in-ne-out"]):
node.p2ps[0].send_txs_and_test([tx_withhold], node, success=True)
# Transactions that should end up in the mempool
expected_mempool = {
t.hash
for t in [
tx_withhold, # The transaction that is the root for all orphans
tx_orphan_1, # The orphan transaction that splits the coins
tx_orphan_2_valid, # The valid transaction (with sufficient fee)
]
}
# Transactions that do not end up in the mempool
# tx_orphan_no_fee, because it has too low fee (p2ps[0] is not disconnected for relaying that tx)
# tx_orphan_invalid, because it has negative fee (p2ps[1] is disconnected for relaying that tx)
self.wait_until(lambda: 1 == len(node.getpeerinfo()), timeout=12) # p2ps[1] is no longer connected
assert_equal(expected_mempool, set(node.getrawmempool()))
self.log.info('Test orphan pool overflow')
orphan_tx_pool = [CTransaction() for _ in range(101)]
for i in range(len(orphan_tx_pool)):
orphan_tx_pool[i].vin.append(CTxIn(outpoint=COutPoint(i, 333)))
orphan_tx_pool[i].vout.append(CTxOut(nValue=11 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
with node.assert_debug_log(['orphanage overflow, removed 1 tx']):
node.p2ps[0].send_txs_and_test(orphan_tx_pool, node, success=False)
rejected_parent = CTransaction()
rejected_parent.vin.append(CTxIn(outpoint=COutPoint(tx_orphan_2_invalid.sha256, 0)))
rejected_parent.vout.append(CTxOut(nValue=11 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
rejected_parent.rehash()
with node.assert_debug_log(['not keeping orphan with rejected parents {}'.format(rejected_parent.hash)]):
node.p2ps[0].send_txs_and_test([rejected_parent], node, success=False)
# Standard functional-test entry point.
if __name__ == '__main__':
    InvalidTxRequestTest().main()
| {
"content_hash": "6462b817e01b385b62dfd3896c640fe4",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 113,
"avg_line_length": 45.708333333333336,
"alnum_prop": 0.6453965360072926,
"repo_name": "ElementsProject/elements",
"id": "fa12865c3def6d9fb39db42d54fa5ce4c4993942",
"size": "7893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/p2p_invalid_tx.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28178"
},
{
"name": "C",
"bytes": "1836312"
},
{
"name": "C++",
"bytes": "9659428"
},
{
"name": "CMake",
"bytes": "29132"
},
{
"name": "Cap'n Proto",
"bytes": "1256"
},
{
"name": "Dockerfile",
"bytes": "1093"
},
{
"name": "HTML",
"bytes": "22723"
},
{
"name": "Java",
"bytes": "695"
},
{
"name": "M4",
"bytes": "207197"
},
{
"name": "Makefile",
"bytes": "126788"
},
{
"name": "Objective-C++",
"bytes": "5496"
},
{
"name": "Python",
"bytes": "4336448"
},
{
"name": "QMake",
"bytes": "438"
},
{
"name": "Sage",
"bytes": "42757"
},
{
"name": "Scheme",
"bytes": "25953"
},
{
"name": "Shell",
"bytes": "212725"
}
],
"symlink_target": ""
} |
"""
pyexcel_io.constants
~~~~~~~~~~~~~~~~~~~
Constants appeared in pyexcel
:copyright: (c) 2014-2017 by Onni Software Ltd.
:license: New BSD License
"""
# flake8: noqa
DEFAULT_NAME = 'pyexcel'
DEFAULT_SHEET_NAME = '%s_sheet1' % DEFAULT_NAME
MESSAGE_INVALID_PARAMETERS = "Invalid parameters"
MESSAGE_ERROR_02 = "No content, file name. Nothing is given"
MESSAGE_ERROR_03 = "cannot handle unknown content"
MESSAGE_WRONG_IO_INSTANCE = "Wrong io instance is passed for your file format."
MESSAGE_CANNOT_WRITE_STREAM_FORMATTER = "Cannot write content of file type %s to stream"
MESSAGE_CANNOT_READ_STREAM_FORMATTER = "Cannot read content of file type %s from stream"
MESSAGE_CANNOT_WRITE_FILE_TYPE_FORMATTER = "Cannot write content of file type %s to file %s"
MESSAGE_CANNOT_READ_FILE_TYPE_FORMATTER = "Cannot read content of file type %s from file %s"
MESSAGE_LOADING_FORMATTER = "The plugin for file type %s is not installed. Please install %s"
MESSAGE_EMPTY_ARRAY = "One empty row is found"
MESSAGE_IGNORE_ROW = "One row is ignored"
MESSAGE_DB_EXCEPTION = """
Warning: Bulk insertion got below exception. Trying to do it one by one slowly."""
FILE_FORMAT_CSV = 'csv'
FILE_FORMAT_TSV = 'tsv'
FILE_FORMAT_CSVZ = 'csvz'
FILE_FORMAT_TSVZ = 'tsvz'
FILE_FORMAT_ODS = 'ods'
FILE_FORMAT_XLS = 'xls'
FILE_FORMAT_XLSX = 'xlsx'
FILE_FORMAT_XLSM = 'xlsm'
DB_SQL = 'sql'
DB_DJANGO = 'django'
KEYWORD_TSV_DIALECT = 'excel-tab'
KEYWORD_LINE_TERMINATOR = 'lineterminator'
SKIP_DATA = -1
TAKE_DATA = 0
STOP_ITERATION = 1
| {
"content_hash": "093b2f685bb265d3f226e74409282e6d",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 93,
"avg_line_length": 35.48837209302326,
"alnum_prop": 0.7208387942332897,
"repo_name": "fuhrysteve/pyexcel-io",
"id": "13c33fd1b533bdb6c60286a3ebaec46f7a8e5a77",
"size": "1526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyexcel_io/constants.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "223"
},
{
"name": "Makefile",
"bytes": "145"
},
{
"name": "Python",
"bytes": "146729"
},
{
"name": "Shell",
"bytes": "223"
}
],
"symlink_target": ""
} |
"""
Faceted logistic regression
===========================
_thumb: .58, .5
"""
import seaborn as sns
sns.set(style="darkgrid")
# Load the example Titanic dataset
df = sns.load_dataset("titanic")
# Make a custom palette with gendered colors
pal = dict(male="#6495ED", female="#F08080")
# Show the survival probability as a function of age and sex
g = sns.lmplot(x="age", y="survived", col="sex", hue="sex", data=df,
palette=pal, y_jitter=.02, logistic=True)
g.set(xlim=(0, 80), ylim=(-.05, 1.05))
| {
"content_hash": "33c1ef74c127e85ef0b441d335100680",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 68,
"avg_line_length": 27.157894736842106,
"alnum_prop": 0.6317829457364341,
"repo_name": "petebachant/seaborn",
"id": "de6aafae387bdd2faad6e0d774c2270ce3a720fc",
"size": "516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/logistic_regression.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "342"
},
{
"name": "Python",
"bytes": "652614"
}
],
"symlink_target": ""
} |
import os
import os.path
from setuptools import setup, find_packages
def get_package_data():
    """Collect the resource files shipped with the evidence string
    generation package, as paths relative to the package root."""
    collected = []
    for dirpath, _, files in os.walk('./eva_cttv_pipeline/evidence_string_generation/resources'):
        rel_dir = dirpath.replace("./eva_cttv_pipeline/", "")
        collected.extend(os.path.join(rel_dir, name) for name in files)
    return collected
def get_requires():
    """Read requirements.txt from the current directory and return its
    lines (trailing whitespace stripped), one entry per line."""
    with open("requirements.txt", "rt") as req_file:
        return [line.rstrip() for line in req_file]
# Package metadata; requirements.txt drives install/test/setup requirements.
setup(name='eva_cttv_pipeline',
      version='2.5.2',
      packages=find_packages(),
      install_requires=get_requires(),
      #! TBD: list as a dependency subpackage 'clinvar_xml_utils.clinvar_xml_utils.clinvar_xml_utils'
      package_data={
          'eva_cttv_pipeline': get_package_data(),
          'consequence_prediction': ['vep_mapping_pipeline/severity_ranking.txt']
      },
      tests_require=get_requires(),
      setup_requires=get_requires(),
      test_suite='tests'
      )
| {
"content_hash": "4e739b1588e8822bf7f2ded66cd6083b",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 105,
"avg_line_length": 30.916666666666668,
"alnum_prop": 0.6343216531895777,
"repo_name": "EBIvariation/eva-cttv-pipeline",
"id": "b5cc46db0d58d72bffae6162814ccef381faabd3",
"size": "1113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "223694"
}
],
"symlink_target": ""
} |
from service import *
from jsonbuilder import *
from xmlbuilder import *
class ProductService(Service):
    """
    Client for the Product Service of the GittiGidiyor RESTLIKE API.
    """
    def __init__(self, auth):
        """
        Base Constructor. \n
        auth = Auth(username = 'testuser', password = 'testpassword', key = 'apikey', secret = 'apisecret') \n
        productApi = ProductService(auth)
        """
        Service.__init__(self, "product", auth)
        # Holds the response headers of the most recent request.
        self.headers = None
    def getProducts(self, sessionId, startOffSet, rowCount, status, withData, outputCT, lang):
        """
        Performs the 'getProducts' API method of the Product Service. \n
        sessionId: session key of the logged in user. \n
        startOffSet: offset of the first product to fetch. \n
        rowCount: how many products to fetch. \n
        status: status filter for the user's products. Can be; \n
            A - active sales
            L - newly listed
            S - sold items
            U - unsold items
            R - re-listed items
        withData: if True, full product details are also fetched. \n
        outputCT: output content type (xml or json) \n
        lang: 'tr' or 'en' \n
        """
        request_url = "https://dev.gittigidiyor.com/listingapi/rlws/community/product?method=getProducts"
        request_url += "&outputCT=%s&apiKey=%s&sign=%s&time=%s&sessionId=%s&lang=%s&startOffSet=%d&rowCount=%d&status=%s&withData=%s"
        withData = "true" if withData else "false"
        ts = self.createTimeStamp()
        sig = self.signature(ts)
        request_url = request_url % (outputCT, self.auth.key, sig, ts, sessionId, lang, startOffSet, rowCount, status, withData)
        response, content = self.makeRequest(request_url)
        self.headers = response
        return content
    def getProduct(self, sessionId, productId, outputCT, lang):
        """
        Performs the 'getProduct' API method for the Product Service API of Gittigidiyor.
        """
        request_url = "https://dev.gittigidiyor.com/listingapi/rlws/community/product?method=getProduct&outputCT=%s&apiKey=%s&sign=%s&time=%s&sessionId=%s&lang=%s&productId=%s"
        ts = self.createTimeStamp()
        sig = self.signature(ts)
        request_url = request_url % (outputCT, self.auth.key, sig, ts, sessionId, lang, productId)
        response, content = self.makeRequest(request_url)
        self.headers = response
        return content
| {
"content_hash": "41647997a00290167d2746a5aa133b1c",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 168,
"avg_line_length": 42.193548387096776,
"alnum_prop": 0.6276758409785933,
"repo_name": "Annelutfen/gittigidiyor-python",
"id": "a6f84ac48e715abe0afe52b9b21e5e1b64f5d628",
"size": "4329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gittigidiyor/productservice.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import collections
import idaapi
import idautils
import idc
import itertools
import struct
import inspect
_DEBUG_FILE = None
_DEBUG_PREFIX = ""
_INFO = idaapi.get_inf_structure()
# Map of the external functions names which does not return to a tuple containing information
# like the number of agruments and calling convention of the function.
_NORETURN_EXTERNAL_FUNC = {}
RTTI_REFERENCE_TABLE = collections.defaultdict()
FUNC_LSDA_ENTRIES = collections.defaultdict()
IS_ARM = "ARM" in _INFO.procName
# True if we are running on an ELF file.
IS_ELF = (idaapi.f_ELF == _INFO.filetype) or \
(idc.GetLongPrm(idc.INF_FILETYPE) == idc.FT_ELF)
# True if this is a Windows PE file.
IS_PE = idaapi.f_PE == _INFO.filetype
if IS_ARM:
from arm_util import *
else:
from x86_util import *
def INIT_DEBUG_FILE(file):
  """Set the file object that all `DEBUG` output is written to."""
  global _DEBUG_FILE
  _DEBUG_FILE = file
def DEBUG_PUSH():
  """Increase the indentation level of subsequent `DEBUG` output."""
  global _DEBUG_PREFIX
  _DEBUG_PREFIX += "  "
def DEBUG_POP():
  """Decrease the indentation level of subsequent `DEBUG` output."""
  global _DEBUG_PREFIX
  _DEBUG_PREFIX = _DEBUG_PREFIX[:-2]
def DEBUG(s):
  """Write one indented line to the debug log file, if one was set."""
  global _DEBUG_FILE
  if _DEBUG_FILE:
    _DEBUG_FILE.write("{}{}\n".format(_DEBUG_PREFIX, str(s)))
# Python 2.7's xrange doesn't work with `long`s.
def xrange(begin, end=None, step=1):
  """Replacement for the builtin `xrange` that supports `long` bounds.
  Fixes two defects in the old version: `if end:` treated `end=0` as
  "no end given" (breaking e.g. `xrange(5, 0, -1)`), and the Py2-only
  `.next` bound method made the helper unusable on Python 3.
  """
  if end is not None:
    counter = itertools.count(begin, step)
    return iter(lambda: next(counter), end)
  else:
    # One-argument form: count from 0 up to `begin` (step is ignored,
    # matching the original behavior).
    counter = itertools.count()
    return iter(lambda: next(counter), begin)
_NOT_INST_EAS = set()
def sign_extend(x, b):
  """Interpret the low `b` bits of `x` as a two's-complement value."""
  x &= (1 << b) - 1
  if x & (1 << (b - 1)):
    x -= 1 << b
  return x
# Returns `True` if `ea` belongs to some code segment.
#
# TODO(pag): This functon is extra aggressive, in that it doesn't strictly
# trust the `idc.isCode`. I have observed cases where data in
# `.bss` is treated as code and I am not sure why. Perhaps adding
# a reference to the data did this.
#
# I think it has something to do with ELF thunks, e.g. entries in
# the `.plt` section. When I made this function stricter,
# `mcsema-lift` would report issues where it needed to add tail-calls
# to externals.
def is_code(ea):
  """Returns `True` if `ea` falls inside a segment of type `SEG_CODE`."""
  if is_invalid_ea(ea):
    return False
  seg_ea = idc.SegStart(ea)
  seg_type = idc.GetSegmentAttr(seg_ea, idc.SEGATTR_TYPE)
  return seg_type == idc.SEG_CODE
# A stricter form of `is_code`, where we also check whether IDA thinks something
# is code. IDA is able to identify some things like embedded exception tables
# in the code section as not truly being code.
def is_code_by_flags(ea):
  """Stricter `is_code`: also requires IDA's own flags to mark `ea` as code."""
  if not is_code(ea):
    return False
  flags = idc.GetFlags(ea)
  return idc.isCode(flags)
def is_read_only_segment(ea):
  """Returns `True` if the segment holding `ea` is readable but not writable."""
  mask_perms = idaapi.SEGPERM_WRITE | idaapi.SEGPERM_READ
  perms = idc.GetSegmentAttr(ea, idc.SEGATTR_PERM)
  return idaapi.SEGPERM_READ == (perms & mask_perms)
def is_tls_segment(ea):
  """Returns `True` if `ea` lies in a thread-local storage segment."""
  try:
    seg_name = idc.SegName(ea)
    return seg_name in (".tbss", ".tdata", ".tls")
  except:  # Any IDA API failure on a bad address means "not TLS".
    return False
# Returns `True` if `ea` looks like a thread-local thing.
def is_tls(ea):
  """Returns `True` if `ea` is thread-local: it either lives in a TLS
  segment, or some reference to it is commented by IDA as a TLS-reference."""
  if is_invalid_ea(ea):
    return False
  if is_tls_segment(ea):
    return True
  # Something references `ea`, and that something is commented as being a
  # `TLS-reference`. This comes up if you have an thread-local extern variable
  # declared/used in a binary, and defined in a shared lib. There will be an
  # offset variable.
  for source_ea in _drefs_to(ea):
    comment = idc.GetCommentEx(source_ea, 0)
    if isinstance(comment, str) and "TLS-reference" in comment:
      return True
  return False
# Mark an address as containing code.
def try_mark_as_code(ea):
  """Ask IDA to convert `ea` into code; returns `True` on success."""
  if is_code(ea) and not is_code_by_flags(ea):
    idc.MakeCode(ea)
    idaapi.autoWait()  # Let IDA's auto-analysis settle before continuing.
    return True
  return False
def mark_as_not_code(ea):
  """Record that `ea` must never be decoded as an instruction head."""
  global _NOT_INST_EAS
  _NOT_INST_EAS.add(ea)
def read_bytes_slowly(start, end):
  """Read the bytes in `[start, end)` one address at a time, substituting
  a NUL byte for any address that has no loaded value."""
  return "".join(
      chr(idc.Byte(addr)) if idc.hasValue(idc.GetFlags(addr)) else "\x00"
      for addr in xrange(start, end))
def read_byte(ea):
  """Read the single byte at `ea` as an unsigned integer."""
  return ord(read_bytes_slowly(ea, ea + 1))
def read_word(ea):
  """Read a little-endian 16-bit unsigned integer at `ea`.
  Bug fix: the format was "<L" (4 bytes), which made `struct.unpack`
  raise `struct.error` on the 2-byte buffer read here; "<H" matches
  the two bytes actually read.
  """
  bytestr = read_bytes_slowly(ea, ea + 2)
  word = struct.unpack("<H", bytestr)[0]
  return word
def read_dword(ea):
  """Read a little-endian 32-bit unsigned integer at `ea`."""
  bytestr = read_bytes_slowly(ea, ea + 4)
  dword = struct.unpack("<L", bytestr)[0]
  return dword
def read_qword(ea):
  """Read a little-endian 64-bit unsigned integer at `ea`."""
  bytestr = read_bytes_slowly(ea, ea + 8)
  qword = struct.unpack("<Q", bytestr)[0]
  return qword
def read_leb128(ea, signed):
  """Read a LEB128-encoded integer starting at `ea`.
  Returns a `(value, next_ea)` tuple on success. NOTE(review): on an
  over-long encoding (shift > 64) this returns the bare `idc.BADADDR`
  instead of a tuple, so callers that unpack the result will raise —
  confirm whether any caller relies on that.
  """
  val = 0
  shift = 0
  while True:
    byte = idc.Byte(ea)
    val |= (byte & 0x7F) << shift
    shift += 7
    ea += 1
    if (byte & 0x80) == 0:
      break
    if shift > 64:
      DEBUG("Bad leb128 encoding at {0:x}".format(ea - shift/7))
      return idc.BADADDR
  # For signed values, bit 0x40 of the last byte is the sign bit.
  if signed and (byte & 0x40):
    val -= (1<<shift)
  return val, ea
def read_pointer(ea):
  """Read an address-sized (4- or 8-byte) little-endian value at `ea`."""
  return read_qword(ea) if _INFO.is_64bit() else read_dword(ea)
def instruction_personality(arg):
  """Map an instruction (or the EA of one) to its PERSONALITY_* class.
  `arg` may be an address or an already-decoded instruction. When the
  address cannot be decoded, `decode_instruction` yields `None`, and the
  `AttributeError` path falls back to PERSONALITY_NORMAL.
  """
  global PERSONALITIES
  if isinstance(arg, (int, long)):
    arg, _ = decode_instruction(arg)
  try:
    p = PERSONALITIES[arg.itype]
  except AttributeError:
    p = PERSONALITY_NORMAL
  return fixup_personality(arg, p)
def is_conditional_jump(arg):
  """Returns `True` if `arg` (instruction or EA) is a conditional branch."""
  return instruction_personality(arg) == PERSONALITY_CONDITIONAL_BRANCH
def is_unconditional_jump(arg):
  """Returns `True` for direct or indirect unconditional jumps."""
  return instruction_personality(arg) in (PERSONALITY_DIRECT_JUMP, PERSONALITY_INDIRECT_JUMP)
def is_direct_jump(arg):
  """Returns `True` if `arg` is a direct (fixed-target) jump."""
  return instruction_personality(arg) == PERSONALITY_DIRECT_JUMP
def is_indirect_jump(arg):
  """Returns `True` if `arg` is an indirect (computed-target) jump."""
  return instruction_personality(arg) == PERSONALITY_INDIRECT_JUMP
def is_function_call(arg):
  """Returns `True` for direct or indirect function calls."""
  return instruction_personality(arg) in (PERSONALITY_DIRECT_CALL, PERSONALITY_INDIRECT_CALL)
def is_indirect_function_call(arg):
  """Returns `True` if `arg` is an indirect (computed-target) call."""
  return instruction_personality(arg) == PERSONALITY_INDIRECT_CALL
def is_direct_function_call(arg):
  """Returns `True` if `arg` is a direct (fixed-target) call."""
  return instruction_personality(arg) == PERSONALITY_DIRECT_CALL
def is_return(arg):
  """Returns `True` if `arg` is a function-return instruction."""
  return instruction_personality(arg) == PERSONALITY_RETURN
def is_control_flow(arg):
  """Returns `True` if `arg` is anything other than a fall-through instruction."""
  return instruction_personality(arg) != PERSONALITY_NORMAL
def instruction_ends_block(arg):
  """Returns `True` for instructions that terminate a basic block."""
  return instruction_personality(arg) in (PERSONALITY_CONDITIONAL_BRANCH,
                                          PERSONALITY_DIRECT_JUMP,
                                          PERSONALITY_INDIRECT_JUMP,
                                          PERSONALITY_RETURN,
                                          PERSONALITY_TERMINATOR,
                                          PERSONALITY_SYSTEM_RETURN)
def is_invalid_ea(ea):
  """Returns `True` if `ea` is not valid, i.e. it doesn't point into any
  valid segment."""
  if idc.BADADDR == ea:
    return True
  try:
    idc.GetSegmentAttr(idc.SegStart(ea), idc.SEGATTR_TYPE)
    return False  # If we get here, then it must be a valid ea!
  except:  # Any IDA API failure means `ea` falls outside every segment.
    return True
_BAD_INSTRUCTION = (None, "")
def decode_instruction(ea):
  """Read the bytes of an x86/amd64 instruction. This handles things like
  combining the bytes of an instruction with its prefix. IDA Pro sometimes
  treats these as separate.
  Returns an `(instruction, bytes)` pair, or `(None, "")` when `ea` cannot
  be decoded; failed EAs are cached in `_NOT_INST_EAS`.
  """
  global _NOT_INST_EAS, _BAD_INSTRUCTION, PREFIX_ITYPES
  if ea in _NOT_INST_EAS:
    return _BAD_INSTRUCTION
  decoded_inst = idautils.DecodeInstruction(ea)
  if not decoded_inst:
    _NOT_INST_EAS.add(ea)
    return _BAD_INSTRUCTION
  assert decoded_inst.ea == ea
  end_ea = ea + decoded_inst.size
  decoded_bytes = read_bytes_slowly(ea, end_ea)
  # We've got an instruction with a prefix, but the prefix is treated as
  # independent.
  if 1 == decoded_inst.size and decoded_inst.itype in PREFIX_ITYPES:
    decoded_inst, extra_bytes = decode_instruction(end_ea)
    decoded_bytes += extra_bytes
  return decoded_inst, decoded_bytes
_NOT_EXTERNAL_SEGMENTS = set([idc.BADADDR])
_EXTERNAL_SEGMENTS = set()
def segment_contains_external_function_pointers(seg_ea):
  """Returns `True` if a segment contains pointers to external functions."""
  try:
    seg_name = idc.SegName(seg_ea)
    return seg_name.lower() in (".idata", ".plt.got")
  except:  # SegName can fail on bad addresses; treat as "no".
    return False
def is_external_segment_by_flags(ea):
  """Returns `True` if IDA believes that `ea` belongs to an external segment."""
  try:
    seg_ea = idc.SegStart(ea)
    seg_type = idc.GetSegmentAttr(seg_ea, idc.SEGATTR_TYPE)
    if seg_type == idc.SEG_XTRN:
      _EXTERNAL_SEGMENTS.add(seg_ea)  # Cache the positive result.
      return True
    else:
      return False
  except:  # API failure on a bad address: not external.
    return False
def is_external_segment(ea):
  """Returns `True` if the segment containing `ea` looks to be solely containing
  external references.
  Results are memoized per segment in `_EXTERNAL_SEGMENTS` /
  `_NOT_EXTERNAL_SEGMENTS`. (Cleanup: removed the unused `ext_types`
  local that the old version allocated and never read.)
  """
  global _NOT_EXTERNAL_SEGMENTS
  seg_ea = idc.SegStart(ea)
  if seg_ea in _NOT_EXTERNAL_SEGMENTS:
    return False
  if seg_ea in _EXTERNAL_SEGMENTS:
    return True
  if is_external_segment_by_flags(ea):
    _EXTERNAL_SEGMENTS.add(seg_ea)
    return True
  seg_name = idc.SegName(seg_ea).lower()
  if IS_ELF:
    if ".got" in seg_name or ".plt" in seg_name:
      _EXTERNAL_SEGMENTS.add(seg_ea)
      return True
  elif IS_PE:
    if ".idata" == seg_name:  # Import table.
      _EXTERNAL_SEGMENTS.add(seg_ea)
      return True
  _NOT_EXTERNAL_SEGMENTS.add(seg_ea)
  return False
def is_constructor_segment(ea):
  """Returns `True` if the segment containing `ea` is a global constructor
  section (`.init_array` or `.ctor`)."""
  seg_name = idc.SegName(idc.SegStart(ea)).lower()
  return seg_name in (".init_array", ".ctor")
def is_destructor_segment(ea):
  """Returns `True` if the segment containing `ea` is a global destructor
  section (`.fini_array` or `.dtor`)."""
  seg_name = idc.SegName(idc.SegStart(ea)).lower()
  return seg_name in (".fini_array", ".dtor")
def get_destructor_segment():
  """Returns the start address of the global destructor section, or
  `None` when the binary has no such section."""
  for seg_ea in idautils.Segments():
    if idc.SegName(seg_ea).lower() in (".fini_array", ".dtor"):
      return seg_ea
def is_internal_code(ea):
  """Returns `True` if `ea` is code defined inside the binary itself
  (i.e. not in an external/import segment)."""
  if is_invalid_ea(ea):
    return False
  if is_external_segment(ea):
    return False
  if is_code(ea):
    return True
  # find stray 0x90 (NOP) bytes in .text that IDA
  # thinks are data items.
  flags = idc.GetFlags(ea)
  if idaapi.isAlign(flags):
    if not try_mark_as_code(ea):
      return False
    return True
  return False
def is_block_or_instruction_head(ea):
  """Returns `True` if `ea` looks like it's the beginning of an actual
  instruction."""
  return is_internal_code(ea) and idc.ItemHead(ea) == ea
def get_address_size_in_bits():
  """Returns the available address size."""
  global _INFO
  return 64 if _INFO.is_64bit() else 32
def get_address_size_in_bytes():
  """Returns the address size in bytes (4 or 8)."""
  return get_address_size_in_bits() / 8
_FORCED_NAMES = {}
# Tries to set the name of a symbol. IDA can be pretty dumb when symbol names
# conflict with register names, so we have the backup path of splatting things
# into a dictionary.
def set_symbol_name(ea, name):
  """Name the symbol at `ea`, also remembering the name in `_FORCED_NAMES`
  in case IDA rejects it (e.g. conflicts with a register name)."""
  global _FORCED_NAMES
  flags = idaapi.SN_PUBLIC | idaapi.SN_NOCHECK | idaapi.SN_NON_AUTO | idaapi.SN_NOWARN
  _FORCED_NAMES[ea] = name
  idc.MakeNameEx(ea, name, flags)
# Tries to get the name of a symbol.
def get_symbol_name(from_ea, ea=None, allow_dummy=False):
  """Get the name of the symbol at `ea` (defaulting to `from_ea`).
  Names previously forced via `set_symbol_name` win; IDA-generated
  "dummy" names are suppressed unless `allow_dummy` is set. Returns
  the empty string when no acceptable name is found.
  """
  if ea is None:
    ea = from_ea
  global _FORCED_NAMES
  if ea in _FORCED_NAMES:
    return _FORCED_NAMES[ea]
  flags = idc.GetFlags(ea)
  if not allow_dummy and idaapi.has_dummy_name(flags):
    return ""
  name = ""
  try:
    name = name or idc.GetTrueNameEx(from_ea, ea)
  except:  # Best effort; fall through to the next lookup.
    pass
  try:
    name = name or idc.GetFunctionName(ea)
  except:  # Best effort; return whatever we have.
    pass
  return name
def get_function_bounds(ea):
  """Get the bounds of the function containing `ea`. We want to discover jump
  table targets that are missed by IDA, and it's possible that they aren't
  marked as being part of the current function, and perhaps are after the
  assumed range of the current function. Ideally they will fall before the
  beginning of the next function, though.
  We need to be pretty careful with the case that one function tail-calls
  another. IDA will sometimes treat the end of the tail-called function
  (e.g. a thunk) as if it is the end of the caller. For this reason, we start
  with loose bounds using the prev/next functions, then try to narrow with
  the bounds of the function containing `ea`.
  Returns a `(min_ea, max_ea)` pair; for invalid or non-code `ea` the
  degenerate pair `(ea, ea)` is returned.
  TODO(pag): Handle discontinuous regions (e.g. because of function chunks).
             It may be worth to return an object here that can we queried
             for membership using the `__in__` method.
  """
  # Start from the enclosing segment's bounds.
  seg_start, seg_end = idc.SegStart(ea), idc.SegEnd(ea)
  min_ea = seg_start
  max_ea = seg_end
  if is_invalid_ea(min_ea) or not is_code(ea):
    return ea, ea
  # Get an upper bound using the next function.
  next_func_ea = idc.NextFunction(ea)
  if not is_invalid_ea(next_func_ea):
    max_ea = min(next_func_ea, max_ea)
  # Get a lower bound using the previous function.
  prev_func_ea = idc.PrevFunction(ea)
  if not is_invalid_ea(prev_func_ea):
    min_ea = max(min_ea, prev_func_ea)
    prev_func = idaapi.get_func(prev_func_ea)
    if prev_func and prev_func.endEA < ea:
      min_ea = max(min_ea, prev_func.endEA)
  # Try to tighten the bounds using the function containing `ea`.
  func = idaapi.get_func(ea)
  if func:
    min_ea = max(min_ea, func.startEA)
    max_ea = min(max_ea, func.endEA)
  return min_ea, max_ea
def noreturn_external_function(fname, args, realconv, ret, sign):
  """Register `fname` as an external function that does not return.
  A basic block terminates upon seeing a call to such a function.
  """
  global _NORETURN_EXTERNAL_FUNC
  if fname:
    _NORETURN_EXTERNAL_FUNC[fname] = (args, realconv, ret, sign)
def is_noreturn_external_function(ea):
  """Returns `True` if ea refers to an external function which does not return.
  """
  # Follow the reference at `ea` (e.g. a PLT/import slot) to its target.
  target_ea = get_reference_target(ea)
  return get_symbol_name(target_ea) in _NORETURN_EXTERNAL_FUNC
def is_noreturn_function(ea):
  """Returns `True` if the function at `ea` is a no-return function.
  A function with an LSDA (exception handling) entry, or one that looks
  like `__cxa_throw`, is excluded even when IDA flags it FUNC_NORET.
  (Perf/idiom fix: test membership on the dict directly instead of
  materializing `FUNC_LSDA_ENTRIES.keys()` on every call.)
  """
  flags = idc.GetFunctionFlags(ea)
  return 0 < flags and \
         (flags & idaapi.FUNC_NORET) and \
         ea not in FUNC_LSDA_ENTRIES and \
         "cxa_throw" not in get_symbol_name(ea)
_CREFS_FROM = collections.defaultdict(set)
_DREFS_FROM = collections.defaultdict(set)
_CREFS_TO = collections.defaultdict(set)
_DREFS_TO = collections.defaultdict(set)
def make_xref(from_ea, to_ea, xref_constructor, xref_size):
  """Force the data at `from_ea` to reference the data at `to_ea`.
  `xref_constructor` converts `from_ea` into the right data type and
  `xref_size` is that type's byte width — presumably an `idc.Make*`
  function and its size; confirm at call sites.
  """
  if not idc.GetFlags(to_ea) or is_invalid_ea(to_ea):
    DEBUG("  Not making reference (A) from {:x} to {:x}".format(from_ea, to_ea))
    return
  make_head(from_ea)
  # Record the reference in our own maps as well, since IDA sometimes
  # refuses to keep user-added xrefs.
  if is_code(from_ea):
    _CREFS_FROM[from_ea].add(to_ea)
    _CREFS_TO[to_ea].add(from_ea)
  else:
    _DREFS_FROM[from_ea].add(to_ea)
    _DREFS_TO[to_ea].add(from_ea)
  # If we can't make a head, then it probably means that we're at the
  # end of the binary, e.g. the last thing in the `.extern` segment.
  if not make_head(from_ea + xref_size):
    assert idc.BADADDR == idc.SegStart(from_ea + xref_size)
    idaapi.do_unknown_range(from_ea, xref_size, idc.DOUNK_EXPAND)
    xref_constructor(from_ea)
    if not is_code_by_flags(from_ea):
      idc.add_dref(from_ea, to_ea, idc.XREF_USER|idc.dr_O)
    else:
      DEBUG("  Not making reference (B) from {:x} to {:x}".format(from_ea, to_ea))
# Placeholder xref finders that yield only BADADDR; used as the default
# `dref_finder`/`cref_finder` in `_reference_checker` to disable a lookup.
_IGNORE_DREF = (lambda x: [idc.BADADDR])
_IGNORE_CREF = (lambda x, y: [idc.BADADDR])
def _stop_looking_for_xrefs(ea):
  """This is a heuristic to decide whether or not we should stop looking for
  cross-references. It is relevant to IDA structs, where IDA will treat structs
  and everything in them as one single 'thing', and so all xrefs embedded within
  a struct will actually be associated with the first EA of the struct. So
  if we're in a struct or something like it, and the item size is bigger than
  the address size, then we will assume it's actually in a struct.
  Returns `True` for non-code, non-external items larger than a pointer.
  """
  if is_external_segment(ea):
    return False
  if is_code(ea):
    return False
  addr_size = get_address_size_in_bytes()
  item_size = idc.ItemSize(ea)
  return item_size > addr_size
def _xref_generator(ea, get_first, get_next):
  """Yield all xref targets of `ea` via IDA's first/next iteration API."""
  target_ea = get_first(ea)
  while not is_invalid_ea(target_ea):
    yield target_ea
    target_ea = get_next(ea, target_ea)
def drefs_from(ea, only_one=False, check_fixup=True):
  """Generate the data references from `ea`.
  Yields any fixup target first (when `check_fixup`), then IDA's data
  xrefs, then references manually recorded in `_DREFS_FROM`. With
  `only_one`, at most a single reference is yielded.
  """
  seen = False
  has_one = only_one
  fixup_ea = idc.BADADDR
  if check_fixup:
    fixup_ea = idc.GetFixupTgtOff(ea)
    if not is_invalid_ea(fixup_ea) and not is_code(fixup_ea):
      seen = only_one
      has_one = True
      yield fixup_ea
    if has_one and _stop_looking_for_xrefs(ea):
      return
  for target_ea in _xref_generator(ea, idaapi.get_first_dref_from, idaapi.get_next_dref_from):
    if target_ea != fixup_ea and not is_invalid_ea(target_ea):
      seen = only_one
      yield target_ea
      if seen:
        return
  if not seen and ea in _DREFS_FROM:
    for target_ea in _DREFS_FROM[ea]:
      yield target_ea
      seen = only_one
      if seen:
        return
def crefs_from(ea, only_one=False, check_fixup=True):
  """Generate the code references from `ea` (which must itself be code).
  Yields any code fixup target first (when `check_fixup`), then IDA's
  code xrefs, then references manually recorded in `_CREFS_FROM`. With
  `only_one`, at most a single reference is yielded.
  """
  flags = idc.GetFlags(ea)
  if not idc.isCode(flags):
    return
  fixup_ea = idc.BADADDR
  seen = False
  has_one = only_one
  if check_fixup:
    fixup_ea = idc.GetFixupTgtOff(ea)
    if not is_invalid_ea(fixup_ea) and is_code(fixup_ea):
      seen = only_one
      has_one = True
      yield fixup_ea
    if has_one and _stop_looking_for_xrefs(ea):
      return
  for target_ea in _xref_generator(ea, idaapi.get_first_cref_from, idaapi.get_next_cref_from):
    if target_ea != fixup_ea and not is_invalid_ea(target_ea):
      seen = only_one
      yield target_ea
      if seen:
        return
  if not seen and ea in _CREFS_FROM:
    for target_ea in _CREFS_FROM[ea]:
      seen = only_one
      yield target_ea
      if seen:
        return
def xrefs_from(ea, only_one=False):
  """Generate all references (fixup, data, then code) from `ea`.
  Fixup checking is done here once, so the nested `drefs_from` /
  `crefs_from` calls are told to skip it. With `only_one`, at most a
  single reference is yielded.
  """
  fixup_ea = idc.GetFixupTgtOff(ea)
  seen = False
  has_one = only_one
  if not is_invalid_ea(fixup_ea):
    seen = only_one
    has_one = True
    yield fixup_ea
  if has_one and _stop_looking_for_xrefs(ea):
    return
  for target_ea in drefs_from(ea, only_one, check_fixup=False):
    if target_ea != fixup_ea:
      seen = only_one
      yield target_ea
      if seen:
        return
  for target_ea in crefs_from(ea, only_one, check_fixup=False):
    if target_ea != fixup_ea:
      seen = only_one
      yield target_ea
      if seen:
        return
def _drefs_to(ea):
  """Generate the EAs of data references to `ea`."""
  for source_ea in _xref_generator(ea, idaapi.get_first_dref_to, idaapi.get_next_dref_to):
    yield source_ea
def _crefs_to(ea):
  """Generate the EAs of code references to `ea`."""
  for source_ea in _xref_generator(ea, idaapi.get_first_cref_to, idaapi.get_next_cref_to):
    yield source_ea
def _xrefs_to(ea):
  """Generate all references to `ea`: data refs first, then code refs."""
  for source_ea in itertools.chain(_drefs_to(ea), _crefs_to(ea)):
    yield source_ea
def _reference_checker(ea, dref_finder=_IGNORE_DREF, cref_finder=_IGNORE_CREF):
  """Looks for references to/from `ea`, and does some sanity checks on what
  IDA returns."""
  # A single yielded EA is enough evidence; return on the first item.
  for ref_ea in dref_finder(ea):
    return True
  for ref_ea in cref_finder(ea):
    return True
  return False
def remove_all_refs(ea):
  """Remove all references to something."""
  # NOTE(review): this `assert False` makes everything below unreachable —
  # presumably the function was disabled on purpose; confirm before re-enabling.
  assert False
  dref_eas = list(drefs_from(ea))
  cref_eas = list(crefs_from(ea))
  for ref_ea in dref_eas:
    idaapi.del_dref(ea, ref_ea)
  for ref_ea in cref_eas:
    idaapi.del_cref(ea, ref_ea, False)
    idaapi.del_cref(ea, ref_ea, True)
def is_thunk(ea):
  """Returns true if some address is a known to IDA to be a thunk."""
  flags = idc.GetFunctionFlags(ea)
  return 0 < flags and 0 != (flags & idaapi.FUNC_THUNK)
def is_referenced(ea):
  """Returns `True` if the data at `ea` is referenced by something else."""
  return _reference_checker(ea, _drefs_to, _crefs_to)
def is_referenced_by(ea, by_ea):
    """Returns `True` if `by_ea` has a data or code reference to `ea`."""
    if any(ref_ea == by_ea for ref_ea in _drefs_to(ea)):
        return True
    return any(ref_ea == by_ea for ref_ea in _crefs_to(ea))
def is_runtime_external_data_reference(ea):
    """This can happen in ELF binaries, where you'll have somehting like
    `stdout@@GLIBC_2.2.5` in the `.bss` section, where at runtime the
    linker will fill in the slot with a pointer to the real `stdout`.
    IDA discovers this type of reference, but it has no real way to
    cross-reference it to anything, because the target address will
    only exist at runtime."""
    comment = idc.GetCommentEx(ea, 0)
    # IDA marks these slots with a recognizable repeatable comment.
    return bool(comment) and ("Copy of shared data" in comment)
def is_external_vtable_reference(ea):
    """ It checks the references of external vtable in the .bss section, where
    it is referred as the `Copy of shared data`. There is no way to resolve
    the cross references for these vtable as the target address will only
    appear during runtime.
    It is introduced to avoid lazy initialization of runtime typeinfo variables
    which gets referred by the user-defined exception types.

    Returns `True`/`False` (previously the negative branch returned `None`,
    which was inconsistent with the sibling predicates in this file).
    """
    if not is_runtime_external_data_reference(ea):
        return False
    comment = idc.GetCommentEx(ea, 0)
    if comment and "Alternative name is '`vtable" in comment:
        return True
    else:
        # Fix: return an explicit False instead of a bare `return` (None).
        return False
def is_reference(ea):
    """Returns `True` if the `ea` references something else."""
    if is_invalid_ea(ea):
        return False
    # Any outgoing xref at all makes this a reference.
    for _ in xrefs_from(ea):
        return True
    return is_runtime_external_data_reference(ea)
def is_data_reference(ea):
    """Returns `True` if `ea` has a *data* reference to something else.

    (Unlike `is_reference`, only drefs are considered here, plus the
    runtime-external case.)
    """
    if is_invalid_ea(ea):
        return False
    for target_ea in drefs_from(ea):
        return True
    return is_runtime_external_data_reference(ea)
def has_flow_to_code(ea):
    """Returns `True` if there are any control flows to the instruction at
    `ea`.

    Note: drefs are deliberately ignored (default `_IGNORE_DREF`); only
    `idautils.CodeRefsTo` is consulted.
    """
    return _reference_checker(ea, cref_finder=idautils.CodeRefsTo)
def get_reference_target(ea):
    """Return the first xref target of `ea`, or `ea` itself for runtime
    external data references, or `idc.BADADDR` when nothing is found."""
    for ref_ea in xrefs_from(ea, True):
        return ref_ea
    # This is kind of funny, but it works with how we understand external
    # variable references from the CFG production and LLVM side. Really,
    # we need a unique location for every reference (internal and external).
    # For external references, the location itself is not super important, it's
    # used for identification in the LLVM side of things.
    #
    # When creating cross-references, we need that ability to identify the
    # "target" of the cross-reference, and again, that can be anything so long
    # as other parts of the code agree on the target.
    if is_runtime_external_data_reference(ea):
        return ea
    return idc.BADADDR
def is_head(ea):
    """Returns `True` if `ea` is the head (start) of an item in IDA's view."""
    flags = idc.GetFlags(ea)
    return idc.isHead(flags)
# Make the data at `ea` into a head.
def make_head(ea):
    flags = idc.GetFlags(ea)
    # Already a head: nothing to do.
    if idc.isHead(flags):
        return True
    idc.SetFlags(ea, flags | idc.FF_DATA)
    idaapi.autoWait()
    return is_head(ea)
| {
"content_hash": "5f551bb3274de44a98c2a64802aba099",
"timestamp": "",
"source": "github",
"line_count": 793,
"max_line_length": 94,
"avg_line_length": 28.815889029003785,
"alnum_prop": 0.673405977856549,
"repo_name": "trailofbits/mcsema",
"id": "01c095d988e6ba3a7d03cd35c19d868199c5cfd7",
"size": "23439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/mcsema_disass/ida/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "893"
},
{
"name": "C",
"bytes": "440730"
},
{
"name": "C++",
"bytes": "507910"
},
{
"name": "CMake",
"bytes": "65094"
},
{
"name": "Dockerfile",
"bytes": "1510"
},
{
"name": "GDB",
"bytes": "6207"
},
{
"name": "Makefile",
"bytes": "676"
},
{
"name": "Python",
"bytes": "553754"
},
{
"name": "Shell",
"bytes": "21671"
}
],
"symlink_target": ""
} |
# Day-2 Python basics walkthrough: variables, operators, strings, formatting.
version = 14
print(version)
language = "python"
print(language)
# multiple declaration
name, age, day = "john", 25, "tuesday"
print(name, age, day)
# ValueError: too many values to unpack
# v1, v2 = "basics"
# multiple assignment
v1 = v2 = "basics"
print(v1, v2)
# data types: int, string, list, tuple, dictionary
# operators
grade1 = 8
grade2 = 2
# Renamed from `sum` -- that name shadowed the built-in sum() function.
total = grade1 + grade2
minus = grade1 - grade2
mult = grade1 * grade2
div = grade1 / grade2
mod = grade1 % grade2
print(grade1, grade2, total, minus, mult, div, mod)
# strings
one = "monty"
space = " "
two = "python"
name = one + space + two
print(name)
print(name[6])
print(name[0:5])
print(name[5])
print(name[6:11])
print(name[6:12])
print(name[6:20])
print(name[:])
print(name[:8])
print(name[3:])
print(name[:-1])
print(name[-2:-5])
print(name[-6:])
print(name[-6:-2])
# placeholders
print("%s license" % ("license"))
mit, gnu = "mit", "gnu"
print("%s license" % (mit))
print("%s license" % gnu)
print("%s and %s licenses" % (mit, gnu))
challenge, day = 100, 2
print("this is the %d days of %s challenge and i'm on day %d" % (challenge, two, day))
"""
output:
14
python
john 25 tuesday
basics basics
8 2 10 6 16 4.0 0
monty python
p
monty

pytho
python
python
monty python
monty py
ty python
monty pytho

python
pyth
license license
mit license
gnu license
mit and gnu licenses
this is the 100 days of python challenge and i'm on day 2
"""
| {
"content_hash": "a8a8a0b721d3238d581957950d9c7646",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 86,
"avg_line_length": 15,
"alnum_prop": 0.6695035460992907,
"repo_name": "juancarlosqr/datascience",
"id": "79db4e77e823e273cf3af58653ffc8fe443e7c4b",
"size": "1410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/100daysofpython/r1d2_basics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "191608"
},
{
"name": "HTML",
"bytes": "1984011"
},
{
"name": "Jupyter Notebook",
"bytes": "1731379"
},
{
"name": "Python",
"bytes": "228353"
},
{
"name": "R",
"bytes": "35351"
}
],
"symlink_target": ""
} |
import os
from pathlib import Path
from .exceptions import CreateDirectoryException, MissingEnv
from redis import Redis
from redis.exceptions import ConnectionError
from datetime import datetime, timedelta
import time
import asyncio
from functools import lru_cache
def get_storage_path() -> Path:
    """Return the root of the active virtual environment as a Path.

    Raises:
        MissingEnv: if VIRTUAL_ENV is not set.
    """
    if not os.environ.get('VIRTUAL_ENV'):
        # Fix: "envoronment" typo in the user-facing error message.
        raise MissingEnv("VIRTUAL_ENV is missing. This project really wants to run from a virtual environment.")
    return Path(os.environ['VIRTUAL_ENV'])
@lru_cache(64)
def get_homedir() -> Path:
    """Return UWHOISD_HOME as a Path, bootstrapping it from a `.env` file
    next to the package if the variable is not already set.

    Raises:
        MissingEnv: if UWHOISD_HOME cannot be determined.
    """
    if not os.environ.get('UWHOISD_HOME'):
        # Try to open a .env file in the home directory if it exists.
        env_file = Path(__file__).resolve().parent.parent / '.env'
        if env_file.exists():
            with env_file.open() as f:
                for line in f:
                    line = line.strip()
                    # Fix: skip blank or malformed lines -- the old code raised
                    # ValueError on lines without '=' and IndexError on 'KEY='.
                    if not line or '=' not in line:
                        continue
                    key, value = line.split('=', 1)
                    if value and value[0] in ['"', "'"]:
                        value = value[1:-1]
                    os.environ[key] = value
    if not os.environ.get('UWHOISD_HOME'):
        guessed_home = Path(__file__).resolve().parent.parent
        raise MissingEnv(f"UWHOISD_HOME is missing. \
Run the following command (assuming you run the code from the cloned repository):\
export UWHOISD_HOME='{guessed_home}'")
    return Path(os.environ['UWHOISD_HOME'])
def safe_create_dir(to_create: Path) -> None:
    """Create `to_create` (with parents) unless a non-directory occupies it."""
    if to_create.exists() and not to_create.is_dir():
        raise CreateDirectoryException(f'The path {to_create} already exists and is not a directory')
    # Equivalent to os.makedirs(..., exist_ok=True), expressed via pathlib.
    to_create.mkdir(parents=True, exist_ok=True)
def set_running(name: str) -> None:
    """Flag the service `name` as running in the cache redis."""
    cache = Redis(unix_socket_path=get_socket_path('cache'), db=2, decode_responses=True)
    cache.hset('running', name, 1)
def unset_running(name: str) -> None:
    """Clear the running flag for the service `name`."""
    cache = Redis(unix_socket_path=get_socket_path('cache'), db=2, decode_responses=True)
    cache.hdel('running', name)
def is_running() -> dict:
    """Return the mapping of currently-running service names to flags."""
    cache = Redis(unix_socket_path=get_socket_path('cache'), db=2, decode_responses=True)
    return cache.hgetall('running')
def get_socket_path(name: str) -> str:
    """Return the unix-socket path (under UWHOISD_HOME) for redis `name`."""
    sockets = {
        'cache': Path('cache', 'cache.sock'),
        'whowas': Path('whowas', 'whowas.sock'),
    }
    return str(get_homedir() / sockets[name])
def check_running(name: str) -> bool:
    """Return True if the redis instance behind socket `name` answers a PING.

    Fixes: removed a leftover debug `print(socket_path)`, and made the
    falsy-ping path return an explicit `False` instead of an implicit `None`.
    """
    socket_path = get_socket_path(name)
    try:
        r = Redis(unix_socket_path=socket_path)
        return bool(r.ping())
    except ConnectionError:
        return False
def shutdown_requested() -> bool:
    """True if a shutdown flag is set in redis, or if redis is unreachable."""
    try:
        cache = Redis(unix_socket_path=get_socket_path('cache'), db=2, decode_responses=True)
        return cache.exists('shutdown')
    except (ConnectionRefusedError, ConnectionError):
        # No redis to ask means the system is going (or has gone) down.
        return True
async def long_sleep_async(sleep_in_sec: int, shutdown_check: int=10) -> bool:
    """Sleep `sleep_in_sec` seconds in `shutdown_check`-second slices,
    returning False early if a shutdown was requested, True otherwise."""
    interval = min(shutdown_check, sleep_in_sec)
    deadline = datetime.now() + timedelta(seconds=sleep_in_sec)
    while datetime.now() < deadline:
        await asyncio.sleep(interval)
        if shutdown_requested():
            return False
    return True
def long_sleep(sleep_in_sec: int, shutdown_check: int=10) -> bool:
    """Blocking twin of `long_sleep_async`: sleep in slices, abort on shutdown."""
    interval = min(shutdown_check, sleep_in_sec)
    deadline = datetime.now() + timedelta(seconds=sleep_in_sec)
    while datetime.now() < deadline:
        time.sleep(interval)
        if shutdown_requested():
            return False
    return True
| {
"content_hash": "d81c170870c9650ea85eae6c358940e9",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 112,
"avg_line_length": 33.06542056074766,
"alnum_prop": 0.637648388920294,
"repo_name": "Rafiot/uwhoisd",
"id": "31544a673df7ab695d6ad37da7af61710d4c4682",
"size": "3585",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "uwhoisd/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30048"
},
{
"name": "Shell",
"bytes": "153"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.conf import settings
from datetime import datetime, timedelta
from restclients.sws import SWS, encode_section_label, get_resource
from restclients.models.sws import Term, Curriculum, Person, College, Department
from restclients.exceptions import DataFailureException
from restclients.exceptions import InvalidSectionURL
class UtilFunctionTest(TestCase):
    """Tests for the module-level helpers of restclients.sws."""

    def test_encode_section_label(self):
        # Spaces inside a section label must be percent-encoded for SWS URLs.
        self.assertEquals(encode_section_label('2013,winter,C LIT,396/A'),
                          '2013,winter,C%20LIT,396/A')

    def test_get_resource(self):
        # get_resource returns the stringified payload of the mocked resource.
        self.assertEquals(get_resource('/student/v5/campus.json'),
                          "{u'PageSize': u'10', u'Campuses': [{u'CampusShortName': u'BOTHELL', u'Href': u'/student/v5/campus/BOTHELL.json', u'CampusName': u'UW Bothell', u'CampusFullName': u'UNIVERSITY OF WASHINGTON BOTHELL'}, {u'CampusShortName': u'SEATTLE', u'Href': u'/student/v5/campus/SEATTLE.json', u'CampusName': u'UW Seattle', u'CampusFullName': u'UNIVERSITY OF WASHINGTON SEATTLE'}, {u'CampusShortName': u'TACOMA', u'Href': u'/student/v5/campus/TACOMA.json', u'CampusName': u'UW Tacoma', u'CampusFullName': u'UNIVERSITY OF WASHINGTON TACOMA'}], u'Next': None, u'Current': {u'Href': u'/student/v5/campus.json&page_start=1&page_size=10'}, u'TotalCount': 3, u'PageStart': u'1', u'Previous': None}")
class SWSTest(TestCase):
    """Backwards-compatibility tests for the deprecated SWS facade object.

    Every call here goes against the File DAO mock data, so the expected
    counts/values mirror the fixture files shipped with restclients.
    """

    def test_mock_data_fake_grading_window(self):
        with self.settings(
                RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File',
                RESTCLIENTS_PWS_DAO_CLASS='restclients.dao_implementation.pws.File'):
            sws = SWS()
            # backwards compatible for term
            term = sws.get_term_by_year_and_quarter(2013, 'spring')
            self.assertEquals(term.year, 2013)
            self.assertEquals(term.quarter, 'spring')
            term = sws.get_current_term()
            self.assertEquals(term.year, 2013)
            self.assertEquals(term.quarter, 'spring')
            prev_term = sws.get_previous_term()
            self.assertEquals(prev_term.year, 2013)
            self.assertEquals(prev_term.quarter, 'winter')
            next_term = sws.get_next_term()
            self.assertEquals(next_term.year, 2013)
            self.assertEquals(next_term.quarter, 'summer')
            term_before = sws.get_term_before(next_term)
            self.assertEquals(term_before.year, 2013)
            self.assertEquals(term_before.quarter, 'spring')
            term_after = sws.get_term_after(prev_term)
            self.assertEquals(term_after.year, 2013)
            self.assertEquals(term_after.quarter, 'spring')
            # backwards compatible for section
            section = sws.get_section_by_label('2013,winter,ASIAN,203/A')
            joint_sections = sws.get_joint_sections(section)
            self.assertEquals(len(joint_sections), 1)
            section = sws.get_section_by_url('/student/v5/course/2013,summer,TRAIN,100/A.json')
            sws.get_linked_sections(section)
            # A bare label (not a /student/v5/... URL) must be rejected.
            section.linked_section_urls = ['2012,summer,TRAIN,100/A']
            self.assertRaises(InvalidSectionURL,
                              sws.get_linked_sections, section)
            term = Term(quarter="summer", year=2013)
            person = Person(uwregid="FBB38FE46A7C11D5A4AE0004AC494FFE")
            sections = sws.get_sections_by_instructor_and_term(person, term)
            self.assertEquals(len(sections), 1)
            sections = sws.get_sections_by_delegate_and_term(person, term)
            self.assertEquals(len(sections), 2)
            term = Term(quarter="winter", year=2013)
            curriculum = Curriculum(label="ENDO")
            sections = sws.get_sections_by_curriculum_and_term(curriculum, term)
            self.assertEquals(len(sections), 2)
            # backwards compatible for section_status
            section_status = sws.get_section_status_by_label(
                '2012,autumn,CSE,100/W')
            self.assertEquals(section_status.sln, 12588)
            # backwards compatible for registration
            section = sws.get_section_by_label('2013,winter,DROP_T,100/A')
            registrations = sws.get_active_registrations_for_section(section)
            self.assertEquals(len(registrations), 0)
            registrations = sws.get_all_registrations_for_section(section)
            self.assertEquals(len(registrations), 2)
            term = sws.get_current_term()
            sws.schedule_for_regid_and_term('9136CCB8F66711D5BE060004AC494FFE',
                                            term)
            # backwards compatible for enrollment
            grades = sws.grades_for_regid_and_term('9136CCB8F66711D5BE060004AC494FFE', term)
            self.assertEquals(grades.user.uwnetid, "javerage")
            # backwards compatible for campus
            campuses = sws.get_all_campuses()
            self.assertEquals(len(campuses), 3)
            # backwards compatible for college
            colleges = sws.get_all_colleges()
            self.assertEquals(len(colleges), 20)
            # backwards compatible for department
            college = College(label="MED")
            depts = sws.get_departments_for_college(college)
            self.assertEquals(len(depts), 30)
            # backwards compatible for curriculum
            department = Department(label="EDUC")
            curricula = sws.get_curricula_for_department(department)
            self.assertEquals(len(curricula), 7)
            term = Term(quarter='winter', year=2013)
            curricula = sws.get_curricula_for_term(term)
            self.assertEquals(len(curricula), 423)
| {
"content_hash": "190fd23a72280573912f46b954dd9697",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 704,
"avg_line_length": 50.08771929824562,
"alnum_prop": 0.6348511383537653,
"repo_name": "uw-it-aca/uw-restclients",
"id": "a21855de93b0d1fa8073261b32148a1c3411ba5a",
"size": "5710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "restclients/test/sws/compatible.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "38842"
},
{
"name": "Python",
"bytes": "664277"
},
{
"name": "Roff",
"bytes": "9566"
}
],
"symlink_target": ""
} |
import sys
import marshal
import types
from sonLib.bioio import system
import importlib
class Target(object):
    """Each job wrapper extends this class.

    A Target is a serializable unit of work in the jobTree. It may register
    child targets (run in parallel after this one) and a single follow-on
    target (run after all children complete).
    """
    def __init__(self, time=sys.maxint, memory=sys.maxint, cpu=sys.maxint):
        """This method must be called by any overriding constructor.
        """
        self.__followOn = None
        self.__children = []
        self.__childCommands = []
        self.__memory = memory
        self.__time = time #This parameter is no longer used by the batch system.
        self.__cpu = cpu
        self.globalTempDir = None
        # Targets are serialized by module+class name, which is ambiguous for
        # classes defined in __main__, hence the hard error below.
        if self.__module__ == "__main__":
            raise RuntimeError("The module name of class %s is __main__, which prevents us from serialising it properly, \
please ensure you re-import targets defined in main" % self.__class__.__name__)
        self.importStrings = set((".".join((self.__module__, self.__class__.__name__)),))
        self.loggingMessages = []
    def run(self):
        """Do user stuff here, including creating any follow on jobs.
        This function must not re-pickle the pickle file, which is an input file.
        """
        pass
    def setFollowOnTarget(self, followOn):
        """Set the follow on target.
        Will complain if follow on already set.
        """
        assert self.__followOn == None
        self.__followOn = followOn
    def setFollowOnFn(self, fn, args=(), kwargs={}, time=sys.maxint, memory=sys.maxint, cpu=sys.maxint):
        """Sets a follow on target fn. See FunctionWrappingTarget.
        """
        self.setFollowOnTarget(FunctionWrappingTarget(fn=fn, args=args, kwargs=kwargs, time=time, memory=memory, cpu=cpu))
    def setFollowOnTargetFn(self, fn, args=(), kwargs={}, time=sys.maxint, memory=sys.maxint, cpu=sys.maxint):
        """Sets a follow on target fn. See TargetFunctionWrappingTarget.
        """
        self.setFollowOnTarget(TargetFunctionWrappingTarget(fn=fn, args=args, kwargs=kwargs, time=time, memory=memory, cpu=cpu))
    def addChildTarget(self, childTarget):
        """Adds the child target to be run as child of this target.
        """
        self.__children.append(childTarget)
    def addChildFn(self, fn, args=(), kwargs={}, time=sys.maxint, memory=sys.maxint, cpu=sys.maxint):
        """Adds a child fn. See FunctionWrappingTarget.
        """
        self.addChildTarget(FunctionWrappingTarget(fn=fn, args=args, kwargs=kwargs, time=time, memory=memory, cpu=cpu))
    def addChildTargetFn(self, fn, args=(), kwargs={}, time=sys.maxint, memory=sys.maxint, cpu=sys.maxint):
        """Adds a child target fn. See TargetFunctionWrappingTarget.
        """
        self.addChildTarget(TargetFunctionWrappingTarget(fn=fn, args=args, kwargs=kwargs, time=time, memory=memory, cpu=cpu))
    def addChildCommand(self, childCommand, runTime=sys.maxint):
        """A command to be run as child of the job tree.
        """
        self.__childCommands.append((str(childCommand), float(runTime)))
    def getRunTime(self):
        """Get the time the target is anticipated to run.
        """
        return self.__time
    def getGlobalTempDir(self):
        """Get the global temporary directory.
        """
        #Check if we have initialised the global temp dir - doing this
        #just in time prevents us from creating temp directories unless we have to.
        if self.globalTempDir == None:
            self.globalTempDir = self.stack.getGlobalTempDir()
        return self.globalTempDir
    def getLocalTempDir(self):
        """Get the local temporary directory.
        """
        return self.stack.getLocalTempDir()
    def getMemory(self):
        """Returns the number of bytes of memory that were requested by the job.
        """
        return self.__memory
    def getCpu(self):
        """Returns the number of cpus requested by the job.
        """
        return self.__cpu
    def getFollowOn(self):
        """Get the follow on target.
        """
        return self.__followOn
    def getChildren(self):
        """Get the child targets (as a defensive copy of the list).
        """
        return self.__children[:]
    def getChildCommands(self):
        """Gets the child commands, as a list of tuples of strings and floats, representing the run times.
        """
        return self.__childCommands[:]
    def logToMaster(self, string):
        """Send a logging message to the master. Will only reported if logging is set to INFO level in the master.
        """
        self.loggingMessages.append(str(string))
    @staticmethod
    def makeTargetFn(fn, args=(), kwargs={}, time=sys.maxint, memory=sys.maxint, cpu=sys.maxint):
        """Makes a Target out of a target function!
        In a target function, the first argument to the function will be a reference to the wrapping target, allowing
        the function to create children/follow ons.
        Convenience function for constructor of TargetFunctionWrappingTarget
        """
        return TargetFunctionWrappingTarget(fn=fn, args=args, kwargs=kwargs, time=time, memory=memory, cpu=cpu)
    ####
    #Private functions
    ####
    def setGlobalTempDir(self, globalTempDir):
        """Sets the global temp dir.
        """
        self.globalTempDir = globalTempDir
    def isGlobalTempDirSet(self):
        # True once a global temp dir has been assigned (lazily or explicitly).
        return self.globalTempDir != None
    def setStack(self, stack):
        """Sets the stack object that is calling the target.
        """
        self.stack = stack
    def getMasterLoggingMessages(self):
        # Defensive copy so callers cannot mutate the internal list.
        return self.loggingMessages[:]
class FunctionWrappingTarget(Target):
    """Target used to wrap a function.
    Function can not be nested function or class function, currently.
    """
    def __init__(self, fn, args=(), kwargs={}, time=sys.maxint, memory=sys.maxint, cpu=sys.maxint):
        # Bug fix: `memory` and `cpu` were previously both passed the value of
        # `time` (Target.__init__(self, time=time, memory=time, cpu=time)),
        # so per-job memory/cpu requests were silently ignored.
        Target.__init__(self, time=time, memory=memory, cpu=cpu)
        self.fnModule = str(fn.__module__) #Module of function
        self.fnName = str(fn.__name__) #Name of function
        self.args=args
        self.kwargs=kwargs
    def run(self):
        # Re-resolve the function by module+name; the target may be unpickled
        # and run in a different process from where it was created.
        func = getattr(importlib.import_module(self.fnModule), self.fnName)
        func(*self.args, **self.kwargs)
class TargetFunctionWrappingTarget(FunctionWrappingTarget):
    """Target used to wrap a function.
    A target function is a function which takes as its first argument a reference
    to the wrapping target.
    Target function can not be closure.
    """
    def run(self):
        func = getattr(importlib.import_module(self.fnModule), self.fnName)
        # Prepend this target itself so the function can add children/follow-ons.
        func(*((self,) + tuple(self.args)), **self.kwargs)
| {
"content_hash": "0999efe4c4f834842f2755a6a06c37d2",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 129,
"avg_line_length": 38.16571428571429,
"alnum_prop": 0.6300344362928582,
"repo_name": "cooketho/jobTree",
"id": "2d9afd55ea8338a6cf2584b7057f2f92143cfcbd",
"size": "7809",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scriptTree/target.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "956"
},
{
"name": "Python",
"bytes": "219203"
}
],
"symlink_target": ""
} |
from unittest.mock import Mock, patch
from django.test import RequestFactory, SimpleTestCase
from core.tests.utils import TemplateTagsTest
class IsCurrentUrlTagTest(TemplateTagsTest, SimpleTestCase):
    """Tests for the `is_current_url` template tag: it matches the current
    request's resolved url_name and/or namespace against the given arguments."""
    def setUp(self):
        # Every test needs access to the request factory.
        self.request = RequestFactory().get('/test/')
        self.request.resolver_match = Mock(url_name="dummy", namespace="dummy")
    def test_url_name_match(self):
        template = (
            "{% load core_tags %}"
            "{% is_current_url url_name='dummy' as val %}"
            "{{val}}"
        )
        rendered = self.render_template(template, {'request': self.request})
        self.assertEqual(rendered, "True")
    def test_url_name_unmatch(self):
        template = (
            "{% load core_tags %}"
            "{% is_current_url url_name='unit' as val %}"
            "{{val}}"
        )
        rendered = self.render_template(template, {'request': self.request})
        self.assertEqual(rendered, "False")
    def test_namespace_match(self):
        template = (
            "{% load core_tags %}"
            "{% is_current_url namespace='dummy' as val %}"
            "{{val}}"
        )
        rendered = self.render_template(template, {'request': self.request})
        self.assertEqual(rendered, "True")
    def test_namespace_unmatch(self):
        template = (
            "{% load core_tags %}"
            "{% is_current_url namespace='unit' as val %}"
            "{{val}}"
        )
        rendered = self.render_template(template, {'request': self.request})
        self.assertEqual(rendered, "False")
    def test_url_name_and_namespace_match(self):
        template = (
            "{% load core_tags %}"
            "{% is_current_url url_name='dummy' namespace='dummy' as val %}"
            "{{val}}"
        )
        rendered = self.render_template(template, {'request': self.request})
        self.assertEqual(rendered, "True")
    def test_url_name_and_namespace_unmatch(self):
        # Both must match: a wrong namespace fails even with a matching url_name.
        template = (
            "{% load core_tags %}"
            "{% is_current_url url_name='dummy' namespace='unit' as val %}"
            "{{val}}"
        )
        rendered = self.render_template(template, {'request': self.request})
        self.assertEqual(rendered, "False")
    def test_request_without_resolver(self):
        # An unresolved request (no resolver_match) can never be "current".
        template = (
            "{% load core_tags %}"
            "{% is_current_url url_name='dummy' as val %}"
            "{{val}}"
        )
        self.request.resolver_match = None
        rendered = self.render_template(template, {'request': self.request})
        self.assertEqual(rendered, "False")
class MenuActiveTagTest(TemplateTagsTest, SimpleTestCase):
    """Tests for the `menu_active` tag: it renders a CSS class when
    `is_current_url` (patched here) reports a match, and nothing otherwise."""
    def setUp(self):
        # Every test needs access to the request factory.
        self.request = RequestFactory().get('/test/')
        self.request.resolver_match = Mock(url_name="dummy", namespace="dummy")
    @patch("core.templatetags.core_tags.is_current_url", return_value=True)
    def test_match_url(self, mis_current_url):
        template = (
            "{% load core_tags %}{% menu_active url_name='dummy' %}"
        )
        rendered = self.render_template(template, {'request': self.request})
        self.assertEqual(rendered, "active")
    @patch("core.templatetags.core_tags.is_current_url", return_value=False)
    def test_unmatch_url(self, mis_current_url):
        template = (
            "{% load core_tags %}{% menu_active url_name='unit' %}"
        )
        rendered = self.render_template(template, {'request': self.request})
        self.assertEqual(rendered, "")
    @patch("core.templatetags.core_tags.is_current_url", return_value=True)
    def test_css_class(self, mis_current_url):
        # The emitted class defaults to "active" but can be overridden.
        template = (
            "{% load core_tags %}"
            "{% menu_active url_name='dummy' css_class='selected' %}"
        )
        rendered = self.render_template(template, {'request': self.request})
        self.assertEqual(rendered, "selected")
class QuerystringTagTest(TemplateTagsTest, SimpleTestCase):
    """Tests for the `querystring` tag: it merges keyword arguments into the
    current request's query string, preserving unrelated parameters."""
    def setUp(self):
        # Every test needs access to the request factory.
        self.request = RequestFactory().get('/test/', {'page': 3})
    def test_new_argument(self):
        """A new key is appended while existing keys are kept."""
        template = "{% load core_tags %}{% querystring order='name' %}"
        rendered = self.render_template(template, {'request': self.request})
        self.assertEqual(rendered, "?page=3&order=name")
    def test_update_argument(self):
        """An existing key is overwritten, not duplicated."""
        template = "{% load core_tags %}{% querystring page='10' %}"
        rendered = self.render_template(template, {'request': self.request})
        self.assertEqual(rendered, "?page=10")
class BodyClassTagTest(TemplateTagsTest, SimpleTestCase):
    """Tests for the `body_class` tag, which derives a `view-…` CSS class
    from the current request's resolved namespace and url_name."""
    def setUp(self):
        # Every test needs access to the request factory.
        self.request = RequestFactory().get('/test/')
    def test_with_namespace(self):
        self.request.resolver_match = Mock(
            namespace='namespace', url_name='url_name')
        template = "{% load core_tags %}{% body_class %}"
        rendered = self.render_template(template, {'request': self.request})
        self.assertEqual(rendered, 'view-namespace-url_name')
    def test_without_namespace(self):
        self.request.resolver_match = Mock(namespace=None, url_name='url_name')
        template = "{% load core_tags %}{% body_class %}"
        rendered = self.render_template(template, {'request': self.request})
        self.assertEqual(rendered, 'view-url_name')
    def test_without_resolver_match(self):
        # Unresolved requests get a distinctive fallback class.
        self.request.resolver_match = None
        template = "{% load core_tags %}{% body_class %}"
        rendered = self.render_template(template, {'request': self.request})
        self.assertEqual(rendered, 'view-noresolver-match')
class VisibilityIconTagTest(TemplateTagsTest, SimpleTestCase):
    """Tests for the `visibility_icon` tag: it renders a tooltip-wrapped
    FontAwesome icon depending on the object's `is_private` flag."""
    def test_private_visibility(self):
        template = "{% load core_tags %}{% visibility_icon object %}"
        rendered = self.render_template(template, {
            'object': Mock(is_private=True)
        })
        expected = (
            "<span data-toggle='tooltip' data-placement='right' "
            "title='Private'><i class='fa fa-user-secret' aria-hidden='true'>"
            "</i></span>"
        )
        self.assertHTMLEqual(rendered, expected)
    def test_public_visibility(self):
        template = "{% load core_tags %}{% visibility_icon object %}"
        rendered = self.render_template(template, {
            'object': Mock(is_private=False)
        })
        expected = (
            "<span data-toggle='tooltip' data-placement='right' "
            "title='Public'><i class='fa fa-globe' aria-hidden='true'></i>"
            "</span>"
        )
        self.assertHTMLEqual(rendered, expected)
class AbsoluteUriTagTest(TemplateTagsTest, SimpleTestCase):
    """Tests for the `absolute_uri` tag: it prefixes a path with the
    scheme and host of the current request."""
    def setUp(self):
        # Every test needs access to the request factory.
        self.request = RequestFactory().get('/test/')
    def test_absolute_uri(self):
        template = "{% load core_tags %}{% absolute_uri '/unit/' %}"
        rendered = self.render_template(template, {'request': self.request})
        self.assertEqual(rendered, 'http://testserver/unit/')
class RepoIconTagTest(TemplateTagsTest, SimpleTestCase):
    """Tests for the `repo_icon` filter: maps a provider name to its
    FontAwesome icon markup, falling back to a generic git icon."""
    def test_default_icon(self):
        # Unknown providers fall back to the generic "Source" icon.
        template = "{% load core_tags %}{{ 'dummy'|repo_icon }}"
        rendered = self.render_template(template)
        self.assertEqual(
            rendered,
            '<i class="fa fa-fw fa-git-square" aria-hidden="true"></i> Source')
    def test_github_icon(self):
        template = "{% load core_tags %}{{ 'github'|repo_icon }}"
        rendered = self.render_template(template)
        self.assertEqual(
            rendered,
            '<i class="fa fa-fw fa-github" aria-hidden="true"></i> Github')
    def test_gitlab_icon(self):
        template = "{% load core_tags %}{{ 'gitlab'|repo_icon }}"
        rendered = self.render_template(template)
        self.assertEqual(
            rendered,
            '<i class="fa fa-fw fa-gitlab" aria-hidden="true"></i> Gitlab')
    def test_bitbucket_icon(self):
        template = "{% load core_tags %}{{ 'bitbucket'|repo_icon }}"
        rendered = self.render_template(template)
        self.assertEqual(
            rendered,
            '<i class="fa fa-fw fa-bitbucket" aria-hidden="true"></i> '
            'Bitbucket')
| {
"content_hash": "f701876206e1f9a46eb2d2d01ada5bd5",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 79,
"avg_line_length": 36.982608695652175,
"alnum_prop": 0.5898189513284741,
"repo_name": "srtab/alexandriadocs",
"id": "a0d3cc4b94044f56a24ee67e1f6ebd7efec844d7",
"size": "8530",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "alexandriadocs/core/tests/test_templatetags.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8224"
},
{
"name": "HTML",
"bytes": "86219"
},
{
"name": "JavaScript",
"bytes": "2685"
},
{
"name": "Python",
"bytes": "177159"
},
{
"name": "Shell",
"bytes": "151"
}
],
"symlink_target": ""
} |
import os
import unittest
from tempest.test_discover import plugins
def load_tests(loader, tests, pattern):
    """Discover tempest's local api/scenario tests plus plugin-provided tests."""
    ext_plugins = plugins.TempestTestPluginManager()
    suite = unittest.TestSuite()
    base_path = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]
    base_path = os.path.split(base_path)[0]

    def _discover(start_dir, top_dir):
        # Only forward `pattern` when the caller supplied one, so the
        # loader's default pattern stays in effect otherwise.
        if pattern:
            return loader.discover(start_dir, pattern=pattern,
                                   top_level_dir=top_dir)
        return loader.discover(start_dir, top_level_dir=top_dir)

    # Load local tempest tests
    for test_dir in ['api', 'scenario']:
        full_test_dir = os.path.join(base_path, 'tempest', test_dir)
        suite.addTests(_discover(full_test_dir, base_path))

    plugin_load_tests = ext_plugins.get_plugin_load_tests_tuple()
    if not plugin_load_tests:
        return suite

    # Load any installed plugin tests
    for plugin in plugin_load_tests:
        test_dir, top_path = plugin_load_tests[plugin]
        suite.addTests(_discover(test_dir, top_path))
    return suite
| {
"content_hash": "259d10d6ed34d04ba852f6d0ef85e4a5",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 77,
"avg_line_length": 36.82857142857143,
"alnum_prop": 0.5989138867339022,
"repo_name": "openstack/tempest",
"id": "a19f20b696ae98622c93a25e823e3e945c95719b",
"size": "1891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/test_discover/test_discover.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5364077"
},
{
"name": "Shell",
"bytes": "8684"
}
],
"symlink_target": ""
} |
from flask import Blueprint, g
from flask_restful import Api, Resource
from flask.ext.restful import abort, fields, marshal_with, reqparse
from app import db
from app.auth.models import Permission, User
from app.base.decorators import login_required
auth_bp = Blueprint('auth_api', __name__)
api = Api(auth_bp)
perm_fields = {
'name': fields.String,
'code': fields.String,
}
user_fields = {
'id': fields.Integer,
'created': fields.DateTime,
'modified': fields.DateTime,
'username': fields.String,
'password': fields.String,
'permissions': fields.Nested(perm_fields),
}
token_fields = {
'token': fields.String,
}
class UserBase(Resource):
    """Shared helpers for the user-related API resources."""

    def get_user(self, username):
        """Return the user with `username`, or abort the request with 404."""
        user = User.query.filter_by(username=username).first()
        if not user:
            abort(404, message="User {} doesn't exist".format(username))
        return user

    def add_permissions(self, user, perms):
        """Replace the user's permissions with `perms` (None clears them)."""
        user.permissions = []
        for perm in (perms or []):
            user.add_permission(perm)
class UserDetail(UserBase):
    """Retrieve, update or delete a single user addressed by username."""
    put_parser = reqparse.RequestParser()
    put_parser.add_argument('cur_password', type=str)
    put_parser.add_argument('new_password', type=str)
    put_parser.add_argument('permissions', type=str, action='append')
    @marshal_with(user_fields)
    @login_required
    def get(self, username):
        # Serialized user or 404 (via get_user).
        user = self.get_user(username)
        return user
    @login_required
    def delete(self, username):
        # 204 on success; 404 if the user does not exist.
        user = self.get_user(username)
        db.session.delete(user)
        db.session.commit()
        return {}, 204
    @marshal_with(user_fields)
    @login_required
    def put(self, username):
        # Updates the password (only when the current one is supplied and
        # verifies) and replaces the permission list.
        args = self.put_parser.parse_args()
        user = self.get_user(username)
        # Update password if current one matches
        if None not in [args['cur_password'], args['new_password']]:
            if user.check_password(args['cur_password']):
                user.set_password(args['new_password'])
            else:
                abort(400, message="Invalid password")
        # Update permissions
        self.add_permissions(user, args['permissions'])
        db.session.add(user)
        db.session.commit()
        return user, 200
class UserList(UserBase):
    """Collection endpoint: list all users, create a new user."""

    parser = reqparse.RequestParser()
    parser.add_argument('username', type=str)
    parser.add_argument('password', type=str)
    parser.add_argument('permissions', type=str, action='append')

    @marshal_with(user_fields)
    @login_required
    def get(self):
        """Return every user in the database."""
        return User.query.all()

    @marshal_with(user_fields)
    def post(self):
        """Create a user with a hashed password and optional permissions; 201 on success."""
        payload = self.parser.parse_args()
        new_user = User(username=payload['username'])
        new_user.set_password(payload['password'])
        self.add_permissions(new_user, payload['permissions'])
        db.session.add(new_user)
        db.session.commit()
        return new_user, 201
class AuthToken(UserBase):
    """Login endpoint: exchange username/password for an auth token."""

    token_parser = reqparse.RequestParser()
    token_parser.add_argument('username', type=str)
    token_parser.add_argument('password', type=str)

    @marshal_with(token_fields)
    def post(self):
        """Return a fresh token when the credentials verify, else 401."""
        credentials = self.token_parser.parse_args()
        user = self.get_user(credentials['username'])
        if not user.check_password(credentials['password']):
            abort(401, message="Invalid login info")
        token = user.generate_auth_token()
        return {'token': token.decode('ascii')}, 200
# URL routing for the auth blueprint.
api.add_resource(AuthToken, '/login/')
api.add_resource(UserDetail, '/users/<string:username>')
api.add_resource(UserList, '/users/')
| {
"content_hash": "392ca2014313fc70cd3caf1085a2073d",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 72,
"avg_line_length": 28.33076923076923,
"alnum_prop": 0.626934564213956,
"repo_name": "bmbove/flask-api-boilerplate",
"id": "75c2152a052964828d299943d20a9bba7e99ec05",
"size": "3683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/auth/resources.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "21684"
}
],
"symlink_target": ""
} |
"""
Just a quick test to make sure there's no ``cls`` or ``self`` odness.
"""
from __future__ import print_function
import sys
import time
from yamicache import Cache
# Module-level cache instance shared by every decorated function below.
c = Cache(prefix="myapp", hashing=False, debug=False)
@c.cached()
def my_func(argument, power):
    """running my_func"""
    # pow() is equivalent to the ** operator here.
    return pow(argument, power)
@c.cached()
def return_list(index):
    """Return one of two fixed lists keyed by ``index`` (0 or 1)."""
    return {0: [1, 2, 3], 1: [4, 5, 6]}[index]
def test_main():
    """Six identical calls must create exactly one cache entry."""
    assert len(c) == 0
    for _ in range(6):
        my_func(2, 3)
    assert len(c) == 1
    c.clear()
def test_lists():
    """Make sure lists are returned (second call of each hits the cache)."""
    for idx, expected in ((0, [1, 2, 3]), (1, [4, 5, 6])):
        assert return_list(idx) == expected
        assert return_list(idx) == expected
class MyObject(object):
    """Trivial value holder used to verify objects survive the cache."""

    def __init__(self, number):
        self.number = number
@c.cached(timeout=1)
def return_object_list():
    """Return three MyObject instances numbered 0..2 (cached for one second)."""
    return [MyObject(n) for n in range(3)]
def test_object_list():
    """Test a result with a timeout & objects are returned"""
    first = return_object_list()
    assert [obj.number for obj in first] == [0, 1, 2]
    # Wait past the 1-second timeout so the entry expires, then re-fetch.
    time.sleep(2)
    second = return_object_list()
    assert [obj.number for obj in second] == [0, 1, 2]
if __name__ == "__main__":
    # Direct execution only exercises test_main; the rest run under pytest.
    test_main()
| {
"content_hash": "b71af69fabc4d3d16599972e370597fc",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 69,
"avg_line_length": 20.954545454545453,
"alnum_prop": 0.5929139551699205,
"repo_name": "mtik00/yamicache",
"id": "c7c4cc51b481a1ae156a149562a1faf2c7a1a20e",
"size": "1383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_noclass.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46773"
}
],
"symlink_target": ""
} |
from netCDF4 import Dataset
import netCDF4
import os
from Volume import Volume
#from Ray import Ray
from Sweep import Sweep
from numpy import *
from pylab import *
class CfRadial:
    """
    A class for reading and parsing netCDF file.

    Parameters
    ----------
    filename : string
        The file name with full path.

    Attributes
    ----------
    filename : string
        NetCDF file.
    dimensions : OrderedDict
        Variable names in netCDF file.
    variables : list
        Variable names for display.
    dicTreeList : dict
        Storing netCDF attributes.
    treeList : list
        Storing netCDF attributes.
    ncfile : Dataset
        NetCDF object.
    volume : Volume
        Volume object for storing sweeps.
    sweepElevation : list
        Sweep elevation.
    displayMode : string
        Display mode in "BASIC" or "BSCAN"
    timeLabel : string
        Time information read from netCDF file.
    """

    # Display-mode constants; selected by getDisplayMode().
    BSCANMODE = "BSCAN"
    BASICMODE = "BASIC"

    def __init__(self, filename):
        self.filename = filename
        self.dimensions = ''
        self.variables = ''
        self.dicTreeList = {'dimensions':[],'variables':[], 'global attributes':[]}
        self.treeList = []
        self.ncfile = None
        self.volume = None
        self.sweepElevation = None
        # Opens the dataset once to sniff the display mode (see getDisplayMode).
        self.displayMode = self.getDisplayMode()

    def readFile(self):
        """ read ncfile and get all information from ncfile """
        # NOTE(review): re-opens the file and rebinds self.ncfile without
        # closing the handle opened by getDisplayMode() -- confirm no leak.
        self.ncfile=Dataset(self.filename,'r')
        self.getMetadata()
        self.timeLabel = self.getTimeLabel()
        if self.displayMode == self.BSCANMODE:
            sweeps = self.retrieveData_BSCAN()
        else:
            sweeps = self.retrieveData(self.variables)
        return sweeps

    def getDisplayMode(self):
        """Return BSCANMODE when the lidar variables are present, else BASICMODE."""
        self.ncfile=Dataset(self.filename,'r')
        self.getMetadata()
        if "beta_a_backscat_parallel" in self.variables and \
           "extinction" in self.variables:
            return self.BSCANMODE
        else:
            return self.BASICMODE

    def getTimeLabel(self):
        """Build a "start - end" label from the time-coverage variables."""
        start = self.ncfile.variables['time_coverage_start'][:]
        end = self.ncfile.variables['time_coverage_end'][:]
        if type(start) == ma.core.MaskedArray :
            # Masked char arrays: join the underlying data instead.
            start = start.data
            end = end.data
        label = ''.join(start)
        label = label + " - " + ''.join(end)
        return label

    def retrieveData(self, var_list) :
        """ retrieve the list of variables specified by var_list """
        # NOTE(review): dtime and drange are computed but never used.
        nrays = dtime = len(self.ncfile.dimensions['time'])
        drange = len(self.ncfile.dimensions['range'])
        dsweep = len(self.ncfile.dimensions['sweep']) # number of sweeps
        maxgates=self.ncfile.variables['range'].shape[0]
        # The presence of an 'n_points' dimension distinguishes staggered
        # (variable gates per ray) from regular 2-D storage.
        n_points_flag = False
        if 'n_points' in self.ncfile.dimensions.keys():
            n_points = len(self.ncfile.dimensions['n_points'])
            n_points_flag = True
        vars_ = {}
        # retrieve the specified variables
        for v in var_list:
            vars_[v] = self.ncfile.variables[v]
        if n_points_flag == False:
            # Regular 2-D storage - constant number of gates
            #sweep_fixed_angle = self.ncfile.variables['fixed_angle'][:]
            pass
        else:
            # Staggered 2-D storage - variable number of gates
            # NOTE(review): triple assignment; ray_n_gates is redundant here.
            ray_len = ray_n_gates = ray_len = self.ncfile.variables['ray_n_gates'][:]
            ray_start_index = self.ncfile.variables['ray_start_index'][:]
        self.sweepElevation = sweep_fixed_angle = self.ncfile.variables['fixed_angle'][:] ## both Regular and Staggered have this field
        time = self.ncfile.variables['time'][:]
        ranges = self.ncfile.variables['range'][:]
        sweep_start_ray_index = self.ncfile.variables['sweep_start_ray_index'][:]
        sweep_end_ray_index = self.ncfile.variables['sweep_end_ray_index'][:]
        # Gate-by-ray grids of range/azimuth/elevation -> cartesian coords.
        rg,azg=meshgrid(ranges, self.ncfile.variables['azimuth'][:])
        rg,eleg=meshgrid(ranges,self.ncfile.variables['elevation'][:])
        x,y,z=self.radar_coords_to_cart(rg,azg, eleg)
        if n_points_flag == False:
            # Regular 2-D storage - constant number of gates
            print 'Regular 2-D storage - constant number of gates'
            time_range_list = {}
            for v in var_list:
                # -9999.0 is the fill value for missing gates.
                time_range=zeros([nrays, maxgates])-9999.0
                for ray in range(nrays):
                    time_range[ray, :]=vars_[v][ray]
                time_range_list[v] = time_range
        else:
            # Staggered 2-D storage - variable number of gates
            print 'Staggered 2-D storage - variable number of gates'
            time_range_list = {}
            for v in var_list:
                time_range=zeros([nrays, maxgates])-9999.0
                for ray in range(nrays):
                    # Copy only this ray's gates out of the flat n_points array.
                    time_range[ray, 0:ray_len[ray]]=vars_[v][ray_start_index[ray]:ray_start_index[ray]+ray_len[ray]]
                time_range_list[v] = time_range
        ## format data into sweeps
        sweeps = []
        for sweepnumber in range(dsweep):
            firstRay = sweep_start_ray_index[sweepnumber]
            lastRay = sweep_end_ray_index[sweepnumber]
            data = {}
            for v in var_list:
                data[v] = time_range_list[v][firstRay:lastRay]
            # sweep = Sweep(sweepnumber,ranges,x[firstRay:lastRay]/1e3,y[firstRay:lastRay]/1e3,data,self.timeLabel)
            sweep = Sweep(sweepnumber,ranges,x[firstRay:lastRay]/1e3,y[firstRay:lastRay]/1e3,data,self.timeLabel,None)
            sweeps.append(sweep)
        return sweeps

    def retrieveData_BSCAN(self):
        """ retrieve data as required in BSCAN mode display """
        time = self.ncfile.variables['time'][:]
        time_coverage_start = self.ncfile.variables['time_coverage_start'][:].compressed().tostring()
        time_coverage_end = self.ncfile.variables['time_coverage_end'][:].compressed().tostring()
        altitude = self.ncfile.variables['altitude'][:]
        ranges = self.ncfile.variables['range'][:]
        data = {}
        for v in self.variables:
            data[v] = self.ncfile.variables[v][:]
            # Log-scale the values for display.
            data[v] = np.ma.log10(data[v])
        sweeps = []
        # Single pseudo-sweep: x axis is time, y axis is range.
        sweep = Sweep(0,ranges,time,ranges,data,None,[time_coverage_start,time_coverage_end])
        sweeps.append(sweep)
        return sweeps

    def radar_coords_to_cart(self, rng, az, ele, debug=False):
        """
        Convert radar (range, azimuth, elevation) to cartesian (x, y, z).
        Assumes standard atmosphere, ie R=4Re/3.
        """
        Re=6371.0*1000.0   # Earth radius in metres
        p_r=4.0*Re/3.0     # effective radius (4/3 Earth model)
        rm=rng
        # Height above the radar.
        z=(rm**2 + p_r**2 + 2.0*rm*p_r*np.sin(ele*np.pi/180.0))**0.5 -p_r
        #arc length
        s=p_r*np.arcsin(rm*np.cos(ele*np.pi/180.)/(p_r+z))
        if debug: print "Z=", z, "s=", s
        y=s*np.cos(az*np.pi/180.0)
        x=s*np.sin(az*np.pi/180.0)
        return x,y,z

    def cal_cart_BSCAN(self, rng, tx, debug=False):
        """
        BSCAN coordinates: identity mapping (y=range, x=time).
        """
        y = rng
        x = tx
        return x,y

    def closeFile(self):
        """ close ncfile """
        self.ncfile.close()

    def getValue(self, parent, child):
        """Print (for debugging) the value of one dimension/variable/attribute."""
        if parent == 'dimensions':
            print '>>>>>>>>>> dim'
            value = self.ncfile.dimensions[child]
        elif parent == 'variables':
            print '>>>>>>>>>> var'
            value = self.ncfile.variables[child][:]
        elif parent == 'global attributes':
            print '>>>>>>>>>> glo'
            # NOTE(review): this reads getattr(self.ncfile, parent) where
            # parent is the literal 'global attributes' -- looks like it
            # should be getattr(self.ncfile, child); confirm.
            value = getattr(self.ncfile, parent)
        else:
            print 'not found!'
        # NOTE(review): 'value' is unbound when no branch matched above;
        # the prints below would raise NameError in that case.
        print type(value)
        print value
        return 'done'

    def getMetadata(self):
        """ get all information from ncfile """
        self.dimensions = self.ncfile.dimensions.keys() # list
        self.variables = self.varFilter(self.ncfile.variables.keys()) # list
        self.globalAttList = self.ncfile.ncattrs() #dir(self.ncfile)
        # self.printing()
        # print type(self.globalAttList)
        # self.fileContent = ncdumpread()
        self.dicTreeList['dimensions'] = self.dimensions
        self.dicTreeList['variables'] = self.variables
        # self.dicTreeList['global attributes'] = self.globalAttList
        # Build the nested list used by the GUI tree widget.
        ld = ['dimensions']
        ld.append(self.dimensions)
        lv = ['variables']
        lv.append(self.variables)
        lg = ['global attributes']
        lg.append(self.globalAttList)
        self.treeList = [ld,lv,lg]

    def varFilter(self, variables):
        """ filter out variables, might not be necessary """
        var_lst = []
        n_points_flag = False
        if 'n_points' in self.ncfile.dimensions.keys():
            n_points = len(self.ncfile.dimensions['n_points'])
            n_points_flag = True
        for v in variables:
            d = self.ncfile.variables[v].dimensions
            if n_points_flag == False: # for constant ngates
                # keep only (time, range) 2-D variables
                if len(d) == 2:
                    if d[0] == 'time' and d[1] == 'range':
                        var_lst.append(v)
            else: # for variable ngates
                if len(d) == 1:
                    if d[0] == 'n_points':
                        var_lst.append(v)
        # If both lidar variables exist, restrict to exactly those two.
        tmp = [v.lower() for v in var_lst]
        if "beta_a_backscat_parallel" in tmp and \
           "extinction" in tmp:
            var_lst = ['extinction','beta_a_backscat_parallel']
        return var_lst

    def getVar_npoints(self):
        # NOTE(review): bare attribute access -- this method has no effect.
        self.ncfile

    def convertListToString(self, lList):
        # NOTE(review): str.join takes a single iterable argument, so
        # ' '.join(res, l) raises TypeError -- likely meant ' '.join(lList)
        # or ' '.join([res, l]); confirm before relying on this helper.
        res = ''
        for l in lList:
            res = ' '.join(res,l)
        return res

    def ncdumpRead(self):
        """Run the external 'ncdump' tool and return its output tuple."""
        # NOTE(review): 'subprocess' is not among this file's visible imports
        # and 'self.dirname' is never set in __init__ -- verify before use.
        command = 'ncdump -f C ' + os.path.join(self.dirname, self.filename)
        #return subprocess.check_output(cmd, shell=True)
        process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
        output = process.communicate()
        retcode = process.poll()
        if retcode:
            raise subprocess.CalledProcessError(retcode, command, output=output[0])
        print type(output)
        # print output[0]
        print '----------------'
        # print output[1]
        print 'tuple length = ', len(output)
        return output

    def printing(self):
        """Print the parsed metadata (debugging aid)."""
        print "self.dimensions : ", self.dimensions
        print "self.variables : ", self.variables
        print "self.globalAttList : ", self.globalAttList

    def setVolume(self,sweeps,ranges): ## not called
        self.volume = Volume(sweeps, ranges)

    def dumpSweeps(sweeps):
        # NOTE(review): defined inside the class without 'self'; when called
        # on an instance, 'sweeps' would bind to the instance. Body is a stub.
        for s in sweeps:
            pass
if __name__ == "__main__":
    # Manual smoke test; earlier experiments kept for reference.
    # writeToFile("testing.nc")
    # readFile("testing.nc")
    # readFile("sresa1b_ncar_ccsm3_0_run1_200001.nc")
    # readFile("testing3.nc")
    cf = CfRadial('/tmp/test.nc')
    #sweeps = cf.readFile(['DBZ','VEL'])
    print 'readFile complete'
| {
"content_hash": "dbe54f82cafe6af68a282053e56d48ae",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 135,
"avg_line_length": 33.88145896656535,
"alnum_prop": 0.5598815824885619,
"repo_name": "ncareol/lrose-soloPy",
"id": "c0a0484eec48602a7a5885d9a0009fd31b048a0e",
"size": "11547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lrose_solopy/CfRadial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "103446"
}
],
"symlink_target": ""
} |
from .MasirApp import MasirApp
| {
"content_hash": "924502ee180600037136083d5a8de79c",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 30,
"avg_line_length": 31,
"alnum_prop": 0.8387096774193549,
"repo_name": "anguoyang/SMQTK",
"id": "a96e56816c744a339ab61ee125abe65de8705a68",
"size": "32",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MASIR/python/masir/web/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "168608"
},
{
"name": "C++",
"bytes": "1555888"
},
{
"name": "CMake",
"bytes": "133035"
},
{
"name": "CSS",
"bytes": "173954"
},
{
"name": "Cuda",
"bytes": "138262"
},
{
"name": "HTML",
"bytes": "353039"
},
{
"name": "Java",
"bytes": "197569"
},
{
"name": "JavaScript",
"bytes": "1953035"
},
{
"name": "Makefile",
"bytes": "8196"
},
{
"name": "Matlab",
"bytes": "46934"
},
{
"name": "Perl",
"bytes": "3476237"
},
{
"name": "Perl6",
"bytes": "286157"
},
{
"name": "Python",
"bytes": "2120427"
},
{
"name": "Shell",
"bytes": "4944"
},
{
"name": "TeX",
"bytes": "149162"
}
],
"symlink_target": ""
} |
from fluent_contents.extensions import ContentPlugin, plugin_pool
from models import PromoPluginModel
from forms import PromoForm
from django.utils.translation import ugettext as _
@plugin_pool.register
class PromoPlugin(ContentPlugin):
    """fluent_contents plugin that renders a promo block for one or more categories."""

    model = PromoPluginModel # Model where data about this plugin is saved
    category = _('Advanced')
    render_template = "publication_backbone/plugins/promo/promo.html"
    cache_output = False
    form=PromoForm

    def get_render_template(self, request, instance, **kwargs):
        # Prefer a per-instance template when the model provides one.
        if instance.get_template_name:
            return instance.get_template_name
        return self.render_template

    def get_context(self, request, instance, **kwargs):
        """
        Return the context to use in the template defined by ``render_template`` (or :func:`get_render_template`).
        By default, it returns the model instance as ``instance`` field in the template.
        """
        context = super(PromoPlugin, self).get_context(request, instance, **kwargs)
        # Choose a column divider for the grid: the first divisor of the item
        # count (6 itself leaves divider as None), otherwise -- for counts
        # larger than a full 12-column row -- the first candidate tried.
        # NOTE(review): indentation of this loop was reconstructed; verify
        # the break placement against the original.
        COLUMN_DIVIDERS = [6,5,4,3,2]
        ic = instance.count
        divider = None
        for cd in COLUMN_DIVIDERS:
            n = ic % cd
            if n == 0:
                if cd != 6:
                    divider = cd
                break
            else:
                if ic > 12:
                    divider = cd
                    break
        #instance.category = instance.categories[0]
        categories = instance.categories.all()
        context.update({
            'instance': instance,
            'plugin_id': "%s" % instance.pk,
            'category': categories[0] if categories else None,
            'categories': categories,
            'divider': divider,
            'to_publication': instance.to_publication if instance.to_publication else None,
        })
        return context
| {
"content_hash": "ed3e77c9fc5a50a9f45a228829ea2bc6",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 114,
"avg_line_length": 33.285714285714285,
"alnum_prop": 0.5912017167381974,
"repo_name": "Excentrics/publication-backbone",
"id": "ae8809fc32a009e7244136761ffef6c8a6c91479",
"size": "1888",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "publication_backbone/plugins/promo/content_plugins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "447762"
},
{
"name": "HTML",
"bytes": "217091"
},
{
"name": "JavaScript",
"bytes": "904819"
},
{
"name": "Python",
"bytes": "470545"
}
],
"symlink_target": ""
} |
"""
Copyright 2016 Fedele Mantuano (https://twitter.com/fedelemantuano)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import unicode_literals
from collections import namedtuple, Counter
from email.errors import HeaderParseError
from email.header import decode_header
from unicodedata import normalize
import base64
import datetime
import email
import functools
import hashlib
import logging
import os
import random
import re
import simplejson as json
import string
import subprocess
import sys
import tempfile
import six
from .const import (
ADDRESSES_HEADERS,
JUNK_PATTERN,
OTHERS_PARTS,
RECEIVED_COMPILED_LIST)
from .exceptions import MailParserOSError, MailParserReceivedParsingError
log = logging.getLogger(__name__)
def custom_log(level="WARNING", name=None):  # pragma: no cover
    """Configure and return a logger that writes verbose records to stdout."""
    logger = logging.getLogger(name) if name else logging.getLogger()
    logger.setLevel(level)
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter(
        "%(asctime)s | "
        "%(name)s | "
        "%(module)s | "
        "%(funcName)s | "
        "%(levelname)s | "
        "%(message)s"))
    logger.addHandler(handler)
    return logger
def sanitize(func):
    """Decorator: NFC-normalize the text returned by ``func`` (NFC is the
    normalization form recommended by W3C)."""
    @functools.wraps(func)
    def inner(*args, **kwargs):
        raw = func(*args, **kwargs)
        return normalize('NFC', raw)
    return inner
@sanitize
def ported_string(raw_data, encoding='utf-8', errors='ignore'):
    """
    Convert raw data to text: str on Python 3, unicode on Python 2
    (NFC-normalized by the decorator).

    Args:
        raw_data: Python 2 str, Python 3 bytes or str to porting
        encoding: string giving the name of an encoding
        errors: treatment of characters which are invalid
            in the input encoding

    Returns:
        str (Python 3) or unicode (Python 2)
    """
    if not raw_data:
        return six.text_type()
    if isinstance(raw_data, six.text_type):
        return raw_data
    if six.PY2:
        try:
            return six.text_type(raw_data, encoding, errors)
        except LookupError:
            # Unknown codec name: retry with UTF-8.
            return six.text_type(raw_data, "utf-8", errors)
    try:
        return six.text_type(raw_data, encoding)
    except (LookupError, UnicodeDecodeError):
        return six.text_type(raw_data, "utf-8", errors)
def decode_header_part(header):
    """
    Decode a raw RFC 2047 email header into text.

    Args:
        header (string): header to decode

    Returns:
        str (Python 3) or unicode (Python 2), stripped
    """
    if not header:
        return six.text_type()
    decoded = six.text_type()
    try:
        for chunk, charset in decode_header(header):
            decoded += ported_string(chunk, charset if charset else 'utf-8', 'ignore')
    # Header parsing failed, when header has charset Shift_JIS
    except (HeaderParseError, UnicodeError):
        log.error("Failed decoding header part: {}".format(header))
        decoded += header
    return decoded.strip()
def ported_open(file_):
    """Open ``file_`` for reading; on Python 3 force UTF-8 and ignore errors."""
    if six.PY2:
        return open(file_)
    return open(file_, encoding="utf-8", errors='ignore')
def find_between(text, first_token, last_token):
    """Return the stripped substring of ``text`` between the two tokens,
    or None when either token is missing."""
    try:
        begin = text.index(first_token) + len(first_token)
        finish = text.index(last_token, begin)
    except ValueError:
        return None
    return text[begin:finish].strip()
def fingerprints(data):
"""
This function return the fingerprints of data.
Args:
data (string): raw data
Returns:
namedtuple: fingerprints md5, sha1, sha256, sha512
"""
Hashes = namedtuple('Hashes', "md5 sha1 sha256 sha512")
if six.PY2:
if not isinstance(data, str):
data = data.encode("utf-8")
elif six.PY3:
if not isinstance(data, bytes):
data = data.encode("utf-8")
# md5
md5 = hashlib.md5()
md5.update(data)
md5 = md5.hexdigest()
# sha1
sha1 = hashlib.sha1()
sha1.update(data)
sha1 = sha1.hexdigest()
# sha256
sha256 = hashlib.sha256()
sha256.update(data)
sha256 = sha256.hexdigest()
# sha512
sha512 = hashlib.sha512()
sha512.update(data)
sha512 = sha512.hexdigest()
return Hashes(md5, sha1, sha256, sha512)
def msgconvert(email):
    """
    Exec msgconvert tool, to convert msg Outlook
    mail in eml mail format

    Args:
        email (string): file path of Outlook msg mail

    Returns:
        tuple with file path of mail converted and
        standard output data (unicode Python 2, str Python 3)

    Raises:
        MailParserOSError: when the external msgconvert tool is missing.
    """
    log.debug("Started converting Outlook email")
    # NOTE(review): the temporary file is not removed here -- presumably the
    # caller is responsible for cleaning it up; confirm.
    temph, temp = tempfile.mkstemp(prefix="outlook_")
    command = ["msgconvert", "--outfile", temp, email]
    try:
        if six.PY2:
            # Python 2 has no subprocess.DEVNULL; discard stderr via os.devnull.
            with open(os.devnull, "w") as devnull:
                out = subprocess.Popen(
                    command, stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE, stderr=devnull)
        elif six.PY3:
            out = subprocess.Popen(
                command, stdin=subprocess.PIPE,
                stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
    except OSError as e:
        message = "Check if 'msgconvert' tool is installed / {!r}".format(e)
        log.exception(message)
        raise MailParserOSError(message)
    else:
        stdoutdata, _ = out.communicate()
        return temp, stdoutdata.decode("utf-8").strip()
    finally:
        # Always close the mkstemp file descriptor, success or failure.
        os.close(temph)
def parse_received(received):
    """
    Parse a single received header.
    Return a dictionary of values by clause.

    Arguments:
        received {str} -- single received header

    Raises:
        MailParserReceivedParsingError -- Raised when a
            received header cannot be parsed

    Returns:
        dict -- values by clause
    """
    values_by_clause = {}
    for pattern in RECEIVED_COMPILED_LIST:
        matches = [match for match in pattern.finditer(received)]
        if len(matches) == 0:
            # no matches for this clause, but it's ok! keep going!
            log.debug("No matches found for %s in %s" % (
                pattern.pattern, received))
            continue
        elif len(matches) > 1:
            # uh, can't have more than one of each clause in a received.
            # so either there's more than one or the current regex is wrong
            msg = "More than one match found for %s in %s" % (
                pattern.pattern, received)
            log.error(msg)
            raise MailParserReceivedParsingError(msg)
        else:
            # otherwise we have one matching clause!
            log.debug("Found one match for %s in %s" % (
                pattern.pattern, received))
            # Each pattern defines a single named group; copy it over.
            match = matches[0].groupdict()
            if six.PY2:
                # Python 2 dict.keys()/values() return lists, so direct
                # indexing works without an explicit list() conversion.
                values_by_clause[match.keys()[0]] = match.values()[0]
            elif six.PY3:
                key = list(match.keys())[0]
                value = list(match.values())[0]
                values_by_clause[key] = value
    if len(values_by_clause) == 0:
        # we weren't able to match anything...
        msg = "Unable to match any clauses in %s" % (received)
        # Modification #1: Commenting the following log as
        # this raised exception is caught above and then
        # raw header is updated in response
        # We dont want to get so many errors in our error
        # logger as we are not even trying to parse the
        # received headers
        # Wanted to make it configurable via settiings,
        # but this package does not depend on django and
        # making configurable setting
        # will make it django dependent,
        # so better to keep it working with only python
        # dependent and on any framework of python
        # commenting it just for our use
        # log.error(msg)
        raise MailParserReceivedParsingError(msg)
    return values_by_clause
def receiveds_parsing(receiveds):
    """
    This function parses the receiveds headers.

    Args:
        receiveds (list): list of raw receiveds headers

    Returns:
        a list of parsed receiveds headers with first hop in first position;
        hops that cannot be parsed are kept as {'raw': <header>}.
    """
    cleaned = [re.sub(JUNK_PATTERN, " ", i).strip() for i in receiveds]
    total = len(cleaned)
    log.debug("Nr. of receiveds. {}".format(total))
    parsed = []
    for idx, received in enumerate(cleaned):
        log.debug("Parsing received {}/{}".format(idx + 1, total))
        log.debug("Try to parse {!r}".format(received))
        try:
            parsed.append(parse_received(received))
        except MailParserReceivedParsingError:
            # Unparsable hop: keep the raw header instead.
            parsed.append({'raw': received})
    log.debug("len(receiveds) %s, len(parsed) %s" % (
        len(cleaned), len(parsed)))
    if len(cleaned) != len(parsed):
        # Something really bad happened; return raw receiveds with hop indices.
        log.error("len(receiveds): %s, len(parsed): %s, receiveds: %s, \
            parsed: %s" % (len(cleaned), len(parsed), cleaned, parsed))
        return receiveds_not_parsed(cleaned)
    # All good: we have parsed or raw receiveds for each received header.
    return receiveds_format(parsed)
def convert_mail_date(date):
    """Parse an RFC 2822 date string; return (naive UTC datetime, tz-offset label)."""
    log.debug("Date to parse: {!r}".format(date))
    parsed = email.utils.parsedate_tz(date)
    log.debug("Date parsed: {!r}".format(parsed))
    timestamp = email.utils.mktime_tz(parsed)
    log.debug("Date parsed in timestamp: {!r}".format(timestamp))
    utc_dt = datetime.datetime.utcfromtimestamp(timestamp)
    # parsed[9] is the offset in seconds; format as signed hours, e.g. "-8.0".
    offset_hours = parsed[9] / 3600.0 if parsed[9] else 0
    tz_label = "{:+.1f}".format(offset_hours)
    log.debug("Calculated timezone: {!r}".format(tz_label))
    return utc_dt, tz_label
def receiveds_not_parsed(receiveds):
    """
    Wrap raw (unparsable) receiveds in the same structure as parsed ones.

    Args:
        receiveds (list): list of raw receiveds headers

    Returns:
        a list of {'raw': ..., 'hop': n} dicts with first hop in first position
    """
    log.debug("Receiveds for this email are not parsed")
    # Reverse so the bottom-most Received header (first hop) comes first.
    return [{"raw": raw.strip(), "hop": hop}
            for hop, raw in enumerate(receiveds[::-1], 1)]
def receiveds_format(receiveds):
    """
    Given a list of receiveds hop, adds metadata and reformat
    field values

    Args:
        receiveds (list): list of receiveds hops already formatted

    Returns:
        list of receiveds reformated and with new fields
    """
    log.debug("Receiveds for this email are parsed")
    output = []
    counter = Counter()
    # Reverse so the bottom-most Received header (first hop) comes first.
    for i in receiveds[::-1]:
        # Clean strings
        j = {k: v.strip() for k, v in i.items() if v}
        # Add hop
        j["hop"] = counter["hop"] + 1
        # Add UTC date
        if i.get("date"):
            # Modify date to manage strange header like:
            # "for <eboktor@romolo.com>; Tue, 7 Mar 2017 14:29:24 -0800",
            i["date"] = i["date"].split(";")[-1]
            try:
                j["date_utc"], _ = convert_mail_date(i["date"])
            except TypeError:
                # parsedate_tz returned None (unparsable date)
                j["date_utc"] = None
        # Add delay: seconds since the previous hop, 0 when unknown.
        size = len(output)
        now = j.get("date_utc")
        if size and now:
            # counter["hop"] has not been incremented yet, so this indexes
            # the previously appended hop.
            before = output[counter["hop"] - 1].get("date_utc")
            if before:
                j["delay"] = (now - before).total_seconds()
            else:
                j["delay"] = 0
        else:
            j["delay"] = 0
        # append result
        output.append(j)
        # new hop
        counter["hop"] += 1
    else:
        # for/else: runs after the loop completes; serialize datetimes.
        for i in output:
            if i.get("date_utc"):
                i["date_utc"] = i["date_utc"].isoformat()
        else:
            return output
def get_to_domains(to=None, reply_to=None):
    """
    Return the distinct lowercased domains of the addresses in ``to`` +
    ``reply_to``.

    Args:
        to (list): (realname, address) pairs, e.g. from email.utils.getaddresses
        reply_to (list): same shape

    Returns:
        list of unique domain strings
    """
    # FIX: mutable default arguments ([]) replaced with None sentinels.
    domains = set()
    for pair in (to or []) + (reply_to or []):
        try:
            domains.add(pair[1].split("@")[-1].lower().strip())
        # FIX: indexing a tuple/list raises IndexError (or TypeError /
        # AttributeError for malformed entries), never KeyError, so the old
        # ``except KeyError`` could not actually protect this line.
        except (KeyError, IndexError, TypeError, AttributeError):
            pass
    return list(domains)
def get_header(message, name):
    """
    Gets an email.message.Message and a header name and returns
    the mail header decoded with the correct charset.

    Args:
        message (email.message.Message): email message object
        name (string): header to get

    Returns:
        str if there is one header, list if there are several,
        empty string if the header is absent
    """
    raw_values = message.get_all(name)
    log.debug("Getting header {!r}: {!r}".format(name, raw_values))
    if not raw_values:
        return six.text_type()
    decoded = [decode_header_part(v) for v in raw_values]
    if len(decoded) == 1:
        # in this case return a string
        return decoded[0].strip()
    # in this case return a list
    return decoded
def get_mail_keys(message, complete=True):
    """
    Given an email.message.Message, return a set with all email parts to get

    Args:
        message (email.message.Message): email message object
        complete (bool): if True returns all email headers

    Returns:
        set with all email parts
    """
    parts = ADDRESSES_HEADERS | OTHERS_PARTS
    if complete:
        log.debug("Get all headers")
        parts = parts | {key.lower() for key in message.keys()}
    else:
        log.debug("Get only mains headers")
    log.debug("All parts to get: {}".format(", ".join(parts)))
    return parts
def safe_print(data):  # pragma: no cover
    """Print ``data``; fall back to UTF-8 bytes when stdout cannot encode it."""
    try:
        print(data)
    except UnicodeEncodeError:
        # Encoding to UTF-8 bytes always succeeds even when the console
        # codec cannot represent some characters.
        print(data.encode('utf-8'))
def print_mail_fingerprints(data):  # pragma: no cover
    """Print the four fingerprint digests of ``data``, one per line."""
    hashes = fingerprints(data)
    for label, digest in zip(("md5", "sha1", "sha256", "sha512"), hashes):
        print("{}:\t{}".format(label, digest))
def print_attachments(attachments, flag_hash):  # pragma: no cover
    """Dump each attachment as JSON; when ``flag_hash`` is true, add
    fingerprint digests of the (decoded) payload first."""
    if flag_hash:
        for att in attachments:
            if att.get("content_transfer_encoding") == "base64":
                body = base64.b64decode(att["payload"])
            else:
                body = att["payload"]
            att["md5"], att["sha1"], att["sha256"], att["sha512"] = \
                fingerprints(body)
    for att in attachments:
        safe_print(json.dumps(att, ensure_ascii=False, indent=4))
def write_attachments(attachments, base_path):  # pragma: no cover
    """Write every attachment in ``attachments`` under ``base_path``."""
    for att in attachments:
        write_sample(
            binary=att["binary"],
            payload=att["payload"],
            path=base_path,
            filename=att["filename"],
        )
def write_sample(binary, payload, path, filename):  # pragma: no cover
    """
    Write a sample to the file system.

    Args:
        binary (bool): True if it's a binary file
        payload: payload of sample, base64-encoded when binary
        path (string): destination directory (created if missing)
        filename (string): name of file
    """
    if not os.path.exists(path):
        os.makedirs(path)
    destination = os.path.join(path, filename)
    if binary:
        with open(destination, "wb") as handle:
            handle.write(base64.b64decode(payload))
    else:
        with open(destination, "w") as handle:
            handle.write(payload)
def random_string(string_length=10):
    """Generate a random lowercase ASCII string of fixed length.

    Keyword Arguments:
        string_length {int} -- String length (default: {10})

    Returns:
        str -- Random string
    """
    return ''.join(
        random.choice(string.ascii_lowercase) for _ in range(string_length))
| {
"content_hash": "7cd6bf0edc7421237f23ecf8ad58f20a",
"timestamp": "",
"source": "github",
"line_count": 587,
"max_line_length": 79,
"avg_line_length": 28.061328790459967,
"alnum_prop": 0.5994414764448761,
"repo_name": "SpamScope/mail-parser",
"id": "b25fa34d84f72931fa4d0676f21d6509389d687d",
"size": "16519",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "mailparser/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "430"
},
{
"name": "Makefile",
"bytes": "2134"
},
{
"name": "Python",
"bytes": "81758"
}
],
"symlink_target": ""
} |
"""
Test package for verifying that the Forseti library works as intended.
"""
| {
"content_hash": "a14e0be1d8ec5b213e516305b41a2c5b",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 51,
"avg_line_length": 20,
"alnum_prop": 0.75,
"repo_name": "MasterOdin/forseti",
"id": "8441bfc790fba04dbf9c58239a5e6a9ded7e50b3",
"size": "60",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "74093"
}
],
"symlink_target": ""
} |
import os
import numpy as np
import nibabel as nib
from nilabels.tools.detections.check_imperfections import check_missing_labels
from nilabels.tools.aux_methods.label_descriptor_manager import LabelsDescriptorManager
from tests.tools.decorators_tools import pfo_tmp_test, \
write_and_erase_temporary_folder_with_dummy_labels_descriptor
@write_and_erase_temporary_folder_with_dummy_labels_descriptor
def test_check_missing_labels():
    """check_missing_labels must report descriptor-only labels, image-only
    labels, and write the log file."""
    # Labels descriptor is created by the decorator in the temporary folder.
    pfi_ld = os.path.join(pfo_tmp_test, 'labels_descriptor.txt')
    ldm = LabelsDescriptorManager(pfi_ld, labels_descriptor_convention='itk-snap')
    # Dummy segmentation: four cubes labelled 1, 12, 3 and 7.
    seg = np.zeros([10, 10, 10])
    for (lo, hi), label in (((0, 3), 1), ((3, 5), 12), ((5, 7), 3), ((7, 10), 7)):
        seg[lo:hi, lo:hi, lo:hi] = label
    im_segm = nib.Nifti1Image(seg, affine=np.eye(4))
    # Run the check, then verify both directions of the mismatch.
    pfi_log = os.path.join(pfo_tmp_test, 'check_imperfections_log.txt')
    in_descriptor_not_delineated, delineated_not_in_descriptor = check_missing_labels(im_segm, ldm, pfi_log)
    print(in_descriptor_not_delineated, delineated_not_in_descriptor)
    np.testing.assert_equal(in_descriptor_not_delineated, {8, 2, 4, 5, 6})  # in label descriptor, not in image
    np.testing.assert_equal(delineated_not_in_descriptor, {12})  # in image not in label descriptor
    assert os.path.exists(pfi_log)
if __name__ == "__main__":
    # Allow running this test module directly without pytest.
    test_check_missing_labels()
| {
"content_hash": "105b93334a492e2ce6c4e9f01142ca3d",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 111,
"avg_line_length": 35.30952380952381,
"alnum_prop": 0.7120701281186783,
"repo_name": "SebastianoF/LabelsManager",
"id": "95b49d012102185b16eb3c9bd32d6e5516821912",
"size": "1483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/tools/test_detections_check_imperfections.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "255204"
}
],
"symlink_target": ""
} |
import argparse
import json
import os
import netaddr
# tmsh partition and object-name prefix used for every generated F5 object.
PART = 'RPC'
PREFIX_NAME = 'RPC'

# Template for the command creating the shared SNAT pool; expects a
# 'snat_pool_addresses' substitution with space-separated member IPs.
SNAT_POOL = (
    '### CREATE SNATPOOL ###\n'
    'create ltm snatpool /' + PART + '/' + PREFIX_NAME + '_SNATPOOL { members replace-all-with {'
    ' %(snat_pool_addresses)s } }'
)

# Persistence profiles: source-address (1h timeout, match across services)
# and cookie persistence.  NOTE: the misspelled name "PERSISTANCE" is kept
# as-is because it is referenced later in this script.
PERSISTANCE = [
    r'create ltm persistence source-addr /' + PART + '/' + PREFIX_NAME + '_PROF_PERSIST_IP {'
    r' app-service none defaults-from /Common/source_addr'
    r' match-across-services enabled timeout 3600 }',
    r'create ltm persistence cookie /' + PART + '/' + PREFIX_NAME + '_PROF_PERSIST_COOKIE {'
    r' app-service none cookie-name RPC-COOKIE defaults-from /Common/cookie }''\n'
]
# Health-monitor creation commands, one per service type.  Raw strings so
# the literal \r\n sequences survive into the generated HTTP send strings.
MONITORS = [
    r'create ltm monitor mysql /' + PART + '/' + PREFIX_NAME + '_MON_GALERA { count 1 database'
    r' information_schema debug no defaults-from mysql destination *:*'
    r' interval 3 recv big5_chinese_ci recv-column 2 recv-row 0 send "select'
    r' * from CHARACTER_SETS;" time-until-up 0 timeout 10 username monitoring }',
    r'create ltm monitor http /' + PART + '/' + PREFIX_NAME + '_MON_HTTP_KEYSTONE_ADMIN { defaults-from'
    r' http destination *:35357 recv "200 OK" send "HEAD /v3 HTTP/1.1\r\nHost:'
    r' rpc\r\n\r\n" }',
    r'create ltm monitor http /' + PART + '/' + PREFIX_NAME + '_MON_HTTP_NOVA_API_METADATA {'
    r' defaults-from http destination *:8775 recv "200 OK" send "HEAD /'
    r' HTTP/1.1\r\nHost: rpc\r\n\r\n" }',
    r'create ltm monitor http /' + PART + '/' + PREFIX_NAME + '_MON_HTTP_HORIZON { defaults-from http'
    r' destination *:80 recv "302 Found" send "HEAD / HTTP/1.1\r\nHost:'
    r' rpc\r\n\r\n" }',
    r'create ltm monitor http /' + PART + '/' + PREFIX_NAME + '_MON_HTTP_NOVA_SPICE_CONSOLE {'
    r' defaults-from http destination *:6082 recv "200 OK" send "HEAD /'
    r' HTTP/1.1\r\nHost: rpc\r\n\r\n" }',
    r'create ltm monitor https /' + PART + '/' + PREFIX_NAME + '_MON_HTTPS_HORIZON_SSL { defaults-from'
    r' https destination *:443 recv "302 FOUND" send "HEAD / HTTP/1.1\r\nHost:'
    r' rpc\r\n\r\n" }',
    r'create ltm monitor https /' + PART + '/' + PREFIX_NAME + '_MON_HTTPS_NOVA_SPICE_CONSOLE {'
    r' defaults-from https destination *:6082 recv "200 OK" send "HEAD /'
    r' HTTP/1.1\r\nHost: rpc\r\n\r\n" }',
    r'create ltm monitor tcp /' + PART + '/' + PREFIX_NAME + '_MON_TCP_NOVA_API_EC2 { defaults-from tcp'
    r' destination *:8773 }',
    r'create ltm monitor tcp /' + PART + '/' + PREFIX_NAME + '_MON_TCP_HEAT_API_CFN { defaults-from tcp'
    r' destination *:8000 }',
    r'create ltm monitor tcp /' + PART + '/' + PREFIX_NAME + '_MON_TCP_HEAT_API_CLOUDWATCH {'
    r' defaults-from tcp destination *:8003 }',
    r'create ltm monitor tcp /' + PART + '/' + PREFIX_NAME + '_MON_TCP_KIBANA { defaults-from tcp'
    r' destination *:80 }',
    r'create ltm monitor tcp /' + PART + '/' + PREFIX_NAME + '_MON_TCP_KIBANA_SSL { defaults-from tcp'
    r' destination *:8443 }',
    r'create ltm monitor tcp /' + PART + '/' + PREFIX_NAME + '_MON_TCP_ELASTICSEARCH { defaults-from'
    r' tcp destination *:9200 }',
    r'create ltm monitor http /' + PART + '/' + PREFIX_NAME + '_MON_HTTP_REPO {'
    r' defaults-from http destination *:8181 recv "200 OK" send "HEAD /'
    r' HTTP/1.1\r\nHost: rpc\r\n\r\n" }'
    '\n'
]
# ltm node template: one node object per inventory host.
NODES = (
    'create ltm node /' + PART + '/%(node_name)s { address %(container_address)s }'
)

# Appended to a pool member when the pool uses priority groups.
PRIORITY_ENTRY = '{ priority-group %(priority_int)s }'

# Pieces of an ltm pool command; 'priority' is only inserted for pools
# flagged with priority groups (see POOL_PARTS below).
POOL_NODE = {
    'beginning': 'create ltm pool /' + PART + '/%(pool_name)s {'
                 ' load-balancing-mode fastest-node members replace-all-with'
                 ' { %(nodes)s }',
    'priority': 'min-active-members 1',
    'end': 'monitor %(mon_type)s }'
}

# NOTE(review): this dict appears unused by main(); kept for compatibility.
VIRTUAL_ENTRIES_PARTS = {
    'command': 'create ltm virtual /' + PART + '/%(vs_name)s',
}

# Persistence clause substituted into virtual servers flagged 'persist'.
PERSIST_OPTION = 'persist replace-all-with { /' + PART + '/' + PREFIX_NAME + '_PROF_PERSIST_IP }'

# Commands appended at the very end of the generated script.
END_COMMANDS = [
    'save sys config',
    'run cm config-sync to-group SYNC-FAILOVER'
]

# Internal (VIP-facing) virtual server template.
VIRTUAL_ENTRIES = (
    'create ltm virtual /' + PART + '/%(vs_name)s {'
    ' destination %(internal_lb_vip_address)s:%(port)s'
    ' ip-protocol tcp mask 255.255.255.255'
    ' pool /' + PART + '/%(pool_name)s'
    r' profiles replace-all-with { /Common/fastL4 { } }'
    ' %(persist)s'
    ' source 0.0.0.0/0'
    ' source-address-translation { pool /' + PART + '/' + PREFIX_NAME + '_SNATPOOL type snat }'
    ' }'
)

# Public virtual server with client-side SSL offload profiles.
PUB_SSL_VIRTUAL_ENTRIES = (
    'create ltm virtual /' + PART + '/%(vs_name)s {'
    ' destination %(ssl_public_ip)s:%(port)s ip-protocol tcp'
    ' pool /' + PART + '/%(pool_name)s'
    r' profiles replace-all-with { /Common/tcp { } %(ssl_profiles)s }'
    ' %(persist)s'
    ' source-address-translation { pool /' + PART + '/' + PREFIX_NAME + '_SNATPOOL type snat }'
    ' }'
)

# Public virtual server without SSL (for services flagged ssl_impossible).
PUB_NONSSL_VIRTUAL_ENTRIES = (
    'create ltm virtual /' + PART + '/%(vs_name)s {'
    ' destination %(ssl_public_ip)s:%(port)s ip-protocol tcp'
    ' pool /' + PART + '/%(pool_name)s'
    r' profiles replace-all-with { /Common/fastL4 { } }'
    ' %(persist)s'
    ' source-address-translation { pool /' + PART + '/' + PREFIX_NAME + '_SNATPOOL type snat }'
    ' }'
)
# IP-forwarding virtual server that discards traffic to the host network
# (iRule RPC_DISCARD_ALL) on the public VLAN.
SEC_HOSTNET_VIRTUAL_ENTRIES = (
    'create ltm virtual /' + PART + '/' + PREFIX_NAME + '_LIMIT_ACCESS_TO_HOST_NET {'
    ' destination %(sec_host_net)s:0 ip-forward mask %(sec_host_netmask)s'
    r' profiles replace-all-with { /Common/fastL4 { } }'
    # Bug fix: a leading space is required here.  Without it the adjacent
    # string fragments concatenated to "{ } }rules {", which is not a valid
    # tmsh command (compare SEC_CONTAINER_VIRTUAL_ENTRIES below).
    ' rules { /' + PART + '/' + PREFIX_NAME + '_DISCARD_ALL }'
    ' translate-address disabled translate-port disabled vlans'
    ' replace-all-with { /Common/%(sec_public_vlan_name)s }'
    ' }'
)
# IP-forwarding virtual server that limits access to the container network
# (connection-limit 1 plus the RPC_DISCARD_ALL iRule).
SEC_CONTAINER_VIRTUAL_ENTRIES = (
    'create ltm virtual /' + PART + '/' + PREFIX_NAME + '_LIMIT_ACCESS_TO_CONTAINER_NET {'
    ' connection-limit 1 destination %(sec_container_net)s:0 ip-forward mask'
    ' %(sec_container_netmask)s profiles replace-all-with'
    ' { /Common/fastL4 { } } rules { /' + PART + '/' + PREFIX_NAME + '_DISCARD_ALL'
    ' } translate-address disabled translate-port disabled'
    ' }'
)
# This is a dict of all groups and their respected values / requirements.
# Per-entry keys:
#   port / backend_port -- virtual-server listen port / pool-member port
#   mon_type            -- monitor object attached to the pool
#   group               -- inventory group providing the pool members
#   priority            -- use priority groups (adds min-active-members 1)
#   make_public         -- also create a public virtual server
#   ssl_impossible      -- public VS is plain TCP (no SSL offload)
#   persist             -- attach the persistence profile
#   backend_ssl         -- re-encrypt to the members with the server-ssl profile
#   hosts               -- populated at runtime by build_pool_parts()
POOL_PARTS = {
    'galera': {
        'port': 3306,
        'backend_port': 3306,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_GALERA',
        'priority': True,
        'group': 'galera',
        'hosts': []
    },
    'glance_api': {
        'port': 9292,
        'backend_port': 9292,
        'mon_type': '/' + PART + '/RPC-MON-EXT-ENDPOINT',
        'group': 'glance_api',
        'make_public': True,
        'hosts': []
    },
    'glance_registry': {
        'port': 9191,
        'backend_port': 9191,
        'mon_type': '/' + PART + '/RPC-MON-EXT-ENDPOINT',
        'group': 'glance_registry',
        'hosts': []
    },
    'heat_api_cfn': {
        'port': 8000,
        'backend_port': 8000,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_TCP_HEAT_API_CFN',
        'group': 'heat_api_cfn',
        'make_public': True,
        'hosts': []
    },
    'heat_api_cloudwatch': {
        'port': 8003,
        'backend_port': 8003,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_TCP_HEAT_API_CLOUDWATCH',
        'group': 'heat_api_cloudwatch',
        'make_public': True,
        'hosts': []
    },
    'heat_api': {
        'port': 8004,
        'backend_port': 8004,
        'mon_type': '/' + PART + '/RPC-MON-EXT-ENDPOINT',
        'group': 'heat_api',
        'make_public': True,
        'hosts': []
    },
    'keystone_admin': {
        'port': 35357,
        'backend_port': 35357,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_HTTP_KEYSTONE_ADMIN',
        'group': 'keystone',
        'hosts': []
    },
    'keystone_service': {
        'port': 5000,
        'backend_port': 5000,
        'mon_type': '/' + PART + '/RPC-MON-EXT-ENDPOINT',
        'group': 'keystone',
        'make_public': True,
        'hosts': []
    },
    'neutron_server': {
        'port': 9696,
        'backend_port': 9696,
        'mon_type': '/' + PART + '/RPC-MON-EXT-ENDPOINT',
        'group': 'neutron_server',
        'make_public': True,
        'hosts': []
    },
    'nova_api_ec2': {
        'port': 8773,
        'backend_port': 8773,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_TCP_NOVA_API_EC2',
        'group': 'nova_api_os_compute',
        'make_public': True,
        'hosts': []
    },
    'nova_api_metadata': {
        'port': 8775,
        'backend_port': 8775,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_HTTP_NOVA_API_METADATA',
        'group': 'nova_api_metadata',
        'hosts': []
    },
    'nova_api_os_compute': {
        'port': 8774,
        'backend_port': 8774,
        'mon_type': '/' + PART + '/RPC-MON-EXT-ENDPOINT',
        'group': 'nova_api_os_compute',
        'make_public': True,
        'hosts': []
    },
    'nova_console': {
        'port': 6082,
        'backend_port': 6082,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_HTTP_NOVA_SPICE_CONSOLE',
        'group': 'nova_console',
        'hosts': [],
        'ssl_impossible': True,
        'make_public': True,
        'persist': True
    },
    'cinder_api': {
        'port': 8776,
        'backend_port': 8776,
        'mon_type': '/' + PART + '/RPC-MON-EXT-ENDPOINT',
        'group': 'cinder_api',
        'make_public': True,
        'hosts': []
    },
    'horizon': {
        'port': 80,
        'backend_port': 80,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_HTTP_HORIZON',
        'group': 'horizon',
        'hosts': [],
    },
    'horizon_ssl': {
        'port': 443,
        'backend_port': 443,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_HTTPS_HORIZON_SSL',
        'group': 'horizon',
        'hosts': [],
        'make_public': True,
        'persist': True,
        'backend_ssl': True
    },
    'elasticsearch': {
        'port': 9200,
        'backend_port': 9200,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_TCP_ELASTICSEARCH',
        'group': 'elasticsearch',
        'hosts': []
    },
    'kibana': {
        # Note: public port 8888 maps to backend port 80.
        'port': 8888,
        'backend_port': 80,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_TCP_KIBANA',
        'group': 'kibana',
        'priority': True,
        'hosts': []
    },
    'kibana_ssl': {
        'port': 8443,
        'backend_port': 8443,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_TCP_KIBANA_SSL',
        'group': 'kibana',
        'priority': True,
        'hosts': [],
        'make_public': True,
        'persist': True,
        'backend_ssl': True
    },
    'swift': {
        'port': 8080,
        'backend_port': 8080,
        'mon_type': '/' + PART + '/RPC-MON-EXT-ENDPOINT',
        'group': 'swift_proxy',
        'make_public': True,
        'hosts': []
    },
    'repo': {
        'port': 8181,
        'backend_port': 8181,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_HTTP_REPO',
        'group': 'pkg_repo',
        'priority': True,
        'hosts': []
    }
}
def recursive_host_get(inventory, group_name, host_dict=None):
    """Recursively collect hosts for ``group_name`` into ``host_dict``.

    Child groups are walked first; then the group's own hosts are appended
    as ``{'hostname': ..., 'container_address': ...}`` entries, deduplicated
    by hostname.

    :param inventory: ``dict`` OpenStack dynamic-inventory structure
    :param group_name: ``str`` group to look up in the inventory
    :param host_dict: ``dict`` accumulator; a ``'hosts'`` list is created
        when missing
    :returns: ``dict`` the populated ``host_dict``
    """
    if host_dict is None:
        host_dict = {}
    # Bug fix: guarantee the accumulator list exists.  Previously, calling
    # this with the default host_dict raised KeyError on host_dict['hosts'].
    host_dict.setdefault('hosts', [])

    inventory_group = inventory.get(group_name)
    if not inventory_group:
        print('Inventory group "%s" not found, skipping.' % group_name)
        return host_dict

    if inventory_group.get('children'):
        for child in inventory_group['children']:
            recursive_host_get(
                inventory=inventory, group_name=child, host_dict=host_dict
            )

    if inventory_group.get('hosts'):
        # Bug fix: dedupe by hostname.  The old check compared the hostname
        # string against a list of node dicts, so it never matched and the
        # same host could be appended once per group that contained it.
        known_hostnames = set(node['hostname'] for node in host_dict['hosts'])
        for host in inventory_group['hosts']:
            if host not in known_hostnames:
                ca = inventory['_meta']['hostvars'][host]['container_address']
                host_dict['hosts'].append({
                    'hostname': host,
                    'container_address': ca
                })
                known_hostnames.add(host)

    return host_dict
def build_pool_parts(inventory):
    """Fill every POOL_PARTS entry's 'hosts' list from the inventory.

    Each pool's 'group' key names the inventory group whose hosts become
    pool members; the entry dict itself is used as the accumulator.
    """
    for pool in POOL_PARTS.itervalues():
        recursive_host_get(
            inventory, group_name=pool['group'], host_dict=pool
        )
    return POOL_PARTS
def file_find(filename, user_file=None, pass_exception=False):
    """Return the path to a file.

    If no file is found the system will exit unless ``pass_exception`` is
    True, in which case ``False`` is returned.

    The file lookup will be done in the following directories:
        /etc/openstack_deploy/
        $HOME/openstack_deploy/
        $(pwd)/

    :param filename: ``str`` Name of the file to find
    :param user_file: ``str`` Additional location to look in FIRST for a file
    :param pass_exception: ``bool`` Return False instead of raising
        SystemExit when the file cannot be found
    :returns: ``str`` path to the file, or ``False``
    """
    file_check = [
        os.path.join(
            '/etc', 'openstack_deploy', filename
        ),
        # Bug fix: os.environ.get('HOME') returns None when HOME is unset,
        # which made os.path.join raise TypeError; expanduser always
        # returns a string.
        os.path.join(
            os.path.expanduser('~'), 'openstack_deploy', filename
        ),
        os.path.join(
            os.getcwd(), filename
        )
    ]

    if user_file is not None:
        file_check.insert(0, os.path.expanduser(user_file))

    for candidate in file_check:
        if os.path.isfile(candidate):
            return candidate

    if pass_exception is False:
        raise SystemExit('No file found at: %s' % file_check)
    else:
        return False
def args():
    """Setup argument Parsing.

    :returns: ``dict`` of the parsed command-line options
    """
    parser = argparse.ArgumentParser(
        usage='%(prog)s',
        description='Rackspace Openstack, Inventory Generator',
        epilog='Inventory Generator Licensed "Apache 2.0"')

    parser.add_argument(
        '-f',
        '--file',
        help='Inventory file. Default: [ %(default)s ]',
        required=False,
        default='openstack_inventory.json'
    )
    parser.add_argument(
        '-s',
        '--snat-pool-address',
        # Typo fix: "seperate" -> "separate".
        help='LB Main SNAT pool address for [ RPC_SNATPOOL ], for'
             ' multiple snat pool addresses comma separate the ip'
             ' addresses. By default this IP will be .15 from within your'
             ' containers_cidr as found within inventory.',
        required=False,
        default=None
    )
    parser.add_argument(
        '--limit-source',
        help='Limit available connections to the source IP for all source'
             ' limited entries.',
        required=False,
        default=None
    )
    parser.add_argument(
        '--ssl-public-ip',
        help='Public IP address for the F5 to use.',
        required=False,
        default=None
    )
    parser.add_argument(
        '--ssl-domain-name',
        help='Name of the domain that will have an ssl cert.',
        required=False,
        default=None
    )
    parser.add_argument(
        '--sec-host-network',
        help='Security host network address and netmask.'
             ' EXAMPLE: "192.168.1.1:255.255.255.0"',
        required=False,
        default=None
    )
    parser.add_argument(
        '--sec-container-network',
        help='Security container network address and netmask.'
             ' EXAMPLE: "192.168.1.1:255.255.255.0"',
        required=False,
        default=None
    )
    parser.add_argument(
        '--sec-public-vlan-name',
        # Bug fix: the help text was a copy-paste of --sec-container-network;
        # this option actually names the public VLAN used by the security
        # virtual servers.
        help='Name of the public VLAN used for the security entries.'
             ' EXAMPLE: "PUBLIC_VLAN"',
        required=False,
        default=None
    )
    parser.add_argument(
        '--galera-monitor-user',
        help='Name of the user that will be available for the F5 to pull when'
             ' monitoring Galera.',
        required=False,
        default='openstack'
    )
    parser.add_argument(
        '--print',
        help='Print the script to screen, as well as write it out',
        required=False,
        default=False,
        action='store_true'
    )
    parser.add_argument(
        '-e',
        '--export',
        help='Export the generated F5 configuration script.'
             ' Default: [ %(default)s ]',
        required=False,
        default=os.path.join(
            os.path.expanduser('~/'), 'rpc_f5_config.sh'
        )
    )
    parser.add_argument(
        '-S',
        '--Superman',
        help='Yes, its Superman ... strange visitor from another planet,'
             'who came to Earth with powers and abilities far beyond those of mortal men! '
             'Superman ... who can change the course of mighty rivers, bend steel in his bare hands,'
             'and who, disguised as Clark Kent, mild-mannered reporter for a great metropolitan newspaper,'
             'fights a never-ending battle for truth, justice, and the American way!',
        required=False,
        default=False,
        action='store_true'
    )

    return vars(parser.parse_args())
def main():
    """Run the main application.

    Reads the OpenStack dynamic-inventory JSON, renders a tmsh shell script
    (SNAT pool, monitors, nodes, pools, virtual servers, optional SSL and
    security entries) and writes it to the --export path.
    """
    # Parse user args
    user_args = args()

    # Get the contents of the system environment json
    environment_file = file_find(filename=user_args['file'])
    with open(environment_file, 'rb') as f:
        inventory_json = json.loads(f.read())

    # Accumulators for the different sections of the generated script.
    commands = []
    nodes = []
    pools = []
    virts = []
    sslvirts = []
    pubvirts = []

    # Security iRule plus the manual steps for installing the external
    # monitor (the " --> ... <--" lines are operator instructions, not
    # tmsh commands).
    commands.extend([
        '### CREATE SECURITY iRULE ###',
        'run util bash',
        'tmsh create ltm rule /' + PART + '/' + PREFIX_NAME + '_DISCARD_ALL when CLIENT_ACCEPTED { discard }',
        'exit',
        '### CREATE EXTERNAL MONITOR ###',
        ' --> Upload External monitor file to disk <--',
        ' run util bash',
        ' cd /config/monitors/',
        ' vi RPC-MON-EXT-ENDPOINT.monitor',
        ' --> Copy and Paste the External monitor into vi <--',
        ' create sys file external-monitor /' + PART + '/RPC-MON-EXT-ENDPOINT { source-path file:///config/monitors/RPC-MON-EXT-ENDPOINT.monitor }',
        ' save sys config',
        ' create ltm monitor external /' + PART + '/RPC-MON-EXT-ENDPOINT { interval 20 timeout 61 run /' + PART + '/RPC-MON-EXT-ENDPOINT }\n'
    ])

    # SSL cert/key upload and client/server SSL profiles, only when a
    # domain name was provided.
    if user_args['ssl_domain_name']:
        commands.extend([
            '### UPLOAD SSL CERT KEY PAIR ###',
            'cd /RPC',
            'install sys crypto cert /' + PART + '/%(ssl_domain_name)s.crt from-editor'
            % user_args,
            ' --> Copy and Paste provided domain cert for public api endpoint <--',
            'install sys crypto key /' + PART + '/%(ssl_domain_name)s.key from-editor'
            % user_args,
            ' --> Copy and Paste provided domain key for public api endpoint <--',
            'cd /Common\n',
            '### CREATE SSL PROFILES ###',
            ('create ltm profile client-ssl'
             ' /' + PART + '/' + PREFIX_NAME + '_PROF_SSL_%(ssl_domain_name)s'
             ' { cert /' + PART + '/%(ssl_domain_name)s.crt key'
             ' /' + PART + '/%(ssl_domain_name)s.key defaults-from clientssl }')
            % user_args,
            'create ltm profile server-ssl /' + PART + '/' + PREFIX_NAME + '_PROF_SSL_SERVER { defaults-from /Common/serverssl }\n'
            % user_args,
        ])

    # Easter egg (Python 2 print statements, like the rest of this script).
    if user_args['Superman']:
        print " ************************** "
        print " .*##*:*####***:::**###*:######*. "
        print " *##: .###* *######:,##* "
        print " *##: :####: *####*. :##: "
        print " *##,:########**********:, :##: "
        print " .#########################*, *#* "
        print " *#########################*##: "
        print " *##, ..,,::**#####: "
        print " ,##*,*****, *##* "
        print " *#########*########: "
        print " *##*:*******###* "
        print " .##*. ,##* "
        print " :##* *##, "
        print " *####: "
        print " :, "
        # Kal-El
        # SUPERMAN
        # JNA

    pool_parts = build_pool_parts(inventory=inventory_json)
    lb_vip_address = inventory_json['all']['vars']['internal_lb_vip_address']

    for key, value in pool_parts.iteritems():
        value['group_name'] = key.upper()
        value['vs_name'] = '%s_VS_%s' % (
            PREFIX_NAME, value['group_name']
        )
        value['pool_name'] = '%s_POOL_%s' % (
            PREFIX_NAME, value['group_name']
        )
        node_data = []
        # Priority-group members start at 100 and decrease by 5 per node.
        priority = 100
        for node in value['hosts']:
            node['node_name'] = '%s_NODE_%s' % (PREFIX_NAME, node['hostname'])
            nodes.append(NODES % node)
            if value.get('persist'):
                persist = PERSIST_OPTION
            else:
                persist = str()
            virtual_dict = {
                'port': value['port'],
                'vs_name': value['vs_name'],
                'pool_name': value['pool_name'],
                'internal_lb_vip_address': lb_vip_address,
                'persist': persist,
                'ssl_domain_name': user_args['ssl_domain_name'],
                'ssl_public_ip': user_args['ssl_public_ip'],
            }
            ##########################################
            # The "not in" checks dedupe: each pool yields one virtual
            # server even though this runs once per node.
            virt = '%s' % VIRTUAL_ENTRIES % virtual_dict
            if virt not in virts:
                virts.append(virt)
            if user_args['ssl_public_ip']:
                if not value.get('backend_ssl'):
                    virtual_dict['ssl_profiles'] = (
                        '/' + PART + '/' + PREFIX_NAME + '_PROF_SSL_%(ssl_domain_name)s { context clientside }'
                    ) % user_args
                else:
                    virtual_dict['ssl_profiles'] = '/' + PART + '/' + PREFIX_NAME + '_PROF_SSL_SERVER { context serverside } /' + PART + '/' + PREFIX_NAME + '_PROF_SSL_%(ssl_domain_name)s { context clientside }'% user_args
                if value.get('make_public'):
                    if value.get ('ssl_impossible'):
                        virtual_dict['vs_name'] = '%s_VS_%s' % (
                            'RPC_PUB', value['group_name']
                        )
                        pubvirt = (
                            '%s\n'
                        ) % PUB_NONSSL_VIRTUAL_ENTRIES % virtual_dict
                        if pubvirt not in pubvirts:
                            pubvirts.append(pubvirt)
                    else:
                        virtual_dict['vs_name'] = '%s_VS_%s' % (
                            'RPC_PUB_SSL', value['group_name']
                        )
                        sslvirt = '%s' % PUB_SSL_VIRTUAL_ENTRIES % virtual_dict
                        if sslvirt not in sslvirts:
                            sslvirts.append(sslvirt)
            if value.get('priority') is True:
                node_data.append(
                    '%s:%s %s' % (
                        node['node_name'],
                        value['backend_port'],
                        PRIORITY_ENTRY % {'priority_int': priority}
                    )
                )
                priority -= 5
            else:
                node_data.append(
                    '%s:%s' % (
                        node['node_name'],
                        value['backend_port']
                    )
                )
        ##########################################
        value['nodes'] = ' '.join(node_data)
        pool_node = [POOL_NODE['beginning'] % value]
        if value.get('priority') is True:
            pool_node.append(POOL_NODE['priority'])
        pool_node.append(POOL_NODE['end'] % value)
        pools.append('%s' % ' '.join(pool_node))

    # define the SNAT pool address
    snat_pool_adds = user_args.get('snat_pool_address')
    if snat_pool_adds is None:
        # Default: the .15 address of the container CIDR from inventory.
        container_cidr = inventory_json['all']['vars']['container_cidr']
        network = netaddr.IPNetwork(container_cidr)
        snat_pool_adds = str(network[15])
    snat_pool_addresses = ' '.join(snat_pool_adds.split(','))
    snat_pool = '%s\n' % SNAT_POOL % {
        'snat_pool_addresses': snat_pool_addresses
    }

    # Assemble the final script in section order.
    script = [
        '#!/usr/bin/bash\n',
        r'### CREATE RPC PARTITION ###',
        'create auth partition %s\n' % PART,
        r'### SET DISPLAY PORT NUMBERS ###',
        'modify cli global-settings service number\n',
        snat_pool
    ]
    script.extend(['### CREATE MONITORS ###'])
    script.extend(['%s' % i % user_args for i in MONITORS])
    script.extend(['%s' % i for i in commands])
    script.extend(['### CREATE PERSISTENCE PROFILES ###'])
    script.extend(['%s' % i % user_args for i in PERSISTANCE])
    script.extend(['### CREATE NODES ###'])
    script.extend(['%s' % i % user_args for i in nodes])
    script.extend(['\n### CREATE POOLS ###'])
    script.extend(pools)
    script.extend(['\n### CREATE VIRTUAL SERVERS ###'])
    script.extend(virts)
    script.extend(['\n### CREATE PUBLIC SSL OFFLOADED VIRTUAL SERVERS ###'])
    script.extend(sslvirts)
    script.extend(['\n### CREATE PUBLIC SSL PASS-THROUGH VIRTUAL SERVERS ###'])
    script.extend(pubvirts)

    # Optional security forwarding virtual servers.
    if user_args['sec_host_network']:
        hostnet, netmask = user_args['sec_host_network'].split(':')
        if not user_args['sec_public_vlan_name']:
            raise SystemExit('Please set the [ --sec-public-vlan-name ] value')
        script.append(
            SEC_HOSTNET_VIRTUAL_ENTRIES % {
                'sec_host_net': hostnet,
                'sec_host_netmask': netmask,
                'sec_public_vlan_name': user_args['sec_public_vlan_name']
            }
        )

    if user_args['sec_container_network']:
        hostnet, netmask = user_args['sec_container_network'].split(':')
        script.append(
            SEC_CONTAINER_VIRTUAL_ENTRIES % {
                'sec_container_net': hostnet,
                'sec_container_netmask': netmask
            }
        )

    script.extend(['%s\n' % i for i in END_COMMANDS])

    if user_args['print']:
        for i in script:
            print(i)

    with open(user_args['export'], 'w+') as f:
        f.writelines("\n".join(script))


if __name__ == "__main__":
    main()
| {
"content_hash": "c684ecaba670f941e995130b615724b4",
"timestamp": "",
"source": "github",
"line_count": 735,
"max_line_length": 222,
"avg_line_length": 35.30340136054422,
"alnum_prop": 0.5109449668567905,
"repo_name": "byronmccollum/rpc-openstack",
"id": "30737347758c0a9843210025506bd53cd9f4e7d8",
"size": "26608",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scripts/f5-config.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "2290"
},
{
"name": "Python",
"bytes": "201565"
},
{
"name": "Shell",
"bytes": "32033"
}
],
"symlink_target": ""
} |
"""
[2017-04-14] Challenge #310 [Hard] The Guards and the Mansion
https://www.reddit.com/r/dailyprogrammer/comments/65fkwh/20170414_challenge_310_hard_the_guards_and_the/
# Description
I recently came into some money and built myself a mansion. And I'm afraid of robbers who now want to come and steal
the rest of my money. I built my house in the middle of my property, but now I need some guard towers. I didn't make
*that* much money, so I can't build an *infinite* number of towers with an infinite number of guards - I can only
afford 3. But I do need your help - how many towers do I need to build to give my house adequate coverage, and
sufficient variety of coverage to keep thieves at bay?
For this problem ...
- Assume a Euclidean 2 dimensional space with my mansion at the center (0,0)
- My mansion is circular with a unit radius of 1
- I'll tell you the locations of the guard towers as Euclidean coordinates, for example (1,1). They may be negative.
- The towers only work if they form a triangle that fully encompasses my mansion (remember, a circle centered at (0,0))
I'll give you the locations of the towers, one at a time, as a pair of integers *x* and *y* representing the
coordinates. For *every* row of input please tell me how many different triangles I can have - that is arrangements of
3 occupied towers. I like diversity, let's keep the thieves guessing as to where the towers are occupied every night.
# Input Description
You'll be given an integer on the first line telling you how many lines of tower coordinate pairs to read. Example:
4
3 -1
-1 3
-1 -1
-5 -2
# Output Description
For *every row of input* tell me how many triangles I can make that fully enclose my mansion at (0,0) with a unit
radius of 1. Example:
0
0
1
2
# Challenge Input
10
2 -7
2 2
4 -9
-4 -6
9 3
-8 -7
6 0
-5 -6
-1 -1
-7 10
"""
def main():
    # Placeholder: the challenge solution has not been implemented yet.
    pass


if __name__ == "__main__":
    main()
| {
"content_hash": "d52f5a28eba410cdf7406db3ce00b082",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 119,
"avg_line_length": 36.592592592592595,
"alnum_prop": 0.7074898785425101,
"repo_name": "DayGitH/Python-Challenges",
"id": "98768a3d28dade67c67deea62face46e180bbaf5",
"size": "1976",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DailyProgrammer/DP20170414C.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "5002"
},
{
"name": "Python",
"bytes": "2471582"
}
],
"symlink_target": ""
} |
import json
import re
import sys
from sandpiper.concurrency import KeyLocker
try:
import redis
except ImportError as e:
raise ImportError('Failed to import "redis" ({})'.format(e))
from .abstract import Abstract
from .abstract import NonJSONStringError, SearchError
import logging
require_backward_compatibility = sys.version_info.major == 2
def create_client(**pool_args):
    """Create a Redis client backed by a blocking connection pool.

    Pass either ``url=...`` or at least ``host=...`` (``port`` defaults to
    6379); all other keyword arguments are forwarded to the pool.

    :raises ValueError: when neither ``url`` nor ``host`` is given
    """
    if 'url' in pool_args:
        pool = redis.BlockingConnectionPool.from_url(pool_args['url'])
        return redis.Redis(connection_pool=pool)

    if 'host' not in pool_args:
        raise ValueError('"host" is not defined.')
    pool_args.setdefault('port', 6379)

    return redis.Redis(connection_pool=redis.BlockingConnectionPool(**pool_args))
class Adapter(Abstract):
    """Adapter for Redis.

    Values are transparently JSON encoded/decoded unless
    ``auto_json_convertion`` is disabled.  Keys are optionally prefixed with
    ``<namespace><delimiter>``.
    """
    def __init__(self, storage=None, namespace=None, delimiter=':', auto_json_convertion=True):
        # storage is a redis client (see create_client); _re_namespace is
        # used by find() to strip the namespace prefix from returned keys.
        self._storage = storage
        self._namespace = namespace or ''
        self._delimiter = delimiter
        self._re_namespace = re.compile('^{}:'.format(namespace))
        self._auto_json_convertion = auto_json_convertion
        self._key_locker = KeyLocker()

    @property
    def api(self):
        """The underlying Redis client."""
        return self._storage

    def get(self, key):
        """Return the (optionally JSON-decoded) value stored at ``key``.

        Falsy raw values (e.g. a miss returning None) are passed through.

        :raises NonJSONStringError: when JSON conversion is enabled and the
            stored value is not a decodable JSON string.
        """
        actual_key = self._actual_key(key)
        value = self._key_locker.synchronize(None, self._storage.get, actual_key)
        if not value:
            return value
        if not self._auto_json_convertion:
            return value

        if not require_backward_compatibility and not isinstance(value, bytes):
            raise NonJSONStringError('Unable to decode the value (preemptive, py3, bytes)', value)
        if isinstance(value, bytes):
            raw = value.decode('utf-8')
        elif isinstance(value, str):
            # NOTE This is mainly to support Python 2.
            raw = value
        else:
            raise NonJSONStringError('Unable to decode the value (preemptive)', value)

        try:
            return json.loads(raw)
        except ValueError:
            # Bug fix: json.decoder.JSONDecodeError does not exist on
            # Python 2 (this module explicitly supports it, see
            # require_backward_compatibility); on Python 3 JSONDecodeError
            # subclasses ValueError, so ValueError covers both runtimes.
            raise NonJSONStringError('Unable to decode the value (final)', value)

    def set(self, key, value, ttl=None):
        """Store ``value`` at ``key``, optionally expiring after ``ttl`` seconds."""
        actual_key = self._actual_key(key)
        encoded = value
        if self._auto_json_convertion:
            encoded = json.dumps(value)
        self._key_locker.synchronize(None, self._storage.set, actual_key, encoded)
        if ttl is not None and ttl > 0:
            self._storage.expire(actual_key, ttl)

    def remove(self, key):
        """Delete ``key`` from storage."""
        actual_key = self._actual_key(key)
        self._key_locker.synchronize(None, self._storage.delete, actual_key)

    def find(self, pattern='*', only_keys=False, ignore_non_decodable=True):
        """Search for keys matching ``pattern`` (namespace stripped).

        :returns: a list of keys when ``only_keys``, otherwise a
            ``{key: decoded_value}`` dict (non-decodable values map to None)
        :raises SearchError: when a value cannot be decoded and
            ``ignore_non_decodable`` is False
        """
        actual_pattern = self._actual_key(pattern)
        keys = []
        if require_backward_compatibility:
            keys.extend([
                self._re_namespace.sub('', key)
                for key in self._storage.keys(actual_pattern)
            ])
        else:
            # Python 3: the client returns bytes keys.
            keys.extend([
                self._re_namespace.sub('', key.decode('utf-8'))
                for key in self._storage.keys(actual_pattern)
                if isinstance(key, bytes)
            ])
        if only_keys:
            return keys

        result = {}
        for key in keys:
            data = None
            try:
                data = self.get(key)
            except NonJSONStringError:
                if not ignore_non_decodable:
                    raise SearchError(pattern)
            result[key] = data
        return result

    def _actual_key(self, key):
        # Prefix with "<namespace><delimiter>" when a namespace is set.
        if not self._namespace:
            return key
        return '{}{}{}'.format(self._namespace, self._delimiter, key)
| {
"content_hash": "222a54b83ede79fc1a2c2c9d2915c2cf",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 103,
"avg_line_length": 28.274074074074075,
"alnum_prop": 0.5889441970133613,
"repo_name": "shiroyuki/sandpiper",
"id": "a0e56d67ee509b9f04ec6dbd625690f99de76efc",
"size": "3817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandpiper/adapter/xredis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2102"
},
{
"name": "Python",
"bytes": "20735"
}
],
"symlink_target": ""
} |
"""Unit tests for reviewboard.diffviewer.views.DownloadPatchErrorBundleView."""
import kgb
from django.http import HttpResponse
from reviewboard.diffviewer.errors import PatchError
from reviewboard.diffviewer.renderers import DiffRenderer
from reviewboard.site.urlresolvers import local_site_reverse
from reviewboard.testing import TestCase
class DownloadPatchErrorBundleViewTests(kgb.SpyAgency, TestCase):
    """Unit tests for DownloadPatchErrorBundleView."""

    fixtures = ['test_users', 'test_scmtools']

    def test_sends_404_when_no_patch_error(self):
        """Testing DownloadPatchErrorBundleView returns 404 when
        no patch error is raised by the renderer.
        """
        review_request, diffset, filediff_id = self._make_review_diff()

        # This does not raise a PatchError, so there is no patch error
        # bundle to download.
        self.spy_on(DiffRenderer.render_to_response,
                    op=kgb.SpyOpReturn(HttpResponse()))

        response = self._request_bundle(review_request, diffset, filediff_id)

        self.assertEqual(response.status_code, 404)

    def test_sends_bundle_when_patch_error(self):
        """Testing DownloadPatchErrorBundleView sends a patch error bundle
        when a PatchError is raised by the renderer.
        """
        review_request, diffset, filediff_id = self._make_review_diff()

        error = PatchError(filename='filename',
                           error_output='error_output',
                           orig_file=b'orig_file',
                           new_file=b'new_file',
                           diff=b'diff',
                           rejects=b'rejects')
        self.spy_on(DiffRenderer.render_to_response,
                    op=kgb.SpyOpRaise(error))

        response = self._request_bundle(review_request, diffset, filediff_id)

        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/zip')

    def _make_review_diff(self):
        """Create a review request with one diffset/commit.

        Returns the review request, diffset, and first FileDiff's ID.
        """
        review_request = self.create_review_request(publish=True,
                                                    create_repository=True)
        diffset = self.create_diffset(review_request=review_request)
        self.create_diffcommit(diffset=diffset)

        return review_request, diffset, diffset.files.all()[0].pk

    def _request_bundle(self, review_request, diffset, filediff_id):
        """GET the patch-error-bundle URL for the given diff objects."""
        return self.client.get(
            local_site_reverse(
                'patch-error-bundle',
                kwargs={
                    'review_request_id': review_request.pk,
                    'revision': diffset.revision,
                    'filediff_id': filediff_id,
                }))
| {
"content_hash": "9513abe3f771fb17afaedb2ea7f0c3dc",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 79,
"avg_line_length": 41.070422535211264,
"alnum_prop": 0.5847050754458162,
"repo_name": "reviewboard/reviewboard",
"id": "83385d43ebddbc7b3a1e9544a06334f85523fa4c",
"size": "2916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewboard/diffviewer/tests/test_download_patch_error_bundle_view.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10167"
},
{
"name": "Dockerfile",
"bytes": "7721"
},
{
"name": "HTML",
"bytes": "226489"
},
{
"name": "JavaScript",
"bytes": "3991608"
},
{
"name": "Less",
"bytes": "438017"
},
{
"name": "Python",
"bytes": "9186415"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
from django.utils.text import slugify
def gen_unique_slug(apps, schema_editor):
    """Backfill a unique slug for every FrequentlyAskedQuestion lacking one.

    The slug is derived from the question text; collisions are resolved by
    appending ``-1``, ``-2``, ... while keeping within the field's
    max_length.
    """
    faq_model = apps.get_model('cms', 'FrequentlyAskedQuestion')
    for faq in faq_model.objects.all():
        if faq.slug:
            continue
        limit = faq_model._meta.get_field('slug').max_length
        base = slugify(faq.question)[:limit]
        candidate = base
        suffix = 1
        # Keep appending an increasing numeric suffix until unique,
        # truncating the base so the whole slug still fits in the column.
        while faq_model.objects.filter(slug=candidate).exists():
            candidate = "{orig}-{count}".format(
                orig=base[:limit - len(str(suffix)) - 1],
                count=suffix)
            suffix += 1
        faq.slug = candidate
        faq.save()
class Migration(migrations.Migration):
    """Add a unique slug to FrequentlyAskedQuestion in three steps.

    A unique column cannot be added directly to a table that already has
    rows, hence: nullable add, data backfill, then the unique constraint.
    """

    dependencies = [
        ('cms', '0025_infolinks'),
    ]

    operations = [
        # First add a new field for slug
        migrations.AddField(
            model_name='frequentlyaskedquestion',
            name='slug',
            field=models.SlugField(default=None, null=True),
        ),
        # Then populate existing rows with unique slugs
        migrations.RunPython(gen_unique_slug, reverse_code=migrations.RunPython.noop),
        # Now can make this field unique
        migrations.AlterField(
            model_name='frequentlyaskedquestion',
            name='slug',
            field=models.SlugField(blank=True, default=None, unique=True),
        ),
    ]
| {
"content_hash": "da162cc970955d3570a6bd30de366eaa",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 96,
"avg_line_length": 35.723404255319146,
"alnum_prop": 0.6057176891006552,
"repo_name": "mitodl/micromasters",
"id": "f7d8057df654835aa9a261ffb322b58aebcdebe7",
"size": "1752",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cms/migrations/0026_frequentlyaskedquestion_slug.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9764"
},
{
"name": "Dockerfile",
"bytes": "958"
},
{
"name": "HTML",
"bytes": "84519"
},
{
"name": "JavaScript",
"bytes": "1462849"
},
{
"name": "Procfile",
"bytes": "407"
},
{
"name": "Python",
"bytes": "2098424"
},
{
"name": "SCSS",
"bytes": "135082"
},
{
"name": "Shell",
"bytes": "10764"
}
],
"symlink_target": ""
} |
"""
"""
import sys, os
sys.path.append('..')
from mendeley import API
def test_definitions():
    """Smoke-test the Mendeley 'definitions' endpoints using the public client.

    Each endpoint is simply called and a progress line printed; any HTTP or
    parsing failure surfaces as an uncaught exception.
    """
    client = API(user_name='public')
    endpoint_checks = (
        ('academic_statuses', 'Retrieved academic statuses'),
        ('subject_areas', 'Retrieved subject areas'),
        ('document_types', 'Retrieved document types'),
    )
    for endpoint_name, progress_message in endpoint_checks:
        getattr(client.definitions, endpoint_name)()
        print(progress_message)
    print('Finished running "Definitions" tests')
if __name__ == '__main__':
print('Running "Definitions" tests')
test_definitions() | {
"content_hash": "02de9c07e17beaef90ae9af2bc792433",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 57,
"avg_line_length": 20.88888888888889,
"alnum_prop": 0.6578014184397163,
"repo_name": "JimHokanson/mendeley_python",
"id": "b1e07cebed5b0cb78d31ac9c0866781eaf83a9e3",
"size": "588",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_definitions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77385"
}
],
"symlink_target": ""
} |
__revision__ = "test/MSVS/vs-14.0Exp-exec.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Test that we can actually build a simple program using our generated
Visual Studio 14.0 project (.vcxproj) and solution (.sln) files
using Visual C++ 14.0 Express edition.
"""
import os
import sys
import TestSConsMSVS
test = TestSConsMSVS.TestSConsMSVS()
# This test only makes sense on Windows with VS 14.0 Express available;
# otherwise skip rather than fail.
if sys.platform != 'win32':
    msg = "Skipping Visual Studio test on non-Windows platform '%s'\n" % sys.platform
    test.skip_test(msg)
msvs_version = '14.0Exp'
if not msvs_version in test.msvs_versions():
    msg = "Visual Studio %s not installed; skipping test.\n" % msvs_version
    test.skip_test(msg)
# Let SCons figure out the Visual Studio environment variables for us and
# print out a statement that we can exec to suck them into our external
# environment so we can execute devenv and really try to build something.
# NOTE: the stdin string below is an SConstruct executed by SCons itself
# (hence the Python-2 print statement inside it) — its contents are runtime
# data, not this script's code.
test.run(arguments = '-n -q -Q -f -', stdin = """\
env = Environment(tools = ['msvc'], MSVS_VERSION='%(msvs_version)s')
print "os.environ.update(%%s)" %% repr(env['ENV'])
""" % locals())
exec(test.stdout())
# Build a minimal project in a directory whose name contains a space, to
# exercise path quoting in the generated project/solution files.
test.subdir('sub dir')
test.write(['sub dir', 'SConstruct'], """\
env=Environment(MSVS_VERSION = '%(msvs_version)s')
env.MSVSProject(target = 'foo.vcxproj',
                srcs = ['foo.c'],
                buildtarget = 'foo.exe',
                variant = 'Release')
env.Program('foo.c')
""" % locals())
test.write(['sub dir', 'foo.c'], r"""
int
main(int argc, char *argv)
{
    printf("foo.c\n");
    exit (0);
}
""")
test.run(chdir='sub dir', arguments='.')
test.vcproj_sys_path(test.workpath('sub dir', 'foo.vcxproj'))
# devenv needs the system DLL directory on PATH to run the built binary.
import SCons.Platform.win32
system_dll_path = os.path.join( SCons.Platform.win32.get_system_root(), 'System32' )
os.environ['PATH'] = os.environ['PATH'] + os.pathsep + system_dll_path
# Drive the real Visual Studio executable against the generated solution,
# then run the resulting program and check its output.
test.run(chdir='sub dir',
         program=[test.get_msvs_executable(msvs_version)],
         arguments=['foo.sln', '/build', 'Release'])
test.run(program=test.workpath('sub dir', 'foo'), stdout="foo.c\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "281858b9377144419577379ff5b23c2f",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 102,
"avg_line_length": 25.458823529411763,
"alnum_prop": 0.6617375231053605,
"repo_name": "EmanueleCannizzaro/scons",
"id": "f933d07f48dc676a63c32c5297b48dcd17e3061c",
"size": "3299",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/MSVS/vs-14.0Exp-exec.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2491"
},
{
"name": "C",
"bytes": "659"
},
{
"name": "C++",
"bytes": "598"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1997"
},
{
"name": "HTML",
"bytes": "817651"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "29978"
},
{
"name": "Python",
"bytes": "7510453"
},
{
"name": "Roff",
"bytes": "556545"
},
{
"name": "Ruby",
"bytes": "11074"
},
{
"name": "Shell",
"bytes": "52682"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
} |
"""momentsinfo_convroll4_doublescale_fs5"""
import numpy as np
import theano
import theano.tensor as T
import lasagne as nn
import data
import load
import nn_plankton
import dihedral
import tmp_dnn
import tta
# Training hyperparameters for this blend configuration.
batch_size = 128
chunk_size = 32768
num_chunks_train = 240
momentum = 0.9
# Step-wise learning-rate decay, keyed by chunk index.
learning_rate_schedule = {
    0: 0.001,
    100: 0.0001,
    200: 0.00001,
}
validate_every = 40
save_every = 40
# Precomputed prediction files from an earlier model; this config trains a
# small correction network on top of them (see build_model below).
sdir = "/mnt/storage/users/avdnoord/git/kaggle-plankton/predictions/"
train_pred_file = sdir+"train--pl_blend5_convroll5_preinit--pl_blend5_convroll5_preinit-schaap-20150313-181700--avg-probs.npy"
valid_pred_file = sdir+"valid--pl_blend5_convroll5_preinit--pl_blend5_convroll5_preinit-schaap-20150313-181700--avg-probs.npy"
test_pred_file = sdir+"test--pl_blend5_convroll5_preinit--pl_blend5_convroll5_preinit-schaap-20150313-181700--avg-probs.npy"
data_loader = load.PredictionsWithMomentsDataLoader(train_pred_file=train_pred_file, valid_pred_file=valid_pred_file, test_pred_file=test_pred_file,
    num_chunks_train=num_chunks_train, chunk_size=chunk_size)
# Generator factories consumed by the training harness.
create_train_gen = lambda: data_loader.create_random_gen()
create_eval_train_gen = lambda: data_loader.create_fixed_gen("train")
create_eval_valid_gen = lambda: data_loader.create_fixed_gen("valid")
create_eval_test_gen = lambda: data_loader.create_fixed_gen("test")
def build_model():
    """Build the blend network.

    Combines the log of precomputed class predictions with a small MLP over
    the 7 moment/size features, then renormalizes with a softmax.
    """
    l_pred_in = nn.layers.InputLayer((batch_size, data.num_classes))
    l_feat_in = nn.layers.InputLayer((batch_size, 7))
    # Two hidden ReLU layers on the feature input, then a linear projection
    # into class space.
    l_feat_h1 = nn.layers.DenseLayer(l_feat_in, num_units=80, W=nn_plankton.Orthogonal('relu'), b=nn.init.Constant(0.1))
    l_feat_h2 = nn.layers.DenseLayer(l_feat_h1, num_units=80, W=nn_plankton.Orthogonal('relu'), b=nn.init.Constant(0.1))
    l_feat_proj = nn.layers.DenseLayer(l_feat_h2, num_units=data.num_classes, W=nn_plankton.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=None)
    # Work in log-probability space so the elementwise sum acts as a
    # multiplicative correction to the incoming predictions.
    l_log_pred = nn_plankton.NonlinLayer(l_pred_in, T.log)
    l_combined = nn.layers.ElemwiseSumLayer([l_log_pred, l_feat_proj])
    l_softmax = nn_plankton.NonlinLayer(l_combined, nonlinearity=T.nnet.softmax)
    return [l_pred_in, l_feat_in], l_softmax
def build_objective(l_ins, l_out):
    # Log-loss objective with L2 weight decay on all non-bias parameters.
    # NOTE: Python-2 print statement — this codebase predates Python 3.
    print "regu"
    lambda_reg = 0.002
    # lambda_reg = 0.005
    params = nn.layers.get_all_non_bias_params(l_out)
    # Sum of squared weights; added to the loss scaled by lambda_reg.
    reg_term = sum(T.sum(p**2) for p in params)
    def loss(y, t):
        return nn_plankton.log_loss(y, t) + lambda_reg * reg_term
    return nn.objectives.Objective(l_out, loss_function=loss)
# L2 0.0005 0.5646362
# L2 0.001 0.560494
# L2 0.002 0.559762
# L2 0.01 0.560949
# L2 0.05 0.563861
# 0.559762
# 1 layer 64
| {
"content_hash": "e74135245005fd67549b2d134c76dad1",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 148,
"avg_line_length": 30.74418604651163,
"alnum_prop": 0.6993192133131618,
"repo_name": "benanne/kaggle-ndsb",
"id": "8ae1efe4eb3cddf0328681be496b884bceaff620",
"size": "2645",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "configurations/featmomentsinfo_pl_blend5_convroll5_preinit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "581526"
}
],
"symlink_target": ""
} |
# Django project settings (old-style single DATABASE_* settings, Django <1.2 era).
from os.path import join, dirname
PROJECT_ROOT = dirname(__file__)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3'           # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = 'data.db'             # Or path to database file if using sqlite3.
DATABASE_USER = ''             # Not used with sqlite3.
DATABASE_PASSWORD = ''         # Not used with sqlite3.
DATABASE_HOST = ''             # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = ''             # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'ea7zs8w3c*h0!o0wo2n!0h(4vu%pm$rinuq37)$*wz@&!w2_g7'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
)
# IpNonGrataeMiddleware runs first so blocked IPs are rejected before any
# other middleware does work.
MIDDLEWARE_CLASSES = (
    'nongratae.middleware.IpNonGrataeMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    join(PROJECT_ROOT, 'templates'),
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django_extensions',
    'nongratae',
)
| {
"content_hash": "5223e64cc7caa00711a5c687c37681bb",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 108,
"avg_line_length": 34.29761904761905,
"alnum_prop": 0.7056577577230129,
"repo_name": "jjdelc/django-ip-nongratae",
"id": "9f88080c0a86fea277fd6294d84e8ecffe95b318",
"size": "2920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "8421"
}
],
"symlink_target": ""
} |
import datetime
import random
import time
import glanceclient.exc
from nova import context
from nova import exception
from nova.image import glance
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.glance import stubs as glance_stubs
from nova.tests import matchers
class NullWriter(object):
    """Sink object satisfying the writer interface of ImageService.get.

    Accepts and discards anything written to it.
    """
    def write(self, *args, **kwargs):
        """Ignore the written data; always returns None."""
        return None
class TestGlanceSerializer(test.TestCase):
    """Round-trip tests for glance metadata (de)serialization helpers."""
    def test_serialize(self):
        # Nested list-of-dict properties must be flattened to JSON strings
        # by _convert_to_string, and restored exactly by _convert_from_string.
        metadata = {'name': 'image1',
                    'is_public': True,
                    'foo': 'bar',
                    'properties': {
                        'prop1': 'propvalue1',
                        'mappings': [
                            {'virtual': 'aaa',
                             'device': 'bbb'},
                            {'virtual': 'xxx',
                             'device': 'yyy'}],
                        'block_device_mapping': [
                            {'virtual_device': 'fake',
                             'device_name': '/dev/fake'},
                            {'virtual_device': 'ephemeral0',
                             'device_name': '/dev/fake0'}]}}
        # Expected flattened form: list-valued properties become JSON strings.
        converted_expected = {
            'name': 'image1',
            'is_public': True,
            'foo': 'bar',
            'properties': {
                'prop1': 'propvalue1',
                'mappings':
                '[{"device": "bbb", "virtual": "aaa"}, '
                '{"device": "yyy", "virtual": "xxx"}]',
                'block_device_mapping':
                '[{"virtual_device": "fake", "device_name": "/dev/fake"}, '
                '{"virtual_device": "ephemeral0", '
                '"device_name": "/dev/fake0"}]'}}
        converted = glance._convert_to_string(metadata)
        self.assertEqual(converted, converted_expected)
        # Deserializing must recover the original nested structure.
        self.assertEqual(glance._convert_from_string(converted), metadata)
class TestGlanceImageService(test.TestCase):
    """
    Tests the Glance image service.
    At a high level, the translations involved are:
    1. Glance -> ImageService - This is needed so we can support
       multiple ImageServices (Glance, Local, etc)
    2. ImageService -> API - This is needed so we can support multiple
       APIs (OpenStack, EC2)
    """
    NOW_GLANCE_OLD_FORMAT = "2010-10-11T10:30:22"
    NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000"
    class tzinfo(datetime.tzinfo):
        # Minimal tzinfo with zero UTC offset so NOW_DATETIME is
        # timezone-aware and comparable to parsed Glance timestamps.
        @staticmethod
        def utcoffset(*args, **kwargs):
            return datetime.timedelta()
    NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22, tzinfo=tzinfo())
    def setUp(self):
        super(TestGlanceImageService, self).setUp()
        fakes.stub_out_compute_api_snapshot(self.stubs)
        client = glance_stubs.StubGlanceClient()
        self.service = self._create_image_service(client)
        self.context = context.RequestContext('fake', 'fake', auth_token=True)
    def _create_image_service(self, client):
        # Return a GlanceImageService whose wrapper always uses the
        # supplied stub client instead of connecting to a real server.
        def _fake_create_glance_client(context, host, port, use_ssl, version):
            return client
        self.stubs.Set(glance, '_create_glance_client',
                       _fake_create_glance_client)
        client_wrapper = glance.GlanceClientWrapper(
                'fake', 'fake_host', 9292)
        return glance.GlanceImageService(client=client_wrapper)
    @staticmethod
    def _make_fixture(**kwargs):
        # Base image-metadata fixture; kwargs override/extend the defaults.
        fixture = {'name': None,
                   'properties': {},
                   'status': None,
                   'is_public': None}
        fixture.update(kwargs)
        return fixture
    def _make_datetime_fixture(self):
        return self._make_fixture(created_at=self.NOW_GLANCE_FORMAT,
                                  updated_at=self.NOW_GLANCE_FORMAT,
                                  deleted_at=self.NOW_GLANCE_FORMAT)
    def test_create_with_instance_id(self):
        """Ensure instance_id is persisted as an image-property"""
        fixture = {'name': 'test image',
                   'is_public': False,
                   'properties': {'instance_id': '42', 'user_id': 'fake'}}
        image_id = self.service.create(self.context, fixture)['id']
        image_meta = self.service.show(self.context, image_id)
        expected = {
            'id': image_id,
            'name': 'test image',
            'is_public': False,
            'size': None,
            'min_disk': None,
            'min_ram': None,
            'disk_format': None,
            'container_format': None,
            'checksum': None,
            'created_at': self.NOW_DATETIME,
            'updated_at': self.NOW_DATETIME,
            'deleted_at': None,
            'deleted': None,
            'status': None,
            'properties': {'instance_id': '42', 'user_id': 'fake'},
            'owner': None,
        }
        self.assertThat(image_meta, matchers.DictMatches(expected))
        image_metas = self.service.detail(self.context)
        self.assertThat(image_metas[0], matchers.DictMatches(expected))
    def test_create_without_instance_id(self):
        """
        Ensure we can create an image without having to specify an
        instance_id. Public images are an example of an image not tied to an
        instance.
        """
        fixture = {'name': 'test image', 'is_public': False}
        image_id = self.service.create(self.context, fixture)['id']
        expected = {
            'id': image_id,
            'name': 'test image',
            'is_public': False,
            'size': None,
            'min_disk': None,
            'min_ram': None,
            'disk_format': None,
            'container_format': None,
            'checksum': None,
            'created_at': self.NOW_DATETIME,
            'updated_at': self.NOW_DATETIME,
            'deleted_at': None,
            'deleted': None,
            'status': None,
            'properties': {},
            'owner': None,
        }
        actual = self.service.show(self.context, image_id)
        self.assertThat(actual, matchers.DictMatches(expected))
    def test_create(self):
        fixture = self._make_fixture(name='test image')
        num_images = len(self.service.detail(self.context))
        image_id = self.service.create(self.context, fixture)['id']
        self.assertNotEquals(None, image_id)
        self.assertEquals(num_images + 1,
                          len(self.service.detail(self.context)))
    def test_create_and_show_non_existing_image(self):
        fixture = self._make_fixture(name='test image')
        image_id = self.service.create(self.context, fixture)['id']
        self.assertNotEquals(None, image_id)
        self.assertRaises(exception.ImageNotFound,
                          self.service.show,
                          self.context,
                          'bad image id')
    def test_detail_private_image(self):
        # A private image is visible to its owning project via detail().
        fixture = self._make_fixture(name='test image')
        fixture['is_public'] = False
        properties = {'owner_id': 'proj1'}
        fixture['properties'] = properties
        self.service.create(self.context, fixture)['id']
        proj = self.context.project_id
        self.context.project_id = 'proj1'
        image_metas = self.service.detail(self.context)
        self.context.project_id = proj
        self.assertEqual(1, len(image_metas))
        self.assertEqual(image_metas[0]['name'], 'test image')
        self.assertEqual(image_metas[0]['is_public'], False)
    def test_detail_marker(self):
        # marker=ids[1] should return the 8 images after the second one.
        fixtures = []
        ids = []
        for i in range(10):
            fixture = self._make_fixture(name='TestImage %d' % (i))
            fixtures.append(fixture)
            ids.append(self.service.create(self.context, fixture)['id'])
        image_metas = self.service.detail(self.context, marker=ids[1])
        self.assertEquals(len(image_metas), 8)
        i = 2
        for meta in image_metas:
            expected = {
                'id': ids[i],
                'status': None,
                'is_public': None,
                'name': 'TestImage %d' % (i),
                'properties': {},
                'size': None,
                'min_disk': None,
                'min_ram': None,
                'disk_format': None,
                'container_format': None,
                'checksum': None,
                'created_at': self.NOW_DATETIME,
                'updated_at': self.NOW_DATETIME,
                'deleted_at': None,
                'deleted': None,
                'owner': None,
            }
            self.assertThat(meta, matchers.DictMatches(expected))
            i = i + 1
    def test_detail_limit(self):
        fixtures = []
        ids = []
        for i in range(10):
            fixture = self._make_fixture(name='TestImage %d' % (i))
            fixtures.append(fixture)
            ids.append(self.service.create(self.context, fixture)['id'])
        image_metas = self.service.detail(self.context, limit=5)
        self.assertEquals(len(image_metas), 5)
    def test_detail_default_limit(self):
        fixtures = []
        ids = []
        for i in range(10):
            fixture = self._make_fixture(name='TestImage %d' % (i))
            fixtures.append(fixture)
            ids.append(self.service.create(self.context, fixture)['id'])
        image_metas = self.service.detail(self.context)
        for i, meta in enumerate(image_metas):
            self.assertEqual(meta['name'], 'TestImage %d' % (i))
    def test_detail_marker_and_limit(self):
        fixtures = []
        ids = []
        for i in range(10):
            fixture = self._make_fixture(name='TestImage %d' % (i))
            fixtures.append(fixture)
            ids.append(self.service.create(self.context, fixture)['id'])
        image_metas = self.service.detail(self.context, marker=ids[3], limit=5)
        self.assertEquals(len(image_metas), 5)
        i = 4
        for meta in image_metas:
            expected = {
                'id': ids[i],
                'status': None,
                'is_public': None,
                'name': 'TestImage %d' % (i),
                'properties': {},
                'size': None,
                'min_disk': None,
                'min_ram': None,
                'disk_format': None,
                'container_format': None,
                'checksum': None,
                'created_at': self.NOW_DATETIME,
                'updated_at': self.NOW_DATETIME,
                'deleted_at': None,
                'deleted': None,
                'owner': None,
            }
            self.assertThat(meta, matchers.DictMatches(expected))
            i = i + 1
    def test_detail_invalid_marker(self):
        fixtures = []
        ids = []
        for i in range(10):
            fixture = self._make_fixture(name='TestImage %d' % (i))
            fixtures.append(fixture)
            ids.append(self.service.create(self.context, fixture)['id'])
        self.assertRaises(exception.Invalid, self.service.detail,
                          self.context, marker='invalidmarker')
    def test_update(self):
        fixture = self._make_fixture(name='test image')
        image = self.service.create(self.context, fixture)
        image_id = image['id']
        fixture['name'] = 'new image name'
        self.service.update(self.context, image_id, fixture)
        new_image_data = self.service.show(self.context, image_id)
        self.assertEquals('new image name', new_image_data['name'])
    def test_delete(self):
        fixture1 = self._make_fixture(name='test image 1')
        fixture2 = self._make_fixture(name='test image 2')
        fixtures = [fixture1, fixture2]
        num_images = len(self.service.detail(self.context))
        self.assertEquals(0, num_images)
        ids = []
        for fixture in fixtures:
            new_id = self.service.create(self.context, fixture)['id']
            ids.append(new_id)
        num_images = len(self.service.detail(self.context))
        self.assertEquals(2, num_images)
        self.service.delete(self.context, ids[0])
        num_images = len(self.service.detail(self.context))
        self.assertEquals(1, num_images)
    def test_show_passes_through_to_client(self):
        fixture = self._make_fixture(name='image1', is_public=True)
        image_id = self.service.create(self.context, fixture)['id']
        image_meta = self.service.show(self.context, image_id)
        expected = {
            'id': image_id,
            'name': 'image1',
            'is_public': True,
            'size': None,
            'min_disk': None,
            'min_ram': None,
            'disk_format': None,
            'container_format': None,
            'checksum': None,
            'created_at': self.NOW_DATETIME,
            'updated_at': self.NOW_DATETIME,
            'deleted_at': None,
            'deleted': None,
            'status': None,
            'properties': {},
            'owner': None,
        }
        self.assertEqual(image_meta, expected)
    def test_show_raises_when_no_authtoken_in_the_context(self):
        # Without an auth token a private image must be hidden (not found).
        fixture = self._make_fixture(name='image1',
                                     is_public=False,
                                     properties={'one': 'two'})
        image_id = self.service.create(self.context, fixture)['id']
        self.context.auth_token = False
        self.assertRaises(exception.ImageNotFound,
                          self.service.show,
                          self.context,
                          image_id)
    def test_detail_passes_through_to_client(self):
        fixture = self._make_fixture(name='image10', is_public=True)
        image_id = self.service.create(self.context, fixture)['id']
        image_metas = self.service.detail(self.context)
        expected = [
            {
                'id': image_id,
                'name': 'image10',
                'is_public': True,
                'size': None,
                'min_disk': None,
                'min_ram': None,
                'disk_format': None,
                'container_format': None,
                'checksum': None,
                'created_at': self.NOW_DATETIME,
                'updated_at': self.NOW_DATETIME,
                'deleted_at': None,
                'deleted': None,
                'status': None,
                'properties': {},
                'owner': None,
            },
        ]
        self.assertEqual(image_metas, expected)
    def test_show_makes_datetimes(self):
        # String timestamps from glance are converted to datetime objects.
        fixture = self._make_datetime_fixture()
        image_id = self.service.create(self.context, fixture)['id']
        image_meta = self.service.show(self.context, image_id)
        self.assertEqual(image_meta['created_at'], self.NOW_DATETIME)
        self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME)
    def test_detail_makes_datetimes(self):
        fixture = self._make_datetime_fixture()
        self.service.create(self.context, fixture)
        image_meta = self.service.detail(self.context)[0]
        self.assertEqual(image_meta['created_at'], self.NOW_DATETIME)
        self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME)
    def test_download_with_retries(self):
        tries = [0]
        class MyGlanceStubClient(glance_stubs.StubGlanceClient):
            """A client that fails the first time, then succeeds."""
            def get(self, image_id):
                if tries[0] == 0:
                    tries[0] = 1
                    raise glanceclient.exc.ServiceUnavailable('')
                else:
                    return {}
        client = MyGlanceStubClient()
        service = self._create_image_service(client)
        image_id = 1  # doesn't matter
        writer = NullWriter()
        # When retries are disabled, we should get an exception
        self.flags(glance_num_retries=0)
        self.assertRaises(exception.GlanceConnectionFailed,
                service.download, self.context, image_id, writer)
        # Now lets enable retries. No exception should happen now.
        tries = [0]
        self.flags(glance_num_retries=1)
        service.download(self.context, image_id, writer)
    def test_client_forbidden_converts_to_imagenotauthed(self):
        class MyGlanceStubClient(glance_stubs.StubGlanceClient):
            """A client that raises a Forbidden exception."""
            def get(self, image_id):
                raise glanceclient.exc.Forbidden(image_id)
        client = MyGlanceStubClient()
        service = self._create_image_service(client)
        image_id = 1  # doesn't matter
        writer = NullWriter()
        self.assertRaises(exception.ImageNotAuthorized, service.download,
                          self.context, image_id, writer)
    def test_client_httpforbidden_converts_to_imagenotauthed(self):
        class MyGlanceStubClient(glance_stubs.StubGlanceClient):
            """A client that raises a HTTPForbidden exception."""
            def get(self, image_id):
                raise glanceclient.exc.HTTPForbidden(image_id)
        client = MyGlanceStubClient()
        service = self._create_image_service(client)
        image_id = 1  # doesn't matter
        writer = NullWriter()
        self.assertRaises(exception.ImageNotAuthorized, service.download,
                          self.context, image_id, writer)
    def test_client_notfound_converts_to_imagenotfound(self):
        class MyGlanceStubClient(glance_stubs.StubGlanceClient):
            """A client that raises a NotFound exception."""
            def get(self, image_id):
                raise glanceclient.exc.NotFound(image_id)
        client = MyGlanceStubClient()
        service = self._create_image_service(client)
        image_id = 1  # doesn't matter
        writer = NullWriter()
        self.assertRaises(exception.ImageNotFound, service.download,
                          self.context, image_id, writer)
    def test_client_httpnotfound_converts_to_imagenotfound(self):
        class MyGlanceStubClient(glance_stubs.StubGlanceClient):
            """A client that raises a HTTPNotFound exception."""
            def get(self, image_id):
                raise glanceclient.exc.HTTPNotFound(image_id)
        client = MyGlanceStubClient()
        service = self._create_image_service(client)
        image_id = 1  # doesn't matter
        writer = NullWriter()
        self.assertRaises(exception.ImageNotFound, service.download,
                          self.context, image_id, writer)
    def test_glance_client_image_id(self):
        fixture = self._make_fixture(name='test image')
        image_id = self.service.create(self.context, fixture)['id']
        (service, same_id) = glance.get_remote_image_service(
                self.context, image_id)
        self.assertEquals(same_id, image_id)
    def test_glance_client_image_ref(self):
        # A full image URL resolves to a service pointed at that host.
        fixture = self._make_fixture(name='test image')
        image_id = self.service.create(self.context, fixture)['id']
        image_url = 'http://something-less-likely/%s' % image_id
        (service, same_id) = glance.get_remote_image_service(
                self.context, image_url)
        self.assertEquals(same_id, image_id)
        self.assertEquals(service._client.host,
                          'something-less-likely')
def _create_failing_glance_client(info):
    """Return a stub glance client whose first ``get`` call fails.

    Every call increments ``info['num_calls']`` so tests can assert how many
    attempts were made; only the very first call raises ServiceUnavailable.
    """
    class FlakyGlanceStubClient(glance_stubs.StubGlanceClient):
        """A client that fails the first time, then succeeds."""
        def get(self, image_id):
            info['num_calls'] += 1
            is_first_attempt = info['num_calls'] == 1
            if is_first_attempt:
                raise glanceclient.exc.ServiceUnavailable('')
            return {}
    return FlakyGlanceStubClient()
class TestGlanceClientWrapper(test.TestCase):
    """Tests for GlanceClientWrapper retry and server-selection behaviour."""
    def setUp(self):
        super(TestGlanceClientWrapper, self).setUp()
        # host1 has no scheme, which is http by default
        self.flags(glance_api_servers=['host1:9292', 'https://host2:9293',
            'http://host3:9294'])
        # Make the test run fast
        def _fake_sleep(secs):
            pass
        self.stubs.Set(time, 'sleep', _fake_sleep)
    def test_static_client_without_retries(self):
        # With retries disabled, a single failure must propagate as
        # GlanceConnectionFailed after exactly one attempt.
        self.flags(glance_num_retries=0)
        ctxt = context.RequestContext('fake', 'fake')
        fake_host = 'host4'
        fake_port = 9295
        fake_use_ssl = False
        info = {'num_calls': 0}
        def _fake_create_glance_client(context, host, port, use_ssl, version):
            self.assertEqual(host, fake_host)
            self.assertEqual(port, fake_port)
            self.assertEqual(use_ssl, fake_use_ssl)
            return _create_failing_glance_client(info)
        self.stubs.Set(glance, '_create_glance_client',
                _fake_create_glance_client)
        client = glance.GlanceClientWrapper(context=ctxt,
                host=fake_host, port=fake_port, use_ssl=fake_use_ssl)
        self.assertRaises(exception.GlanceConnectionFailed,
                client.call, ctxt, 1, 'get', 'meow')
        self.assertEqual(info['num_calls'], 1)
    def test_default_client_without_retries(self):
        self.flags(glance_num_retries=0)
        ctxt = context.RequestContext('fake', 'fake')
        info = {'num_calls': 0,
                'host': 'host1',
                'port': 9292,
                'use_ssl': False}
        # Leave the list in a known-order
        def _fake_shuffle(servers):
            pass
        def _fake_create_glance_client(context, host, port, use_ssl, version):
            self.assertEqual(host, info['host'])
            self.assertEqual(port, info['port'])
            self.assertEqual(use_ssl, info['use_ssl'])
            return _create_failing_glance_client(info)
        self.stubs.Set(random, 'shuffle', _fake_shuffle)
        self.stubs.Set(glance, '_create_glance_client',
                _fake_create_glance_client)
        client = glance.GlanceClientWrapper()
        client2 = glance.GlanceClientWrapper()
        self.assertRaises(exception.GlanceConnectionFailed,
                client.call, ctxt, 1, 'get', 'meow')
        self.assertEqual(info['num_calls'], 1)
        # Second wrapper should pick the next server in the (fake-shuffled)
        # rotation; rebinding `info` updates the closure above as well.
        info = {'num_calls': 0,
                'host': 'host2',
                'port': 9293,
                'use_ssl': True}
        def _fake_shuffle2(servers):
            # fake shuffle in a known manner
            servers.append(servers.pop(0))
        self.stubs.Set(random, 'shuffle', _fake_shuffle2)
        self.assertRaises(exception.GlanceConnectionFailed,
                client2.call, ctxt, 1, 'get', 'meow')
        self.assertEqual(info['num_calls'], 1)
    def test_static_client_with_retries(self):
        # One retry allowed: the first call fails, the second succeeds,
        # so no exception and exactly two attempts.
        self.flags(glance_num_retries=1)
        ctxt = context.RequestContext('fake', 'fake')
        fake_host = 'host4'
        fake_port = 9295
        fake_use_ssl = False
        info = {'num_calls': 0}
        def _fake_create_glance_client(context, host, port, use_ssl, version):
            self.assertEqual(host, fake_host)
            self.assertEqual(port, fake_port)
            self.assertEqual(use_ssl, fake_use_ssl)
            return _create_failing_glance_client(info)
        self.stubs.Set(glance, '_create_glance_client',
                _fake_create_glance_client)
        client = glance.GlanceClientWrapper(context=ctxt,
                host=fake_host, port=fake_port, use_ssl=fake_use_ssl)
        client.call(ctxt, 1, 'get', 'meow')
        self.assertEqual(info['num_calls'], 2)
    def test_default_client_with_retries(self):
        # With one retry and no static host, the retry should move on to
        # the next configured api server (host0 -> host1 keys below).
        self.flags(glance_num_retries=1)
        ctxt = context.RequestContext('fake', 'fake')
        info = {'num_calls': 0,
                'host0': 'host1',
                'port0': 9292,
                'use_ssl0': False,
                'host1': 'host2',
                'port1': 9293,
                'use_ssl1': True}
        # Leave the list in a known-order
        def _fake_shuffle(servers):
            pass
        def _fake_create_glance_client(context, host, port, use_ssl, version):
            attempt = info['num_calls']
            self.assertEqual(host, info['host%s' % attempt])
            self.assertEqual(port, info['port%s' % attempt])
            self.assertEqual(use_ssl, info['use_ssl%s' % attempt])
            return _create_failing_glance_client(info)
        self.stubs.Set(random, 'shuffle', _fake_shuffle)
        self.stubs.Set(glance, '_create_glance_client',
                _fake_create_glance_client)
        client = glance.GlanceClientWrapper()
        client2 = glance.GlanceClientWrapper()
        client.call(ctxt, 1, 'get', 'meow')
        self.assertEqual(info['num_calls'], 2)
        def _fake_shuffle2(servers):
            # fake shuffle in a known manner
            servers.append(servers.pop(0))
        self.stubs.Set(random, 'shuffle', _fake_shuffle2)
        info = {'num_calls': 0,
                'host0': 'host2',
                'port0': 9293,
                'use_ssl0': True,
                'host1': 'host3',
                'port1': 9294,
                'use_ssl1': False}
        client2.call(ctxt, 1, 'get', 'meow')
        self.assertEqual(info['num_calls'], 2)
| {
"content_hash": "0a02a164308d473f950be768afbbd542",
"timestamp": "",
"source": "github",
"line_count": 687,
"max_line_length": 79,
"avg_line_length": 36.65793304221252,
"alnum_prop": 0.5516597839898348,
"repo_name": "aristanetworks/arista-ovs-nova",
"id": "e8baf4353bf9a34f40d87108ddf843a53fd9a019",
"size": "25860",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/image/test_glance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "6938504"
},
{
"name": "Shell",
"bytes": "16524"
}
],
"symlink_target": ""
} |
"""Test various command line arguments and configuration file parameters."""
import json
from pathlib import Path
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import assert_equal
class SettingsTest(BitcoinTestFramework):
    def set_test_params(self):
        # Single node, fresh chain, and no default wallets created — this
        # test only exercises settings.json handling, not wallet behaviour.
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.wallet_names = []
def run_test(self):
node, = self.nodes
settings = Path(node.datadir, self.chain, "settings.json")
conf = Path(node.datadir, "particl.conf")
# Assert empty settings file was created
self.stop_node(0)
with settings.open() as fp:
assert_equal(json.load(fp), {})
# Assert settings are parsed and logged
with settings.open("w") as fp:
json.dump({"string": "string", "num": 5, "bool": True, "null": None, "list": [6, 7]}, fp)
with node.assert_debug_log(expected_msgs=[
'Ignoring unknown rw_settings value bool',
'Ignoring unknown rw_settings value list',
'Ignoring unknown rw_settings value null',
'Ignoring unknown rw_settings value num',
'Ignoring unknown rw_settings value string',
'Setting file arg: string = "string"',
'Setting file arg: num = 5',
'Setting file arg: bool = true',
'Setting file arg: null = null',
'Setting file arg: list = [6,7]',
]):
self.start_node(0)
self.stop_node(0)
# Assert settings are unchanged after shutdown
with settings.open() as fp:
assert_equal(json.load(fp), {"string": "string", "num": 5, "bool": True, "null": None, "list": [6, 7]})
# Test invalid json
with settings.open("w") as fp:
fp.write("invalid json")
node.assert_start_raises_init_error(expected_msg='Unable to parse settings file', match=ErrorMatch.PARTIAL_REGEX)
# Test invalid json object
with settings.open("w") as fp:
fp.write('"string"')
node.assert_start_raises_init_error(expected_msg='Found non-object value "string" in settings file', match=ErrorMatch.PARTIAL_REGEX)
# Test invalid settings file containing duplicate keys
with settings.open("w") as fp:
fp.write('{"key": 1, "key": 2}')
node.assert_start_raises_init_error(expected_msg='Found duplicate key key in settings file', match=ErrorMatch.PARTIAL_REGEX)
# Test invalid settings file is ignored with command line -nosettings
with node.assert_debug_log(expected_msgs=['Command-line arg: settings=false']):
self.start_node(0, extra_args=["-nosettings"])
self.stop_node(0)
# Test invalid settings file is ignored with config file -nosettings
with conf.open('a') as conf:
conf.write('nosettings=1\n')
with node.assert_debug_log(expected_msgs=['Config file arg: [regtest] settings=false']):
self.start_node(0)
self.stop_node(0)
# Test alternate settings path
altsettings = Path(node.datadir, "altsettings.json")
with altsettings.open("w") as fp:
fp.write('{"key": "value"}')
with node.assert_debug_log(expected_msgs=['Setting file arg: key = "value"']):
self.start_node(0, extra_args=[f"-settings={altsettings}"])
self.stop_node(0)
# Script entry point: run this functional test directly.
if __name__ == '__main__':
    SettingsTest().main()
| {
"content_hash": "3280a657c01d28901a3325b8b33390ab",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 140,
"avg_line_length": 41.264367816091955,
"alnum_prop": 0.6050139275766017,
"repo_name": "particl/particl-core",
"id": "4f1472569fe2b7d043260ef4d6646a4cfadcba13",
"size": "3804",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/functional/feature_settings.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28178"
},
{
"name": "Batchfile",
"bytes": "13"
},
{
"name": "C",
"bytes": "2889723"
},
{
"name": "C++",
"bytes": "13218778"
},
{
"name": "CMake",
"bytes": "29182"
},
{
"name": "Cap'n Proto",
"bytes": "1256"
},
{
"name": "Dockerfile",
"bytes": "1740"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "Java",
"bytes": "541"
},
{
"name": "M4",
"bytes": "229063"
},
{
"name": "Makefile",
"bytes": "159386"
},
{
"name": "Objective-C++",
"bytes": "5486"
},
{
"name": "Python",
"bytes": "3388224"
},
{
"name": "QMake",
"bytes": "1276"
},
{
"name": "Sage",
"bytes": "59728"
},
{
"name": "Scheme",
"bytes": "26427"
},
{
"name": "Shell",
"bytes": "190057"
}
],
"symlink_target": ""
} |
# Bootstrap for the CAST upgrade library shipped as a zip archive: make the
# archive importable, then apply the version patch.
import sys
import os
# NOTE(review): `saved_path` is captured but never used afterwards --
# presumably intended to restore sys.path once the import completes;
# confirm the intent before removing it.
saved_path = sys.path.copy()
# zipimport lets Python import packages directly from a .zip on sys.path.
path = os.path.join(os.path.dirname(__file__), 'lib_cast_upgrade_1_5_20.zip')
sys.path.append(path)
from lib_cast_upgrade_1_5_20.internal.upgrader import apply_patch #@UnresolvedImport
apply_patch('1.5.20')
| {
"content_hash": "332f369dcd610b25640fe31e1a2a28fd",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 84,
"avg_line_length": 19.357142857142858,
"alnum_prop": 0.7011070110701108,
"repo_name": "CAST-Extend/com.castsoftware.uc.checkanalysiscompleteness",
"id": "748b36399df1ec836043a4a9dd5df106299b3435",
"size": "272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cast_upgrade_1_5_20.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1238485"
}
],
"symlink_target": ""
} |
"""
Functional test
Mistake Epic
Storyboard is defined within the comments of the program itself
"""
import unittest
from flask import url_for
from biblib.tests.stubdata.stub_data import UserShop, LibraryShop
from biblib.tests.base import TestCaseDatabase, MockEmailService
from biblib.views import DEFAULT_LIBRARY_NAME_PREFIX, DEFAULT_LIBRARY_DESCRIPTION
class TestMistakeEpic(TestCaseDatabase):
    """
    Base class used to test the Mistake Epic
    """

    def test_mistake_epic(self):
        """
        Carries out the epic 'Mistake', where a user wants to update the meta
        data of their library, once they have created the library. They see
        that the library already has a pre-filled title that they did not
        change, and want to update it afterwards.

        :return: no return
        """

        # Stub data
        user_mary = UserShop()
        stub_library = LibraryShop(name='', description='')

        # Mary creates a private library and
        # Does not fill any of the details requested, and then looks at the
        # newly created library.
        url = url_for('userview')
        response = self.client.post(
            url,
            data=stub_library.user_view_post_data_json,
            headers=user_mary.headers
        )
        library_id = response.json['id']
        library_name = response.json['name']
        library_description = response.json['description']
        self.assertEqual(response.status_code, 200, response)
        self.assertTrue('name' in response.json, response.json)
        self.assertTrue(response.json['name'] != '')

        # The service pre-fills default metadata when none is supplied.
        self.assertEqual(library_name,
                         '{0} 1'.format(DEFAULT_LIBRARY_NAME_PREFIX))
        self.assertEqual(library_description,
                         DEFAULT_LIBRARY_DESCRIPTION)

        # Mary updates the name and description of the library, but leaves the
        # details blank. This should not update the names as we do not want
        # them as blank.
        for meta_data, update in [['name', ''], ['description', '']]:

            # Make the change
            url = url_for('documentview', library=library_id)
            response = self.client.put(
                url,
                data=stub_library.document_view_put_data_json(
                    meta_data, update
                ),
                headers=user_mary.headers
            )
            self.assertEqual(response.status_code, 200)

            # Check the change did not work
            # BUGFIX: these assertions previously compared the *local*
            # variables captured at creation time, so the GET response
            # fetched here was never actually inspected.
            url = url_for('userview', library=library_id)
            with MockEmailService(user_mary, end_type='uid'):
                response = self.client.get(
                    url,
                    headers=user_mary.headers
                )
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.json['libraries'][0]['name'],
                             '{0} 1'.format(DEFAULT_LIBRARY_NAME_PREFIX))
            self.assertEqual(response.json['libraries'][0]['description'],
                             DEFAULT_LIBRARY_DESCRIPTION)

        # Mary decides to update the name of the library to something more
        # sensible
        name = 'something sensible'
        url = url_for('documentview', library=library_id)
        response = self.client.put(
            url,
            data=stub_library.document_view_put_data_json(
                name=name
            ),
            headers=user_mary.headers
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json['name'], name)

        # She checks the change worked
        url = url_for('userview', library=library_id)
        with MockEmailService(user_mary, end_type='uid'):
            response = self.client.get(
                url,
                headers=user_mary.headers
            )
        self.assertEqual(name,
                         response.json['libraries'][0]['name'])

        # Mary decides to make the description also more relevant
        description = 'something relevant'
        url = url_for('documentview', library=library_id)
        response = self.client.put(
            url,
            data=stub_library.document_view_put_data_json(
                description=description
            ),
            headers=user_mary.headers
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json['description'], description)

        # Mary checks that the change worked
        url = url_for('userview', library=library_id)
        with MockEmailService(user_mary, end_type='uid'):
            response = self.client.get(
                url,
                headers=user_mary.headers
            )
        self.assertEqual(description,
                         response.json['libraries'][0]['description'])

        # Mary dislikes both her changes and makes both the changes at once
        name = 'Disliked the other one'
        description = 'It didn\'t make sense before'
        url = url_for('documentview', library=library_id)
        response = self.client.put(
            url,
            data=stub_library.document_view_put_data_json(
                name=name,
                description=description
            ),
            headers=user_mary.headers
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json['name'], name)
        self.assertEqual(response.json['description'], description)

        # Check the change worked
        url = url_for('userview', library=library_id)
        with MockEmailService(user_mary, end_type='uid'):
            response = self.client.get(
                url,
                headers=user_mary.headers
            )
        self.assertEqual(name,
                         response.json['libraries'][0]['name'])
        self.assertEqual(description,
                         response.json['libraries'][0]['description'])
# Script entry point: run this functional test module directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
"content_hash": "0af44164aaf38f6ad8d916f4235652aa",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 81,
"avg_line_length": 36.5859872611465,
"alnum_prop": 0.5788649025069638,
"repo_name": "adsabs/biblib-service",
"id": "44ac572270bf31a49c1c60e21e1cf343a12c2987",
"size": "5744",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "biblib/tests/functional_tests/test_mistake_epic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5583"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Puppet",
"bytes": "1258"
},
{
"name": "Python",
"bytes": "489656"
},
{
"name": "Shell",
"bytes": "533"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import Pyro4.core
import Pyro4.naming
import banks

# Register both bank objects with a Pyro daemon and advertise them in the
# name server, then serve requests until interrupted.
with Pyro4.core.Daemon() as daemon:
    with Pyro4.naming.locateNS() as ns:
        # Register each bank class with the daemon and publish its URI
        # under a well-known name in the name server.
        uri = daemon.register(banks.Rabobank)
        ns.register("example.banks.rabobank", uri)
        uri = daemon.register(banks.ABN)
        ns.register("example.banks.abn", uri)
        print("available banks:")
        print(list(ns.list(prefix="example.banks.").keys()))
    # enter the service loop.
    print("Banks are ready for customers.")
    daemon.requestLoop()
| {
"content_hash": "fff1997c79d16004e9017933a3bea763",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 60,
"avg_line_length": 32.05882352941177,
"alnum_prop": 0.6678899082568808,
"repo_name": "irmen/Pyro4",
"id": "9df02dce984311b49cb89b4a63adfd66e5349ee8",
"size": "571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/banks/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1283"
},
{
"name": "Python",
"bytes": "618799"
},
{
"name": "Shell",
"bytes": "2394"
}
],
"symlink_target": ""
} |
""" Util tests
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2016-08-20
:Copyright: 2016-2018, Karr Lab
:License: MIT
"""
from wc_utils.util.types import assert_value_equal, assert_value_not_equal, cast_to_builtins, is_iterable, get_subclasses, get_superclasses, TypesUtilAssertionError
import numpy as np
import unittest
class TestCastToBuiltins(unittest.TestCase):
    """Tests for :func:`cast_to_builtins`."""

    def test_iterables(self):
        """Lists, tuples, and sets are all cast to plain lists."""
        for container in ([1, 2, 3], (1, 2, 3), set([1, 2, 3])):
            self.assertEqual(cast_to_builtins(container), [1, 2, 3])

    def test_dict(self):
        """Dicts pass through unchanged; attribute objects become dicts."""
        self.assertEqual(cast_to_builtins({'x': 1}), {'x': 1})
        self.assertEqual(cast_to_builtins(SetAttrClass(x=1)), {'x': 1})

    def test_scalars(self):
        """Scalar values are preserved; numpy floats become builtin floats."""
        for value, expected in (('test string', 'test string'), (1, 1), (2.0, 2.0), (np.float64(2.0), 2.0)):
            self.assertEqual(cast_to_builtins(value), expected)
        # NaN does not compare equal to itself, so check the resulting class.
        self.assertEqual(cast_to_builtins(np.float64(np.nan)).__class__, float('nan').__class__)

    def test_recursive(self):
        """Nested attribute objects and containers are converted recursively."""
        nested = SetAttrClass(
            a=(1, 2, 3),
            b=[4, 5, 6],
            c=[{'d': 7, 'e': 8}, SetAttrClass(f=9, g=10)],
        )
        as_builtins = {
            'a': [1, 2, 3],
            'b': [4, 5, 6],
            'c': [{'d': 7, 'e': 8}, {'f': 9, 'g': 10}],
        }
        self.assertEqual(cast_to_builtins(nested), as_builtins)
class TestAssertValueEqual(unittest.TestCase):
    """Tests for assert_value_equal / assert_value_not_equal and is_iterable."""

    def test_type_not_equal(self):
        """Type differences only matter when check_type=True."""
        assert_value_equal(1, 1.0)
        assert_value_not_equal(1, 1.0, check_type=True)
        with self.assertRaises(TypesUtilAssertionError):
            assert_value_equal(1, 1.0, check_type=True)
        with self.assertRaises(TypesUtilAssertionError):
            assert_value_not_equal(1, 1.0)
        with self.assertRaises(TypesUtilAssertionError):
            assert_value_equal({'x': 1}, ['x', 1])
        assert_value_not_equal({'x': 1}, ['x', 1])
        with self.assertRaises(TypesUtilAssertionError):
            assert_value_equal(['x', 1], {'x': 1})
        assert_value_not_equal(['x', 1], {'x': 1})

    def test_iterables(self):
        """Iterables compare without regard to order unless ordering is requested."""
        assert_value_equal([1, 3, 2], [1, 2, 3])
        assert_value_equal([1, 3, 2, 1], [1, 1, 2, 3])
        assert_value_equal((2, 3, 1), [1, 2, 3])
        assert_value_equal(set([1, 2, 3]), [1, 2, 3])
        assert_value_equal([1, 2, 3], [1, 2, 3], check_iterable_ordering=True)
        with self.assertRaises(TypesUtilAssertionError):
            assert_value_equal([1, 2, 3], [1, 3, 2], check_iterable_ordering=True)
        with self.assertRaises(TypesUtilAssertionError):
            assert_value_equal([1, 2, 3], [1, 1, 3])
        assert_value_not_equal([1, 2, 3], [1, 1, 3])
        with self.assertRaises(TypesUtilAssertionError):
            assert_value_equal([1, 2, 3], [1, 2])
        assert_value_not_equal([1, 2, 3], [1, 2])

    def test_dict(self):
        """Dicts and attribute objects compare by key/value pairs."""
        assert_value_equal({'y': 2, 'x': 1}, {'x': 1, 'y': 2})
        assert_value_equal(SetAttrClass(x=1, y=2), {'y': 2, 'x': 1})
        assert_value_equal({'y': 2, 'x': 1}, SetAttrClass(x=1, y=2))

    def test_scalars(self):
        """Scalars compare by value; NaN equals NaN; numpy floats equal builtins."""
        assert_value_equal('test string', 'test string')
        assert_value_equal(1, 1)
        assert_value_equal(2.0, 2.0)
        assert_value_equal(np.float64(2.0), 2.0)
        assert_value_equal(float('nan'), np.nan)
        assert_value_equal(float(2.0), np.float64(2.0))

    def test_recursive(self):
        """Nested structures are compared recursively and order-insensitively."""
        actual = SetAttrClass(
            a=(1, 2, 3),
            b=[4, 5, 6],
            c=[SetAttrClass(f=9, g=[10, 'h', 11]), {'d': 7, 'e': 8}],
        )
        expected = {
            'a': [1, 2, 3],
            'b': [4, 5, 6],
            'c': [{'d': 7, 'e': 8}, {'f': 9, 'g': [10, 11, 'h']}],
        }
        assert_value_equal(actual, expected)

    def test_is_iterable(self):
        """Lists and tuples are iterable; dicts, strings, and scalars are not."""
        for value in ([], ()):
            self.assertTrue(is_iterable(value))
        for value in ({}, '', None, int(), float()):
            self.assertFalse(is_iterable(value))
class TestAssertValueNotEqual(unittest.TestCase):
    """Tests for assert_value_not_equal on scalar values."""

    def test_scalars(self):
        """NaN differs from a number, and distinct ints differ."""
        for left, right in ((1, np.nan), (1, 2)):
            assert_value_not_equal(left, right)
class TestGetSubclasses(unittest.TestCase):
    """Tests for :func:`get_subclasses`."""

    def test(self):
        """Descendants are returned level by level, without duplicates."""
        self.assertEqual(get_subclasses(Parent1), [Child11, Child12])
        all_descendants = [Parent1, Parent2, Child11, Child12, Child21, Child22]
        self.assertEqual(get_subclasses(GrandParent), all_descendants)
        # immediate_only limits the result to direct subclasses.
        self.assertEqual(get_subclasses(GrandParent, immediate_only=True), [Parent1, Parent2])
        # Diamond inheritance: Leaf is reachable via both Left and Right but
        # must appear only once (test de-duplication).
        self.assertEqual(get_subclasses(Root), [Left, Right, Leaf])
class TestGetSuperclasses(unittest.TestCase):
    """Tests for :func:`get_superclasses`."""

    def test(self):
        """Ancestors are returned nearest-first, ending at object."""
        self.assertEqual(get_superclasses(GrandParent), (object, ))
        self.assertEqual(get_superclasses(Parent1), (GrandParent, object, ))
        # immediate_only limits the result to the direct base class.
        self.assertEqual(get_superclasses(Child11, immediate_only=True), (Parent1, ))
        full_chain = (Parent1, GrandParent, object, )
        self.assertEqual(get_superclasses(Child11), full_chain)
class SetAttrClass(object):
    """Test fixture whose constructor copies every keyword argument onto the instance."""

    def __init__(self, **attributes):
        for name, value in attributes.items():
            setattr(self, name, value)
# --- Fixture class hierarchies for the subclass/superclass tests ---

class GrandParent(object):
    """Root of a three-level tree: GrandParent -> Parent1/2 -> Child*."""
    pass


class Parent1(GrandParent):
    pass


class Parent2(GrandParent):
    pass


class Child11(Parent1):
    pass


class Child12(Parent1):
    pass


class Child21(Parent2):
    pass


class Child22(Parent2):
    pass


class Root(object):
    """Apex of a diamond hierarchy: Root -> Left/Right -> Leaf."""
    pass


class Left(Root):
    pass


class Right(Root):
    pass


class Leaf(Left, Right):
    pass
| {
"content_hash": "4a9991c5cd5c6598d0de84540bf26d21",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 164,
"avg_line_length": 32.62643678160919,
"alnum_prop": 0.5973225295050203,
"repo_name": "KarrLab/wc_utils",
"id": "e874a4d03919267c1616a29dddab09c67f202318",
"size": "5677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/util/test_types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "6784"
},
{
"name": "Python",
"bytes": "415285"
},
{
"name": "Shell",
"bytes": "213"
}
],
"symlink_target": ""
} |
from ..translation import gettext_lazy as _
class ErrorChoicesMixin(object):
    """Mixin exposing the processing-step codes used for error reporting."""
    # Numeric codes identifying the step during which an error occurred.
    IMPORT_DATA = 0
    UNDEFINED = 1
    # Django-style choices tuple pairing each code with a translatable label.
    STEP_CHOICES = (
        (IMPORT_DATA, _('import data')),
        (UNDEFINED, _('unexpected error'))
    )
| {
"content_hash": "5fb28737dcca0a711dc84127356bdb4d",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 43,
"avg_line_length": 20.818181818181817,
"alnum_prop": 0.6026200873362445,
"repo_name": "mtrgroup/django-mtr-sync",
"id": "52f24001c3a7495d316d003495a721a323fd877f",
"size": "229",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mtr/sync/lib/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "192118"
},
{
"name": "Python",
"bytes": "137393"
}
],
"symlink_target": ""
} |
"""Revision management for django-reversion."""
from __future__ import unicode_literals
import operator, warnings
from functools import wraps, reduce, partial
from threading import local
from weakref import WeakValueDictionary
import copy
from collections import defaultdict
from django.apps import apps
from django.contrib.contenttypes.models import ContentType
from django.core import serializers
from django.core.exceptions import ObjectDoesNotExist
from django.core.signals import request_finished
from django.db import models, connection, transaction
from django.db.models import Q, Max
from django.db.models.query import QuerySet
from django.db.models.signals import post_save
from django.utils.encoding import force_text
from reversion.signals import pre_revision_commit, post_revision_commit
from reversion.errors import RevisionManagementError, RegistrationError
class VersionAdapter(object):
    """Adapter class for serializing a registered model.

    Subclass and override the class attributes (or pass field overrides to
    ``RevisionManager.register``) to customize which fields are serialized,
    which relations are followed, and the serialization format.
    """

    # Fields to include in the serialized data (empty = all local fields).
    fields = ()

    # Fields to exclude from the serialized data.
    exclude = ()

    # Foreign key relationships to follow when saving a version of this model.
    follow = ()

    # The serialization format to use.
    format = "json"

    def __init__(self, model):
        """Initializes the version adapter for the given model class."""
        self.model = model

    def get_fields_to_serialize(self):
        """Returns an iterable of field names to serialize in the version data."""
        # Use the concrete model's options so proxy models serialize the
        # fields of the underlying table.
        opts = self.model._meta.concrete_model._meta
        fields = self.fields or (field.name for field in opts.local_fields + opts.local_many_to_many)
        fields = (opts.get_field(field) for field in fields if not field in self.exclude)
        for field in fields:
            if field.rel:
                # Relational fields are serialized by field name ...
                yield field.name
            else:
                # ... plain fields by their attname (e.g. "author_id").
                yield field.attname

    def get_followed_relations(self, obj):
        """Returns an iterable of related models that should be included in the revision data.

        Yields every object reachable from *obj* via the relation names in
        ``self.follow``; each relation may resolve to a single model
        instance, a manager, or a queryset.
        """
        for relationship in self.follow:
            # Clear foreign key cache.
            try:
                related_field = obj._meta.get_field(relationship)
            except models.FieldDoesNotExist:
                pass
            else:
                if isinstance(related_field, models.ForeignKey):
                    if hasattr(obj, related_field.get_cache_name()):
                        # Drop the cached FK instance so the current value
                        # is re-read below instead of a stale cached one.
                        delattr(obj, related_field.get_cache_name())
            # Get the referenced obj(s).
            try:
                related = getattr(obj, relationship)
            except ObjectDoesNotExist:  # pragma: no cover
                continue
            if isinstance(related, models.Model):
                yield related
            elif isinstance(related, (models.Manager, QuerySet)):
                for related_obj in related.all():
                    yield related_obj
            elif related is not None:  # pragma: no cover
                raise TypeError("Cannot follow the relationship {relationship}. Expected a model or QuerySet, found {related}".format(
                    relationship = relationship,
                    related = related,
                ))

    def get_serialization_format(self):
        """Returns the serialization format to use."""
        return self.format

    def get_serialized_data(self, obj):
        """Returns a string of serialized data for the given obj."""
        return serializers.serialize(
            self.get_serialization_format(),
            (obj,),
            fields = list(self.get_fields_to_serialize()),
        )

    def get_version_data(self, obj, db=None):
        """Creates the version data to be saved to the version model.

        Returns a dict of keyword arguments for constructing a ``Version``
        instance.  ``object_id_int`` is only populated for models with an
        integer primary key.
        """
        from reversion.models import has_int_pk
        object_id = force_text(obj.pk)
        content_type = ContentType.objects.db_manager(db).get_for_model(obj)
        if has_int_pk(obj.__class__):
            object_id_int = int(obj.pk)
        else:
            object_id_int = None
        return {
            "object_id": object_id,
            "object_id_int": object_id_int,
            "content_type": content_type,
            "format": self.get_serialization_format(),
            "serialized_data": self.get_serialized_data(obj),
            "object_repr": force_text(obj),
        }
class RevisionContextStackFrame(object):
    """One level of the nested-revision stack.

    Records the manual-management flag, whether the revision has been
    invalidated, the duplicate-handling flag, the objects collected per
    revision manager, and any revision meta entries.
    """

    def __init__(self, is_managing_manually, is_invalid=False, ignore_duplicates=False):
        self.is_managing_manually = is_managing_manually
        self.is_invalid = is_invalid
        self.ignore_duplicates = ignore_duplicates
        # Mapping of revision manager -> {obj: version data (or callable)}.
        self.objects = defaultdict(dict)
        # List of (model class, field kwargs) meta entries.
        self.meta = []

    def fork(self, is_managing_manually):
        """Create a child frame inheriting this frame's invalid/duplicate flags."""
        return RevisionContextStackFrame(
            is_managing_manually,
            self.is_invalid,
            self.ignore_duplicates,
        )

    def join(self, other_context):
        """Merge a completed child frame into this one; invalid frames are discarded."""
        if other_context.is_invalid:
            return
        for manager, versions in other_context.objects.items():
            self.objects[manager].update(versions)
        self.meta.extend(other_context.meta)
class RevisionContextManager(local):
    """Manages the state of the current revision.

    Subclasses ``threading.local``, so every thread gets an independent
    stack of revision frames, current user/comment, and DB alias.
    """

    def __init__(self):
        """Initializes the revision state."""
        self.clear()
        # Connect to the request finished signal.
        # Ensures any revision blocks left open are closed when the
        # request ends (see _request_finished_receiver).
        request_finished.connect(self._request_finished_receiver)

    def clear(self):
        """Puts the revision manager back into its default state."""
        self._user = None
        self._comment = ""
        self._stack = []
        self._db = None

    def is_active(self):
        """Returns whether there is an active revision for this thread."""
        return bool(self._stack)

    def _assert_active(self):
        """Checks for an active revision, throwing an exception if none."""
        if not self.is_active():  # pragma: no cover
            raise RevisionManagementError("There is no active revision for this thread")

    @property
    def _current_frame(self):
        # The innermost (most recently started) revision frame.
        self._assert_active()
        return self._stack[-1]

    def start(self, manage_manually=False):
        """
        Begins a revision for this thread.

        This MUST be balanced by a call to `end`. It is recommended that you
        leave these methods alone and instead use the revision context manager
        or the `create_revision` decorator.
        """
        if self.is_active():
            # Nested block: fork the current frame so its flags are inherited.
            self._stack.append(self._current_frame.fork(manage_manually))
        else:
            self._stack.append(RevisionContextStackFrame(manage_manually))

    def end(self):
        """Ends a revision for this thread."""
        self._assert_active()
        stack_frame = self._stack.pop()
        if self._stack:
            # Still inside an outer block: merge into the parent frame.
            self._current_frame.join(stack_frame)
        else:
            # Outermost block closed: commit the collected objects.
            try:
                if not stack_frame.is_invalid:
                    # Save the revision data.
                    for manager, manager_context in stack_frame.objects.items():
                        manager.save_revision(
                            dict(
                                # Version data may have been stored lazily
                                # as a callable; resolve it now.
                                (obj, callable(data) and data() or data)
                                for obj, data
                                in manager_context.items()
                                if obj.pk is not None
                            ),
                            user = self._user,
                            comment = self._comment,
                            meta = stack_frame.meta,
                            ignore_duplicates = stack_frame.ignore_duplicates,
                            db = self._db,
                        )
            finally:
                self.clear()

    # Revision context properties that apply to the entire stack.

    def get_db(self):
        """Returns the current DB alias being used."""
        return self._db

    def set_db(self, db):
        """Sets the DB alias to use."""
        self._db = db

    def set_user(self, user):
        """Sets the current user for the revision."""
        self._assert_active()
        self._user = user

    def get_user(self):
        """Gets the current user for the revision."""
        self._assert_active()
        return self._user

    def set_comment(self, comment):
        """Sets the comments for the revision."""
        self._assert_active()
        self._comment = comment

    def get_comment(self):
        """Gets the current comment for the revision."""
        self._assert_active()
        return self._comment

    # Revision context properties that apply to the current stack frame.

    def is_managing_manually(self):
        """Returns whether this revision context has manual management enabled."""
        return self._current_frame.is_managing_manually

    def invalidate(self):
        """Marks this revision as broken, so it should not be committed."""
        self._current_frame.is_invalid = True

    def is_invalid(self):
        """Checks whether this revision is invalid."""
        return self._current_frame.is_invalid

    def add_to_context(self, manager, obj, version_data):
        """Adds an object to the current revision."""
        self._current_frame.objects[manager][obj] = version_data

    def add_meta(self, cls, **kwargs):
        """Adds a class of meta information to the current revision."""
        self._current_frame.meta.append((cls, kwargs))

    def set_ignore_duplicates(self, ignore_duplicates):
        """Sets whether to ignore duplicate revisions."""
        self._current_frame.ignore_duplicates = ignore_duplicates

    def get_ignore_duplicates(self):
        """Gets whether to ignore duplicate revisions."""
        return self._current_frame.ignore_duplicates

    # Signal receivers.

    def _request_finished_receiver(self, **kwargs):
        """
        Called at the end of a request, ensuring that any open revisions
        are closed. Not closing all active revisions can cause memory leaks
        and weird behaviour.
        """
        while self.is_active():  # pragma: no cover
            self.end()

    # High-level context management.

    def create_revision(self, manage_manually=False):
        """
        Marks up a block of code as requiring a revision to be created.

        The returned context manager can also be used as a decorator.
        """
        return RevisionContext(self, manage_manually)
class RevisionContext(object):
    """An individual revision block, usable as a context manager or decorator."""

    def __init__(self, context_manager, manage_manually):
        """Bind this block to its owning context manager."""
        self._context_manager = context_manager
        self._manage_manually = manage_manually

    def __enter__(self):
        """Open the revision block."""
        self._context_manager.start(self._manage_manually)

    def __exit__(self, exc_type, exc_value, traceback):
        """Close the revision block.

        If an exception escaped the block, the revision is invalidated so it
        is discarded rather than committed. The exception is not suppressed.
        """
        try:
            if exc_type is not None:
                self._context_manager.invalidate()
        finally:
            self._context_manager.end()

    def __call__(self, func):
        """Wrap *func* so each call runs inside this revision block."""
        @wraps(func)
        def wrapper(*args, **kwargs):
            with self:
                return func(*args, **kwargs)
        return wrapper
# A shared, thread-safe context manager.
# (RevisionContextManager subclasses threading.local, so each thread sees
# its own independent revision state through this single instance.)
revision_context_manager = RevisionContextManager()
class RevisionManager(object):
"""Manages the configuration and creation of revisions."""
_created_managers = WeakValueDictionary()
@classmethod
def get_created_managers(cls):
"""Returns all created revision managers."""
return list(cls._created_managers.items())
@classmethod
def get_manager(cls, manager_slug):
"""Returns the manager with the given slug."""
if manager_slug in cls._created_managers:
return cls._created_managers[manager_slug]
raise RegistrationError("No revision manager exists with the slug %r" % manager_slug) # pragma: no cover
    def __init__(self, manager_slug, revision_context_manager=revision_context_manager):
        """Initializes the revision manager.

        :param manager_slug: unique identifier for this manager; keys the
            global registry and tags the revisions it saves.
        :param revision_context_manager: the (normally shared, thread-local)
            context manager tracking open revision blocks.
        :raises RegistrationError: if a manager with this slug already exists.
        """
        # Check the slug is unique for this revision manager.
        if manager_slug in RevisionManager._created_managers:  # pragma: no cover
            raise RegistrationError("A revision manager has already been created with the slug %r" % manager_slug)
        # Store a reference to this manager.
        # (The registry is a WeakValueDictionary, so it does not keep
        # managers alive on its own.)
        self.__class__._created_managers[manager_slug] = self
        # Store config params.
        self._manager_slug = manager_slug
        self._registered_models = {}
        self._revision_context_manager = revision_context_manager
        # Per-model signal lists, populated by register().
        self._eager_signals = {}
        self._signals = {}
        # Proxies to common context methods.
        self._revision_context = revision_context_manager.create_revision()
# Registration methods.
def _registration_key_for_model(self, model):
meta = model._meta
return (
meta.app_label,
meta.model_name,
)
def is_registered(self, model):
"""
Checks whether the given model has been registered with this revision
manager.
"""
return self._registration_key_for_model(model) in self._registered_models
def get_registered_models(self):
"""Returns an iterable of all registered models."""
return [
apps.get_model(*key)
for key
in self._registered_models.keys()
]
def register(self, model=None, adapter_cls=VersionAdapter, signals=None, eager_signals=None, **field_overrides):
"""Registers a model with this revision manager."""
# Default to post_save if no signals are given
if signals is None and eager_signals is None:
signals = [post_save]
# Store signals for usage in the signal receiver
self._eager_signals[model] = list(eager_signals or [])
self._signals[model] = list(signals or [])
# Return a class decorator if model is not given
if model is None:
return partial(self.register, adapter_cls=adapter_cls, **field_overrides)
# Prevent multiple registration.
if self.is_registered(model):
raise RegistrationError("{model} has already been registered with django-reversion".format(
model = model,
))
# Perform any customization.
if field_overrides:
adapter_cls = type(adapter_cls.__name__, (adapter_cls,), field_overrides)
# Perform the registration.
adapter_obj = adapter_cls(model)
self._registered_models[self._registration_key_for_model(model)] = adapter_obj
# Connect to the selected signals of the model.
all_signals = self._signals[model] + self._eager_signals[model]
for signal in all_signals:
signal.connect(self._signal_receiver, model)
return model
def get_adapter(self, model):
"""Returns the registration information for the given model class."""
if self.is_registered(model):
return self._registered_models[self._registration_key_for_model(model)]
raise RegistrationError("{model} has not been registered with django-reversion".format(
model = model,
))
def unregister(self, model):
"""Removes a model from version control."""
if not self.is_registered(model):
raise RegistrationError("{model} has not been registered with django-reversion".format(
model = model,
))
del self._registered_models[self._registration_key_for_model(model)]
all_signals = self._signals[model] + self._eager_signals[model]
for signal in all_signals:
signal.disconnect(self._signal_receiver, model)
del self._signals[model]
del self._eager_signals[model]
    def _follow_relationships(self, objects):
        """Follows all relationships in the given set of objects.

        Recursively walks each adapter's ``follow`` relations and returns
        the set of every reachable object (including the starting objects
        themselves, when they have a primary key).
        """
        followed = set()
        def _follow(obj, exclude_concrete):
            # Check the pk first because objects without a pk are not hashable
            # Also stop on revisits, and skip the concrete twin of a
            # proxy-model starting point (see exclude_concrete below).
            if obj.pk is None or obj in followed or (obj.__class__, obj.pk) == exclude_concrete:
                return
            followed.add(obj)
            adapter = self.get_adapter(obj.__class__)
            for related in adapter.get_followed_relations(obj):
                _follow(related, exclude_concrete)
        for obj in objects:
            exclude_concrete = None
            if obj._meta.proxy:
                # A proxy model shares its row with the concrete model;
                # record the (concrete class, pk) pair so the traversal
                # does not version the same row under both classes.
                exclude_concrete = (obj._meta.concrete_model, obj.pk)
            _follow(obj, exclude_concrete)
        return followed
def _get_versions(self, db=None):
"""Returns all versions that apply to this manager."""
from reversion.models import Version
return Version.objects.using(db).filter(
revision__manager_slug = self._manager_slug,
).select_related("revision")
def save_revision(self, objects, ignore_duplicates=False, user=None, comment="", meta=(), db=None):
    """Saves a new revision.

    *objects* is either a list/tuple of model instances or a mapping of
    {instance: version_data}.  Returns the saved Revision, or None when
    *objects* is empty or *ignore_duplicates* detects no change.
    """
    from reversion.models import Revision, Version, has_int_pk
    # Adapt the objects to a dict.
    if isinstance(objects, (list, tuple)):
        objects = dict(
            (obj, self.get_adapter(obj.__class__).get_version_data(obj, db))
            for obj in objects
        )
    # Create the revision.
    if objects:
        # Follow relationships.
        for obj in self._follow_relationships(objects.keys()):
            if obj not in objects:
                adapter = self.get_adapter(obj.__class__)
                objects[obj] = adapter.get_version_data(obj)
        # Create all the versions without saving them
        ordered_objects = list(objects.keys())
        new_versions = [Version(**objects[obj]) for obj in ordered_objects]
        # Check if there's some change in all the revision's objects.
        save_revision = True
        if ignore_duplicates:
            # Find the latest revision amongst the latest previous version of each object.
            # NOTE: Q / reduce / operator / Max come from module-level imports.
            subqueries = [Q(object_id=version.object_id, content_type=version.content_type) for version in new_versions]
            subqueries = reduce(operator.or_, subqueries)
            latest_revision = self._get_versions(db).filter(subqueries).aggregate(Max("revision"))["revision__max"]
            # If we have a latest revision, compare it to the current revision.
            if latest_revision is not None:
                previous_versions = self._get_versions(db).filter(revision=latest_revision).values_list("serialized_data", flat=True)
                # A duplicate means same object count and identical serialized payloads.
                if len(previous_versions) == len(new_versions):
                    all_serialized_data = [version.serialized_data for version in new_versions]
                    if sorted(previous_versions) == sorted(all_serialized_data):
                        save_revision = False
        # Only save if we're always saving, or have changes.
        if save_revision:
            # Save a new revision.
            revision = Revision(
                manager_slug = self._manager_slug,
                user = user,
                comment = comment,
            )
            # Send the pre_revision_commit signal.
            pre_revision_commit.send(self,
                instances = ordered_objects,
                revision = revision,
                versions = new_versions,
            )
            # Save the revision.
            with transaction.atomic(using=db):
                revision.save(using=db)
                # Save version models.
                for version in new_versions:
                    version.revision = revision
                    # NOTE(review): saved without using=db -- presumably relies on
                    # the default DB router; confirm multi-db behaviour is intended.
                    version.save()
                # Save the meta information.
                for cls, kwargs in meta:
                    cls._default_manager.db_manager(db).create(revision=revision, **kwargs)
            # Send the post_revision_commit signal.
            post_revision_commit.send(self,
                instances = ordered_objects,
                revision = revision,
                versions = new_versions,
            )
            # Return the revision.
            return revision
# Revision management API.
def get_for_object_reference(self, model, object_id, db=None):
    """
    Returns all versions for the given object reference.

    The results are returned with the most recent versions first.
    """
    from reversion.models import has_int_pk
    content_type = ContentType.objects.db_manager(db).get_for_model(model)
    versions = self._get_versions(db).filter(
        content_type = content_type,
    ).select_related("revision")
    if has_int_pk(model):
        # We can do this as a fast, indexed lookup.
        object_id_int = int(object_id)
        versions = versions.filter(object_id_int=object_id_int)
    else:
        # We can't do this using an index. Never mind.
        object_id = force_text(object_id)
        versions = versions.filter(object_id=object_id)
    # Descending pk approximates most-recent-first creation order.
    versions = versions.order_by("-pk")
    return versions
def get_for_object(self, obj, db=None):
    """Return every version of *obj*, most recent first."""
    model = obj.__class__
    return self.get_for_object_reference(model, obj.pk, db)
def get_unique_for_object(self, obj, db=None):
    """
    Returns unique versions associated with the object.

    The results are returned with the most recent versions first.

    .. deprecated:: use ``get_for_object(obj).get_unique()`` instead.
    """
    warnings.warn(
        "Use get_for_object().get_unique() instead of get_unique_for_object().",
        PendingDeprecationWarning)
    return list(self.get_for_object(obj, db).get_unique())
def get_for_date(self, object, date, db=None):
    """Return the latest version of *object* created on or before *date*.

    Raises Version.DoesNotExist when no such version exists.
    """
    from reversion.models import Version
    candidates = self.get_for_object(object, db).filter(
        revision__date_created__lte=date,
    )
    try:
        return candidates[0]
    except IndexError:
        raise Version.DoesNotExist
def get_deleted(self, model_class, db=None, model_db=None):
    """
    Returns all the deleted versions for the given model class.

    The results are returned with the most recent versions first.
    *db* selects the versions database; *model_db* (defaulting to *db*)
    the database holding the live model rows.
    """
    from reversion.models import has_int_pk
    model_db = model_db or db
    content_type = ContentType.objects.db_manager(db).get_for_model(model_class)
    live_pk_queryset = model_class._default_manager.db_manager(model_db).all().values_list("pk", flat=True)
    versioned_objs = self._get_versions(db).filter(
        content_type = content_type,
    )
    if has_int_pk(model_class):
        # If the model and version data are in different databases, decouple the queries.
        if model_db != db:
            live_pk_queryset = list(live_pk_queryset.iterator())
        # We can do this as a fast, in-database join.
        deleted_version_pks = versioned_objs.exclude(
            object_id_int__in = live_pk_queryset
        ).values_list("object_id_int")
    else:
        # This join has to be done as two separate queries.
        deleted_version_pks = versioned_objs.exclude(
            object_id__in = list(live_pk_queryset.iterator())
        ).values_list("object_id")
    # Keep only the newest version per deleted object.
    deleted_version_pks = deleted_version_pks.annotate(
        latest_pk = Max("pk")
    ).values_list("latest_pk", flat=True)
    # HACK: MySQL deals extremely badly with this as a subquery, and can hang infinitely.
    # TODO: If a version is identified where this bug no longer applies, we can add a version specifier.
    if connection.vendor == "mysql":  # pragma: no cover
        deleted_version_pks = list(deleted_version_pks)
    # Return the deleted versions!
    return self._get_versions(db).filter(pk__in=deleted_version_pks).order_by("-pk")
# Signal receivers.
def _signal_receiver(self, instance, signal, **kwargs):
    """Adds registered models to the current revision, if any."""
    if self._revision_context_manager.is_active() and not self._revision_context_manager.is_managing_manually():
        eager = signal in self._eager_signals[instance.__class__]
        adapter = self.get_adapter(instance.__class__)
        if eager:
            # pre_delete is a special case, because the instance will
            # be modified by django right after this.
            # don't use a lambda, but get the data out now.
            version_data = adapter.get_version_data(instance, self._revision_context_manager._db)
            self._revision_context_manager.add_to_context(self, copy.copy(instance), version_data)
            # Snapshot everything reachable from the instance too, since it
            # may no longer be reachable once the delete completes.
            for obj in self._follow_relationships([instance]):
                adapter = self.get_adapter(obj.__class__)
                version_data = adapter.get_version_data(obj, self._revision_context_manager._db)
                self._revision_context_manager.add_to_context(self, copy.copy(obj), version_data)
        else:
            # Defer serialization until the revision is actually committed.
            version_data = lambda: adapter.get_version_data(instance, self._revision_context_manager._db)
            self._revision_context_manager.add_to_context(self, instance, version_data)
# A shared revision manager.
# The names below are module-level convenience aliases bound to it, so
# callers can do ``reversion.register(...)`` etc. without instantiating
# their own RevisionManager.
default_revision_manager = RevisionManager("default")

# Easy registration methods.
register = default_revision_manager.register
is_registered = default_revision_manager.is_registered
unregister = default_revision_manager.unregister
get_adapter = default_revision_manager.get_adapter
get_registered_models = default_revision_manager.get_registered_models

# Context management.
create_revision = revision_context_manager.create_revision

# Revision meta data.
get_db = revision_context_manager.get_db
set_db = revision_context_manager.set_db
get_user = revision_context_manager.get_user
set_user = revision_context_manager.set_user
get_comment = revision_context_manager.get_comment
set_comment = revision_context_manager.set_comment
add_meta = revision_context_manager.add_meta
get_ignore_duplicates = revision_context_manager.get_ignore_duplicates
set_ignore_duplicates = revision_context_manager.set_ignore_duplicates

# Low level API.
get_for_object_reference = default_revision_manager.get_for_object_reference
get_for_object = default_revision_manager.get_for_object
get_unique_for_object = default_revision_manager.get_unique_for_object
get_for_date = default_revision_manager.get_for_date
get_deleted = default_revision_manager.get_deleted
| {
"content_hash": "1a3d26c38f31d8bc679202b895b9ebce",
"timestamp": "",
"source": "github",
"line_count": 667,
"max_line_length": 137,
"avg_line_length": 40.82908545727136,
"alnum_prop": 0.6108397899607094,
"repo_name": "ixc/django-reversion",
"id": "fd2c052a9fe39dfcdf699f0bb33dddc5c95ceb8d",
"size": "27233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/reversion/revisions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "6215"
},
{
"name": "Python",
"bytes": "136838"
}
],
"symlink_target": ""
} |
class HDCloudException(Exception):
    """Base exception for HDCloud API errors.

    Carries the HTTP status code, a human-readable message (defaulting
    to the exception class name) and optional extra details.
    """

    def __init__(self, code, message=None, details=None):
        self.code = code
        if message:
            self.msg = message
        else:
            self.msg = self.__class__.__name__
        self.details = details

    def __str__(self):
        return "%s (HTTP %s)" % (self.msg, self.code)
# Map HTTP status codes to specific exception subclasses.  NOTE: computed at
# import time, so subclasses (each with an ``http_status`` class attribute)
# must be defined before this line runs; with no subclasses the map is empty.
_code_map = dict((c.http_status, c) for c in HDCloudException.__subclasses__())


def from_response(response, body):
    """
    Return an instance of a HDCloudException or subclass
    based on an httplib2 response.

    Usage::

        resp, body = http.request(...)
        if resp.status != 200:
            raise from_response(resp, body)
    """
    # Fall back to the base exception for unmapped status codes.
    cls = _code_map.get(response.status, HDCloudException)
    if body:
        # assumes body is a parsed JSON dict shaped like
        # {"errors": [{"message": ...}]} -- TODO confirm against the API.
        return cls(code=response.status, message=body['errors'][0]['message'])
    else:
        return cls(code=response.status)
"content_hash": "3c816f331b67b4fcb719434b2dbe4b94",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 79,
"avg_line_length": 31.925925925925927,
"alnum_prop": 0.6032482598607889,
"repo_name": "jacobian-archive/python-hdcloud",
"id": "142bc41fc361c644a53c2e21fbec9f05c47490d3",
"size": "862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hdcloud/exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "34191"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
from functools import partial
from itertools import product
import numpy as np
from toolz import curry
from ..base import tokenize
from .core import Array, normalize_chunks
from .numpy_compat import full
def dims_from_size(size, blocksize):
    """
    Split a dimension of length *size* into chunks of *blocksize*.

    >>> list(dims_from_size(30, 8))
    [8, 8, 8, 6]
    """
    full_chunks, remainder = divmod(size, blocksize)
    dims = (blocksize,) * full_chunks
    if remainder:
        dims += (remainder,)
    return dims
def wrap_func_shape_as_first_arg(func, *args, **kwargs):
    """
    Transform np creation function into blocked version

    *func* is called once per chunk with that chunk's shape as its first
    argument; remaining *args*/*kwargs* are forwarded unchanged.
    """
    # Accept shape either positionally (first arg) or as a keyword.
    if 'shape' not in kwargs:
        shape, args = args[0], args[1:]
    else:
        shape = kwargs.pop('shape')
    if not isinstance(shape, (tuple, list)):
        shape = (shape,)
    chunks = kwargs.pop('chunks', None)
    chunks = normalize_chunks(chunks, shape)
    name = kwargs.pop('name', None)
    dtype = kwargs.pop('dtype', None)
    if dtype is None:
        # Probe the dtype by materialising a full-shape array once.
        dtype = func(shape, *args, **kwargs).dtype
    # Deterministic key so identical calls share a graph node.
    name = name or 'wrapped-' + tokenize(func, shape, chunks, dtype, args, kwargs)
    keys = product([name], *[range(len(bd)) for bd in chunks])
    shapes = product(*chunks)
    func = partial(func, dtype=dtype, **kwargs)
    # One task per chunk: (func, chunk_shape, *args).
    vals = ((func,) + (s,) + args for s in shapes)
    dsk = dict(zip(keys, vals))
    return Array(dsk, name, chunks, dtype=dtype)
@curry
def wrap(wrap_func, func, **kwargs):
    """Build the blocked variant of creation routine *func*.

    Curried: ``wrap(wrap_func)`` yields a factory that wraps individual
    numpy functions (see the module-level ``ones``/``zeros``/... below).
    """
    f = partial(wrap_func, func, **kwargs)
    template = """
    Blocked variant of %(name)s

    Follows the signature of %(name)s exactly except that it also requires a
    keyword argument chunks=(...)

    Original signature follows below.
    """
    if func.__doc__ is not None:
        # Prepend the blocked-variant notice to the wrapped docstring.
        f.__doc__ = template % {'name': func.__name__} + func.__doc__
        f.__name__ = 'blocked_' + func.__name__
    return f
# Blocked (dask) versions of the numpy creation routines.
w = wrap(wrap_func_shape_as_first_arg)
ones = w(np.ones, dtype='f8')
zeros = w(np.zeros, dtype='f8')
empty = w(np.empty, dtype='f8')
# NOTE: rebinds ``full`` -- the numpy_compat import above is consumed
# here and then shadowed by its blocked variant.
full = w(full)
| {
"content_hash": "4263584dadf9077e457706a0310412ea",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 82,
"avg_line_length": 25.860759493670887,
"alnum_prop": 0.6172295643661282,
"repo_name": "jeffery-do/Vizdoombot",
"id": "15de4a4e614792d86d0b8848a30e83b137617a41",
"size": "2043",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "doom/lib/python3.5/site-packages/dask/array/wrap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "465717"
},
{
"name": "C++",
"bytes": "219269"
},
{
"name": "CSS",
"bytes": "7132"
},
{
"name": "Cuda",
"bytes": "232079"
},
{
"name": "FORTRAN",
"bytes": "9868"
},
{
"name": "HTML",
"bytes": "7089"
},
{
"name": "JavaScript",
"bytes": "23881"
},
{
"name": "Jupyter Notebook",
"bytes": "16254"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "37513702"
},
{
"name": "Shell",
"bytes": "3838"
}
],
"symlink_target": ""
} |
"""xblog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from blog.feeds import AllPostsRssFeed
urlpatterns = [
    # Django admin site.
    url(r'^admin/', admin.site.urls),
    # Blog pages (prefix-less include; blog.urls defines the root URL).
    url(r'', include('blog.urls')),
    # Comment handling, also mounted at the site root.
    url(r'', include('comments.urls')),
    # RSS feed of all posts.
    url(r'^all/rss/$', AllPostsRssFeed(), name='rss'),
]
| {
"content_hash": "f74c39f6939c1bb30c6034b9ace5c6b0",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 79,
"avg_line_length": 37.64,
"alnum_prop": 0.6886291179596175,
"repo_name": "yuchou/xblog",
"id": "abbcd06e185528621ab7ada879a5d9785df9be74",
"size": "941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xblog/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "88987"
},
{
"name": "HTML",
"bytes": "12633"
},
{
"name": "JavaScript",
"bytes": "4310"
},
{
"name": "Python",
"bytes": "22848"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils import timezone
from django.utils.text import slugify
from django.conf import settings
from autoslug import AutoSlugField
from django.core.urlresolvers import reverse
from django.db.models import Sum, Max
import json, uuid
class Charity(models.Model):
    """A charity that campaigns raise funds for."""

    name = models.CharField(max_length=30)
    # Slug is derived from name automatically.
    slug = AutoSlugField(populate_from='name')
    # NOTE(review): max_length on a TextField is only enforced by forms,
    # not at the database level -- confirm 30 characters is intended.
    description = models.TextField(max_length=30, blank=True, null=True)
    website = models.URLField(blank=True, null=True)
    picture = models.URLField(blank=True, null=True)
    video = models.URLField(blank=True, null=True)

    class Meta:
        ordering = ["-name"]

    def __str__(self):  # __unicode__ on Python 2
        return self.name

    @staticmethod
    def quick_create(name=None):
        """Convenience factory used by tests/seed scripts."""
        if name is None: name = "Ubunye Foundation"
        data = {
            "name": name
        }
        return Charity.objects.create(**data)

    @property
    def gateway_properties(self):
        """Payment-gateway settings for this charity as a {key: value} dict."""
        vals = self.gatewayproperty_set.all().values()
        return {item.get('key'): item.get('value') for item in vals}
class GatewayProperty(models.Model):
    """A single payment-gateway configuration entry for a charity."""

    charity = models.ForeignKey(Charity)
    # Which gateway backend this key/value pair applies to.
    gateway_name = models.CharField(max_length=30, default='payfast', choices=settings.GATEWAY_BACKENDS)
    key = models.CharField(max_length=30)
    value = models.CharField(max_length=255, blank=True, null=True)
# Lifecycle states for a Campaign: (stored value, human-readable label).
CAMPAIGN_STATES = [('pending', 'Pending'), ('active', 'Active'), ('complete', 'Complete'), ('cancelled', 'Cancelled')]
class Campaign(models.Model):
    """A fundraising campaign run for a charity.

    Teams attached to the campaign compete to raise the most money via
    ``Transaction`` records; only transactions with status 'complete'
    count towards totals.
    """

    name = models.CharField(max_length=30)
    slug = AutoSlugField(populate_from='name')
    charity = models.ForeignKey(Charity)
    picture = models.URLField(blank=True, null=True)
    video = models.URLField(blank=True, null=True)
    description = models.TextField(blank=True, null=True)
    status = models.CharField(max_length=30, choices=CAMPAIGN_STATES, default='pending')
    start_date = models.DateTimeField(default=timezone.now)
    expiry_date = models.DateTimeField(default=timezone.now)
    created = models.DateTimeField(default=timezone.now)
    modified = models.DateTimeField(auto_now=True)

    class Meta:
        ordering = ["-name"]

    def __str__(self):
        return self.name

    @property
    def complete_transactions(self):
        """Queryset of this campaign's completed transactions."""
        return Transaction.objects.filter(campaign=self, status='complete')

    @property
    def total_raised(self):
        """Total amount raised; 0 when there are no completed transactions.

        BUGFIX: ``aggregate`` always returns the 'amount__sum' key, with a
        value of None when no rows match, so the previous
        ``result.get('amount__sum', 0)`` could still return None and break
        arithmetic downstream (e.g. in scores_by_team_as_percent).
        """
        # todo: cache
        result = self.complete_transactions.aggregate(Sum('amount'))
        total = result['amount__sum']
        return 0 if total is None else total

    @property
    def scores_by_team(self):
        """Per-team totals, highest first: [{'team__name', 'amount'}, ...]."""
        return self.complete_transactions.values('team__name') \
            .annotate(amount = Sum('amount')) \
            .order_by('-amount')

    @property
    def scores_by_team_as_percent(self):
        """Per-team share of the total plus a display colour for charts.

        BUGFIX: guards against a zero total (previously raised
        ZeroDivisionError/TypeError) and cycles the colour palette
        (previously IndexError for a fourth team).
        """
        amounts = self.scores_by_team
        total = self.total_raised
        colors = [
            '#d44950',  # red
            '#1ca8dd',  # blue
            '#1bc98e',  # green
        ]
        return [
            {
                "team__name": item.get('team__name'),
                'percent': (item.get('amount') / total * 100) if total else 0,
                'color': colors[index % len(colors)],
            }
            for index, item in enumerate(amounts)
        ]

    @property
    def score_summary(self):
        """Human-readable leader summary, or False with fewer than two teams."""
        scores = self.scores_by_team
        if len(scores) > 1:
            first = scores[0]
            second = scores[1]
            diff = first.get('amount', 0) - second.get('amount', 0)
            return "{} leads by R {}".format(first.get('team__name'), diff)
        else:
            return False

    def get_absolute_url(self):
        """Canonical detail URL for this campaign."""
        return reverse('campaign_detail', args=[self.pk])

    @staticmethod
    def get_active():
        """Active campaigns.

        Converted to a staticmethod (it never used ``self``); existing
        ``instance.get_active()`` calls keep working, and
        ``Campaign.get_active()`` now works too.
        """
        return Campaign.objects.filter(status='active')

    @staticmethod
    def quick_create(charity=None, name=None, picture=None):
        """Convenience factory used by tests/seed scripts."""
        if name is None: name = "Mandela Day Challenge"
        if picture is None: picture = "http://www.gatesfoundation.org/~/media/GFO/Home/banner-1.png"
        if charity is None:
            if Charity.objects.count() > 0:
                charity = Charity.objects.first()
            else:
                charity = Charity.quick_create()
        data = {
            "name": name,
            "picture": picture,
            "charity": charity
        }
        return Campaign.objects.create(**data)
class Team(models.Model):
    """A team competing within a single campaign."""

    campaign = models.ForeignKey(Campaign)
    name = models.CharField(max_length=30)
    # Slug is derived from name automatically.
    slug = AutoSlugField(populate_from='name')
    description = models.TextField(max_length=30, blank=True, null=True)
    website = models.URLField(blank=True, null=True)
    avatar = models.URLField(blank=True, null=True)

    class Meta:
        ordering = ["-name"]

    def __str__(self):  # __unicode__ on Python 2
        return self.name
# Lifecycle states for a Transaction: (stored value, human-readable label).
transaction_statuses = [('started', 'Started'), ('complete', 'Complete')]


class Transaction(models.Model):
    """A single donation made towards a campaign, optionally for a team."""

    # UUID primary key so transaction identifiers are not guessable.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    campaign = models.ForeignKey(Campaign)
    team = models.ForeignKey(Team, blank=True, null=True)
    amount = models.DecimalField(max_digits=6, decimal_places=2, default=0)
    status = models.CharField(max_length=30, choices=transaction_statuses, default='started')
| {
"content_hash": "75ece8dbe8c0cbaaebae05a6f1df06fc",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 118,
"avg_line_length": 31.30635838150289,
"alnum_prop": 0.6183530280649926,
"repo_name": "toast38coza/FlashGiving",
"id": "f9801455237384dcd916794d91dec822b0aa25f6",
"size": "5416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "campaign/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1956"
},
{
"name": "HTML",
"bytes": "16964"
},
{
"name": "JavaScript",
"bytes": "273886"
},
{
"name": "Python",
"bytes": "34975"
}
],
"symlink_target": ""
} |
from baselines.common.cmd_util import make_mujoco_env, mujoco_arg_parser
from baselines.common import tf_util as U
from baselines import logger
def train(env_id, num_timesteps, seed):
    """Train PPO (pposgd_simple) on a MuJoCo env using a 2x64 MLP policy."""
    from baselines.ppo1 import mlp_policy, pposgd_simple
    # NOTE(review): the session is entered but never explicitly closed --
    # matches upstream baselines usage; confirm this is acceptable here.
    U.make_session(num_cpu=1).__enter__()
    def policy_fn(name, ob_space, ac_space):
        # Two hidden layers of 64 units each.
        return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
            hid_size=64, num_hid_layers=2)
    env = make_mujoco_env(env_id, seed)
    # Standard PPO hyper-parameters from the baselines MuJoCo defaults.
    pposgd_simple.learn(env, policy_fn,
            max_timesteps=num_timesteps,
            timesteps_per_actorbatch=2048,
            clip_param=0.2, entcoeff=0.0,
            optim_epochs=10, optim_stepsize=3e-4, optim_batchsize=64,
            gamma=0.99, lam=0.95, schedule='linear',
        )
    env.close()
def main():
    """CLI entry point: parse arguments, configure logging, run training."""
    args = mujoco_arg_parser().parse_args()
    logger.configure()
    train(args.env, num_timesteps=args.num_timesteps, seed=args.seed)


if __name__ == '__main__':
    main()
| {
"content_hash": "fdd8637a7ea1d14ad180cd6c201c9d57",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 84,
"avg_line_length": 37.074074074074076,
"alnum_prop": 0.6473526473526473,
"repo_name": "dsbrown1331/CoRL2019-DREX",
"id": "638998316b397ede6e945886f7566f806415101a",
"size": "1025",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "drex-atari/baselines/baselines/ppo1/run_mujoco.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "918"
},
{
"name": "HTML",
"bytes": "591968"
},
{
"name": "Jupyter Notebook",
"bytes": "1160596"
},
{
"name": "Python",
"bytes": "1438389"
}
],
"symlink_target": ""
} |
"""Teaching example: the same name X bound in four different scopes --
module global, function local, class attribute and instance attribute --
to demonstrate Python's name-resolution rules."""

X = 11 # Global (module) name/attribute (X, or manynames.X)

def f():
    print(X) # Access global X (11)

def g():
    X = 22 # Local (function) variable (X, hides module X)
    print(X)

class C:
    X = 33 # Class attribute (C.X)
    def m(self):
        X = 44 # Local variable in method (X)
        self.X = 55 # Instance attribute (instance.X)

if __name__ == '__main__':
    print(X) # 11: module (a.k.a. manynames.X outside file)
    f() # 11: global
    g() # 22: local
    print(X) # 11: module name unchanged
    obj = C() # Make instance
    print(obj.X) # 33: class name inherited by instance
    obj.m() # Attach attribute name X to instance now
    print(obj.X) # 55: instance
    print(C.X) # 33: class (a.k.a. obj.X if no X in instance)
    #print(C.m.X) # FAILS: only visible in method
    #print(g.X) # FAILS: only visible in function
| {
"content_hash": "c85248737f633a0d5416de371bb0a5bf",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 81,
"avg_line_length": 37.4,
"alnum_prop": 0.45276292335115864,
"repo_name": "dreadrel/UWF_2014_spring_COP3990C-2507",
"id": "9676a3051978d90e2f4519d8776c30ccb0d50420",
"size": "1122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notebooks/scripts/book_code/code/manynames.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1796"
},
{
"name": "Python",
"bytes": "493591"
}
],
"symlink_target": ""
} |
from distutils.core import setup, Extension
from pythran.dist import PythranExtension, PythranBuildExt
# Build the 'demo2' package, compiling demo2/a.py into a native extension
# with Pythran; PythranBuildExt supplies the required compiler flags.
setup(name = 'demo2',
      version = '1.0',
      description = 'This is another demo package',
      packages = ['demo2'],
      cmdclass={"build_ext": PythranBuildExt},
      ext_modules = [PythranExtension('demo2.a', sources = ['a.py'])])
| {
"content_hash": "f8eda8099f63733c3f24ee1a53215e09",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 70,
"avg_line_length": 38.55555555555556,
"alnum_prop": 0.6714697406340058,
"repo_name": "serge-sans-paille/pythran",
"id": "159ef9479639a4397ff86f6f8a2edfd2183d7489",
"size": "347",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pythran/tests/test_distutils_packaged/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "2074873"
},
{
"name": "Cython",
"bytes": "1701"
},
{
"name": "Jupyter Notebook",
"bytes": "27461"
},
{
"name": "Makefile",
"bytes": "1162"
},
{
"name": "Python",
"bytes": "2025760"
}
],
"symlink_target": ""
} |
"""
工具类
Created on 03/01/2016
@author: Wen Gu
@contact: emptyset110@gmail.com
"""
import requests
import asyncio
import math
import time
from datetime import datetime
import pandas
import os
import ntplib
from pandas import DataFrame
import re
import json
import logging
def camel_to_underscore(name):
    """Convert a CamelCase identifier to snake_case.

    Previously an unimplemented stub that returned None.

    >>> camel_to_underscore('CamelCase')
    'camel_case'
    """
    # Insert '_' before each word boundary, handling acronym runs
    # (e.g. 'HTTPServerError' -> 'http_server_error'), then lowercase.
    partial = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', partial).lower()
def get_logger(
        logger_name="main",
        log_path="log",                   # directory holding per-logger log folders
        console_log=True,                 # echo log records to the console (default on)
        console_log_level=logging.INFO,   # console handler threshold (default INFO)
        critical_log=False,               # write critical.log (default off)
        error_log=True,                   # write error.log (default on)
        warning_log=False,                # write warning.log (default off)
        info_log=True,                    # write info.log (default on)
        debug_log=False,                  # write debug.log (default off)
        ):
    """Build (or fetch) a logger with optional console and per-level file handlers.

    Handlers are attached only the first time a given ``logger_name`` is
    requested; subsequent calls return the already-configured logger.

    BUGFIX: the console handler previously hard-coded ``logging.INFO`` and
    silently ignored the ``console_log_level`` parameter; it now honours it.
    """
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s'
    )
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.DEBUG)
    if log_path and log_path[-1] != '/':
        # Normalise the directory so it can be concatenated below.
        log_path += '/'
    if not logger.handlers:
        # Console handler setup.
        if console_log:
            console_handler = logging.StreamHandler()
            console_handler.setFormatter(formatter)
            console_handler.setLevel(console_log_level)  # was hard-coded to INFO
            logger.addHandler(console_handler)
        if not os.path.exists(log_path + logger_name):
            os.makedirs(log_path + logger_name)
        # One (enabled, filename, level) triple per optional file handler;
        # replaces five near-identical copy/paste blocks.
        file_specs = [
            (critical_log, 'critical.log', logging.CRITICAL),
            (error_log, 'error.log', logging.ERROR),
            (warning_log, 'warning.log', logging.WARNING),
            (info_log, 'info.log', logging.INFO),
            (debug_log, 'debug.log', logging.DEBUG),
        ]
        for enabled, filename, level in file_specs:
            if enabled:
                log_handler = logging.FileHandler(
                    log_path + logger_name + '/' + filename
                )
                log_handler.setLevel(level)
                log_handler.setFormatter(formatter)
                logger.addHandler(log_handler)
    return logger
def generate_token():
    """Return a fresh 40-character hex token derived from the current time."""
    import hashlib
    digest = hashlib.sha1()
    digest.update(str(time.time()).encode())
    return digest.hexdigest()
def symbol_list_to_code(symbolList):
    """Strip the 2-letter exchange prefix from each symbol
    (e.g. 'sh600000' -> '600000')."""
    return [symbol[2:8] for symbol in symbolList]
def _get_public_ip():
    # Network I/O: queries ipinfo.io for this host's public IP address.
    return requests.get('http://ipinfo.io/ip').text.strip()
def get_client_ip():
    """Return this client's public IP address.

    Tries the Sina endpoint first, falling back to ipinfo.io on failure.
    NOTE(review): if both endpoints keep failing this loops forever --
    consider a retry limit.
    """
    while True:
        try:
            # Cache-busting timestamp query parameter.
            response = requests.get(
                'https://ff.sinajs.cn/?_=%s&list=sys_clientip'
                % int(time.time() * 1000)).text
            ip = re.findall(r'\"(.*)\"', response)
            break
        except Exception as e:
            try:
                # Fallback provider; returns a plain string, not a list.
                ip = _get_public_ip()
                return ip
            except:
                pass
    return ip[0]
def slice_list(step=None, num=None, data_list=None):
    """Slice *data_list* into sub-lists.

    Provide either *step* (chunk length) or *num* (number of chunks);
    *num* takes precedence when both are given.  Returns False (after
    printing a warning) when neither is provided.
    """
    if step is None and num is None:
        print("step和num不能同时为空")
        return False
    if num is not None:
        step = math.ceil(len(data_list) / num)
    return [data_list[start: start + step]
            for start in range(0, len(data_list), step)]
def symbols_to_string(symbols):
    """Join a collection of symbols with commas; pass strings through unchanged."""
    if isinstance(symbols, (list, set, tuple, pandas.Series)):
        return ','.join(symbols)
    return symbols
"""
与时间相关的转化函数
"""
def datetime_to_timestamp(dt, timeFormat='ms'):
    """Convert a datetime to a Unix timestamp in milliseconds (default)
    or seconds, using the local timezone."""
    seconds = time.mktime(dt.timetuple())
    if timeFormat == 'ms':
        return int(seconds * 1000)
    elif timeFormat == 's':
        return int(seconds)
def date_to_timestamp(date, dateFormat='%Y-%m-%d', timeFormat='ms'):
    """Parse *date* with *dateFormat* and convert it to a Unix timestamp."""
    parsed = datetime.strptime(date, dateFormat)
    return datetime_to_timestamp(dt=parsed, timeFormat=timeFormat)
def string_to_date(date):
    """Parse a 'YYYY-MM-DD' string into a datetime.date."""
    parsed = datetime.strptime(date, "%Y-%m-%d")
    return parsed.date()
def timestamp_to_datetime(timestamp, timeFormat='ms'):
    """Convert a Unix timestamp (milliseconds by default, or seconds)
    into a local datetime.

    BUGFIX: previously called ``datetime.strftime(timestamp)``, which
    raises TypeError; the inverse of ``datetime_to_timestamp`` is
    ``datetime.fromtimestamp``.
    """
    if timeFormat == 'ms':
        timestamp = timestamp / 1000
    return datetime.fromtimestamp(timestamp)
def time_now():
    """Current Unix time in whole milliseconds."""
    seconds = time.time()
    return int(seconds * 1000)
# Fetch a reference timestamp from an NTP time server.
def get_network_time():
    """Return the NTP server's transmit timestamp (seconds since the epoch).

    Network I/O: blocks on one round-trip to pool.ntp.org.
    """
    start = time.time()  # NOTE(review): unused local -- left for parity with check_time()
    c = ntplib.NTPClient()
    response = c.request('pool.ntp.org')
    ts = response.tx_time
    return ts
def check_time(precision=0.1):
    """Estimate the offset between the local clock and NTP network time.

    Repeatedly queries the network until a round-trip completes within
    *precision* seconds, then returns (network_time - local_time);
    adding the returned difference to a local timestamp approximates
    network time.
    """
    duration = 2.0
    while duration > precision:
        try:
            print("{}, 开始获取网络时间戳".format(time.time()))
            start = time.time()
            networkTime = get_network_time()
            end = time.time()
            duration = end - start
        except Exception as e:
            # Network hiccup: report and retry until a fast enough sample arrives.
            print("获取网络时间出了点小状况,正重试", duration)
    # print("网络耗时:{}".format( duration ) )
    # print("{}, 网络时间戳".format( networkTime ) )
    # print("{}, 现在时间戳".format( time.time()) )
    difference = networkTime - (start + duration)
    print("difference = {}, (本地时间戳+difference)=网络时间戳".format(difference))
    return difference
"""
symbols相关函数
"""
def split_symbols(symbols):
    """Split symbols into [Shenzhen, Shanghai] lists.

    Relies on lexicographic ordering: 'sz...' codes compare greater than
    'sz' while 'sh...' codes compare less.
    """
    frame = DataFrame(symbols, columns=['s'])
    shenzhen = list(frame[frame.s > 'sz']['s'])
    shanghai = list(frame[frame.s < 'sz']['s'])
    return [shenzhen, shanghai]
def upper(data_list):
    """Uppercase every string in *data_list* in place and return the list."""
    for index, value in enumerate(data_list):
        data_list[index] = value.upper()
    return data_list
def read_config(file_path):
    """Load a JSON config file; return {} (after printing a warning) on
    any failure (missing file, bad JSON, ...).

    BUGFIX: the file handle is now closed via a context manager -- the
    previous implementation left it open.
    """
    try:
        with open(file_path) as f_config:
            cfg = json.load(f_config)
    except Exception as e:
        print("{}".format(e))
        cfg = dict()
        print(
            "未能正确加载{},请检查路径,json文档格式,或者忽略此警告"
            .format(file_path)
        )
    return cfg
def write_config(cfg, file_path):
    """Serialise *cfg* to *file_path* as pretty-printed JSON.

    Uses a context manager so the file is closed even if the write fails
    (the previous explicit ``close()`` was skipped on exceptions).
    """
    print("写入配置:\n{}".format(json.dumps(cfg, indent=2)))
    with open(file_path, 'w', encoding='UTF-8') as f:
        f.write(json.dumps(cfg, indent=2))
| {
"content_hash": "44f9f2d52a97e091453eb6d4c0b902bc",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 79,
"avg_line_length": 26.515503875968992,
"alnum_prop": 0.5700920917994445,
"repo_name": "Emptyset110/SinaL2",
"id": "064693874dab68d242d1b4916a2e1d59ddb0bfcb",
"size": "7369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SinaL2/Sina/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "59581"
}
],
"symlink_target": ""
} |
"""This file should be updated as files/classes/functions are deprecated."""
import pytest
from praw.util import _deprecate_args
from .. import UnitTest
@_deprecate_args("arg2", "arg1", "arg3", "arg0")
def arg_test_global(*, arg0=None, arg1=None, arg2=None, arg3=None):
return arg0, arg1, arg2, arg3
@_deprecate_args("arg0", "arg2", "arg1", "arg3")
def arg_test_global_with_positional(arg0, *, arg1=None, arg2=None, arg3=None):
return arg0, arg1, arg2, arg3
class ArgTest:
    """Fixture class mirroring the module-level functions as methods."""

    @_deprecate_args("arg2", "arg1", "arg3", "arg0")
    def arg_test(self, *, arg0=None, arg1=None, arg2=None, arg3=None):
        # Keyword-only echo; positional use goes through the deprecation shim.
        return arg0, arg1, arg2, arg3

    @_deprecate_args("arg0", "arg2", "arg1", "arg3")
    def arg_test_with_positional(self, arg0, *, arg1=None, arg2=None, arg3=None):
        # First argument stays positional; the rest are keyword-only.
        return arg0, arg1, arg2, arg3
class TestDeprecateArgs(UnitTest):
def setup_method(self):
    # Fresh fixture instance for every test.
    self.arg_test = ArgTest()
def test_arg_test(self):
    # All four args positional: warns, naming every arg, and maps values
    # per the decorator's declared order (arg2, arg1, arg3, arg0).
    with pytest.warns(
        DeprecationWarning,
        match=(
            "Positional arguments for 'ArgTest.arg_test' will no longer be"
            " supported in PRAW 8.\nCall this function with 'arg2', 'arg1', 'arg3',"
            " and 'arg0' as keyword arguments."
        ),
    ):
        result_args = self.arg_test.arg_test("arg2", "arg1", "arg3", "arg0")
    assert result_args == ("arg0", "arg1", "arg2", "arg3")
@pytest.mark.filterwarnings("error", category=DeprecationWarning)
def test_arg_test__keyword(self):
result_args = self.arg_test.arg_test(
arg2="arg2", arg1="arg1", arg3="arg3", arg0="arg0"
)
assert result_args == ("arg0", "arg1", "arg2", "arg3")
def test_arg_test__mix(self):
    # One positional plus one keyword: warning names only the positional arg.
    with pytest.warns(
        DeprecationWarning,
        match=(
            "Positional arguments for 'ArgTest.arg_test' will no longer be"
            " supported in PRAW 8.\nCall this function with 'arg2' as a keyword"
            " argument."
        ),
    ):
        result_args = self.arg_test.arg_test("arg2", arg1="arg1")
    assert result_args == (None, "arg1", "arg2", None)
@pytest.mark.filterwarnings("error", category=DeprecationWarning)
def test_arg_test__one_keyword(self):
result_args = self.arg_test.arg_test(arg0="arg0")
assert result_args == ("arg0", None, None, None)
def test_arg_test__one_positional(self):
    # Single positional call: warning names just that argument.
    with pytest.warns(
        DeprecationWarning,
        match=(
            "Positional arguments for 'ArgTest.arg_test' will no longer be"
            " supported in PRAW 8.\nCall this function with 'arg2' as a keyword"
            " argument."
        ),
    ):
        result_args = self.arg_test.arg_test("arg2")
    assert result_args == (None, None, "arg2", None)
def test_arg_test_global(self):
    # Module-level function, all positional: warning lists every arg.
    with pytest.warns(
        DeprecationWarning,
        match=(
            "Positional arguments for 'arg_test_global' will no longer be supported"
            " in PRAW 8.\nCall this function with 'arg2', 'arg1', 'arg3', and"
            " 'arg0' as keyword arguments."
        ),
    ):
        result_args = arg_test_global("arg2", "arg1", "arg3", "arg0")
    assert result_args == ("arg0", "arg1", "arg2", "arg3")
@pytest.mark.filterwarnings("error", category=DeprecationWarning)
def test_arg_test_global__keyword(self):
result_args = arg_test_global(
arg2="arg2", arg1="arg1", arg3="arg3", arg0="arg0"
)
assert result_args == ("arg0", "arg1", "arg2", "arg3")
    def test_arg_test_global__mix(self):
        """Mixed call warns, naming only the positional argument."""
        with pytest.warns(
            DeprecationWarning,
            match=(
                "Positional arguments for 'arg_test_global' will no longer be supported"
                " in PRAW 8.\nCall this function with 'arg2' as a keyword argument."
            ),
        ):
            result_args = arg_test_global("arg2", arg1="arg1")
        assert result_args == (None, "arg1", "arg2", None)
    @pytest.mark.filterwarnings("error", category=DeprecationWarning)
    def test_arg_test_global__one_keyword(self):
        """A single keyword argument must not warn."""
        result_args = arg_test_global(arg0="arg0")
        assert result_args == ("arg0", None, None, None)
    def test_arg_test_global__one_positional(self):
        """A single positional argument warns and names it."""
        with pytest.warns(
            DeprecationWarning,
            match=(
                "Positional arguments for 'arg_test_global' will no longer be supported"
                " in PRAW 8.\nCall this function with 'arg2' as a keyword argument."
            ),
        ):
            result_args = arg_test_global("arg2")
        assert result_args == (None, None, "arg2", None)
    def test_arg_test_global_with_positional(self):
        """The allowed positional (arg0) is excluded from the warning list."""
        with pytest.warns(
            DeprecationWarning,
            match=(
                "Positional arguments for 'arg_test_global_with_positional' will no"
                " longer be supported in PRAW 8.\nCall this function with 'arg2',"
                " 'arg1', and 'arg3' as keyword arguments."
            ),
        ):
            result_args = arg_test_global_with_positional(
                "arg0", "arg2", "arg1", "arg3"
            )
        assert result_args == ("arg0", "arg1", "arg2", "arg3")
    @pytest.mark.filterwarnings("error", category=DeprecationWarning)
    def test_arg_test_global_with_positional__keyword(self):
        """Fully-keyword call must not warn."""
        result_args = arg_test_global_with_positional(
            arg0="arg0", arg2="arg2", arg1="arg1", arg3="arg3"
        )
        assert result_args == ("arg0", "arg1", "arg2", "arg3")
    def test_arg_test_global_with_positional__mix(self):
        """Only deprecated positionals (arg2, arg1) are named; arg0 is allowed."""
        with pytest.warns(
            DeprecationWarning,
            match=(
                "Positional arguments for 'arg_test_global_with_positional' will no"
                " longer be supported in PRAW 8.\nCall this function with 'arg2' and"
                " 'arg1' as keyword arguments."
            ),
        ):
            result_args = arg_test_global_with_positional(
                "arg0", "arg2", "arg1", arg3=None
            )
        assert result_args == ("arg0", "arg1", "arg2", None)
    @pytest.mark.filterwarnings("error", category=DeprecationWarning)
    def test_arg_test_global_with_positional__one_keyword(self):
        """Allowed positional plus one keyword must not warn."""
        result_args = arg_test_global_with_positional("arg0", arg1="arg1")
        assert result_args == ("arg0", "arg1", None, None)
    @pytest.mark.filterwarnings("error", category=DeprecationWarning)
    def test_arg_test_global_with_positional__one_positional(self):
        """The single allowed positional (arg0) alone must not warn."""
        result_args = arg_test_global_with_positional("arg0")
        assert result_args == ("arg0", None, None, None)
    def test_arg_test_with_positional(self):
        """Method variant: allowed positional arg0 excluded from the warning list."""
        with pytest.warns(
            DeprecationWarning,
            match=(
                "Positional arguments for 'ArgTest.arg_test_with_positional' will no"
                " longer be supported in PRAW 8.\nCall this function with 'arg2',"
                " 'arg1', and 'arg3' as keyword arguments."
            ),
        ):
            result_args = self.arg_test.arg_test_with_positional(
                "arg0", "arg2", "arg1", "arg3"
            )
        assert result_args == ("arg0", "arg1", "arg2", "arg3")
    @pytest.mark.filterwarnings("error", category=DeprecationWarning)
    def test_arg_test_with_positional__keyword(self):
        """Fully-keyword call of the method variant must not warn."""
        result_args = self.arg_test.arg_test_with_positional(
            arg0="arg0", arg2="arg2", arg1="arg1", arg3="arg3"
        )
        assert result_args == ("arg0", "arg1", "arg2", "arg3")
    def test_arg_test_with_positional__mix(self):
        """Mixed call warns only about the deprecated positional arg2."""
        with pytest.warns(
            DeprecationWarning,
            match=(
                "Positional arguments for 'ArgTest.arg_test_with_positional' will no"
                " longer be supported in PRAW 8.\nCall this function with 'arg2' as a"
                " keyword argument."
            ),
        ):
            result_args = self.arg_test.arg_test_with_positional(
                "arg0", "arg2", arg1="arg1", arg3=None
            )
        assert result_args == ("arg0", "arg1", "arg2", None)
    @pytest.mark.filterwarnings("error", category=DeprecationWarning)
    def test_arg_test_with_positional__one_keyword(self):
        """Allowed positional plus a keyword must not warn."""
        result_args = self.arg_test.arg_test_with_positional("arg0", arg1="arg1")
        assert result_args == ("arg0", "arg1", None, None)
    @pytest.mark.filterwarnings("error", category=DeprecationWarning)
    def test_arg_test_with_positional__one_positional(self):
        """The single allowed positional alone must not warn."""
        result_args = self.arg_test.arg_test_with_positional("arg0")
        assert result_args == ("arg0", None, None, None)
| {
"content_hash": "22fc0a1091e1d93223db1d803ac8f7e2",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 88,
"avg_line_length": 40.80930232558139,
"alnum_prop": 0.5741964896284477,
"repo_name": "praw-dev/praw",
"id": "650c3edc5eaaaf3a1a20c8f92ae6027d241fbf1d",
"size": "8774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/util/test_deprecate_args.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1145273"
},
{
"name": "Shell",
"bytes": "189"
}
],
"symlink_target": ""
} |
def printlist(head):
    """Print the value of each node of a singly-linked list, one per line.

    Handles an empty list (head is None) gracefully; the original crashed
    on None because it printed head.val before checking the pointer.
    The print() call form works identically under Python 2 and 3 for a
    single argument.
    """
    while head:
        print(head.val)
        head = head.next
# Definition for singly-linked list.
class ListNode(object):
    """A node in a singly-linked list."""
    def __init__(self, x):
        self.val = x      # payload value
        self.next = None  # next node, or None at the tail
class Solution(object):
    """LeetCode 19: remove the n-th node from the end of a linked list."""

    def removeNthFromEnd(self, head, n):
        """
        :type head: ListNode
        :type n: int
        :rtype: ListNode
        """
        # Classic two-pointer technique: move a lead pointer n nodes ahead,
        # then advance lead and trail together. When lead reaches the last
        # node, trail sits just before the node to remove.
        lead = head
        for _ in range(n):
            lead = lead.next
        if lead is None:
            # The head itself is the n-th node from the end.
            return head.next
        trail = head
        while lead.next is not None:
            lead = lead.next
            trail = trail.next
        trail.next = trail.next.next
        return head
if __name__ == "__main__":
    # Manual smoke test. Removing the 1st node from the end of a single-node
    # list yields an empty list; longer lists can be chained via .next.
    head = ListNode(1)
    n = 1
    head = Solution().removeNthFromEnd(head, n)
    # Bug fix: the original unconditionally called printlist(head), which
    # dereferenced head.val and crashed when the result was an empty list.
    if head is not None:
        printlist(head)
    else:
        print("empty list")
| {
"content_hash": "a3453d537ae3f6b7eacd864a531499ff",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 47,
"avg_line_length": 21.73469387755102,
"alnum_prop": 0.5098591549295775,
"repo_name": "zongtongyi/leetcode",
"id": "d2853ea57f56f3828553071065c7641dd8014044",
"size": "1088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "019_Remove_Nth_Node_From_End_of_List.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62046"
}
],
"symlink_target": ""
} |
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils.dates import days_ago
DAG_NAME = 'dag-to-trigger'
# NOTE(review): 'schedule_interval' here is the *string* "None" and is passed
# inside default_args rather than directly to DAG(); Airflow 1 expects
# schedule_interval as a DAG argument (and the value None, not "None") to
# disable scheduling -- confirm this is intentional for the sample.
args = {'owner': 'airflow', 'start_date': days_ago(1), 'schedule_interval': "None"}
# Minimal child DAG containing a single no-op task; it exists purely so a
# parent DAG can trigger it.
with DAG(dag_id=DAG_NAME, default_args=args) as dag:
    dag_task = DummyOperator(
        task_id='dag-task'
    )
# [END composer_relationship_child_airflow_1]
| {
"content_hash": "328991d6c3bc7dbed3bde13c4d1e2095",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 83,
"avg_line_length": 28.714285714285715,
"alnum_prop": 0.7114427860696517,
"repo_name": "GoogleCloudPlatform/python-docs-samples",
"id": "81883e51757c25c63cb8a669da7cbcaa6e227cc3",
"size": "1025",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "composer/airflow_1_samples/relationship_child.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8008"
},
{
"name": "Dockerfile",
"bytes": "62031"
},
{
"name": "HTML",
"bytes": "69878"
},
{
"name": "JavaScript",
"bytes": "26494"
},
{
"name": "Jinja",
"bytes": "1892"
},
{
"name": "Jupyter Notebook",
"bytes": "47951698"
},
{
"name": "Makefile",
"bytes": "932"
},
{
"name": "Procfile",
"bytes": "138"
},
{
"name": "PureBasic",
"bytes": "11115"
},
{
"name": "Python",
"bytes": "5323502"
},
{
"name": "Shell",
"bytes": "78261"
}
],
"symlink_target": ""
} |
import os
from os.path import join, basename
from cmsis_svd.model import SVDDevice
import site
import json
import re
def formatNamespace(input):
    """Convert a string to UpperCamelCase for use as a C++ namespace name.

    Characters that are not letters, digits or underscores are dropped,
    then each underscore-separated part is capitalized,
    e.g. 'FOO_BAR-2!' -> 'FooBar2'.
    """
    r = re.compile('[^a-zA-Z0-9_]')
    # str.join over a generator replaces the original reduce(add, map(...))
    # round-trip and avoids re-importing operator/functools on every call.
    return ''.join(part.capitalize() for part in r.sub('', input).lower().split('_'))
def formatVariable(input):
    """Convert a string to lowerCamelCase; append '_' when the result would
    collide with a C++ keyword."""
    # All C++ keywords. BUG FIX: the original list was missing commas at
    # several line breaks, so adjacent string literals were concatenated
    # ('for'+'friend' -> 'forfriend', 'not'+'nullptr' -> 'notnullptr', ...)
    # and the keywords for/friend/not/nullptr/return/short/true/try/volatile/
    # while were never matched (and thus never suffixed).
    cppKeywords = ['alignas','alignof','and','asm','auto','bitand','bitor','bool','break','case',
        'catch','char','class','compl','concept','const','constexpr','continue','decltype','default',
        'delete','do','double','else','enum','explicit','export','extern','false','float','for',
        'friend','goto','if','inline','int','long','mutable','namespace','new','noexcept','not',
        'nullptr','operator','or','private','protected','public','register','requires','return',
        'short','signed','sizeof','static','struct','switch','template','this','throw','true',
        'try','typedef','typeid','typename','union','unsigned','using','virtual','void','volatile',
        'while','xor']
    out = formatNamespace(input)
    out = out[:1].lower()+out[1:]
    if out in cppKeywords:
        out += '_' #suffix register names that are c++ keywords to prevent clash
    return out
def formatEnumValue(input):
    """Format an enumerated-value name; names starting with a digit are
    prefixed with 'v' so they form a legal C++ identifier."""
    candidate = 'v' + input if input[:1].isdigit() else input
    return formatVariable(candidate)
def formatComment(input):
    """Strip CR/LF characters from a description string; None/'' become ''."""
    if not input:
        return ""
    return input.replace('\r', '').replace('\n', '')
def useEnumeratedValues(values, fieldWidth):
    """Return 1 when a field's enumerated values should be emitted, else 0.

    Kept for narrow fields (<= 2 bits) or long value lists; see
    https://github.com/posborne/cmsis-svd/issues/14 for the workaround origin.
    """
    if values is None:
        return 0
    return 1 if (len(values) > 4 or fieldWidth <= 2) else 0
def getAccess(field, ext):
    """Build the Kvasir Access<> type string for a register field.

    Each SVD attribute (access, modified-write-values, read-action) may be
    overridden by the JSON extension tree; an extension value always wins.
    """
    access = "read-write"
    modifiedWriteValues = None
    readAction = None
    # SVD value first, then any extension override on top of it.
    if field.access is not None:
        access = field.access
    override = getKey(ext, ['access'])
    if override is not None:
        access = override
    if field.modified_write_values is not None:
        modifiedWriteValues = field.modified_write_values
    override = getKey(ext, ['modifiedWriteValues'])
    if override is not None:
        modifiedWriteValues = override
    if field.read_action is not None:
        readAction = field.read_action
    override = getKey(ext, ['readAction'])
    if override is not None:
        readAction = override
    if modifiedWriteValues is None:
        modifiedWriteValues = "normal"
    if readAction is None:
        readAction = "normal"
    # Map SVD's dashed access names onto the camelCase enumerator names.
    renames = {"read-write": "readWrite", "read-only": "readOnly", "write-only": "writeOnly"}
    access = renames.get(access, access)
    # Plain read-write with default semantics gets the shorthand alias.
    if access == "readWrite" and modifiedWriteValues == "normal" and readAction == "normal":
        return "ReadWriteAccess"
    return "Access<Register::AccessType::%s,Register::ReadActionType::%s,Register::ModifiedWriteValueType::%s>" % (access,readAction,modifiedWriteValues)
def _getSwdKey(device,args):
    """Look up a peripheral / register / field in an SVDDevice by name path.

    args is a 1-3 element sequence: [peripheral], [peripheral, register] or
    [peripheral, register, field]. Returns the first matching model object,
    or None when no element of the path matches.
    """
    for p in device.peripherals:
        if p.name==args[0]:
            if len(args) == 1:
                return p
            for r in p.registers:
                if r.name == args[1]:
                    if len(args) == 2:
                        return r
                    for f in r.fields:
                        if f.name == args[2]:
                            return f
    return None
def _getExtKey(extention,args):
    """Walk the JSON extension tree along the key path ``args``.

    At the final level, when the exact key is absent, fall back to the
    parent node's sibling 'default' entry. Returns None whenever the path
    cannot be followed (missing key or a non-dict intermediate node).
    """
    current = extention;
    last = []
    for arg in args[:-1]:
        if current is None:
            return None
        elif not isinstance(current, dict):
            return None
        elif arg in current:
            # Remember the parent so the final lookup can use its 'default'.
            last = current
            current = current[arg]
        else:
            return None
    if args[-1] in current:
        return current[args[-1]]
    elif 'default' in last and args[-1] in last['default']:
        return last['default'][args[-1]]
    return None
def getKey(tree, path):
    """Dispatch a name-path lookup to either the SVD model or the JSON
    extension tree, depending on the type of ``tree``."""
    if tree is None:
        return None
    lookup = _getSwdKey if isinstance(tree, SVDDevice) else _getExtKey
    return lookup(tree, path)
def clearBitsFromRange(msb, lsb, privious = 0):
    """Return ``privious`` with bits lsb..msb (inclusive) cleared.

    O(1) mask arithmetic replaces the original per-bit loop. An empty
    range (msb < lsb) leaves the value untouched, matching the original
    empty-range(lsb, msb+1) behaviour.
    """
    if msb < lsb:
        return privious
    mask = ((1 << (msb - lsb + 1)) - 1) << lsb
    return privious & ~mask
def setBitsFromRange(msb, lsb, privious = 0):
    """Return ``privious`` with bits lsb..msb (inclusive) set.

    O(1) mask arithmetic replaces the original per-bit loop; an empty
    range (msb < lsb) leaves the value untouched, like the original.
    """
    if msb < lsb:
        return privious
    return privious | (((1 << (msb - lsb + 1)) - 1) << lsb)
def formatCpuName(ext, device):
    """Return the CPU name: the extension override when given, otherwise the
    SVD cpu name ('CM0PLUS' is normalised to 'CM0+'), otherwise 'Unknown'."""
    if ext is not None:
        return ext
    cpu = device.cpu
    if cpu is None or cpu.name is None:
        return "Unknown"
    return 'CM0+' if cpu.name == 'CM0PLUS' else cpu.name
| {
"content_hash": "0f6d063d462a7753dab3a962a92ed9ea",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 153,
"avg_line_length": 32.42758620689655,
"alnum_prop": 0.5920884729902169,
"repo_name": "porkybrain/Kvasir",
"id": "56e0a722a8808db8277be7e5b090fb1e5b0fb227",
"size": "4704",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Tools/ChipFromSvdGenerator/FormattingTools.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "16510"
},
{
"name": "C++",
"bytes": "763437041"
},
{
"name": "CMake",
"bytes": "1504"
},
{
"name": "Python",
"bytes": "13637"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """Auto-generated validator for the ``bar.outsidetextfont.family``
    property: a non-blank font-family string (arrays of strings allowed)."""
    def __init__(
        self, plotly_name="family", parent_name="bar.outsidetextfont", **kwargs
    ):
        # kwargs.pop(...) lets callers override any default while still
        # forwarding unrecognised options to the base StringValidator.
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop("array_ok", True),      # value may be an array
            edit_type=kwargs.pop("edit_type", "calc"),
            no_blank=kwargs.pop("no_blank", True),      # empty string rejected
            role=kwargs.pop("role", "style"),
            strict=kwargs.pop("strict", True),          # presumably strict type checking -- see StringValidator
            **kwargs
        )
| {
"content_hash": "fe59ab6573092aec23a30455b78b8b08",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 79,
"avg_line_length": 36.11764705882353,
"alnum_prop": 0.5798045602605864,
"repo_name": "plotly/python-api",
"id": "b781cdb6269881a0744b7287b8e2d7ff71c3216c",
"size": "614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/bar/outsidetextfont/_family.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from unittest.mock import patch
import pytest
from parameterized import parameterized
from airflow.models.dag import DAG
from airflow.sensors.date_time import DateTimeSensor
from airflow.utils import timezone
# Fixed, timezone-aware date used as the DAG start_date in all tests below.
DEFAULT_DATE = timezone.datetime(2015, 1, 1)
class TestDateTimeSensor:
    """Unit tests for airflow.sensors.date_time.DateTimeSensor."""
    @classmethod
    def setup_class(cls):
        # One shared DAG is enough; the sensor only needs a DAG context.
        args = {"owner": "airflow", "start_date": DEFAULT_DATE}
        cls.dag = DAG("test_dag", default_args=args)
    @parameterized.expand(
        [
            (
                "valid_datetime",
                timezone.datetime(2020, 7, 6, 13, tzinfo=timezone.utc),
                "2020-07-06T13:00:00+00:00",
            ),
            ("valid_str", "20200706T210000+8", "20200706T210000+8"),
        ]
    )
    def test_valid_input(self, task_id, target_time, expected):
        """datetime and str target_time values are accepted; a datetime is
        stored as its ISO-8601 string form, a str is kept verbatim."""
        op = DateTimeSensor(
            task_id=task_id,
            target_time=target_time,
            dag=self.dag,
        )
        assert op.target_time == expected
    def test_invalid_input(self):
        """A bare time (no date component) is rejected with TypeError."""
        with pytest.raises(TypeError):
            DateTimeSensor(
                task_id="test",
                target_time=timezone.utcnow().time(),
                dag=self.dag,
            )
    @parameterized.expand(
        [
            (
                "poke_datetime",
                timezone.datetime(2020, 1, 1, 22, 59, tzinfo=timezone.utc),
                True,
            ),
            ("poke_str_extended", "2020-01-01T23:00:00.001+00:00", False),
            ("poke_str_basic_with_tz", "20200102T065959+8", True),
        ]
    )
    @patch(
        "airflow.sensors.date_time.timezone.utcnow",
        return_value=timezone.datetime(2020, 1, 1, 23, 0, tzinfo=timezone.utc),
    )
    def test_poke(self, task_id, target_time, expected, mock_utcnow):
        """poke() is True exactly when the frozen 'now' has passed target_time."""
        op = DateTimeSensor(task_id=task_id, target_time=target_time, dag=self.dag)
        assert op.poke(None) == expected
| {
"content_hash": "88e520f648f76f0adc8caf61b5c387c9",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 83,
"avg_line_length": 30.951612903225808,
"alnum_prop": 0.5648775403856175,
"repo_name": "airbnb/airflow",
"id": "f2011899fdcdfbef4bd8805e071e9bcb2b44e342",
"size": "2706",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/sensors/test_date_time.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36374"
},
{
"name": "HTML",
"bytes": "99535"
},
{
"name": "JavaScript",
"bytes": "891618"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "796220"
},
{
"name": "Shell",
"bytes": "9040"
}
],
"symlink_target": ""
} |
"""
A lightweight Traits like module.
This is designed to provide a lightweight, simple, pure Python version of
many of the capabilities of enthought.traits. This includes:
* Validation
* Type specification with defaults
* Static and dynamic notification
* Basic predefined types
* An API that is similar to enthought.traits
We don't support:
* Delegation
* Automatic GUI generation
* A full set of trait types. Most importantly, we don't provide container
traits (list, dict, tuple) that can trigger notifications if their
contents change.
* API compatibility with enthought.traits
There are also some important difference in our design:
* enthought.traits does not validate default values. We do.
We choose to create this module because we need these capabilities, but
we need them to be pure Python so they work in all Python implementations,
including Jython and IronPython.
Inheritance diagram:
.. inheritance-diagram:: IPython.utils.traitlets
:parts: 3
Authors:
* Brian Granger
* Enthought, Inc. Some of the code in this file comes from enthought.traits
and is licensed under the BSD license. Also, many of the ideas also come
from enthought.traits even though our implementation is very different.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import inspect
import re
import sys
import types
from types import FunctionType
try:
from types import ClassType, InstanceType
ClassTypes = (ClassType, type)
except:
ClassTypes = (type,)
from .importstring import import_item
from . import py3compat
from .py3compat import iteritems
SequenceTypes = (list, tuple, set, frozenset)
#-----------------------------------------------------------------------------
# Basic classes
#-----------------------------------------------------------------------------
class NoDefaultSpecified ( object ): pass
# Singleton sentinel distinguishing "no default supplied" from a default of None.
NoDefaultSpecified = NoDefaultSpecified()
class Undefined ( object ): pass
# Singleton sentinel marking trait values that have not been set yet.
Undefined = Undefined()
class TraitError(Exception):
    """Exception raised for trait validation failures and bad notifiers."""
    pass
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def class_of(object):
    """Return the object's class name (or the string itself, when a string
    is passed) prefixed with the correct indefinite article, e.g.
    'an Image', 'a PlotValue'."""
    if isinstance(object, py3compat.string_types):
        name = object
    else:
        name = object.__class__.__name__
    return add_article(name)
def add_article(name):
    """Prefix ``name`` with the indefinite article 'an' when it starts with
    a vowel, otherwise 'a'."""
    article = 'an ' if name[:1].lower() in 'aeiou' else 'a '
    return article + name
def repr_type(obj):
    """Return a '<value> <type>' string for readable error messages."""
    the_type = type(obj)
    if (not py3compat.PY3) and the_type is InstanceType:
        # Every old-style instance reports type InstanceType; show the
        # actual class instead.
        the_type = obj.__class__
    return '%r %r' % (obj, the_type)
def is_trait(t):
    """Return True when t is a TraitType instance or a TraitType subclass."""
    if isinstance(t, TraitType):
        return True
    return isinstance(t, type) and issubclass(t, TraitType)
def parse_notifier_name(name):
    """Convert the name argument to a list of names.

    Examples
    --------
    >>> parse_notifier_name('a')
    ['a']
    >>> parse_notifier_name(['a','b'])
    ['a', 'b']
    >>> parse_notifier_name(None)
    ['anytrait']
    """
    if name is None:
        # None means "notify on any trait".
        return ['anytrait']
    if isinstance(name, str):
        return [name]
    if isinstance(name, (list, tuple)):
        for item in name:
            assert isinstance(item, str), "names must be strings"
        return name
class _SimpleTest:
def __init__ ( self, value ): self.value = value
def __call__ ( self, test ):
return test == self.value
def __repr__(self):
return "<SimpleTest(%r)" % self.value
def __str__(self):
return self.__repr__()
def getmembers(object, predicate=None):
    """A safe version of inspect.getmembers that handles missing attributes.

    Useful when descriptor-based attributes raise AttributeError even though
    they appear in dir() — this happens in zope.interface with the
    __provides__ attribute.
    """
    results = []
    for key in dir(object):
        try:
            value = getattr(object, key)
        except AttributeError:
            # Attribute listed in dir() but not actually retrievable: skip.
            continue
        if not predicate or predicate(value):
            results.append((key, value))
    results.sort()
    return results
#-----------------------------------------------------------------------------
# Base TraitType for all traits
#-----------------------------------------------------------------------------
class TraitType(object):
    """A base class for all trait descriptors.

    Notes
    -----
    Our implementation of traits is based on Python's descriptor
    protocol. This class is the base class for all such descriptors. The
    only magic we use is a custom metaclass for the main :class:`HasTraits`
    class that does the following:

    1. Sets the :attr:`name` attribute of every :class:`TraitType`
       instance in the class dict to the name of the attribute.
    2. Sets the :attr:`this_class` attribute of every :class:`TraitType`
       instance in the class dict to the *class* that declared the trait.
       This is used by the :class:`This` trait to allow subclasses to
       accept superclasses for :class:`This` values.
    """
    # Subclass-overridable class-level defaults.
    metadata = {}                # default metadata shared by all instances
    default_value = Undefined    # sentinel until a real default is supplied
    info_text = 'any value'      # human-readable description used in errors
    def __init__(self, default_value=NoDefaultSpecified, **metadata):
        """Create a TraitType.
        """
        if default_value is not NoDefaultSpecified:
            self.default_value = default_value
        # Merge per-instance metadata over the class-level defaults;
        # reuse the class dict when no instance metadata was given.
        if len(metadata) > 0:
            if len(self.metadata) > 0:
                self._metadata = self.metadata.copy()
                self._metadata.update(metadata)
            else:
                self._metadata = metadata
        else:
            self._metadata = self.metadata
        self.init()
    def init(self):
        # Subclass hook invoked at the end of __init__.
        pass
    def get_default_value(self):
        """Create a new instance of the default value."""
        return self.default_value
    def instance_init(self, obj):
        """This is called by :meth:`HasTraits.__new__` to finish init'ing.

        Some stages of initialization must be delayed until the parent
        :class:`HasTraits` instance has been created. This method is
        called in :meth:`HasTraits.__new__` after the instance has been
        created.

        This method triggers the creation and validation of default values
        and also things like the resolution of str given class names in
        :class:`Type` and :class:`Instance`.

        Parameters
        ----------
        obj : :class:`HasTraits` instance
            The parent :class:`HasTraits` instance that has just been
            created.
        """
        self.set_default_value(obj)
    def set_default_value(self, obj):
        """Set the default value on a per instance basis.

        This method is called by :meth:`instance_init` to create and
        validate the default value. The creation and validation of
        default values must be delayed until the parent :class:`HasTraits`
        class has been instantiated.
        """
        # Check for a deferred initializer defined in the same class as the
        # trait declaration or above.
        mro = type(obj).mro()
        meth_name = '_%s_default' % self.name
        for cls in mro[:mro.index(self.this_class)+1]:
            if meth_name in cls.__dict__:
                break
        else:
            # We didn't find one. Do static initialization.
            dv = self.get_default_value()
            newdv = self._validate(obj, dv)
            obj._trait_values[self.name] = newdv
            return
        # Complete the dynamic initialization.
        obj._trait_dyn_inits[self.name] = cls.__dict__[meth_name]
    def __get__(self, obj, cls=None):
        """Get the value of the trait by self.name for the instance.

        Default values are instantiated when :meth:`HasTraits.__new__`
        is called. Thus by the time this method gets called either the
        default value or a user defined value (they called :meth:`__set__`)
        is in the :class:`HasTraits` instance.
        """
        if obj is None:
            # Accessed on the class itself: return the descriptor.
            return self
        else:
            try:
                value = obj._trait_values[self.name]
            except KeyError:
                # Check for a dynamic initializer.
                if self.name in obj._trait_dyn_inits:
                    value = obj._trait_dyn_inits[self.name](obj)
                    # FIXME: Do we really validate here?
                    value = self._validate(obj, value)
                    obj._trait_values[self.name] = value
                    return value
                else:
                    raise TraitError('Unexpected error in TraitType: '
                                     'both default value and dynamic initializer are '
                                     'absent.')
            except Exception:
                # HasTraits should call set_default_value to populate
                # this. So this should never be reached.
                raise TraitError('Unexpected error in TraitType: '
                                 'default value not set properly')
            else:
                return value
    def __set__(self, obj, value):
        # Validate, store, then notify — but only on an actual change.
        new_value = self._validate(obj, value)
        old_value = self.__get__(obj)
        obj._trait_values[self.name] = new_value
        if old_value != new_value:
            obj._notify_trait(self.name, old_value, new_value)
    def _validate(self, obj, value):
        # Dispatch to whichever validation hook the subclass provides,
        # in priority order: validate, is_valid_for, value_for.
        if hasattr(self, 'validate'):
            return self.validate(obj, value)
        elif hasattr(self, 'is_valid_for'):
            valid = self.is_valid_for(value)
            if valid:
                return value
            else:
                raise TraitError('invalid value for type: %r' % value)
        elif hasattr(self, 'value_for'):
            return self.value_for(value)
        else:
            return value
    def info(self):
        # Human-readable description of acceptable values.
        return self.info_text
    def error(self, obj, value):
        # Build and raise a descriptive TraitError for an invalid value.
        if obj is not None:
            e = "The '%s' trait of %s instance must be %s, but a value of %s was specified." \
                % (self.name, class_of(obj),
                   self.info(), repr_type(value))
        else:
            e = "The '%s' trait must be %s, but a value of %r was specified." \
                % (self.name, self.info(), repr_type(value))
        raise TraitError(e)
    def get_metadata(self, key):
        # Missing keys read as None (relied on by traits()/class_traits()).
        return getattr(self, '_metadata', {}).get(key, None)
    def set_metadata(self, key, value):
        getattr(self, '_metadata', {})[key] = value
#-----------------------------------------------------------------------------
# The HasTraits implementation
#-----------------------------------------------------------------------------
class MetaHasTraits(type):
    """A metaclass for HasTraits.

    This metaclass makes sure that any TraitType class attributes are
    instantiated and sets their name attribute.
    """
    def __new__(mcls, name, bases, classdict):
        """Create the HasTraits class.

        This instantiates all TraitTypes in the class dict and sets their
        :attr:`name` attribute.
        """
        for k,v in iteritems(classdict):
            if isinstance(v, TraitType):
                v.name = k
            elif inspect.isclass(v):
                if issubclass(v, TraitType):
                    # A bare TraitType subclass was declared as the
                    # attribute: instantiate it with defaults so it works
                    # like a trait instance.
                    vinst = v()
                    vinst.name = k
                    classdict[k] = vinst
        return super(MetaHasTraits, mcls).__new__(mcls, name, bases, classdict)
    def __init__(cls, name, bases, classdict):
        """Finish initializing the HasTraits class.

        This sets the :attr:`this_class` attribute of each TraitType in the
        class dict to the newly created class ``cls``.
        """
        for k, v in iteritems(classdict):
            if isinstance(v, TraitType):
                v.this_class = cls
        super(MetaHasTraits, cls).__init__(name, bases, classdict)
class HasTraits(py3compat.with_metaclass(MetaHasTraits, object)):
    """Base class whose TraitType class attributes become validated,
    notifying instance attributes."""
    def __new__(cls, *args, **kw):
        # This is needed because in Python 2.6 object.__new__ only accepts
        # the cls argument.
        new_meth = super(HasTraits, cls).__new__
        if new_meth is object.__new__:
            inst = new_meth(cls)
        else:
            inst = new_meth(cls, **kw)
        inst._trait_values = {}      # per-instance trait value storage
        inst._trait_notifiers = {}   # name -> list of dynamic handlers
        inst._trait_dyn_inits = {}   # name -> deferred default initializer
        # Here we tell all the TraitType instances to set their default
        # values on the instance.
        for key in dir(cls):
            # Some descriptors raise AttributeError like zope.interface's
            # __provides__ attributes even though they exist. This causes
            # AttributeErrors even though they are listed in dir(cls).
            try:
                value = getattr(cls, key)
            except AttributeError:
                pass
            else:
                if isinstance(value, TraitType):
                    value.instance_init(inst)
        return inst
    def __init__(self, *args, **kw):
        # Allow trait values to be set using keyword arguments.
        # We need to use setattr for this to trigger validation and
        # notifications.
        for key, value in iteritems(kw):
            setattr(self, key, value)
    def _notify_trait(self, name, old_value, new_value):
        """Invoke all dynamic and static change handlers for ``name``."""
        # First dynamic ones
        callables = []
        callables.extend(self._trait_notifiers.get(name,[]))
        callables.extend(self._trait_notifiers.get('anytrait',[]))
        # Now static ones
        try:
            cb = getattr(self, '_%s_changed' % name)
        except:
            pass
        else:
            callables.append(cb)
        # Call them all now
        for c in callables:
            # Traits catches and logs errors here. I allow them to raise
            if callable(c):
                argspec = inspect.getargspec(c)
                nargs = len(argspec[0])
                # Bound methods have an additional 'self' argument
                # I don't know how to treat unbound methods, but they
                # can't really be used for callbacks.
                if isinstance(c, types.MethodType):
                    offset = -1
                else:
                    offset = 0
                # Dispatch on handler arity: handler(), handler(name),
                # handler(name, new) or handler(name, old, new).
                if nargs + offset == 0:
                    c()
                elif nargs + offset == 1:
                    c(name)
                elif nargs + offset == 2:
                    c(name, new_value)
                elif nargs + offset == 3:
                    c(name, old_value, new_value)
                else:
                    raise TraitError('a trait changed callback '
                                     'must have 0-3 arguments.')
            else:
                raise TraitError('a trait changed callback '
                                 'must be callable.')
    def _add_notifiers(self, handler, name):
        # Register handler under name, skipping exact duplicates.
        if name not in self._trait_notifiers:
            nlist = []
            self._trait_notifiers[name] = nlist
        else:
            nlist = self._trait_notifiers[name]
        if handler not in nlist:
            nlist.append(handler)
    def _remove_notifiers(self, handler, name):
        # Silently ignore handlers that were never registered.
        if name in self._trait_notifiers:
            nlist = self._trait_notifiers[name]
            try:
                index = nlist.index(handler)
            except ValueError:
                pass
            else:
                del nlist[index]
    def on_trait_change(self, handler, name=None, remove=False):
        """Setup a handler to be called when a trait changes.

        This is used to setup dynamic notifications of trait changes.

        Static handlers can be created by creating methods on a HasTraits
        subclass with the naming convention '_[traitname]_changed'. Thus,
        to create static handler for the trait 'a', create the method
        _a_changed(self, name, old, new) (fewer arguments can be used, see
        below).

        Parameters
        ----------
        handler : callable
            A callable that is called when a trait changes. Its
            signature can be handler(), handler(name), handler(name, new)
            or handler(name, old, new).
        name : list, str, None
            If None, the handler will apply to all traits. If a list
            of str, handler will apply to all names in the list. If a
            str, the handler will apply just to that name.
        remove : bool
            If False (the default), then install the handler. If True
            then uninstall it.
        """
        if remove:
            names = parse_notifier_name(name)
            for n in names:
                self._remove_notifiers(handler, n)
        else:
            names = parse_notifier_name(name)
            for n in names:
                self._add_notifiers(handler, n)
    @classmethod
    def class_trait_names(cls, **metadata):
        """Get a list of all the names of this class's traits.

        This method is just like the :meth:`trait_names` method, but is unbound.
        """
        return cls.class_traits(**metadata).keys()
    @classmethod
    def class_traits(cls, **metadata):
        """Get a list of all the traits of this class.

        This method is just like the :meth:`traits` method, but is unbound.

        The TraitTypes returned don't know anything about the values
        that the various HasTrait's instances are holding.

        This follows the same algorithm as traits does and does not allow
        for any simple way of specifying merely that a metadata name
        exists, but has any value. This is because get_metadata returns
        None if a metadata key doesn't exist.
        """
        traits = dict([memb for memb in getmembers(cls) if \
                     isinstance(memb[1], TraitType)])
        if len(metadata) == 0:
            return traits
        # Plain metadata values become equality predicates.
        for meta_name, meta_eval in metadata.items():
            if type(meta_eval) is not FunctionType:
                metadata[meta_name] = _SimpleTest(meta_eval)
        result = {}
        for name, trait in traits.items():
            for meta_name, meta_eval in metadata.items():
                if not meta_eval(trait.get_metadata(meta_name)):
                    break
            else:
                # All metadata predicates matched.
                result[name] = trait
        return result
    def trait_names(self, **metadata):
        """Get a list of all the names of this class's traits."""
        return self.traits(**metadata).keys()
    def traits(self, **metadata):
        """Get a list of all the traits of this class.

        The TraitTypes returned don't know anything about the values
        that the various HasTrait's instances are holding.

        This follows the same algorithm as traits does and does not allow
        for any simple way of specifying merely that a metadata name
        exists, but has any value. This is because get_metadata returns
        None if a metadata key doesn't exist.
        """
        traits = dict([memb for memb in getmembers(self.__class__) if \
                     isinstance(memb[1], TraitType)])
        if len(metadata) == 0:
            return traits
        # Plain metadata values become equality predicates.
        for meta_name, meta_eval in metadata.items():
            if type(meta_eval) is not FunctionType:
                metadata[meta_name] = _SimpleTest(meta_eval)
        result = {}
        for name, trait in traits.items():
            for meta_name, meta_eval in metadata.items():
                if not meta_eval(trait.get_metadata(meta_name)):
                    break
            else:
                # All metadata predicates matched.
                result[name] = trait
        return result
    def trait_metadata(self, traitname, key):
        """Get metadata values for trait by key."""
        try:
            trait = getattr(self.__class__, traitname)
        except AttributeError:
            raise TraitError("Class %s does not have a trait named %s" %
                             (self.__class__.__name__, traitname))
        else:
            return trait.get_metadata(key)
#-----------------------------------------------------------------------------
# Actual TraitTypes implementations/subclasses
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# TraitTypes subclasses for handling classes and instances of classes
#-----------------------------------------------------------------------------
class ClassBasedTraitType(TraitType):
    """A trait with error reporting for Type, Instance and This."""

    def error(self, obj, value):
        """Raise a TraitError describing why ``value`` was rejected.

        Parameters
        ----------
        obj : HasTraits instance or None
            The object owning the trait, or None if unbound.
        value : any
            The offending value.

        Raises
        ------
        TraitError : always.
        """
        kind = type(value)
        # Python 2 old-style instances all share type InstanceType, so
        # report the underlying class name instead of the useless type.
        if (not py3compat.PY3) and kind is InstanceType:
            msg = 'class %s' % value.__class__.__name__
        else:
            msg = '%s (i.e. %s)' % ( str( kind )[1:-1], repr( value ) )
        if obj is not None:
            e = "The '%s' trait of %s instance must be %s, but a value of %s was specified." \
                % (self.name, class_of(obj),
                   self.info(), msg)
        else:
            # Fix: msg is already a formatted description; %r wrapped it in
            # quotes here, unlike the identical branch above which uses %s.
            e = "The '%s' trait must be %s, but a value of %s was specified." \
                % (self.name, self.info(), msg)
        raise TraitError(e)
class Type(ClassBasedTraitType):
    """A trait whose value must be a subclass of a specified class."""

    def __init__ (self, default_value=None, klass=None, allow_none=True, **metadata ):
        """Construct a Type trait

        A Type trait specifies that its values must be subclasses of
        a particular class.

        If only ``default_value`` is given, it is used for the ``klass`` as
        well.

        Parameters
        ----------
        default_value : class, str or None
            The default value must be a subclass of klass.  If an str,
            the str must be a fully specified class name, like 'foo.bar.Bah'.
            The string is resolved into real class, when the parent
            :class:`HasTraits` class is instantiated.
        klass : class, str, None
            Values of this trait must be a subclass of klass.  The klass
            may be specified in a string like: 'foo.bar.MyClass'.
            The string is resolved into real class, when the parent
            :class:`HasTraits` class is instantiated.
        allow_none : boolean
            Indicates whether None is allowed as an assignable value. Even if
            ``False``, the default value may be ``None``.
        """
        if default_value is None:
            if klass is None:
                klass = object
        elif klass is None:
            # Only a default was given: it doubles as the required class.
            klass = default_value
        if not (inspect.isclass(klass) or isinstance(klass, py3compat.string_types)):
            raise TraitError("A Type trait must specify a class.")
        self.klass = klass
        self._allow_none = allow_none
        super(Type, self).__init__(default_value, **metadata)

    def validate(self, obj, value):
        """Validates that the value is a valid object instance."""
        try:
            if issubclass(value, self.klass):
                return value
        except TypeError:
            # issubclass raises TypeError when ``value`` is not a class (or
            # self.klass is still an unresolved string).  The previous bare
            # ``except:`` also swallowed KeyboardInterrupt/SystemExit.
            if (value is None) and (self._allow_none):
                return value
        self.error(obj, value)

    def info(self):
        """ Returns a description of the trait."""
        if isinstance(self.klass, py3compat.string_types):
            klass = self.klass
        else:
            klass = self.klass.__name__
        result = 'a subclass of ' + klass
        if self._allow_none:
            return result + ' or None'
        return result

    def instance_init(self, obj):
        # Resolve string class names lazily, at HasTraits instantiation time.
        self._resolve_classes()
        super(Type, self).instance_init(obj)

    def _resolve_classes(self):
        # Turn dotted-name strings into real class objects.
        if isinstance(self.klass, py3compat.string_types):
            self.klass = import_item(self.klass)
        if isinstance(self.default_value, py3compat.string_types):
            self.default_value = import_item(self.default_value)

    def get_default_value(self):
        return self.default_value
class DefaultValueGenerator(object):
"""A class for generating new default value instances."""
def __init__(self, *args, **kw):
self.args = args
self.kw = kw
def generate(self, klass):
return klass(*self.args, **self.kw)
class Instance(ClassBasedTraitType):
    """A trait whose value must be an instance of a specified class.

    The value can also be an instance of a subclass of the specified class.
    """

    def __init__(self, klass=None, args=None, kw=None,
                 allow_none=True, **metadata ):
        """Construct an Instance trait.

        This trait allows values that are instances of a particular
        class or its subclasses.  Our implementation is quite different
        from that of enthought.traits as we don't allow instances to be used
        for klass and we handle the ``args`` and ``kw`` arguments differently.

        Parameters
        ----------
        klass : class, str
            The class that forms the basis for the trait.  Class names
            can also be specified as strings, like 'foo.bar.Bar'.
        args : tuple
            Positional arguments for generating the default value.
        kw : dict
            Keyword arguments for generating the default value.
        allow_none : bool
            Indicates whether None is allowed as a value.

        Notes
        -----
        If both ``args`` and ``kw`` are None, then the default value is None.
        If ``args`` is a tuple and ``kw`` is a dict, then the default is
        created as ``klass(*args, **kw)``.  If either ``args`` or ``kw`` is
        None (but not both), it is replaced by ``()`` or ``{}``.
        """
        self._allow_none = allow_none
        # klass is mandatory and must be a class or a dotted-name string.
        if (klass is None) or (not (inspect.isclass(klass) or isinstance(klass, py3compat.string_types))):
            raise TraitError('The klass argument must be a class'
                             ' you gave: %r' % klass)
        self.klass = klass
        # self.klass is a class, so handle default_value
        if args is None and kw is None:
            default_value = None
        else:
            if args is None:
                # kw is not None
                args = ()
            elif kw is None:
                # args is not None
                kw = {}
            if not isinstance(kw, dict):
                raise TraitError("The 'kw' argument must be a dict or None.")
            if not isinstance(args, tuple):
                raise TraitError("The 'args' argument must be a tuple or None.")
            # Defer instantiation: each HasTraits instance gets a fresh default.
            default_value = DefaultValueGenerator(*args, **kw)
        super(Instance, self).__init__(default_value, **metadata)

    def validate(self, obj, value):
        # None is accepted only when allow_none was requested.
        if value is None:
            if self._allow_none:
                return value
            self.error(obj, value)
        # Subclasses of self.klass are accepted too (isinstance semantics).
        if isinstance(value, self.klass):
            return value
        else:
            self.error(obj, value)

    def info(self):
        # Human-readable description used in error messages.
        if isinstance(self.klass, py3compat.string_types):
            klass = self.klass
        else:
            klass = self.klass.__name__
        result = class_of(klass)
        if self._allow_none:
            return result + ' or None'
        return result

    def instance_init(self, obj):
        # Resolve dotted-name strings to real classes before first use.
        self._resolve_classes()
        super(Instance, self).instance_init(obj)

    def _resolve_classes(self):
        if isinstance(self.klass, py3compat.string_types):
            self.klass = import_item(self.klass)

    def get_default_value(self):
        """Instantiate a default value instance.

        This is called when the containing HasTraits classes'
        :meth:`__new__` method is called to ensure that a unique instance
        is created for each HasTraits instance.
        """
        dv = self.default_value
        if isinstance(dv, DefaultValueGenerator):
            return dv.generate(self.klass)
        else:
            return dv
class This(ClassBasedTraitType):
    """A trait for instances of the class containing this trait.

    Because of how and when class bodies are executed, the ``This``
    trait can only have a default value of None.  For that reason, and
    because default values are always validated, ``allow_none`` is
    effectively always true.
    """

    info_text = 'an instance of the same type as the receiver or None'

    def __init__(self, **metadata):
        super(This, self).__init__(None, **metadata)

    def validate(self, obj, value):
        # Open question: a value whose type is a *superclass* of
        # obj.__class__ is rejected here, which is debatable when the
        # This trait was itself defined on that superclass.
        if value is None or isinstance(value, self.this_class):
            return value
        self.error(obj, value)
#-----------------------------------------------------------------------------
# Basic TraitTypes implementations/subclasses
#-----------------------------------------------------------------------------
class Any(TraitType):
    # Accepts any value whatsoever; no validate() override is needed.
    default_value = None
    info_text = 'any value'
class Int(TraitType):
    """A trait whose value must be an int."""
    default_value = 0
    info_text = 'an int'

    def validate(self, obj, value):
        # Guard clause: anything other than an int is rejected.
        if not isinstance(value, int):
            self.error(obj, value)
        return value
class CInt(Int):
    """A casting version of the int trait."""
    def validate(self, obj, value):
        try:
            return int(value)
        except Exception:
            # int() normally raises ValueError/TypeError, but a custom
            # __int__ may raise anything; the previous bare ``except:``
            # also trapped KeyboardInterrupt and SystemExit, which must
            # propagate.
            self.error(obj, value)
# On Python 3 the int type already covers arbitrary precision, so the
# long-related traits simply alias Int/CInt.  On Python 2 distinct trait
# classes are defined for the separate ``long`` type.
if py3compat.PY3:
    Long, CLong = Int, CInt
    Integer = Int
else:
    class Long(TraitType):
        """A long integer trait."""
        default_value = 0
        info_text = 'a long'
        def validate(self, obj, value):
            # Accept longs directly; promote plain ints to long.
            if isinstance(value, long):
                return value
            if isinstance(value, int):
                return long(value)
            self.error(obj, value)

    class CLong(Long):
        """A casting version of the long integer trait."""
        def validate(self, obj, value):
            try:
                return long(value)
            except:
                self.error(obj, value)

    class Integer(TraitType):
        """An integer trait.

        Longs that are unnecessary (<= sys.maxint) are cast to ints."""
        default_value = 0
        info_text = 'an integer'
        def validate(self, obj, value):
            if isinstance(value, int):
                return value
            if isinstance(value, long):
                # downcast longs that fit in int:
                # note that int(n > sys.maxint) returns a long, so
                # we don't need a condition on this cast
                return int(value)
            if sys.platform == "cli":
                # IronPython: System.Int64 values also count as integers.
                from System import Int64
                if isinstance(value, Int64):
                    return int(value)
            self.error(obj, value)
class Float(TraitType):
    """A trait whose value must be a float (ints are promoted)."""
    default_value = 0.0
    info_text = 'a float'

    def validate(self, obj, value):
        # Floats pass through untouched; ints are promoted to float.
        if isinstance(value, float):
            return value
        if not isinstance(value, int):
            self.error(obj, value)
        return float(value)
class CFloat(Float):
    """A casting version of the float trait."""
    def validate(self, obj, value):
        try:
            return float(value)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            self.error(obj, value)
class Complex(TraitType):
    """A trait whose value must be a complex number (ints/floats promoted)."""
    default_value = 0.0 + 0.0j
    info_text = 'a complex number'

    def validate(self, obj, value):
        if not isinstance(value, complex):
            # Real numbers are promoted; everything else is rejected.
            if not isinstance(value, (float, int)):
                self.error(obj, value)
            value = complex(value)
        return value
class CComplex(Complex):
    """A casting version of the complex number trait."""
    def validate (self, obj, value):
        try:
            return complex(value)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            self.error(obj, value)
# We should always be explicit about whether we're using bytes or unicode, both
# for Python 3 conversion and for reliable unicode behaviour on Python 2. So
# we don't have a Str type.
class Bytes(TraitType):
    """A trait whose value must be a byte string."""
    default_value = b''
    info_text = 'a bytes object'

    def validate(self, obj, value):
        # Guard clause: only bytes objects are accepted, no coercion.
        if not isinstance(value, bytes):
            self.error(obj, value)
        return value
class CBytes(Bytes):
    """A casting version of the byte string trait."""
    def validate(self, obj, value):
        try:
            return bytes(value)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            self.error(obj, value)
class Unicode(TraitType):
    """A trait whose value must be a unicode string (bytes are decoded)."""
    default_value = u''
    info_text = 'a unicode string'

    def validate(self, obj, value):
        # Bytes are decoded with the default codec; unicode passes through.
        if isinstance(value, bytes):
            return py3compat.unicode_type(value)
        if not isinstance(value, py3compat.unicode_type):
            self.error(obj, value)
        return value
class CUnicode(Unicode):
    """A casting version of the unicode trait."""
    def validate(self, obj, value):
        try:
            return py3compat.unicode_type(value)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            self.error(obj, value)
class ObjectName(TraitType):
    """A string holding a valid object name in this version of Python.

    This does not check that the name exists in any scope."""
    info_text = "a valid object identifier in Python"

    # coerce_str normalizes the value to ``str`` before validation.
    if py3compat.PY3:
        # Python 3: str is already unicode, nothing to coerce.
        coerce_str = staticmethod(lambda _,s: s)
    else:
        # Python 2:
        def coerce_str(self, obj, value):
            "In Python 2, coerce ascii-only unicode to str"
            if isinstance(value, unicode):
                try:
                    return str(value)
                except UnicodeEncodeError:
                    # Non-ascii identifiers are invalid on Python 2.
                    self.error(obj, value)
            return value

    def validate(self, obj, value):
        # Coerce first, then require a str that is a legal identifier.
        value = self.coerce_str(obj, value)
        if isinstance(value, str) and py3compat.isidentifier(value):
            return value
        self.error(obj, value)
class DottedObjectName(ObjectName):
    """A string holding a valid dotted object name in Python, such as A.b3._c"""
    def validate(self, obj, value):
        # Same coercion as ObjectName, but dots are permitted between parts.
        coerced = self.coerce_str(obj, value)
        if isinstance(coerced, str) and py3compat.isidentifier(coerced, dotted=True):
            return coerced
        self.error(obj, coerced)
class Bool(TraitType):
    """A trait whose value must be exactly True or False."""
    default_value = False
    info_text = 'a boolean'

    def validate(self, obj, value):
        # Guard clause: only genuine bools pass, no truthiness coercion.
        if not isinstance(value, bool):
            self.error(obj, value)
        return value
class CBool(Bool):
    """A casting version of the boolean trait."""
    def validate(self, obj, value):
        try:
            return bool(value)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed (bool() can still raise
            # through a custom __bool__/__len__).
            self.error(obj, value)
class Enum(TraitType):
    """An enum trait whose value must be a member of a given sequence."""

    def __init__(self, values, default_value=None, allow_none=True, **metadata):
        self.values = values
        self._allow_none = allow_none
        super(Enum, self).__init__(default_value, **metadata)

    def validate(self, obj, value):
        # Accept None when permitted, otherwise require membership.
        if (value is None and self._allow_none) or value in self.values:
            return value
        self.error(obj, value)

    def info(self):
        """ Returns a description of the trait."""
        desc = 'any of ' + repr(self.values)
        return desc + ' or None' if self._allow_none else desc
class CaselessStrEnum(Enum):
    """An enum of strings whose membership test ignores case.

    The canonical (original-case) member is returned on a match."""

    def validate(self, obj, value):
        if value is None and self._allow_none:
            return value
        if not isinstance(value, py3compat.string_types):
            self.error(obj, value)
        lowered = value.lower()
        for candidate in self.values:
            if candidate.lower() == lowered:
                return candidate
        self.error(obj, value)
class Container(Instance):
    """An instance of a container (list, set, etc.)

    To be subclassed by overriding klass.
    """
    # The concrete container type; subclasses set list/set/tuple.
    klass = None
    # Types accepted as a default_value argument.
    _valid_defaults = SequenceTypes
    # Optional element trait used by validate_elements.
    _trait = None

    def __init__(self, trait=None, default_value=None, allow_none=True,
                 **metadata):
        """Create a container trait type from a list, set, or tuple.

        The default value is created by doing ``List(default_value)``,
        which creates a copy of the ``default_value``.

        ``trait`` can be specified, which restricts the type of elements
        in the container to that TraitType.

        If only one arg is given and it is not a Trait, it is taken as
        ``default_value``:

        ``c = List([1,2,3])``

        Parameters
        ----------
        trait : TraitType [ optional ]
            the type for restricting the contents of the Container.  If unspecified,
            types are not checked.
        default_value : SequenceType [ optional ]
            The default value for the Trait.  Must be list/tuple/set, and
            will be cast to the container type.
        allow_none : Bool [ default True ]
            Whether to allow the value to be None
        **metadata : any
            further keys for extensions to the Trait (e.g. config)
        """
        # allow List([values]):
        if default_value is None and not is_trait(trait):
            default_value = trait
            trait = None
        # Translate default_value into ctor args for Instance: () means
        # "empty container", (default_value,) means "copy of default".
        if default_value is None:
            args = ()
        elif isinstance(default_value, self._valid_defaults):
            args = (default_value,)
        else:
            raise TypeError('default value of %s was %s' %(self.__class__.__name__, default_value))
        if is_trait(trait):
            # Accept a TraitType class or an instance; instantiate if needed.
            self._trait = trait() if isinstance(trait, type) else trait
            self._trait.name = 'element'
        elif trait is not None:
            raise TypeError("`trait` must be a Trait or None, got %s"%repr_type(trait))
        super(Container,self).__init__(klass=self.klass, args=args,
                                       allow_none=allow_none, **metadata)

    def element_error(self, obj, element, validator):
        """Raise a TraitError describing an invalid container element."""
        e = "Element of the '%s' trait of %s instance must be %s, but a value of %s was specified." \
            % (self.name, class_of(obj), validator.info(), repr_type(element))
        raise TraitError(e)

    def validate(self, obj, value):
        # First validate the container itself (via Instance), then the
        # individual elements.
        value = super(Container, self).validate(obj, value)
        if value is None:
            return value
        value = self.validate_elements(obj, value)
        return value

    def validate_elements(self, obj, value):
        # Validate each element against self._trait, rebuilding the
        # container from the (possibly coerced) elements.
        validated = []
        if self._trait is None or isinstance(self._trait, Any):
            return value
        for v in value:
            try:
                v = self._trait.validate(obj, v)
            except TraitError:
                self.element_error(obj, v, self._trait)
            else:
                validated.append(v)
        return self.klass(validated)
class List(Container):
    """An instance of a Python list."""
    klass = list

    def __init__(self, trait=None, default_value=None, minlen=0, maxlen=sys.maxsize,
                 allow_none=True, **metadata):
        """Create a List trait type from a list, set, or tuple.

        The default value is created by doing ``List(default_value)``,
        which creates a copy of the ``default_value``.

        ``trait`` can be specified, which restricts the type of elements
        in the container to that TraitType.

        If only one arg is given and it is not a Trait, it is taken as
        ``default_value``:

        ``c = List([1,2,3])``

        Parameters
        ----------
        trait : TraitType [ optional ]
            the type for restricting the contents of the Container.  If unspecified,
            types are not checked.
        default_value : SequenceType [ optional ]
            The default value for the Trait.  Must be list/tuple/set, and
            will be cast to the container type.
        minlen : Int [ default 0 ]
            The minimum length of the input list
        maxlen : Int [ default sys.maxsize ]
            The maximum length of the input list
        allow_none : Bool [ default True ]
            Whether to allow the value to be None
        **metadata : any
            further keys for extensions to the Trait (e.g. config)
        """
        self._minlen = minlen
        self._maxlen = maxlen
        super(List, self).__init__(trait=trait, default_value=default_value,
                                   allow_none=allow_none, **metadata)

    def length_error(self, obj, value):
        """Raise a TraitError describing a length-bounds violation."""
        e = "The '%s' trait of %s instance must be of length %i <= L <= %i, but a value of %s was specified." \
            % (self.name, class_of(obj), self._minlen, self._maxlen, value)
        raise TraitError(e)

    def validate_elements(self, obj, value):
        # Enforce the length bounds before element-wise validation.
        length = len(value)
        if length < self._minlen or length > self._maxlen:
            self.length_error(obj, value)
        return super(List, self).validate_elements(obj, value)
class Set(Container):
    """An instance of a Python set."""
    # All behavior is inherited from Container; only the concrete type differs.
    klass = set
class Tuple(Container):
    """An instance of a Python tuple."""
    klass = tuple

    def __init__(self, *traits, **metadata):
        """Tuple(*traits, default_value=None, allow_none=True, **medatata)

        Create a tuple from a list, set, or tuple.

        Create a fixed-type tuple with Traits:

        ``t = Tuple(Int, Str, CStr)``

        would be length 3, with Int,Str,CStr for each element.

        If only one arg is given and it is not a Trait, it is taken as
        default_value:

        ``t = Tuple((1,2,3))``

        Otherwise, ``default_value`` *must* be specified by keyword.

        Parameters
        ----------
        *traits : TraitTypes [ optional ]
            the types for restricting the contents of the Tuple.  If unspecified,
            types are not checked. If specified, then each positional argument
            corresponds to an element of the tuple.  Tuples defined with traits
            are of fixed length.
        default_value : SequenceType [ optional ]
            The default value for the Tuple.  Must be list/tuple/set, and
            will be cast to a tuple. If `traits` are specified, the
            `default_value` must conform to the shape and type they specify.
        allow_none : Bool [ default True ]
            Whether to allow the value to be None
        **metadata : any
            further keys for extensions to the Trait (e.g. config)
        """
        default_value = metadata.pop('default_value', None)
        allow_none = metadata.pop('allow_none', True)
        # allow Tuple((values,)):
        if len(traits) == 1 and default_value is None and not is_trait(traits[0]):
            default_value = traits[0]
            traits = ()
        # Translate default_value into Instance ctor args, as in Container.
        if default_value is None:
            args = ()
        elif isinstance(default_value, self._valid_defaults):
            args = (default_value,)
        else:
            raise TypeError('default value of %s was %s' %(self.__class__.__name__, default_value))
        self._traits = []
        for trait in traits:
            # Accept TraitType classes or instances.
            t = trait() if isinstance(trait, type) else trait
            t.name = 'element'
            self._traits.append(t)
        if self._traits and default_value is None:
            # don't allow default to be an empty container if length is specified
            args = None
        # NOTE: deliberately skips Container.__init__ (which handles the
        # single-trait signature) and calls Instance.__init__ directly.
        super(Container,self).__init__(klass=self.klass, args=args,
                                       allow_none=allow_none, **metadata)

    def validate_elements(self, obj, value):
        if not self._traits:
            # nothing to validate
            return value
        # Fixed-length tuple: element count must match the trait count.
        if len(value) != len(self._traits):
            e = "The '%s' trait of %s instance requires %i elements, but a value of %s was specified." \
                % (self.name, class_of(obj), len(self._traits), repr_type(value))
            raise TraitError(e)
        validated = []
        # Validate positionally: trait i validates element i.
        for t,v in zip(self._traits, value):
            try:
                v = t.validate(obj, v)
            except TraitError:
                self.element_error(obj, v, t)
            else:
                validated.append(v)
        return tuple(validated)
class Dict(Instance):
    """An instance of a Python dict."""

    def __init__(self, default_value=None, allow_none=True, **metadata):
        """Create a dict trait type from a dict.

        The default value is created by doing ``dict(default_value)``,
        which creates a copy of the ``default_value``.
        """
        # dict() accepts a dict, a sequence of pairs, or an empty tuple.
        if default_value is None:
            ctor_args = ((),)
        elif isinstance(default_value, dict) or isinstance(default_value, SequenceTypes):
            ctor_args = (default_value,)
        else:
            raise TypeError('default value of Dict was %s' % default_value)
        super(Dict,self).__init__(klass=dict, args=ctor_args,
                                  allow_none=allow_none, **metadata)
class TCPAddress(TraitType):
    """A trait for an (ip, port) tuple.

    This allows for both IPv4 IP addresses as well as hostnames.
    """
    default_value = ('127.0.0.1', 0)
    info_text = 'an (ip, port) tuple'

    def validate(self, obj, value):
        # A valid address is a 2-tuple of (string host, int port in 0..65535).
        if (isinstance(value, tuple)
                and len(value) == 2
                and isinstance(value[0], py3compat.string_types)
                and isinstance(value[1], int)
                and 0 <= value[1] <= 65535):
            return value
        self.error(obj, value)
class CRegExp(TraitType):
    """A casting compiled regular expression trait.

    Accepts both strings and compiled regular expressions. The resulting
    attribute will be a compiled regular expression."""
    info_text = 'a regular expression'

    def validate(self, obj, value):
        try:
            return re.compile(value)
        except Exception:
            # re.compile raises re.error for bad patterns and TypeError for
            # non-string input; the previous bare ``except:`` also trapped
            # KeyboardInterrupt/SystemExit, which must propagate.
            self.error(obj, value)
| {
"content_hash": "e30a1cbc5757cae30808a9091c691c87",
"timestamp": "",
"source": "github",
"line_count": 1437,
"max_line_length": 111,
"avg_line_length": 32.920668058455114,
"alnum_prop": 0.560191937768195,
"repo_name": "Carreau/IPConfigurable",
"id": "c6add7d1ce92a5917bfda15b76467598318f0a37",
"size": "47325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "IPConfigurable/traitlets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "134105"
}
],
"symlink_target": ""
} |
import json
from tempodb.temporal.validate import convert_iso_stamp, check_time_param
from cursor import DataPointCursor, SeriesCursor, SingleValueCursor
class JSONSerializable(object):
    """Base class for objects that are serializable to and from JSON.

    This class defines default methods for serializing each way that use
    the class's "properties" class variable to determine what should be
    serialized or deserialized.  For example::

        class MySerialized(JSONSerializable)
            properties = ['foo', 'bar']

    This would define a class that expects to have the 'foo' and 'bar'
    keys in JSON data and would likewise serialize a JSON object with
    those keys.

    The base constructor calls the :meth:`from_json` method, which
    enforces these constraints for object construction.  If you override
    this constructor (for example, to provide static initialization of
    some variables), it is highly recommended that the subclass constructor
    call this constructor at some point through super().

    :param string json_text: the JSON string to deserialize from"""

    # Keys expected in incoming JSON and emitted when serializing.
    properties = []

    def __init__(self, json_text, response):
        self.from_json(json_text)
        self.response = response

    def from_json(self, json_text):
        """Deserialize a JSON object into this object.  This method will
        check that the JSON object has the required keys and will set each
        of the keys in that JSON object as an instance attribute of this
        object.

        :param json_text: the JSON text or object to deserialize from
        :type json_text: dict or string
        :raises ValueError: if the JSON object lacks an expected key
        :rtype: None"""
        #due to the architecture of response parsing, particularly
        #where the API returns lists, the JSON might have already been
        #parsed by the time it gets here
        # isinstance(basestring) replaces the previous exact-type check
        # (type(x) in [str, unicode]), so str/unicode subclasses parse too.
        if isinstance(json_text, basestring):
            j = json.loads(json_text)
        else:
            j = json_text
        try:
            for p in self.properties:
                setattr(self, p, j[p])
        # "except E as e" is valid on Python 2.6+ and Python 3, unlike the
        # previous Python-2-only "except KeyError, e" form.
        except KeyError as e:
            msg = 'Expected key %s in JSON object, found None' % str(e)
            raise ValueError(msg)

    def to_json(self):
        """Serialize an object to JSON based on its "properties" class
        attribute.

        :rtype: string"""
        # Delegate to to_dictionary to avoid duplicating the property loop.
        return json.dumps(self.to_dictionary())

    def to_dictionary(self):
        """Serialize an object into dictionary form.  Useful if you have to
        serialize an array of objects into JSON.  Otherwise, if you call the
        :meth:`to_json` method on each object in the list and then try to
        dump the array, you end up with an array with one string."""
        j = {}
        for p in self.properties:
            j[p] = getattr(self, p)
        return j
#PLACEHOLDER FOR EMPTY RESPONSES
class Nothing(object):
    """Used to represent empty responses.  This class should not be
    used directly in user code."""
    def __init__(self, *args, **kwargs):
        # Accept and ignore any arguments so it can stand in for any
        # domain-object constructor signature.
        pass
class SeriesSet(object):
    """Represents a set of Series objects as returned by the list series
    TempoDB API endpoint.  The SeriesSet exposes a cursor that can be
    iterated over to examine each series returned by the API."""

    def __init__(self, json_text, response):
        # Parse once and share the parsed payload with the cursor.
        parsed = json.loads(json_text)
        self.data = parsed
        self.cursor = SeriesCursor(parsed, Series, response)
class Series(JSONSerializable):
    """Represents a Series object from the TempoDB API.  Series objects
    are serialized to and from JSON using the :meth:`to_json` and
    :meth:`from_json` methods.

    Domain object attributes:

        * key: string
        * name: string
        * tags: list
        * attributes: dictionary"""

    properties = ['key', 'name', 'tags', 'attributes']

    def __init__(self, json_text, response):
        # The series-by-key endpoint wraps the payload in a 'series' key,
        # unlike the other endpoints; unwrap it when present.
        j = json.loads(json_text) if isinstance(json_text, basestring) else json_text
        payload = j['series'] if 'series' in j else json_text
        self.from_json(payload)
        self.response = response
class DataSet(JSONSerializable):
    """Represents a data set returned using the /data resource in the
    TempoDB API.  Depending on the original API call, some attributes of
    this object (such as rollup) could be None."""

    properties = ['data', 'rollup', 'tz']

    def __init__(self, json_text, response):
        #override to force the instantiation of a cursor
        super(DataSet, self).__init__(json_text, response)
        self.cursor = DataPointCursor(self.data, DataPoint, response)
        if self.rollup is not None:
            # Fix: Rollup is a JSONSerializable whose constructor requires
            # (json_text, response); the response argument was missing,
            # which raised TypeError whenever a rollup was present.
            self.rollup = Rollup(self.rollup, response)
class SingleValue(JSONSerializable):
    """Represents a data set returned by calling the single value
    endpoint of the TempoDB API.  This domain object is not cursored, so
    it is implemented separately from the more generic DataSet object.

    Domain object attributes:

        * series: :class:`Series` object
        * data: :class:`DataPoint` object"""

    properties = ['series', 'data']

    def __init__(self, json_text, response):
        #force conversion of the subobjects in this datatype after we get
        #them
        super(SingleValue, self).__init__(json_text, response)
        self.series = Series(self.series, response)
        if self.data is not None:
            self.data = DataPoint(self.data, response, self.data.get('tz'))

    def to_dictionary(self):
        """Serialize an object into dictionary form.  Useful if you have to
        serialize an array of objects into JSON.  Otherwise, if you call the
        :meth:`to_json` method on each object in the list and then try to
        dump the array, you end up with an array with one string."""
        return {
            'series': self.series.to_dictionary(),
            'data': self.data.to_dictionary(),
        }

    def to_json(self):
        """Serialize an object to JSON based on its "properties" class
        attribute.

        :rtype: string"""
        return json.dumps(self.to_dictionary())
class SeriesSummary(JSONSerializable):
    """Represents the summary of one series over a time interval, as
    returned by the summary endpoint of the TempoDB API.

    Domain object attributes (see ``properties``): series, summary, tz,
    start, end."""

    properties = ['series', 'summary', 'tz', 'start', 'end']

    def __init__(self, json_text, response, tz=None):
        # tz must be set before from_json runs (the super constructor calls
        # it), since from_json uses it to localize the start/end stamps.
        self.tz = tz
        super(SeriesSummary, self).__init__(json_text, response)
        # Promote raw sub-objects into their domain classes.
        self.series = Series(self.series, response)
        self.summary = Summary(self.summary, response)

    def from_json(self, json_text):
        """Deserialize a JSON object into this object.  This method will
        check that the JSON object has the required keys and will set each
        of the keys in that JSON object as an instance attribute of this
        object.  The 'start' and 'end' keys are converted from ISO8601
        strings into datetime objects using ``self.tz``.

        :param json_text: the JSON text or object to deserialize from
        :type json_text: dict or string
        :raises ValueError: if the JSON object lacks an expected key
        :rtype: None"""
        if type(json_text) in [str, unicode]:
            j = json.loads(json_text)
        else:
            j = json_text
        try:
            for p in self.properties:
                if p in ['start', 'end']:
                    # Timestamps are parsed into tz-aware datetimes.
                    val = convert_iso_stamp(j[p], self.tz)
                    setattr(self, p, val)
                else:
                    setattr(self, p, j[p])
        # Missing keys are tolerated here (unlike the base class), since
        # some API responses omit optional fields.
        except KeyError:
            pass

    def to_json(self):
        """Serialize an object to JSON based on its "properties" class
        attribute.

        :rtype: string"""
        return json.dumps(self.to_dictionary())

    def to_dictionary(self):
        """Serialize an object into dictionary form.  Useful if you have to
        serialize an array of objects into JSON.  Otherwise, if you call the
        :meth:`to_json` method on each object in the list and then try to
        dump the array, you end up with an array with one string."""
        # Datetimes go back out as ISO8601 strings; sub-objects recurse.
        d = {'start': self.start.isoformat(),
             'end': self.end.isoformat(),
             'tz': self.tz,
             'summary': self.summary.to_dictionary(),
             'series': self.series.to_dictionary()
             }
        return d
class Summary(JSONSerializable):
    """Represents the summary received from the TempoDB API when a data read
    request is sent.  The properties are summary statistics for the dataset
    returned."""
    # All (de)serialization behavior is inherited from JSONSerializable.
    properties = ['mean', 'sum', 'min', 'max', 'stddev', 'count']
class Rollup(JSONSerializable):
    """Represents the rollup information returned from the TempoDB API when
    the API call demands it."""
    # All (de)serialization behavior is inherited from JSONSerializable.
    properties = ['interval', 'function', 'tz']
class DataPoint(JSONSerializable):
    """Represents a single data point in a series.  To construct these objects
    in user code, use the class method :meth:`from_data`.

    Domain object attributes:

        * t: DateTime object
        * v: int or float
        * key: string (only present when writing DataPoints)
        * id: string (only present when writing DataPoints)"""

    properties = ['t', 'v', 'key', 'id']

    def __init__(self, json_text, response, tz=None):
        # tz must be set before from_json runs (the super constructor calls
        # it), since from_json uses it to localize the 't' stamp.
        self.tz = tz
        super(DataPoint, self).__init__(json_text, response)

    @classmethod
    def from_data(cls, time, value, series_id=None, key=None, tz=None):
        """Create a DataPoint object from data, rather than a JSON object or
        string.  This should be used by user code to construct DataPoints from
        Python-based data like Datetime objects and floats.

        The series_id and key arguments are only necessary if you are doing a
        multi write, in which case those arguments can be used to specify which
        series the DataPoint belongs to.

        If needed, the tz argument should be an Olsen database compliant string
        specifying the time zone for this DataPoint.  This argument is most
        often used internally when reading data from TempoDB.

        :param time: the point in time for this reading
        :type time: ISO8601 string or Datetime
        :param value: the value for this reading
        :type value: int or float
        :param string series_id: (optional) a series ID for this point
        :param string key: (optional) a key for this point
        :param string tz: (optional) a timezone for this point
        :rtype: :class:`DataPoint`"""
        t = check_time_param(time)
        if type(value) in [float, int]:
            v = value
        else:
            raise ValueError('Values must be int or float. Got "%s".' %
                             str(value))
        j = {
            't': t,
            'v': v,
            'id': series_id,
            'key': key
        }
        # Fix: this is a classmethod, so the first parameter is the class
        # (renamed self -> cls) and construction goes through cls rather
        # than the hard-coded DataPoint, so subclasses get subclass
        # instances back.
        return cls(j, None, tz=tz)

    def from_json(self, json_text):
        """Deserialize a JSON object into this object.  This method will
        check that the JSON object has the required keys and will set each
        of the keys in that JSON object as an instance attribute of this
        object.  The 't' key is converted from an ISO8601 string into a
        datetime object using ``self.tz``.

        :param json_text: the JSON text or object to deserialize from
        :type json_text: dict or string
        :raises ValueError: if the JSON object lacks an expected key
        :rtype: None"""
        if type(json_text) in [str, unicode]:
            j = json.loads(json_text)
        else:
            j = json_text
        try:
            for p in self.properties:
                if p == 't':
                    val = convert_iso_stamp(j[p], self.tz)
                    setattr(self, p, val)
                else:
                    setattr(self, p, j[p])
        #overriding this exception allows us to handle optional values like
        #id and key which are only present during particular API calls like
        #multi writes
        except KeyError:
            pass

    def to_json(self):
        """Serialize an object to JSON based on its "properties" class
        attribute.

        :rtype: string"""
        # Delegate to to_dictionary: the two previously duplicated the same
        # optional-attribute loop.
        return json.dumps(self.to_dictionary())

    def to_dictionary(self):
        """Serialize an object into dictionary form.  Useful if you have to
        serialize an array of objects into JSON.  Otherwise, if you call the
        :meth:`to_json` method on each object in the list and then try to
        dump the array, you end up with an array with one string."""
        j = {}
        for p in self.properties:
            # Optional attributes (id/key) may be absent entirely.
            try:
                v = getattr(self, p)
            except AttributeError:
                continue
            if v is not None:
                if p == 't':
                    # Datetimes are emitted as ISO8601 strings.
                    j[p] = getattr(self, p).isoformat()
                else:
                    j[p] = getattr(self, p)
        return j
class DataPointFound(JSONSerializable):
    """A specialized DataPoint returned by the /find endpoint of the
    TempoDB API.

    The ``start`` and ``end`` attributes delimit the time period in
    which the datapoint was found, ``t`` is the exact time at which the
    point was found, and ``v`` is the value of the point at that time.

    Domain object attributes:

        * start: DateTime object
        * end: DateTime object
        * v: int or long
        * t: DateTime object"""

    properties = ['interval', 'found']

    def __init__(self, json_text, response, tz=None):
        self.tz = tz
        super(DataPointFound, self).__init__(json_text, response)

    def from_json(self, json_text):
        """Deserialize a JSON object (or JSON text) into this object.

        Sets ``start``/``end`` from the "interval" member and ``t``/``v``
        from the "found" member, converting timestamps to datetimes.

        :param json_text: the JSON text or object to deserialize from
        :type json_text: dict or string
        :rtype: None"""

        if type(json_text) in [str, unicode]:
            parsed = json.loads(json_text)
        else:
            parsed = json_text

        try:
            for prop in self.properties:
                member = parsed[prop]
                if prop == 'interval':
                    self.start = convert_iso_stamp(member['start'], self.tz)
                    self.end = convert_iso_stamp(member['end'], self.tz)
                elif prop == 'found':
                    setattr(self, 't', convert_iso_stamp(member['t'], self.tz))
                    setattr(self, 'v', member['v'])
        # Swallowing KeyError lets optional members (like id and key,
        # present only for particular API calls such as multi writes)
        # be absent without aborting deserialization.
        except KeyError:
            pass

    def to_dictionary(self):
        """Serialize this object into dictionary form.

        Useful when serializing an array of objects into JSON; calling
        :meth:`to_json` per element and dumping the array would instead
        yield an array of strings."""
        return {
            'interval': {'start': self.start.isoformat(),
                         'end': self.end.isoformat()},
            'found': {'v': self.v, 't': self.t.isoformat()},
        }

    def to_json(self):
        """Serialize this object to a JSON string.

        :rtype: string"""
        return json.dumps(self.to_dictionary())
class MultiPoint(JSONSerializable):
    """A data point holding values for multiple series at a single
    timestamp, returned when performing a multi-series query.

    The ``v`` attribute is a dictionary mapping series key to value.

    Domain object attributes:

        * t: DateTime object
        * v: dictionary"""

    properties = ['t', 'v']

    def __init__(self, json_text, response, tz=None):
        self.tz = tz
        super(MultiPoint, self).__init__(json_text, response)

    def from_json(self, json_text):
        """Deserialize a JSON object (or JSON text) into this object.

        Every key in ``properties`` becomes an instance attribute; the
        timestamp member 't' is converted to a datetime first.

        :param json_text: the JSON text or object to deserialize from
        :type json_text: dict or string
        :rtype: None"""

        if type(json_text) in [str, unicode]:
            parsed = json.loads(json_text)
        else:
            parsed = json_text

        try:
            for prop in self.properties:
                value = parsed[prop]
                if prop == 't':
                    value = convert_iso_stamp(value, self.tz)
                setattr(self, prop, value)
        # Swallowing KeyError lets optional members (like id and key,
        # present only for particular API calls such as multi writes)
        # be absent without aborting deserialization.
        except KeyError:
            pass

    def to_json(self):
        """Serialize this object to a JSON string based on its
        "properties" class attribute.

        :rtype: string"""
        serializable = {}
        for prop in self.properties:
            try:
                value = getattr(self, prop)
            except AttributeError:
                continue
            if value is None:
                continue
            serializable[prop] = value.isoformat() if prop == 't' else value
        return json.dumps(serializable)

    def get(self, k):
        """Convenience method for getting values for individual series out of
        the MultiPoint. This is equivalent to calling::

            >>> point.v.get('foo')

        :param string k: the key to read
        :rtype: number"""
        return self.v.get(k)
| {
"content_hash": "b54ed8237111197e5576320395b08781",
"timestamp": "",
"source": "github",
"line_count": 533,
"max_line_length": 79,
"avg_line_length": 34.50844277673546,
"alnum_prop": 0.5959876039797749,
"repo_name": "tempodb/tempodb-python",
"id": "5ec686bd05141264d92d39336ffafc77b5064e46",
"size": "18393",
"binary": false,
"copies": "1",
"ref": "refs/heads/v1.0",
"path": "tempodb/protocol/objects.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "104467"
},
{
"name": "Shell",
"bytes": "6730"
}
],
"symlink_target": ""
} |
"""
The module ``search`` provides glob paths to search for content in a HDF5 file.
===============================================================================
Overview
===============================================================================
The module implements a strategy similar to Python's |glob|_ module for HDF5
files. In short, it uses regex patterns to match as many possible paths as it
can.
.. |glob| replace:: ``glob``
.. _glob: http://docs.python.org/2/library/glob.html
===============================================================================
API
===============================================================================
"""
__author__ = "John Kirkham <kirkhamj@janelia.hhmi.org>"
__date__ = "$Jun 18, 2014 20:06:44 EDT$"
import re
import collections
import itertools
import h5py
from nanshe.util import iters
# Need in order to have logging information no matter what.
from nanshe.util import prof
# Get the logger
trace_logger = prof.getTraceLogger(__name__)
@prof.log_call(trace_logger)
def get_matching_paths(a_filehandle, a_path_pattern):
    """
    Looks for existing paths that match the full provided pattern path.

    Returns a list of matches for the given file handle.

    Args:
        a_filehandle(h5py.File):        an HDF5 file.

        a_path_pattern(str):            an internal path (with patterns for
                                        each group) for the HDF5 file.

    Returns:
        (list):                         a list of matching paths.
    """

    # Recursion terminates at datasets/leaves or an exhausted pattern;
    # the single empty-string match lets the caller append nothing.
    if not (isinstance(a_filehandle, h5py.Group) and a_path_pattern):
        return([""])

    matches = []

    pattern = a_path_pattern.strip("/")
    split_at = pattern.find("/")
    if split_at == -1:
        head, tail = pattern, ""
    else:
        head = pattern[:split_at]
        tail = pattern[split_at + 1:]

    # Wrap both pattern and candidate in slashes so the whole group name
    # must be consumed by the pattern for a match.
    head_regex = re.compile("/" + head + "/")
    for member_name in a_filehandle:
        if head_regex.match("/" + member_name + "/") is not None:
            for sub_match in get_matching_paths(a_filehandle[member_name], tail):
                matches.append("/" + member_name + sub_match)

    return(matches)
@prof.log_call(trace_logger)
def get_matching_paths_groups(a_filehandle, a_path_pattern):
    """
    Looks for parts of the path pattern and tries to match them in order.

    Returns one group of matches per level of the pattern; taking the
    Cartesian product of these groups recreates the candidate paths.

    Note:
        This works best when a tree structure is created systematically
        in HDF5. Then, this will recreate what the tree structure could
        and may contain.

    Args:
        a_filehandle(h5py.File):        an HDF5 file.

        a_path_pattern(str):            an internal path (with patterns for
                                        each group) for the HDF5 file.

    Returns:
        (list):                         a list of lists of matching group
                                        names, one inner list per pattern
                                        level.
    """

    def get_matching_paths_groups_recursive(a_filehandle, a_path_pattern):
        current_pattern_group_matches = []
        if (isinstance(a_filehandle, h5py.Group) and a_path_pattern):
            current_pattern_group_matches.append(collections.OrderedDict())
            current_group = a_filehandle
            # NOTE(review): strip("\b") removes literal backspace
            # characters from the pattern — presumably a guard against
            # malformed input; confirm against callers.
            a_path_pattern = a_path_pattern.strip("\b").strip("/")
            to_split = a_path_pattern.find("/")
            if to_split != -1:
                current_path = a_path_pattern[:to_split]
                next_path = a_path_pattern[1 + to_split:]
            else:
                current_path, next_path = a_path_pattern, ""
            current_pattern_group_regex = re.compile("/" + current_path + "/")
            for each_group in current_group:
                if current_pattern_group_regex.match("/" + each_group + "/") is not None:
                    next_group = current_group[each_group]
                    next_pattern_group_matches = get_matching_paths_groups_recursive(
                        next_group, next_path
                    )
                    # OrderedDicts act as ordered sets: duplicates
                    # collapse while discovery order is preserved.
                    current_pattern_group_matches[0][each_group] = None
                    # Grow the result so there is one level for every
                    # level found below this group.
                    while (len(current_pattern_group_matches) - 1) < len(next_pattern_group_matches):
                        current_pattern_group_matches.append(
                            collections.OrderedDict()
                        )
                    # Merge each deeper level's matches into ours.
                    for i, each_next_pattern_group_matches in enumerate(
                            next_pattern_group_matches, start=1
                    ):
                        for each_next_pattern_group_match in each_next_pattern_group_matches:
                            current_pattern_group_matches[i][each_next_pattern_group_match] = None
        else:
            current_pattern_group_matches = []

        return(current_pattern_group_matches)

    # Flatten each level's ordered set into a plain list of names.
    # (Previously an index-based copy loop via ``iters.irange``.)
    return([
        list(each_level)
        for each_level in get_matching_paths_groups_recursive(
            a_filehandle, a_path_pattern
        )
    ])
@prof.log_call(trace_logger)
def get_matching_grouped_paths(a_filehandle, a_path_pattern):
    """
    Generates the possible paths that fit the provided pattern path.

    Args:
        a_filehandle(h5py.File):        an HDF5 file.

        a_path_pattern(str):            an internal path (with patterns for
                                        each group) for the HDF5 file.

    Returns:
        (list):                         the possible paths that fit the
                                        pattern, deduplicated, in
                                        discovery order.
    """

    # An OrderedDict is used as an ordered set: duplicates collapse while
    # the generation order of the paths is preserved.
    paths_found = collections.OrderedDict()
    for each_path_components in itertools.product(
            *get_matching_paths_groups(a_filehandle, a_path_pattern)
    ):
        paths_found["/" + "/".join(each_path_components)] = None

    return(list(paths_found.keys()))
@prof.log_call(trace_logger)
def get_matching_grouped_paths_found(a_filehandle, a_path_pattern):
    """
    Looks for existing paths that match the full provided pattern path.

    Returns the possible matches as keys and whether each one is found
    in the HDF5 file or not.

    Args:
        a_filehandle(h5py.File):        an HDF5 file.

        a_path_pattern(str):            an internal path (with patterns for
                                        each group) for the HDF5 file.

    Returns:
        (collections.OrderedDict):      an ordered dictionary mapping each
                                        possible path that fits the pattern
                                        to whether it exists in the file.
    """

    paths_found = collections.OrderedDict()
    for each_path_components in itertools.product(
            *get_matching_paths_groups(a_filehandle, a_path_pattern)
    ):
        each_path = "/" + "/".join(each_path_components)
        # Candidate paths are only *possible* matches; record whether
        # each one actually exists in the file.
        paths_found[each_path] = (each_path in a_filehandle)

    return(paths_found)
| {
"content_hash": "acf323c28d83cce0b7f09fec9d688368",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 101,
"avg_line_length": 34.80444444444444,
"alnum_prop": 0.5404162942152981,
"repo_name": "DudLab/nanshe",
"id": "f176c4239fd2293854c06e625d4b19050a723d4d",
"size": "7831",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nanshe/io/hdf5/search.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1046938"
},
{
"name": "Shell",
"bytes": "3526"
}
],
"symlink_target": ""
} |
r"""
Punkt Sentence Tokenizer
This tokenizer divides a text into a list of sentences,
by using an unsupervised algorithm to build a model for abbreviation
words, collocations, and words that start sentences. It must be
trained on a large collection of plaintext in the target language
before it can be used.
The NLTK data package includes a pre-trained Punkt tokenizer for
English.
>>> import nltk.data
>>> text = '''
... Punkt knows that the periods in Mr. Smith and Johann S. Bach
... do not mark sentence boundaries. And sometimes sentences
... can start with non-capitalized words. i is a good variable
... name.
... '''
>>> sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
>>> print('\n-----\n'.join(sent_detector.tokenize(text.strip())))
Punkt knows that the periods in Mr. Smith and Johann S. Bach
do not mark sentence boundaries.
-----
And sometimes sentences
can start with non-capitalized words.
-----
i is a good variable
name.
(Note that whitespace from the original text, including newlines, is
retained in the output.)
Punctuation following sentences is also included by default
(from NLTK 3.0 onwards). It can be excluded with the realign_boundaries
flag.
>>> text = '''
... (How does it deal with this parenthesis?) "It should be part of the
... previous sentence." "(And the same with this one.)" ('And this one!')
... "('(And (this)) '?)" [(and this. )]
... '''
>>> print('\n-----\n'.join(
... sent_detector.tokenize(text.strip())))
(How does it deal with this parenthesis?)
-----
"It should be part of the
previous sentence."
-----
"(And the same with this one.)"
-----
('And this one!')
-----
"('(And (this)) '?)"
-----
[(and this. )]
>>> print('\n-----\n'.join(
... sent_detector.tokenize(text.strip(), realign_boundaries=False)))
(How does it deal with this parenthesis?
-----
) "It should be part of the
previous sentence.
-----
" "(And the same with this one.
-----
)" ('And this one!
-----
')
"('(And (this)) '?
-----
)" [(and this.
-----
)]
However, Punkt is designed to learn parameters (a list of abbreviations, etc.)
unsupervised from a corpus similar to the target domain. The pre-packaged models
may therefore be unsuitable: use ``PunktSentenceTokenizer(text)`` to learn
parameters from the given text.
:class:`.PunktTrainer` learns parameters such as a list of abbreviations
(without supervision) from portions of text. Using a ``PunktTrainer`` directly
allows for incremental training and modification of the hyper-parameters used
to decide what is considered an abbreviation, etc.
The algorithm for this tokenizer is described in::
Kiss, Tibor and Strunk, Jan (2006): Unsupervised Multilingual Sentence
Boundary Detection. Computational Linguistics 32: 485-525.
"""
from __future__ import print_function, unicode_literals
# TODO: Make orthographic heuristic less susceptible to overtraining
# TODO: Frequent sentence starters optionally exclude always-capitalised words
# FIXME: Problem with ending string with e.g. '!!!' -> '!! !'
import re
import math
from collections import defaultdict
from nltk.compat import unicode_repr, python_2_unicode_compatible, string_types
from nltk.probability import FreqDist
from nltk.tokenize.api import TokenizerI
######################################################################
#{ Orthographic Context Constants
######################################################################
# The following constants are used to describe the orthographic
# contexts in which a word can occur. BEG=beginning, MID=middle,
# UNK=unknown, UC=uppercase, LC=lowercase, NC=no case.
# Each context occupies its own bit, so flags for several contexts can
# be combined with + or | and tested with & (see _ORTHO_UC/_ORTHO_LC
# below and PunktParameters.add_ortho_context).
_ORTHO_BEG_UC = 1 << 1
"""Orthographic context: beginning of a sentence with upper case."""
_ORTHO_MID_UC = 1 << 2
"""Orthographic context: middle of a sentence with upper case."""
_ORTHO_UNK_UC = 1 << 3
"""Orthographic context: unknown position in a sentence with upper case."""
_ORTHO_BEG_LC = 1 << 4
"""Orthographic context: beginning of a sentence with lower case."""
_ORTHO_MID_LC = 1 << 5
"""Orthographic context: middle of a sentence with lower case."""
_ORTHO_UNK_LC = 1 << 6
"""Orthographic context: unknown position in a sentence with lower case."""
_ORTHO_UC = _ORTHO_BEG_UC + _ORTHO_MID_UC + _ORTHO_UNK_UC
"""Orthographic context: occurs with upper case."""
_ORTHO_LC = _ORTHO_BEG_LC + _ORTHO_MID_LC + _ORTHO_UNK_LC
"""Orthographic context: occurs with lower case."""
_ORTHO_MAP = {
    ('initial', 'upper'): _ORTHO_BEG_UC,
    ('internal', 'upper'): _ORTHO_MID_UC,
    ('unknown', 'upper'): _ORTHO_UNK_UC,
    ('initial', 'lower'): _ORTHO_BEG_LC,
    ('internal', 'lower'): _ORTHO_MID_LC,
    ('unknown', 'lower'): _ORTHO_UNK_LC,
}
"""A map from context position and first-letter case to the
appropriate orthographic context flag."""
#} (end orthographic context constants)
######################################################################
######################################################################
#{ Decision reasons for debugging
######################################################################
REASON_DEFAULT_DECISION = 'default decision'
REASON_KNOWN_COLLOCATION = 'known collocation (both words)'
REASON_ABBR_WITH_ORTHOGRAPHIC_HEURISTIC = 'abbreviation + orthographic heuristic'
REASON_ABBR_WITH_SENTENCE_STARTER = 'abbreviation + frequent sentence starter'
REASON_INITIAL_WITH_ORTHOGRAPHIC_HEURISTIC = 'initial + orthographic heuristic'
# Fixed copy-paste: this reason previously reused the 'initial + ...' text,
# which made number-based and initial-based decisions indistinguishable in
# debug output.  These strings are only emitted for debugging.
REASON_NUMBER_WITH_ORTHOGRAPHIC_HEURISTIC = 'number + orthographic heuristic'
REASON_INITIAL_WITH_SPECIAL_ORTHOGRAPHIC_HEURISTIC = 'initial + special orthographic heuristic'
#} (end decision reasons for debugging)
######################################################################
######################################################################
#{ Language-dependent variables
######################################################################
class PunktLanguageVars(object):
    """
    Stores variables, mostly regular expressions, which may be
    language-dependent for correct application of the algorithm.
    An extension of this class may modify its properties to suit
    a language other than English; an instance can then be passed
    as an argument to PunktSentenceTokenizer and PunktTrainer
    constructors.
    """

    __slots__ = ('_re_period_context', '_re_word_tokenizer')

    def __getstate__(self):
        # All modifications to the class are performed by inheritance.
        # Non-default parameters to be pickled must be defined in the inherited
        # class.
        return 1

    def __setstate__(self, state):
        return 1

    sent_end_chars = ('.', '?', '!')
    """Characters which are candidates for sentence boundaries"""

    @property
    def _re_sent_end_chars(self):
        # Character class matching any candidate sentence-ending character.
        return '[%s]' % re.escape(''.join(self.sent_end_chars))

    internal_punctuation = ',:;'  # might want to extend this..
    """sentence internal punctuation, which indicates an abbreviation if
    preceded by a period-final token."""

    re_boundary_realignment = re.compile(r'["\')\]}]+?(?:\s+|(?=--)|$)',
                                         re.MULTILINE)
    """Used to realign punctuation that should be included in a sentence
    although it follows the period (or ?, !)."""

    _re_word_start = r"[^\(\"\`{\[:;&\#\*@\)}\]\-,]"
    """Excludes some characters from starting word tokens"""

    _re_non_word_chars = r"(?:[?!)\";}\]\*:@\'\({\[])"
    """Characters that cannot appear within words"""

    _re_multi_char_punct = r"(?:\-{2,}|\.{2,}|(?:\.\s){2,}\.)"
    """Hyphen and ellipsis are multi-character punctuation"""

    _word_tokenize_fmt = r'''(
        %(MultiChar)s
        |
        (?=%(WordStart)s)\S+?  # Accept word characters until end is found
        (?= # Sequences marking a word's end
            \s|                                 # White-space
            $|                                  # End-of-string
            %(NonWord)s|%(MultiChar)s|          # Punctuation
            ,(?=$|\s|%(NonWord)s|%(MultiChar)s) # Comma if at end of word
        )
        |
        \S
    )'''
    """Format of a regular expression to split punctuation from words,
    excluding period."""

    def _word_tokenizer_re(self):
        """Compiles and returns a regular expression for word tokenization"""
        try:
            return self._re_word_tokenizer
        except AttributeError:
            # Compiled lazily on first use and cached in a slot.
            self._re_word_tokenizer = re.compile(
                self._word_tokenize_fmt %
                {
                    'NonWord': self._re_non_word_chars,
                    'MultiChar': self._re_multi_char_punct,
                    'WordStart': self._re_word_start,
                },
                re.UNICODE | re.VERBOSE
            )
            return self._re_word_tokenizer

    def word_tokenize(self, s):
        """Tokenize a string to split off punctuation other than periods"""
        return self._word_tokenizer_re().findall(s)

    _period_context_fmt = r"""
        \S*                          # some word material
        %(SentEndChars)s             # a potential sentence ending
        (?=(?P<after_tok>
            %(NonWord)s              # either other punctuation
            |
            \s+(?P<next_tok>\S+)     # or whitespace and some other token
        ))"""
    """Format of a regular expression to find contexts including possible
    sentence boundaries. Matches token which the possible sentence boundary
    ends, and matches the following token within a lookahead expression."""

    def period_context_re(self):
        """Compiles and returns a regular expression to find contexts
        including possible sentence boundaries."""
        try:
            return self._re_period_context
        # Was a bare ``except:``, which would also swallow e.g.
        # KeyboardInterrupt; only the missing-slot case is expected here
        # (same caching idiom as _word_tokenizer_re above).
        except AttributeError:
            self._re_period_context = re.compile(
                self._period_context_fmt %
                {
                    'NonWord': self._re_non_word_chars,
                    'SentEndChars': self._re_sent_end_chars,
                },
                re.UNICODE | re.VERBOSE)
            return self._re_period_context
# Compiled once at module import; used by PunktToken.is_non_punct to decide
# whether a token type contains any word character.
_re_non_punct = re.compile(r'[^\W\d]', re.UNICODE)
"""Matches token types that are not merely punctuation. (Types for
numeric tokens are changed to ##number## and hence contain alpha.)"""
#}
######################################################################
#////////////////////////////////////////////////////////////
#{ Helper Functions
#////////////////////////////////////////////////////////////
def _pair_iter(it):
"""
Yields pairs of tokens from the given iterator such that each input
token will appear as the first element in a yielded tuple. The last
pair will have None as its second element.
"""
it = iter(it)
prev = next(it)
for el in it:
yield (prev, el)
prev = el
yield (prev, None)
######################################################################
#{ Punkt Parameters
######################################################################
class PunktParameters(object):
    """Stores data used to perform sentence boundary detection with Punkt."""

    def __init__(self):
        # word types known to be abbreviations
        self.abbrev_types = set()
        # word-type pairs (w1, w2) where w1 ends in a period and the pair
        # is a known collocation, e.g. ('s.', 'bach') for 'Johann S. Bach';
        # these count as negative evidence for a sentence boundary
        self.collocations = set()
        # word types that often appear at the beginning of sentences
        self.sent_starters = set()
        # word type -> bitwise OR of the orthographic-context flags the
        # type has been observed in
        self.ortho_context = defaultdict(int)

    def clear_abbrevs(self):
        self.abbrev_types = set()

    def clear_collocations(self):
        self.collocations = set()

    def clear_sent_starters(self):
        self.sent_starters = set()

    def clear_ortho_context(self):
        self.ortho_context = defaultdict(int)

    def add_ortho_context(self, typ, flag):
        # accumulate the contexts this word type has been seen in
        self.ortho_context[typ] |= flag

    def _debug_ortho_context(self, typ):
        # yield a human-readable name, in a fixed order, for each
        # orthographic-context flag set on this word type
        flags = self.ortho_context[typ]
        for bit, label in (
                (_ORTHO_BEG_UC, 'BEG-UC'),
                (_ORTHO_MID_UC, 'MID-UC'),
                (_ORTHO_UNK_UC, 'UNK-UC'),
                (_ORTHO_BEG_LC, 'BEG-LC'),
                (_ORTHO_MID_LC, 'MID-LC'),
                (_ORTHO_UNK_LC, 'UNK-LC'),
        ):
            if flags & bit:
                yield label
######################################################################
#{ PunktToken
######################################################################
@python_2_unicode_compatible
class PunktToken(object):
    """A single token of text plus the annotations attached to it during
    sentence boundary detection."""

    _properties = [
        'parastart', 'linestart',
        'sentbreak', 'abbr', 'ellipsis'
    ]
    __slots__ = ['tok', 'type', 'period_final'] + _properties

    def __init__(self, tok, **params):
        self.tok = tok
        self.type = self._get_type(tok)
        self.period_final = tok.endswith('.')
        # every annotation starts unset; caller-supplied keyword values
        # are then applied on top
        for name in self._properties:
            setattr(self, name, None)
        for name, value in params.items():
            setattr(self, name, value)

    #////////////////////////////////////////////////////////////
    #{ Regular expressions for properties
    #////////////////////////////////////////////////////////////
    # Note: [A-Za-z] is approximated by [^\W\d] in the general case.
    _RE_ELLIPSIS = re.compile(r'\.\.+$')
    _RE_NUMERIC = re.compile(r'^-?[\.,]?\d[\d,\.-]*\.?$')
    _RE_INITIAL = re.compile(r'[^\W\d]\.$', re.UNICODE)
    _RE_ALPHA = re.compile(r'[^\W\d]+$', re.UNICODE)

    #////////////////////////////////////////////////////////////
    #{ Derived properties
    #////////////////////////////////////////////////////////////

    def _get_type(self, tok):
        """Returns a case-normalized representation of the token."""
        # numeric token types are collapsed into the single ##number## type
        return self._RE_NUMERIC.sub('##number##', tok.lower())

    @property
    def type_no_period(self):
        """
        The type with its final period removed if it has one.
        """
        typ = self.type
        if len(typ) > 1 and typ.endswith('.'):
            return typ[:-1]
        return typ

    @property
    def type_no_sentperiod(self):
        """
        The type with its final period removed if it is marked as a
        sentence break.
        """
        return self.type_no_period if self.sentbreak else self.type

    @property
    def first_upper(self):
        """True if the token's first character is uppercase."""
        return self.tok[0].isupper()

    @property
    def first_lower(self):
        """True if the token's first character is lowercase."""
        return self.tok[0].islower()

    @property
    def first_case(self):
        """'lower', 'upper', or 'none' for the token's first character."""
        if self.first_lower:
            return 'lower'
        if self.first_upper:
            return 'upper'
        return 'none'

    @property
    def is_ellipsis(self):
        """True if the token text is that of an ellipsis."""
        return self._RE_ELLIPSIS.match(self.tok)

    @property
    def is_number(self):
        """True if the token text is that of a number."""
        return self.type.startswith('##number##')

    @property
    def is_initial(self):
        """True if the token text is that of an initial."""
        return self._RE_INITIAL.match(self.tok)

    @property
    def is_alpha(self):
        """True if the token text is all alphabetic."""
        return self._RE_ALPHA.match(self.tok)

    @property
    def is_non_punct(self):
        """True if the token is either a number or is alphabetic."""
        return _re_non_punct.search(self.type)

    #////////////////////////////////////////////////////////////
    #{ String representation
    #////////////////////////////////////////////////////////////

    def __repr__(self):
        """
        A string representation of the token that can reproduce it
        with eval(), which lists all the token's non-default
        annotations.
        """
        typestr = ''
        if self.type != self.tok:
            typestr = ' type=%s,' % unicode_repr(self.type)

        propvals = ', '.join(
            '%s=%s' % (name, unicode_repr(getattr(self, name)))
            for name in self._properties
            if getattr(self, name)
        )

        return '%s(%s,%s %s)' % (self.__class__.__name__,
                                 unicode_repr(self.tok), typestr, propvals)

    def __str__(self):
        """
        A string representation akin to that used by Kiss and Strunk.
        """
        pieces = [self.tok]
        if self.abbr:
            pieces.append('<A>')
        if self.ellipsis:
            pieces.append('<E>')
        if self.sentbreak:
            pieces.append('<S>')
        return ''.join(pieces)
######################################################################
#{ Punkt base class
######################################################################
class PunktBaseClass(object):
    """
    Includes common components of PunktTrainer and PunktSentenceTokenizer.
    """

    def __init__(self, lang_vars=None, token_cls=None, params=None):
        """
        :param lang_vars: language-dependent variables (regexes etc.);
            defaults to a fresh ``PunktLanguageVars``.
        :param token_cls: class used to wrap word tokens; defaults to
            ``PunktToken``.
        :param params: the collection of parameters that determines the
            behavior of the punkt tokenizer; defaults to a fresh
            ``PunktParameters``.
        """
        # The original signature used shared instances as default
        # argument values (``lang_vars=PunktLanguageVars()``,
        # ``params=PunktParameters()``), so every object constructed with
        # the defaults silently shared one mutable parameter object.
        # Defaulting to None and constructing per call fixes that while
        # remaining call-compatible.
        if lang_vars is None:
            lang_vars = PunktLanguageVars()
        if token_cls is None:
            token_cls = PunktToken
        if params is None:
            params = PunktParameters()
        self._params = params
        self._lang_vars = lang_vars
        self._Token = token_cls

    #////////////////////////////////////////////////////////////
    #{ Word tokenization
    #////////////////////////////////////////////////////////////

    def _tokenize_words(self, plaintext):
        """
        Divide the given text into word tokens using the punkt word
        segmentation regular expression, and yield each token wrapped in
        ``self._Token``.  The first token of each non-blank line carries
        ``linestart=True``; the first token after one or more blank lines
        additionally carries ``parastart=True``.
        """
        parastart = False
        for line in plaintext.split('\n'):
            if line.strip():
                line_toks = iter(self._lang_vars.word_tokenize(line))

                yield self._Token(next(line_toks),
                                  parastart=parastart, linestart=True)
                parastart = False

                for t in line_toks:
                    yield self._Token(t)
            else:
                # a blank line separates paragraphs
                parastart = True

    #////////////////////////////////////////////////////////////
    #{ Annotation Procedures
    #////////////////////////////////////////////////////////////

    def _annotate_first_pass(self, tokens):
        """
        Perform the first pass of annotation, which makes decisions
        based purely based on the word type of each word:

          - '?', '!', and '.' are marked as sentence breaks.
          - sequences of two or more periods are marked as ellipsis.
          - any word ending in '.' that's a known abbreviation is
            marked as an abbreviation.
          - any other word ending in '.' is marked as a sentence break.

        Yields each token after annotating it in place.  (The previous
        docstring claimed a tuple of three index sets was returned,
        which is not what this generator does.)
        """
        for aug_tok in tokens:
            self._first_pass_annotation(aug_tok)
            yield aug_tok

    def _first_pass_annotation(self, aug_tok):
        """
        Performs type-based annotation on a single token.
        """

        tok = aug_tok.tok

        if tok in self._lang_vars.sent_end_chars:
            aug_tok.sentbreak = True
        elif aug_tok.is_ellipsis:
            aug_tok.ellipsis = True
        elif aug_tok.period_final and not tok.endswith('..'):
            # A period-final token that matches a known abbreviation type
            # (directly or via its last hyphen-separated part) is an
            # abbreviation; any other period-final token ends a sentence.
            if (tok[:-1].lower() in self._params.abbrev_types or
                tok[:-1].lower().split('-')[-1] in self._params.abbrev_types):
                aug_tok.abbr = True
            else:
                aug_tok.sentbreak = True
######################################################################
#{ Punkt Trainer
######################################################################
class PunktTrainer(PunktBaseClass):
"""Learns parameters used in Punkt sentence boundary detection."""
def __init__(self, train_text=None, verbose=False,
             lang_vars=None, token_cls=PunktToken):
    """
    :param train_text: optional text to train on immediately.
    :param verbose: if True, list abbreviations as they are found.
    :param lang_vars: language-dependent variables; defaults to a fresh
        ``PunktLanguageVars``.  (The original signature shared a single
        mutable instance across all trainers via a default argument.)
    :param token_cls: class used to wrap word tokens.
    """
    if lang_vars is None:
        lang_vars = PunktLanguageVars()
    PunktBaseClass.__init__(self, lang_vars=lang_vars,
                            token_cls=token_cls)

    # frequency of each case-normalized token type in the training data
    self._type_fdist = FreqDist()

    # number of words ending in a period in the training data
    self._num_period_toks = 0

    # frequency of bigrams whose first word ends in a period; especially
    # common ones are promoted into ``_params.collocations``
    self._collocation_fdist = FreqDist()

    # frequency of words occurring at sentence starts (after the first
    # annotation pass); especially common ones are promoted into
    # ``_params.sent_starters``
    self._sent_starter_fdist = FreqDist()

    # total sentence breaks identified in training, used for the
    # frequent sentence starter heuristic
    self._sentbreak_count = 0

    # whether collocations and sentence starters have been finalized
    # since the last train()/train_tokens() call
    self._finalized = True

    if train_text:
        self.train(train_text, verbose, finalize=True)
def get_params(self):
    """Return the sentence-boundary parameters derived from training.

    Training is finalized on demand if finalize_training() has not yet
    been run since the last training call."""
    if self._finalized:
        return self._params
    self.finalize_training()
    return self._params
#////////////////////////////////////////////////////////////
#{ Customization Variables
#////////////////////////////////////////////////////////////

ABBREV = 0.3
"""cut-off value whether a 'token' is an abbreviation"""

IGNORE_ABBREV_PENALTY = False
"""allows the disabling of the abbreviation penalty heuristic, which
exponentially disadvantages words that are found at times without a
final period."""

ABBREV_BACKOFF = 5
"""upper cut-off for Mikheev's(2002) abbreviation detection algorithm"""

COLLOCATION = 7.88
"""minimal log-likelihood value that two tokens need to be considered
as a collocation"""

SENT_STARTER = 30
"""minimal log-likelihood value that a token requires to be considered
as a frequent sentence starter"""

INCLUDE_ALL_COLLOCS = False
"""this includes as potential collocations all word pairs where the first
word ends in a period. It may be useful in corpora where there is a lot
of variation that makes abbreviations like Mr difficult to identify."""

INCLUDE_ABBREV_COLLOCS = False
"""this includes as potential collocations all word pairs where the first
word is an abbreviation. Such collocations override the orthographic
heuristic, but not the sentence starter heuristic. This is overridden by
INCLUDE_ALL_COLLOCS, and if both are false, only collocations with initials
and ordinals are considered."""

MIN_COLLOC_FREQ = 1
"""this sets a minimum bound on the number of times a bigram needs to
appear before it can be considered a collocation, in addition to log
likelihood statistics. This is useful when INCLUDE_ALL_COLLOCS is True."""
#////////////////////////////////////////////////////////////
#{ Training..
#////////////////////////////////////////////////////////////
def train(self, text, verbose=False, finalize=True):
    """
    Collect training data from ``text``.  When ``finalize`` is True the
    sentence-boundary parameters are computed immediately; otherwise the
    computation is deferred until get_params() or finalize_training() is
    called.  When ``verbose`` is True, abbreviations found are listed.
    """
    # Break the text into tokens (recording line/paragraph starts and
    # token types) and fold them into the running counts.
    word_tokens = self._tokenize_words(text)
    self._train_tokens(word_tokens, verbose)
    if finalize:
        self.finalize_training(verbose)
def train_tokens(self, tokens, verbose=False, finalize=True):
    """
    Collect training data from an iterable of already-split tokens.
    """
    # Wrap each raw token in the trainer's augmented-token class.
    aug_toks = (self._Token(t) for t in tokens)
    self._train_tokens(aug_toks, verbose)
    if finalize:
        self.finalize_training(verbose)
def _train_tokens(self, tokens, verbose):
    """
    Fold one batch of augmented tokens into the trainer's running
    statistics: type frequencies, abbreviation classifications,
    orthographic contexts, sentence-starter counts and collocation
    counts.  Marks the trainer as not finalized.
    """
    self._finalized = False

    # Ensure tokens are a list
    tokens = list(tokens)

    # Find the frequency of each case-normalized type.  (Don't
    # strip off final periods.)  Also keep track of the number of
    # tokens that end in periods.
    for aug_tok in tokens:
        self._type_fdist[aug_tok.type] += 1
        if aug_tok.period_final:
            self._num_period_toks += 1

    # Look for new abbreviations, and for types that no longer are
    unique_types = self._unique_types(tokens)
    for abbr, score, is_add in self._reclassify_abbrev_types(unique_types):
        if score >= self.ABBREV:
            if is_add:
                self._params.abbrev_types.add(abbr)
                if verbose:
                    print((' Abbreviation: [%6.4f] %s' %
                           (score, abbr)))
        else:
            if not is_add:
                self._params.abbrev_types.remove(abbr)
                if verbose:
                    print((' Removed abbreviation: [%6.4f] %s' %
                           (score, abbr)))

    # Make a preliminary pass through the document, marking likely
    # sentence breaks, abbreviations, and ellipsis tokens.
    tokens = list(self._annotate_first_pass(tokens))

    # Check what contexts each word type can appear in, given the
    # case of its first letter.
    self._get_orthography_data(tokens)

    # We need total number of sentence breaks to find sentence starters
    self._sentbreak_count += self._get_sentbreak_count(tokens)

    # The remaining heuristics relate to pairs of tokens where the first
    # ends in a period.
    for aug_tok1, aug_tok2 in _pair_iter(tokens):
        if not aug_tok1.period_final or not aug_tok2:
            continue

        # Is the first token a rare abbreviation?
        if self._is_rare_abbrev_type(aug_tok1, aug_tok2):
            self._params.abbrev_types.add(aug_tok1.type_no_period)
            if verbose:
                print((' Rare Abbrev: %s' % aug_tok1.type))

        # Does second token have a high likelihood of starting a sentence?
        if self._is_potential_sent_starter(aug_tok2, aug_tok1):
            self._sent_starter_fdist[aug_tok2.type] += 1

        # Is this bigram a potential collocation?
        if self._is_potential_collocation(aug_tok1, aug_tok2):
            self._collocation_fdist[
                (aug_tok1.type_no_period, aug_tok2.type_no_sentperiod)] += 1
def _unique_types(self, tokens):
return set(aug_tok.type for aug_tok in tokens)
def finalize_training(self, verbose=False):
    """
    Uses data that has been gathered in training to determine likely
    collocations and sentence starters.  After this call,
    ``get_params()`` reflects all training seen so far.
    """
    # Rebuild the sentence-starter set from current counts.
    self._params.clear_sent_starters()
    for typ, ll in self._find_sent_starters():
        self._params.sent_starters.add(typ)
        if verbose:
            print((' Sent Starter: [%6.4f] %r' % (ll, typ)))

    # Rebuild the collocation set from current counts.
    self._params.clear_collocations()
    for (typ1, typ2), ll in self._find_collocations():
        self._params.collocations.add( (typ1,typ2) )
        if verbose:
            print((' Collocation: [%6.4f] %r+%r' %
                   (ll, typ1, typ2)))

    self._finalized = True
#////////////////////////////////////////////////////////////
#{ Overhead reduction
#////////////////////////////////////////////////////////////
def freq_threshold(self, ortho_thresh=2, type_thresh=2, colloc_thres=2,
                   sentstart_thresh=2):
    """
    Allows memory use to be reduced after much training by removing data
    about rare tokens that are unlikely to have a statistical effect with
    further training. Entries occurring above the given thresholds will be
    retained.
    """
    if ortho_thresh > 1:
        # Rebuild the orthographic-context table, keeping only types
        # whose overall frequency meets the threshold.
        old_oc = self._params.ortho_context
        self._params.clear_ortho_context()
        for tok in self._type_fdist:
            count = self._type_fdist[tok]
            if count >= ortho_thresh:
                self._params.ortho_context[tok] = old_oc[tok]

    # Prune each frequency distribution in place (removed mass is
    # preserved under the None key by _freq_threshold).
    self._type_fdist = self._freq_threshold(self._type_fdist, type_thresh)
    self._collocation_fdist = self._freq_threshold(
        self._collocation_fdist, colloc_thres)
    self._sent_starter_fdist = self._freq_threshold(
        self._sent_starter_fdist, sentstart_thresh)
def _freq_threshold(self, fdist, threshold):
    """
    Returns a FreqDist containing only the entries whose count is at or
    above the given threshold, plus a mapping (None -> count_removed)
    recording how many entries were discarded.
    """
    # We assume that there is more data below the threshold than above it
    # and so create a new FreqDist rather than working in place.
    res = FreqDist()
    num_removed = 0
    for tok in fdist:
        count = fdist[tok]
        if count < threshold:
            num_removed += 1
        else:
            res[tok] += count
    res[None] += num_removed
    return res
#////////////////////////////////////////////////////////////
#{ Orthographic data
#////////////////////////////////////////////////////////////
def _get_orthography_data(self, tokens):
    """
    Collect information about whether each token type occurs
    with different case patterns (i) overall, (ii) at
    sentence-initial positions, and (iii) at sentence-internal
    positions.
    """
    # 'initial' or 'internal' or 'unknown'
    context = 'internal'
    tokens = list(tokens)

    for aug_tok in tokens:
        # If we encounter a paragraph break, then it's a good sign
        # that it's a sentence break. But err on the side of
        # caution (by not positing a sentence break) if we just
        # saw an abbreviation.
        if aug_tok.parastart and context != 'unknown':
            context = 'initial'

        # If we're at the beginning of a line, then we can't decide
        # between 'internal' and 'initial'.
        if aug_tok.linestart and context == 'internal':
            context = 'unknown'

        # Find the case-normalized type of the token. If it's a
        # sentence-final token, strip off the period.
        typ = aug_tok.type_no_sentperiod

        # Update the orthographic context table.
        flag = _ORTHO_MAP.get((context, aug_tok.first_case), 0)
        if flag:
            self._params.add_ortho_context(typ, flag)

        # Decide whether the next word is at a sentence boundary.
        if aug_tok.sentbreak:
            if not (aug_tok.is_number or aug_tok.is_initial):
                context = 'initial'
            else:
                # Numbers/initials marked as breaks are ambiguous.
                context = 'unknown'
        elif aug_tok.ellipsis or aug_tok.abbr:
            context = 'unknown'
        else:
            context = 'internal'
#////////////////////////////////////////////////////////////
#{ Abbreviations
#////////////////////////////////////////////////////////////
def _reclassify_abbrev_types(self, types):
    """
    (Re)classifies each given token if
      - it is period-final and not a known abbreviation; or
      - it is not period-final and is otherwise a known abbreviation
    by checking whether its previous classification still holds according
    to the heuristics of section 3.
    Yields triples (abbr, score, is_add) where abbr is the type in question,
    score is its log-likelihood with penalties applied, and is_add specifies
    whether the present type is a candidate for inclusion or exclusion as an
    abbreviation, such that:
      - (is_add and score >= 0.3)    suggests a new abbreviation; and
      - (not is_add and score < 0.3) suggests excluding an abbreviation.
    """
    # (While one could recalculate abbreviations from all .-final tokens at
    # every iteration, in cases requiring efficiency, the number of tokens
    # in the present training document will be much less.)

    for typ in types:
        # Check some basic conditions, to rule out words that are
        # clearly not abbrev_types.
        if not _re_non_punct.search(typ) or typ == '##number##':
            continue

        if typ.endswith('.'):
            if typ in self._params.abbrev_types:
                continue
            # Strip the final period; candidate for *inclusion*.
            typ = typ[:-1]
            is_add = True
        else:
            if typ not in self._params.abbrev_types:
                continue
            # Known abbreviation seen without a period; candidate for
            # *exclusion*.
            is_add = False

        # Count how many periods & nonperiods are in the
        # candidate.
        num_periods = typ.count('.') + 1
        num_nonperiods = len(typ) - num_periods + 1

        # Let <a> be the candidate without the period, and <b>
        # be the period. Find a log likelihood ratio that
        # indicates whether <ab> occurs as a single unit (high
        # value of ll), or as two independent units <a> and
        # <b> (low value of ll).
        count_with_period = self._type_fdist[typ + '.']
        count_without_period = self._type_fdist[typ]
        ll = self._dunning_log_likelihood(
            count_with_period + count_without_period,
            self._num_period_toks, count_with_period,
            self._type_fdist.N())

        # Apply three scaling factors to 'tweak' the basic log
        # likelihood ratio:
        #   F_length: long word -> less likely to be an abbrev
        #   F_periods: more periods -> more likely to be an abbrev
        #   F_penalty: penalize occurrences w/o a period
        f_length = math.exp(-num_nonperiods)
        f_periods = num_periods
        f_penalty = (int(self.IGNORE_ABBREV_PENALTY)
                     or math.pow(num_nonperiods, -count_without_period))
        score = ll * f_length * f_periods * f_penalty

        yield typ, score, is_add
def find_abbrev_types(self):
    """
    Recalculate abbreviations from scratch using the current type
    frequencies, discarding any previously determined abbreviations.
    Abbreviations that would only be found as "rare" are not included.
    """
    self._params.clear_abbrevs()
    candidates = (typ for typ in self._type_fdist if typ and typ.endswith('.'))
    for abbr, score, is_add in self._reclassify_abbrev_types(candidates):
        if score < self.ABBREV:
            continue
        self._params.abbrev_types.add(abbr)
# This function combines the work done by the original code's
# functions `count_orthography_context`, `get_orthography_count`,
# and `get_rare_abbreviations`.
def _is_rare_abbrev_type(self, cur_tok, next_tok):
    """
    A word type is counted as a rare abbreviation if...
      - it's not already marked as an abbreviation
      - it occurs fewer than ABBREV_BACKOFF times
      - either it is followed by a sentence-internal punctuation
        mark, *or* it is followed by a lower-case word that
        sometimes appears with upper case, but never occurs with
        lower case at the beginning of sentences.

    Falls through to an implicit None (falsy) when neither trailing
    condition applies.
    """
    if cur_tok.abbr or not cur_tok.sentbreak:
        return False

    # Find the case-normalized type of the token. If it's
    # a sentence-final token, strip off the period.
    typ = cur_tok.type_no_sentperiod

    # Proceed only if the type hasn't been categorized as an
    # abbreviation already, and is sufficiently rare...
    # (counts both the period-final and period-less spellings)
    count = self._type_fdist[typ] + self._type_fdist[typ[:-1]]
    if (typ in self._params.abbrev_types or count >= self.ABBREV_BACKOFF):
        return False

    # Record this token as an abbreviation if the next
    # token is a sentence-internal punctuation mark.
    # [XX] :1 or check the whole thing??
    if next_tok.tok[:1] in self._lang_vars.internal_punctuation:
        return True

    # Record this type as an abbreviation if the next
    # token... (i) starts with a lower case letter,
    # (ii) sometimes occurs with an uppercase letter,
    # and (iii) never occurs with an uppercase letter
    # sentence-internally.
    # [xx] should the check for (ii) be modified??
    elif next_tok.first_lower:
        typ2 = next_tok.type_no_sentperiod
        typ2ortho_context = self._params.ortho_context[typ2]
        if ( (typ2ortho_context & _ORTHO_BEG_UC) and
             not (typ2ortho_context & _ORTHO_MID_UC) ):
            return True
#////////////////////////////////////////////////////////////
#{ Log Likelihoods
#////////////////////////////////////////////////////////////
# helper for _reclassify_abbrev_types:
@staticmethod
def _dunning_log_likelihood(count_a, count_b, count_ab, N):
"""
A function that calculates the modified Dunning log-likelihood
ratio scores for abbreviation candidates. The details of how
this works is available in the paper.
"""
p1 = float(count_b) / N
p2 = 0.99
null_hypo = (float(count_ab) * math.log(p1) +
(count_a - count_ab) * math.log(1.0 - p1))
alt_hypo = (float(count_ab) * math.log(p2) +
(count_a - count_ab) * math.log(1.0 - p2))
likelihood = null_hypo - alt_hypo
return (-2.0 * likelihood)
@staticmethod
def _col_log_likelihood(count_a, count_b, count_ab, N):
"""
A function that will just compute log-likelihood estimate, in
the original paper it's described in algorithm 6 and 7.
This *should* be the original Dunning log-likelihood values,
unlike the previous log_l function where it used modified
Dunning log-likelihood values
"""
import math
p = 1.0 * count_b / N
p1 = 1.0 * count_ab / count_a
p2 = 1.0 * (count_b - count_ab) / (N - count_a)
summand1 = (count_ab * math.log(p) +
(count_a - count_ab) * math.log(1.0 - p))
summand2 = ((count_b - count_ab) * math.log(p) +
(N - count_a - count_b + count_ab) * math.log(1.0 - p))
if count_a == count_ab:
summand3 = 0
else:
summand3 = (count_ab * math.log(p1) +
(count_a - count_ab) * math.log(1.0 - p1))
if count_b == count_ab:
summand4 = 0
else:
summand4 = ((count_b - count_ab) * math.log(p2) +
(N - count_a - count_b + count_ab) * math.log(1.0 - p2))
likelihood = summand1 + summand2 - summand3 - summand4
return (-2.0 * likelihood)
#////////////////////////////////////////////////////////////
#{ Collocation Finder
#////////////////////////////////////////////////////////////
def _is_potential_collocation(self, aug_tok1, aug_tok2):
"""
Returns True if the pair of tokens may form a collocation given
log-likelihood statistics.
"""
return ((self.INCLUDE_ALL_COLLOCS or
(self.INCLUDE_ABBREV_COLLOCS and aug_tok1.abbr) or
(aug_tok1.sentbreak and
(aug_tok1.is_number or aug_tok1.is_initial)))
and aug_tok1.is_non_punct
and aug_tok2.is_non_punct)
def _find_collocations(self):
    """
    Generates likely collocations and their log-likelihood.
    """
    for types in self._collocation_fdist:
        try:
            typ1, typ2 = types
        except TypeError:
            # types may be None after calling freq_threshold()
            continue
        # Pairs whose second member is a frequent sentence starter are
        # excluded (see the paper, section 4.1.2).
        if typ2 in self._params.sent_starters:
            continue

        col_count = self._collocation_fdist[types]
        # A type may have been counted both with and without its final
        # period, so sum both spellings.
        typ1_count = self._type_fdist[typ1]+self._type_fdist[typ1+'.']
        typ2_count = self._type_fdist[typ2]+self._type_fdist[typ2+'.']
        if (typ1_count > 1 and typ2_count > 1
                and self.MIN_COLLOC_FREQ <
                col_count <= min(typ1_count, typ2_count)):

            ll = self._col_log_likelihood(typ1_count, typ2_count,
                                          col_count, self._type_fdist.N())
            # Filter out the not-so-collocative
            if (ll >= self.COLLOCATION and
                    (float(self._type_fdist.N())/typ1_count >
                     float(typ2_count)/col_count)):
                yield (typ1, typ2), ll
#////////////////////////////////////////////////////////////
#{ Sentence-Starter Finder
#////////////////////////////////////////////////////////////
def _is_potential_sent_starter(self, cur_tok, prev_tok):
"""
Returns True given a token and the token that preceds it if it
seems clear that the token is beginning a sentence.
"""
# If a token (i) is preceded by a sentece break that is
# not a potential ordinal number or initial, and (ii) is
# alphabetic, then it is a a sentence-starter.
return ( prev_tok.sentbreak and
not (prev_tok.is_number or prev_tok.is_initial) and
cur_tok.is_alpha )
def _find_sent_starters(self):
    """
    Uses collocation heuristics for each candidate token to
    determine if it frequently starts sentences.  Yields
    (type, log_likelihood) pairs for accepted candidates.
    """
    for typ in self._sent_starter_fdist:
        if not typ:
            continue

        typ_at_break_count = self._sent_starter_fdist[typ]
        # Sum counts for both the plain and period-final spellings.
        typ_count = self._type_fdist[typ]+self._type_fdist[typ+'.']
        if typ_count < typ_at_break_count:
            # needed after freq_threshold
            continue

        ll = self._col_log_likelihood(self._sentbreak_count, typ_count,
                                      typ_at_break_count,
                                      self._type_fdist.N())

        if (ll >= self.SENT_STARTER and
                float(self._type_fdist.N())/self._sentbreak_count >
                float(typ_count)/typ_at_break_count):
            yield typ, ll
def _get_sentbreak_count(self, tokens):
"""
Returns the number of sentence breaks marked in a given set of
augmented tokens.
"""
return sum(1 for aug_tok in tokens if aug_tok.sentbreak)
######################################################################
#{ Punkt Sentence Tokenizer
######################################################################
class PunktSentenceTokenizer(PunktBaseClass, TokenizerI):
    """
    A sentence tokenizer which uses an unsupervised algorithm to build
    a model for abbreviation words, collocations, and words that start
    sentences; and then uses that model to find sentence boundaries.
    This approach has been shown to work well for many European
    languages.
    """

    def __init__(self, train_text=None, verbose=False,
                 lang_vars=PunktLanguageVars(), token_cls=PunktToken):
        """
        train_text can either be the sole training text for this sentence
        boundary detector, or can be a PunktParameters object.
        """
        # NOTE(review): ``lang_vars=PunktLanguageVars()`` is a mutable
        # default argument shared by every instance; it appears to be
        # treated as read-only here, but confirm before relying on it.
        PunktBaseClass.__init__(self, lang_vars=lang_vars,
                                token_cls=token_cls)

        if train_text:
            self._params = self.train(train_text, verbose)

    def train(self, train_text, verbose=False):
        """
        Derives parameters from a given training text, or uses the parameters
        given. Repeated calls to this method destroy previous parameters. For
        incremental training, instantiate a separate PunktTrainer instance.
        """
        # A non-string argument is assumed to already be a trained
        # PunktParameters object and is returned unchanged.
        if not isinstance(train_text, string_types):
            return train_text
        return PunktTrainer(train_text, lang_vars=self._lang_vars,
                            token_cls=self._Token).get_params()

    #////////////////////////////////////////////////////////////
    #{ Tokenization
    #////////////////////////////////////////////////////////////

    def tokenize(self, text, realign_boundaries=True):
        """
        Given a text, returns a list of the sentences in that text.
        """
        return list(self.sentences_from_text(text, realign_boundaries))

    def debug_decisions(self, text):
        """
        Classifies candidate periods as sentence breaks, yielding a dict for
        each that may be used to understand why the decision was made.

        See format_debug_decision() to help make this output readable.
        """
        for match in self._lang_vars.period_context_re().finditer(text):
            decision_text = match.group() + match.group('after_tok')
            tokens = self._tokenize_words(decision_text)
            tokens = list(self._annotate_first_pass(tokens))
            # Drop any leading tokens that are not period-final; the
            # decision being explained concerns the first period.
            while not tokens[0].period_final:
                tokens.pop(0)
            yield dict(period_index=match.end() - 1,
                       text=decision_text,
                       type1=tokens[0].type,
                       type2=tokens[1].type,
                       type1_in_abbrs=bool(tokens[0].abbr),
                       type1_is_initial=bool(tokens[0].is_initial),
                       type2_is_sent_starter=tokens[1].type_no_sentperiod in self._params.sent_starters,
                       type2_ortho_heuristic=self._ortho_heuristic(tokens[1]),
                       type2_ortho_contexts=set(self._params._debug_ortho_context(tokens[1].type_no_sentperiod)),
                       collocation=(tokens[0].type_no_sentperiod, tokens[1].type_no_sentperiod) in self._params.collocations,
                       reason=self._second_pass_annotation(tokens[0], tokens[1]) or REASON_DEFAULT_DECISION,
                       break_decision=tokens[0].sentbreak,
                       )

    def span_tokenize(self, text, realign_boundaries=True):
        """
        Given a text, returns a list of the (start, end) spans of sentences
        in the text.
        """
        slices = self._slices_from_text(text)
        if realign_boundaries:
            slices = self._realign_boundaries(text, slices)
        return [(sl.start, sl.stop) for sl in slices]

    def sentences_from_text(self, text, realign_boundaries=True):
        """
        Given a text, generates the sentences in that text by only
        testing candidate sentence breaks. If realign_boundaries is
        True, includes in the sentence closing punctuation that
        follows the period.
        """
        return [text[s:e] for s, e in self.span_tokenize(text, realign_boundaries)]

    def _slices_from_text(self, text):
        # Yield one slice per sentence by testing each candidate period
        # context; the remainder after the last break is the final slice.
        last_break = 0
        for match in self._lang_vars.period_context_re().finditer(text):
            context = match.group() + match.group('after_tok')
            if self.text_contains_sentbreak(context):
                yield slice(last_break, match.end())
                if match.group('next_tok'):
                    # next sentence starts after whitespace
                    last_break = match.start('next_tok')
                else:
                    # next sentence starts at following punctuation
                    last_break = match.end()
        yield slice(last_break, len(text))

    def _realign_boundaries(self, text, slices):
        """
        Attempts to realign punctuation that falls after the period but
        should otherwise be included in the same sentence.

        For example: "(Sent1.) Sent2." will otherwise be split as::

            ["(Sent1.", ") Sent1."].

        This method will produce::

            ["(Sent1.)", "Sent2."].
        """
        realign = 0
        for sl1, sl2 in _pair_iter(slices):
            # Shift this slice's start by whatever punctuation was pulled
            # back into the previous sentence.
            sl1 = slice(sl1.start + realign, sl1.stop)
            if not sl2:
                if text[sl1]:
                    yield sl1
                continue

            m = self._lang_vars.re_boundary_realignment.match(text[sl2])
            if m:
                yield slice(sl1.start, sl2.start + len(m.group(0).rstrip()))
                realign = m.end()
            else:
                realign = 0
                if text[sl1]:
                    yield sl1

    def text_contains_sentbreak(self, text):
        """
        Returns True if the given text includes a sentence break.
        """
        found = False  # used to ignore last token
        for t in self._annotate_tokens(self._tokenize_words(text)):
            if found:
                return True
            if t.sentbreak:
                found = True
        return False

    def sentences_from_text_legacy(self, text):
        """
        Given a text, generates the sentences in that text. Annotates all
        tokens, rather than just those with possible sentence breaks. Should
        produce the same results as ``sentences_from_text``.
        """
        tokens = self._annotate_tokens(self._tokenize_words(text))
        return self._build_sentence_list(text, tokens)

    def sentences_from_tokens(self, tokens):
        """
        Given a sequence of tokens, generates lists of tokens, each list
        corresponding to a sentence.
        """
        tokens = iter(self._annotate_tokens(self._Token(t) for t in tokens))
        sentence = []
        for aug_tok in tokens:
            sentence.append(aug_tok.tok)
            if aug_tok.sentbreak:
                yield sentence
                sentence = []
        if sentence:
            yield sentence

    def _annotate_tokens(self, tokens):
        """
        Given a set of tokens augmented with markers for line-start and
        paragraph-start, returns an iterator through those tokens with full
        annotation including predicted sentence breaks.
        """
        # Make a preliminary pass through the document, marking likely
        # sentence breaks, abbreviations, and ellipsis tokens.
        tokens = self._annotate_first_pass(tokens)

        # Make a second pass through the document, using token context
        # information to change our preliminary decisions about where
        # sentence breaks, abbreviations, and ellipsis occurs.
        tokens = self._annotate_second_pass(tokens)

        ## [XX] TESTING
        #tokens = list(tokens)
        #self.dump(tokens)

        return tokens

    def _build_sentence_list(self, text, tokens):
        """
        Given the original text and the list of augmented word tokens,
        construct and return a tokenized list of sentence strings.
        """
        # Most of the work here is making sure that we put the right
        # pieces of whitespace back in all the right places.

        # Our position in the source text, used to keep track of which
        # whitespace to add:
        pos = 0

        # A regular expression that finds pieces of whitespace:
        WS_REGEXP = re.compile(r'\s*')

        sentence = ''
        for aug_tok in tokens:
            tok = aug_tok.tok

            # Find the whitespace before this token, and update pos.
            ws = WS_REGEXP.match(text, pos).group()
            pos += len(ws)

            # Some of the rules used by the punkt word tokenizer
            # strip whitespace out of the text, resulting in tokens
            # that contain whitespace in the source text. If our
            # token doesn't match, see if adding whitespace helps.
            # If so, then use the version with whitespace.
            if text[pos:pos+len(tok)] != tok:
                # Raw string: '\s' in a plain literal is an invalid
                # escape sequence on modern Python.
                pat = r'\s*'.join(re.escape(c) for c in tok)
                m = re.compile(pat).match(text, pos)
                if m:
                    tok = m.group()

            # Move our position pointer to the end of the token.
            assert text[pos:pos+len(tok)] == tok
            pos += len(tok)

            # Add this token. If it's not at the beginning of the
            # sentence, then include any whitespace that separated it
            # from the previous token.
            if sentence:
                sentence += ws
            sentence += tok

            # If we're at a sentence break, then start a new sentence.
            if aug_tok.sentbreak:
                yield sentence
                sentence = ''

        # If the last sentence is empty, discard it.
        if sentence:
            yield sentence

    # [XX] TESTING
    def dump(self, tokens):
        print('writing to /tmp/punkt.new...')
        with open('/tmp/punkt.new', 'w') as outfile:
            for aug_tok in tokens:
                if aug_tok.parastart:
                    outfile.write('\n\n')
                elif aug_tok.linestart:
                    outfile.write('\n')
                else:
                    outfile.write(' ')
                outfile.write(str(aug_tok))

    #////////////////////////////////////////////////////////////
    #{ Customization Variables
    #////////////////////////////////////////////////////////////

    PUNCTUATION = tuple(';:,.!?')

    #////////////////////////////////////////////////////////////
    #{ Annotation Procedures
    #////////////////////////////////////////////////////////////

    def _annotate_second_pass(self, tokens):
        """
        Performs a token-based classification (section 4) over the given
        tokens, making use of the orthographic heuristic (4.1.1), collocation
        heuristic (4.1.2) and frequent sentence starter heuristic (4.1.3).
        """
        for t1, t2 in _pair_iter(tokens):
            self._second_pass_annotation(t1, t2)
            yield t1

    def _second_pass_annotation(self, aug_tok1, aug_tok2):
        """
        Performs token-based classification over a pair of contiguous tokens
        updating the first.  Returns a REASON_* constant describing the
        decision, or None when no rule applied.
        """
        # Is it the last token? We can't do anything then.
        if not aug_tok2:
            return

        tok = aug_tok1.tok
        if not aug_tok1.period_final:
            # We only care about words ending in periods.
            return
        typ = aug_tok1.type_no_period
        next_tok = aug_tok2.tok
        next_typ = aug_tok2.type_no_sentperiod
        tok_is_initial = aug_tok1.is_initial

        # [4.1.2. Collocation Heuristic] If there's a
        # collocation between the word before and after the
        # period, then label tok as an abbreviation and NOT
        # a sentence break. Note that collocations with
        # frequent sentence starters as their second word are
        # excluded in training.
        if (typ, next_typ) in self._params.collocations:
            aug_tok1.sentbreak = False
            aug_tok1.abbr = True
            return REASON_KNOWN_COLLOCATION

        # [4.2. Token-Based Reclassification of Abbreviations] If
        # the token is an abbreviation or an ellipsis, then decide
        # whether we should *also* classify it as a sentbreak.
        if ((aug_tok1.abbr or aug_tok1.ellipsis) and
                (not tok_is_initial)):
            # [4.1.1. Orthographic Heuristic] Check if there's
            # orthographic evidence about whether the next word
            # starts a sentence or not.  (The heuristic is tri-state:
            # True / False / 'unknown', hence the == comparisons.)
            is_sent_starter = self._ortho_heuristic(aug_tok2)
            if is_sent_starter == True:
                aug_tok1.sentbreak = True
                return REASON_ABBR_WITH_ORTHOGRAPHIC_HEURISTIC

            # [4.1.3. Frequent Sentence Starter Heuristic] If the
            # next word is capitalized, and is a member of the
            # frequent-sentence-starters list, then label tok as a
            # sentence break.
            if (aug_tok2.first_upper and
                    next_typ in self._params.sent_starters):
                aug_tok1.sentbreak = True
                return REASON_ABBR_WITH_SENTENCE_STARTER

        # [4.3. Token-Based Detection of Initials and Ordinals]
        # Check if any initials or ordinals tokens that are marked
        # as sentbreaks should be reclassified as abbreviations.
        if tok_is_initial or typ == '##number##':

            # [4.1.1. Orthographic Heuristic] Check if there's
            # orthographic evidence about whether the next word
            # starts a sentence or not.
            is_sent_starter = self._ortho_heuristic(aug_tok2)

            if is_sent_starter == False:
                aug_tok1.sentbreak = False
                aug_tok1.abbr = True
                if tok_is_initial:
                    return REASON_INITIAL_WITH_ORTHOGRAPHIC_HEURISTIC
                else:
                    return REASON_NUMBER_WITH_ORTHOGRAPHIC_HEURISTIC

            # Special heuristic for initials: if orthographic
            # heuristic is unknown, and next word is always
            # capitalized, then mark as abbrev (eg: J. Bach).
            if (is_sent_starter == 'unknown' and tok_is_initial and
                    aug_tok2.first_upper and
                    not (self._params.ortho_context[next_typ] & _ORTHO_LC)):
                aug_tok1.sentbreak = False
                aug_tok1.abbr = True
                return REASON_INITIAL_WITH_SPECIAL_ORTHOGRAPHIC_HEURISTIC

        return

    def _ortho_heuristic(self, aug_tok):
        """
        Decide whether the given token is the first token in a sentence.
        Returns True, False, or 'unknown'.
        """
        # Sentences don't start with punctuation marks:
        if aug_tok.tok in self.PUNCTUATION:
            return False

        ortho_context = self._params.ortho_context[aug_tok.type_no_sentperiod]

        # If the word is capitalized, occurs at least once with a
        # lower case first letter, and never occurs with an upper case
        # first letter sentence-internally, then it's a sentence starter.
        if (aug_tok.first_upper and
                (ortho_context & _ORTHO_LC) and
                not (ortho_context & _ORTHO_MID_UC)):
            return True

        # If the word is lower case, and either (a) we've seen it used
        # with upper case, or (b) we've never seen it used
        # sentence-initially with lower case, then it's not a sentence
        # starter.
        if (aug_tok.first_lower and
                ((ortho_context & _ORTHO_UC) or
                 not (ortho_context & _ORTHO_BEG_LC))):
            return False

        # Otherwise, we're not sure.
        return 'unknown'
# %-style template used by format_debug_decision() to pretty-print one
# entry yielded by PunktSentenceTokenizer.debug_decisions().
DEBUG_DECISION_FMT = '''Text: %(text)r (at offset %(period_index)d)
Sentence break? %(break_decision)s (%(reason)s)
Collocation? %(collocation)s
%(type1)r:
known abbreviation: %(type1_in_abbrs)s
is initial: %(type1_is_initial)s
%(type2)r:
known sentence starter: %(type2_is_sent_starter)s
orthographic heuristic suggests is a sentence starter? %(type2_ortho_heuristic)s
orthographic contexts in training: %(type2_ortho_contexts)s
'''
def format_debug_decision(d):
    """Render one debug_decisions() dict as human-readable text."""
    return DEBUG_DECISION_FMT % d
def demo(text, tok_cls=PunktSentenceTokenizer, train_cls=PunktTrainer):
    """Builds a punkt model and applies it to the same text"""
    leading_ws = re.compile(r'(?:\r|^\s+)', re.MULTILINE)

    def cleanup(s):
        # Drop carriage returns and leading indentation, then unwrap
        # the sentence onto a single line.
        return leading_ws.sub('', s).replace('\n', ' ')

    trainer = train_cls()
    trainer.INCLUDE_ALL_COLLOCS = True
    trainer.train(text)
    sbd = tok_cls(trainer.get_params())
    for l in sbd.sentences_from_text(text):
        print(cleanup(l))
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| {
"content_hash": "9c360249120b294e2a797c6644b38cdc",
"timestamp": "",
"source": "github",
"line_count": 1592,
"max_line_length": 118,
"avg_line_length": 38.391331658291456,
"alnum_prop": 0.5557682553706703,
"repo_name": "laiy/Database_Project",
"id": "27006612f85b4dfb1c973ca24d0030711ef4fc97",
"size": "61617",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "third_party/nltk/tokenize/punkt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15128"
},
{
"name": "Python",
"bytes": "9629"
}
],
"symlink_target": ""
} |
# Multiply two fixed factors and print the product (6 * 7 == 42).
factor0 = 6
factor1 = 7
answer = factor0 * factor1
# ``print(answer)`` works on both Python 2 (parenthesized expression)
# and Python 3; the original bare ``print answer`` statement is
# Python-2-only syntax and a SyntaxError on Python 3.
print(answer)
| {
"content_hash": "4239dad99cdff7ac0a2542c57c7cfcb3",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 26,
"avg_line_length": 16,
"alnum_prop": 0.734375,
"repo_name": "nathano/Perl_to_Python_Converter",
"id": "946fc9224a49be056d87c6d5a0ee1a871c551582",
"size": "88",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/subset1/answer3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "20894"
},
{
"name": "Python",
"bytes": "2735"
}
],
"symlink_target": ""
} |
import os
import sys
import copy
import json
import socket
import logging
from threading import local
from collections import OrderedDict
from contextlib import contextmanager
class SwagFormatter(logging.Formatter):
    """
    Formats log records as single-line JSON objects, augmented with the
    owning SwagLogger's bound context and the local hostname.
    """

    def __init__(self, swaglogger):
        logging.Formatter.__init__(self, None, '%a %b %d %H:%M:%S %Z %Y')
        self.swaglogger = swaglogger
        self.host = socket.gethostname()

    def json_handler(self, obj):
        """Fallback serializer for objects json can't encode natively."""
        # if isinstance(obj, (datetime.date, datetime.time)):
        #     return obj.isoformat()
        return repr(obj)

    def format(self, record):
        """Return the record as a JSON string with context and metadata."""
        record_dict = OrderedDict()

        if isinstance(record.msg, dict):
            # Structured messages are passed through as-is.
            record_dict['msg'] = record.msg
        else:
            try:
                record_dict['msg'] = record.getMessage()
            except (ValueError, TypeError):
                # %-interpolation failed; fall back to the raw template
                # plus its arguments.  record.args is a tuple, so it must
                # be converted before concatenating with a list — the
                # original ``[record.msg] + record.args`` raised TypeError
                # in exactly the error path it was meant to handle.
                record_dict['msg'] = [record.msg] + list(record.args)

        record_dict['ctx'] = self.swaglogger.get_ctx()

        if record.exc_info:
            record_dict['exc_info'] = self.formatException(record.exc_info)

        record_dict['level'] = record.levelname
        record_dict['levelnum'] = record.levelno
        record_dict['name'] = record.name
        record_dict['filename'] = record.filename
        record_dict['lineno'] = record.lineno
        record_dict['pathname'] = record.pathname
        record_dict['module'] = record.module
        record_dict['funcName'] = record.funcName
        record_dict['host'] = self.host
        record_dict['process'] = record.process
        record_dict['thread'] = record.thread
        record_dict['threadName'] = record.threadName
        record_dict['created'] = record.created
        # asctime = self.formatTime(record, self.datefmt)

        return json.dumps(record_dict, default=self.json_handler)
_tmpfunc = lambda: 0
_srcfile = os.path.normcase(_tmpfunc.__code__.co_filename)
class SwagLogger(logging.Logger):
    """
    Logger carrying structured context: a global dict shared by all
    threads plus a thread-local dict, both merged into every record's
    output by SwagFormatter via get_ctx().
    """

    def __init__(self):
        logging.Logger.__init__(self, "swaglog")

        # Context shared across all threads.
        self.global_ctx = {}

        # Per-thread context storage.
        self.log_local = local()
        self.log_local.ctx = {}

    def findCaller(self):
        """
        Find the stack frame of the caller so that we can note the source
        file name, line number and function name.
        """
        # NOTE(review): this override takes no arguments and returns a
        # 3-tuple; Python 3's logging calls findCaller(stack_info[,
        # stacklevel]) and expects a 4-tuple, so this looks Python-2
        # specific — confirm the target interpreter before reuse.
        # f = currentframe()
        f = sys._getframe(3)
        #On some versions of IronPython, currentframe() returns None if
        #IronPython isn't run with -X:Frames.
        if f is not None:
            f = f.f_back
        rv = "(unknown file)", 0, "(unknown function)"
        while hasattr(f, "f_code"):
            co = f.f_code
            filename = os.path.normcase(co.co_filename)
            # Skip frames belonging to the logging machinery or this module.
            if filename in (logging._srcfile, _srcfile):
                f = f.f_back
                continue
            rv = (co.co_filename, f.f_lineno, co.co_name)
            break
        return rv

    def local_ctx(self):
        # Lazily create the thread-local dict: threads other than the one
        # that ran __init__ won't have a ``ctx`` attribute yet.
        try:
            return self.log_local.ctx
        except AttributeError:
            self.log_local.ctx = {}
            return self.log_local.ctx

    def get_ctx(self):
        """Merged view of local and global context (global wins on key clashes)."""
        return dict(self.local_ctx(), **self.global_ctx)

    @contextmanager
    def ctx(self, **kwargs):
        """Scope extra bound context to the enclosed with-block."""
        old_ctx = self.local_ctx()
        self.log_local.ctx = copy.copy(old_ctx) or {}
        self.log_local.ctx.update(kwargs)
        try:
            yield
        finally:
            # Restore the previous context even if the block raised.
            self.log_local.ctx = old_ctx

    def bind(self, **kwargs):
        """Attach key/value pairs to this thread's logging context."""
        self.local_ctx().update(kwargs)

    def bind_global(self, **kwargs):
        """Attach key/value pairs to the context shared by all threads."""
        self.global_ctx.update(kwargs)

    def event(self, event_name, *args, **kwargs):
        """Log a structured event dict (name, args, extras) at INFO level."""
        evt = OrderedDict()
        evt['event'] = event_name
        if args:
            evt['args'] = args
        evt.update(kwargs)
        self.info(evt)
if __name__ == "__main__":
    # Smoke test: exercise plain %-formatted, dict-structured, and
    # context-bound logging paths.
    log = SwagLogger()
    log.info("asdasd %s", "a")
    log.info({'wut': 1})
    with log.ctx():
        log.bind(user="some user")
        log.info("in req")
        log.event("do_req")
| {
"content_hash": "49e7f8478e5966917da1ba348179587f",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 71,
"avg_line_length": 26.83582089552239,
"alnum_prop": 0.6351501668520578,
"repo_name": "heidecjj/openpilot",
"id": "2f1218452184db034043470df672d311215b107f",
"size": "3596",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "common/logging_extra.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "654130"
},
{
"name": "C++",
"bytes": "24497"
},
{
"name": "Cap'n Proto",
"bytes": "27780"
},
{
"name": "Makefile",
"bytes": "7859"
},
{
"name": "Python",
"bytes": "207584"
},
{
"name": "Shell",
"bytes": "489"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.