code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
import json
import boto3
import time
from datetime import datetime, timedelta, timezone
def get_region_billing(region_name='cn-northwest-1'):
    """Return the latest total estimated charge for one AWS China region.

    Queries the CloudWatch ``AWS/Billing`` namespace for the past day,
    collects one ``EstimatedCharges`` entry per service, prints the full
    per-service breakdown, and returns the account-wide total (the entry
    whose currency dimension is ``CNY``), moved to the front of the list.

    Args:
        region_name: AWS China region to query (default Ningxia).

    Returns:
        float: the most recent total estimated charge, or 0.0 when no
        datapoint exists yet.
    """
    client = boto3.client('cloudwatch', region_name=region_name)
    # Billing metrics are published periodically; a one-day window is enough
    # to capture the most recent datapoint.
    end_time = datetime.now(timezone.utc)
    start_time = end_time - timedelta(days=1)
    # One metric per service (distinguished by its Dimensions).
    metrics = client.list_metrics(Namespace='AWS/Billing')
    billings = []
    for metric in metrics['Metrics']:
        res = client.get_metric_data(
            MetricDataQueries=[
                {
                    'Id': 'm1',
                    'MetricStat': {
                        'Metric': {
                            'Namespace': 'AWS/Billing',
                            'MetricName': 'EstimatedCharges',
                            # Reuse this metric's own Dimensions so the query
                            # targets exactly one service's charges.
                            'Dimensions': metric['Dimensions'],
                        },
                        'Period': 300,
                        'Stat': 'Average',
                        'Unit': 'None'
                    }
                },
            ],
            StartTime=start_time,
            EndTime=end_time
        )
        service_name = metric['Dimensions'][0]['Value']
        values = res['MetricDataResults'][0]['Values']
        # Default to the float 0.0 (was the string '0.0'), so callers can sum
        # prices without a TypeError when a service has no datapoint yet.
        service_price = values[0] if values else 0.0
        billings.append({'ServiceName': service_name, 'Price': service_price})
    # The entry whose dimension value is the currency code 'CNY' is the
    # account-wide total: rename it and move it to the front of the list.
    for entry in billings:
        if entry["ServiceName"] == "CNY":
            entry["ServiceName"] = "总账单(RMB)"
            billings.remove(entry)
            billings.insert(0, entry)
            break
    print(billings)
    return billings[0]['Price']
def lambda_handler(event, context):
    """AWS Lambda entry point: report the month's spend for both China regions."""
    beijing_billing = get_region_billing('cn-north-1')
    ningxia_billing = get_region_billing('cn-northwest-1')
    print(beijing_billing)
    print(ningxia_billing)
    total = beijing_billing + ningxia_billing
    return {
        'message': 'AWS中国本月消费¥{},其中宁夏区域消费¥{},北京区域消费¥{}'.format(
            total, ningxia_billing, beijing_billing)
    }
| iceflow/aws-demo | lambda/get_billing/get_aws_china_billing.py | Python | gpl-3.0 | 3,472 |
import urllib
import urllib2
import json
import difflib
import pylast
class iTunesAlbumArt(object):
    """Look up album artwork URLs through the public iTunes search API."""

    def __init__(self):
        self.base_url = 'https://itunes.apple.com/search'
        # Maximum number of results requested per search.
        self.limit = 50

    def _form_url(self, artist):
        """Build the iTunes search URL for the given artist."""
        query = urllib.urlencode({'limit': self.limit, 'term': artist})
        return self.base_url + '?' + query

    def _get_largest_pic_url(self, pic_100_url):
        """Probe decreasing artwork resolutions and return the first that exists.

        iTunes only advertises the 100x100 URL, but larger variants usually
        exist at the same path; falls back to the 100x100 URL itself.
        """
        prefix, _, suffix = pic_100_url.rpartition('100x100')
        sizes = (
            '1500x1500',
            '1200x1200',
            '900x900',
            '600x600',
            '300x300',
            '100x100',
        )
        for size in sizes:
            candidate = prefix + size + suffix
            try:
                urllib2.urlopen(candidate)
            except ValueError:
                # URL not well formatted
                continue
            except urllib2.URLError:
                # Doesn't seem to exist
                continue
            break
        return candidate

    def _find_album(self, tracks_by_artist, album):
        """Return the first track whose collection name fuzzily matches *album*."""
        for candidate in tracks_by_artist:
            name = candidate.get('collectionName', None)
            if not name:
                continue
            similarity = difflib.SequenceMatcher(None, name, album).ratio()
            if similarity > 0.5:
                return candidate
        return None

    def find_art(self, artist, album):
        """Return the largest artwork URL for *artist*/*album*, or None."""
        response = urllib2.urlopen(self._form_url(artist))
        payload = json.loads(response.read())
        if payload['resultCount'] == 0:
            return None
        match = self._find_album(payload['results'], album)
        if not match:
            return None
        return self._get_largest_pic_url(match['artworkUrl100'])
class LastFMAlbumArt(object):
    """
    Trivially simple stub around pylast's album-art lookup.
    Kept deliberately minimal so it mirrors the iTunes class's interface.
    """
    def __init__(self, key=None, secret=None):
        # NOTE(review): returning None from __init__ does not abort object
        # construction; when credentials are missing, self.api is never set
        # and a later find_art() call raises AttributeError. Consider raising
        # ValueError here instead.
        if not key or not secret:
            print 'Last.fm API Key and Secret required'
            return None
        self.api = pylast.LastFMNetwork(
            api_key=key,
            api_secret=secret
        )
    def find_art(self, artist, album_name):
        # Return the extra-large cover URL for the album, or None when the
        # Last.fm web service reports an error for this artist/album pair.
        try:
            album = self.api.get_album(artist, album_name)
            pic_url = album.get_cover_image(pylast.COVER_EXTRA_LARGE)
        except pylast.WSError:
            pic_url = None
        return pic_url
| arfar/art-py | apis.py | Python | gpl-2.0 | 2,779 |
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/
from enum import IntEnum
# Represents the type of recognizer an ATN applies to.
class ATNType(IntEnum):
    """Kind of recognizer an ATN applies to: lexer or parser."""

    LEXER = 0
    PARSER = 1

    @classmethod
    def fromOrdinal(cls, i: int):
        """Return the member whose value equals ``i`` (KeyError if unknown)."""
        mapping = cls._value2member_map_
        return mapping[i]
| damorim/compilers-cin | antigo/ap3/antlr4-python3-runtime-4.7.2/src/antlr4/atn/ATNType.py | Python | mit | 422 |
# Mapping from the Ogre "Sinbad" skeleton to the SmartBody skeleton.
# (Header previously referred to Mixamo; this file maps Sinbad.skeleton.xml.)
# Before loading the character, run this script, then map any existing
# skeletons with:
#   skeletonmap <skeleton> Sinbad.skeleton.xml
# and map motions the same way:
#   motionmap <motion> Sinbad.skeleton.xml
# Build a joint map translating Ogre Sinbad joint names (left column) to
# SmartBody canonical joint names (right column).
jointMapManager = scene.getJointMapManager()
sinbadMap = jointMapManager.createJointMap("Sinbad.skeleton.xml")
# Root / spine / head chain.
sinbadMap.setMapping("Root", "base")
sinbadMap.setMapping("Neck", "spine4")
sinbadMap.setMapping("Head", "spine5")
sinbadMap.setMapping("Jaw", "skullbase")
# Left arm.
sinbadMap.setMapping("Clavicle.L", "l_sternoclavicular")
sinbadMap.setMapping("Humerus.L", "l_shoulder")
sinbadMap.setMapping("Ulna.L", "l_elbow")
sinbadMap.setMapping("Hand.L", "l_wrist")
# Left-hand fingers (distal joints map to *3 on this side).
sinbadMap.setMapping("ThumbProx.L", "l_thumb1")
sinbadMap.setMapping("ThumbMed.L", "l_thumb2")
sinbadMap.setMapping("ThumbDist.L", "l_thumb3")
sinbadMap.setMapping("IndexFingerProx.L", "l_index1")
sinbadMap.setMapping("IndexFingerMed.L", "l_index2")
sinbadMap.setMapping("IndexFingerDist.L", "l_index3")
sinbadMap.setMapping("MiddleFingerProx.L", "l_middle1")
sinbadMap.setMapping("MiddleFingerMed.L", "l_middle2")
sinbadMap.setMapping("MiddleFingerDist.L", "l_middle3")
sinbadMap.setMapping("RingFingerProx.L", "l_ring1")
sinbadMap.setMapping("RingFingerMed.L", "l_ring2")
sinbadMap.setMapping("RingFingerDist.L", "l_ring3")
sinbadMap.setMapping("PinkyProx.L", "l_pinky1")
sinbadMap.setMapping("PinkyMed.L", "l_pinky2")
sinbadMap.setMapping("PinkyDist.L", "l_pinky3")
# Right arm.
sinbadMap.setMapping("Clavicle.R", "r_sternoclavicular")
sinbadMap.setMapping("Humerus.R", "r_shoulder")
sinbadMap.setMapping("Ulna.R", "r_elbow")
sinbadMap.setMapping("Hand.R", "r_wrist")
# Right-hand fingers.
# NOTE(review): the right-hand distal joints below map to r_*4 while the
# left hand uses l_*3 (and PinkyDist.R uses r_pinky3) — confirm against the
# SmartBody skeleton's joint names whether this asymmetry is intended.
sinbadMap.setMapping("ThumbProx.R", "r_thumb1")
sinbadMap.setMapping("ThumbMed.R", "r_thumb2")
sinbadMap.setMapping("ThumbDist.R", "r_thumb4")
sinbadMap.setMapping("IndexFingerProx.R", "r_index1")
sinbadMap.setMapping("IndexFingerMed.R", "r_index2")
sinbadMap.setMapping("IndexFingerDist.R", "r_index4")
sinbadMap.setMapping("MiddleFingerProx.R", "r_middle1")
sinbadMap.setMapping("MiddleFingerMed.R", "r_middle2")
sinbadMap.setMapping("MiddleFingerDist.R", "r_middle4")
sinbadMap.setMapping("RingFingerProx.R", "r_ring1")
sinbadMap.setMapping("RingFingerMed.R", "r_ring2")
sinbadMap.setMapping("RingFingerDist.R", "r_ring4")
sinbadMap.setMapping("PinkyProx.R", "r_pinky1")
sinbadMap.setMapping("PinkyMed.R", "r_pinky2")
sinbadMap.setMapping("PinkyDist.R", "r_pinky3")
# Left leg.
sinbadMap.setMapping("Thigh.L", "l_hip")
sinbadMap.setMapping("Calf.L", "l_knee")
sinbadMap.setMapping("Foot.L", "l_ankle")
sinbadMap.setMapping("Toe.L", "l_forefoot")
sinbadMap.setMapping("LeftToe_End", "l_toe")
# Right leg.
sinbadMap.setMapping("Thigh.R", "r_hip")
sinbadMap.setMapping("Calf.R", "r_knee")
sinbadMap.setMapping("Foot.R", "r_ankle")
sinbadMap.setMapping("Toe.R", "r_forefoot")
sinbadMap.setMapping("RightToe_End", "r_toe")
# Eyes and remaining spine joints.
sinbadMap.setMapping("Eye.L", "eyeball_left")
sinbadMap.setMapping("Eye.R", "eyeball_right")
sinbadMap.setMapping("Chest", "spine3")
sinbadMap.setMapping("Stomach", "spine2")
sinbadMap.setMapping("Waist", "spine1")
| gsi-upm/SmartSim | smartbody/data/scripts/ogre-sinbad-map.py | Python | apache-2.0 | 3,142 |
import os
import subprocess
import json
from tempfile import mkstemp
class PDFParserError(Exception):
    """Raised when the external pdfparser.jar process writes to stderr."""
    pass
class InvalidOptionError(Exception):
    """Raised when an answer is not among a form field's allowed options."""
    pass
class PDFParser:
    """Thin wrapper around the external ``pdfparser.jar`` command-line tool.

    Supports inspecting PDF form fields (``get_field_data``), filling forms
    (``fill_pdf`` / ``fill_many_pdfs``) and concatenating PDFs
    (``join_pdfs``). Intermediate artifacts are written to temporary files,
    which are removed after each public call when ``clean_up`` is True.
    """

    def __init__(self, tmp_path=None, clean_up=True):
        """Create a parser.

        Args:
            tmp_path: directory for temporary files (None -> system default).
            clean_up: remove temporary files automatically after public calls.
        """
        self.TEMP_FOLDER_PATH = tmp_path
        self._tmp_files = []
        self.clean_up = clean_up
        # Jar location is overridable through the environment.
        self.PDFPARSER_PATH = os.environ.get('PDFPARSER_PATH', 'pdfparser.jar')

    def _coerce_to_file_path(self, path_or_file_or_bytes):
        """This converts file-like objects and `bytes` into
        existing files and returns a filepath.
        If a string is passed in, it is assumed to name an existing file.
        """
        if isinstance(path_or_file_or_bytes, str):
            return path_or_file_or_bytes
        if isinstance(path_or_file_or_bytes, bytes):
            return self._write_tmp_file(bytestring=path_or_file_or_bytes)
        return self._write_tmp_file(file_obj=path_or_file_or_bytes)

    def _write_tmp_file(self, file_obj=None, bytestring=None):
        """Take a file-like object or a bytestring,
        create a temporary file and return its path.

        File-like objects are read and written to the tempfile;
        bytes objects are written directly. The path is recorded in
        ``self._tmp_files`` for later cleanup.
        """
        os_fd, tmp_fp = mkstemp(dir=self.TEMP_FOLDER_PATH)
        # Wrap the OS-level descriptor mkstemp returned instead of re-opening
        # the path, which previously leaked one file descriptor per temp file.
        with os.fdopen(os_fd, 'wb') as tmp_file:
            if file_obj:
                tmp_file.write(file_obj.read())
            elif bytestring:
                tmp_file.write(bytestring)
        self._tmp_files.append(tmp_fp)
        return tmp_fp

    def _load_json(self, raw_string):
        """Parse a JSON string into Python data."""
        return json.loads(raw_string)

    def _dump_json(self, data):
        """Serialize Python data to a JSON string."""
        return json.dumps(data)

    def clean_up_tmp_files(self):
        """Delete every recorded temporary file and forget about it."""
        while self._tmp_files:
            os.remove(self._tmp_files.pop())

    def _get_name_option_lookup(self, field_data):
        """Map each field name to its list of valid options (or None)."""
        return {
            item['name']: item.get('options', None)
            for item in field_data['fields']
        }

    def _get_file_contents(self, path):
        """Given a file path, return the raw bytes of the file."""
        # 'with' guarantees the handle is closed (it previously leaked).
        with open(path, 'rb') as stream:
            return stream.read()

    def run_command(self, args):
        """Run pdfparser.jar on the command line.

        `args` is a list of command line arguments.
        This method is responsible for handling errors that arise from
        the tool's CLI.

        Raises:
            PDFParserError: if the subprocess writes anything to stderr.
        """
        args = ['java', '-jar', self.PDFPARSER_PATH] + args
        process = subprocess.Popen(
            args,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        out, err = process.communicate()
        if err:
            raise PDFParserError(err.decode('utf-8'))
        return out.decode('utf-8')

    def _fill(self, pdf_path, output_path, option_check, answers):
        """Fill *pdf_path*'s known form fields with *answers* into *output_path*.

        Answers whose field name is unknown are silently skipped.

        Raises:
            InvalidOptionError: if an answer is not among a field's options.
        """
        answer_fields = {'fields': []}
        for name, value in answers.items():
            if name in option_check:
                options = option_check[name]
                if options and value not in options:
                    # The message previously contained a stray leading quote.
                    raise InvalidOptionError(
                        "'{}' is not a valid option for '{}'. Choices: {}".format(
                            value, name, options
                        ))
                answer_fields['fields'].append({name: value})
        self.run_command([
            'set_fields',
            pdf_path,
            output_path,
            self._dump_json(answer_fields)
        ])

    def join_pdfs(self, list_of_pdf_paths):
        """Concatenate several PDFs and return the combined file's bytes."""
        paths = [self._coerce_to_file_path(p) for p in list_of_pdf_paths]
        output_path = self._write_tmp_file()
        self.run_command(['concat_files'] + paths + [output_path])
        result = self._get_file_contents(output_path)
        if self.clean_up:
            self.clean_up_tmp_files()
        return result

    def get_field_data(self, pdf_file_path):
        """Return the parsed form-field description of a PDF."""
        pdf_file_path = self._coerce_to_file_path(pdf_file_path)
        raw = self.run_command(['get_fields', pdf_file_path])
        return self._load_json(raw)

    def fill_pdf(self, pdf_path, answers):
        """Fill one PDF's form with *answers* and return the result's bytes."""
        pdf_path = self._coerce_to_file_path(pdf_path)
        field_data = self.get_field_data(pdf_path)
        option_check = self._get_name_option_lookup(field_data)
        output_path = self._write_tmp_file()
        self._fill(pdf_path, output_path, option_check, answers)
        result = self._get_file_contents(output_path)
        if self.clean_up:
            self.clean_up_tmp_files()
        return result

    def fill_many_pdfs(self, pdf_path, answers_list):
        """Fill the same PDF once per answers dict and return the joined bytes."""
        # Don't clean up while filling multiple pdfs; restore afterwards.
        _clean_up_setting = self.clean_up
        self.clean_up = False
        pdf_path = self._coerce_to_file_path(pdf_path)
        field_data = self.get_field_data(pdf_path)
        option_check = self._get_name_option_lookup(field_data)
        tmp_filled_pdf_paths = []
        for answers in answers_list:
            output_path = self._write_tmp_file()
            self._fill(pdf_path, output_path, option_check, answers)
            tmp_filled_pdf_paths.append(output_path)
        self.clean_up = _clean_up_setting
        return self.join_pdfs(tmp_filled_pdf_paths)
| codeforamerica/typeseam | typeseam/form_filler/pdfparser.py | Python | bsd-3-clause | 5,502 |
"""
These settings will always be overriding for all test runs.
"""
import tempfile

# Django test-only settings overrides. Values are deliberately fake so tests
# can never reach real external services.
EMAIL_FROM_ADDRESS = 'doesnt@matter.com'
# MD5 hashing keeps test-user creation fast.
PASSWORD_HASHERS = (
    'django.contrib.auth.hashers.MD5PasswordHasher',
)
MOZILLIANS_API_APPNAME = 'something'
MOZILLIANS_API_BASE = 'https://shouldneveractuallybeused.net'
MOZILLIANS_API_KEY = 'supersecret'
VIDLY_API_URL = 'http://vid.ly.shouldneveractuallybeused.com/api/'
# So that we never accidentally send tweets during tests
TWITTER_ACCESS_TOKEN_SECRET = "test"
TWITTER_CONSUMER_KEY = TWITTER_ACCESS_TOKEN_SECRET
TWITTER_CONSUMER_SECRET = TWITTER_CONSUMER_KEY
TWITTER_ACCESS_TOKEN = TWITTER_CONSUMER_KEY
TWEETER_BACKEND = None
URL_TRANSFORM_PASSWORDS = {
    'bla': 'bla',
}
BITLY_ACCESS_TOKEN = '123456789'
BITLY_URL = 'https://bitly-mock/v3/shorten'
# don't accidentally send anything to sentry whilst running tests
RAVEN_CONFIG = {}
SENTRY_DSN = None
SITE_URL = 'http://localhost:8000'
AWS_ACCESS_KEY_ID = AWS_SECRET_ACCESS_KEY = 'something'
S3_UPLOAD_BUCKET = 'test----air-mozilla-uploads'
# EDGECAST_SECURE_KEY is set once below; a duplicate earlier assignment
# ('soemthing') that was immediately overridden has been removed.
AKAMAI_SECURE_KEY = 'something'
BROWSERID_AUDIENCES = ['http://testserver']
MEDIA_ROOT = tempfile.mkdtemp(prefix='testmedia')
SCRAPE_CREDENTIALS = {}
LOG_SEARCHES = True
TWITTER_USERNAME = 'airmozilla'
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
VIDLY_BASE_URL = 'https://vid.ly.example'
VIDLY_USER_ID = 'any...'
VIDLY_USER_KEY = '...thing not empty'
EDGECAST_SECURE_KEY = 'anythingheretoo'
ALLOWED_BID = (
    'mozilla.com',
)
# Use memcached only for session storage
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
# deliberately set so tests never actually accidentally use it
AUTOCOMPETER_URL = 'https://autocompeter.example/v1'
AUTOCOMPETER_DOMAIN = ''
# make sure these are definitely off
GOD_MODE = False
BROWSERID_DISABLED = False
# Don't actually use celery in tests
CELERY_ALWAYS_EAGER = True
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
SCREENCAPTURES_NO_PICTURES = 5  # faster
# Deliberately disabled since reducing the size of PNGs
# slows down the tests significantly and we have deliberate
# tests that re-enables it.
PNGQUANT_LOCATION = None
# Elastic search test indexing
ELASTICSEARCH_INDEX = 'test-events'
# For using the google API
YOUTUBE_API_KEY = 'doesnthavetobesomethingreal'
# Make sure pipeline is enabled so it does not collectstatic on every test
PIPELINE_ENABLED = True
# Make sure it's not real values so it can never actually work, if
# the tests don't correctly mock fanout.
FANOUT_REALM_ID = 'abc123'
FANOUT_REALM_KEY = 'xyz987'
AMARA_USERNAME = 'amara'
AMARA_API_KEY = 'super-secret-key'
AMARA_TEAM = 'muzilla'
AMARA_PROJECT = 'airmuzilla'
# We mock all testing that involves running ffmpeg.
# If a test is poorly mocked, it should actually fail to run.
FFMPEG_LOCATION = '/dont/ever/actually/run/ffmpeg'
REV_CLIENT_API_KEY = 'testsomerevclientapikey'
REV_USER_API_KEY = 'testsomerevuserapikey'
REV_BASE_URL = 'https://api-rev.example.com'
# Custom Key-Value store for sorl.thumbnail that doesn't use the ORM
# at all. In fact, it doesn't even use the LocMemCache.
THUMBNAIL_KVSTORE = 'airmozilla.base.tests.testbase.FastSorlKVStore'
THUMBNAIL_ENGINE = 'airmozilla.base.tests.testbase.FastSorlEngine'
AUTH0_CLIENT_ID = 'test-auth0-test-client-id'
AUTH0_SECRET = 'super-secret-auth0-secret'
AUTH0_DOMAIN = 'example.auth0.bogus'
# This makes sure we don't accidentally leak tests that depend on real
# working URLs
VIDLY_VIDEO_URL_FORMAT = 'https://vid.ly.example/{}?content=video&format=webm'
VIDLY_POSTER_URL_FORMAT = 'https://vid.ly.example/{}/poster'
| blossomica/airmozilla | airmozilla/settings/test.py | Python | bsd-3-clause | 3,601 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging script for proton_decay_study."""
from setuptools import setup, find_packages


def _read_requirements(path):
    """Return requirement specifiers from *path*, skipping blanks and comments."""
    with open(path) as req_file:
        return [
            line.strip()
            for line in req_file
            if line.strip() and not line.strip().startswith('#')
        ]


# pip.req.parse_requirements and pip.download.PipSession were private pip
# internals and were removed in pip 10; read the requirement files directly.
reqs = _read_requirements('requirements.txt')
dev_reqs = _read_requirements('requirements_dev.txt')

setup(
    name='proton_decay_study',
    version='0.2.0',
    description="Looks for proton decay. USING NEURAL NETWORKS",
    long_description="""
Top-level code base for CNN study of LArTPC data for proton decay.
This relies primarily on Kevlar and Keras with the Tensorflow backend.
Kevlar provides the data interface consumed by the generators. Keras and
Tensorflow provide the framework used to train and utilize the networks.
""",
    author="Kevin Wierman",
    author_email='kevin.wierman@pnnl.gov',
    url='https://github.com/HEP-DL/proton_decay_study',
    packages=find_packages(),
    package_dir={'proton_decay_study':
                 'proton_decay_study'},
    entry_points={
        'console_scripts': [
            'test_file_input=proton_decay_study.cli:test_file_input',
            'test_threaded_files=proton_decay_study.cli:test_threaded_file_input',
            'train_kevnet=proton_decay_study.cli:train_kevnet',
            'make_kevnet_featuremap=proton_decay_study.cli:make_kevnet_featuremap'
        ]
    },
    include_package_data=True,
    install_requires=reqs,
    license="MIT license",
    zip_safe=True,
    keywords='proton_decay_study',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2.7',
    ],
    test_suite='tests',
    tests_require=dev_reqs
)
| HEP-DL/proton_decay_study | setup.py | Python | mit | 1,937 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-15 19:52
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make inventario.fecha nullable with a
    per-insert default of today's date."""

    dependencies = [
        ('administrador', '0002_auto_20160615_0157'),
    ]

    operations = [
        migrations.AlterField(
            model_name='inventario',
            name='fecha',
            # datetime.date.today is passed uncalled, so the default is
            # evaluated at save time rather than at migration time.
            field=models.DateField(default=datetime.date.today, null=True),
        ),
    ]
| isfon/DjangoSales | DjangoSales/apps/administrador/migrations/0003_auto_20160615_1452.py | Python | mit | 521 |
import pytest
from tests.plugins.upload_to_s3 import upload_file_to_s3_by_job_id
# Plugin modules (dotted paths) that pytest loads for the whole test suite.
pytest_plugins = (
    "tests.examples.examples_report_plugin",
    "tests.integration.integration_tests_plugin",
    "tests.plugins.bokeh_server",
    "tests.plugins.jupyter_notebook",
    "tests.plugins.phantomjs_screenshot",
    "tests.plugins.image_diff",
    "tests.plugins.file_server",
)
def pytest_addoption(parser):
    """Register this suite's custom command-line flags with pytest."""
    parser.addoption(
        "--upload",
        dest="upload",
        action="store_true",
        default=False,
        help="upload test artefacts to S3",
    )
    parser.addoption(
        "--log-file",
        dest="log_file",
        metavar="path",
        action="store",
        default='examples.log',
        help="where to write the complete log",
    )
def pytest_sessionfinish(session, exitstatus):
    """After the test session, optionally push the HTML report to S3.

    Uploads only on the master node (not xdist slaves), only when --upload
    was passed and an HTML report path was configured.
    """
    options = session.config.option
    running_as_slave = hasattr(session.config, 'slaveinput')
    if options.upload and options.htmlpath and not running_as_slave:
        upload_file_to_s3_by_job_id(options.htmlpath)
@pytest.fixture(scope="session")
def log_file(request):
    """Session-scoped append handle to the shared examples log file.

    On the master node (not an xdist slave) the file is truncated first so
    each run starts with a clean log.

    Note: replaces the deprecated ``@pytest.yield_fixture`` decorator and the
    ``pytest.config`` global (removed in pytest 5) with ``request.config``.
    """
    is_slave = hasattr(request.config, 'slaveinput')
    log_path = request.config.option.log_file
    if not is_slave:
        with open(log_path, 'w') as f:
            # Clean-out any existing log-file
            f.write("")
    with open(log_path, 'a') as f:
        yield f
| percyfal/bokeh | tests/conftest.py | Python | bsd-3-clause | 1,354 |
import re
import unittest
from scrapy.http import HtmlResponse
from scrapy.link import Link
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor, BaseSgmlLinkExtractor
from scrapy.tests import get_testdata
class LinkExtractorTestCase(unittest.TestCase):
    """Tests for BaseSgmlLinkExtractor: basic extraction, <base href>
    resolution, response-encoding handling, URL matching, and
    rel="nofollow" support. (Python 2 code: note the str.decode calls.)"""

    def test_basic(self):
        # Only <a href> produces links; <img> is ignored, and relative hrefs
        # are resolved against the response URL.
        html = """<html><head><title>Page title<title>
        <body><p><a href="item/12.html">Item 12</a></p>
        <p><a href="/about.html">About us</a></p>
        <img src="/logo.png" alt="Company logo (not a link)" />
        <p><a href="../othercat.html">Other category</a></p>
        <p><a href="/">>></a></p>
        <p><a href="/" /></p>
        </body></html>"""
        response = HtmlResponse("http://example.org/somepage/index.html", body=html)
        lx = BaseSgmlLinkExtractor()  # default: tag=a, attr=href
        self.assertEqual(lx.extract_links(response),
                         [Link(url='http://example.org/somepage/item/12.html', text='Item 12'),
                          Link(url='http://example.org/about.html', text='About us'),
                          Link(url='http://example.org/othercat.html', text='Other category'),
                          Link(url='http://example.org/', text='>>'),
                          Link(url='http://example.org/', text='')])

    def test_base_url(self):
        # A <base href> overrides the response URL when resolving relative links.
        html = """<html><head><title>Page title<title><base href="http://otherdomain.com/base/" />
        <body><p><a href="item/12.html">Item 12</a></p>
        </body></html>"""
        response = HtmlResponse("http://example.org/somepage/index.html", body=html)
        lx = BaseSgmlLinkExtractor()  # default: tag=a, attr=href
        self.assertEqual(lx.extract_links(response),
                         [Link(url='http://otherdomain.com/base/item/12.html', text='Item 12')])
        # base url is an absolute path and relative to host
        html = """<html><head><title>Page title<title><base href="/" />
        <body><p><a href="item/12.html">Item 12</a></p></body></html>"""
        response = HtmlResponse("https://example.org/somepage/index.html", body=html)
        self.assertEqual(lx.extract_links(response),
                         [Link(url='https://example.org/item/12.html', text='Item 12')])
        # base url has no scheme
        html = """<html><head><title>Page title<title><base href="//noschemedomain.com/path/to/" />
        <body><p><a href="item/12.html">Item 12</a></p></body></html>"""
        response = HtmlResponse("https://example.org/somepage/index.html", body=html)
        self.assertEqual(lx.extract_links(response),
                         [Link(url='https://noschemedomain.com/path/to/item/12.html', text='Item 12')])

    def test_link_text_wrong_encoding(self):
        # Bytes that cannot be decoded in the declared encoding become U+FFFD.
        html = """<body><p><a href="item/12.html">Wrong: \xed</a></p></body></html>"""
        response = HtmlResponse("http://www.example.com", body=html, encoding='utf-8')
        lx = BaseSgmlLinkExtractor()
        self.assertEqual(lx.extract_links(response), [
            Link(url='http://www.example.com/item/12.html', text=u'Wrong: \ufffd'),
        ])

    def test_extraction_encoding(self):
        # Extracted URLs are percent-encoded using the page's declared (or
        # detected) encoding.
        body = get_testdata('link_extractor', 'linkextractor_noenc.html')
        response_utf8 = HtmlResponse(url='http://example.com/utf8', body=body, headers={'Content-Type': ['text/html; charset=utf-8']})
        response_noenc = HtmlResponse(url='http://example.com/noenc', body=body)
        body = get_testdata('link_extractor', 'linkextractor_latin1.html')
        response_latin1 = HtmlResponse(url='http://example.com/latin1', body=body)
        lx = BaseSgmlLinkExtractor()
        self.assertEqual(lx.extract_links(response_utf8), [
            Link(url='http://example.com/sample_%C3%B1.html', text=''),
            Link(url='http://example.com/sample_%E2%82%AC.html', text='sample \xe2\x82\xac text'.decode('utf-8')),
        ])
        self.assertEqual(lx.extract_links(response_noenc), [
            Link(url='http://example.com/sample_%C3%B1.html', text=''),
            Link(url='http://example.com/sample_%E2%82%AC.html', text='sample \xe2\x82\xac text'.decode('utf-8')),
        ])
        self.assertEqual(lx.extract_links(response_latin1), [
            Link(url='http://example.com/sample_%F1.html', text=''),
            Link(url='http://example.com/sample_%E1.html', text='sample \xe1 text'.decode('latin1')),
        ])

    def test_matches(self):
        # With no allow/deny patterns, the base extractor matches any URL.
        url1 = 'http://lotsofstuff.com/stuff1/index'
        url2 = 'http://evenmorestuff.com/uglystuff/index'
        lx = BaseSgmlLinkExtractor()
        self.assertEqual(lx.matches(url1), True)
        self.assertEqual(lx.matches(url2), True)

    def test_link_nofollow(self):
        # rel="nofollow" is surfaced on the Link object, not filtered out.
        html = """
        <a href="page.html?action=print" rel="nofollow">Printer-friendly page</a>
        <a href="about.html">About us</a>
        """
        response = HtmlResponse("http://example.org/page.html", body=html)
        lx = SgmlLinkExtractor()
        self.assertEqual([link for link in lx.extract_links(response)], [
            Link(url='http://example.org/page.html?action=print', text=u'Printer-friendly page', nofollow=True),
            Link(url='http://example.org/about.html', text=u'About us', nofollow=False),
        ])
class SgmlLinkExtractorTestCase(unittest.TestCase):
    """Tests for SgmlLinkExtractor's filtering features: allow/deny patterns,
    domain filters, uniqueness, restrict_xpaths, URL encoding, extension
    filtering and process_value hooks."""

    def setUp(self):
        # Shared fixture page with several sample links and one external link.
        body = get_testdata('link_extractor', 'sgml_linkextractor.html')
        self.response = HtmlResponse(url='http://example.com/index', body=body)

    def test_urls_type(self):
        '''Test that the resulting urls are regular strings and not a unicode objects'''
        lx = SgmlLinkExtractor()
        self.assertTrue(all(isinstance(link.url, str) for link in lx.extract_links(self.response)))

    def test_extraction(self):
        '''Test the extractor's behaviour among different situations'''
        # No filters: every unique link on the page.
        lx = SgmlLinkExtractor()
        self.assertEqual([link for link in lx.extract_links(self.response)], [
            Link(url='http://example.com/sample1.html', text=u''),
            Link(url='http://example.com/sample2.html', text=u'sample 2'),
            Link(url='http://example.com/sample3.html', text=u'sample 3 text'),
            Link(url='http://www.google.com/something', text=u''),
        ])
        # allow pattern keeps only matching URLs.
        lx = SgmlLinkExtractor(allow=('sample', ))
        self.assertEqual([link for link in lx.extract_links(self.response)], [
            Link(url='http://example.com/sample1.html', text=u''),
            Link(url='http://example.com/sample2.html', text=u'sample 2'),
            Link(url='http://example.com/sample3.html', text=u'sample 3 text'),
        ])
        # unique=False keeps duplicate URLs (with their differing texts).
        lx = SgmlLinkExtractor(allow=('sample', ), unique=False)
        self.assertEqual([link for link in lx.extract_links(self.response)], [
            Link(url='http://example.com/sample1.html', text=u''),
            Link(url='http://example.com/sample2.html', text=u'sample 2'),
            Link(url='http://example.com/sample3.html', text=u'sample 3 text'),
            Link(url='http://example.com/sample3.html', text=u'sample 3 repetition'),
        ])
        lx = SgmlLinkExtractor(allow=('sample', ))
        self.assertEqual([link for link in lx.extract_links(self.response)], [
            Link(url='http://example.com/sample1.html', text=u''),
            Link(url='http://example.com/sample2.html', text=u'sample 2'),
            Link(url='http://example.com/sample3.html', text=u'sample 3 text'),
        ])
        # deny takes precedence over allow.
        lx = SgmlLinkExtractor(allow=('sample', ), deny=('3', ))
        self.assertEqual([link for link in lx.extract_links(self.response)], [
            Link(url='http://example.com/sample1.html', text=u''),
            Link(url='http://example.com/sample2.html', text=u'sample 2'),
        ])
        # allow_domains restricts by host.
        lx = SgmlLinkExtractor(allow_domains=('google.com', ))
        self.assertEqual([link for link in lx.extract_links(self.response)], [
            Link(url='http://www.google.com/something', text=u''),
        ])

    def test_extraction_using_single_values(self):
        '''Test the extractor's behaviour among different situations'''
        # Scalar (non-tuple) values for allow/deny/domains must also work.
        lx = SgmlLinkExtractor(allow='sample')
        self.assertEqual([link for link in lx.extract_links(self.response)], [
            Link(url='http://example.com/sample1.html', text=u''),
            Link(url='http://example.com/sample2.html', text=u'sample 2'),
            Link(url='http://example.com/sample3.html', text=u'sample 3 text'),
        ])
        lx = SgmlLinkExtractor(allow='sample', deny='3')
        self.assertEqual([link for link in lx.extract_links(self.response)], [
            Link(url='http://example.com/sample1.html', text=u''),
            Link(url='http://example.com/sample2.html', text=u'sample 2'),
        ])
        lx = SgmlLinkExtractor(allow_domains='google.com')
        self.assertEqual([link for link in lx.extract_links(self.response)], [
            Link(url='http://www.google.com/something', text=u''),
        ])
        lx = SgmlLinkExtractor(deny_domains='example.com')
        self.assertEqual([link for link in lx.extract_links(self.response)], [
            Link(url='http://www.google.com/something', text=u''),
        ])

    def test_matches(self):
        # matches() applies the same allow/deny and domain rules to a bare URL.
        url1 = 'http://lotsofstuff.com/stuff1/index'
        url2 = 'http://evenmorestuff.com/uglystuff/index'
        lx = SgmlLinkExtractor(allow=(r'stuff1', ))
        self.assertEqual(lx.matches(url1), True)
        self.assertEqual(lx.matches(url2), False)
        lx = SgmlLinkExtractor(deny=(r'uglystuff', ))
        self.assertEqual(lx.matches(url1), True)
        self.assertEqual(lx.matches(url2), False)
        lx = SgmlLinkExtractor(allow_domains=('evenmorestuff.com', ))
        self.assertEqual(lx.matches(url1), False)
        self.assertEqual(lx.matches(url2), True)
        lx = SgmlLinkExtractor(deny_domains=('lotsofstuff.com', ))
        self.assertEqual(lx.matches(url1), False)
        self.assertEqual(lx.matches(url2), True)
        # All four filters combined: a URL must satisfy every one of them.
        lx = SgmlLinkExtractor(allow=('blah1',), deny=('blah2',),
                               allow_domains=('blah1.com',),
                               deny_domains=('blah2.com',))
        self.assertEqual(lx.matches('http://blah1.com/blah1'), True)
        self.assertEqual(lx.matches('http://blah1.com/blah2'), False)
        self.assertEqual(lx.matches('http://blah2.com/blah1'), False)
        self.assertEqual(lx.matches('http://blah2.com/blah2'), False)

    def test_restrict_xpaths(self):
        # Only links inside the nodes selected by the XPath are extracted.
        lx = SgmlLinkExtractor(restrict_xpaths=('//div[@id="subwrapper"]', ))
        self.assertEqual([link for link in lx.extract_links(self.response)], [
            Link(url='http://example.com/sample1.html', text=u''),
            Link(url='http://example.com/sample2.html', text=u'sample 2'),
        ])

    def test_restrict_xpaths_encoding(self):
        """Test restrict_xpaths with encodings"""
        html = """<html><head><title>Page title<title>
        <body><p><a href="item/12.html">Item 12</a></p>
        <div class='links'>
        <p><a href="/about.html">About us\xa3</a></p>
        </div>
        <div>
        <p><a href="/nofollow.html">This shouldn't be followed</a></p>
        </div>
        </body></html>"""
        response = HtmlResponse("http://example.org/somepage/index.html", body=html, encoding='windows-1252')
        lx = SgmlLinkExtractor(restrict_xpaths="//div[@class='links']")
        self.assertEqual(lx.extract_links(response),
                         [Link(url='http://example.org/about.html', text=u'About us\xa3')])

    def test_restrict_xpaths_concat_in_handle_data(self):
        """html entities cause SGMLParser to call handle_data hook twice"""
        body = """<html><body><div><a href="/foo">>\xbe\xa9<\xb6\xab</a></body></html>"""
        response = HtmlResponse("http://example.org", body=body, encoding='gb18030')
        lx = SgmlLinkExtractor(restrict_xpaths="//div")
        self.assertEqual(lx.extract_links(response),
                         [Link(url='http://example.org/foo', text=u'>\u4eac<\u4e1c',
                               fragment='', nofollow=False)])

    def test_encoded_url(self):
        # Percent-encoded characters in the base URL must be preserved.
        body = """<html><body><div><a href="?page=2">BinB</a></body></html>"""
        response = HtmlResponse("http://known.fm/AC%2FDC/", body=body, encoding='utf8')
        lx = SgmlLinkExtractor()
        self.assertEqual(lx.extract_links(response), [
            Link(url='http://known.fm/AC%2FDC/?page=2', text=u'BinB', fragment='', nofollow=False),
        ])

    def test_encoded_url_in_restricted_xpath(self):
        # Same as above, but going through the restrict_xpaths code path.
        body = """<html><body><div><a href="?page=2">BinB</a></body></html>"""
        response = HtmlResponse("http://known.fm/AC%2FDC/", body=body, encoding='utf8')
        lx = SgmlLinkExtractor(restrict_xpaths="//div")
        self.assertEqual(lx.extract_links(response), [
            Link(url='http://known.fm/AC%2FDC/?page=2', text=u'BinB', fragment='', nofollow=False),
        ])

    def test_deny_extensions(self):
        # Links to known binary/media extensions (e.g. .jpg) are skipped.
        html = """<a href="page.html">asd</a> and <a href="photo.jpg">"""
        response = HtmlResponse("http://example.org/", body=html)
        lx = SgmlLinkExtractor()
        self.assertEqual(lx.extract_links(response), [
            Link(url='http://example.org/page.html', text=u'asd'),
        ])

    def test_process_value(self):
        """Test restrict_xpaths with encodings"""
        # process_value lets callers transform raw href values (here it pulls
        # the target URL out of a javascript: pseudo-link).
        html = """
        <a href="javascript:goToPage('../other/page.html','photo','width=600,height=540,scrollbars'); return false">Link text</a>
        <a href="/about.html">About us</a>
        """
        response = HtmlResponse("http://example.org/somepage/index.html", body=html, encoding='windows-1252')
        def process_value(value):
            m = re.search("javascript:goToPage\('(.*?)'", value)
            if m:
                return m.group(1)
        lx = SgmlLinkExtractor(process_value=process_value)
        self.assertEqual(lx.extract_links(response),
                         [Link(url='http://example.org/other/page.html', text='Link text')])

    def test_base_url_with_restrict_xpaths(self):
        # <base href> must still apply when restrict_xpaths is used.
        html = """<html><head><title>Page title<title><base href="http://otherdomain.com/base/" />
        <body><p><a href="item/12.html">Item 12</a></p>
        </body></html>"""
        response = HtmlResponse("http://example.org/somepage/index.html", body=html)
        lx = SgmlLinkExtractor(restrict_xpaths="//p")
        self.assertEqual(lx.extract_links(response),
                         [Link(url='http://otherdomain.com/base/item/12.html', text='Item 12')])
if __name__ == "__main__":
unittest.main()
| pablohoffman/scrapy | scrapy/tests/test_contrib_linkextractors.py | Python | bsd-3-clause | 14,608 |
#!/usr/bin/python
"""
Skeleton code for k-means clustering mini-project.
"""
import pickle
import numpy
import matplotlib.pyplot as plt
import sys
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
def Draw(pred, features, poi, mark_poi=False, name="image.png", f1_name="feature 1", f2_name="feature 2"):
    """Scatter-plot each point colored by its cluster assignment.

    pred     -- cluster label per point (indexes into ``colors``, so at most
                five clusters are supported as written)
    features -- list of [f1, f2] pairs to plot
    poi      -- per-point person-of-interest flags
    mark_poi -- when True, overlay a red star on every POI point
    name     -- file name the figure is saved to before being shown
    """
    ### plot each cluster with a different color--add more colors for
    ### drawing more than five clusters
    colors = ["b", "c", "k", "m", "g"]
    for ii, pp in enumerate(pred):
        plt.scatter(features[ii][0], features[ii][1], color = colors[pred[ii]])
    ### if you like, place red stars over points that are POIs (just for funsies)
    if mark_poi:
        for ii, pp in enumerate(pred):
            if poi[ii]:
                plt.scatter(features[ii][0], features[ii][1], color="r", marker="*")
    plt.xlabel(f1_name)
    plt.ylabel(f2_name)
    plt.savefig(name)
    plt.show()
### load in the dict of dicts containing all the data on each person in the dataset
# NOTE(review): the pickle is opened in text mode ("r"), which only works on
# Python 2 -- confirm the intended interpreter before porting.
data_dict = pickle.load( open("../final_project/final_project_dataset.pkl", "r") )
### there's an outlier--remove it!
data_dict.pop("TOTAL", 0)
# prints the minimum and maximum salary, ignores NaN values
# eso = [data_dict[key]["salary"] for key in data_dict if data_dict[key]["salary"] != "NaN"]
# print(min(eso), max(eso))
### the input features we want to use
### can be any key in the person-level dictionary (salary, director_fees, etc.)
feature_1 = "salary"
feature_2 = "exercised_stock_options"
# feature_3 = "total_payments"
poi  = "poi"
features_list = [poi, feature_1, feature_2] #, feature_3
data = featureFormat(data_dict, features_list )
# NOTE: `poi` is rebound here from the feature-key string to the list of
# per-person labels returned by targetFeatureSplit.
poi, finance_features = targetFeatureSplit( data )
# Feature Scaling: rescale each feature independently to [0, 1]
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
salaries, eso = [], []
for f1, f2 in finance_features:
    salaries.append(f1)
    eso.append(f2)
scaled_salaries = scaler.fit_transform(salaries)
# print(scaler.transform([200000.0]))
scaled_eso = scaler.fit_transform(eso)
# print(scaler.transform([1000000.0]))
# Convert old feature values in finance_features list to the new scaled feature values.
finance_features = []
for i in range(len(salaries)):
    finance_features.append([scaled_salaries[i], scaled_eso[i]])
print(finance_features)
### in the "clustering with 3 features" part of the mini-project,
### you'll want to change this line to
### for f1, f2, _ in finance_features:
### (as it's currently written, the line below assumes 2 features)
for f1, f2 in finance_features: # , _
    plt.scatter( f1, f2 )
plt.show()
### cluster here; create predictions of the cluster labels
### for the data and store them to a list called pred
from sklearn.cluster import KMeans
# NOTE(review): clustering runs on the unscaled `data` array while the plots
# use the scaled `finance_features` -- presumably intentional for the
# exercise; confirm.
pred = KMeans(n_clusters=2).fit_predict(data)
### rename the "name" parameter when you change the number of features
### so that the figure gets saved to a different file
try:
    Draw(pred, finance_features, poi, mark_poi=False, name="clusters.pdf", f1_name=feature_1, f2_name=feature_2)
except NameError:
    # FIX: use the print() function, consistent with print(...) calls above;
    # the original Python 2 print statement is a syntax error on Python 3.
    print("no predictions object named pred found, no clusters to plot")
| yavuzovski/playground | machine learning/Udacity/ud120-projects/k_means/k_means_cluster.py | Python | gpl-3.0 | 3,234 |
import PythonQt
from PythonQt import QtCore, QtGui, QtUiTools
from director import lcmUtils
from director import applogic as app
from director.utime import getUtime
from director.timercallback import TimerCallback
import numpy as np
import math
from time import time
from copy import deepcopy
def addWidgetsToDict(widgets, d):
    """Recursively index named widgets into *d*, keyed by objectName.

    Widgets with an empty objectName are skipped, and their subtrees are
    not descended into.
    """
    for child in widgets:
        name = child.objectName
        if not name:
            continue
        d[str(name)] = child
        addWidgetsToDict(child.children(), d)
class WidgetDict(object):
    """Attribute-style access to named widgets: ``wd.someObjectName``."""
    def __init__(self, widgets):
        # indexing directly into __dict__ turns every named (grand)child
        # widget into an attribute of this object
        addWidgetsToDict(widgets, self.__dict__)
class SpindleSpinChecker(object):
    """Polls the spindle velocity and shows a red status-bar warning button
    while the spindle appears stuck (near-zero average velocity)."""
    def __init__(self, spindleMonitor):
        self.spindleMonitor = spindleMonitor
        self.timer = TimerCallback(targetFps=3)
        self.timer.callback = self.update
        self.warningButton = None
        # menu QAction; created lazily by setupMenuAction()
        self.action = None
    def update(self):
        # below 0.2 rad/s (presumably -- units not visible here; confirm)
        # the spindle is considered stuck
        if abs(self.spindleMonitor.getAverageSpindleVelocity()) < 0.2:
            self.notifyUserStatusBar()
        else:
            self.clearStatusBarWarning()
    def start(self):
        self.action.checked = True
        self.timer.start()
    def stop(self):
        self.action.checked = False
        self.timer.stop()
    def setupMenuAction(self):
        """Install the checkable Tools menu entry that toggles monitoring."""
        self.action = app.addMenuAction('Tools', 'Spindle Stuck Warning')
        self.action.setCheckable(True)
        self.action.checked = self.timer.isActive()
        self.action.connect('triggered()', self.onActionChanged)
    def onActionChanged(self):
        if self.action.checked:
            self.start()
        else:
            self.stop()
    def clearStatusBarWarning(self):
        """Remove the warning button from the status bar, if present."""
        if self.warningButton:
            self.warningButton.deleteLater()
            self.warningButton = None
    def notifyUserStatusBar(self):
        """Show the warning button; no-op if it is already visible."""
        if self.warningButton:
            return
        self.warningButton = QtGui.QPushButton('Spindle Stuck Warning')
        self.warningButton.setStyleSheet("background-color:red")
        app.getMainWindow().statusBar().insertPermanentWidget(0, self.warningButton)
class MultisensePanel(object):
    """Qt panel for configuring the Multisense sensor head (spin rate,
    camera fps/gain/exposure, LED); changes are sent via the driver when
    the Send button is clicked."""
    def __init__(self, multisenseDriver):
        self.multisenseDriver = multisenseDriver
        # set True whenever any control changes; cleared after publishing
        self.multisenseChanged = False
        loader = QtUiTools.QUiLoader()
        uifile = QtCore.QFile(':/ui/ddMultisense.ui')
        assert uifile.open(uifile.ReadOnly)
        self.widget = loader.load(uifile)
        self.ui = WidgetDict(self.widget.children())
        self.updateTimer = TimerCallback(targetFps=2)
        self.updateTimer.callback = self.updatePanel
        self.updateTimer.start()
        # manual gain/exposure controls start disabled (auto-gain default)
        self.widget.headCamGainSpinner.setEnabled(False)
        self.widget.headCamExposureSpinner.setEnabled(False)
        #connect the callbacks
        self.widget.spinRateSpinner.valueChanged.connect(self.spinRateChange)
        self.widget.scanDurationSpinner.valueChanged.connect(self.scanDurationChange)
        self.widget.headCamFpsSpinner.valueChanged.connect(self.headCamFpsChange)
        self.widget.headCamGainSpinner.valueChanged.connect(self.headCamGainChange)
        self.widget.headCamExposureSpinner.valueChanged.connect(self.headCamExposureChange)
        self.widget.headAutoGainCheck.clicked.connect(self.headCamAutoGainChange)
        self.widget.ledOnCheck.clicked.connect(self.ledOnCheckChange)
        self.widget.ledBrightnessSpinner.valueChanged.connect(self.ledBrightnessChange)
        self.widget.sendButton.clicked.connect(self.sendButtonClicked)
        self.updatePanel()
    # --- simple accessors for the current widget values ---
    def getCameraFps(self):
        return self.widget.headCamFpsSpinner.value
    def getCameraGain(self):
        return self.widget.headCamGainSpinner.value
    def getCameraExposure(self):
        return self.widget.headCamExposureSpinner.value
    def getCameraLedOn(self):
        return self.widget.ledOnCheck.isChecked()
    def getCameraLedBrightness(self):
        return self.widget.ledBrightnessSpinner.value
    def getCameraAutoGain(self):
        return self.widget.headAutoGainCheck.isChecked()
    def getSpinRate(self):
        return self.widget.spinRateSpinner.value
    def getScanDuration(self):
        return self.widget.scanDurationSpinner.value
    # --- change handlers: flag the pending change, keep widgets in sync ---
    def ledBrightnessChange(self, event):
        self.multisenseChanged = True
    def ledOnCheckChange(self, event):
        self.multisenseChanged = True
    def headCamExposureChange(self, event):
        self.multisenseChanged = True
    def headCamAutoGainChange(self, event):
        self.multisenseChanged = True
        # manual gain/exposure only make sense when auto-gain is off
        self.widget.headCamGainSpinner.setEnabled(not self.getCameraAutoGain())
        self.widget.headCamExposureSpinner.setEnabled(not self.getCameraAutoGain())
    def headCamFpsChange(self, event):
        self.multisenseChanged = True
    def headCamGainChange(self, event):
        self.multisenseChanged = True
    def spinRateChange(self, event):
        # keep the scan-duration spinner consistent: duration = 60/(2*rate),
        # capped at 240 s (also used when the spindle is stopped)
        self.multisenseChanged = True
        spinRate = self.getSpinRate()
        if spinRate == 0.0:
            scanDuration = 240.0
        else:
            scanDuration = abs(60.0 / (spinRate * 2))
            if scanDuration > 240.0:
                scanDuration = 240.0
        self.widget.scanDurationSpinner.blockSignals(True)
        self.widget.scanDurationSpinner.value = scanDuration
        self.widget.scanDurationSpinner.blockSignals(False)
    def scanDurationChange(self, event):
        # inverse of spinRateChange: rate = 60/(2*duration)
        self.multisenseChanged = True
        scanDuration = self.getScanDuration()
        spinRate = abs(60.0 / (scanDuration * 2))
        self.widget.spinRateSpinner.blockSignals(True)
        self.widget.spinRateSpinner.value = spinRate
        self.widget.spinRateSpinner.blockSignals(False)
    def sendButtonClicked(self, event):
        self.publishCommand()
    def updatePanel(self):
        # periodic refresh hook; currently nothing to update when hidden
        if not self.widget.isVisible():
            return
    def publishCommand(self):
        """Send the current panel settings to the device via the driver."""
        fps = self.getCameraFps()
        camGain = self.getCameraGain()
        exposure = 1000*self.getCameraExposure()  # ms -> us, presumably; confirm
        ledFlash = self.getCameraLedOn()
        ledDuty = self.getCameraLedBrightness()
        spinRate = self.getSpinRate()
        autoGain = 1 if self.getCameraAutoGain() else 0
        self.multisenseDriver.sendMultisenseCommand(fps, camGain, exposure, autoGain, spinRate, ledFlash, ledDuty)
        self.multisenseChanged = False
        self.updateTimer.start()
def _getAction():
    # Toolbar action that shows/hides the Multisense dock widget.
    return app.getToolBarActions()['ActionMultisensePanel']
def init(driver):
    """Create the module-level panel and its (initially hidden) dock."""
    global panel
    global dock
    panel = MultisensePanel(driver)
    dock = app.addWidgetToDock(panel.widget, action=_getAction())
    dock.hide()
    return panel
| patmarion/director | src/python/director/multisensepanel.py | Python | bsd-3-clause | 6,645 |
__author__ = 'Girish'
import re
import json
from textwrap import wrap
try:
import urllib2 as request
from urllib import quote
except:
from urllib import request
from urllib.parse import quote
class Translator:
    """Thin client for Google Translate's unofficial translate_a endpoint.

    NOTE(review): this scrapes an undocumented HTTP API with a spoofed
    browser User-Agent; the endpoint and response format may change or be
    blocked at any time.
    """
    def __init__(self, to_lang, from_lang='auto'):
        self.from_lang = from_lang
        self.to_lang = to_lang
    def translate(self, source):
        """Translate *source*, splitting it into <=1000-char chunks and
        joining the per-chunk translations with spaces."""
        self.source_list = wrap(source, 1000, replace_whitespace=False)
        return ' '.join(self._get_translation_from_google(s) for s in self.source_list)
    def _get_translation_from_google(self, source):
        # concatenate the 'trans' field of every sentence in the JSON reply
        json5 = self._get_json5_from_google(source)
        translation = ''
        for sentence in json.loads(json5)['sentences']:
            translation += sentence['trans']
        return translation
    def _get_json5_from_google(self, source):
        # performs the actual HTTP request; returns the raw JSON text
        escaped_source = quote(source, '')
        headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.168 Safari/535.19'}
        req = request.Request(
            url="http://translate.google.com/translate_a/t?client=p&ie=UTF-8&oe=UTF-8"
            +"&sl=%s&tl=%s&text=%s" % (self.from_lang, self.to_lang, escaped_source)
            , headers = headers)
        r = request.urlopen(req)
        return r.read().decode('utf-8')
| JainamJhaveri/collegeProjects | googleTranslate/Translator.py | Python | mit | 1,366 |
"""KNX core module
This implements some core KNX classes and methods.
"""
import re
from knxip.helper import tohex
E_NO_ERROR = 0x00
E_HOST_PROTOCOL_TYPE = 0x01
E_VERSION_NOT_SUPPORTED = 0x02
E_SEQUENCE_NUMBER = 0x04
E_CONNECTION_ID = 0x21
E_CONNECTION_TYPE = 0x22
E_CONNECTION_OPTION = 0x23
E_NO_MORE_CONNECTIONS = 0x24
E_DATA_CONNECTION = 0x26
E_KNX_CONNECTION = 0x27
E_TUNNELING_LAYER = 0x28
def parse_group_address(addr):
    """Parse a KNX group address string and return it as an integer.

    Accepts the plain numeric form ("2563"), the two-level "main/sub"
    form (main*2048 + sub) and the three-level "main/middle/sub" form
    (main*2048 + middle*256 + sub).

    Raises KNXException for None or for anything that matches no scheme.
    """
    if addr is None:
        raise KNXException("No address given")
    if re.match('[0-9]+$', addr):
        return int(addr)
    two_level = re.match("([0-9]+)/([0-9]+)$", addr)
    if two_level:
        return int(two_level.group(1)) * 2048 + int(two_level.group(2))
    three_level = re.match("([0-9]+)/([0-9]+)/([0-9]+)$", addr)
    if three_level:
        main, middle, sub = (int(g) for g in three_level.groups())
        return main * 2048 + middle * 256 + sub
    raise KNXException("Address {} does not match any address scheme".
                       format(addr))
class ValueCache(object):
    """A minimal dictionary-backed value cache."""

    def __init__(self):
        """Start with an empty cache."""
        self.values = {}

    def get(self, name):
        """Return the cached value for *name*, or None if absent."""
        return self.values.get(name)

    def set(self, name, value):
        """Store *value* under *name*.

        Returns True when the stored value actually changed, False when
        the cache already held an equal value.
        """
        if self.values.get(name) == value:
            return False
        self.values[name] = value
        return True

    def clear(self):
        """Drop every cached entry."""
        self.values = {}
class KNXException(Exception):
    """Raised when a KNX operation fails; carries a KNXnet/IP error code."""

    errorcode = 0

    def __init__(self, message, errorcode=0):
        """Store *message* and the numeric KNXnet/IP *errorcode*."""
        super(KNXException, self).__init__(message)
        self.errorcode = errorcode

    def __str__(self):
        """Append a human-readable error-code description to the message."""
        descriptions = {
            E_NO_ERROR: "no error",
            E_HOST_PROTOCOL_TYPE: "protocol type error",
            E_VERSION_NOT_SUPPORTED: "version not supported",
            E_SEQUENCE_NUMBER: "invalid sequence number",
            E_CONNECTION_ID: "invalid connection id",
            E_CONNECTION_TYPE: "invalid connection type",
            E_CONNECTION_OPTION: "invalid connection option",
            E_NO_MORE_CONNECTIONS: "no more connection possible",
            E_DATA_CONNECTION: "data connection error",
            E_KNX_CONNECTION: "KNX connection error",
            E_TUNNELING_LAYER: "tunneling layer error",
        }
        text = descriptions.get(self.errorcode, "unknown error code")
        return "{} {}".format(super().__str__(), text)
#pylint: disable=too-many-instance-attributes
#pylint: disable=invalid-name
class KNXMessage(object):
    """This represents a message (telegram) on the KNX bus.

    See to_frame() for the exact bit layout of the serialized frame.
    """
    repeat = 0        # repeat flag, 1 bit
    priority = 3      # (0 = system, 1 - alarm, 2 - high, 3 - normal)
    src_addr = 0      # 16-bit source address
    dst_addr = 0      # 16-bit destination address
    dt = 0
    routing = 1       # routing counter, 3 bits (0-7)
    length = 1        # payload length + 1, 4 bits
    data = [0]
    multicast = 1     # multicast flag, 1 bit

    def __init__(self):
        """Initialize an empty KNX message"""
        pass

    def sanitize(self):
        """Clamp every field into its legal bit range before serializing."""
        self.repeat = self.repeat % 2
        self.priority = self.priority % 4
        self.src_addr = self.src_addr % 0x10000
        self.dst_addr = self.dst_addr % 0x10000
        self.multicast = self.multicast % 2
        self.routing = self.routing % 8
        self.length = self.length % 16
        for i in range(0, self.length - 1):
            self.data[i] = self.data[i] % 0x100

    def to_frame(self):
        """Serialize the message into its wire (frame) format.

        Returns a bytearray: control byte, source address (2 bytes),
        destination address (2 bytes), a byte combining multicast flag
        (bit 7), routing counter (bits 4-6) and length (bits 0-3), the
        payload, and a trailing modulo-256 checksum.
        """
        self.sanitize()
        res = []
        res.append((1 << 7) + (1 << 4) + (self.repeat << 5) +
                   (self.priority << 2))
        res.append(self.src_addr >> 8)
        res.append(self.src_addr % 0x100)
        res.append(self.dst_addr >> 8)
        res.append(self.dst_addr % 0x100)
        res.append((self.multicast << 7) + (self.routing << 4) + self.length)
        for i in range(0, self.length - 1):
            res.append(self.data[i])
        checksum = 0
        for i in range(0, 5 + self.length):
            checksum += res[i]
        res.append(checksum % 0x100)
        return bytearray(res)

    @classmethod
    def from_frame(cls, frame):
        """Parse *frame* and return the corresponding KNXMessage.

        Raises KNXException when the checksum or the encoded length does
        not match the frame contents.
        """
        message = cls()
        # Check checksum first
        checksum = 0
        for i in range(0, len(frame) - 1):
            checksum += frame[i]
        if (checksum % 0x100) != frame[len(frame) - 1]:
            raise KNXException('Checksum error in frame {}, '
                               'expected {} but got {}'
                               .format(tohex(frame), frame[len(frame) - 1],
                                       checksum % 0x100))
        message.repeat = (frame[0] >> 5) & 0x01
        message.priority = (frame[0] >> 2) & 0x03
        message.src_addr = (frame[1] << 8) + frame[2]
        message.dst_addr = (frame[3] << 8) + frame[4]
        message.multicast = (frame[5] >> 7)
        # BUGFIX: the routing counter occupies 3 bits (to_frame packs a
        # value sanitized with % 8 into bits 4-6); the previous 0x03 mask
        # dropped the top bit, decoding counters 4-7 incorrectly.
        message.routing = (frame[5] >> 4) & 0x07
        message.length = frame[5] & 0x0f
        message.data = frame[6:-1]
        if len(message.data) + 1 != message.length:
            raise KNXException(
                'Frame {} has not the correct length'.format(tohex(frame)))
        return message
| usul27/pknx | knxip/core.py | Python | mit | 5,873 |
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 10 15:50:50 2015
simple portfolio from cash flow
@author: xuyu
"""
import datetime
import os
from collections import defaultdict
from security import Security
from parse_flow import FlowRecord
from tdx_parser.parse_tdx_day import get_dayline
from dzh_parser import parse_dzh_div
from dump_flow import load_flow
import pandas as pd
cash = Security('0', 'SZ')
def generate_amount_line(flow_file):
    """Replay the cash-flow records day by day and return a one-column
    DataFrame of the total portfolio market value per business day."""
    div_file="E:\Stock\dzh365\Download\PWR/full.PWR"
    flow = load_flow(flow_file)
    # dividend records; currently unused because deal_div is a stub
    div = parse_dzh_div(div_file)
    closeLines = dict()  # cache: security -> forward-filled close series
    pf = PF()
    start_date, end_date = flow[0].date, flow[-1].date
    dates = pd.date_range(start_date, end_date, freq='B')
    #current = next(dates)
    line = list()
    i = 0
    for day in dates:
        day = day.date()
        deal_div(pf, day, div)
        # apply every flow record dated up to and including this day
        while i < len(flow) and flow[i].date <= day:
            pf.append(flow[i])
            i += 1
        # sanity check: cash tracked by the portfolio must match the
        # statement balance (MRest) of the last applied record.
        # NOTE(review): when no record has been applied yet (i == 0) this
        # reads flow[-1]; presumably flows always begin on start_date --
        # confirm.
        cash_rest = float(flow[i-1].MRest)
        if abs(cash_rest - pf[cash]) > 0.001:
            print(day, cash_rest, pf[cash])
        amount = calc_amount(pf, day, closeLines)
        line.append(amount)
    return pd.DataFrame(line, index=dates)
class PF(defaultdict):
    """Portfolio built by replaying cash-flow records.

    Maps security -> quantity (missing securities default to 0); the
    module-level ``cash`` security tracks the cash balance.  ``end`` holds
    the date of the last applied record.
    """

    def __init__(self, file=None):
        """Create an empty portfolio, optionally preloaded from *file*."""
        self.end = datetime.date.min
        self.default_factory = lambda: 0
        if file is not None:
            self.add_from_file(file)

    def append(self, flow):
        """Apply one flow record: adjust the position and the cash balance."""
        self[flow.security] += flow.quantity
        if self[flow.security] == 0:
            # drop flat positions so the portfolio only lists holdings
            del self[flow.security]
        # fee is already included in amount
        self[cash] += flow.amount
        self.end = flow.date

    def add_from_file(self, file):
        """Replay every record of *file*, skipping the two header lines."""
        for lineno, text in enumerate(open(file)):
            if lineno >= 2:
                self.append(FlowRecord(text))
def get_pad_close(sec, start_date, end_date):
    # Daily close series for *sec*, forward-filled onto business days.
    # NOTE(review): start_date/end_date are currently ignored -- the whole
    # day-line is always loaded; confirm whether slicing was intended.
    return get_dayline(sec).Close.resample('B', fill_method='ffill')
def get_price(sec, date, closeLines):
    """Price of one unit of *sec* on *date*; cash is always 1.0.

    Close series are cached in *closeLines* (mutated in place) so each
    security's day-line is only loaded once.
    """
    ## TODO: get amount for new stock befor listed (IPO)
    try:
        if sec == cash:
            return 1.0
        elif sec in closeLines:
            return closeLines[sec][date]
        else:
            closeLines[sec] = get_pad_close(sec, date, datetime.date.today())
            return closeLines[sec][date]
    except KeyError as e:
        # re-raise with the security and date attached for easier debugging
        raise KeyError("%s %s" % (sec, date))
def calc_amount(pf, date, closeLines):
    # Total market value of portfolio *pf* on *date*: sum of price * quantity.
    return sum(get_price(k, date, closeLines) * v for k,v in pf.items())
def deal_div(pf, day, div):
    # Apply dividend/split events to the portfolio for *day*.
    # Intentionally a no-op stub: not implemented yet.
    pass
# Manual smoke test: build the daily value line from a pickled flow dump
# and plot it.
if __name__ == '__main__':
    f = 'test_data/flow2014066-06.xls'
    # pf = PF()
    # pf.add_from_file(f)
    # print(list(zip(pf.keys(),pf.values())))
    #print(pf.end)
    flow_file = 'test_data/test.pickle'
    line = generate_amount_line(flow_file)
    import matplotlib.pyplot as plt
    plt.plot(line.index, line)
print(line.resample('W-FRI')) | sein-tao/StockAnalysis | PF_simple.py | Python | gpl-2.0 | 3,113 |
from pyCodeLib import *
import warnings
import glob
import re
import argparse
import sys
warnings.filterwarnings('ignore')
# Command line: a protein list file plus an integer --mode selecting which
# phi computation to run (see the dispatch at the bottom of this file).
parser = argparse.ArgumentParser(
    description="compute phi for protein list")
parser.add_argument("proteins", help="The name of the protein list")
parser.add_argument("-m", "--mode", type=int, default=0)
args = parser.parse_args()
# echo the exact invocation for provenance in log files
print(" ".join(sys.argv))
# transfer database
def transferPDB(file=None, source="database/dompdb_cleaned/"):
    """Copy cleaned PDB entries into database/dompdb/, adding a .pdb suffix.

    With *file* given, only the proteins listed there (one name per line)
    are copied; otherwise everything under *source* is.
    NOTE(review): os/glob appear to come in via ``from pyCodeLib import *``
    -- confirm.
    """
    if file is None:
        p_list = glob.glob(source + "*")
    else:
        with open(file) as f:
            content = f.readlines()
        # you may also want to remove whitespace characters like `\n` at the end of each line
        p_list = [source + x.strip() for x in content]
    for p in p_list:
        new_p = p.replace(source, "database/dompdb/")
        os.system(f"cp {p} {new_p}.pdb")
def addLocation(source, target):
    """Copy *source* to *target*, prefixing every line with the database
    location so each protein name becomes a relative path."""
    prefix = "../database/dompdb/"
    with open(source, "r") as inp, open(target, "w") as out:
        for line in inp:
            out.write(prefix + line)
def convertTo4cName():
    """Truncate each 7-character CATH id in proteins_name_list.txt to its
    4-character PDB id, writing proteins_4c_name_list.txt."""
    with open("proteins_4c_name_list.txt", "w") as out:
        with open("proteins_name_list.txt", "r") as f:
            for l in f:
                nl = l[:4]
                out.write(nl+"\n")
# extract seq info from fa file
def extractSeqFromFa(size=7, source="database/cath-dataset-nonredundant-S20Clean.atom.fa"):
    """Split a CATH FASTA file into one .seq file per protein under
    database/S20_seq/, named by the 7- or 4-character domain id.

    Assumes strict alternation of header (">...") and sequence lines.
    """
    os.system("mkdir -p database/S20_seq")
    with open(source, "r") as f:
        count = 0
        for l in f:
            if count % 2 == 0:
                #extract protein id
                assert(l[0] == ">")
                # print(l)
                if size == 7:
                    name = re.search('>cath\|(.*)\|(\w{7})\/(.*)', l).group(2)
                if size == 4:
                    name = re.search('>cath\|(.*)\|(\w{4})(.*)\/(.*)', l).group(2)
                # name = "test"
                # print(name)
            else:
                assert(l[0] != ">")
                # print(l)
                # write the sequence line under the id from the preceding header
                with open(f"database/S20_seq/{name}.seq", "w") as out:
                    out.write(l)
            count += 1
# convertTo4cName()
# extractSeqFromFa()
def computePhis(proteins, multiSeq=False, sampleK=1000, **kwargs):
    """Evaluate phi features over the native structures in *proteins*.

    multiSeq=True uses 'multiShuffle' decoy sequences with *sampleK*
    samples; otherwise plain 'shuffle' decoys are used.
    """
    # if addGylcines:
    #     proteins_location = "".join(proteins.split("/")[:-1]) + "/location_" + proteins.split("/")[-1]
    #     transferPDB(proteins)
    #     addLocation(proteins, proteins_location)
    #     os.chdir('database')
    #     add_virtual_glycines_list(proteins_location)
    # generate_decoy_sequences(proteins, methods=['shuffle'], databaseLocation="../../")
    if multiSeq:
        evaluate_phis_over_training_set_for_native_structures_Wei(proteins, "phi_list.txt", decoy_method='multiShuffle', max_decoys=1e+10, tm_only=False, num_processors=1, multi_seq=True, sampleK=sampleK)
    else:
        evaluate_phis_over_training_set_for_native_structures_Wei(proteins, "phi_list.txt", decoy_method='shuffle', max_decoys=1e+10, tm_only=False, num_processors=1)
# extractSeqFromFa()
def computePhisForDecoys(proteins, **kwargs):
    """Evaluate phi features over lammps-generated decoy structures.

    Extra keyword arguments are forwarded to the evaluation routine
    (e.g. withBiased, pickle, mode).
    """
    proteins_location = "".join(proteins.split("/")[:-1]) + "/location_" + proteins.split("/")[-1]
    # transferPDB(proteins)
    # addLocation(proteins, proteins_location)
    # # os.chdir('database')
    # add_virtual_glycines_list(proteins_location)
    # generate_decoy_structures(proteins, methods=['lammps'], num_decoys=[10], databaseLocation="../../")
    evaluate_phis_over_training_set_for_decoy_structures_Wei(proteins, "phi_list.txt", decoy_method='lammps', max_decoys=1e+5, tm_only=False, num_processors=1, **kwargs)
# Top-level dispatch: --mode selects which phi computation to run.  The
# modes are mutually exclusive integers, so the original mix of an
# if/elif chain followed by stand-alone `if` statements is normalized
# into a single chain for consistency (behavior unchanged).
if args.mode == 0:
    # native structures, shuffled-sequence decoys
    computePhis(args.proteins)
elif args.mode == 1:
    # lammps structure decoys
    computePhisForDecoys(args.proteins)
elif args.mode == 2:
    computePhisForDecoys(args.proteins, withBiased=True, pickle=False)
elif args.mode == 3:
    # use the native structure name. but partial data.
    computePhisForDecoys(args.proteins, withBiased=True, mode=1, pickle=False)
elif args.mode == 4:
    # native structures, multiple shuffled sequences, subsampled
    computePhis(args.proteins, multiSeq=True, sampleK=1000)
elif args.mode == 5:
    evaluate_phis_over_training_set_for_decoy_structures_Wei(args.proteins, "phi_list.txt", decoy_method='shifted', max_decoys=1e+5, tm_only=False, num_processors=1)
elif args.mode == 6:
    evaluate_phis_over_training_set_for_decoy_structures_Wei(args.proteins, "phi_list.txt", decoy_method='shifted', max_decoys=1e+5, tm_only=False, num_processors=1, withBiased=True, pickle=False)
elif args.mode == 7:
    evaluate_phis_over_training_set_for_decoy_structures_Wei(args.proteins, "phi_list.txt", decoy_method='openMM', max_decoys=1e+5, tm_only=False, num_processors=1, withBiased=True, pickle=True, mode=1)
elif args.mode == 8:
    evaluate_phis_over_training_set_for_decoy_structures_Wei(args.proteins, "phi_list.txt", decoy_method='3DRobot', max_decoys=1e+5, tm_only=False, num_processors=1, withBiased=True, pickle=True, mode=1)
| luwei0917/awsemmd_script | compute_phis.py | Python | mit | 4,935 |
from .lib import TestBase
class TestTutorial(TestBase):
    """Executable version of the smmap README/tutorial examples."""
    def test_example(self):
        """Walk through manager, cursor and buffer usage as the docs do."""
        # Memory Managers
        ##################
        import smmap
        # This instance should be globally available in your application
        # It is configured to be well suitable for 32-bit or 64 bit applications.
        mman = smmap.SlidingWindowMapManager()
        # the manager provides much useful information about its current state
        # like the amount of open file handles or the amount of mapped memory
        assert mman.num_file_handles() == 0
        assert mman.mapped_memory_size() == 0
        # and many more ...
        # Cursors
        ##########
        import smmap.test.lib
        with smmap.test.lib.FileCreator(1024 * 1024 * 8, "test_file") as fc:
            # obtain a cursor to access some file.
            c = mman.make_cursor(fc.path)
            # the cursor is now associated with the file, but not yet usable
            assert c.is_associated()
            assert not c.is_valid()
            # before you can use the cursor, you have to specify a window you want to
            # access. The following just says you want as much data as possible starting
            # from offset 0.
            # To be sure your region could be mapped, query for validity
            assert c.use_region().is_valid()  # use_region returns self
            # once a region was mapped, you must query its dimension regularly
            # to assure you don't try to access its buffer out of its bounds
            assert c.size()
            c.buffer()[0]  # first byte
            c.buffer()[1:10]  # first 9 bytes
            c.buffer()[c.size() - 1]  # last byte
            # its recommended not to create big slices when feeding the buffer
            # into consumers (e.g. struct or zlib).
            # Instead, either give the buffer directly, or use pythons buffer command.
            from smmap.util import buffer
            buffer(c.buffer(), 1, 9)  # first 9 bytes without copying them
            # you can query absolute offsets, and check whether an offset is included
            # in the cursor's data.
            assert c.ofs_begin() < c.ofs_end()
            assert c.includes_ofs(100)
            # If you are over out of bounds with one of your region requests, the
            # cursor will be come invalid. It cannot be used in that state
            assert not c.use_region(fc.size, 100).is_valid()
            # map as much as possible after skipping the first 100 bytes
            assert c.use_region(100).is_valid()
            # You can explicitly free cursor resources by unusing the cursor's region
            c.unuse_region()
            assert not c.is_valid()
            # Buffers
            #########
            # Create a default buffer which can operate on the whole file
            buf = smmap.SlidingWindowMapBuffer(mman.make_cursor(fc.path))
            # you can use it right away
            assert buf.cursor().is_valid()
            buf[0]  # access the first byte
            buf[-1]  # access the last ten bytes on the file
            buf[-10:]  # access the last ten bytes
            # If you want to keep the instance between different accesses, use the
            # dedicated methods
            buf.end_access()
            assert not buf.cursor().is_valid()  # you cannot use the buffer anymore
            assert buf.begin_access(offset=10)  # start using the buffer at an offset
| kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/smmap/test/test_tutorial.py | Python | gpl-3.0 | 3,512 |
from collections import defaultdict
class Solution(object):
    def minWindow(self, S, T):
        """Return the smallest (leftmost on ties) substring of S that
        contains T as a subsequence, or '' if none exists.

        :type S: str
        :type T: str
        :rtype: str
        """
        # positions[c] holds j-1 for every j with T[j] == c, in descending
        # order, so longer prefixes are extended before shorter ones within
        # a single scan step (a character is never used twice per step).
        positions = defaultdict(list)
        for j in range(len(T) - 1, -1, -1):
            positions[T[j]].append(j - 1)
        # starts[k] = left end of the most recent subsequence match of
        # T[:k+1]; the extra trailing slot carries the current scan index.
        starts = [None] * (len(T) + 1)
        best_lo, best_hi = float('-inf'), 0
        for i, ch in enumerate(S):
            starts[-1] = i
            for j in positions[ch]:
                if starts[j] is not None:
                    starts[j + 1] = starts[j]
            # a full match ends here; keep it only if strictly shorter
            if ch == T[-1] and starts[-2] is not None and i - starts[-2] < best_hi - best_lo:
                best_lo, best_hi = starts[-2], i
        return '' if best_lo < 0 else S[best_lo:best_hi + 1]
# Ad-hoc manual checks; the commented examples are kept for reference.
# print(Solution().minWindow("abcdebdde", "bde"))
# print(Solution().minWindow("nkzcnhczmccqouqadqtmjjzltgdzthm", "bt"))
print(Solution().minWindow("cnhczmccqouqadqtmjjzl", "mm"))
| wufangjie/leetcode | 727. Minimum Window Subsequence.py | Python | gpl-3.0 | 1,035 |
#!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A test script which attempts to detect memory leaks by calling C
functions many times and compare process memory usage before and
after the calls. It might produce false positives.
"""
import functools
import gc
import os
import socket
import sys
import threading
import time
import psutil
import psutil._common
from psutil._compat import xrange, callable
from test_psutil import (WINDOWS, POSIX, OSX, LINUX, SUNOS, BSD, TESTFN,
RLIMIT_SUPPORT, TRAVIS)
from test_psutil import (reap_children, supports_ipv6, safe_remove,
get_test_subprocess)
if sys.version_info < (2, 7):
import unittest2 as unittest # https://pypi.python.org/pypi/unittest2
else:
import unittest
LOOPS = 1000
TOLERANCE = 4096
SKIP_PYTHON_IMPL = True
def skip_if_linux():
    """Decorator factory: skip the test on Linux, where the implementation
    under test is pure python and not considered worth leak-checking."""
    return unittest.skipIf(LINUX and SKIP_PYTHON_IMPL,
                           "not worth being tested on LINUX (pure python)")
class Base(unittest.TestCase):
    """Shared leak-detection machinery: call a function LOOPS times twice
    and fail if process RSS keeps growing by more than TOLERANCE bytes."""
    proc = psutil.Process()
    def execute(self, function, *args, **kwargs):
        """Run *function* repeatedly and compare memory usage snapshots."""
        def call_many_times():
            # one warm-up call already happened, hence LOOPS - 1
            for x in xrange(LOOPS - 1):
                self.call(function, *args, **kwargs)
            del x
            gc.collect()
            return self.get_mem()
        self.call(function, *args, **kwargs)
        self.assertEqual(gc.garbage, [])
        self.assertEqual(threading.active_count(), 1)
        # RSS comparison
        # step 1
        rss1 = call_many_times()
        # step 2
        rss2 = call_many_times()
        difference = rss2 - rss1
        if difference > TOLERANCE:
            # This doesn't necessarily mean we have a leak yet.
            # At this point we assume that after having called the
            # function so many times the memory usage is stabilized
            # and if there are no leaks it should not increase any
            # more.
            # Let's keep calling fun for 3 more seconds and fail if
            # we notice any difference.
            stop_at = time.time() + 3
            while True:
                self.call(function, *args, **kwargs)
                if time.time() >= stop_at:
                    break
            del stop_at
            gc.collect()
            rss3 = self.get_mem()
            difference = rss3 - rss2
            if rss3 > rss2:
                self.fail("rss2=%s, rss3=%s, difference=%s"
                          % (rss2, rss3, difference))
    def execute_w_exc(self, exc, function, *args, **kwargs):
        """Like execute(), but assert that each call raises *exc*."""
        kwargs['_exc'] = exc
        self.execute(function, *args, **kwargs)
    def get_mem(self):
        # RSS (first field of memory_info) of the current process
        return psutil.Process().memory_info()[0]
    def call(self, function, *args, **kwargs):
        # subclasses define how a single invocation is performed
        raise NotImplementedError("must be implemented in subclass")
raise NotImplementedError("must be implemented in subclass")
class TestProcessObjectLeaks(Base):
    """Test leaks of Process class methods and properties."""
    def setUp(self):
        gc.collect()
    def tearDown(self):
        reap_children()
    def call(self, function, *args, **kwargs):
        # *function* may be a callable or the name of a self.proc method.
        # psutil.Error is deliberately swallowed: these tests measure
        # memory growth, not API success; '_exc' switches to asserting
        # that a specific exception is raised instead.
        if callable(function):
            if '_exc' in kwargs:
                exc = kwargs.pop('_exc')
                self.assertRaises(exc, function, *args, **kwargs)
            else:
                try:
                    function(*args, **kwargs)
                except psutil.Error:
                    pass
        else:
            meth = getattr(self.proc, function)
            if '_exc' in kwargs:
                exc = kwargs.pop('_exc')
                self.assertRaises(exc, meth, *args, **kwargs)
            else:
                try:
                    meth(*args, **kwargs)
                except psutil.Error:
                    pass
    @skip_if_linux()
    def test_name(self):
        self.execute('name')
    @skip_if_linux()
    def test_cmdline(self):
        self.execute('cmdline')
    @skip_if_linux()
    def test_exe(self):
        self.execute('exe')
    @skip_if_linux()
    def test_ppid(self):
        self.execute('ppid')
    @unittest.skipUnless(POSIX, "POSIX only")
    @skip_if_linux()
    def test_uids(self):
        self.execute('uids')
    @unittest.skipUnless(POSIX, "POSIX only")
    @skip_if_linux()
    def test_gids(self):
        self.execute('gids')
    @skip_if_linux()
    def test_status(self):
        self.execute('status')
    def test_nice_get(self):
        self.execute('nice')
    def test_nice_set(self):
        # re-set the current niceness so the call is a harmless no-op
        niceness = psutil.Process().nice()
        self.execute('nice', niceness)
    @unittest.skipUnless(hasattr(psutil.Process, 'ionice'),
                         "Linux and Windows Vista only")
    def test_ionice_get(self):
        self.execute('ionice')
    @unittest.skipUnless(hasattr(psutil.Process, 'ionice'),
                         "Linux and Windows Vista only")
    def test_ionice_set(self):
        if WINDOWS:
            value = psutil.Process().ionice()
            self.execute('ionice', value)
        else:
            from psutil._pslinux import cext
            self.execute('ionice', psutil.IOPRIO_CLASS_NONE)
            # invalid ioprio must raise OSError without leaking
            fun = functools.partial(cext.proc_ioprio_set, os.getpid(), -1, 0)
            self.execute_w_exc(OSError, fun)
    @unittest.skipIf(OSX or SUNOS, "feature not supported on this platform")
    @skip_if_linux()
    def test_io_counters(self):
        self.execute('io_counters')
    @unittest.skipUnless(WINDOWS, "not worth being tested on posix")
    def test_username(self):
        self.execute('username')
    @skip_if_linux()
    def test_create_time(self):
        self.execute('create_time')
    @skip_if_linux()
    def test_num_threads(self):
        self.execute('num_threads')
    @unittest.skipUnless(WINDOWS, "Windows only")
    def test_num_handles(self):
        self.execute('num_handles')
    @unittest.skipUnless(POSIX, "POSIX only")
    @skip_if_linux()
    def test_num_fds(self):
        self.execute('num_fds')
    @skip_if_linux()
    def test_threads(self):
        self.execute('threads')
    @skip_if_linux()
    def test_cpu_times(self):
        self.execute('cpu_times')
    @skip_if_linux()
    def test_memory_info(self):
        self.execute('memory_info')
    @skip_if_linux()
    def test_memory_info_ex(self):
        self.execute('memory_info_ex')
    @unittest.skipUnless(POSIX, "POSIX only")
    @skip_if_linux()
    def test_terminal(self):
        self.execute('terminal')
    @unittest.skipIf(POSIX and SKIP_PYTHON_IMPL,
                     "not worth being tested on POSIX (pure python)")
    def test_resume(self):
        self.execute('resume')
    @skip_if_linux()
    def test_cwd(self):
        self.execute('cwd')
    @unittest.skipUnless(WINDOWS or LINUX or BSD,
                         "Windows or Linux or BSD only")
    def test_cpu_affinity_get(self):
        self.execute('cpu_affinity')
    @unittest.skipUnless(WINDOWS or LINUX or BSD,
                         "Windows or Linux or BSD only")
    def test_cpu_affinity_set(self):
        # re-set the current affinity so the call is a harmless no-op
        affinity = psutil.Process().cpu_affinity()
        self.execute('cpu_affinity', affinity)
        if not TRAVIS:
            self.execute_w_exc(ValueError, 'cpu_affinity', [-1])
    @skip_if_linux()
    def test_open_files(self):
        safe_remove(TESTFN)  # needed after UNIX socket test has run
        with open(TESTFN, 'w'):
            self.execute('open_files')
    # OSX implementation is unbelievably slow
    @unittest.skipIf(OSX, "OSX implementation is too slow")
    @skip_if_linux()
    def test_memory_maps(self):
        self.execute('memory_maps')
    @unittest.skipUnless(LINUX, "Linux only")
    @unittest.skipUnless(LINUX and RLIMIT_SUPPORT,
                         "only available on Linux >= 2.6.36")
    def test_rlimit_get(self):
        self.execute('rlimit', psutil.RLIMIT_NOFILE)
    @unittest.skipUnless(LINUX, "Linux only")
    @unittest.skipUnless(LINUX and RLIMIT_SUPPORT,
                         "only available on Linux >= 2.6.36")
    def test_rlimit_set(self):
        limit = psutil.Process().rlimit(psutil.RLIMIT_NOFILE)
        self.execute('rlimit', psutil.RLIMIT_NOFILE, limit)
        self.execute_w_exc(OSError, 'rlimit', -1)
    @skip_if_linux()
    # Windows implementation is based on a single system-wide function
    @unittest.skipIf(WINDOWS, "tested later")
    def test_connections(self):
        # open one listening/datagram socket per supported family so the
        # connections() call has something to enumerate
        def create_socket(family, type):
            sock = socket.socket(family, type)
            sock.bind(('', 0))
            if type == socket.SOCK_STREAM:
                sock.listen(1)
            return sock
        socks = []
        socks.append(create_socket(socket.AF_INET, socket.SOCK_STREAM))
        socks.append(create_socket(socket.AF_INET, socket.SOCK_DGRAM))
        if supports_ipv6():
            socks.append(create_socket(socket.AF_INET6, socket.SOCK_STREAM))
            socks.append(create_socket(socket.AF_INET6, socket.SOCK_DGRAM))
        if hasattr(socket, 'AF_UNIX'):
            safe_remove(TESTFN)
            s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            s.bind(TESTFN)
            s.listen(1)
            socks.append(s)
        kind = 'all'
        # TODO: UNIX sockets are temporarily implemented by parsing
        # 'pfiles' cmd output; we don't want that part of the code to
        # be executed.
        if SUNOS:
            kind = 'inet'
        try:
            self.execute('connections', kind=kind)
        finally:
            for s in socks:
                s.close()
# Create a terminated ("zombie") process whose psutil.Process instance is
# reused by TestProcessObjectLeaksZombie below; every API call on it is
# expected to raise NoSuchProcess.
p = get_test_subprocess()
DEAD_PROC = psutil.Process(p.pid)
DEAD_PROC.kill()
DEAD_PROC.wait()
del p
class TestProcessObjectLeaksZombie(TestProcessObjectLeaks):
    """Same as above but looks for leaks occurring when dealing with
    zombie processes raising NoSuchProcess exception.
    """
    # The dead process created at module level right above this class.
    proc = DEAD_PROC
    def call(self, *args, **kwargs):
        # NoSuchProcess is the expected outcome for a dead process; only
        # memory leaks matter here, so the exception is swallowed.
        try:
            TestProcessObjectLeaks.call(self, *args, **kwargs)
        except psutil.NoSuchProcess:
            pass
    if not POSIX:
        # These signalling tests are only defined on non-POSIX platforms.
        def test_kill(self):
            self.execute('kill')
        def test_terminate(self):
            self.execute('terminate')
        def test_suspend(self):
            self.execute('suspend')
        def test_resume(self):
            self.execute('resume')
        def test_wait(self):
            self.execute('wait')
class TestModuleFunctionsLeaks(Base):
    """Test leaks of psutil module functions."""
    def setUp(self):
        gc.collect()
    def call(self, function, *args, **kwargs):
        # Resolve the psutil module function by name and invoke it once.
        fun = getattr(psutil, function)
        fun(*args, **kwargs)
    @skip_if_linux()
    def test_cpu_count_logical(self):
        # Temporarily point psutil.cpu_count at the platform implementation
        # so its code path is exercised.  FIX: the original never restored
        # the monkey-patch, leaking it into every later test.
        original = psutil.cpu_count
        psutil.cpu_count = psutil._psplatform.cpu_count_logical
        try:
            self.execute('cpu_count')
        finally:
            psutil.cpu_count = original
    @skip_if_linux()
    def test_cpu_count_physical(self):
        # Same restore-after-use fix as test_cpu_count_logical.
        original = psutil.cpu_count
        psutil.cpu_count = psutil._psplatform.cpu_count_physical
        try:
            self.execute('cpu_count')
        finally:
            psutil.cpu_count = original
    @skip_if_linux()
    def test_boot_time(self):
        self.execute('boot_time')
    @unittest.skipIf(POSIX and SKIP_PYTHON_IMPL,
                     "not worth being tested on POSIX (pure python)")
    def test_pid_exists(self):
        self.execute('pid_exists', os.getpid())
    def test_virtual_memory(self):
        self.execute('virtual_memory')
    # TODO: remove this skip when this gets fixed
    @unittest.skipIf(SUNOS,
                     "not worth being tested on SUNOS (uses a subprocess)")
    def test_swap_memory(self):
        self.execute('swap_memory')
    @skip_if_linux()
    def test_cpu_times(self):
        self.execute('cpu_times')
    @skip_if_linux()
    def test_per_cpu_times(self):
        self.execute('cpu_times', percpu=True)
    @unittest.skipIf(POSIX and SKIP_PYTHON_IMPL,
                     "not worth being tested on POSIX (pure python)")
    def test_disk_usage(self):
        self.execute('disk_usage', '.')
    def test_disk_partitions(self):
        self.execute('disk_partitions')
    @skip_if_linux()
    def test_net_io_counters(self):
        self.execute('net_io_counters')
    @unittest.skipIf(LINUX and not os.path.exists('/proc/diskstats'),
                     '/proc/diskstats not available on this Linux version')
    @skip_if_linux()
    def test_disk_io_counters(self):
        self.execute('disk_io_counters')
    # XXX - on Windows this produces a false positive
    @unittest.skipIf(WINDOWS, "XXX produces a false positive on Windows")
    def test_users(self):
        self.execute('users')
    @unittest.skipIf(LINUX,
                     "not worth being tested on Linux (pure python)")
    def test_net_connections(self):
        self.execute('net_connections')
    def test_net_if_addrs(self):
        self.execute('net_if_addrs')
    @unittest.skipIf(TRAVIS, "EPERM on travis")
    def test_net_if_stats(self):
        self.execute('net_if_stats')
def main():
    """Run all leak-test suites; return True when every test passed."""
    suite = unittest.TestSuite()
    for test_class in (TestProcessObjectLeaksZombie,
                       TestProcessObjectLeaks,
                       TestModuleFunctionsLeaks):
        suite.addTest(unittest.makeSuite(test_class))
    runner = unittest.TextTestRunner(verbosity=2)
    return runner.run(suite).wasSuccessful()
if __name__ == '__main__':
    # Exit non-zero so CI runners can detect test failures.
    if not main():
        sys.exit(1)
| Q-Leap-Networks/psutil | test/test_memory_leaks.py | Python | bsd-3-clause | 13,362 |
# -*- coding: utf-8 -*-
from sqlalchemy import Column, Integer, UnicodeText, Unicode, \
ForeignKey, SmallInteger, Float, Boolean
from sqlalchemy.orm import relationship
from aston.resources import cache
from aston.database import Base, JSONDict
from aston.database.File import Run
from aston.database.Peak import DBPeak
from aston.database.User import Group
from aston.trace.Trace import AstonSeries
from aston.trace.Events import plot_events
from aston.trace.Parser import parse_ion_string
from aston.trace.Parser import istr_type, istr_best_2d_source, token_source
from aston.trace.MathFrames import molmz, mzminus, basemz
class Palette(Base):
    """A named, group-owned collection of runs plus their display settings."""
    __tablename__ = 'palettes'
    _palette_id = Column(Integer, primary_key=True)
    _group_id = Column(Integer, ForeignKey('groups._group_id'))
    group = relationship(Group)
    name = Column(Unicode(64))
    runs = relationship('PaletteRun', backref='palette')
    style = Column(JSONDict)  # TODO: include bounds in style?
    # Comma-separated list of columns shown in the palette table UI.
    columns = Column(UnicodeText, default=u'name,vis,style,color')
class PaletteRun(Base):
    """Association of a Run with a Palette; resolves ion strings to traces."""
    __tablename__ = 'paletteruns'
    _paletterun_id = Column(Integer, primary_key=True)
    _palette_id = Column(Integer, ForeignKey('palettes._palette_id'))
    _run_id = Column(Integer, ForeignKey('runs._run_id'))
    run = relationship(Run)
    plots = relationship('Plot', backref='paletterun')
    enabled = Column(Boolean, default=False)
    #fake = Column(Binary????, default=None)
    #TODO: need some way to add in Aston-generated chromatograms
    # e.g. predictions, etc
    # save here? or save generating function here?
    _parent = None
    @property
    def _children(self):
        # Children in the palette tree are this run's plots.
        return self.plots
    @cache(maxsize=8)
    def datafile(self, source):
        """Return the datafile of the first analysis providing *source*.

        Returns None implicitly when no analysis provides it.
        """
        # find the data source
        for a in self.run.analyses:
            if source in [i.lstrip('#*') for i in a.trace.split(',')]:
                return a.datafile
    def avail_sources(self):
        """List every trace source name offered by this run's analyses."""
        return [i.lstrip('#*') for a in self.run.analyses \
                for i in a.trace.split(',')]
    def trace(self, istr, twin=None):
        """Resolve ion string *istr* into an AstonSeries.

        twin -- optional time window passed through to the datafile.
        Unimplemented pseudo-traces (coda/rnie/wmsm, r45std/r46std) fall
        through and return None.
        """
        istr, source = token_source(istr, self.avail_sources())
        if source is None:
            # No analysis provides this source; return an empty series.
            return AstonSeries()
        df = self.datafile(source)
        if istr in {'coda', 'rnie', 'wmsm'}:
            #TODO: allow more complicated options to turn
            #AstonFrames into plotable AstonSeries
            #coda
            # Windig W: The use of the Durbin-Watson criterion for
            # noise and background reduction of complex liquid
            # chromatography/mass spectrometry data and a new algorithm
            # to determine sample differences. Chemometrics and
            # Intelligent Laboratory Systems. 2005, 77:206-214.
            #rnie
            # Yunfei L, Qu H, and Cheng Y: A entropy-based method
            # for noise reduction of LC-MS data. Analytica Chimica
            # Acta 612.1 (2008)
            #wmsm
            # Fleming C et al. Windowed mass selection method:
            # a new data processing algorithm for LC-MS data.
            # Journal of Chromatography A 849.1 (1999) 71-85.
            pass
        elif istr.startswith('m_'):
            # "m_<x>": mz-minus trace; bare "m_" means minus zero.
            if istr == 'm_':
                m = 0
            else:
                m = float(istr.split('_')[1])
            return mzminus(df.data, m)
        elif istr == 'molmz':
            return molmz(df.data)
        elif istr == 'basemz':
            return basemz(df.data)
        elif istr in {'r45std', 'r46std'}:
            #TODO: calculate isotopic data
            pass
            # calculate isotopic reference for chromatogram
            #if name == 'r45std':
            #    topion = 45
            #else:
            #    topion = 46
            #std_specs = [o for o in \
            #  self.children_of_type('peak') \
            #  if o.info['p-type'] == 'Isotope Standard']
            #x = [float(o.info['p-s-time']) for o in std_specs]
            #y = [o.area(topion) / o.area(44) for o in std_specs \
            #     if o.area(44) != 0]
            #if len(x) == 0 or len(y) == 0:
            #    return self._const(0.0, twin)
            #p0 = [y[0], 0]
            #errfunc = lambda p, x, y: p[0] + p[1] * x - y
            #try:
            #    p, succ = leastsq(errfunc, p0, args=(np.array(x), \
            #                                         np.array(y)))
            #except:
            #    p = p0
            #sim_y = np.array(errfunc(p, t, np.zeros(len(t))))
            #return TimeSeries(sim_y, t, [name])
        else:
            # interpret tolerances
            # "a:b" is a range -> center +/- half-width; "x±t" is explicit;
            # otherwise a default tolerance of 0.5 is used.
            if ':' in istr:
                st = float(istr.split(':')[0])
                en = float(istr.split(':')[1])
                tol = 0.5 * (en - st)
                istr = 0.5 * (en + st)
            elif u'±' in istr:
                tol = float(istr.split(u'±')[1])
                istr = float(istr.split(u'±')[0])
            else:
                tol = 0.5
            return df.trace(istr, tol, twin=twin)
        #try:
        #    return df.trace(istr, tol, twin=twin)
        #except ValueError:
        #    return None
class Plot(Base):
    """One displayable trace of a PaletteRun, with style and axis transforms."""
    __tablename__ = 'plots'
    _plot_id = Column(Integer, primary_key=True)
    _paletterun_id = Column(Integer, ForeignKey('paletteruns._paletterun_id'))
    peaks = relationship(DBPeak, backref='dbplot')
    vis = Column(SmallInteger, default=1)
    name = Column(UnicodeText, default=u'TIC')
    style = Column(Unicode(16), default=u'auto')
    color = Column(Unicode(16), default=u'auto')
    x_offset = Column(Float, default=0)
    x_scale = Column(Float, default=1)
    y_offset = Column(Float, default=0)
    y_scale = Column(Float, default=1)
    # Set to False by trace() when the ion string fails to parse.
    is_valid = True
    @property
    def _parent(self):
        return self.paletterun
    @property
    def _children(self):
        return self.peaks
    def name_type(self):
        """Classify this plot's ion string (e.g. events/1d/2d)."""
        return istr_type(self.name.lower())
    def scan(self, t, dt=None):
        """Return the scan nearest time *t* (optionally averaged over *dt*),
        after undoing this plot's x offset/scale."""
        source = istr_best_2d_source(self.name.lower(), \
                                     self.paletterun.avail_sources())
        if source is None:
            #FIXME: still nothing, return blank scan object
            return
        else:
            # adjust the time appropriately
            t = (t - self.x_offset) / self.x_scale
            if dt is not None:
                dt /= self.x_scale
            # find the scan
            scan = self.paletterun.datafile(source).scan(t, dt)
            scan.source = source
            return scan
    def trace(self, twin=None):
        """Resolve this plot's name to an AstonSeries, applying the x/y
        offset and scale; returns None (and flags is_valid) on failure."""
        #TODO: should we just handle events in parse_ion_string?
        name_type = istr_type(self.name.lower())
        #FIXME!!!!!
        if name_type == 'events':
            return AstonSeries()
        # get a trace given my name
        tr_resolver = self.paletterun.trace
        trace = parse_ion_string(self.name.lower(), tr_resolver, twin)
        if trace is None:
            self.is_valid = False
            return None
        else:
            self.is_valid = True
        # offset and scale trace
        trace = trace * self.y_scale + self.y_offset
        if type(trace) is AstonSeries:
            trace = trace.adjust_time(offset=self.x_offset, scale=self.x_scale)
        else:
            # Scalar result: wrap it in a one-point series.
            trace = AstonSeries([trace], [0], name=self.name.lower())
        return trace
    def frame(self, twin=None):
        """Return the 2D data frame backing this plot."""
        #TODO: use twin
        source = istr_best_2d_source(self.name.lower(), \
                                     self.paletterun.avail_sources())
        return self.paletterun.datafile(source).data
    def subtraces(self, method=None, twin=None):
        # Not implemented yet; placeholder for derived sub-trace methods.
        #self.paletterun.datafile(source)
        if method == 'coda':
            pass
        elif method == 'all':
            pass
    def plot(self, ax, style, color, twin=None):
        """Draw this plot onto matplotlib axes *ax* using *style*/*color*."""
        name_type = istr_type(self.name.lower())
        label = self.paletterun.run.name + ' ' + self.name
        if name_type == 'events':
            df = self.paletterun.datafile(self.name.lower())
            evts = df.events(self.name.lower().lstrip('*'))
            plot_events(evts, color=color, ax=ax)
        elif style in {'heatmap', 'colors'}:
            #TODO: should only be on name_type == '2d' ?
            # need other ui changes first though
            #TODO: use colors
            self.frame(twin).plot(style=style, color=color, ax=ax)
        else:
            #TODO: should technically only be allowed on 1d plots
            trace = self.trace(twin)
            if trace is None:
                return
            # Map style names to matplotlib linestyle codes.
            ls = {'solid': '-', 'dash': '--', 'dot': ':', \
                  'dash-dot': '-.'}
            trace.plot(ax=ax, style=ls[style], color=color, label=label)
| molliewebb/aston | aston/database/Palette.py | Python | gpl-3.0 | 8,785 |
# -*- coding:gbk
import pickle
import os
import json
class Student(object):
    """Simple record of a student's name, age and score."""
    def __init__(self, name, age, score):
        # Bind all three fields in one tuple assignment.
        self.name, self.age, self.score = name, age, score
def student2dict(std):
    """Serialise a Student-like object into a plain dict (for json.dumps)."""
    return {attr: getattr(std, attr) for attr in ('name', 'age', 'score')}
# Demo: serialise a dict (and a custom object) to JSON in ./picking.txt.
d = dict(name='Bob',age=20,score=88)
curPath = os.path.abspath('.')
newFile = os.path.join(curPath,'picking.txt')
# Earlier pickle-based version, kept for reference:
#with open(newFile,'wb') as f:
#    pickle.dump(d,f)
#with open(newFile,'rb') as f :
#    print(pickle.load(f))
with open(newFile,'w') as f:
    json.dump(d,f)
with open(newFile,'r') as f:
    print(json.load(f))
s = Student('Bob',88,22)
print(json.dumps(s,default=student2dict))
# Convert an instance of any class into a dict via its __dict__.
print(json.dumps(s,default=lambda x: x.__dict__))
| xiaxia47/Python-learning | picking.py | Python | gpl-3.0 | 799 |
from utilities import load_data,cross_validate
from utilities import DataClean
import os
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import BernoulliNB
from sklearn.ensemble import RandomForestClassifier
from glove import Corpus,Glove
class Glove2AverageVector:
    """Convert documents to fixed-length vectors by averaging GloVe word
    embeddings.

    Note: the model trained and loaded here is GloVe (the original
    docstring incorrectly said Word2Vec).
    """
    def __init__(self, data_src, num_features=100, window=10,
                 learning_rate=0.05, epochs=10):
        self.learning_rate = learning_rate
        self.num_features = num_features
        self.window = window
        self.epochs = epochs
        self.pretrain(data_src)
        self.model = Glove.load("glove.model")
    def pretrain(self, data_src):
        """Train a GloVe model on *data_src* and cache it as glove.model,
        unless the cache file already exists."""
        if not os.path.isfile("glove.model"):
            data_src = DataClean([
                ["[^a-z]"," "], # only letters
                [" [ ]+", " "], # remove extra spaces
            ],html_clean=True,split_words=True).fit(data_src).transform(data_src)
            corpus_model = Corpus()
            corpus_model.fit(data_src, window=self.window)
            glove = Glove(no_components=self.num_features,
                          learning_rate=self.learning_rate)
            glove.fit(corpus_model.matrix, epochs=self.epochs, verbose=True)
            glove.add_dictionary(corpus_model.dictionary)
            glove.save("glove.model")
    def fit(self, X, y=None):
        # Reload the cached model (pretrain has already run in __init__).
        self.model = Glove.load("glove.model")
        return self
    def sentence2vector(self, sentence):
        """Average the embeddings of the in-vocabulary words of *sentence*.

        nwords starts at 0.01 so sentences with no known words do not
        divide by zero (they return a near-zero vector).
        """
        sentence_tokens = sentence.split()
        nwords = 0.01
        feat_vect = np.zeros(self.num_features)
        for word in sentence_tokens:
            # FIX: narrowed from a bare ``except`` -- only unknown words
            # (missing from the model dictionary) should be skipped.
            try:
                feat_vect += self.model.word_vectors[self.model.dictionary[word]]
                nwords += 1
            except KeyError:
                continue
        return feat_vect/nwords
    def transform(self, X):
        """Stack the averaged vector of every document in *X*."""
        Xtf = np.vstack([self.sentence2vector(x) for x in X])
        return Xtf
    def fit_transform(self, X, y=None):
        self.fit(X, y)
        return self.transform(X)
if __name__ == '__main__':
    # Build a clean -> GloVe-average -> random-forest pipeline and
    # cross-validate it on the labelled Stanford data (the unlabelled
    # corpus is only used to pre-train the embeddings).
    _,unlabelledData = load_data("unsupervised")
    ids,X,y = load_data("stanford")
    pipeline = Pipeline([
        ('cleaner',DataClean(clean_list=[
            ["[^a-z]"," "], # only letters
            [" [ ]+", " "], # remove extra spaces
        ],html_clean=False)),
        ('w2v',Glove2AverageVector(data_src=unlabelledData)),
        ('classifier',RandomForestClassifier(n_estimators=100))
    ])
    cross_validate((X,y),pipeline,accuracy_score)
# num_features=100,window=10,learning_rate=0.05,epochs=10
# Stanford
# NB
# accuracy_score : 0.72772 +/- 0.00562665086886
# Confusion Matrix
# [[ 9223. 3277.]
# [ 3530. 8970.]]
# RF
# accuracy_score : 0.78932 +/- 0.00245373185169
# Confusion Matrix
# [[ 9644. 2856.]
# [ 2411. 10089.]]
# Cornell
# NB
# accuracy_score : 0.24543076998 +/- 0.0117745086102
# Confusion Matrix
# [[ 3869. 138. 403. 338. 2324.]
# [ 10873. 731. 3809. 2459. 9401.]
# [ 20249. 2433. 23007. 11613. 22280.]
# [ 7298. 399. 3611. 3818. 17801.]
# [ 1391. 27. 285. 626. 6877.]]
# RF
# accuracy_score : 0.538638825433 +/- 0.00677413249639
# Confusion Matrix
# [[ 3.16000000e+02 2.02600000e+03 4.05700000e+03 6.43000000e+02
# 3.00000000e+01]
# [ 3.72000000e+02 4.67800000e+03 1.92160000e+04 2.92600000e+03
# 8.10000000e+01]
# [ 1.92000000e+02 4.43700000e+03 6.81370000e+04 6.59400000e+03
# 2.22000000e+02]
# [ 3.90000000e+01 1.56000000e+03 2.03260000e+04 1.01990000e+04
# 8.03000000e+02]
# [ 3.00000000e+00 3.39000000e+02 3.79100000e+03 4.34300000e+03
# 7.30000000e+02]]
| saatvikshah1994/SmartMM | Sentiment Analysis/supervised/gloveavgvec.py | Python | mit | 3,924 |
from itertools import chain
from time import time, sleep
from random import random, gauss, shuffle
PORT=2
import rtmidi as rt
def gen_all_off(now):
    """Return a single (timestamp, payload) event silencing every MIDI note.

    Each note is sent as a Note On (0x90) with velocity 0, which MIDI
    defines as equivalent to Note Off.  FIX: MIDI note numbers span
    0-127 (128 notes); the original ``range(127)`` never silenced
    note 127.
    """
    return ((now, tuple(chain(*((0x90, n, 0) for n in range(128))))),)
def next(prev):  # NOTE: shadows the builtin next(); name kept for callers
    """Random-walk *prev* by a gaussian step and clamp the result to 0..127."""
    stepped = prev + gauss(0, 3)
    if stepped < 0:
        return 0
    if stepped > 127:
        return 127
    return stepped
value = [[random() * 127 for n in range(16)] for controller in range(64)]
def emitter():
    """Open a MIDI output and endlessly emit random-walking CC messages."""
    source = rt.MidiOut()
    avail_ports = source.get_ports()
    # Print the available ports for the user before opening one.
    for i,p in zip(range(len(avail_ports)),avail_ports):
        print (i,p)
    if avail_ports:
        source.open_port(PORT)
    else:
        # No hardware ports: fall back to a virtual port.
        source.open_virtual_port('my virt port')
    sleep(4)
    # NOTE(review): shuffle() needs a mutable sequence; under Python 3
    # these would have to be list(range(...)) -- this script appears to
    # target Python 2.
    channels = range(1)
    controllers = range(12)
    while True:
        shuffle(channels)
        shuffle(controllers)
        for channel in channels:
            for controller in controllers:
                # Step the stored value and send it as a Control Change
                # (status byte 0xB0 | channel).
                value[controller][channel] = next(value[controller][channel])
                source.send_message((0xB0 | channel, controller, int(value[controller][channel])))
        sleep(0.15)
if __name__=='__main__':
    # Runs forever; stop with Ctrl-C.
    emitter()
| joefutrelle/pyaglomidi | brownian_controllers.py | Python | mit | 1,073 |
import requests # not needed really
class Dep:
    """Toy dependency exposing two trivial string-returning methods."""
    def this(self):
        """Return the constant string 'this'."""
        return 'this'
    def that(self):
        """Return the constant string 'that'."""
        return 'that'
| mamachanko/camp | example/dep/dep.py | Python | gpl-3.0 | 140 |
import os
import sys
import traceback
import StringIO
import warnings
import re
warnings.simplefilter("error")
from support import get_data_files
from support import TestData, convert, convertExpected, treeTypes
import html5lib
from html5lib import html5parser, treebuilders, constants
#Run the parse error checks
# When True, each test also asserts the expected number of parse errors.
checkParseErrors = False
#XXX - There should just be one function here but for some reason the testcase
#format differs from the treedump format by a single space character
def convertTreeDump(data):
    # Normalise a tree dump: run convert(3) and drop its first line.
    return u"\n".join(convert(3)(data).split(u"\n")[1:])
namespaceExpected = re.compile(ur"^(\s*)<(\S+)>", re.M).sub
def runParserTest(innerHTML, input, expected, errors, treeClass,
                  namespaceHTMLElements):
    """Parse *input* (optionally as a fragment of *innerHTML*) with the
    given tree builder and assert the serialised tree matches *expected*.

    This module targets Python 2 (``ur"..."`` literals, ``unicode``).
    """
    #XXX - move this out into the setup function
    #concatenate all consecutive character tokens into a single token
    try:
        p = html5parser.HTMLParser(tree = treeClass,
                                   namespaceHTMLElements=namespaceHTMLElements)
    except constants.DataLossWarning:
        # Tree builder cannot represent this configuration; skip quietly.
        return
    try:
        if innerHTML:
            document = p.parseFragment(input, innerHTML)
        else:
            try:
                document = p.parse(input)
            except constants.DataLossWarning:
                return
    except:
        # Any parser crash is reported with the full traceback.
        errorMsg = u"\n".join([u"\n\nInput:", input, u"\nExpected:", expected,
                               u"\nTraceback:", traceback.format_exc().decode('utf8')])
        assert False, errorMsg
    output = convertTreeDump(p.tree.testSerializer(document))
    expected = convertExpected(expected)
    if namespaceHTMLElements:
        # Rewrite the expected output to include explicit html namespaces.
        expected = namespaceExpected(ur"\1<html \2>", expected)
    errorMsg = u"\n".join([u"\n\nInput:", input, u"\nExpected:", expected,
                           u"\nReceived:", output])
    assert expected == output, errorMsg
    errStr = [u"Line: %i Col: %i %s"%(line, col,
              constants.E[errorcode] % datavars if isinstance(datavars, dict) else (datavars,)) for
              ((line,col), errorcode, datavars) in p.errors]
    errorMsg2 = u"\n".join([u"\n\nInput:", input,
                            u"\nExpected errors (" + unicode(len(errors)) + u"):\n" + u"\n".join(errors),
                            u"\nActual errors (" + unicode(len(p.errors)) + u"):\n" + u"\n".join(errStr)])
    if checkParseErrors:
        # Disabled by default via the module-level flag.
        assert len(p.errors) == len(errors), errorMsg2
def test_parser():
    """Yield one runParserTest case per (tree builder, namespace mode,
    tree-construction test entry) combination (nose-style generator)."""
    sys.stderr.write('Testing tree builders '+ " ".join(treeTypes.keys()) + "\n")
    files = get_data_files('tree-construction')
    for filename in files:
        testName = os.path.basename(filename).replace(".dat","")
        tests = TestData(filename, u"data")
        for index, test in enumerate(tests):
            # NOTE(review): the unparenthesised tuple after ``in`` below is
            # Python-2-only syntax (SyntaxError on Python 3).
            input, errors, innerHTML, expected = [test[key] for key in
                                                  u'data', u'errors',
                                                  u'document-fragment',
                                                  u'document']
            if errors:
                errors = errors.split(u"\n")
            for treeName, treeCls in treeTypes.iteritems():
                for namespaceHTMLElements in (True, False):
                    print input
                    yield (runParserTest, innerHTML, input, expected, errors, treeCls,
                           namespaceHTMLElements)
| kordless/wisdom | lib/html5lib/tests/test_parser.py | Python | mit | 3,443 |
#############################################################################
##
## Copyright (C) 2014 Digia Plc and/or its subsidiary(-ies).
## Contact: http://www.qt-project.org/legal
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Digia. For licensing terms and
## conditions see http://www.qt.io/licensing. For further information
## use the contact form at http://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 or version 3 as published by the Free
## Software Foundation and appearing in the file LICENSE.LGPLv21 and
## LICENSE.LGPLv3 included in the packaging of this file. Please review the
## following information to ensure the GNU Lesser General Public License
## requirements will be met: https://www.gnu.org/licenses/lgpl.html and
# http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Digia gives you certain additional
## rights. These rights are described in the Digia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
source("../../shared/qtcreator.py")
def main():
    """Squish system test: for several Qt Quick versions, create a Qt
    Quick UI project, add a kit, run it (hooked into the subprocess when
    a viewer is available) and fall back to a compile check."""
    startApplication("qtcreator -load QmlProjectManager" + SettingsPath)
    if not startedWithoutPluginError():
        return
    for quickVersion in ["1.1", "2.1", "2.2", "2.3", "Controls 1.0", "Controls 1.1", "Controls 1.2"]:
        # using a temporary directory won't mess up a potentially existing
        workingDir = tempDir()
        projectName = createNewQtQuickUI(workingDir, quickVersion)
        switchViewTo(ViewConstants.PROJECTS)
        clickButton(waitForObject(":*Qt Creator.Add Kit_QPushButton"))
        menuItem = Targets.getStringForTarget(Targets.DESKTOP_531_DEFAULT)
        if platform.system() == 'Darwin':
            # macOS needs a helper to activate context menu items.
            waitFor("macHackActivateContextMenuItem(menuItem)", 5000)
        else:
            activateItem(waitForObjectItem("{type='QMenu' unnamed='1' visible='1' "
                                           "window=':Qt Creator_Core::Internal::MainWindow'}", menuItem))
        test.log("Running project Qt Quick %s UI" % quickVersion)
        qmlViewer = modifyRunSettingsForHookIntoQtQuickUI(2, 1, workingDir, projectName, 11223, quickVersion)
        if qmlViewer!=None:
            # A viewer executable is available: attach to it on port 11223
            # and open the Windows firewall for the duration of the run.
            qmlViewerPath = os.path.dirname(qmlViewer)
            qmlViewer = os.path.basename(qmlViewer)
            result = addExecutableAsAttachableAUT(qmlViewer, 11223)
            allowAppThroughWinFW(qmlViewerPath, qmlViewer, None)
            if result:
                result = runAndCloseApp(True, qmlViewer, 11223, sType=SubprocessType.QT_QUICK_UI, quickVersion=quickVersion)
            else:
                result = runAndCloseApp(sType=SubprocessType.QT_QUICK_UI)
            removeExecutableAsAttachableAUT(qmlViewer, 11223)
            deleteAppFromWinFW(qmlViewerPath, qmlViewer)
        else:
            result = runAndCloseApp(sType=SubprocessType.QT_QUICK_UI)
        if result == None:
            # Run could not be verified; at least check that it compiles.
            checkCompile()
        else:
            logApplicationOutput()
        invokeMenuItem("File", "Close All Projects and Editors")
    invokeMenuItem("File", "Exit")
| farseerri/git_code | tests/system/suite_qtquick/tst_qtquick_creation3/test.py | Python | lgpl-2.1 | 3,565 |
#############################################################################
# Copyright (c) 2018 Eli Polonsky. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
#
#############################################################################
import copy
import os
import pytest
from dictfile.api import constants
from dictfile.api import exceptions
from dictfile.api import writer
from dictfile.api.repository import Repository
from dictfile.api.repository import ADD_COMMIT_MESSAGE
@pytest.fixture(name='repo', params=constants.SUPPORTED_FORMATS)
def _repo(temp_file, temp_dir, request):
    # Parametrised over every supported file format.  The alias is the
    # test name so each test tracks its own independent file.
    alias = request.node.name
    fmt = request.param
    writer.dump(obj=get_test_dict(fmt),
                file_path=temp_file,
                fmt=fmt)
    repo = Repository(config_dir=temp_dir)
    repo.add(alias=alias, file_path=temp_file, fmt=fmt)
    # attach the format to the repo instance so that
    # test functions will have it.
    repo.test_fmt = fmt
    repo.tracked_file = temp_file
    yield repo
def get_dict(base_dict, fmt):
    """Wrap *base_dict* in a 'section1' section for INI; every other
    format uses it unchanged."""
    if fmt == constants.INI:
        return {'section1': copy.deepcopy(base_dict)}
    return base_dict
def get_test_dict(fmt):
    # Canonical fixture dictionary, section-wrapped when fmt is INI.
    return get_dict(base_dict={'key1': 'value1'}, fmt=fmt)
def test_add_no_file(repo):
    # Tracking a non-existent path must fail fast.
    with pytest.raises(exceptions.FileNotFoundException):
        repo.add(alias='dummy', file_path='doesnt-exist', fmt=repo.test_fmt)
def test_add_alias_with_spaces(repo):
    # Aliases may not contain whitespace.
    with pytest.raises(exceptions.IllegalAliasException):
        repo.add(alias='alias with spaces', file_path='dummy', fmt=repo.test_fmt)
def test_add_alias_with_sep(repo):
    # Aliases may not contain the OS path separator.
    with pytest.raises(exceptions.IllegalAliasException):
        repo.add(alias='alias{0}with{0}spaces'.format(os.sep),
                 file_path='dummy',
                 fmt=repo.test_fmt)
def test_add_file_is_directory(repo, temp_dir):
    # Directories cannot be tracked, only regular files.
    with pytest.raises(exceptions.FileIsDirectoryException):
        repo.add(alias='dummy', file_path=temp_dir, fmt=repo.test_fmt)
def test_add_alias_exists(repo, temp_file, request):
    # The fixture already registered this alias, so re-adding must fail.
    with pytest.raises(exceptions.AliasAlreadyExistsException):
        repo.add(alias=request.node.name, file_path=temp_file, fmt=repo.test_fmt)
def test_path(repo, request):
    # path() resolves an alias back to the tracked file path.
    alias = request.node.name
    assert repo.tracked_file == repo.path(alias)
def test_path_unknown_alias(repo):
    # Unknown aliases raise instead of returning a bogus path.
    alias = 'unknown'
    with pytest.raises(exceptions.AliasNotFoundException):
        repo.path(alias)
def test_fmt(repo, request):
    # fmt() reports the format the alias was registered with.
    alias = request.node.name
    assert repo.test_fmt == repo.fmt(alias)
def test_fmt_unknown_alias(repo):
    # Unknown aliases raise instead of returning a format.
    alias = 'unknown'
    with pytest.raises(exceptions.AliasNotFoundException):
        repo.fmt(alias)
def test_commit(repo, request):
    # A commit materialises revision directory '1' containing the file
    # contents and the commit message.
    alias = request.node.name
    repo.commit(alias, message='this is my message')
    # make sure the correct file was created
    revision_path = os.path.join(repo.root, alias, '1')
    assert os.path.isdir(revision_path)
    assert os.path.isfile(os.path.join(revision_path, 'contents'))
    assert os.path.isfile(os.path.join(revision_path, 'commit-message'))
def test_commit_unknown_alias(repo):
    # Committing an unknown alias must raise.
    alias = 'unknown'
    with pytest.raises(exceptions.AliasNotFoundException):
        repo.commit(alias)
def test_revisions(repo, request):
    # A freshly added file has exactly one revision (version 0) created
    # by the implicit "add" commit.
    alias = request.node.name
    revisions = repo.revisions(alias)
    expected_number_of_revisions = 1
    expected_version_number = 0
    assert expected_number_of_revisions == len(revisions)
    assert alias == revisions[0].alias
    assert repo.tracked_file == revisions[0].file_path
    assert expected_version_number == revisions[0].version
    assert ADD_COMMIT_MESSAGE == revisions[0].commit_message
def test_revisions_unknown_alias(repo):
    # Listing revisions of an unknown alias must raise.
    alias = 'unknown'
    with pytest.raises(exceptions.AliasNotFoundException):
        repo.revisions(alias)
def test_files(repo, request):
    # files() lists the single file registered by the fixture.
    files = repo.files()
    expected_number_of_files = 1
    assert expected_number_of_files == len(files)
    assert request.node.name == files[0].alias
    assert repo.tracked_file == files[0].file_path
    assert repo.test_fmt == files[0].fmt
def test_message(repo, request):
    # message() returns the commit message stored for a given version.
    alias = request.node.name
    expected_message = 'my message'
    repo.commit(alias=alias, message=expected_message)
    assert expected_message == repo.message(alias, 1)
def test_contents(repo, request):
    # contents() of version 0 equals the serialised fixture dictionary.
    alias = request.node.name
    contents = repo.contents(alias=alias, version=0)
    assert writer.dumps(get_test_dict(repo.test_fmt),
                        fmt=repo.test_fmt) == contents
def test_contents_unknown_alias(repo):
    # Reading contents of an unknown alias must raise.
    with pytest.raises(exceptions.AliasNotFoundException):
        repo.contents(alias='unknown', version=1)
def test_contents_wrong_version(repo, request):
    # Version 1 does not exist until a commit is made.
    with pytest.raises(exceptions.VersionNotFoundException):
        repo.contents(alias=request.node.name, version=1)
def test_remove(repo, request):
    # remove() forgets the alias and deletes its storage directory;
    # removing again must raise.
    alias = request.node.name
    repo.remove(alias=alias)
    with pytest.raises(exceptions.AliasNotFoundException):
        repo.remove(alias)
    files = repo.files()
    expected_number_of_files = 0
    assert expected_number_of_files == len(files)
    assert not os.path.exists(os.path.join(repo.root, alias))
def test_remove_unknown_alias(repo):
    # Removing an unknown alias must raise.
    with pytest.raises(exceptions.AliasNotFoundException):
        repo.remove(alias='unknown')
| iliapolo/fileconfig | dictfile/tests/api/test_repository.py | Python | apache-2.0 | 5,977 |
# -*- coding: UTF-8 -*-
# Copyright 2020 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
import datetime
from django.conf import settings
from lino.api import dd, rt, _
from lino_xl.lib.ledger.choicelists import DC, CommonAccounts
from lino.utils import Cycler
REQUEST = settings.SITE.login() # BaseRequest()
def objects():
    """Demo fixture generator: yield one preliminary journal voucher and a
    handful of entry items, then register the voucher."""
    # Nothing to generate when no VAT declaration plugin is configured.
    if dd.plugins.vat.declaration_plugin is None:
        return
    JournalEntryItem = rt.models.finan.JournalEntryItem
    USERS = Cycler(settings.SITE.user_model.objects.all())
    PARTNERS = Cycler(rt.models.contacts.Partner.objects.order_by("-id"))
    jnl = rt.models.ledger.Journal.objects.get(preliminary=True)
    voucher = jnl.create_voucher(
        user=USERS.pop(),
        entry_date=datetime.date(dd.plugins.ledger.start_year, 1, 1))
    yield voucher
    # seqno = 0
    def vi(ca, amount, **kwargs):
        # Build one journal entry item; the sign flips for debit journals.
        # seqno += 1
        kwargs.update(account=ca.get_object(), voucher=voucher)
        if jnl.dc == DC.debit:
            amount = -amount
        kwargs.update(amount=amount)
        return JournalEntryItem(**kwargs)
    # yield vi(CommonAccounts.net_income_loss, 100)
    yield vi(CommonAccounts.vat_due, 10)
    yield vi(CommonAccounts.suppliers, 50, partner=PARTNERS.pop())
    yield vi(CommonAccounts.customers, -100, partner=PARTNERS.pop())
    yield vi(CommonAccounts.best_bank, 40)
    voucher.register(REQUEST)
    voucher.full_clean()
    voucher.save()
| lino-framework/xl | lino_xl/lib/sheets/fixtures/demo_bookings.py | Python | bsd-2-clause | 1,469 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Matt Jeffery <matt@clan.se>
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import logging
from semetric.apiclient.entity.base import Entity
log = logging.getLogger(__name__)
class ChartItem(Entity):
    """
    One entry in a chart.  For legacy reasons a chart may mix entity
    types (e.g. a release-group chart can also contain artist entities).

    Every chart item carries `rank` and `value` properties.
    """
    __apiclass__ = None
    __apiclass_plural__ = None
    def __init__(self, rank, value, **chartitem):
        """Build a ChartItem; any extra entity fields are ignored."""
        self.rank, self.value = rank, value
class Chart(Entity):
    """
    A chart is a rank list of entities.
    """
    __apiclass__ = "chart"
    __apiclass_plural__ = "charts"
    # NOTE(review): "data" and "name" appear twice in this list -- likely
    # an oversight, though harmless if the list is only iterated.
    __deferrable_properties__ = ["name", "data", "period", "end_time", "data", "name", "now_id", "start_time", "previous_id", "chart_items"]
    def __init__(self, id, country='ALL', data=None, period=None, end_time=None, name=None, now_id=None, start_time=None, previous_id=None, **kwargs):
        # Store chart attributes and wrap each data row in a ChartItem.
        self.id = id
        self.name = name
        self.data = data
        self.period= period
        self.end_time = end_time
        self.data = data
        self.name = name
        self.now_id = now_id
        self.start_time = start_time
        self.previous_id = previous_id
        self.country = country
        # NOTE(review): the map() result is later indexed in __getitem__
        # and __iter__ uses xrange -- this module targets Python 2.
        self.chart_items = map(lambda x: ChartItem(supress_mapping_error=True, **x), self.data or [])
        self.extras = kwargs
    def __len__(self):
        """
        Get the length of the chart
        """
        return len(self.data)
    def __getitem__(self, index):
        """
        Get an item or item slice from the chart
        """
        return self.chart_items[index]
    def __iter__(self):
        """
        Simple iterator
        """
        for i in xrange(len(self)):
            yield self[i]
    def reload(self):
        """
        Reload the Chart entity from the API
        """
        new_chart = super(Chart, self).reload(country=self.country)
        self.name = new_chart.name
        self.data = new_chart.data
        self.period = new_chart.period
        self.end_time = new_chart.end_time
        self.data = new_chart.data
        self.name = new_chart.name
        self.now_id = new_chart.now_id
        self.start_time = new_chart.start_time
        self.previous_id = new_chart.previous_id
        self.chart_items = map(lambda x: ChartItem(supress_mapping_error=True, **x), self.data or [])
    @classmethod
    def __apiget__(cls, id, country=None):
        """
        Get the chart from the API by ID
        """
        path = "{entity}/{id}".format(entity=cls.__apiclass__, id=id)
        args = { 'country': country or 'ALL' }
        return path, args
| mattjeffery/semetric-python | semetric/apiclient/entity/chart.py | Python | lgpl-2.1 | 3,737 |
from django.apps import AppConfig
class ScrapperConfig(AppConfig):
    """Django application configuration for the ``scrapper`` app."""
    # Dotted Python path of the application.
    name = 'scrapper'
| shashank-sharma/mythical-learning | scrapper/apps.py | Python | mit | 91 |
import aplpy
import matplotlib.pyplot as plt
import pyfits
import numpy
import pyregion
import astropy
from astropy.io import fits
from astropy import wcs
from astropy import coordinates
import matplotlib.patheffects as Patheffects
from astropy import units as u
import good_cores_getsources
def cross_match_CSAR(getsources_core_catalog = '/mnt/scratch-lustre/jkeown/Getsources/Extract/cep1157/120115_flat/combo/+catalogs/L1157.sw.final.reliable.ok.cat', CSAR_catalog = '/mnt/scratch-lustre/jkeown/DS9_regions/L1157/CSAR/CEPl1157_CSAR.dat', high_res_coldens_image = '/mnt/scratch-lustre/jkeown/Getsources/Prepare/Images/cep1157/080615/cep1157_255_mu.image.resamp.fits', CSAR_core_indices='L1157_matched_CSAR_cores.dat'):
# Import getsources "good core" data table
cores_array1 = numpy.loadtxt(getsources_core_catalog,comments='!')
good_core_indices = good_cores_getsources.get_good_cores(getsources_core_catalog)
cores_array = cores_array1[numpy.array(good_core_indices)]
### Import the CSAR catalog
### ***MAKE SURE FIRST TWO COLUMNS OF "CSAR_catalog" FILE ARE X_POSITION AND Y_POSITION OF SOURCE IN DECIMAL DEGREES***
CSAR_array = numpy.loadtxt(CSAR_catalog,comments='#')
#print CSAR_array
CSAR_positions = numpy.column_stack((CSAR_array[:,0], CSAR_array[:,1]))
#print CSAR_positions
w = wcs.WCS(high_res_coldens_image)
pos_pix = w.wcs_world2pix(CSAR_positions, 1)
### Loop through the potential matched cores identified in the step above.
counter = 0
matched_cores = []
for line in cores_array:
x_coor = str(line[3])
y_coor = str(line[4])
### Create a DS9 region string for the core's getsources ellipse,
### from which a mask will be created.
region = ('fk5;ellipse(' + x_coor + ', ' + y_coor + ', ' + str((line[50]/2.0)/3600.) + ', ' +
str((line[51]/2.0)/3600.) + ', ' + str(line[52]+90.0)+')')
r = pyregion.parse(region)
f=fits.open(high_res_coldens_image)
mymask = r.get_mask(hdu=f[0])
f.close()
newmask=mymask
### Set all values outside the core's ellipse to zero,
### all values inside the ellipse are set to one.
newmask=numpy.where(newmask==0,0,1)
mask_shape = numpy.shape(newmask)
### Loop through the CSAR catalog
### If any CSAR cores fall within a getsources core's ellipse,
### store the getsources core's index
match_counter=0
for i in pos_pix:
ypos = int(round(i[1],0))-1
xpos = int(round(i[0],0))-1
if ypos<=mask_shape[0] and xpos<=mask_shape[1]:
if newmask[ypos][xpos]==1 and match_counter==0:
matched_cores.append(counter)
# match_counter prevents counting indices twice
# if two CSAR cores fall within getsources ellipse
match_counter+=1
#print len(matched_cores)
counter += 1
print 'CSAR_matched:total ratio =' + str(round(float(len(matched_cores)) / float(len(cores_array[:,0])),3))
### Save the CSAR matched core indices to a file
#numpy.savetxt(CSAR_core_indices, matched_cores, fmt='%i')
return numpy.array(matched_cores)
#cross_match_CSAR()
#cross_match_CSAR(getsources_good_cores_catalog = '/mnt/scratch-lustre/jkeown/DS9_regions/L1172/core_SED/L1172_good_sources.dat', CSAR_catalog = '/mnt/scratch-lustre/jkeown/DS9_regions/L1172/CSAR/CEPl1172_CSAR.dat', high_res_coldens_image = '/mnt/scratch-lustre/jkeown/Getsources/Prepare/Images/cep1172/082315/cep1172_255_mu.image.resamp.fits', CSAR_core_indices='L1172_matched_CSAR_cores.dat')
#cross_match_CSAR(getsources_good_cores_catalog = '/mnt/scratch-lustre/jkeown/DS9_regions/L1228/core_SED/L1228_good_sources.dat', CSAR_catalog = '/mnt/scratch-lustre/jkeown/DS9_regions/L1228/CSAR/CEPl1228_CSAR.dat', high_res_coldens_image = '/mnt/scratch-lustre/jkeown/Getsources/Prepare/Images/cep1228/082315/cep1228_255_mu.image.resamp.fits', CSAR_core_indices='L1228_matched_CSAR_cores.dat')
#cross_match_CSAR(getsources_core_catalog = '/mnt/scratch-lustre/jkeown/Getsources/Extract/cep1241/120115_flat/combo/+catalogs/L1241.sw.final.reliable.ok.cat', CSAR_catalog = '/mnt/scratch-lustre/jkeown/DS9_regions/L1241/CSAR/CEPl1241_CSAR.dat', high_res_coldens_image = '/mnt/scratch-lustre/jkeown/Getsources/Prepare/Images/cep1241/071415/cep1241_255_mu.image.resamp.fits', CSAR_core_indices='L1241_matched_CSAR_cores.dat')
#cross_match_CSAR(getsources_good_cores_catalog = '/mnt/scratch-lustre/jkeown/DS9_regions/L1251/core_SED/L1251_good_sources.dat', CSAR_catalog = '/mnt/scratch-lustre/jkeown/DS9_regions/L1251/CSAR/CEPl1251_CSAR.dat', high_res_coldens_image = '/mnt/scratch-lustre/jkeown/Getsources/Prepare/Images/cep1251/082315/cep1251_255_mu.image.resamp.fits', CSAR_core_indices='L1251_matched_CSAR_cores.dat')
| jakeown/HGBS_pipeline | CSAR_core_cross_match.py | Python | mit | 4,603 |
# Print the square of each number from 0 to 4 (output unchanged).
for cislo in range(5):
    print(cislo, 'na druhou je', cislo ** 2)
| Ajuska/pyladies | 04/nadruhou.py | Python | mit | 90 |
from django.db import models
from categories.models import CategoryBase, STORAGE
from django.utils.encoding import force_text
from django.urls import reverse
class Category(CategoryBase):
    """Hierarchical content category with an optional thumbnail and SEO fields."""
    thumbnail = models.FileField(
        upload_to='uploads/categories/thumbnails',
        null=True, blank=True,
        storage=STORAGE(),)
    # Cached image dimensions, filled in by save() below.
    thumbnail_width = models.IntegerField(blank=True, null=True)
    thumbnail_height = models.IntegerField(blank=True, null=True)
    order = models.IntegerField(default=0)
    alternate_title = models.CharField(
        blank=True,
        default="",
        max_length=100,
        help_text="An alternative title to use on pages with this category.")
    alternate_url = models.CharField(
        blank=True,
        max_length=200,
        help_text="An alternative URL to use instead of the one derived from "
                  "the category hierarchy.")
    description = models.TextField(blank=True, null=True)
    meta_keywords = models.CharField(
        blank=True,
        default="",
        max_length=255,
        help_text="Comma-separated keywords for search engines.")
    meta_extra = models.TextField(
        blank=True,
        default="",
        help_text="(Advanced) Any additional HTML to be placed verbatim "
                  "in the <head>")

    def get_absolute_url(self):
        """Return the canonical URL built from the slugs of all ancestors."""
        ancestors = list(self.get_ancestors()) + [self, ]
        return reverse('posts:category', args=[self.id, '/'.join([force_text(i.slug) for i in ancestors])])

    @property
    def path_name_category_list(self):
        """List of category names from the root down to this category."""
        ancestors = self.get_ancestors()
        return [force_text(i.name) for i in ancestors] + [self.name, ]

    @property
    def path_category_list(self):
        """List of category objects from the root down to this category."""
        return list(self.get_ancestors()) + [self, ]

    @property
    def root(self):
        """Topmost ancestor of this category in the tree."""
        return self.get_root()

    def save(self, *args, **kwargs):
        # Cache the thumbnail dimensions on save so templates need not
        # reopen the image file.
        if self.thumbnail:
            from django.core.files.images import get_image_dimensions
            import django
            # Fixed: the original compared only the minor version
            # (django.VERSION[1] < 2), which wrongly selected the legacy
            # call for Django 2.0/2.1, 3.0/3.1, ...  Compare the full
            # version tuple; the ``close`` keyword exists since Django 1.2.
            if django.VERSION < (1, 2):
                width, height = get_image_dimensions(self.thumbnail.file)
            else:
                width, height = get_image_dimensions(self.thumbnail.file, close=True)
        else:
            width, height = None, None
        self.thumbnail_width = width
        self.thumbnail_height = height
        super(Category, self).save(*args, **kwargs)

    class Meta(CategoryBase.Meta):
        verbose_name_plural = 'categories'

    class MPTTMeta:
        order_insertion_by = ('order', 'name')
| alien3211/lom-web | posts/categoryModels.py | Python | mit | 2,551 |
# -*- coding: utf-8 -*-
'''
change log:
对应含有图片的微博
=============OLD=============
备份微博内容(仅文字部分)
Created on 2017/08/10
@author: yuyang
'''
import urllib2
import re
from docx import Document
import TencentUtil
class TencentWeibo:
'''
Tencent weibo object.
'''
def __init__(self, id):
'''
Constructor
'''
self.id = id
self.pageIndex = 1
self.haveNextPage = True
self.nextPageUrl = None
self.userAgent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
self.headers = {'User-Agent' : self.userAgent}
self.stories = []
self.document = Document()
def getNextPage(self):
if self.pageIndex == 1:
url = 'http://t.qq.com/' + self.id + '?mode=0&lang=zh_CN'
else:
url = self.nextPageUrl + '&lang=zh_CN'
try:
request = urllib2.Request(url, headers = self.headers)
response = urllib2.urlopen(request)
return response.read().decode('utf-8')
except urllib2.URLError, e:
if hasattr(e, 'reason'):
print '腾讯微博第 %d 页连接失败: %s' % (self.pageIndex, e.reason)
return None
def getStories(self, page):
pattern = re.compile(r'<ul id="talkList"(.*?)</ul>', re.S)
stories_str = re.findall(pattern, page)
stories = stories_str[0].split('<li')
for match in re.finditer(r'<a href="\?mode=0&.*?"', page):
pass
self.nextPageUrl = match.group().replace('<a href="', 'http://t.qq.com/' + self.id)[:-1]
if not page.__contains__(u'下一页'):
self.haveNextPage = False
return stories
def getItems(self, stories):
pattern_nopic = re.compile(r'<div class="msgBox".*?<div class="userName".*?title="(.*?)" gender=' +
'.*?<div class="msgCnt">(.*?)</div>' +
'.*?<div class="pubInfo.*?from="\\d*">(.*?)</a>', re.S)
pattern_pic = re.compile(r'<div class="msgBox".*?<div class="userName".*?title="(.*?)" gender=' +
'.*?<div class="msgCnt">(.*?)</div>' +
'.*?<div class="picBox">\n<a href="(.*?)" data-like' +
'.*?<div class="pubInfo.*?from="\\d*">(.*?)</a>', re.S)
for story in stories:
havePic = False
if story.__contains__('class="picBox"'):
havePic = True
items = re.findall(pattern_pic, story)
else:
items = re.findall(pattern_nopic, story)
for item in items:
print '作者:', item[0]
self.document.add_heading('', 0)
self.document.add_heading(item[0], level=1)
print '内容:', item[1]
content_valid = ''.join(c for c in item[1] if ord(c) >= 32)# delete control chars
self.document.add_paragraph(content_valid, style='ListBullet')
if havePic:
img_url = item[2]
print '图片:', img_url
filename = TencentUtil.downloadPic(img_url)
self.document.add_picture(filename)
time = item[3]
else:
time = item[2]
print '时间:', time
p = self.document.add_paragraph('', style='ListBullet')
p.add_run(time).italic = True
#self.document.add_page_break()
def start(self):
while self.haveNextPage:
print '开始分析腾讯微博第 %s 页...' % self.pageIndex
page = weibo.getNextPage()
#print page
stories = self.getStories(page)
self.getItems(stories)
print '是否有下一页:', self.haveNextPage
print '得到下一页地址: ', self.nextPageUrl
self.pageIndex += 1
if self.pageIndex > 4:#test code
self.document.save('tencent.docx')#test code
break#test code
else:
print '腾讯微博备份完成,共计 %s 页' % self.pageIndex
self.document.save('tencent.docx')
# Script entry point: back up this account's timeline into tencent.docx.
weibo = TencentWeibo('renminwangcom')
weibo.start()
from numba import unittest_support as unittest
import numpy as np
from numba import cuda, types
import struct
def float_to_int(x):
    # Cast to numpy int32.
    return np.int32(x)
def int_to_float(x):
    # Promote to float64 before halving so the division happens in floats.
    return np.float64(x) / 2
def float_to_unsigned(x):
    # Uses the numba type object as a conversion function (unsigned 32-bit).
    return types.uint32(x)
def float_to_complex(x):
    # Widen to complex128; the imaginary part becomes 0.
    return np.complex128(x)
class TestCasting(unittest.TestCase):
    """Checks that scalar casts inside CUDA device functions match the
    equivalent Python/NumPy results."""

    def _create_wrapped(self, pyfunc, intype, outtype):
        # Compile pyfunc as a CUDA device function and wrap it in a kernel
        # that applies it to a one-element array, so the tests can call it
        # like an ordinary scalar function.
        wrapped_func = cuda.jit(device=True)(pyfunc)
        @cuda.jit
        def cuda_wrapper_fn(arg, res):
            res[0] = wrapped_func(arg[0])
        def wrapper_fn(arg):
            argarray = np.zeros(1, dtype=intype)
            argarray[0] = arg
            resarray = np.zeros(1, dtype=outtype)
            cuda_wrapper_fn(argarray, resarray)
            return resarray[0]
        return wrapper_fn

    def test_float_to_int(self):
        # float->int32 must truncate toward zero, like int().
        pyfunc = float_to_int
        cfunc = self._create_wrapped(pyfunc, np.float32, np.int32)
        self.assertEqual(cfunc(12.3), pyfunc(12.3))
        self.assertEqual(cfunc(12.3), int(12.3))
        self.assertEqual(cfunc(-12.3), pyfunc(-12.3))
        self.assertEqual(cfunc(-12.3), int(-12.3))

    def test_int_to_float(self):
        pyfunc = int_to_float
        cfunc = self._create_wrapped(pyfunc, np.int64, np.float64)
        self.assertEqual(cfunc(321), pyfunc(321))
        self.assertEqual(cfunc(321), 321. / 2)

    def test_float_to_unsigned(self):
        # Compare against reinterpreting the truncated int as unsigned bits.
        pyfunc = float_to_unsigned
        cfunc = self._create_wrapped(pyfunc, np.float32, np.uint32)
        self.assertEqual(cfunc(3.21), pyfunc(3.21))
        self.assertEqual(cfunc(3.21), struct.unpack('I', struct.pack('i',
                                                                     3))[0])

    def test_float_to_complex(self):
        pyfunc = float_to_complex
        cfunc = self._create_wrapped(pyfunc, np.float64, np.complex128)
        self.assertEqual(cfunc(-3.21), pyfunc(-3.21))
        self.assertEqual(cfunc(-3.21), -3.21 + 0j)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| stefanseefeld/numba | numba/cuda/tests/cudapy/test_casting.py | Python | bsd-2-clause | 2,045 |
# -*- coding: UTF-8 -*-
from __future__ import with_statement
from gnr.core import gnrdate
import datetime
# --- decodeDatePeriod: relative days, weeks, months, years, open/closed
# --- periods.  Results are 'YYYY-MM-DD' or 'start;end' strings relative to
# --- the supplied workdate.

def test_relativeDay():
    workdate = datetime.date(2008, 4, 25)
    res = gnrdate.decodeDatePeriod("today", workdate=workdate)
    assert res == '2008-04-25'
    res = gnrdate.decodeDatePeriod("yesterday", workdate=workdate)
    assert res == '2008-04-24'
    res = gnrdate.decodeDatePeriod("tomorrow", workdate=workdate)
    assert res == '2008-04-26'
    # month boundaries
    workdate = datetime.date(2008, 4, 1)
    res = gnrdate.decodeDatePeriod("yesterday", workdate=workdate)
    assert res == '2008-03-31'
    workdate = datetime.date(2008, 4, 30)
    res = gnrdate.decodeDatePeriod("tomorrow", workdate=workdate)
    assert res == '2008-05-01'

def test_relativeDayLocal():
    # same keywords, Italian locale
    workdate = datetime.date(2008, 4, 25)
    res = gnrdate.decodeDatePeriod("oggi", workdate=workdate, locale='it')
    assert res == '2008-04-25'
    res = gnrdate.decodeDatePeriod("ieri", workdate=workdate, locale='it')
    assert res == '2008-04-24'
    res = gnrdate.decodeDatePeriod("domani", workdate=workdate, locale='it')
    assert res == '2008-04-26'

def test_week():
    workdate = datetime.date(2008, 4, 25)
    res = gnrdate.decodeDatePeriod("this week", workdate=workdate)
    assert res == '2008-04-21;2008-04-27'
    res = gnrdate.decodeDatePeriod("next week", workdate=workdate)
    assert res == '2008-04-28;2008-05-04'
    res = gnrdate.decodeDatePeriod("last week", workdate=workdate)
    assert res == '2008-04-14;2008-04-20'

def test_month():
    workdate = datetime.date(2008, 4, 25)
    res = gnrdate.decodeDatePeriod("this month", workdate=workdate)
    assert res == '2008-04-01;2008-04-30'
    res = gnrdate.decodeDatePeriod("month", workdate=workdate)
    assert res == '2008-04-01;2008-04-30'
    res = gnrdate.decodeDatePeriod("next month", workdate=workdate)
    assert res == '2008-05-01;2008-05-31'
    res = gnrdate.decodeDatePeriod("last month", workdate=workdate)
    assert res == '2008-03-01;2008-03-31'

def test_monthLocal():
    workdate = datetime.date(2008, 4, 25)
    res = gnrdate.decodeDatePeriod("questo mese", workdate=workdate, locale='it')
    assert res == '2008-04-01;2008-04-30'
    res = gnrdate.decodeDatePeriod("mese", workdate=workdate, locale='it')
    assert res == '2008-04-01;2008-04-30'

def test_year():
    workdate = datetime.date(2008, 4, 25)
    res = gnrdate.decodeDatePeriod("2007", workdate=workdate)
    assert res == '2007-01-01;2007-12-31'
    res = gnrdate.decodeDatePeriod("07", workdate=workdate)
    assert res == '2007-01-01;2007-12-31'
    # high two-digit years resolve to the 1900s
    res = gnrdate.decodeDatePeriod("96", workdate=workdate)
    assert res == '1996-01-01;1996-12-31'

def test_monthName():
    workdate = datetime.date(2008, 4, 25)
    # leap year: February 2008 has 29 days
    res = gnrdate.decodeDatePeriod("february", workdate=workdate)
    assert res == '2008-02-01;2008-02-29'

def test_periodTo():
    # open-ended periods: "to X" leaves the start side empty
    workdate = datetime.date(2008, 4, 25)
    res = gnrdate.decodeDatePeriod("to tomorrow", workdate=workdate)
    assert res == ';2008-04-26'
    res = gnrdate.decodeDatePeriod("to january", workdate=workdate)
    assert res == ';2008-01-31'
    res = gnrdate.decodeDatePeriod("to april", workdate=workdate)
    assert res == ';2008-04-30'
    # a bare month name in "to ..." resolves to the most recent occurrence
    res = gnrdate.decodeDatePeriod("to december", workdate=workdate)
    assert res == ';2007-12-31'
    res = gnrdate.decodeDatePeriod("to december 2007", workdate=workdate)
    assert res == ';2007-12-31'

def test_periodFrom():
    # open-ended periods: "from X" leaves the end side empty
    workdate = datetime.date(2008, 4, 25)
    res = gnrdate.decodeDatePeriod("from tomorrow + 2", workdate=workdate)
    assert res == '2008-04-28;'
    res = gnrdate.decodeDatePeriod("from december 07", workdate=workdate)
    assert res == '2007-12-01;'
    # a bare month name in "from ..." resolves to the next occurrence
    res = gnrdate.decodeDatePeriod("from december", workdate=workdate)
    assert res == '2008-12-01;'
    res = gnrdate.decodeDatePeriod("from february", workdate=workdate)
    assert res == '2008-02-01;'

def test_periodFull():
    workdate = datetime.date(2008, 4, 25)
    res = gnrdate.decodeDatePeriod("from february to today", workdate=workdate)
    assert res == '2008-02-01;2008-04-25'
    res = gnrdate.decodeDatePeriod("december to today", workdate=workdate)
    assert res == '2007-12-01;2008-04-25'
    res = gnrdate.decodeDatePeriod("from december 06 to march", workdate=workdate)
    assert res == '2006-12-01;2008-03-31'
    res = gnrdate.decodeDatePeriod("from december to march 06", workdate=workdate)
    assert res == '2005-12-01;2006-03-31'
    res = gnrdate.decodeDatePeriod("from december to this month", workdate=workdate)
    assert res == '2007-12-01;2008-04-30'
    res = gnrdate.decodeDatePeriod("between december and this month", workdate=workdate)
    assert res == '2007-12-01;2008-04-30'
    res = gnrdate.decodeDatePeriod("from last week to next month", workdate=workdate)
    assert res == '2008-04-14;2008-05-31'

def test_periodLocal():
    workdate = datetime.date(2008, 4, 25)
    res = gnrdate.decodeDatePeriod("da dicembre a mar 06", workdate=workdate, locale='it')
    assert res == '2005-12-01;2006-03-31'
    res = gnrdate.decodeDatePeriod("da dicembre a questo mese", workdate=workdate, locale='it')
    assert res == '2007-12-01;2008-04-30'
    res = gnrdate.decodeDatePeriod("da settimana scorsa al mese prossimo", workdate=workdate, locale='it')
    assert res == '2008-04-14;2008-05-31'
    res = gnrdate.decodeDatePeriod(u"da dicembre", workdate=workdate, locale='it')
    assert res == '2008-12-01;'
    res = gnrdate.decodeDatePeriod(u"a dicembre", workdate=workdate, locale='it')
    assert res == ';2007-12-31'
    res = gnrdate.decodeDatePeriod(u"dal 23-12-07 a aprile", workdate=workdate, locale='it')
    assert res == '2007-12-23;2008-04-30'
# --- decodeDatePeriod: weekday names, localized/ISO date formats,
# --- separator-less dates, quarters and day/month arithmetic.

def test_weekDay():
    # a weekday name resolves to that day of the current week
    workdate = datetime.date(2008, 4, 25)
    res = gnrdate.decodeDatePeriod("monday", workdate=workdate)
    assert res == '2008-04-21'
    res = gnrdate.decodeDatePeriod(u"lunedì", workdate=workdate, locale='it')
    assert res == '2008-04-21'
    res = gnrdate.decodeDatePeriod(u"da lunedì a oggi", workdate=workdate, locale='it')
    assert res == '2008-04-21;2008-04-25'
    res = gnrdate.decodeDatePeriod(u"da lunedì a oggi", workdate=workdate, locale='it')
    assert res == '2008-04-21;2008-04-25'

def test_localDate():
    workdate = datetime.date(2008, 4, 25)
    # res = gnrdate.decodeDatePeriod(u"02 01, 2007", workdate=workdate, locale='en') ### TODO: fails in babel.dates.parse_date
    # assert res == '2007-02-01'
    res = gnrdate.decodeDatePeriod(u"02/01/08", workdate=workdate, locale='en_au')
    assert res == '2008-01-02'
    res = gnrdate.decodeDatePeriod(u"02/01/08", workdate=workdate, locale='it')
    assert res == '2008-01-02'
    res = gnrdate.decodeDatePeriod(u"02/01/2008", workdate=workdate, locale='it')
    assert res == '2008-01-02'
    res = gnrdate.decodeDatePeriod(u"02-01-2008", workdate=workdate, locale='it')
    assert res == '2008-01-02'
    res = gnrdate.decodeDatePeriod(u"02 01 2008", workdate=workdate, locale='it')
    assert res == '2008-01-02'

def test_isoDate():
    # ISO dates are accepted regardless of locale
    workdate = datetime.date(2008, 4, 25)
    res = gnrdate.decodeDatePeriod(u"2008-01-02", workdate=workdate, locale='it')
    assert res == '2008-01-02'
    res = gnrdate.decodeDatePeriod(u"2008-01-02 to 2008-02-02", workdate=workdate)
    assert res == '2008-01-02;2008-02-02'

def test_localDateNoSep():
    # dates typed without separators (ddmmyyyy / ddmmyy for 'it')
    workdate = datetime.date(2008, 4, 25)
    res = gnrdate.decodeDatePeriod(u"02012008", workdate=workdate, locale='it')
    assert res == '2008-01-02'
    res = gnrdate.decodeDatePeriod(u"020108", workdate=workdate, locale='it')
    assert res == '2008-01-02'

def test_localPeriodNoSep():
    workdate = datetime.date(2008, 4, 25)
    res = gnrdate.decodeDatePeriod(u"01012008 a 31012008", workdate=workdate, locale='it')
    assert res == '2008-01-01;2008-01-31'
    res = gnrdate.decodeDatePeriod(u"010108 a 310108", workdate=workdate, locale='it')
    assert res == '2008-01-01;2008-01-31'

def test_quarter():
    # quarters: "1st quarter"/"Q1" (en) and "1o trimestre"/"T1" (it)
    workdate = datetime.date(2008, 4, 25)
    res = gnrdate.decodeDatePeriod(u"1st quarter", workdate=workdate, locale='en')
    assert res == '2008-01-01;2008-03-31'
    res = gnrdate.decodeDatePeriod(u"from 1st quarter to 2nd quarter", workdate=workdate, locale='en')
    assert res == '2008-01-01;2008-06-30'
    res = gnrdate.decodeDatePeriod(u"Q1", workdate=workdate, locale='en')
    assert res == '2008-01-01;2008-03-31'
    res = gnrdate.decodeDatePeriod(u"from Q1 to Q2", workdate=workdate, locale='en')
    assert res == '2008-01-01;2008-06-30'
    res = gnrdate.decodeDatePeriod(u"1o trimestre", workdate=workdate, locale='it')
    assert res == '2008-01-01;2008-03-31'
    res = gnrdate.decodeDatePeriod(u"dal 1o trimestre al 2o trimestre", workdate=workdate, locale='it')
    assert res == '2008-01-01;2008-06-30'
    res = gnrdate.decodeDatePeriod(u"T1", workdate=workdate, locale='it')
    assert res == '2008-01-01;2008-03-31'
    res = gnrdate.decodeDatePeriod(u"da T1 a T2", workdate=workdate, locale='it')
    assert res == '2008-01-01;2008-06-30'

def test_addToDay():
    # "+ n" / "- n" shifts a single day by n days
    workdate = datetime.date(2008, 4, 25)
    res = gnrdate.decodeDatePeriod(u"today + 1", workdate=workdate)
    assert res == '2008-04-26'
    res = gnrdate.decodeDatePeriod(u"today + 6", workdate=workdate)
    assert res == '2008-05-01'
    res = gnrdate.decodeDatePeriod(u"tomorrow + 6", workdate=workdate)
    assert res == '2008-05-02'
    res = gnrdate.decodeDatePeriod(u"yesterday + 6", workdate=workdate)
    assert res == '2008-04-30'
    res = gnrdate.decodeDatePeriod(u"today - 6", workdate=workdate)
    assert res == '2008-04-19'
    res = gnrdate.decodeDatePeriod(u"from today - 6 to tomorrow + 2", workdate=workdate)
    assert res == '2008-04-19;2008-04-28'

def test_addToMonth():
    # "+ n" / "- n" on a month period shifts by whole months
    workdate = datetime.date(2008, 4, 25)
    res = gnrdate.decodeDatePeriod(u"this month + 1", workdate=workdate)
    assert res == '2008-05-01;2008-05-31'
    res = gnrdate.decodeDatePeriod(u"this month - 1", workdate=workdate)
    assert res == '2008-03-01;2008-03-31'
    res = gnrdate.decodeDatePeriod(u"from this month - 1 to this month + 1", workdate=workdate)
    assert res == '2008-03-01;2008-05-31'
    res = gnrdate.decodeDatePeriod(u"from this month - 6 to this month + 6", workdate=workdate)
    assert res == '2007-10-01;2008-10-31'
    res = gnrdate.decodeDatePeriod(u"from this month - 12 to this month + 12", workdate=workdate)
    assert res == '2007-04-01;2009-04-30'
# --- conversion helpers (toTime/toDate/dateRange) and the
# --- TimeInterval / TimePeriod classes.

def test_toTime():
    dt = datetime.datetime(2010, 4, 8, 10, 30)
    t = datetime.time(10, 30)
    assert isinstance(gnrdate.toTime(dt), datetime.time)
    assert isinstance(gnrdate.toTime(t), datetime.time)
    assert isinstance(gnrdate.toTime('10:30'), datetime.time)
    assert gnrdate.toTime(dt) == t
    assert gnrdate.toTime('10:30') == t

def test_toDate():
    dt = datetime.datetime(2010, 4, 8, 10, 30)
    d = datetime.date(2010, 4, 8)
    assert isinstance(gnrdate.toDate(dt), datetime.date)
    assert isinstance(gnrdate.toDate(d), datetime.date)
    assert gnrdate.toDate(dt) == d

def test_dateRange():
    # dateRange is stop-exclusive, like range()
    dtstart = datetime.datetime(2010, 4, 1)
    dtstop = datetime.datetime(2010, 4, 10)
    expected = [datetime.datetime(2010, 4, d) for d in range(1, 10)]
    assert list(gnrdate.dateRange(dtstart, dtstop)) == expected

def test_TimeInterval():
    # constructible from a 'H:MM-H:MM' string, two times, or a tuple
    i = gnrdate.TimeInterval('8:30-10:30')
    assert str(i) == '8:30-10:30'
    assert i.start == datetime.time(8, 30)
    assert i.stop == datetime.time(10, 30)
    assert str(gnrdate.TimeInterval(datetime.time(8, 30), datetime.time(10, 30))) == str(i)
    assert str(gnrdate.TimeInterval((datetime.time(8, 30), datetime.time(10, 30)))) == str(i)

def test_TimeInterval_alt_construction():
    # stop + duration in minutes implies the start
    i = gnrdate.TimeInterval('8:30-10:30')
    i2 = gnrdate.TimeInterval(stop='10:30', minutes=120)
    assert i == i2

def test_TimeInterval_operators():
    ti = gnrdate.TimeInterval
    assert ti('8:30-10:30') == '8:30-10:30'
    assert ti('8:30-10:30') == ti('8:30-10:30')
    assert not (ti('8:30-10:30') != ti('8:30-10:30'))
    assert ti('8:30-10:30') < ti('11:00-12:00')
    assert ti('8:30-10:30') <= ti('11:00-12:00')
    assert not (ti('8:30-10:30') < ti('10:00-12:00'))
    assert ti('8:30-10:30') <= ti('10:00-12:00')
    assert ti('11:00-12:00') > ti('8:30-10:30')
    assert ti('11:00-12:00') >= ti('8:30-10:30')
    assert not (ti('10:00-12:00') > ti('8:30-10:30'))
    assert ti('10:00-12:00') >= ti('8:30-10:30')
    # ``in`` means "overlaps", not strict containment
    assert ti('8:30-10:30') in ti('10:00-12:00')
    assert ti('8:30-10:30') not in ti('11:00-12:00')
    assert ti('8:30-9:30') in ti('8:00-12:00')

def test_TimeInterval_minutes():
    ti = gnrdate.TimeInterval
    i = ti('8:30-9:30')
    assert i.minutes == 60
    # assigning minutes moves the stop time, keeping the start fixed
    i.minutes = 30
    assert ti('8:30-9:00') == i

def test_TimeInterval_overlaps():
    ti = gnrdate.TimeInterval
    assert ti('8:00-10:00').overlaps(ti('14:00-16:00')) == ti.NO_OVERLAP
    assert ti('8:30-10:30').overlaps(ti('9:00-9:30')) == ti.FULLY_CONTAINS
    assert ti('9:00-9:30').overlaps(ti('8:30-10:30')) == ti.FULLY_CONTAINED
    assert ti('8:00-10:00').overlaps(ti('9:00-12:00')) == ti.COVER_LEFT
    assert ti('9:00-12:00').overlaps(ti('8:00-10:00')) == ti.COVER_RIGHT
    t = ti('8:00-10:00')
    assert t.overlaps(t) == ti.FULLY_CONTAINS

def test_TimeInterval_sorted():
    ti = gnrdate.TimeInterval('9:00-10:00')
    tp = gnrdate.TimePeriod('8:00-12:00')
    tp.remove(ti)
    assert tp == gnrdate.TimePeriod('8:00-9:00, 10:00-12:00')
    lst = [ti] + tp.intervals
    assert gnrdate.TimeInterval.sorted(lst) == ['8:00-9:00', '9:00-10:00', '10:00-12:00']

def test_TimePeriod():
    # overlapping intervals are merged on construction
    p = gnrdate.TimePeriod('8:30-10:30', '9:30-11:00')
    assert p.intervals == [gnrdate.TimeInterval('8:30-11:00')]
    assert str(p) == '8:30-11:00'
    p.add(gnrdate.TimeInterval('14:00-16:00')) # non-overlapping => add
    assert str(p) == '8:30-11:00, 14:00-16:00'
    p.remove(gnrdate.TimeInterval('10:30-12:00')) # overlapping => reduce existing interval
    assert str(p) == '8:30-10:30, 14:00-16:00'
    p.remove(gnrdate.TimeInterval('12:00-13:00')) # non-overlapping => noop
    assert str(p) == '8:30-10:30, 14:00-16:00'
    p.remove(gnrdate.TimeInterval('14:00-16:00')) # fully overlapping => remove
    assert str(p) == '8:30-10:30'

def test_TimePeriod_complex():
    p = gnrdate.TimePeriod('8:00-12:00', '16:00-20:00')
    print "p=", p
    for i in ('8:00-9:00', '9:30-10:00', '10:00-11:30', '16:00-16:30', '17:00-18:00', '18:00-19:00', '19:00-20:00'):
        print "removing", i
        p.remove(i)
        print "p=", p
    assert str(p) == '9:00-9:30, 11:30-12:00, 16:30-17:00'

def test_TimePeriod_complex_attributes():
    # fragments produced by remove() keep the custom attributes of the
    # interval they were cut from
    iv1 = gnrdate.TimeInterval('8:00-12:00')
    iv1.name = 'morning'
    iv2 = gnrdate.TimeInterval('16:00-20:00')
    iv2.name = 'afternoon'
    p = gnrdate.TimePeriod(iv1, iv2)
    print "p=", p
    for i in ('8:00-9:00', '9:30-10:00', '10:00-11:30', '16:00-16:30', '17:00-18:00', '18:00-19:00', '19:00-20:00'):
        print "removing", i
        p.remove(i)
        print "p=", p
    assert str(p) == '9:00-9:30, 11:30-12:00, 16:30-17:00'
    assert p.intervals[0].name == 'morning'
    assert p.intervals[1].name == 'morning'
    assert p.intervals[2].name == 'afternoon'

def test_TimePeriod_sequence():
    # TimePeriod behaves as a sequence of its intervals
    p = gnrdate.TimePeriod('8:30-10:30', '16:00-20:00')
    assert len(p) == 2
    assert str(p[0]) == '8:30-10:30'
    assert str(p[1]) == '16:00-20:00'
    it = iter(p)
    assert str(it.next()) == '8:30-10:30'
    assert str(it.next()) == '16:00-20:00'

def test_TimePeriod_TimePeriod():
    # add/remove also accept whole TimePeriod arguments
    tp = gnrdate.TimePeriod
    p = tp('8:30-10:30', '16:00-20:00')
    p.add(tp('10:00-12:00', '13:00-16:00'))
    assert str(p) == '8:30-12:00, 13:00-20:00'
    p.remove(tp('10:00-16:00'))
    assert str(p) == '8:30-10:00, 16:00-20:00'
    p1 = tp('8:30-10:30', '16:00-20:00')
    p2 = tp('8:30-10:30', '16:00-20:00')
    assert p1 == p2
    p3 = tp('8:30-10:30', '16:00-18:00')
    assert p1 != p3

def test_TimePeriod_BugAtEnd():
    # named for a former bug when removing an interval at the period's end
    tp = gnrdate.TimePeriod
    p = tp('8:00-12:00')
    p.remove('10:00-12:00')
    assert str(p) == '8:00-10:00'

if __name__ == "__main__":
    test_TimePeriod_BugAtEnd()
#Should handle most of the default functions that will come back throughout
def lowerit(i): #Makes the input lowercase for if blocks
    # Returns a NEW lowercased string; the caller must use the return value.
    return i.lower()
def stringit(i): #Makes the input strings for whatever reason
    # Returns str(i); the caller must use the return value.
    return str(i)
def left_is_right(i): #Function for making the "Left" direction correct
    """Return 'correct' for 'left', 'wrong' for 'right', else an error string.

    Comparison is case-insensitive.  The original called stringit()/lowerit()
    but discarded their return values, so any non-lowercase input fell
    through to the error branch.
    """
    i = str(i).lower()  # normalize in place (fixes discarded-return bug)
    if i == "left":
        return "correct"
    elif i == "right":
        return "wrong"
    else:
        return "error (420)"
def right_is_right(i): #Function for making the "Right" direction correct
    """Return 'correct' for 'right', 'wrong' for 'left', else an error string.

    Case-insensitive; the original discarded stringit()/lowerit() results.
    """
    i = str(i).lower()  # normalize in place (fixes discarded-return bug)
    if i == "left":
        return "wrong"
    elif i == "right":
        return "correct"
    else:
        return "error (420)"
def up_is_right(i): #Function for making the "Up" direction correct
    """Return 'correct' for 'up', 'wrong' for 'down', else an error string.

    Case-insensitive; the original discarded stringit()/lowerit() results.
    """
    i = str(i).lower()  # normalize in place (fixes discarded-return bug)
    if i == "up":
        return "correct"
    elif i == "down":
        return "wrong"
    else:
        return "error (420)"
def down_is_right(i): #Function for making the "Down" direction correct
    """Return 'correct' for 'down', 'wrong' for 'up', else an error string.

    Case-insensitive; the original discarded stringit()/lowerit() results.
    """
    i = str(i).lower()  # normalize in place (fixes discarded-return bug)
    if i == "up":
        return "wrong"
    elif i == "down":
        return "correct"
    else:
        return "error (420)"
def pickup_is_right(i): #Function for making the "Pick Up" option correct
    """Return 'correct' for 'pick up', 'wrong' for 'leave there', else error.

    Case-insensitive; the original discarded stringit()/lowerit() results.
    """
    i = str(i).lower()  # normalize in place (fixes discarded-return bug)
    if i == "pick up":
        return "correct"
    elif i == "leave there":
        return "wrong"
    else:
        return "error (420)"
def leavethere_is_right(i): #Function for making the "Leave there" option correct
    """Return 'correct' for 'leave there', 'wrong' for 'pick up', else error.

    Case-insensitive; the original discarded stringit()/lowerit() results.
    """
    i = str(i).lower()  # normalize in place (fixes discarded-return bug)
    if i == "pick up":
        return "wrong"
    elif i == "leave there":
        return "correct"
    else:
        return "error (420)"
def yes_or_no(i): #Function returning yes or no
    """Return 'yes' or 'no' (case-insensitive), accepting 'y'/'n' shorthand.

    Implements the TODO noted in the original ("OR statement removed ...
    Will fix soonish") with tuple membership instead of chained ``or``;
    also fixes the discarded stringit()/lowerit() return values.
    """
    i = str(i).lower()
    if i in ("yes", "y"):
        return "yes"
    elif i in ("no", "n"):
        return "no"
    else:
        return "error (420)"
def menufunc(i): #Function managing menu
    """Map a menu command to its action string (case-insensitive).

    Implements the TODO noted in the original ("OR statement removed ...
    Will fix soonish"): accepts "continue" and "htp" as aliases, using
    tuple membership like the existing "passcode"/"pc" branch; also fixes
    the discarded stringit()/lowerit() return values.
    """
    i = str(i).lower()
    if i in ("start game", "continue"):
        return "starting game"
    elif i in ("how to play", "htp"):
        return "how to play"
    elif i in ("passcode", "pc"):
        return "passcode"
    else:
        return "Error (69)"
def wake_up_is_right(i): #Wake up sequence function
    """Return 'correct' for 'wake up', 'wrong' for 'hit snooze', else error.

    Case-insensitive; the original discarded stringit()/lowerit() results.
    """
    i = str(i).lower()  # normalize in place (fixes discarded-return bug)
    if i == "wake up":
        return "correct"
    elif i == "hit snooze":
        return "wrong"
    else:
        return "error (420)"
def open_is_right(i): #Door open function
    """Return 'correct' for 'open', 'wrong' for 'keep closed', else error.

    Case-insensitive; the original discarded stringit()/lowerit() results.
    """
    i = str(i).lower()  # normalize in place (fixes discarded-return bug)
    if i == "open":
        return "correct"
    elif i == "keep closed":
        return "wrong"
    else:
        return "error (420)"
def close_is_right(i): #Door keep-closed function
    """Return 'correct' for 'keep closed', 'wrong' for 'open', else error.

    Case-insensitive; the original discarded stringit()/lowerit() results.
    """
    i = str(i).lower()  # normalize in place (fixes discarded-return bug)
    if i == "open":
        return "wrong"
    elif i == "keep closed":
        return "correct"
    else:
        return "error (420)"
def turn_on_is_right(i): #Turning on is right
    """Return 'correct' for 'turn on', 'wrong' for 'keep off', else error.

    Added the case-insensitive normalization every other checker in this
    file attempts; the original compared the raw input directly.
    """
    i = str(i).lower()
    if i == "turn on":
        return "correct"
    elif i == "keep off":
        return "wrong"
    else:
        return "Error (420)"
def keep_off_is_right(i): #Keeping off is right
    """Return 'correct' for 'keep off', 'wrong' for 'turn on', else error.

    Added the case-insensitive normalization used by the other checkers,
    and fixed the error string "Error 420" to "Error (420)" to match the
    paired turn_on_is_right() function.
    """
    i = str(i).lower()
    if i == "turn on":
        return "wrong"
    elif i == "keep off":
        return "correct"
    else:
        return "Error (420)"
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Apply: add a nullable 255-char 'description' column to layers_attribute."""
    # Adding field 'Attribute.description'
    db.add_column(u'layers_attribute', 'description',
                  self.gf('django.db.models.fields.CharField')(max_length=255, null=True),
                  keep_default=False)
def backwards(self, orm):
# Deleting field 'Attribute.description'
db.delete_column(u'layers_attribute', 'description')
models = {
u'actstream.action': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'Action'},
'action_object_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'action_object'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'action_object_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'actor_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actor'", 'to': u"orm['contenttypes.ContentType']"}),
'actor_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'base.contactrole': {
'Meta': {'unique_together': "(('contact', 'resource', 'role'),)", 'object_name': 'ContactRole'},
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['people.Profile']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.ResourceBase']"}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['people.Role']"})
},
u'base.region': {
'Meta': {'ordering': "('name',)", 'object_name': 'Region'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'base.resourcebase': {
'Meta': {'object_name': 'ResourceBase'},
'abstract': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'bbox_x0': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '10', 'blank': 'True'}),
'bbox_x1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '10', 'blank': 'True'}),
'bbox_y0': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '10', 'blank': 'True'}),
'bbox_y1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '10', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.TopicCategory']", 'null': 'True', 'blank': 'True'}),
'constraints_other': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'contacts': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['people.Profile']", 'through': u"orm['base.ContactRole']", 'symmetrical': 'False'}),
'csw_anytext': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'csw_insert_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'csw_mdsource': ('django.db.models.fields.CharField', [], {'default': "'local'", 'max_length': '256'}),
'csw_schema': ('django.db.models.fields.CharField', [], {'default': "'http://www.isotc211.org/2005/gmd'", 'max_length': '64'}),
'csw_type': ('django.db.models.fields.CharField', [], {'default': "'dataset'", 'max_length': '32'}),
'csw_typename': ('django.db.models.fields.CharField', [], {'default': "'gmd:MD_Metadata'", 'max_length': '32'}),
'csw_wkt_geometry': ('django.db.models.fields.TextField', [], {'default': "'POLYGON((-180 -90,-180 90,180 90,180 -90,-180 -90))'"}),
'data_quality_statement': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_type': ('django.db.models.fields.CharField', [], {'default': "'publication'", 'max_length': '255'}),
'distribution_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'distribution_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'edition': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'eng'", 'max_length': '3'}),
'maintenance_frequency': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'metadata_uploaded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'metadata_xml': ('django.db.models.fields.TextField', [], {'default': '\'<gmd:MD_Metadata xmlns:gmd="http://www.isotc211.org/2005/gmd"/>\'', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'purpose': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'regions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['base.Region']", 'symmetrical': 'False'}),
'restriction_code_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.RestrictionCodeType']", 'null': 'True', 'blank': 'True'}),
'spatial_representation_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.SpatialRepresentationType']", 'null': 'True', 'blank': 'True'}),
'srid': ('django.db.models.fields.CharField', [], {'default': "'EPSG:4326'", 'max_length': '255'}),
'supplemental_information': ('django.db.models.fields.TextField', [], {'default': "u'No information provided'"}),
'temporal_extent_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'temporal_extent_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.Thumbnail']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
u'base.restrictioncodetype': {
'Meta': {'ordering': "('identifier',)", 'object_name': 'RestrictionCodeType'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '255'}),
'gn_description': ('django.db.models.fields.TextField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'is_choice': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'base.spatialrepresentationtype': {
'Meta': {'ordering': "('identifier',)", 'object_name': 'SpatialRepresentationType'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'gn_description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'is_choice': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'base.thumbnail': {
'Meta': {'object_name': 'Thumbnail'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thumb_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'thumb_spec': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True'})
},
u'base.topiccategory': {
'Meta': {'ordering': "('identifier',)", 'object_name': 'TopicCategory'},
'description': ('django.db.models.fields.TextField', [], {}),
'documents_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'gn_description': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'default': "'location'", 'max_length': '255'}),
'is_choice': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'layers_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'maps_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'layers.attribute': {
'Meta': {'object_name': 'Attribute'},
'attribute': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'attribute_label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'attribute_type': ('django.db.models.fields.CharField', [], {'default': "'xsd:string'", 'max_length': '50'}),
'average': ('django.db.models.fields.CharField', [], {'default': "'NA'", 'max_length': '255', 'null': 'True'}),
'count': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'display_order': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_stats_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'layer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_set'", 'to': u"orm['layers.Layer']"}),
'max': ('django.db.models.fields.CharField', [], {'default': "'NA'", 'max_length': '255', 'null': 'True'}),
'median': ('django.db.models.fields.CharField', [], {'default': "'NA'", 'max_length': '255', 'null': 'True'}),
'min': ('django.db.models.fields.CharField', [], {'default': "'NA'", 'max_length': '255', 'null': 'True'}),
'stddev': ('django.db.models.fields.CharField', [], {'default': "'NA'", 'max_length': '255', 'null': 'True'}),
'sum': ('django.db.models.fields.CharField', [], {'default': "'NA'", 'max_length': '255', 'null': 'True'}),
'unique_values': ('django.db.models.fields.TextField', [], {'default': "'NA'", 'null': 'True', 'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'layers.layer': {
'Meta': {'object_name': 'Layer', '_ormbases': [u'base.ResourceBase']},
'default_style': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'layer_default_style'", 'null': 'True', 'to': u"orm['layers.Style']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'popular_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'resourcebase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['base.ResourceBase']", 'unique': 'True', 'primary_key': 'True'}),
'share_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'store': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'storeType': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'styles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'layer_styles'", 'symmetrical': 'False', 'to': u"orm['layers.Style']"}),
'typename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'workspace': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'layers.style': {
'Meta': {'object_name': 'Style'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'sld_body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sld_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'sld_url': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'sld_version': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'workspace': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'people.profile': {
'Meta': {'object_name': 'Profile'},
'area': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'delivery': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'profile': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'profile'", 'unique': 'True', 'null': 'True', 'to': u"orm['auth.User']"}),
'voice': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'people.role': {
'Meta': {'object_name': 'Role'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['layers'] | sopac/pacgeo | geonode/layers/migrations/0009_auto__add_field_attribute_description.py | Python | gpl-3.0 | 21,076 |
import requests
from bs4 import BeautifulSoup
from becquerel_django.ratatouille.webparser import ingredients_parser as parse
# Fetch a single Marmiton recipe page and feed its ingredient list to the
# ingredients parser. NOTE(review): the URL is hard-coded to one sample
# recipe — presumably a prototype; confirm before generalising.
recipe_path = "www.marmiton.org/recettes/recette_galette-des-rois_10832.aspx"
response = requests.get("http://" + recipe_path)
soup = BeautifulSoup(response.text, 'html.parser')
# print(soup.prettify())

# The ingredient block is identified by this CSS class pair on the page.
ingredients_div = soup.find('div', {'class': "m_content_recette_ingredients m_avec_substitution"})
# print(ingredients_div.text)

# Flatten the multi-line ingredient text into one space-separated string.
ingredients_flat = ingredients_div.text.replace('\n', ' ')
# print(ingredients_flat)
parse(ingredients_flat)

# for link in soup.find_all('a'):
#     print(link.get('href'))
| BecquerelRecipe/Trunk | scraper.py | Python | apache-2.0 | 635 |
__author__ = 'Ilya Chernyakov'
__alias__ = 'eliuha'
# Shalom everybody !
def validate_israeli_id_number(iID):
    """Validate an Israeli ID number (Teudat Zehut) check digit.

    The algorithm is the standard Luhn variant: digits are weighted
    1,2,1,2,... left to right, products above 9 are reduced by summing their
    digits (equivalently subtracting 9), and the total must be divisible
    by 10.

    Improvements over the original:
    - any ID shorter than 9 digits is zero-padded with ``zfill(9)``
      (generalises the old special case that only padded 8-digit inputs;
      official IDs may have any number of leading zeros),
    - non-numeric input now returns False instead of raising ValueError.

    :param iID: the ID as an int or a string of digits
    :return: True if the check digit is valid, else False
    """
    digits = str(iID)
    if not digits.isdigit() or len(digits) > 9:
        return False
    digits = digits.zfill(9)  # restore leading zeros
    total = 0
    for index, ch in enumerate(digits):
        product = int(ch) * (1 + index % 2)  # alternating weights 1, 2
        # Max product is 18, so "sum of the product's digits" == product - 9.
        total += product - 9 if product > 9 else product
    return total % 10 == 0
| eliuha/pyIsraeliId | pyIsraeliId/checker.py | Python | apache-2.0 | 605 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
# Import distutils' setup(); on stripped-down Pythons that lack it, print a
# boxed hint to stderr and abort with a non-zero exit code.
try:
    from distutils.core import setup
except ImportError:
    sys.stderr.write("""\
.------------------------------------------------------------------------.
| It seems that the package Distutils is not available for this Python. |
| You might fetch and install Distutils and retry your command, or else, |
| figure out where the Pymacs/ directory should go, and make that copy. |
`------------------------------------------------------------------------'
""")
    sys.exit(1)

# Distribution metadata for the Pymacs package.
package = 'Pymacs'
version = '0.24-beta2'

setup(name=package, version=version,
      description="Interface between Emacs Lisp and Python",
      author='François Pinard', author_email='pinard@iro.umontreal.ca',
      url='http://pymacs.progiciels-bpi.ca',
      packages=['Pymacs'])
| carlhu/emacs_terminal | pinard-Pymacs-016b0bc/setup.py | Python | gpl-3.0 | 832 |
import json
import re
from yawp.parser import YAWiktionaryParser
# Wiktionary language-section heading this module extracts.
LANG_NAME = 'Serbo-Croatian'
# Heading titles that start a new Definition group.
PARTS_OF_SPEECH = ['Verb', 'Noun', 'Adjective', 'Adverb', 'Preposition', 'Interjection', 'Pronoun', 'Conjunction', 'Letter', 'Particle', 'Proper Noun']
# Heading titles holding inflection tables, attached to the current group.
INFLECTIONS = ['Conjugation', 'Declension']
# Grammatical cases in the order the noun declension template lists them.
NOUN_CASES = ['Nominative', 'Genitive', 'Dative', 'Accusative', 'Vocative', 'Locative', 'Instrumental']
# Matches "|form=word" template parameter lines in verb conjugation tables.
_conjugation_regex = re.compile(r"^\|(?P<form>\w+\.\w+)=(?P<inflected_word>.*)$", re.RegexFlag.MULTILINE)
# Matches "|singular|plural" rows in noun declension tables.
_noun_declension_regex = re.compile(r"\|(?P<inflected_word1>.*?)\|(?P<inflected_word2>.*)$", re.RegexFlag.MULTILINE)
# Adjective declension templates: full (positive + comparative roots),
# definite-only, and definite+indefinite variants.
_adj_full_declension_regex = re.compile(r"sh-adj-full\|(?P<root1>\w+)\|\w+\|(?P<root2>\w+)\|\w+", re.RegexFlag.MULTILINE)
_adj_def_declension_regex = re.compile(r"sh-adj-def\|(?P<root>\w+)\|\w+", re.RegexFlag.MULTILINE)
_adj_defindef_declension_regex = re.compile(r"sh-adj-defindef\|(?P<root>\w+)\|\w+", re.RegexFlag.MULTILINE)
#TODO: PJC There are other declension templates, see https://en.wiktionary.org/wiki/Category:Serbo-Croatian_declension-table_templates
def get(word):
    """Fetch *word* from Wiktionary and return its parsed Serbo-Croatian
    definitions, or None when the entry is missing or has no
    Serbo-Croatian section."""
    entry = YAWiktionaryParser().get(word)
    if not entry:
        return None
    for term in entry.terms:
        if term.language == LANG_NAME:
            return parse_term(term)
    return None
def get_json(word):
    """Return the definitions of *word* as a JSON array string, or ""
    when the word has no Serbo-Croatian entry.

    Improvement: the array is assembled with str.join instead of repeated
    concatenation plus rstrip(","), which was quadratic and would also strip
    any trailing comma that belonged to the last definition's own JSON.
    """
    defs = get(word)
    if defs:
        return "[" + ",".join(d.toJSON() for d in defs) + "]"
    return ""
def parse_term(term):
    """Group a term's headings into Definition objects.

    A new group starts at every part-of-speech heading; inflection headings
    attach to the group currently being collected.
    """
    definitions = []
    current_group = []
    for heading in term.headings:
        if heading.title in PARTS_OF_SPEECH:
            # Flush the previous group before starting a new one.
            if current_group:
                definitions.append(Definition(term.word, current_group))
            current_group = [heading]
        elif heading.title in INFLECTIONS:
            current_group.append(heading)
        # TODO: also capture "Derived terms" and "Related terms" later.
    if current_group:
        definitions.append(Definition(term.word, current_group))
    return definitions
class Definition:
def __init__(self, word, headings):
self.word = word
#self.headings = headings
self.part_of_speech = ''
self.meanings = []
self.inflection = {}
for heading in headings:
self.parse_heading(heading)
def parse_heading(self, heading):
if heading.title in PARTS_OF_SPEECH:
self.part_of_speech = heading.title
lines = heading.text.splitlines()
for line in lines:
if line.strip().startswith('#'):
#TODO: PJC clean up line
self.meanings.append(line)
elif heading.title in INFLECTIONS:
self.inflection['type'] = heading.title
if heading.title == 'Conjugation':
self.parse_conjugation(heading)
elif heading.title == 'Declension':
if self.part_of_speech == 'Noun':
self.parse_noun_declension(heading)
elif self.part_of_speech == 'Adjective':
self.parse_adj_declension(heading)
elif self.part_of_speech == 'Pronoun':
pass #TODO:
else:
pass # TODO: log unknown declension type
def parse_adj_declension(self, heading):
match = _adj_full_declension_regex.search(heading.text)
if match:
self.parse_adj_full_declension(match.groupdict()['root1'],match.groupdict()['root2'])
else:
match = _adj_def_declension_regex.search(heading.text)
if match:
self.parse_adj_def_declension(match.groupdict()['root'])
else:
match = _adj_defindef_declension_regex.search(heading.text)
if match:
self.parse_adj_defindef_declension(match.groupdict()['root'])
#TODO: more else
def parse_adj_full_declension(self, root1, root2):
self.parse_adj_indef_declension(root1)
self.parse_adj_def_declension(root1)
self.parse_adj_comparative_declension(root2)
#TODO: superlative
def parse_adj_defindef_declension(self, root):
self.parse_adj_indef_declension(root)
self.parse_adj_def_declension(root)
def parse_adj_indef_declension(self, root):
self.inflection['Indefinite nominative masculine singular'] = self.word
self.inflection['Indefinite nominative feminine singular'] = root + 'a'
self.inflection['Indefinite nominative neuter singular'] = root + 'o'
self.inflection['Indefinite genitive masculine singular'] = root + 'a'
self.inflection['Indefinite genitive feminine singular'] = root + 'e'
self.inflection['Indefinite genitive neuter singular'] = root + 'a'
self.inflection['Indefinite dative masculine singular'] = root + 'u'
self.inflection['Indefinite dative feminine singular'] = root + 'oj'
self.inflection['Indefinite dative neuter singular'] = root + 'u'
self.inflection['Indefinite accusative masculine inanimate singular'] = self.word
self.inflection['Indefinite accusative feminine singular'] = root + 'u'
self.inflection['Indefinite accusative neuter singular'] = root + 'o'
self.inflection['Indefinite accusative masculine animate singular'] = root + 'a'
self.inflection['Indefinite vocative masculine singular'] = self.word
self.inflection['Indefinite vocative feminine singular'] = root + 'a'
self.inflection['Indefinite vocative neuter singular'] = root + 'o'
self.inflection['Indefinite locative masculine singular'] = root + 'u'
self.inflection['Indefinite locative feminine singular'] = root + 'oj'
self.inflection['Indefinite locative neuter singular'] = root + 'u'
self.inflection['Indefinite instrumental masculine singular'] = root + 'im'
self.inflection['Indefinite instrumental feminine singular'] = root + 'om'
self.inflection['Indefinite instrumental neuter singular'] = root + 'im'
self.inflection['Indefinite nominative masculine plural'] = root + 'i'
self.inflection['Indefinite nominative feminine plural'] = root + 'e'
self.inflection['Indefinite nominative neuter plural'] = root + 'a'
self.inflection['Indefinite genitive masculine plural'] = root + 'ih'
self.inflection['Indefinite genitive feminine plural'] = root + 'ih'
self.inflection['Indefinite genitive neuter plural'] = root + 'ih'
self.inflection['Indefinite dative masculine plural'] = root + 'im(a)'
self.inflection['Indefinite dative feminine plural'] = root + 'im(a)'
self.inflection['Indefinite dative neuter plural'] = root + 'im(a)'
self.inflection['Indefinite accusative masculine plural'] = root + 'e'
self.inflection['Indefinite accusative feminine plural'] = root + 'e'
self.inflection['Indefinite accusative neuter plural'] = root + 'a'
self.inflection['Indefinite vocative masculine plural'] = root + 'i'
self.inflection['Indefinite vocative feminine plural'] = root + 'e'
self.inflection['Indefinite vocative neuter plural'] = root + 'a'
self.inflection['Indefinite locative masculine plural'] = root + 'im(a)'
self.inflection['Indefinite locative feminine plural'] = root + 'im(a)'
self.inflection['Indefinite locative neuter plural'] = root + 'im(a)'
self.inflection['Indefinite instrumental masculine plural'] = root + 'im(a)'
self.inflection['Indefinite instrumental feminine plural'] = root + 'im(a)'
self.inflection['Indefinite instrumental neuter plural'] = root + 'im(a)'
def parse_adj_def_declension(self, root):
self.inflection['Definite nominative masculine singular'] = root + 'i'
self.inflection['Definite nominative feminine singular'] = root + 'a'
self.inflection['Definite nominative neuter singular'] = root + 'o'
self.inflection['Definite genitive masculine singular'] = root + 'og(a)'
self.inflection['Definite genitive feminine singular'] = root + 'e'
self.inflection['Definite genitive neuter singular'] = root + 'og(a)'
self.inflection['Definite dative masculine singular'] = root + 'om(u)'
self.inflection['Definite dative feminine singular'] = root + 'oj'
self.inflection['Definite dative neuter singular'] = root + 'om(u)'
self.inflection['Definite accusative masculine inanimate singular'] = root + 'i'
self.inflection['Definite accusative masculine animate singular'] = root + 'og(a)'
self.inflection['Definite accusative feminine singular'] = root + 'u'
self.inflection['Definite accusative neuter singular'] = root + 'o'
self.inflection['Definite vocative masculine singular'] = root + 'i'
self.inflection['Definite vocative feminine singular'] = root + 'a'
self.inflection['Definite vocative neuter singular'] = root + 'o'
self.inflection['Definite locative masculine singular'] = root + 'om(u)'
self.inflection['Definite locative feminine singular'] = root + 'oj'
self.inflection['Definite locative neuter singular'] = root + 'om(u)'
self.inflection['Definite instrumental masculine singular'] = root + 'im'
self.inflection['Definite instrumental feminine singular'] = root + 'om'
self.inflection['Definite instrumental neuter singular'] = root + 'im'
self.inflection['Definite nominative masculine plural'] = root + 'i'
self.inflection['Definite nominative feminine plural'] = root + 'e'
self.inflection['Definite nominative neuter plural'] = root + 'a'
self.inflection['Definite genitive masculine plural'] = root + 'ih'
self.inflection['Definite genitive feminine plural'] = root + 'ih'
self.inflection['Definite genitive neuter plural'] = root + 'ih'
self.inflection['Definite dative masculine plural'] = root + 'im(a)'
self.inflection['Definite dative feminine plural'] = root + 'im(a)'
self.inflection['Definite dative neuter plural'] = root + 'im(a)'
self.inflection['Definite accusative masculine plural'] = root + 'e'
self.inflection['Definite accusative feminine plural'] = root + 'e'
self.inflection['Definite accusative neuter plural'] = root + 'a'
self.inflection['Definite vocative masculine plural'] = root + 'i'
self.inflection['Definite vocative feminine plural'] = root + 'e'
self.inflection['Definite vocative neuter plural'] = root + 'a'
self.inflection['Definite locative masculine plural'] = root + 'im(a)'
self.inflection['Definite locative feminine plural'] = root + 'im(a)'
self.inflection['Definite locative neuter plural'] = root + 'im(a)'
self.inflection['Definite instrumental masculine plural'] = root + 'im(a)'
self.inflection['Definite instrumental feminine plural'] = root + 'im(a)'
self.inflection['Definite instrumental neuter plural'] = root + 'im(a)'
def parse_adj_comparative_declension(self, root):
self.inflection['Comparative nominative masculine singular'] = root + 'i'
self.inflection['Comparative nominative feminine singular'] = root + 'a'
self.inflection['Comparative nominative neuter singular'] = root + 'o'
self.inflection['Comparative genitive masculine singular'] = root + 'og(a)'
self.inflection['Comparative genitive feminine singular'] = root + 'e'
self.inflection['Comparative genitive neuter singular'] = root + 'og(a)'
self.inflection['Comparative dative masculine singular'] = root + 'om(u)'
self.inflection['Comparative dative feminine singular'] = root + 'oj'
self.inflection['Comparative dative neuter singular'] = root + 'om(u)'
self.inflection['Comparative accusative masculine inanimate singular'] = root + 'i'
self.inflection['Comparative accusative feminine singular'] = root + 'u'
self.inflection['Comparative accusative neuter singular'] = root + 'o'
self.inflection['Comparative accusative masculine animate singular'] = root + 'og(a)'
self.inflection['Comparative vocative masculine singular'] = root + 'i'
self.inflection['Comparative vocative feminine singular'] = root + 'a'
self.inflection['Comparative vocative neuter singular'] = root + 'o'
self.inflection['Comparative locative masculine singular'] = root + 'om(u)'
self.inflection['Comparative locative feminine singular'] = root + 'oj'
self.inflection['Comparative locative neuter singular'] = root + 'om(u)'
self.inflection['Comparative instrumental masculine singular'] = root + 'im'
self.inflection['Comparative instrumental feminine singular'] = root + 'om'
self.inflection['Comparative instrumental neuter singular'] = root + 'im'
self.inflection['Comparative nominative masculine plural'] = root + 'i'
self.inflection['Comparative nominative feminine plural'] = root + 'e'
self.inflection['Comparative nominative neuter plural'] = root + 'a'
self.inflection['Comparative genitive masculine plural'] = root + 'ih'
self.inflection['Comparative genitive feminine plural'] = root + 'ih'
self.inflection['Comparative genitive neuter plural'] = root + 'ih'
self.inflection['Comparative dative masculine plural'] = root + 'im(a)'
self.inflection['Comparative dative feminine plural'] = root + 'im(a)'
self.inflection['Comparative dative neuter plural'] = root + 'im(a)'
self.inflection['Comparative accusative masculine plural'] = root + 'e'
self.inflection['Comparative accusative feminine plural'] = root + 'e'
self.inflection['Comparative accusative neuter plural'] = root + 'a'
self.inflection['Comparative vocative masculine plural'] = root + 'i'
self.inflection['Comparative vocative feminine plural'] = root + 'e'
self.inflection['Comparative vocative neuter plural'] = root + 'a'
self.inflection['Comparative locative masculine plural'] = root + 'im(a)'
self.inflection['Comparative locative feminine plural'] = root + 'im(a)'
self.inflection['Comparative locative neuter plural'] = root + 'im(a)'
self.inflection['Comparative instrumental masculine plural'] = root + 'im(a)'
self.inflection['Comparative instrumental feminine plural'] = root + 'im(a)'
self.inflection['Comparative instrumental neuter plural'] = root + 'im(a)'
def parse_noun_declension(self, heading):
    """Extract singular/plural noun forms for each case from the heading text.

    Matching lines are assumed to appear in NOUN_CASES order; the Nth
    matching line supplies the forms for the Nth case.
    """
    case_index = 0
    for text_line in heading.text.splitlines():
        found = _noun_declension_regex.search(text_line)
        if found is None:
            continue
        groups = found.groupdict()
        case_name = NOUN_CASES[case_index]
        self.inflection[case_name + ' singular'] = groups['inflected_word1']
        self.inflection[case_name + ' plural'] = groups['inflected_word2']
        case_index += 1
def parse_conjugation(self, heading):
    """Scan the conjugation heading and record every verb form found.

    _conjugation_regex matches one form tag / inflected word pair at a
    time; the search restarts just past each match's start so that every
    entry is visited.  Each pair is committed via add_inflection once the
    *next* match is found (or after the loop for the final pair).
    """
    self.inflection['Infinitive'] = self.word
    match_start = -1   # start offset of the most recent match; -1 = none yet
    match_end = -1
    form = ''
    inflected_word = ''
    match = _conjugation_regex.search(heading.text)
    while match:
        if match_start >= 0:
            # A previous pair is pending; commit it before advancing.
            match_end = match.start() - 1
            self.add_inflection(form, inflected_word)
        match_start = match.start()
        form = match.groupdict()['form']
        inflected_word = match.groupdict()['inflected_word']
        match = _conjugation_regex.search(heading.text, match_start + 1)
    # Don't forget the last match
    if match_start >= 0:
        self.add_inflection(form, inflected_word)
def add_inflection(self, form, inflected_word):
    """Record the inflection-table entries derived from one parsed verb form.

    `form` is a short tag from the conjugation source (e.g. 'pr.1s',
    'app.ms'); a single tag may expand into many compound tenses built
    with the auxiliary verb "biti".  Unknown tags are reported on stdout
    and otherwise ignored.  Behavior (keys and values stored) matches the
    original if/elif chain; the tables below are just a compact encoding.
    """
    person_names = {'1': 'First', '2': 'Second', '3': 'Third'}
    number_names = {'s': 'singular', 'p': 'plural'}
    gender_names = {'m': 'masculine', 'f': 'feminine', 'n': 'neuter'}

    # Tags that store exactly one table entry.
    single = {
        'p.va': 'Past verbal adverb',
        'pr.va': 'Present verbal adverb',
        'vn': 'Verbal noun',
        'impt.2s': 'Second person singular imperative',
        'impt.1p': 'First person plural imperative',
        'impt.2p': 'Second person plural imperative',
    }
    # Simple tenses: present, past imperfect, past aorist.
    for tag, tense in (('pr', 'present'),
                       ('impf', 'past imperfect'),
                       ('a', 'past aorist')):
        for digit in '123':
            for num in 'sp':
                single['%s.%s%s' % (tag, digit, num)] = (
                    '%s person %s %s'
                    % (person_names[digit], number_names[num], tense))
    # Passive past participles.
    for g in 'mfn':
        for num in 'sp':
            single['ppp.%s%s' % (g, num)] = (
                'Passive past participle %s %s'
                % (gender_names[g], number_names[num]))

    if form in single:
        self.inflection[single[form]] = inflected_word
        return

    # Future I: infinitive (or stem) combined with the clitic forms of
    # "htjeti".  Croatian orthography separates the clitic with a space.
    future_keys = ('First person singular', 'Second person singular',
                   'Third person singular', 'First person plural',
                   'Second person plural', 'Third person plural')
    future_clitics = ('ću', 'ćeš', 'će', 'ćemo', 'ćete', 'će')
    if form == 'f1.hr':
        for key, clitic in zip(future_keys, future_clitics):
            self.inflection[key + ' future I (Croatian)'] = \
                inflected_word + ' ' + clitic
        return
    if form == 'f1.stem':
        for key, clitic in zip(future_keys, future_clitics):
            self.inflection[key + ' future I'] = inflected_word + clitic
        return

    # Active past participles also generate future II, past perfect,
    # pluperfect and both conditionals for all three persons.
    app_forms = {
        'app.ms': ('masculine', 'singular', 'bio'),
        'app.fs': ('feminine', 'singular', 'bila'),
        'app.ns': ('neuter', 'singular', 'bilo'),
        'app.mp': ('masculine', 'plural', 'bili'),
        'app.fp': ('feminine', 'plural', 'bile'),
        'app.np': ('neuter', 'plural', 'bila'),
    }
    if form in app_forms:
        gender, number, biti_participle = app_forms[form]
        if number == 'singular':
            budem = ('budem', 'budeš', 'bude')
            aux = ('sam', 'si', 'je')
            cond = ('bih', 'bi', 'bi')
        else:
            budem = ('budemo', 'budete', 'budu')
            aux = ('smo', 'ste', 'su')
            cond = ('bismo', 'biste', 'bi')
        self.inflection['Active past participle %s %s'
                        % (gender, number)] = inflected_word
        for i, person in enumerate(('First', 'Second', 'Third')):
            prefix = '%s person %s %s ' % (person, gender, number)
            self.inflection[prefix + 'future II'] = \
                budem[i] + ' ' + inflected_word
            self.inflection[prefix + 'past perfect'] = \
                inflected_word + ' ' + aux[i]
            self.inflection[prefix + 'past pluperfect'] = \
                '%s %s %s' % (biti_participle, aux[i], inflected_word)
            self.inflection[prefix + 'conditional I'] = \
                inflected_word + ' ' + cond[i]
            self.inflection[prefix + 'conditional II'] = \
                '%s %s %s' % (biti_participle, cond[i], inflected_word)
        return

    # Fixed: the original diagnostic opened '(' but never closed it.
    print('Unknown form: ', form, ' ', inflected_word, ' (', self.word, ')')
def _try(self, o):
try:
return o.__dict__
except:
return str(o)
def toJSON(self):
    """Serialize this entry as compact, key-sorted, single-line JSON."""
    encoded = json.dumps(self, default=lambda o: self._try(o),
                         sort_keys=True, indent=0, separators=(',', ':'))
    return encoded.replace('\n', '')
def __str__(self):
    """Human-readable summary: part of speech followed by the meanings."""
    return "{0}: {1}".format(self.part_of_speech, self.meanings) | peterjcarroll/yawp | yawp/dict/serbocroatian.py | Python | mit | 31,534 |
from datetime import datetime
import json
def read_json(filename):
    """Read and parse a JSON file.

    Returns the decoded object, or {} on any failure (missing file,
    invalid JSON, bad argument).  The broad catch is deliberate: callers
    treat this as best-effort and must cope with an empty result.
    """
    try:
        with open(filename) as fh:
            data = json.load(fh)
    except Exception:
        data = {}
    return data
def in_sec(length):
    """Convert a colon-separated duration ("ss", "mm:ss" or "hh:mm:ss")
    to a total number of seconds."""
    total = 0
    for part in length.split(':'):
        # Horner-style accumulation: each step shifts by one base-60 digit.
        total = total * 60 + int(part)
    return total
def future(cat):
    """Return the events in cat["events"] starting today or later,
    ordered by start date (lexicographic compare works for ISO dates)."""
    today = datetime.now().strftime('%Y-%m-%d')
    upcoming = [e for e in cat["events"].values() if e['event_start'] >= today]
    upcoming.sort(key=lambda e: e['event_start'])
    return upcoming
# vim: expandtab
| rollandf/codeandtalk.com | cat/tools.py | Python | apache-2.0 | 765 |
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from robotide.lib.robot.utils import (secs_to_timestr, timestr_to_secs,
IRONPYTHON, JYTHON, WINDOWS)
from robotide.lib.robot.errors import TimeoutError, DataError, FrameworkError
if JYTHON:
from .jython import Timeout
elif IRONPYTHON:
from .ironpython import Timeout
elif WINDOWS:
from .windows import Timeout
else:
from .posix import Timeout
class _Timeout(object):
    """Base implementation shared by test- and keyword-level timeouts.

    Subclasses set ``type`` ('Test' or 'Keyword').  ``secs <= 0`` means no
    timeout is configured; a timeout becomes "active" once start() runs.
    NOTE: Python 2 era code (``unicode``, ``cmp``, ``__nonzero__``).
    """

    def __init__(self, timeout=None, message='', variables=None):
        self.string = timeout or ''   # original timeout string, may contain ${variables}
        self.message = message
        self.secs = -1                # parsed timeout in seconds; -1 = not resolved
        self.starttime = -1           # -1 = not started yet
        self.error = None             # parse error text, raised lazily by run()
        if variables:
            self.replace_variables(variables)

    @property
    def active(self):
        # Started (start() has been called with a positive timeout).
        return self.starttime > 0

    def replace_variables(self, variables):
        """Resolve variables in the timeout string and message.

        An invalid value makes the timeout (almost) immediately expiring
        and records the error to be reported later from run().
        """
        try:
            self.string = variables.replace_string(self.string)
            if not self:
                return
            self.secs = timestr_to_secs(self.string)
            self.string = secs_to_timestr(self.secs)
            self.message = variables.replace_string(self.message)
        except (DataError, ValueError) as err:
            self.secs = 0.000001 # to make timeout active
            self.error = 'Setting %s timeout failed: %s' \
                         % (self.type.lower(), unicode(err))

    def start(self):
        if self.secs > 0:
            self.starttime = time.time()

    def time_left(self):
        """Seconds remaining (rounded to ms); -1 when not active."""
        if not self.active:
            return -1
        elapsed = time.time() - self.starttime
        # Timeout granularity is 1ms. Without rounding some timeout tests fail
        # intermittently on Windows, probably due to threading.Event.wait().
        return round(self.secs - elapsed, 3)

    def timed_out(self):
        return self.active and self.time_left() <= 0

    def __str__(self):
        return unicode(self).encode('utf-8')

    def __unicode__(self):
        return self.string

    def __cmp__(self, other):
        # Active timeouts sort before inactive ones, then by time remaining.
        return cmp(not self.active, not other.active) \
            or cmp(self.time_left(), other.time_left())

    def __nonzero__(self):
        # Truthy when a timeout string other than 'NONE' was configured.
        return bool(self.string and self.string.upper() != 'NONE')

    def run(self, runnable, args=None, kwargs=None):
        """Run ``runnable`` limited by the remaining time.

        Raises DataError for an invalid timeout value, FrameworkError if
        the timeout was never started, and TimeoutError if already expired.
        """
        if self.error:
            raise DataError(self.error)
        if not self.active:
            raise FrameworkError('Timeout is not active')
        timeout = self.time_left()
        if timeout <= 0:
            raise TimeoutError(self.get_message())
        executable = lambda: runnable(*(args or ()), **(kwargs or {}))
        return Timeout(timeout, self._timeout_error).execute(executable)

    def get_message(self):
        if not self.active:
            return '%s timeout not active.' % self.type
        if not self.timed_out():
            return '%s timeout %s active. %s seconds left.' \
                   % (self.type, self.string, self.time_left())
        return self._timeout_error

    @property
    def _timeout_error(self):
        # Custom message wins over the generic "exceeded" text.
        if self.message:
            return self.message
        return '%s timeout %s exceeded.' % (self.type, self.string)
class TestTimeout(_Timeout):
    """Test-level timeout that also tracks keyword timeouts seen during it."""
    type = 'Test'
    # Class-level default; an instance attribute is created only when a
    # keyword timeout actually occurs (latch semantics).
    _keyword_timeout_occurred = False

    def set_keyword_timeout(self, timeout_occurred):
        # Latches True: once any keyword timeout occurs it stays recorded.
        if timeout_occurred:
            self._keyword_timeout_occurred = True

    def any_timeout_occurred(self):
        """True if either this test timeout or any keyword timeout fired."""
        return self.timed_out() or self._keyword_timeout_occurred
class KeywordTimeout(_Timeout):
    """Keyword-level timeout; all behavior is inherited from _Timeout."""
    type = 'Keyword'
| fingeronthebutton/RIDE | src/robotide/lib/robot/running/timeouts/__init__.py | Python | apache-2.0 | 4,107 |
from django.conf.urls import patterns, url
from django.views.generic import RedirectView
from django.conf import settings
from . import views
# Reusable URL fragments for the product/version routes below.
products = r'/products/(?P<product>\w+)'
versions = r'/versions/(?P<versions>[;\w\.()]+)'
version = r'/versions/(?P<version>[;\w\.()]+)'
# Whether legacy redirects are permanent (HTTP 301); comes from site settings.
perm_legacy_redirect = settings.PERMANENT_LEGACY_REDIRECTS
# URL routes for the crashstats app.  All regex patterns are raw strings so
# backslash escapes (e.g. "\.") reach the regex engine verbatim instead of
# triggering invalid escape-sequence deprecation warnings.
urlpatterns = patterns(
    '',  # prefix
    url(r'^robots\.txt$',
        views.robots_txt,
        name='robots_txt'),
    url(r'^status/json/$',
        views.status_json,
        name='status_json'),
    url(r'^status/revision/$',
        views.status_revision,
        name='status_revision'),
    url(r'^crontabber-state/$',
        views.crontabber_state,
        name='crontabber_state'),
    url(r'^crashes-per-day/$',
        views.crashes_per_day,
        name='crashes_per_day'),
    url(r'^exploitability/$',
        views.exploitability_report,
        name='exploitability_report'),
    url(r'^report/index/(?P<crash_id>[\w-]+)$',
        views.report_index,
        name='report_index'),
    url(r'^search/quick/$',
        views.quick_search,
        name='quick_search'),
    url(r'^buginfo/bug', views.buginfo,
        name='buginfo'),
    url(r'^rawdumps/(?P<crash_id>[\w-]{36})-(?P<name>\w+)\.'
        r'(?P<extension>json|dmp|json\.gz)$',
        views.raw_data,
        name='raw_data_named'),
    url(r'^rawdumps/(?P<crash_id>[\w-]{36}).(?P<extension>json|dmp)$',
        views.raw_data,
        name='raw_data'),
    url(r'^login/$',
        views.login,
        name='login'),
    url(r'^graphics_report/$',
        views.graphics_report,
        name='graphics_report'),
    url(r'^about/throttling/$',
        views.about_throttling,
        name='about_throttling'),
    # if we do a permanent redirect, the browser will "cache" the redirect and
    # it will make it very hard to ever change the DEFAULT_PRODUCT
    url(r'^$',
        RedirectView.as_view(
            url='/home/product/%s' % settings.DEFAULT_PRODUCT,
            permanent=False  # this is not a legacy URL
        )),
    # redirect deceased Advanced Search URL to Super Search
    url(r'^query/$',
        RedirectView.as_view(
            url='/search/',
            query_string=True,
            permanent=True
        )),
    # redirect deceased Report List URL to Signature report
    url(r'^report/list$',
        RedirectView.as_view(
            pattern_name='signature:signature_report',
            query_string=True,
            permanent=True
        )),
    # redirect deceased Daily Crashes URL to Crasher per Day
    url(r'^daily$',
        RedirectView.as_view(
            pattern_name='crashstats:crashes_per_day',
            query_string=True,
            permanent=True
        )),
    # Redirect old independant pages to the unified Profile page.
    url(r'^your-crashes/$',
        RedirectView.as_view(
            url='/profile/',
            permanent=perm_legacy_redirect
        )),
    url(r'^permissions/$',
        RedirectView.as_view(
            url='/profile/',
            permanent=perm_legacy_redirect
        )),
    # Redirect deleted status page to monitoring page.
    url(
        r'^status/$',
        RedirectView.as_view(
            pattern_name='monitoring:index',
            permanent=not settings.DEBUG,
        ),
        name='status_redirect',
    ),
    # handle old-style URLs
    url(r'^products/(?P<product>\w+)/$',
        RedirectView.as_view(
            url='/home/products/%(product)s',
            permanent=perm_legacy_redirect
        )),
    url(r'^products/(?P<product>\w+)/versions/(?P<versions>[;\w\.()]+)/$',
        RedirectView.as_view(
            url='/home/products/%(product)s/versions/%(versions)s',
            permanent=perm_legacy_redirect
        )),
    url(r'^home' + products + '/versions/$',
        RedirectView.as_view(
            url='/home/products/%(product)s',
            permanent=perm_legacy_redirect
        )),
)
| AdrianGaudebert/socorro | webapp-django/crashstats/crashstats/urls.py | Python | mpl-2.0 | 3,962 |
from django.apps import AppConfig
class TestimonialsConfig(AppConfig):
    """Django application configuration for the testimonials app."""
    # Dotted path of the application, used by Django's app registry.
    name = 'testimonials'
| slavpetroff/sweetshop | backend/django/apps/testimonials/apps.py | Python | mit | 99 |
from ..ndtypes import (make_slice_type, make_array_type, ptr_type,
ArrayT, TupleT, ScalarT, Type, PtrT, Int64, IntT, Float64)
from ..syntax import (Alloc, AllocArray, ArrayView, Const, Index, Slice, Struct, Var, Select, Expr)
from ..syntax.helpers import (const, zero_i64, wrap_if_constant, slice_none, unwrap_constant)
from arith_builder import ArithBuilder
class ArrayBuilder(ArithBuilder):
  """
  Builder for constructing arrays and getting their properties
  """
  def elt_type(self, x):
    """Element type of an array (or array type); scalars map to themselves."""
    if isinstance(x, Type):
      try:
        return x.elt_type
      except AttributeError:
        # narrowed from bare except: non-array types have no elt_type,
        # in which case the type itself is the answer
        return x
    elif self.is_array(x):
      return x.type.elt_type
    else:
      return x.type

  def alloc_array(self, elt_t, dims, name = "array",
                  explicit_struct = False,
                  array_view = False,
                  order = "C"):
    """
    Given an element type and sequence of expressions denoting each dimension
    size, generate code to allocate an array and its shape/strides metadata. For
    now I'm assuming that all arrays are in row-major, eventually we should make
    the layout an option.
    """
    assert order == "C", "Only row-major layout supported so far, not %s" % order
    if self.is_tuple(dims):
      shape = dims
      dims = self.tuple_elts(shape)
    else:
      if not isinstance(dims, (list, tuple)):
        dims = [dims]
      shape = self.tuple(dims, "shape", explicit_struct = explicit_struct)
    rank = len(dims)
    array_t = make_array_type(elt_t, rank)
    if explicit_struct or array_view:
      nelts = self.prod(dims, name = "nelts")
      ptr_t = ptr_type(elt_t)
      ptr_var = self.assign_name(Alloc(elt_t, nelts, type = ptr_t), "data_ptr")
      stride_elts = [const(1)]
      # assume row-major for now!
      for d in reversed(dims[1:]):
        next_stride = self.mul(stride_elts[0], d, "dim")
        stride_elts = [next_stride] + stride_elts
      strides = self.tuple(stride_elts, "strides", explicit_struct = explicit_struct)
      if explicit_struct:
        array = Struct([ptr_var, shape, strides, zero_i64, nelts], type = array_t)
      else:
        array = ArrayView(data = ptr_var,
                          shape = shape,
                          strides = strides,
                          offset = zero_i64,
                          size = nelts,
                          type = array_t)
    else:
      array = AllocArray(shape, elt_type = elt_t, type = array_t)
    if name is None:
      return array
    return self.assign_name(array, name)

  def len(self, array):
    """Length of the array along its leading axis."""
    return self.shape(array, 0)

  def nelts(self, array, explicit_struct = False):
    """Total number of elements (product of all dimension sizes)."""
    shape_elts = self.tuple_elts(self.shape(array), explicit_struct = explicit_struct)
    return self.prod(shape_elts, name = "nelts")

  def rank(self, value):
    """Number of dimensions; non-arrays are treated as rank 0."""
    if self.is_array(value):
      return value.type.rank
    else:
      return 0

  def shape(self, array, dim = None, explicit_struct = False, temp = True):
    """Shape tuple of an array, or the size along one axis when dim given.

    Non-array values yield an empty tuple regardless of dim.
    """
    if array.type.__class__ is ArrayT:
      shape = self.attr(array, "shape", temp = temp)
      if dim is None:
        return shape
      if isinstance(dim, Expr):
        dim = unwrap_constant(dim)
      assert isinstance(dim, (int, long)), "Expected array dimension to be an int, got %s" % dim
      dim_value = self.tuple_proj(shape, dim, explicit_struct = explicit_struct)
      if temp:
        return self.assign_name(dim_value, "dim%d" % dim)
      else:
        return dim_value
    else:
      return self.tuple([])

  def strides(self, array, dim = None, explicit_struct = False):
    """Strides tuple of an array, or the stride along one axis."""
    assert array.type.__class__ is ArrayT
    strides = self.attr(array, "strides")
    if dim is None:
      return strides
    else:
      elt_value = self.tuple_proj(strides, dim, explicit_struct = explicit_struct)
      return self.assign_name(elt_value, "stride%d" % dim)

  def slice_value(self, start, stop, step):
    """Build a Slice expression from start/stop/step expressions."""
    slice_t = make_slice_type(start.type, stop.type, step.type)
    return Slice(start, stop, step, type = slice_t)

  def build_slice_indices(self, rank, axis, idx):
    """
    Build index tuple to pull out the 'idx' element along the given axis
    """
    if rank == 1:
      assert axis == 0
      return idx
    indices = []
    for i in xrange(rank):
      if i == axis:
        indices.append(idx)
      else:
        # all other axes are fully sliced
        s = self.slice_value(self.none, self.none, self.int(1))
        indices.append(s)
    return self.tuple(indices)

  def elts_in_slice(self, start, stop, step):
    """Number of elements in [start, stop) with the given step, clamped at 0."""
    stop_minus_start = self.sub(stop, start, name = "stop_minus_start")
    nelts = self.div(self.cast(stop_minus_start, Float64), step, name = "nelts")
    ceil = self.ceil(nelts)
    nelts = self.cast(ceil, Int64)
    return self.max(nelts, self.zero(nelts.type))

  def slice_along_axis(self, arr, axis, idx):
    """
    Pull out a slice if the array has the given axis,
    otherwise just return the array
    """
    r = self.rank(arr)
    if isinstance(axis, Expr):
      axis = unwrap_constant(axis)
    idx = wrap_if_constant(idx)
    if r == 1 and (axis is None or axis == 0):
      return self.index(arr, idx)
    elif axis is None:
      # no axis: index the same position along every dimension
      if isinstance(idx.type, ScalarT):
        idx = self.tuple((idx,) * r)
      return self.index(arr, idx)
    elif r > axis:
      index_tuple = self.build_slice_indices(r, axis, idx)
      return self.index(arr, index_tuple)
    else:
      return arr

  def output_slice(self, output, axis, idx):
    """
    Create an expression which acts as an LHS output location
    for a slice throught the variable 'output' along the given axis
    """
    r = self.rank(output)
    if r > 1:
      output_indices = self.build_slice_indices(r, axis, idx)
    elif r == 1:
      output_idx = self.slice_value(idx, self.none, self.int(1))
      output_indices = self.tuple([output_idx])
    else:
      output_idx = self.slice_value(self.none, self.none, self.none)
      output_indices = self.tuple([output_idx])
    return self.index(output, output_indices)

  def size_along_axis(self, value, axis):
    """Size of 'value' along the given axis (alias for shape lookup)."""
    return self.shape(value, axis)

  def check_equal_sizes(self, sizes):
    # Intentional no-op hook; subclasses may add runtime size checks.
    pass

  def index(self, arr, idx, temp = False, name = None):
    """Index into array or tuple differently depending on the type"""
    temp = temp or name is not None
    arr_t = arr.type
    if isinstance(arr_t, ScalarT):
      # even though it's not correct externally, it's
      # often more convenient to treat indexing
      # into scalars as the identity function.
      # Just be sure to catch this as an error in
      # the user's code earlier in the pipeline.
      return arr
    if isinstance(arr_t, TupleT):
      # removed a dead duplicate Const-unwrapping check: after this
      # unwrap the index is asserted to be a plain int
      if isinstance(idx, Const):
        idx = idx.value
      assert isinstance(idx, int), \
          "Index into tuple must be an integer, got %s" % idx
      proj = self.tuple_proj(arr, idx)
      if temp:
        return self.assign_name(proj, "tuple_elt%d" % idx if name is None else name)
      else:
        return proj
    if self.is_tuple(idx):
      indices = self.tuple_elts(idx)
    elif isinstance(idx, (list,tuple)) or hasattr(idx, '__iter__'):
      indices = tuple(map(wrap_if_constant,idx))
    else:
      indices = (wrap_if_constant(idx),)
    n_required = arr_t.rank
    n_indices = len(indices)
    if n_indices < n_required:
      # all unspecified dimensions are considered fully sliced
      extra = (slice_none,) * (n_required - n_indices)
      indices = indices + extra
    if len(indices) > 1:
      idx = self.tuple(indices, name = name)
    else:
      idx = indices[0]
    t = arr_t.index_type(idx.type)
    idx_expr = Index(arr, idx, type=t)
    if temp:
      return self.assign_name(idx_expr, "array_elt" if name is None else name)
    else:
      return idx_expr

  def index_along_axis(self, arr, axis, idx, name=None):
    """Index one position along 'axis', fully slicing every other axis."""
    if arr.type.__class__ is not ArrayT:
      return arr
    assert isinstance(axis, int), \
        "Axis must be a known constant int, got: " + str(axis)
    indices = []
    for i in xrange(arr.type.rank):
      if i == axis:
        indices.append(wrap_if_constant(idx))
      else:
        indices.append(slice_none)
    index_tuple = self.tuple(indices)
    result_t = arr.type.index_type(index_tuple.type)
    idx_expr = Index(arr, index_tuple, type=result_t)
    if name:
      return self.assign_name(idx_expr, name)
    else:
      return idx_expr

  def setidx(self, arr, idx, v):
    """Assign value v into arr[idx]."""
    self.assign(self.index(arr, idx, temp=False), v)

  def array_view(self, data, shape, strides, offset, nelts):
    """Construct an ArrayView from data pointer + metadata expressions.

    Scalar shape/strides are promoted to 1-tuples; shape and strides
    must agree on the number of dimensions.
    """
    assert isinstance(data.type, PtrT), \
        "Data field of array must be a pointer, got %s" % data.type
    if data.__class__ is not Var:
      data = self.assign_name(data, "data_ptr")
    if isinstance(shape.type, ScalarT):
      shape = self.tuple([shape])
    assert isinstance(shape.type, TupleT), \
        "Shape of array must be a tuple, got: %s" % shape.type
    if isinstance(strides.type, ScalarT):
      strides = self.tuple(strides)
    assert isinstance(strides.type, TupleT), \
        "Strides of array must be a tuple, got: %s" % strides.type
    ndims = len(strides.type.elt_types)
    assert ndims == len(shape.type.elt_types)
    elt_t = data.type.elt_type
    array_t = make_array_type(elt_t, ndims)
    return ArrayView(data = data, shape = shape, strides = strides,
                     offset = offset, size = nelts,
                     type = array_t)
| pombredanne/parakeet | parakeet/builder/array_builder.py | Python | bsd-3-clause | 9,545 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import testtools
import uuid
from openstack.message.v2 import subscription
# Sample subscription payload *without* explicit client/project ids, so the
# create/get code paths must generate them.
FAKE1 = {
    "age": 1632,
    "id": "576b54963990b48c644bb7e7",
    "subscriber": "http://10.229.49.117:5679",
    "subscription_id": "576b54963990b48c644bb7e7",
    "source": "test",
    "ttl": 3600,
    "options": {
        "name": "test"
    },
    "queue_name": "queue1"
}
# Same payload but with pre-existing client/project ids that must be promoted
# into the request headers instead of being generated.
FAKE2 = {
    "age": 1632,
    "id": "576b54963990b48c644bb7e7",
    "subscriber": "http://10.229.49.117:5679",
    "subscription_id": "576b54963990b48c644bb7e7",
    "source": "test",
    "ttl": 3600,
    "options": {
        "name": "test"
    },
    "queue_name": "queue1",
    "client_id": "OLD_CLIENT_ID",
    "project_id": "OLD_PROJECT_ID"
}
class TestSubscription(testtools.TestCase):
def test_basic(self):
    """Check static resource metadata and the allowed operations."""
    res = subscription.Subscription()
    self.assertEqual("/queues/%(queue_name)s/subscriptions", res.base_path)
    self.assertEqual("subscriptions", res.resources_key)
    self.assertEqual("messaging", res.service.service_type)
    for allowed in (res.allow_create, res.allow_get,
                    res.allow_delete, res.allow_list):
        self.assertTrue(allowed)
def test_make_it(self):
    """Every field of the payload must round-trip onto the resource."""
    sub = subscription.Subscription.new(**FAKE2)
    for attr in ("age", "id", "options", "source", "subscriber",
                 "subscription_id", "ttl", "queue_name", "client_id",
                 "project_id"):
        self.assertEqual(FAKE2[attr], getattr(sub, attr))
@mock.patch.object(uuid, "uuid4")
def test_create(self, mock_uuid):
    """Without explicit ids, Client-ID/X-PROJECT-ID headers are generated."""
    sess = mock.Mock()
    resp = mock.Mock()
    sess.post.return_value = resp
    sess.get_project_id.return_value = "NEW_PROJECT_ID"
    mock_uuid.return_value = "NEW_CLIENT_ID"
    # Deep copy: queue_name is popped below, FAKE1 must stay intact.
    FAKE = copy.deepcopy(FAKE1)
    sot = subscription.Subscription(**FAKE1)
    sot._translate_response = mock.Mock()
    res = sot.create(sess)
    # queue_name goes into the URL, not the JSON body.
    url = "/queues/%(queue)s/subscriptions" % {
        "queue": FAKE.pop("queue_name")}
    headers = {"Client-ID": "NEW_CLIENT_ID",
               "X-PROJECT-ID": "NEW_PROJECT_ID"}
    sess.post.assert_called_once_with(url, endpoint_filter=sot.service,
                                      headers=headers, json=FAKE)
    sess.get_project_id.assert_called_once_with()
    self.assertEqual(sot, res)
def test_create_client_id_project_id_exist(self):
sess = mock.Mock()
resp = mock.Mock()
sess.post.return_value = resp
FAKE = copy.deepcopy(FAKE2)
sot = subscription.Subscription(**FAKE2)
sot._translate_response = mock.Mock()
res = sot.create(sess)
url = "/queues/%(queue)s/subscriptions" % {
"queue": FAKE.pop("queue_name")}
headers = {"Client-ID": FAKE.pop("client_id"),
"X-PROJECT-ID": FAKE.pop("project_id")}
sess.post.assert_called_once_with(url, endpoint_filter=sot.service,
headers=headers, json=FAKE)
self.assertEqual(sot, res)
@mock.patch.object(uuid, "uuid4")
def test_get(self, mock_uuid):
sess = mock.Mock()
resp = mock.Mock()
sess.get.return_value = resp
sess.get_project_id.return_value = "NEW_PROJECT_ID"
mock_uuid.return_value = "NEW_CLIENT_ID"
sot = subscription.Subscription(**FAKE1)
sot._translate_response = mock.Mock()
res = sot.get(sess)
url = "queues/%(queue)s/subscriptions/%(subscription)s" % {
"queue": FAKE1["queue_name"], "subscription": FAKE1["id"]}
headers = {"Client-ID": "NEW_CLIENT_ID",
"X-PROJECT-ID": "NEW_PROJECT_ID"}
sess.get.assert_called_with(url, endpoint_filter=sot.service,
headers=headers)
sess.get_project_id.assert_called_once_with()
sot._translate_response.assert_called_once_with(resp)
self.assertEqual(sot, res)
def test_get_client_id_project_id_exist(self):
sess = mock.Mock()
resp = mock.Mock()
sess.get.return_value = resp
sot = subscription.Subscription(**FAKE2)
sot._translate_response = mock.Mock()
res = sot.get(sess)
url = "queues/%(queue)s/subscriptions/%(subscription)s" % {
"queue": FAKE2["queue_name"], "subscription": FAKE2["id"]}
headers = {"Client-ID": "OLD_CLIENT_ID",
"X-PROJECT-ID": "OLD_PROJECT_ID"}
sess.get.assert_called_with(url, endpoint_filter=sot.service,
headers=headers)
sot._translate_response.assert_called_once_with(resp)
self.assertEqual(sot, res)
@mock.patch.object(uuid, "uuid4")
def test_delete(self, mock_uuid):
sess = mock.Mock()
resp = mock.Mock()
sess.delete.return_value = resp
sess.get_project_id.return_value = "NEW_PROJECT_ID"
mock_uuid.return_value = "NEW_CLIENT_ID"
sot = subscription.Subscription(**FAKE1)
sot._translate_response = mock.Mock()
sot.delete(sess)
url = "queues/%(queue)s/subscriptions/%(subscription)s" % {
"queue": FAKE1["queue_name"], "subscription": FAKE1["id"]}
headers = {"Client-ID": "NEW_CLIENT_ID",
"X-PROJECT-ID": "NEW_PROJECT_ID"}
sess.delete.assert_called_with(url, endpoint_filter=sot.service,
headers=headers)
sess.get_project_id.assert_called_once_with()
sot._translate_response.assert_called_once_with(resp, has_body=False)
def test_delete_client_id_project_id_exist(self):
sess = mock.Mock()
resp = mock.Mock()
sess.delete.return_value = resp
sot = subscription.Subscription(**FAKE2)
sot._translate_response = mock.Mock()
sot.delete(sess)
url = "queues/%(queue)s/subscriptions/%(subscription)s" % {
"queue": FAKE2["queue_name"], "subscription": FAKE2["id"]}
headers = {"Client-ID": "OLD_CLIENT_ID",
"X-PROJECT-ID": "OLD_PROJECT_ID"}
sess.delete.assert_called_with(url, endpoint_filter=sot.service,
headers=headers)
sot._translate_response.assert_called_once_with(resp, has_body=False)
| briancurtin/python-openstacksdk | openstack/tests/unit/message/v2/test_subscription.py | Python | apache-2.0 | 7,255 |
# -*- coding: utf-8 -*-
# Copyright Martin Manns
# Distributed under the terms of the GNU General Public License
# --------------------------------------------------------------------
# pyspread is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyspread is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyspread. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------
"""
test_cli
========
Unit tests for cli.py
"""
from argparse import Namespace
from contextlib import contextmanager
from os.path import abspath, dirname, join
import sys
from unittest.mock import patch
from pathlib import PosixPath
import pytest
PYSPREADPATH = abspath(join(dirname(__file__) + "/.."))
LIBPATH = abspath(PYSPREADPATH + "/lib")
@contextmanager
def insert_path(path):
    """Temporarily prepend *path* to sys.path.

    The inserted entry is removed again even if the ``with`` body raises,
    so an exception can no longer leak a stray entry onto sys.path.
    """
    sys.path.insert(0, path)
    try:
        yield
    finally:
        sys.path.pop(0)
with insert_path(PYSPREADPATH):
from ..cli import PyspreadArgumentParser
# (argv, expected parse result); an expected result of None means the
# parser is expected to exit (e.g. for --help / --version).
param_test_cli = [
    (['pyspread'],
     Namespace(file=None, default_settings=False)),
    (['pyspread', 'test.pys'],
     Namespace(file=PosixPath("test.pys"), default_settings=False)),
    (['pyspread', '--help'],
     None),
    (['pyspread', '--version'],
     None),
    (['pyspread', '--default-settings'],
     Namespace(file=None, default_settings=True)),
]
@pytest.mark.parametrize("argv, res", param_test_cli)
def test_cli(argv, res):
    """Each CLI invocation parses to the expected namespace or exits cleanly."""
    with patch('argparse._sys.argv', argv):
        parser = PyspreadArgumentParser()
        if res is None:
            # --help / --version print their output and exit with status 0
            with pytest.raises(SystemExit) as exc:
                parser.parse_known_args()
            assert exc.value.code == 0
            return
        args, unknown = parser.parse_known_args()
        assert args == res
| manns/pyspread | pyspread/test/test_cli.py | Python | gpl-3.0 | 2,217 |
'''
Low-level programming constructs for key-value stores
Originally developed as part of Zephyr
https://zephyr.space/
'''
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import super
from future import standard_library
standard_library.install_aliases()
from builtins import object
import warnings
import numpy as np
from functools import reduce
from future.utils import with_metaclass
class ClassProperty(property):
    '''
    Descriptor that exposes a wrapped classmethod as a read-only
    class-level attribute.
    '''

    def __get__(self, obj, objtype):
        # Bind the stored classmethod to the owning class and call it;
        # the instance (if any) is deliberately ignored.
        return self.fget.__get__(None, objtype)()
class AMMetaClass(type):
    '''
    Meta class that enables AttributeMapper functionality, including inheritance
    in the dictionary 'initMap'.
    '''

    def __new__(mcs, name, bases, attrs):
        'Build a new subclass of AttributeMapper'

        # Merge the initMap of every base (reversed so the leftmost base
        # wins), then overlay this class's own initMap.
        baseMaps = [getattr(base, 'initMap', {}) for base in bases][::-1]
        baseMaps.append(attrs.get('initMap', {}))

        initMap = {}
        for baseMap in baseMaps:
            initMap.update(baseMap)

        # A subclass can drop an inherited key by mapping it to None.
        # BUG FIX: iterate over a snapshot of the keys -- deleting from a
        # dict while iterating it raises RuntimeError.
        for key in list(initMap):
            if initMap[key] is None:
                del initMap[key]

        attrs['initMap'] = initMap

        # maskKeys accumulate (set union) across the inheritance tree
        baseMasks = reduce(set.union, (getattr(base, 'maskKeys', set()) for base in bases))
        maskKeys = set.union(baseMasks, attrs.get('maskKeys', set()))
        if maskKeys:
            attrs['maskKeys'] = maskKeys

        return type.__new__(mcs, name, bases, attrs)

    def __call__(cls, *args, **kwargs):
        'Instantiate a subclass of AttributeMapper'

        if not args:
            raise TypeError('__init__() takes at least 2 arguments (1 given)')

        systemConfig = args[0]
        obj = cls.__new__(cls)

        with warnings.catch_warnings():
            warnings.simplefilter('ignore')

            for key in obj.initMap:
                # initMap[key] is (required, rename, type); see AttributeMapper
                if (key not in systemConfig) and obj.initMap[key][0]:
                    raise ValueError('Class {0!s} requires parameter \'{1!s}\''.format(cls.__name__, key))
                if key in systemConfig:
                    if obj.initMap[key][2] is None:
                        def typer(x):
                            return x
                    else:
                        def typer(x):
                            newtype = obj.initMap[key][2]
                            try:
                                return obj.initMap[key][2](x)
                            except TypeError:
                                # complex values cast to float keep only
                                # their real part
                                if np.iscomplex(x) and issubclass(newtype, np.floating):
                                    return typer(x.real)
                                raise

                    if obj.initMap[key][1] is None:
                        setattr(obj, key, typer(systemConfig[key]))
                    else:
                        setattr(obj, obj.initMap[key][1], typer(systemConfig[key]))

        obj.__init__(*args, **kwargs)

        return obj
class AttributeMapper(with_metaclass(AMMetaClass, object)):
    '''
    Base class that maps keys of a systemConfig dictionary onto instance
    attributes, as directed by the class attribute 'initMap'.

    Each initMap entry maps an input key to a three-element tuple:

        (required, rename, type)

    For example:

        initMap = {
        #   Argument        Required    Rename as ...   Store as type
            'c':            (True,      '_c',           np.complex128),
            'rho':          (False,     '_rho',         np.float64),
            'freq':         (True,      None,           np.complex128),
            'dx':           (False,     '_dx',          np.float64),
            'nx':           (True,      None,           np.int64),
            'freeSurf':     (False,     '_freeSurf',    list),
        }

    The metaclass (AMMetaClass) interprets the tuple as follows:

    - If the first element is True and the key is missing from the
      systemConfig argument, instantiation raises an exception.
    - If the second element is None, the value is stored under the
      original key; a string overrides the attribute name.
    - If the third element is None, the value is stored unmodified;
      a callable is applied first (e.g. for copying or typecasting).

    NB: complex numpy inputs typecast to a float keep only their real
    part; the imaginary part is discarded.
    '''

    def __init__(self, systemConfig):
        '''
        AttributeMapper(systemConfig)

        Args:
            systemConfig (dict): A set of setup keys
        '''

        pass

    @ClassProperty
    @classmethod
    def required(cls):
        'The set of initMap keys that must be supplied'
        return {key for key, spec in cls.initMap.items() if spec[0]}

    @ClassProperty
    @classmethod
    def optional(cls):
        'The set of initMap keys that may be omitted'
        return {key for key, spec in cls.initMap.items() if not spec[0]}
class SCFilter(object):
    '''
    A SCFilter class is initialized with a list of classes as arguments.
    For any of those classes that are AttributeMapper subclasses, SCFilter
    determines the required fields in their initMap trees, and the optional
    fields. When called, the SCFilter discards any key in the passed dictionary
    that does not match one of those fields, and raises an error if any of the
    required fields are not present.
    '''

    def __init__(self, clslist):
        '''
        SCFilter(clslist)

        Args:
            clslist (list): List of classes from which to build the filter

        Returns:
            new SCFilter instance
        '''

        # allow a bare class to be passed instead of a list
        if not hasattr(clslist, '__contains__'):
            clslist = [clslist]

        self.required = reduce(set.union, (cls.required for cls in clslist if issubclass(cls, AttributeMapper)))
        self.optional = reduce(set.union, (cls.optional for cls in clslist if issubclass(cls, AttributeMapper)))
        # a key required by one class may be optional in another; keep it
        # only in 'required'
        self.optional.symmetric_difference_update(self.required)

    def __call__(self, systemConfig):
        '''
        Args:
            systemConfig (dict): A systemConfig dictionary to filter

        Returns:
            dict: Filtered dictionary

        Raises:
            ValueError: If a required key is not in the systemConfig
        '''

        for key in self.required:
            if key not in systemConfig:
                # BUG FIX: this previously referenced an undefined name
                # ``cls``, raising NameError instead of the intended
                # ValueError
                raise ValueError(
                    '{0!s} requires parameter \'{1!s}\''.format(
                        type(self).__name__, key))

        return {key: systemConfig[key] for key in set.union(self.required, self.optional) if key in systemConfig}
class BaseSCCache(AttributeMapper):
    '''
    AttributeMapper subclass that keeps a (maskKeys-filtered) copy of the
    systemConfig dictionary it was built from, and drops any cached
    attributes whenever that copy is replaced.
    '''

    maskKeys = set()
    cacheItems = []

    def __init__(self, systemConfig):
        super(BaseSCCache, self).__init__(systemConfig)
        # store everything except the masked keys; assigning the property
        # also clears the cache
        self.systemConfig = {key: value
                             for key, value in systemConfig.items()
                             if key not in self.maskKeys}

    @property
    def systemConfig(self):
        return self._systemConfig

    @systemConfig.setter
    def systemConfig(self, value):
        self._systemConfig = value
        self.clearCache()

    def clearCache(self):
        'Clears cached items (e.g., when model is reset).'
        for attr in self.cacheItems:
            if hasattr(self, attr):
                delattr(self, attr)
| bsmithyman/galoshes | galoshes/meta.py | Python | mit | 7,934 |
"""
========
numpydoc
========
Sphinx extension that handles docstrings in the Numpy standard format. [1]
It will:
- Convert Parameters etc. sections to field lists.
- Convert See Also section to a See also entry.
- Renumber references.
- Extract the signature from the docstring, if it can't be determined otherwise.
.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard
"""
import sphinx
if sphinx.__version__ < '1.0.1':
raise RuntimeError("Sphinx 1.0.1 or newer is required")
import os, re, pydoc
from docscrape_sphinx import get_doc_object, SphinxDocString
from sphinx.util.compat import Directive
import inspect
def mangle_docstrings(app, what, name, obj, options, lines,
                      reference_offset=[0]):
    # Rewrite a NumPy-format docstring (``lines``, modified in place) into
    # Sphinx/reST form, then renumber its references so they stay unique
    # across the whole build.
    # NOTE(review): ``reference_offset=[0]`` is a deliberate mutable default,
    # used as persistent cross-call state for the renumbering.
    # NOTE(review): the ``ur'...'`` literals and ``unicode`` below are
    # Python 2-only; this module cannot run unmodified under Python 3.
    cfg = dict(use_plots=app.config.numpydoc_use_plots,
               show_class_members=app.config.numpydoc_show_class_members)
    if what == 'module':
        # Strip top title
        title_re = re.compile(ur'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*',
                              re.I|re.S)
        lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n")
    else:
        doc = get_doc_object(obj, what, u"\n".join(lines), config=cfg)
        lines[:] = unicode(doc).split(u"\n")
    if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
           obj.__name__:
        # append an "edit this" link built from the configured template
        if hasattr(obj, '__module__'):
            v = dict(full_name=u"%s.%s" % (obj.__module__, obj.__name__))
        else:
            v = dict(full_name=obj.__name__)
        lines += [u'', u'.. htmlonly::', '']
        lines += [u' %s' % x for x in
                  (app.config.numpydoc_edit_link % v).split("\n")]
    # replace reference numbers so that there are no duplicates
    references = []
    for line in lines:
        line = line.strip()
        # NOTE(review): the character class is not repeated (no ``+``), so
        # only single-character reference labels are ever collected --
        # looks like a latent bug; confirm against upstream numpydoc.
        m = re.match(ur'^.. \[([a-z0-9_.-])\]', line, re.I)
        if m:
            references.append(m.group(1))
    # start renaming from the longest string, to avoid overwriting parts
    references.sort(key=lambda x: -len(x))
    if references:
        for i, line in enumerate(lines):
            for r in references:
                # numeric labels get an "R" prefix plus the global offset;
                # other labels get the offset appended
                if re.match(ur'^\d+$', r):
                    new_r = u"R%d" % (reference_offset[0] + int(r))
                else:
                    new_r = u"%s%d" % (r, reference_offset[0])
                lines[i] = lines[i].replace(u'[%s]_' % r,
                                            u'[%s]_' % new_r)
                lines[i] = lines[i].replace(u'.. [%s]' % r,
                                            u'.. [%s]' % new_r)
    reference_offset[0] += len(references)
def mangle_signature(app, what, name, obj, options, sig, retann):
    # Replace the autodoc-detected signature with the one declared in the
    # object's numpydoc 'Signature' section, when present.

    # Do not try to inspect classes that don't define `__init__`
    if inspect.isclass(obj):
        if (not hasattr(obj, '__init__') or
                'initializes x; see ' in pydoc.getdoc(obj.__init__)):
            return '', ''

    if not callable(obj) and not hasattr(obj, '__argspec_is_invalid_'):
        return
    if not hasattr(obj, '__doc__'):
        return

    doc = SphinxDocString(pydoc.getdoc(obj))
    if doc['Signature']:
        # keep only the parenthesized argument list
        sig = re.sub(u"^[^(]*", u"", doc['Signature'])
    return sig, u''
def setup(app, get_doc_object_=get_doc_object):
    # Sphinx extension entry point: install the docstring/signature
    # mangling hooks, declare the numpydoc configuration values, and
    # register the mangling domains.
    global get_doc_object
    get_doc_object = get_doc_object_

    app.connect('autodoc-process-docstring', mangle_docstrings)
    app.connect('autodoc-process-signature', mangle_signature)
    app.add_config_value('numpydoc_edit_link', None, False)
    app.add_config_value('numpydoc_use_plots', None, False)
    app.add_config_value('numpydoc_show_class_members', True, True)

    # Extra mangling domains
    app.add_domain(NumpyPythonDomain)
    app.add_domain(NumpyCDomain)
#------------------------------------------------------------------------------
# Docstring-mangling domains
#------------------------------------------------------------------------------
from docutils.statemachine import ViewList
from sphinx.domains.c import CDomain
from sphinx.domains.python import PythonDomain
class ManglingDomainBase(object):
    """Mixin that wraps a Sphinx domain's directives so their content is
    run through the numpydoc manglers before normal processing."""

    directive_mangling_map = {}

    def __init__(self, *a, **kw):
        super(ManglingDomainBase, self).__init__(*a, **kw)
        self.wrap_mangling_directives()

    def wrap_mangling_directives(self):
        # swap each listed directive for a mangling wrapper that treats
        # its content as the mapped object type
        for directive_name, object_type in list(self.directive_mangling_map.items()):
            self.directives[directive_name] = wrap_mangling_directive(
                self.directives[directive_name], object_type)
class NumpyPythonDomain(ManglingDomainBase, PythonDomain):
    # Python domain registered under the name 'np'; each directive's
    # content is mangled as the mapped object type.
    name = 'np'
    directive_mangling_map = {
        'function': 'function',
        'class': 'class',
        'exception': 'class',
        'method': 'function',
        'classmethod': 'function',
        'staticmethod': 'function',
        'attribute': 'attribute',
    }
class NumpyCDomain(ManglingDomainBase, CDomain):
    # C domain registered under the name 'np-c'; each directive's content
    # is mangled as the mapped object type.
    name = 'np-c'
    directive_mangling_map = {
        'function': 'function',
        'member': 'attribute',
        'macro': 'function',
        'type': 'class',
        'var': 'object',
    }
def wrap_mangling_directive(base_directive, objtype):
    # Build a directive subclass that runs its content through
    # mangle_docstrings (as object type ``objtype``) before delegating to
    # the wrapped ``base_directive``.
    class directive(base_directive):
        def run(self):
            env = self.state.document.settings.env

            # best-effort extraction of the object name from the first
            # directive argument (strip any prefix and trailing signature)
            name = None
            if self.arguments:
                m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0])
                name = m.group(2).strip()

            # NOTE(review): assumes self.arguments is non-empty whenever
            # name is still unset -- otherwise this raises IndexError.
            if not name:
                name = self.arguments[0]

            lines = list(self.content)
            mangle_docstrings(env.app, objtype, name, None, None, lines)
            self.content = ViewList(lines, self.content.parent)

            return base_directive.run(self)

    return directive
| selimnairb/2014-02-25-swctest | lessons/thw-documentation/sphinxext/numpydoc.py | Python | bsd-2-clause | 5,688 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: throttlerdata.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='throttlerdata.proto',
package='throttlerdata',
syntax='proto3',
serialized_pb=_b('\n\x13throttlerdata.proto\x12\rthrottlerdata\"\x11\n\x0fMaxRatesRequest\"{\n\x10MaxRatesResponse\x12\x39\n\x05rates\x18\x01 \x03(\x0b\x32*.throttlerdata.MaxRatesResponse.RatesEntry\x1a,\n\nRatesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01\"!\n\x11SetMaxRateRequest\x12\x0c\n\x04rate\x18\x01 \x01(\x03\"#\n\x12SetMaxRateResponse\x12\r\n\x05names\x18\x01 \x03(\tb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_MAXRATESREQUEST = _descriptor.Descriptor(
name='MaxRatesRequest',
full_name='throttlerdata.MaxRatesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=38,
serialized_end=55,
)
_MAXRATESRESPONSE_RATESENTRY = _descriptor.Descriptor(
name='RatesEntry',
full_name='throttlerdata.MaxRatesResponse.RatesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='throttlerdata.MaxRatesResponse.RatesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='throttlerdata.MaxRatesResponse.RatesEntry.value', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=136,
serialized_end=180,
)
_MAXRATESRESPONSE = _descriptor.Descriptor(
name='MaxRatesResponse',
full_name='throttlerdata.MaxRatesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rates', full_name='throttlerdata.MaxRatesResponse.rates', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_MAXRATESRESPONSE_RATESENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=57,
serialized_end=180,
)
_SETMAXRATEREQUEST = _descriptor.Descriptor(
name='SetMaxRateRequest',
full_name='throttlerdata.SetMaxRateRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rate', full_name='throttlerdata.SetMaxRateRequest.rate', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=182,
serialized_end=215,
)
_SETMAXRATERESPONSE = _descriptor.Descriptor(
name='SetMaxRateResponse',
full_name='throttlerdata.SetMaxRateResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='names', full_name='throttlerdata.SetMaxRateResponse.names', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=217,
serialized_end=252,
)
_MAXRATESRESPONSE_RATESENTRY.containing_type = _MAXRATESRESPONSE
_MAXRATESRESPONSE.fields_by_name['rates'].message_type = _MAXRATESRESPONSE_RATESENTRY
DESCRIPTOR.message_types_by_name['MaxRatesRequest'] = _MAXRATESREQUEST
DESCRIPTOR.message_types_by_name['MaxRatesResponse'] = _MAXRATESRESPONSE
DESCRIPTOR.message_types_by_name['SetMaxRateRequest'] = _SETMAXRATEREQUEST
DESCRIPTOR.message_types_by_name['SetMaxRateResponse'] = _SETMAXRATERESPONSE
MaxRatesRequest = _reflection.GeneratedProtocolMessageType('MaxRatesRequest', (_message.Message,), dict(
DESCRIPTOR = _MAXRATESREQUEST,
__module__ = 'throttlerdata_pb2'
# @@protoc_insertion_point(class_scope:throttlerdata.MaxRatesRequest)
))
_sym_db.RegisterMessage(MaxRatesRequest)
MaxRatesResponse = _reflection.GeneratedProtocolMessageType('MaxRatesResponse', (_message.Message,), dict(
RatesEntry = _reflection.GeneratedProtocolMessageType('RatesEntry', (_message.Message,), dict(
DESCRIPTOR = _MAXRATESRESPONSE_RATESENTRY,
__module__ = 'throttlerdata_pb2'
# @@protoc_insertion_point(class_scope:throttlerdata.MaxRatesResponse.RatesEntry)
))
,
DESCRIPTOR = _MAXRATESRESPONSE,
__module__ = 'throttlerdata_pb2'
# @@protoc_insertion_point(class_scope:throttlerdata.MaxRatesResponse)
))
_sym_db.RegisterMessage(MaxRatesResponse)
_sym_db.RegisterMessage(MaxRatesResponse.RatesEntry)
SetMaxRateRequest = _reflection.GeneratedProtocolMessageType('SetMaxRateRequest', (_message.Message,), dict(
DESCRIPTOR = _SETMAXRATEREQUEST,
__module__ = 'throttlerdata_pb2'
# @@protoc_insertion_point(class_scope:throttlerdata.SetMaxRateRequest)
))
_sym_db.RegisterMessage(SetMaxRateRequest)
SetMaxRateResponse = _reflection.GeneratedProtocolMessageType('SetMaxRateResponse', (_message.Message,), dict(
DESCRIPTOR = _SETMAXRATERESPONSE,
__module__ = 'throttlerdata_pb2'
# @@protoc_insertion_point(class_scope:throttlerdata.SetMaxRateResponse)
))
_sym_db.RegisterMessage(SetMaxRateResponse)
_MAXRATESRESPONSE_RATESENTRY.has_options = True
_MAXRATESRESPONSE_RATESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
import abc
from grpc.beta import implementations as beta_implementations
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
# @@protoc_insertion_point(module_scope)
| danielmt/vshard | vendor/github.com/youtube/vitess/py/vtproto/throttlerdata_pb2.py | Python | mit | 7,327 |
#!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------
#
# Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
# Copyright (C) 2003 Mt. Hood Playing Card Co.
# Copyright (C) 2005-2009 Skomoroh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
# Imports
import math
import time
# PySol imports
from pysollib.mygettext import _
from pysollib.gamedb import registerGame, GameInfo, GI
from pysollib.mfxutil import kwdefault
from pysollib.game import Game
from pysollib.layout import Layout
from pysollib.hint import AbstractHint, DefaultHint, CautiousDefaultHint
from pysollib.pysoltk import MfxCanvasText
from pysollib.util import ANY_RANK, ANY_SUIT, NO_RANK, \
UNLIMITED_ACCEPTS, \
UNLIMITED_CARDS, \
UNLIMITED_MOVES
from pysollib.stack import \
AC_RowStack, \
AbstractFoundationStack, \
BasicRowStack, \
DealRowTalonStack, \
InitialDealTalonStack, \
OpenStack, \
RK_RowStack, \
ReserveStack, \
SS_FoundationStack, \
SS_RowStack, \
StackWrapper, \
WasteStack, \
isSameSuitSequence, \
WasteTalonStack
# ************************************************************************
# * Dashavatara Foundation Stacks
# ***********************************************************************/
class Dashavatara_FoundationStack(AbstractFoundationStack):
    # Foundation pile capped at 12 cards (one full Dashavatara suit) with
    # in-pile moves disabled.
    def __init__(self, x, y, game, suit, **cap):
        kwdefault(cap, max_move=0, max_cards=12)
        # NOTE(review): delegates to SS_FoundationStack.__init__ although
        # the class derives from AbstractFoundationStack -- presumably to
        # borrow the suited-foundation setup; confirm this is intentional.
        SS_FoundationStack.__init__(self, x, y, game, suit, **cap)

    def updateText(self):
        AbstractFoundationStack.updateText(self)
        # the game-level status text depends on foundation contents
        self.game.updateText()
class Journey_Foundation(AbstractFoundationStack):
    """Circular foundation: ranks wrap modulo 12, and when the game's
    foundation direction is 0 building may proceed either way around."""

    def __init__(self, x, y, game, suit, **cap):
        kwdefault(cap, mod=12, dir=0, base_rank=NO_RANK, max_move=0)
        AbstractFoundationStack.__init__(self, x, y, game, suit, **cap)

    def acceptsCards(self, from_stack, cards):
        if not AbstractFoundationStack.acceptsCards(self, from_stack, cards):
            return 0
        if not self.cards:
            # any card may start an empty foundation
            return 1
        direction = self.game.getFoundationDir()
        top_rank = self.cards[-1].rank
        if direction == 0:
            # build up or down by one, wrapping around the 12-card suit
            step = (cards[0].rank - top_rank) % self.cap.mod
            return step in (1, 11)
        return (top_rank + direction) % self.cap.mod == cards[0].rank
class AppachansWaterfall_Foundation(AbstractFoundationStack):
    # Single 120-card foundation built rank 0..11 suit after suit; a card
    # is accepted only off a row whose top cards form a same-suit run that
    # can finish the suit currently being built.
    def __init__(self, x, y, game, suit, **cap):
        kwdefault(cap, base_suit=0, mod=12, max_cards=120, max_move=0)
        AbstractFoundationStack.__init__(self, x, y, game, suit, **cap)

    def acceptsCards(self, from_stack, cards):
        # only accept cards moved from a row stack
        if not (from_stack in self.game.s.rows and
                AbstractFoundationStack.acceptsCards(self, from_stack, cards)):
            return 0
        # next expected rank/suit, derived from the foundation's top card
        pile, rank, suit = from_stack.getPile(), 0, 0
        if self.cards:
            rank = (self.cards[-1].rank + 1) % 12
            suit = self.cards[-1].suit + (rank == 0)
        # the source row must still hold the whole remainder of the suit
        # being built (ranks rank..11), all in one suit
        if (not pile or len(pile) <= 11 - rank or
                not isSameSuitSequence(pile[-(12 - rank):])):
            return 0
        return cards[0].suit == suit and cards[0].rank == rank
# ************************************************************************
# * Dashavatara Row Stacks
# ***********************************************************************/
class Dashavatara_OpenStack(OpenStack):
    """Base pile for the Dashavatara games: unlimited moves/accepts,
    building down by rank, plus helpers for the various sequence rules
    and for the day/night 'force' of a suit."""

    def __init__(self, x, y, game, yoffset, **cap):
        kwdefault(cap, max_move=UNLIMITED_MOVES, max_cards=UNLIMITED_CARDS,
                  max_accept=UNLIMITED_ACCEPTS, base_rank=0, dir=-1)
        OpenStack.__init__(self, x, y, game, **cap)
        self.CARD_YOFFSET = yoffset

    def currentForce(self, card):
        # the force flips outside daytime (07:00-19:59 local time)
        force = self._getForce(card)
        hour = time.localtime(time.time())[3]
        if hour < 7 or hour > 19:
            force = not force
        return force

    def _getForce(self, card):
        # suits 0-4 are one force, suits 5 and up the other
        return int(card.suit >= 5)

    def isRankSequence(self, cards, dir=None):
        if not dir:
            dir = self.cap.dir
        for below, above in zip(cards, cards[1:]):
            if below.rank + dir != above.rank:
                return 0
        return 1

    def isAlternateColorSequence(self, cards, dir=None):
        if not dir:
            dir = self.cap.dir
        for below, above in zip(cards, cards[1:]):
            # suits of different parity alternate in color
            if not ((below.suit + above.suit) % 2 and
                    below.rank + dir == above.rank):
                return 0
        return 1

    def isAlternateForceSequence(self, cards, dir=None):
        if not dir:
            dir = self.cap.dir
        for below, above in zip(cards, cards[1:]):
            if (self._getForce(below) == self._getForce(above) or
                    below.rank + dir != above.rank):
                return 0
        return 1

    def isSuitSequence(self, cards, dir=None):
        if not dir:
            dir = self.cap.dir
        for below, above in zip(cards, cards[1:]):
            if below.suit != above.suit or below.rank + dir != above.rank:
                return 0
        return 1
class Dashavatara_AC_RowStack(Dashavatara_OpenStack):
    """Row stack built down in alternating colors."""

    def acceptsCards(self, from_stack, cards):
        if not self.basicAcceptsCards(from_stack, cards):
            return 0
        if not self.isAlternateColorSequence(cards):
            return 0
        if not self.cards:
            # an empty pile takes only a 12 (rank 11), unless any base rank
            # is allowed
            return cards[0].rank == 11 or self.cap.base_rank == ANY_RANK
        return self.isAlternateColorSequence([self.cards[-1], cards[0]])
class Dashavatara_AF_RowStack(Dashavatara_OpenStack):
    """Row stack built down in alternating forces."""

    def acceptsCards(self, from_stack, cards):
        if not self.basicAcceptsCards(from_stack, cards):
            return 0
        if not self.isAlternateForceSequence(cards):
            return 0
        if not self.cards:
            # an empty pile takes only a 12 (rank 11), unless any base rank
            # is allowed
            return cards[0].rank == 11 or self.cap.base_rank == ANY_RANK
        return self.isAlternateForceSequence([self.cards[-1], cards[0]])
class Dashavatara_RK_RowStack(Dashavatara_OpenStack):
    """Row stack built down by rank regardless of suit."""

    def acceptsCards(self, from_stack, cards):
        if not self.basicAcceptsCards(from_stack, cards):
            return 0
        if not self.isRankSequence(cards):
            return 0
        if not self.cards:
            # an empty pile takes only a 12 (rank 11), unless any base rank
            # is allowed
            return cards[0].rank == 11 or self.cap.base_rank == ANY_RANK
        return self.isRankSequence([self.cards[-1], cards[0]])
class Dashavatara_SS_RowStack(Dashavatara_OpenStack):
    """Row stack built down within a single suit."""

    def acceptsCards(self, from_stack, cards):
        if not self.basicAcceptsCards(from_stack, cards):
            return 0
        if not self.isSuitSequence(cards):
            return 0
        if not self.cards:
            # an empty pile takes only a 12 (rank 11), unless any base rank
            # is allowed
            return cards[0].rank == 11 or self.cap.base_rank == ANY_RANK
        return self.isSuitSequence([self.cards[-1], cards[0]])
class Circles_RowStack(SS_RowStack):
    """Same-suit row stack that moves one card at a time and is drawn
    almost fully squashed (1-pixel vertical fan)."""

    def __init__(self, x, y, game, base_rank):
        SS_RowStack.__init__(self, x, y, game, max_move=1, max_accept=1,
                             base_rank=base_rank)
        self.CARD_YOFFSET = 1
class Journey_BraidStack(OpenStack):
    """Braid pile whose cards fan vertically while weaving left and right
    along a sine wave."""

    def __init__(self, x, y, game, xoffset, yoffset):
        OpenStack.__init__(self, x, y, game)
        self.CARD_YOFFSET = int(self.game.app.images.CARD_YOFFSET * yoffset)
        # x offsets follow a sine wave; keep the repeated addition so the
        # exact float sequence (and thus the pixel offsets) is preserved
        self.CARD_XOFFSET = []
        phase = 1
        for _ in range(30):
            self.CARD_XOFFSET.append(int(math.sin(phase) * xoffset))
            phase = phase + .9
class Journey_StrongStack(ReserveStack):
    """Reserve that refills itself from the strong braid first, falling
    back to the weak braid."""

    def fillStack(self):
        if self.cards:
            return
        stacks = self.game.s
        if stacks.braidstrong.cards:
            self.game.moveMove(1, stacks.braidstrong, self)
        elif stacks.braidweak.cards:
            self.game.moveMove(1, stacks.braidweak, self)

    def getBottomImage(self):
        return self.game.app.images.getBraidBottom()
class Journey_WeakStack(ReserveStack):
    """Reserve that refills itself from the weak braid first, falling
    back to the strong braid."""

    def fillStack(self):
        if self.cards:
            return
        stacks = self.game.s
        if stacks.braidweak.cards:
            self.game.moveMove(1, stacks.braidweak, self)
        elif stacks.braidstrong.cards:
            self.game.moveMove(1, stacks.braidstrong, self)

    def getBottomImage(self):
        return self.game.app.images.getBraidBottom()
class Journey_ReserveStack(ReserveStack):
    """Free cell that refuses cards coming from either braid or from the
    rows."""

    def acceptsCards(self, from_stack, cards):
        stacks = self.game.s
        if from_stack is stacks.braidstrong or from_stack is stacks.braidweak:
            return 0
        if from_stack in stacks.rows:
            return 0
        return ReserveStack.acceptsCards(self, from_stack, cards)

    def getBottomImage(self):
        return self.game.app.images.getTalonBottom()
class AppachansWaterfall_RowStack(RK_RowStack):
    def canDropCards(self, stacks):
        # The single foundation builds up in wrapping 12-rank runs; a
        # drop is only offered when this pile ends with the complete
        # same-suit run (12 - rank cards) that the foundation needs next.
        pile, stack, rank = self.getPile(), stacks[0], 0
        if stack.cards:
            rank = (stack.cards[-1].rank + 1) % 12  # next rank wanted
        if (not pile or len(pile) <= 11 - rank or
                not isSameSuitSequence(pile[-(12 - rank):]) or
                not stack.acceptsCards(self, pile[-1:])):
            return (None, 0)
        # drop exactly one move (the run) onto the foundation
        return (stack, 1)
# ************************************************************************
# * Dashavatara Game Stacks
# ************************************************************************
class Dashavatara_TableauStack(Dashavatara_OpenStack):
    """Tableau pile holding up to four cards, built up in suit by the
    capability interval ``dir`` (3) starting from its base rank."""

    def __init__(self, x, y, game, base_rank, yoffset, **cap):
        kwdefault(cap, dir=3, max_move=99, max_cards=4, max_accept=1,
                  base_rank=base_rank)
        OpenStack.__init__(self, x, y, game, **cap)
        self.CARD_YOFFSET = yoffset

    def acceptsCards(self, from_stack, cards):
        if not self.basicAcceptsCards(from_stack, cards):
            return 0
        incoming = cards[0]
        if not self.cards:
            # an empty pile only takes its own base card
            return incoming.rank == self.cap.base_rank
        if self.cards[0].rank != self.cap.base_rank:
            # pile was started with a wrong base card: accept nothing
            return 0
        top = self.cards[-1]
        return (top.suit == incoming.suit and
                top.rank + self.cap.dir == incoming.rank)

    def getBottomImage(self):
        return self.game.app.images.getLetter(self.cap.base_rank)
class Dashavatara_ReserveStack(ReserveStack):
    """One-card reserve that is only usable while the talon has cards."""

    def __init__(self, x, y, game, **cap):
        kwdefault(cap, max_cards=1, max_accept=1, base_rank=ANY_RANK)
        OpenStack.__init__(self, x, y, game, **cap)

    def acceptsCards(self, from_stack, cards):
        accepted = ReserveStack.acceptsCards(self, from_stack, cards)
        # keep the original truthiness contract: the result is falsy
        # whenever the talon is empty
        return accepted and self.game.s.talon.cards
class Dashavatara_RowStack(BasicRowStack):
    """Row that accepts a card only when it is empty and the talon has
    run out; cards may always be picked up from it."""

    def acceptsCards(self, from_stack, cards):
        if not BasicRowStack.acceptsCards(self, from_stack, cards):
            return 0
        if self.cards:
            return False
        return not self.game.s.talon.cards

    def canMoveCards(self, cards):
        # any pile may leave this stack
        return 1

    def getBottomImage(self):
        return self.game.app.images.getTalonBottom()
# ************************************************************************
# *
# ***********************************************************************/
class AbstractDashavataraGame(Game):
    """Shared base for all Dashavatara Ganjifa games: translatable card
    naming tables (10 suits x 12 ranks) and a default highlight rule."""
    SUITS = (_("Fish"), _("Tortoise"), _("Boar"), _("Lion"), _("Dwarf"),
             _("Axe"), _("Arrow"), _("Plow"), _("Lotus"), _("Horse"))
    RANKS = (_("Ace"), "2", "3", "4", "5", "6", "7", "8", "9", "10",
             _("Pradhan"), _("Raja"))
    COLORS = (_("Black"), _("Red"), _("Yellow"), _("Green"), _("Brown"),
              _("Orange"), _("Grey"), _("White"), _("Olive"), _("Crimson"))
    FORCE = (_("Strong"), _("Weak"))
    def updateText(self):
        # no status text by default; subclasses with a text area override
        pass
    def shallHighlightMatch(self, stack1, card1, stack2, card2):
        # highlight same-suit neighbours that are one rank apart
        return (card1.suit == card2.suit and
                (card1.rank + 1 == card2.rank or card1.rank - 1 == card2.rank))
class Journey_Hint(DefaultHint):
    """Hint engine for the Journey games (plain DefaultHint for now)."""
    # FIXME: demo is not too clever in this game
    pass
# ************************************************************************
# * Dashavatara Circles
# ***********************************************************************/
class DashavataraCircles(AbstractDashavataraGame):
    """Dashavatara Circles: 30 one-card row piles arranged in rings
    around the screen centre, ten suit foundations along the sides and
    two free reserve cells."""
    Hint_Class = CautiousDefaultHint
    #
    # Game layout
    #
    def createGame(self):
        l, s = Layout(self), self.s
        # Set window size
        w, h = l.XM + l.XS * 9, l.YM + l.YS * 7
        self.setSize(w, h)
        # Create row stacks
        x = w // 2 - l.CW // 2
        y = h // 2 - l.YS // 2
        # unit offsets of the 30 piles from the centre: 4 on the inner
        # ring, 10 on the middle ring, 16 on the outer ring
        x0 = (-.7, .3, .7, -.3,
              -1.7, -1.5, -.6, .6, 1.5, 1.7, 1.5, .6, -.6, -1.5,
              -2.7, -2.5, -1.9, -1, 0, 1, 1.9, 2.5, 2.7, 2.5, 1.9,
              1, 0, -1, -1.9, -2.5)
        y0 = (-.3, -.45, .3, .45,
              0, -.8, -1.25, -1.25, -.8, 0, .8, 1.25, 1.25, .8,
              0, -.9, -1.6, -2, -2.2, -2, -1.6, -.9, 0, .9, 1.6,
              2, 2.2, 2, 1.6, .9)
        for i in range(30):
            # FIXME:
            _x, _y = x+l.XS*x0[i], y+l.YS*y0[i]+l.YM*y0[i]*2
            if _x < 0:
                _x = 0
            if _y < 0:
                _y = 0
            s.rows.append(Circles_RowStack(_x, _y, self, base_rank=ANY_RANK))
        # Create reserve stacks (bottom-left and bottom-right corners)
        s.reserves.append(ReserveStack(l.XM, h - l.YS, self))
        s.reserves.append(ReserveStack(w - l.XS, h - l.YS, self))
        # Create foundations: 5 down the left edge, then 5 down the right
        x, y = l.XM, l.YM
        for j in range(2):
            for i in range(5):
                s.foundations.append(
                    SS_FoundationStack(x, y, self, i + j * 5, mod=12,
                                       max_move=0, max_cards=12))
                y = y + l.YS
            x, y = w - l.XS, l.YM
        # from pprint import pprint
        # pprint(s.rows)
        # print (l.XM + l.XS, 0, w - l.XS - l.XM, 999999)
        self.setRegion(s.rows, (l.XM + l.XS, 0, w - l.XS - l.XM, 999999))
        # Create talon
        s.talon = InitialDealTalonStack(l.XM + l.XS, l.YM, self)
        # Define stack groups
        l.defaultStackGroups()
    #
    # Game over rides
    #
    def startGame(self):
        # three silent face-up rounds, then one animated round
        assert len(self.s.talon.cards) == 120
        for i in range(3):
            self.s.talon.dealRow(rows=self.s.rows, flip=1, frames=0)
        self.startDealSample()
        self.s.talon.dealRow(rows=self.s.rows, flip=1, frames=3)
        self.s.talon.dealCards()
# ************************************************************************
# * Ten Avatars
# ***********************************************************************/
class TenAvatars(AbstractDashavataraGame):
    """Ten Avatars: ten rank-built rows and twelve free reserve cells;
    won when each row holds a complete 12-card same-suit sequence."""
    #
    # Game layout
    #
    def createGame(self):
        l, s = Layout(self), self.s
        # Set window size
        self.setSize(l.XM * 3 + l.XS * 11, l.YM + l.YS * 6)
        # Create row stacks
        x = l.XM
        y = l.YM
        for i in range(10):
            s.rows.append(RK_RowStack(x, y, self, base_rank=11,
                                      max_move=12, max_cards=99))
            x = x + l.XS
        # Create reserve stacks: an L-shaped bank of 12 free cells
        # (6 down the right edge, 6 along the bottom of that column)
        x = self.width - l.XS
        y = l.YM
        for i in range(6):
            s.reserves.append(ReserveStack(x, y, self))
            y = y + l.YS
        y = y - l.YS
        for i in range(6):
            x = x - l.XS
            s.reserves.append(ReserveStack(x, y, self))
        self.setRegion(s.rows, (0, 0, l.XM + l.XS * 10, l.YS * 5))
        # Create talon
        s.talon = DealRowTalonStack(l.XM, self.height - l.YS, self)
        l.createText(s.talon, "n")
        # Define stack groups
        l.defaultStackGroups()
    #
    # Game over rides
    #
    def startGame(self):
        # four silent rows, then deal the rest via dealCards()
        assert len(self.s.talon.cards) == 120
        for i in range(4):
            self.s.talon.dealRow(flip=1, frames=0)
        self.startDealSample()
        self.s.talon.dealCards()
    def isGameWon(self):
        # all 120 cards must sit in the rows as complete suit sequences
        if len(self.s.talon.cards):
            return 0
        for s in self.s.rows:
            if len(s.cards) != 12 or not isSameSuitSequence(s.cards):
                return 0
        return 1
# ************************************************************************
# * Balarama
# ***********************************************************************/
class Balarama(AbstractDashavataraGame):
    """Balarama: Ghulam-layout game with 16 rows, 4 reserves and ten
    wrapping (mod 12) suit foundations.  Subclasses swap the row stack
    class, the base rank and the layout keyword arguments."""
    Layout_Method = staticmethod(Layout.ghulamLayout)
    Talon_Class = InitialDealTalonStack
    Foundation_Class = SS_FoundationStack
    RowStack_Class = Dashavatara_AC_RowStack
    BASE_RANK = ANY_RANK
    #
    # Game layout
    #
    def createGame(self, **layout):
        l, s = Layout(self), self.s
        kwdefault(layout, rows=16, reserves=4, texts=0)
        self.Layout_Method(l, **layout)
        self.setSize(l.size[0], l.size[1])
        # Create foundations
        for r in l.s.foundations:
            s.foundations.append(self.Foundation_Class(r.x, r.y, self,
                                 r.suit, mod=12, max_cards=12))
        # Create reserve stacks
        for r in l.s.reserves:
            s.reserves.append(ReserveStack(r.x, r.y, self, ))
        # Create row stacks
        for r in l.s.rows:
            s.rows.append(self.RowStack_Class(r.x, r.y, self, l.YOFFSET,
                          suit=ANY_SUIT, base_rank=self.BASE_RANK,
                          max_cards=12))
        # Create talon
        s.talon = self.Talon_Class(l.s.talon.x, l.s.talon.y, self)
        # Define stack groups
        l.defaultAll()
    #
    # Game over rides
    #
    def startGame(self):
        # six silent full rows, then the last two deals animated
        assert len(self.s.talon.cards) == 120
        for i in range(6):
            self.s.talon.dealRow(rows=self.s.rows, flip=1, frames=0)
        self.startDealSample()
        self.s.talon.dealRow(rows=self.s.rows, flip=1, frames=3)
        self.s.talon.dealRow(rows=self.s.rows[:8], flip=1, frames=3)
        self.s.talon.dealCards()
    def shallHighlightMatch(self, stack1, card1, stack2, card2):
        # alternate color groups, ranks one apart
        return (card1.color % 2 != card2.color % 2 and
                (card1.rank + 1 == card2.rank or card2.rank + 1 == card1.rank))
# ************************************************************************
# * Hayagriva
# ***********************************************************************/
class Hayagriva(Balarama):
    """Hayagriva: Balarama variant using Dashavatara_RK_RowStack rows
    with a Raja (rank 11) base rank."""
    Layout_Method = staticmethod(Layout.ghulamLayout)
    Talon_Class = InitialDealTalonStack
    Foundation_Class = SS_FoundationStack
    RowStack_Class = Dashavatara_RK_RowStack
    BASE_RANK = 11
    #
    # Game layout
    #
    def createGame(self, **layout):
        # Fix: forward **layout instead of silently dropping it; with no
        # arguments this is identical to the old behavior (the parent
        # applies its own kwdefault values).
        Balarama.createGame(self, **layout)
    def shallHighlightMatch(self, stack1, card1, stack2, card2):
        # any suit, ranks one apart
        return (card1.rank + 1 == card2.rank or card2.rank + 1 == card1.rank)
# ************************************************************************
# * Shanka
# ***********************************************************************/
class Shanka(Balarama):
    """Shanka: Balarama variant without reserve stacks."""
    Layout_Method = staticmethod(Layout.ghulamLayout)
    Talon_Class = InitialDealTalonStack
    Foundation_Class = SS_FoundationStack
    RowStack_Class = Dashavatara_RK_RowStack
    BASE_RANK = 11
    #
    # Game layout
    #
    def createGame(self, **layout):
        Balarama.createGame(self, reserves=0)
    def shallHighlightMatch(self, stack1, card1, stack2, card2):
        # on the foundations suit matters; elsewhere rank alone does
        if stack1 in self.s.foundations:
            return (card1.suit == card2.suit and
                    (card1.rank + 1 == card2.rank or
                     card2.rank + 1 == card1.rank))
        return (card1.rank + 1 == card2.rank or card2.rank + 1 == card1.rank)
# ************************************************************************
# * Surukh
# ***********************************************************************/
class Surukh(Balarama):
    """Surukh: Balarama variant whose highlighting pairs cards from
    opposite 'force' groups (suits 0-4 form one group, 5-9 the other)."""
    Layout_Method = staticmethod(Layout.ghulamLayout)
    Talon_Class = InitialDealTalonStack
    Foundation_Class = SS_FoundationStack
    RowStack_Class = Dashavatara_AF_RowStack
    BASE_RANK = ANY_RANK
    #
    # Game layout
    #
    def createGame(self, **layout):
        Balarama.createGame(self, reserves=4)
    def shallHighlightMatch(self, stack1, card1, stack2, card2):
        # Idiom fix: compute each card's force group directly instead of
        # the previous four-branch if/else ladder (same values: suits
        # 0-4 -> 0, suits 5-9 -> 1).
        force0 = int(card1.suit > 4)
        force1 = int(card2.suit > 4)
        return (force0 != force1 and
                (card1.rank + 1 == card2.rank or
                 card2.rank + 1 == card1.rank))
# ************************************************************************
# * Matsya
# ***********************************************************************/
class Matsya(AbstractDashavataraGame):
    """Matsya: Klondike-style layout with ten rows, a waste pile and ten
    wrapping (mod 12) suit foundations.  Subclasses vary the row stack
    class, base rank, number of redeals and cards per deal."""
    Layout_Method = staticmethod(Layout.klondikeLayout)
    Talon_Class = WasteTalonStack
    Foundation_Class = SS_FoundationStack
    RowStack_Class = RK_RowStack
    BASE_RANK = 11
    #
    # Game layout
    #
    def createGame(self, max_rounds=1, num_deal=1, **layout):
        l, s = Layout(self), self.s
        kwdefault(layout, rows=10, waste=1)
        self.Layout_Method(l, **layout)
        self.setSize(l.size[0], l.size[1])
        # Create talon
        s.talon = self.Talon_Class(l.s.talon.x, l.s.talon.y, self,
                                   max_rounds=max_rounds, num_deal=num_deal)
        s.waste = WasteStack(l.s.waste.x, l.s.waste.y, self)
        # Create foundations
        for r in l.s.foundations:
            s.foundations.append(self.Foundation_Class(r.x, r.y, self,
                                 r.suit, mod=12, max_cards=12, max_move=0))
        # Create row stacks
        for r in l.s.rows:
            s.rows.append(self.RowStack_Class(r.x, r.y, self,
                          suit=ANY_SUIT, base_rank=self.BASE_RANK))
        # Define stack groups
        l.defaultAll()
    #
    # Game over rides
    #
    def startGame(self):
        # triangular deal: row i receives i face-down cards, then one
        # animated face-up row and the first waste card
        assert len(self.s.talon.cards) == 120
        for i in range(10):
            self.s.talon.dealRow(rows=self.s.rows[i+1:], flip=0, frames=0)
        self.startDealSample()
        self.s.talon.dealRow()
        self.s.talon.dealCards()
    def shallHighlightMatch(self, stack1, card1, stack2, card2):
        # any suit, ranks one apart
        return (card1.rank + 1 == card2.rank or card2.rank + 1 == card1.rank)
# ************************************************************************
# * Kurma
# ***********************************************************************/
class Kurma(Matsya):
    """Kurma: Matsya with suit-built rows, any base rank and unlimited
    redeals (max_rounds=-1)."""
    Layout_Method = staticmethod(Layout.klondikeLayout)
    Talon_Class = WasteTalonStack
    Foundation_Class = SS_FoundationStack
    RowStack_Class = SS_RowStack
    BASE_RANK = ANY_RANK
    #
    # Game layout
    #
    def createGame(self, **layout):
        Matsya.createGame(self, max_rounds=-1)
# ************************************************************************
# * Varaha
# ***********************************************************************/
class Varaha(Matsya):
    """Varaha: like Kurma (suit rows, any base rank, unlimited redeals)
    but dealing three cards at a time from the talon."""
    Layout_Method = staticmethod(Layout.klondikeLayout)
    Talon_Class = WasteTalonStack
    Foundation_Class = SS_FoundationStack
    RowStack_Class = SS_RowStack
    BASE_RANK = ANY_RANK
    #
    # Game layout
    #
    def createGame(self, **layout):
        Matsya.createGame(self, max_rounds=-1, num_deal=3)
# ************************************************************************
# * Narasimha
# ***********************************************************************/
class Narasimha(Matsya):
    """Narasimha: Matsya with alternate-color rows and no redeal."""
    Layout_Method = staticmethod(Layout.klondikeLayout)
    Talon_Class = WasteTalonStack
    Foundation_Class = SS_FoundationStack
    RowStack_Class = AC_RowStack
    BASE_RANK = 11
    #
    # Game layout
    #
    def createGame(self, **layout):
        Matsya.createGame(self, max_rounds=1, num_deal=1)
    def shallHighlightMatch(self, stack1, card1, stack2, card2):
        # alternate color groups, ranks one apart
        return (card1.color % 2 != card2.color % 2 and
                (card1.rank + 1 == card2.rank or card2.rank + 1 == card1.rank))
# ************************************************************************
# * Vamana
# ***********************************************************************/
class Vamana(Matsya):
    """Vamana: Narasimha with unlimited redeals, dealing three cards at
    a time."""
    Layout_Method = staticmethod(Layout.klondikeLayout)
    Talon_Class = WasteTalonStack
    Foundation_Class = SS_FoundationStack
    RowStack_Class = AC_RowStack
    BASE_RANK = 11
    #
    # Game layout
    #
    def createGame(self, **layout):
        Matsya.createGame(self, max_rounds=-1, num_deal=3)
    def shallHighlightMatch(self, stack1, card1, stack2, card2):
        # alternate color groups, ranks one apart
        return (card1.color % 2 != card2.color % 2 and
                (card1.rank + 1 == card2.rank or card2.rank + 1 == card1.rank))
# ************************************************************************
# * Parashurama
# ***********************************************************************/
class Parashurama(Matsya):
    """Parashurama: Matsya with one redeal (max_rounds=2), dealing three
    cards at a time."""
    Layout_Method = staticmethod(Layout.klondikeLayout)
    Talon_Class = WasteTalonStack
    Foundation_Class = SS_FoundationStack
    RowStack_Class = RK_RowStack
    BASE_RANK = 11
    #
    # Game layout
    #
    def createGame(self, **layout):
        Matsya.createGame(self, max_rounds=2, num_deal=3)
    def shallHighlightMatch(self, stack1, card1, stack2, card2):
        # any suit, ranks one apart
        return (card1.rank + 1 == card2.rank or card2.rank + 1 == card1.rank)
# ************************************************************************
# * Journey to Cuddapah
# ************************************************************************
class Journey(AbstractDashavataraGame):
    """Journey to Cuddapah: a braid-style game.  Two central braids of
    BRAID_CARDS cards each feed the side stacks, and the first card
    moved to a foundation fixes the base rank of all foundations (the
    build wraps mod 12)."""
    Hint_Class = Journey_Hint
    BRAID_CARDS = 15
    BRAID_OFFSET = 1
    #
    # game layout
    #
    def createGame(self):
        # create layout
        l, s = Layout(self), self.s
        # set window
        # (piles up to 20 cards are playable - needed for Braid_BraidStack)
        decks = self.gameinfo.decks
        h = max(5 * l.YS + 35, 2*l.YM + 2*l.YS +
                (self.BRAID_CARDS - 1) * l.YOFFSET*self.BRAID_OFFSET)
        self.setSize(l.XM + l.XS * (7 + decks * 2), l.YM + h)
        # extra settings: base_card is set by startGame / load hooks
        self.base_card = None
        # Create foundations, rows, reserves
        s.addattr(braidstrong=None)      # register extra stack variable
        s.addattr(braidweak=None)      # register extra stack variable
        # left side: foundations for suits 0-4, a strong stack and a
        # reserve per row
        x, y = l.XM, l.YM
        for j in range(5):
            for i in range(decks):
                s.foundations.append(Journey_Foundation(x + l.XS * i, y, self,
                                     j, mod=12, max_cards=12))
            s.rows.append(Journey_StrongStack(x + l.XS * decks, y, self))
            s.rows.append(
                Journey_ReserveStack(x + l.XS * (1 + decks), y, self))
            y = y + l.YS
        # right side: a reserve, a weak stack and foundations for
        # suits 5-9 per row
        x, y = x + l.XS * (5 + decks), l.YM
        for j in range(5):
            s.rows.append(Journey_ReserveStack(x, y, self))
            s.rows.append(Journey_WeakStack(x + l.XS, y, self))
            for i in range(decks, 0, -1):
                s.foundations.append(
                    Journey_Foundation(x + l.XS * (1 + i), y, self,
                                       j + 5, mod=12, max_cards=12))
            y = y + l.YS
        self.texts.info = MfxCanvasText(
            self.canvas,
            self.width // 2, h - l.YM // 2,
            anchor="center",
            font=self.app.getFont("canvas_default"))
        # Create braids
        x, y = l.XM + l.XS * 2.15 + l.XS * decks, l.YM
        s.braidstrong = Journey_BraidStack(
            x, y, self, xoffset=12, yoffset=self.BRAID_OFFSET)
        x = x + l.XS * 1.7
        s.braidweak = Journey_BraidStack(
            x, y, self, xoffset=-12, yoffset=self.BRAID_OFFSET)
        # Create talon
        x, y = l.XM + l.XS * 2 + l.XS * decks, h - l.YS - l.YM
        s.talon = WasteTalonStack(x, y, self, max_rounds=3)
        l.createText(s.talon, "s")
        s.talon.texts.rounds = MfxCanvasText(
            self.canvas,
            self.width // 2, h - l.YM * 2.5,
            anchor="center",
            font=self.app.getFont("canvas_default"))
        x = x + l.XS * 2
        s.waste = WasteStack(x, y, self)
        l.createText(s.waste, "s")
        # define stack-groups
        self.sg.talonstacks = [s.talon] + [s.waste]
        self.sg.openstacks = s.foundations + s.rows
        self.sg.dropstacks = [s.braidstrong] + [s.braidweak] + s.rows \
            + [s.waste]
    #
    # game overrides
    #
    def startGame(self):
        self.startDealSample()
        self.base_card = None
        self.updateText()
        # fill both braids, then one card to every row
        for i in range(self.BRAID_CARDS):
            self.s.talon.dealRow(rows=[self.s.braidstrong])
        for i in range(self.BRAID_CARDS):
            self.s.talon.dealRow(rows=[self.s.braidweak])
        self.s.talon.dealRow()
        # deal base_card to foundations, update cap.base_rank
        self.base_card = self.s.talon.getCard()
        to_stack = self.s.foundations[
            self.base_card.suit * self.gameinfo.decks]
        self.flipMove(self.s.talon)
        self.moveMove(1, self.s.talon, to_stack)
        self.updateText()
        for s in self.s.foundations:
            s.cap.base_rank = self.base_card.rank
        # deal first card to WasteStack
        self.s.talon.dealCards()
    def shallHighlightMatch(self, stack1, card1, stack2, card2):
        # same suit, ranks adjacent modulo 12 (the build wraps)
        return (card1.suit == card2.suit and
                ((card1.rank + 1) % 12 == card2.rank or
                 (card2.rank + 1) % 12 == card1.rank))
    def getHighlightPilesStacks(self):
        return ()
    def _restoreGameHook(self, game):
        # re-establish the saved base card and its rank on all foundations
        self.base_card = self.cards[game.loadinfo.base_card_id]
        for s in self.s.foundations:
            s.cap.base_rank = self.base_card.rank
    def _loadGameHook(self, p):
        self.loadinfo.addattr(base_card_id=None)    # register extra load var.
        self.loadinfo.base_card_id = p.load()
    def _saveGameHook(self, p):
        # persist only the base card's id; rank is recomputed on restore
        p.dump(self.base_card.id)
    #
    # game extras
    #
    def updateText(self):
        # show the base rank and the build direction in the info text
        if self.preview > 1 or not self.texts.info:
            return
        if not self.base_card:
            t = ""
        else:
            t = self.RANKS[self.base_card.rank]
            dir = self.getFoundationDir() % 12
            if dir == 1:
                t = t + _(" Ascending")
            elif dir == 11:
                t = t + _(" Descending")
        self.texts.info.config(text=t)
# ************************************************************************
# * Long Journey to Cuddapah
# ************************************************************************
class LongJourney(Journey):
    """Long Journey to Cuddapah: Journey with longer (20-card), more
    tightly packed braids."""
    BRAID_CARDS = 20
    BRAID_OFFSET = .7
# ************************************************************************
# * Appachan's Waterfall
# ***********************************************************************/
class AppachansWaterfall(AbstractDashavataraGame):
    """Appachan's Waterfall: ten rows, two reserves and a single central
    foundation that must collect all 120 cards."""
    #
    # Game layout
    #
    def createGame(self):
        l, s = Layout(self), self.s
        # Set window size
        w, h = l.XM + l.XS * 10, l.YM + l.YS * 6
        self.setSize(w, h)
        # Create row stacks
        x, y = l.XM, l.YM
        for i in range(10):
            s.rows.append(AppachansWaterfall_RowStack(x, y, self,
                                                      base_rank=ANY_RANK,
                                                      max_move=12,
                                                      max_cards=99))
            x = x + l.XS
        self.setRegion(s.rows, (-999, -999, 999999, l.YM + l.YS * 5))
        # Create foundation (centered at the bottom)
        x, y = w // 2 - l.CW // 2, h - l.YS
        s.foundations.append(AppachansWaterfall_Foundation(x, y, self, -1))
        # Create reserves flanking the foundation
        s.reserves.append(ReserveStack(x - l.XS * 2, y, self))
        s.reserves.append(ReserveStack(x + l.XS * 2, y, self))
        # Create talon
        s.talon = DealRowTalonStack(l.XM, y, self)
        l.createText(s.talon, "n")
        # Define stack groups
        l.defaultStackGroups()
    #
    # Game over rides
    #
    def startGame(self):
        # four silent rows, then deal the rest via dealCards()
        assert len(self.s.talon.cards) == 120
        for i in range(4):
            self.s.talon.dealRow(flip=1, frames=0)
        self.startDealSample()
        self.s.talon.dealCards()
    def isGameWon(self):
        # won when the single foundation holds the whole deck
        return len(self.s.foundations[0].cards) == 120
# ************************************************************************
# * Hiranyaksha
# ************************************************************************
class Hiranyaksha(AbstractDashavataraGame):
    """Hiranyaksha: eleven row piles under ten reserves; each row fans
    downwards for the first `playcards` cards, then to the right."""
    RowStack_Class = StackWrapper(Dashavatara_RK_RowStack, base_rank=NO_RANK)
    #
    # game layout
    #
    def createGame(self, rows=11, reserves=10):
        # create layout
        l, s = Layout(self), self.s
        # set size
        maxrows = max(rows, reserves)
        self.setSize(l.XM + (maxrows + 2) * l.XS, l.YM + 6 * l.YS)
        # number of cards drawn with a vertical offset before the fan
        # switches to horizontal
        playcards = 4 * l.YS // l.YOFFSET
        xoffset, yoffset = [], []
        for i in range(playcards):
            xoffset.append(0)
            yoffset.append(l.YOFFSET)
        for i in range(96 * self.gameinfo.decks - playcards):
            xoffset.append(l.XOFFSET)
            yoffset.append(0)
        # create stacks
        x, y = l.XM + (maxrows - reserves) * l.XS // 2, l.YM
        for i in range(reserves):
            s.reserves.append(ReserveStack(x, y, self))
            x = x + l.XS
        x, y = l.XM + (maxrows - rows) * l.XS // 2, l.YM + l.YS
        self.setRegion(s.reserves, (-999, -999, 999999, y - l.YM // 2))
        for i in range(rows):
            stack = self.RowStack_Class(x, y, self, yoffset=l.YOFFSET)
            stack.CARD_XOFFSET = xoffset
            stack.CARD_YOFFSET = yoffset
            s.rows.append(stack)
            x = x + l.XS
        # foundations: two columns of five on the right edge
        x, y = l.XM + maxrows * l.XS, l.YM
        for i in range(2):
            for suit in range(5):
                s.foundations.append(SS_FoundationStack(x, y, self,
                                     suit=suit + (5 * i)))
                y = y + l.YS
            x, y = x + l.XS, l.YM
        self.setRegion(self.s.foundations, (x - l.XS * 2, -999, 999999,
                       self.height - (l.YS + l.YM)), priority=1)
        s.talon = InitialDealTalonStack(
            self.width - 3 * l.XS // 2, self.height - l.YS, self)
        # define stack-groups
        l.defaultStackGroups()
    #
    # game overrides
    #
    def startGame(self):
        # deal the whole talon; advance to the next row whenever a Raja
        # (rank 11) turns up for a row that already has cards
        self.startDealSample()
        i = 0
        while self.s.talon.cards:
            if self.s.talon.cards[-1].rank == 11:
                if self.s.rows[i].cards:
                    i = i + 1
            self.s.talon.dealRow(rows=[self.s.rows[i]], frames=4)
    # must look at cards
    def _getClosestStack(self, cx, cy, stacks, dragstack):
        # pick the stack whose top card (or empty slot) is nearest to
        # (cx, cy) by squared distance
        closest, cdist = None, 999999999
        for stack in stacks:
            if stack.cards and stack is not dragstack:
                dist = (stack.cards[-1].x - cx)**2 + \
                       (stack.cards[-1].y - cy)**2
            else:
                dist = (stack.x - cx)**2 + (stack.y - cy)**2
            if dist < cdist:
                closest, cdist = stack, dist
        return closest
    def shallHighlightMatch(self, stack1, card1, stack2, card2):
        # delegate to the row stack's rank-sequence test, either order
        row = self.s.rows[0]
        sequence = row.isRankSequence
        return (sequence([card1, card2]) or sequence([card2, card1]))
# ************************************************************************
# * Dashavatara Hint
# ************************************************************************
class Dashavatara_Hint(AbstractHint):
    """Hint generator for the Dashavatara game.  Preference order:
    moves onto the tableaux (80000+), tableau to row (70000+),
    row to row (60000+), then dealing from the talon."""
    def computeHints(self):
        game = self.game
        # 2)See if we can move a card to the tableaux
        if not self.hints:
            for r in game.sg.dropstacks:
                pile = r.getPile()
                if not pile or len(pile) != 1:
                    continue
                if r in game.s.tableaux:
                    rr = self.ClonedStack(r, stackcards=r.cards[:-1])
                    if rr.acceptsCards(None, pile):
                        # do not move a card that is already in correct place
                        continue
                    base_score = 80000 + (4 - r.cap.base_suit)
                else:
                    base_score = 80000
                # find a stack that would accept this card
                for t in game.s.tableaux:
                    if t is not r and t.acceptsCards(r, pile):
                        score = base_score + 100 * (self.K - pile[0].rank)
                        self.addHint(score, 1, r, t)
                        break
        # 3)See if we can move a card from the tableaux
        # to a row stack. This can only happen if there are
        # no more cards to deal.
        if not self.hints:
            for r in game.s.tableaux:
                pile = r.getPile()
                if not pile or len(pile) != 1:
                    continue
                rr = self.ClonedStack(r, stackcards=r.cards[:-1])
                if rr.acceptsCards(None, pile):
                    # do not move a card that is already in correct place
                    continue
                # find a stack that would accept this card
                for t in game.s.rows:
                    if t is not r and t.acceptsCards(r, pile):
                        score = 70000 + 100 * (self.K - pile[0].rank)
                        self.addHint(score, 1, r, t)
                        break
        # 4)See if we can move a card within the row stacks
        if not self.hints:
            for r in game.s.rows:
                pile = r.getPile()
                if not pile or len(pile) != 1 or len(pile) == len(r.cards):
                    continue
                base_score = 60000
                # find a stack that would accept this card
                for t in game.s.rows:
                    if t is not r and t.acceptsCards(r, pile):
                        score = base_score + 100 * (self.K - pile[0].rank)
                        self.addHint(score, 1, r, t)
                        break
        # 5)See if we can deal cards
        if self.level >= 2:
            if game.canDealCards():
                self.addHint(self.SCORE_DEAL, 0, game.s.talon, None)
# ************************************************************************
# * Dashavatara
# ************************************************************************
class Dashavatara(Game):
    """Dashavatara: three bands of ten tableau piles that each collect
    four cards (built in suit by threes), above ten single-card rows
    that are refilled from the talon."""
    Hint_Class = Dashavatara_Hint
    #
    # game layout
    #
    def createGame(self):
        # create layout
        l, s = Layout(self), self.s
        TABLEAU_YOFFSET = min(9, max(3, l.YOFFSET // 3))
        # set window
        th = l.YS + 3 * TABLEAU_YOFFSET
        # (set piles so that at least 2/3 of a card is visible with 10 cards)
        h = 10 * l.YOFFSET + l.CH * 2//3
        self.setSize(11 * l.XS + l.XM * 2, l.YM + 3 * th + l.YM + h)
        # create stacks
        s.addattr(tableaux=[])     # register extra stack variable
        x = l.XM + 8 * l.XS + l.XS // 2
        y = l.YM
        # three bands of tableaux (base ranks 2, 1, 0), one reserve each
        for i in range(3, 0, -1):
            x = l.XM
            for j in range(10):
                s.tableaux.append(
                    Dashavatara_TableauStack(
                        x, y, self, i - 1, TABLEAU_YOFFSET))
                x = x + l.XS
            x = x + l.XM
            s.reserves.append(Dashavatara_ReserveStack(x, y, self))
            y = y + th
        x, y = l.XM, y + l.YM
        for i in range(10):
            s.rows.append(Dashavatara_RowStack(x, y, self, max_accept=1))
            x = x + l.XS
        x = self.width - l.XS
        y = self.height - l.YS
        s.talon = DealRowTalonStack(x, y, self)
        l.createText(s.talon, "sw")
        # define stack-groups
        self.sg.openstacks = s.tableaux + s.rows + s.reserves
        self.sg.talonstacks = [s.talon]
        self.sg.dropstacks = s.tableaux + s.rows
    #
    # game overrides
    #
    def startGame(self):
        self.s.talon.dealRow(rows=self.s.tableaux, frames=0)
        self._startAndDealRow()
    def isGameWon(self):
        # every tableau pile must hold its four cards
        for stack in self.s.tableaux:
            if len(stack.cards) != 4:
                return 0
        return 1
    def fillStack(self, stack):
        # an emptied row is refilled from the talon while cards remain
        if self.s.talon.cards:
            if stack in self.s.rows and len(stack.cards) == 0:
                self.s.talon.dealRow(rows=[stack])
    def shallHighlightMatch(self, stack1, card1, stack2, card2):
        # same suit, three ranks apart (the tableau build interval)
        return (card1.suit == card2.suit and
                (card1.rank + 3 == card2.rank or card2.rank + 3 == card1.rank))
    def getHighlightPilesStacks(self):
        return ()
# ************************************************************************
# *
# ***********************************************************************/
def r(id, gameclass, name, game_type, decks, redeals, skill_level):
    """Register one Dashavatara Ganjifa game (10 suits, 12 ranks) and
    return its GameInfo record."""
    gi = GameInfo(id, gameclass, name,
                  game_type | GI.GT_DASHAVATARA_GANJIFA,
                  decks, redeals, skill_level,
                  suits=list(range(10)), ranks=list(range(12)))
    registerGame(gi)
    return gi
# register every game defined above (game ids 15406-15422)
r(15406, Matsya, "Matsya", GI.GT_DASHAVATARA_GANJIFA, 1, 0, GI.SL_BALANCED)
r(15407, Kurma, "Kurma", GI.GT_DASHAVATARA_GANJIFA, 1, -1, GI.SL_BALANCED)
r(15408, Varaha, "Varaha", GI.GT_DASHAVATARA_GANJIFA, 1, -1, GI.SL_BALANCED)
r(15409, Narasimha, "Narasimha", GI.GT_DASHAVATARA_GANJIFA, 1, 0,
  GI.SL_BALANCED)
r(15410, Vamana, "Vamana", GI.GT_DASHAVATARA_GANJIFA, 1, -1, GI.SL_BALANCED)
r(15411, Parashurama, "Parashurama", GI.GT_DASHAVATARA_GANJIFA, 1, 1,
  GI.SL_BALANCED)
r(15412, TenAvatars, "Ten Avatars", GI.GT_DASHAVATARA_GANJIFA, 1, 0,
  GI.SL_MOSTLY_SKILL)
r(15413, DashavataraCircles, "Dashavatara Circles", GI.GT_DASHAVATARA_GANJIFA,
  1, 0, GI.SL_MOSTLY_SKILL)
r(15414, Balarama, "Balarama", GI.GT_DASHAVATARA_GANJIFA, 1, 0,
  GI.SL_MOSTLY_SKILL)
r(15415, Hayagriva, "Hayagriva", GI.GT_DASHAVATARA_GANJIFA, 1, 0,
  GI.SL_MOSTLY_SKILL)
r(15416, Shanka, "Shanka", GI.GT_DASHAVATARA_GANJIFA, 1, 0, GI.SL_MOSTLY_SKILL)
r(15417, Journey, "Journey to Cuddapah", GI.GT_DASHAVATARA_GANJIFA, 1, 2,
  GI.SL_BALANCED)
r(15418, LongJourney, "Long Journey to Cuddapah", GI.GT_DASHAVATARA_GANJIFA,
  2, 2, GI.SL_BALANCED)
r(15419, Surukh, "Surukh", GI.GT_DASHAVATARA_GANJIFA, 1, 0, GI.SL_BALANCED)
r(15420, AppachansWaterfall, "Appachan's Waterfall", GI.GT_DASHAVATARA_GANJIFA,
  1, 0, GI.SL_MOSTLY_SKILL)
r(15421, Hiranyaksha, 'Hiranyaksha', GI.GT_DASHAVATARA_GANJIFA, 1, 0,
  GI.SL_MOSTLY_SKILL)
r(15422, Dashavatara, 'Dashavatara', GI.GT_DASHAVATARA_GANJIFA, 1, 0,
  GI.SL_BALANCED)
# drop the registration helper from the module namespace
del r
| jimsize/PySolFC | pysollib/games/ultra/dashavatara.py | Python | gpl-3.0 | 43,519 |
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404, render
from easy_avatar.forms import UploadFileForm
from django.views.decorators.csrf import requires_csrf_token
from django.template import RequestContext
from django.conf import settings
from easy_avatar.models import Easy_Avatar
from django.http import HttpResponse
import os
import errno
from PIL import Image
import json
@requires_csrf_token
def upload(request):
    """Avatar upload view: on a valid POST, delegate the uploaded file
    to handle_uploaded_file(); otherwise render the (empty) form."""
    message = ""
    if request.method == 'POST':
        form = UploadFileForm(request.POST, request.FILES)
        if form.is_valid():
            return handle_uploaded_file(request, request.FILES['file'])
    else:
        form = UploadFileForm()
    # NOTE(review): django.shortcuts.render() expects a template name as
    # its second positional argument; this call passes the context dict
    # there and will raise at runtime.  Supply the template path and
    # move the dict to the third argument.
    # NOTE(review): a POST whose form fails validation also falls
    # through to this same broken call.
    return render(request, {'form': form, 'message':message})
def handle_uploaded_file(request, f):
    """Thumbnail the uploaded image to 100x100, save it under the user's
    avatar directory, update the Easy_Avatar record and return a JSON
    response with "message" and "image_url" keys.
    NOTE(review): `user` (and the per-user FILE_SAVE_PATH) is only bound
    when request.user.is_authenticated() is true; for anonymous requests
    the code below raises NameError inside the try block and silently
    reports a generic failure.
    """
    response = {}
    image = ""
    FILE_URL_PATH = getattr(settings, "FILE_URL_PATH", None)
    FILE_SAVE_PATH = getattr(settings, "FILE_SAVE_PATH", None)
    if request.user.is_authenticated():
        user = request.user
        FILE_SAVE_PATH = FILE_SAVE_PATH + '/' + user.username + '/'
    OVERWRITE_PREVIOUS_FILES = getattr(settings, "OVERWRITE_PREVIOUS_FILES", None)
    ## this determines whether the user's directory already exists; if not it is created, and any existing files are deleted before the replacement is stored
    try:
        os.makedirs(FILE_SAVE_PATH)
    except OSError as exception:
        ## OVERWRITE_PREVIOUS_FILES is treated as true unless explicitly set to False in settings.py
        if OVERWRITE_PREVIOUS_FILES != False:
            for the_file in os.listdir(FILE_SAVE_PATH):
                file_path = os.path.join(FILE_SAVE_PATH, the_file)
                try:
                    if os.path.isfile(file_path):
                        os.unlink(file_path)
                    # NOTE(review): deletion errors are deliberately ignored
                except Exception, e:
                    pass
        if exception.errno != errno.EEXIST:
            raise
    try:
        size = (100, 100)
        image = Image.open(f)
        image.thumbnail(size,Image.ANTIALIAS)
        image.save(FILE_SAVE_PATH + f.name, image.format)
        ## create the user's avatar record if one does not exist yet
        # NOTE(review): get_or_create returns (obj, created); using that
        # object directly would avoid the second query below
        insert = Easy_Avatar.objects.get_or_create(user=user)
        ## now that a record exists we can update its avatar image location
        avatar = Easy_Avatar.objects.get(user=user)
        avatar.docfile = FILE_SAVE_PATH + f.name
        avatar.image_url = FILE_URL_PATH + user.username + '/' + f.name
        avatar.save()
        response["message"] = "Success"
        response["image_url"] = FILE_URL_PATH + user.username + '/' + f.name
    # NOTE(review): bare except hides the real error (bad image data,
    # unbound `user`, DB failure); narrow it and log the exception
    except:
        response["message"] = "I'm sorry we could not upload your image"
        response["image_url"] = ""
    return HttpResponse(json.dumps(response), content_type="application/json")
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Lora Receive File
# Generated: Thu Mar 30 15:01:18 2017
##################################################
if __name__ == '__main__':
    # GRC boilerplate: call XInitThreads() before any GUI work so the
    # X11 client library is thread-safe on Linux; failure is non-fatal.
    import ctypes
    import sys
    if sys.platform.startswith('linux'):
        try:
            x11 = ctypes.cdll.LoadLibrary('libX11.so')
            x11.XInitThreads()
        except:
            print "Warning: failed to XInitThreads()"
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio import wxgui
from gnuradio.eng_option import eng_option
from gnuradio.fft import window
from gnuradio.filter import firdes
from gnuradio.wxgui import fftsink2
from gnuradio.wxgui import forms
from grc_gnuradio import wxgui as grc_wxgui
from optparse import OptionParser
import lora
import wx
class lora_receive_file(grc_wxgui.top_block_gui):
def __init__(self):
grc_wxgui.top_block_gui.__init__(self, title="Lora Receive File")
##################################################
# Variables
##################################################
self.target_freq = target_freq = 868.1e6
self.sf = sf = 7
self.samp_rate = samp_rate = 10e6
self.capture_freq = capture_freq = 866.0e6
self.bw = bw = 125e3
self.symbols_per_sec = symbols_per_sec = bw / (2**sf)
self.offset = offset = -(capture_freq - target_freq)
self.firdes_tap = firdes_tap = firdes.low_pass(1, samp_rate, bw, 10000, firdes.WIN_HAMMING, 6.67)
self.finetune = finetune = -95
self.bitrate = bitrate = sf * (1 / (2**sf / bw))
##################################################
# Blocks
##################################################
self.wxgui_fftsink2_1 = fftsink2.fft_sink_c(
self.GetWin(),
baseband_freq=capture_freq,
y_per_div=10,
y_divs=10,
ref_level=0,
ref_scale=2.0,
sample_rate=samp_rate,
fft_size=1024,
fft_rate=15,
average=False,
avg_alpha=None,
title='FFT Plot',
peak_hold=False,
)
self.Add(self.wxgui_fftsink2_1.win)
self.lora_lora_receiver_0 = lora.lora_receiver(samp_rate, capture_freq, offset, 7, 1000000, 0.01)
_finetune_sizer = wx.BoxSizer(wx.VERTICAL)
self._finetune_text_box = forms.text_box(
parent=self.GetWin(),
sizer=_finetune_sizer,
value=self.finetune,
callback=self.set_finetune,
label='finetune',
converter=forms.int_converter(),
proportion=0,
)
self._finetune_slider = forms.slider(
parent=self.GetWin(),
sizer=_finetune_sizer,
value=self.finetune,
callback=self.set_finetune,
minimum=-150,
maximum=150,
num_steps=300,
style=wx.SL_HORIZONTAL,
cast=int,
proportion=1,
)
self.Add(_finetune_sizer)
self.blocks_throttle_0 = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate,True)
self.blocks_file_source_0 = blocks.file_source(gr.sizeof_gr_complex*1, 'counting_cr4_sf7.cfile', True)
##################################################
# Connections
##################################################
self.connect((self.blocks_file_source_0, 0), (self.blocks_throttle_0, 0))
self.connect((self.blocks_throttle_0, 0), (self.lora_lora_receiver_0, 0))
self.connect((self.blocks_throttle_0, 0), (self.wxgui_fftsink2_1, 0))
# GRC auto-generated variable accessors: each set_<var> stores the new value
# and re-applies every expression / block parameter derived from it.
def get_target_freq(self):
        return self.target_freq
def set_target_freq(self, target_freq):
        self.target_freq = target_freq
        # offset is the tuning delta between capture and target frequency
        self.set_offset(-(self.capture_freq - self.target_freq))
def get_sf(self):
        return self.sf
def set_sf(self, sf):
        self.sf = sf
        # spreading factor changes both symbol rate and bitrate
        self.set_symbols_per_sec(self.bw / (2**self.sf))
        self.set_bitrate(self.sf * (1 / (2**self.sf / self.bw)))
def get_samp_rate(self):
        return self.samp_rate
def set_samp_rate(self, samp_rate):
        self.samp_rate = samp_rate
        # propagate the new sample rate to the FFT sink, filter taps and throttle
        self.wxgui_fftsink2_1.set_sample_rate(self.samp_rate)
        self.set_firdes_tap(firdes.low_pass(1, self.samp_rate, self.bw, 10000, firdes.WIN_HAMMING, 6.67))
        self.blocks_throttle_0.set_sample_rate(self.samp_rate)
def get_capture_freq(self):
        return self.capture_freq
def set_capture_freq(self, capture_freq):
        self.capture_freq = capture_freq
        self.set_offset(-(self.capture_freq - self.target_freq))
        self.wxgui_fftsink2_1.set_baseband_freq(self.capture_freq)
def get_bw(self):
        return self.bw
def set_bw(self, bw):
        self.bw = bw
        self.set_symbols_per_sec(self.bw / (2**self.sf))
        self.set_firdes_tap(firdes.low_pass(1, self.samp_rate, self.bw, 10000, firdes.WIN_HAMMING, 6.67))
        self.set_bitrate(self.sf * (1 / (2**self.sf / self.bw)))
def get_symbols_per_sec(self):
        return self.symbols_per_sec
def set_symbols_per_sec(self, symbols_per_sec):
        self.symbols_per_sec = symbols_per_sec
def get_offset(self):
        return self.offset
def set_offset(self, offset):
        self.offset = offset
        # the LoRa receiver block needs the offset to re-center the signal
        self.lora_lora_receiver_0.set_offset(self.offset)
def get_firdes_tap(self):
        return self.firdes_tap
def set_firdes_tap(self, firdes_tap):
        self.firdes_tap = firdes_tap
def get_finetune(self):
        return self.finetune
def set_finetune(self, finetune):
        self.finetune = finetune
        # keep the slider and its text box in sync with the new value
        self._finetune_slider.set_value(self.finetune)
        self._finetune_text_box.set_value(self.finetune)
def get_bitrate(self):
        return self.bitrate
def set_bitrate(self, bitrate):
        self.bitrate = bitrate
# Build the flowgraph, start it (True starts the wx GUI event loop) and
# block until the flowgraph terminates.
def main(top_block_cls=lora_receive_file, options=None):
    tb = top_block_cls()
    tb.Start(True)
    tb.Wait()
if __name__ == '__main__':
    main()
| Wosser1sProductions/gr-lora | apps/lora_receive_file.py | Python | gpl-3.0 | 6,108 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | GoogleCloudPlatform/jupyterhub-gcp-proxies-authenticator | gcpproxiesauthenticator/__init__.py | Python | apache-2.0 | 574 |
def task():
    """Placeholder homework task; intentionally does nothing."""
    pass
| pybursa/homeworks | aa_example/hw1/test.py | Python | gpl-2.0 | 21 |
#!/bin/env python
import logging
from optparse import OptionParser
import json
import subprocess
import urllib,urllib2
import os
from os import path, access, R_OK
import getpass
import re
import random
import string
import time
import sys
sys.path.insert(0, sys.path[0])
from config import *
from funcs import *
from os.path import basename
class runWorkflow:
    """Client-side driver that parses a workflow definition file and issues
    start/end/update requests to the workflow server through self.f.queryAPI.
    """
    # class-level defaults, overwritten per instance in __init__
    url=""
    f=""
    def __init__(self, url, f ):
        # url: server endpoint; f: funcs helper providing queryAPI
        self.url = url
        self.f = f
    def remove_comments(self, line, sep):
        """Strip everything after any separator character in `sep` (e.g. '#')
        and re-append a newline so downstream splitting still works."""
        for s in sep:
            line = line.split(s)[0]
        line += "\n"
        return line
    def import_workflow(self, input_file, logging):
        """Parse the workflow file into a list of Service objects.

        Each non-comment line must contain exactly three fields
        (servicename, command, waittime) separated by tabs/commas.
        """
        wf = list()
        with open(input_file, 'r') as infile:
            for line in infile.readlines():
                line=self.remove_comments(line, '#')
                if (len(line) < 5): continue  # skip blank/too-short lines
                servicename, command, waittime = re.split(r'[\t\,]+', line)
                logging.info( "servicename=[%s]\ncommand=[%s]waittime=[%s]"%(servicename, command, waittime) )
                wf.append(Service(servicename,command,waittime))
        return wf
    def import_param(self, input_file=None):
        """Read a parameter file and normalize it into the compact
        'key=value,...:key=value' wire format expected by the server."""
        wf = list()
        whole_input = ""
        with open(input_file, 'r') as fo:
            for line in fo.readlines():
                line=self.remove_comments(line, '#')
                if (len(line) < 2): continue
                whole_input += line
        # collapse whitespace/newlines into the ':'/','/'=' wire syntax
        whole_input=re.sub('[\t\s]*[\n\r]','\n', whole_input)
        whole_input=re.sub('\n@',';@', whole_input)
        whole_input=re.sub('\n',':', whole_input)
        whole_input=re.sub(r'[\t\s]*=+[\t\s:]*', '=', whole_input)
        whole_input=re.sub(r'[\t\s]+', ',', whole_input)
        whole_input=re.sub(r':@?$', '', whole_input)
        return whole_input
    def updateRunParams(self, wkey, rpid, logging):
        """Attach the run-parameters id to an existing workflow key."""
        data = urllib.urlencode({'func':'updateRunParams', 'wkey':wkey, 'runparamsid':rpid })
        self.f.queryAPI(self.url, data, "updateRunParams:"+wkey, logging)
    def startWorkflow(self, inputparam, defaultparam, username, wfname, wkey, outdir, slen, logging):
        """Register the workflow with the server; returns the server reply
        (the workflow key, or a string starting with 'ERROR')."""
        data = urllib.urlencode({'func':'startWorkflow', 'inputparam':inputparam,
                                 'defaultparam':defaultparam, 'username':username,
                                 'workflow':wfname, 'wkey':wkey, 'outdir':outdir, 'services':slen})
        return self.f.queryAPI(self.url, data, "workflow started:"+wkey, logging)
    def startService(self, service, wkey, username, logging):
        """Ask the server to run one Service; returns its status string."""
        data = urllib.urlencode({'func':'startService', 'servicename':service.servicename,
                                 'wkey':wkey, 'command':service.command, 'username':username})
        return self.f.queryAPI(self.url, data, "service started:"+service.servicename, logging)
    def endWorkflow(self, wkey, logging):
        """Tell the server the workflow is done; returns its status string."""
        data = urllib.urlencode({'func':'endWorkflow', 'wkey':wkey})
        return self.f.queryAPI(self.url, data, "endworkflow:"+wkey, logging)
    def checkPermissions(self, username, outdir):
        """Verify the user may write to outdir on the cluster."""
        data = urllib.urlencode({'func':'checkPermissions', 'username':username, 'outdir':outdir})
        return self.f.queryAPI(self.url, data, None, None)
class Service:
    """Value object describing one workflow step.

    waittime is kept as a string (as read from the workflow file) and is
    converted with float() by the caller when used.
    """
    def __init__(self, servicename="service", command="command", waittime="60"):
        self.servicename = servicename
        self.command = command
        self.waittime = waittime
def main():
try:
parser = OptionParser()
parser.add_option('-i', '--inputparam', help='input parameters for the workflow', dest='inputparam')
parser.add_option('-p', '--defaultparam', help='defined parameter file that will be run on cluster', dest='defaultparam')
parser.add_option('-u', '--username', help='defined user in the cluster', dest='username')
parser.add_option('-k', '--wkey', help='defined key for the workflow', dest='wkey')
parser.add_option('-w', '--workflowfile', help='workflow filename', dest='workflowfile')
parser.add_option('-d', '--dbhost', help='dbhost name', dest='dbhost')
parser.add_option('-o', '--outdir', help='output directory in the cluster', dest='outdir')
parser.add_option('-f', '--config', help='configuration parameter section', dest='config')
parser.add_option('-r', '--runid', help='runid', dest='runid')
(options, args) = parser.parse_args()
except:
print "OptionParser Error:for help use --help"
sys.exit(2)
INPUTPARAM = options.inputparam
DEFAULTPARAM = options.defaultparam
USERNAME = options.username
WKEY = options.wkey
WORKFLOWFILE = options.workflowfile
DBHOST = options.dbhost
OUTDIR = options.outdir
CONFIG = options.config
RUNID = options.runid
f = funcs()
config = getConfig(CONFIG)
workflow = runWorkflow(config['url'], f)
LOGPATH=config['logpath']
#This section is just for username conversion in the cluster can be removed in the future
if (CONFIG != "Docker" and CONFIG != "Travis" and CONFIG != "Amazon"):
com="grep "+USERNAME+" /project/umw_biocore/svcgalaxy/conv.file|awk '{print $2}'"
USERNAME=str(os.popen(com).readline().rstrip())
########
if (USERNAME and len(USERNAME)<3):
print "Error:Username doesn't exist"
sys.exit(2)
if (OUTDIR==None):
OUTDIR="~/out"
if (OUTDIR.find("/")==-1):
OUTDIR="~/"+OUTDIR
if (INPUTPARAM!=None):
if path.isfile(INPUTPARAM) and access(INPUTPARAM, R_OK):
INPUTPARAM = workflow.import_param(INPUTPARAM)
else:
INPUTPARAM = re.sub(" ", "", INPUTPARAM)
logging.basicConfig(filename=LOGPATH+'/run'+str(RUNID)+'.log', filemode='a',format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.DEBUG)
logging.info(USERNAME+":"+OUTDIR)
logging.info(INPUTPARAM)
if (WKEY==None):
WKEY="start"
else:
workflow.updateRunParams(WKEY, RUNID, logging)
services=workflow.import_workflow(WORKFLOWFILE, logging)
slen=str(len(services))
wfbase = os.path.splitext(basename(WORKFLOWFILE))[0]
wfname = wfbase.split('.')[0]
wkey = workflow.startWorkflow(INPUTPARAM, DEFAULTPARAM, USERNAME, wfname, WKEY, OUTDIR, slen,logging)
if (wkey.startswith("ERROR")):
logging.warning("ERROR:"+ wkey)
print "Check the parameter files:\n"
sys.exit(2);
print "WORKFLOW STARTED:"+wkey+"\n"
logging.info('WORKFLOW STARTED'+wkey+"\n")
workflow.updateRunParams(wkey, RUNID, logging)
for service in services:
br=1
checkcount=0
while ( br==1):
ret=workflow.startService(service, wkey, USERNAME, logging)
print ret + "\n"
time.sleep(5)
if (ret.startswith("RUNNING") and float(service.waittime)>0):
time.sleep(float(service.waittime))
elif (ret.startswith("ERROR")):
print service.servicename + ":" + ret + "\n"
logging.warning("ERROR:"+ret)
logging.warning("ERROR:"+service.command)
print "Check the command:\n"
print service.command + "\n"
sys.exit(2);
elif (ret.startswith("DONE")):
checkcount=0
br=0
checkcount=checkcount+1
br=1
print "All the services Ended"
while ( br==1):
res=workflow.endWorkflow(wkey, logging)
#print ret + "\n"
if (ret.startswith("WRUNNING")):
time.sleep(5)
else:
br=0
if __name__ == "__main__":
main()
| nephantes/dolphin-tools | src/runWorkflow.py | Python | mit | 7,698 |
class Solution:
    def translateNum(self, num: int) -> int:
        """Count the distinct letter translations of `num`.

        Each value 0-25 (as one or two digits) maps to a letter 'a'-'z';
        a two-digit pair is only usable when it lies in '10'..'25' (no
        leading zero).  Replaces the original exponential recursion —
        which materialized every translated string just to count them —
        with an O(n) two-term DP (Fibonacci-style).

        Assumes num is a non-negative integer (same as the original).
        """
        digits = str(num)
        # ways to translate the prefix of length i-2 / i-1
        two_back, one_back = 1, 1
        for i in range(1, len(digits)):
            pair = digits[i - 1:i + 1]
            # the current digit alone always extends every translation;
            # a valid two-digit pair additionally extends the i-2 ones
            current = one_back + (two_back if '10' <= pair <= '25' else 0)
            two_back, one_back = one_back, current
        return one_back
| fy0/my-leetcode | 990.Satisfiability Of Equality Equations/main.py | Python | apache-2.0 | 674 |
"""
The MIT License (MIT)
Copyright (c) [2015-2018] [Andrew Annex]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from .spiceypy import *
from .utils import support_types
__author__ = 'AndrewAnnex'
# Default error-reporting setup so programs don't just exit out: on a SPICE
# error, RETURN to the caller (instead of aborting) and send error output to
# the null device; callers can inspect and reset the error state themselves.
erract("set", 10, "return")
errdev("set", 10, "null")
| drbitboy/SpiceyPy | spiceypy/__init__.py | Python | mit | 1,313 |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for methods in the visualization registry."""
from core.domain import visualization_registry
from core.tests import test_utils
class VisualizationRegistryUnitTests(test_utils.GenericTestBase):
    """Test for the visualization registry."""
    def test_visualization_registry(self):
        """Sanity checks on the visualization registry."""
        # At least one visualization must be registered.
        self.assertGreater(
            len(visualization_registry.Registry.get_all_visualization_ids()),
            0)
    def test_get_full_html(self):
        """Check that the visualization HTML contains templates and directives
        for all visualizations.
        """
        full_html = visualization_registry.Registry.get_full_html()
        all_visualization_ids = (
            visualization_registry.Registry.get_all_visualization_ids())
        # Every registered visualization must contribute both its Angular
        # directive name and its ng-template to the combined HTML blob.
        for visualization_id in all_visualization_ids:
            self.assertIn('oppiaVisualization%s' % visualization_id, full_html)
            self.assertIn(
                '<script type="text/ng-template" id="visualizations/%s">' %
                visualization_id, full_html)
| himanshu-dixit/oppia | core/domain/visualization_registry_test.py | Python | apache-2.0 | 1,717 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyCoverage(PythonPackage):
    """ Testing coverage checker for python """
    # Upstream project page and release-tarball URL used by Spack's fetcher.
    homepage = "http://nedbatchelder.com/code/coverage/"
    url = "https://pypi.io/packages/source/c/coverage/coverage-4.3.4.tar.gz"
    # Known releases and their md5 checksums.
    version('4.3.4', '89759813309185efcf4af8b9f7762630')
    version('4.0a6', '1bb4058062646148965bef0796b61efc')
    # setuptools is only needed at build time, not at run time.
    depends_on('py-setuptools', type='build')
| TheTimmy/spack | var/spack/repos/builtin/packages/py-coverage/package.py | Python | lgpl-2.1 | 1,639 |
#! /usr/bin/env python
# in this file, the input frame is in rgb format
import Queue
import StringIO
import base64
import json
import logging
import multiprocessing
import sys
import os
import threading
import time
from operator import itemgetter
import cv2
import dlib
import numpy as np
from PIL import Image
import traceback
from camShift import camshiftTracker, meanshiftTracker
from NetworkProtocol import *
from openfaceClient import OpenFaceClient, AsyncOpenFaceClientProcess
from demo_config import Config
from MyUtils import *
DEBUG = Config.DEBUG
WRITE_PICTURE_DEBUG=Config.WRITE_PICTURE_DEBUG
# When picture debugging is enabled, start from an empty debug directory.
if WRITE_PICTURE_DEBUG:
    remove_dir(Config.WRITE_PICTURE_DEBUG_PATH)
    create_dir(Config.WRITE_PICTURE_DEBUG_PATH)
# ROIs narrower/shorter than these pixel thresholds are treated as lost faces.
MIN_WIDTH_THRESHOLD=3
MIN_HEIGHT_THRESHOLD=3
# NOTE(review): unused in this chunk; presumably detection-to-tracking frame
# ratio — confirm before relying on it.
DETECT_TRACK_RATIO = 10
class TrackerInitializer(object):
    """Bundle of inputs needed to (re)start a tracker: the previous frame,
    the ROI found on it, and the current frame."""
    def __init__(self, prev_frame, prev_roi, frame):
        self.prev_frame=prev_frame  # frame the ROI was located on
        self.prev_roi = prev_roi    # (x1, y1, x2, y2) on prev_frame
        self.frame = frame          # current frame to continue tracking on
def create_dlib_tracker(frame, roi):
    """Start a dlib correlation tracker on the (x1, y1, x2, y2) roi of frame."""
    x1, y1, x2, y2 = roi
    new_tracker = dlib.correlation_tracker()
    new_tracker.start_track(frame, dlib.rectangle(x1, y1, x2, y2))
    return new_tracker
def create_tracker(frame, roi):
    """Start the default (meanshift-based) tracker on the given roi of frame.

    camshiftTracker is also available; meanshift is the current choice.
    """
    x1, y1, x2, y2 = roi
    new_tracker = meanshiftTracker()
    new_tracker.start_track(frame, dlib.rectangle(x1, y1, x2, y2))
    return new_tracker
def create_trackers(frame, rois, dlib=False):
    """Create one tracker per ROI on frame.

    NOTE: the `dlib` flag (kept for interface compatibility, although it
    shadows the dlib module inside this function) selects the dlib
    correlation tracker instead of the default meanshift tracker.
    """
    make_tracker = create_dlib_tracker if dlib else create_tracker
    return [make_tracker(frame, roi) for roi in rois]
def drectangle_to_tuple(drectangle):
    """Convert a dlib drectangle (float coordinates) to an int
    (x1, y1, x2, y2) tuple, truncating toward zero."""
    return (int(drectangle.left()),
            int(drectangle.top()),
            int(drectangle.right()),
            int(drectangle.bottom()))
def euclidean_distance_square(roi1, roi2):
    """Squared Euclidean distance between two (x, y) points (only the first
    two components of each argument are used)."""
    dx = roi1[0] - roi2[0]
    dy = roi1[1] - roi2[1]
    return dx * dx + dy * dy
def np_array_to_jpeg_string(frame):
    """JPEG-encode a numpy image with OpenCV and return it base64-encoded."""
    _, jpeg_img=cv2.imencode('.jpg', frame)
    face_string = base64.b64encode(jpeg_img)
    return face_string
def np_array_to_string(frame):
    """Base64-encode the raw bytes of a numpy array (no image encoding)."""
    return base64.b64encode(frame.tobytes())
def imwrite_rgb(path, frame):
    """Write an RGB frame to `path` (OpenCV writes BGR, so convert first)."""
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    sys.stdout.write('writing img to {}'.format(path))
    sys.stdout.flush()
    cv2.imwrite(path,frame)
class FaceROI(object):
    """A tracked face: bounding box, cropped pixels, recognized name and the
    tracker following it.

    The ROI may drift out of the image bounds, which callers treat as a
    tracking failure.
    """

    def __init__(self, roi, data=None, name=None, tracker=None):
        self.roi = roi              # (x1, y1, x2, y2) bounding box
        self.data = data            # cropped face pixels (numpy array)
        self.name = name            # recognized person name, if any
        self.tracker = tracker      # tracker object following this face
        self.swap_tmp_data = None   # staging buffer used during face swapping

    def get_json(self, send_data=False):
        """Serialize the bounding box and name (and, when send_data is True,
        the base64 JPEG of the pixels) to a JSON string."""
        x1, y1, x2, y2 = self.roi
        payload = {
            'roi_x1': x1,
            'roi_y1': y1,
            'roi_x2': x2,
            'roi_y2': y2,
            'name': self.name,
        }
        if send_data:
            payload['data'] = np_array_to_jpeg_string(self.data)
        return json.dumps(payload)

    def get_location(self):
        """Return the (x, y) center of the bounding box."""
        x1, y1, x2, y2 = self.roi
        return ((x1 + x2) / 2, (y1 + y2) / 2)
class RecognitionRequestUpdate(object):
    """Links an in-flight recognition request id to the face-center location
    it was issued for, so the async response can be matched back."""
    def __init__(self, recognition_frame_id, location):
        self.recognition_frame_id = recognition_frame_id  # request/frame id
        self.location=location                            # (x, y) face center
class FaceTransformation(object):
    def __init__(self):
        """Set up logging, the openface client, the background sync thread
        and the separate face-detection process with their shared queues."""
        custom_logger_level=logging.INFO
        if Config.DEBUG:
            custom_logger_level=logging.DEBUG
        logging.basicConfig(
            format='%(asctime)s %(name)-12s %(levelname)-8s %(thread)d %(message)s',
            filename='faceswap-proxy.log',
            filemode='w+'
        )
        formatter = logging.Formatter('%(asctime)-15s %(levelname)-8s %(processName)s %(message)s')
        self.logger=logging.getLogger(__name__)
        self.logger.setLevel(custom_logger_level)
        ch = logging.StreamHandler(sys.stdout)
        ch.setFormatter(formatter)
        self.logger.addHandler(ch)
        self.cnt=0
        self.detector = dlib.get_frontal_face_detector()
        self.faces=[]            # current FaceROI list (guarded by faces_lock)
        self.face_table = {}     # name -> name mapping used for face swapping
        self.faces_lock=threading.Lock()
        # queues shared with the detection process
        self.img_queue = multiprocessing.Queue()        # frames to detect on
        self.trackers_queue = multiprocessing.Queue()   # detection results back
        self.recognition_queue = multiprocessing.Queue()  # recognition updates
        # openface related
        self.training_cnt = 0
        self.server_ip = u"ws://localhost"
        self.server_port = 9000
        # two openface clients are used overall:
        # this synchronous one for blocking responses; the detection process
        # creates its own async client for non-blocking responses.
        self.openface_client = OpenFaceClient(self.server_ip, self.server_port)
        resp = self.openface_client.isTraining()
        self.logger.info('resp: {}'.format(resp))
        self.training = json.loads(resp)['val']
        self.logger.info('openface is training?{}'.format(self.training))
        mpl = multiprocessing.log_to_stderr()
        mpl.setLevel(custom_logger_level)
        # set by the detection process when new tracker state is available
        self.correct_tracking_event = multiprocessing.Event()
        self.correct_tracking_event.clear()
        self.tracking_thread_idle_event = threading.Event()
        self.tracking_thread_idle_event.clear()
        self.sync_thread_stop_event=threading.Event()
        self.sync_thread_stop_event.clear()
        # background thread that folds detection/recognition results into self.faces
        self.sync_faces_thread = threading.Thread(target=self.correct_tracking,
                                                  name='bgThread',
                                                  kwargs={'stop_event' : self.sync_thread_stop_event})
        self.sync_faces_thread.start()
        self.detection_process_shared_face_fragments=[]
        self.detection_process_stop_event = multiprocessing.Event()
        self.detection_process_stop_event.clear()
        # separate process running face detection so the main loop stays responsive
        self.detection_process = multiprocessing.Process(
            target = self.detection_update_thread,
            name='DetectionProcess',
            args=(self.img_queue,
                  self.trackers_queue,
                  self.recognition_queue,
                  self.server_ip,
                  self.server_port,
                  self.correct_tracking_event,
                  self.detection_process_stop_event,))
        self.detection_process.start()
        self.image_width=Config.MAX_IMAGE_WIDTH
    # background thread that updates self.faces once
    # detection process signaled
    def correct_tracking(self, stop_event=None):
        """Background loop: fold detection-process tracker updates and async
        recognition responses into self.faces until stop_event is set.

        When correct_tracking_event is set, fresh trackers from the
        detection process replace self.faces (carrying over names from the
        nearest previous faces); otherwise pending recognition updates are
        drained and matched to the nearest current face.
        """
        #TODO: should yield computing power unless there are events going on
        # right now in between frames, this loop just keeps computing forever
        # maps recognition request id -> face-center location it was issued for
        in_fly_recognition_info={}
        while (not stop_event.is_set()):
            self.tracking_thread_idle_event.wait(1)
            if (not self.tracking_thread_idle_event.is_set()):
                continue
            if (self.correct_tracking_event.is_set()):
                self.logger.debug('bg-thread getting detection updates')
                try:
                    tracker_updates = self.trackers_queue.get(timeout=1)
                    faces = tracker_updates['faces']
                    tracker_frame = tracker_updates['frame']
                    for face in faces:
                        # carry the recognized name over from the closest old face
                        nearest_face = self.find_nearest_face(face, self.faces)
                        if (nearest_face):
                            face.name = nearest_face.name
                        else:
                            face.name=""
                        tracker = create_tracker(tracker_frame, face.roi)
                        face.tracker = tracker
                    self.logger.debug('bg-thread updating faces from detection process!')
                    self.faces_lock.acquire()
                    self.faces = faces
                    self.faces_lock.release()
                    self.correct_tracking_event.clear()
                except Queue.Empty:
                    self.logger.info('bg-thread updating faces queue empty!')
            else:
                try:
                    update = self.recognition_queue.get_nowait()
                    if (isinstance(update, RecognitionRequestUpdate)):
                        # a request was sent out; remember where it came from
                        in_fly_recognition_info[update.recognition_frame_id] = update.location
                    else:
                        # a raw JSON response from the openface server
                        self.logger.debug('main process received recognition resp {}'.format(update))
                        recognition_resp = json.loads(update)
                        if (recognition_resp['type'] == FaceRecognitionServerProtocol.TYPE_frame_resp
                            and recognition_resp['success']):
                            frame_id = recognition_resp['id']
                            if (frame_id in in_fly_recognition_info):
                                # TODO: add in min distance requirement
                                self.faces_lock.acquire()
                                nearest_face = self.find_nearest_face(in_fly_recognition_info.pop(frame_id), self.faces)
                                if (nearest_face):
                                    nearest_face.name = recognition_resp['name']
                                self.faces_lock.release()
                                self.logger.debug('main process received recognition name {}'
                                                  .format(recognition_resp['name']))
                            else:
                                self.logger.error('received response but no frame info about the request')
                        else:
                            self.logger.error('received response is not frame_resp or frame_resp success is false')
                except Queue.Empty:
                    pass
    def terminate(self):
        """Shut everything down: signal both stop events first, then join the
        detection process and the sync thread, finally close the client."""
        self.detection_process_stop_event.set()
        self.sync_thread_stop_event.set()
        self.detection_process.join()
        self.logger.info('detection process shutdown!')
        self.sync_faces_thread.join()
        self.logger.info('sync faces thread shutdown!')
        self.openface_client.terminate()
        self.logger.debug('transformer terminate!')
def np_array_to_jpeg_data_url(self, frame):
face_string = np_array_to_jpeg_string(frame)
face_string = "data:image/jpeg;base64," + face_string
return face_string
    def recognize_faces(self, openface_client, frame, rois):
        """Synchronously recognize each ROI of `frame` via the openface
        server and return the list of names (one per ROI, in order)."""
        names =[]
        for roi in rois:
            (x1,y1,x2,y2) = roi
            face_pixels = np.copy(frame[y1:y2+1, x1:x2+1])
            face_string = self.np_array_to_jpeg_data_url(face_pixels)
            # has to use the same client as mainprocess
            resp = openface_client.addFrame(face_string, 'detect')
            self.logger.debug('server response: {}'.format(resp))
            resp_dict = json.loads(resp)
            name = resp_dict['name']
            self.logger.debug('recognize: {}'.format(name))
            names.append(name)
        return names
# while (frame_cnt < 10):
# try:
# frame = img_queue.get(timeout=1)
# self.update_trackers(trackers, frame)
# frame_cnt +=1
# except Queue.Empty:
# pass
# faces=[]
# for idx, tracker in enumerate(trackers):
# new_roi = tracker.get_position()
# cur_roi = (int(new_roi.left()),
# int(new_roi.top()),
# int(new_roi.right()),
# int(new_roi.bottom()))
# name = names[idx]
# self.logger.debug('recognized faces {0} {1}'.format(idx, name))
# face = FaceROI(cur_roi, name=name)
# faces.append(face)
# if (len(faces)>0):
# tracker_updates = {'frame':frame, 'faces':faces}
# trackers_queue.put(tracker_updates)
# sync_face_event.set()
def find_nearest_face(self, src, nearby_faces, max_distance=None):
distances = []
# find the closest face object
for face in nearby_faces:
face_center = face.get_location()
if (isinstance(src, FaceROI)):
src_center = src.get_location()
else:
src_center=src
distance = euclidean_distance_square(face_center, src_center)
if max_distance is not None:
if distance <= max_distance:
distances.append(distance)
else:
self.logger.info('drift too much. do not update recognition result')
else:
distances.append(distance)
if distances:
(face_idx, _) = min(enumerate(distances), key=itemgetter(1))
return nearby_faces[face_idx]
else:
return None
    # TODO: to be finished!!!
    # called in another recognize listener process (listener)
    def on_receive_openface_server_result(self, resp, queue=None, busy_event=None):
        """Callback for async openface responses (runs in the listener
        process): clear the busy flag and forward frame responses onto
        `queue` for the main process to consume."""
        resp_json=json.loads(resp)
        if (resp_json['type']== FaceRecognitionServerProtocol.TYPE_frame_resp):
            if (self.training):
                # training mode must use the synchronous client instead
                self.logger.error('training is using async openface response')
                return
            if (busy_event.is_set()):
                self.logger.debug('cleared recognition busy')
                busy_event.clear()
            else:
                self.logger.debug('server busy event not set')
            self.logger.debug('server response: {}'.format(resp[:40]))
            queue.put(resp)
    def send_face_recognition_requests(self, openface_client, frame, rois, frame_id):
        """Send one async recognition request per ROI, numbering them from
        frame_id; returns the next unused id."""
        for roi in rois:
            (x1,y1,x2,y2) = roi
            # TODO: really need copy here?
            face_pixels = np.copy(frame[y1:y2+1, x1:x2+1])
            face_string = self.np_array_to_jpeg_data_url(face_pixels)
            # has to use the same client as mainprocess
            openface_client.addFrameWithID(face_string, 'detect', frame_id)
            frame_id+=1
        return frame_id
def draw_rois(self, img,rois):
for roi in rois:
(x1,y1,x2,y2) = tuple(roi)
cv2.rectangle(img, (x1,y1), (x2, y2), (255,0,0))
def pic_output_path(self,idx):
return os.path.normpath(Config.WRITE_PICTURE_DEBUG_PATH+'/'+ str(idx)+'.jpg')
    # send recognition request
    def detection_update_thread(self,
                                img_queue,
                                trackers_queue,
                                recognition_queue,
                                openface_ip,
                                openface_port,
                                sync_face_event,
                                stop_event):
        """Detection-process main loop.

        Pulls frames from img_queue, detects faces, fires async
        recognition requests (unless one is already in flight), tracks
        the detections forward through any frames that queued up in the
        meantime, then publishes the caught-up FaceROIs on trackers_queue
        and signals sync_face_event so the main process adopts them.
        """
        # set while a recognition request is outstanding; cleared by the
        # async client callback when the response arrives
        recognition_busy_event = multiprocessing.Event()
        recognition_busy_event.clear()
        try:
            self.logger.info('created')
            detector = dlib.get_frontal_face_detector()
            detection_process_openface_client=AsyncOpenFaceClientProcess(call_back=self.on_receive_openface_server_result, queue=recognition_queue, busy_event=recognition_busy_event)
            recognition_frame_id=0
            frame_id=0
            while (not stop_event.is_set()):
                try:
                    frame = img_queue.get(timeout=1)
                except Queue.Empty:
                    continue
                rois = self.detect_faces(frame, detector)
                self.logger.debug('finished detecting')
                if (len(rois)>0):
                    if (not recognition_busy_event.is_set() ):
                        recognition_busy_event.set()
                        for roi in rois:
                            (x1,y1,x2,y2) = roi
                            # TODO: really need copy here?
                            face_pixels = frame[y1:y2+1, x1:x2+1]
                            face_string = self.np_array_to_jpeg_data_url(face_pixels)
                            # has to use the same client as mainprocess
                            detection_process_openface_client.addFrameWithID(face_string, 'detect', recognition_frame_id)
                            self.logger.debug('send out recognition request')
                            # record where the request came from so the response
                            # can be matched back to a face
                            roi_center=((x1 + x2)/2, (y1+y2)/2)
                            recognition_queue.put(RecognitionRequestUpdate(recognition_frame_id, roi_center))
                            recognition_frame_id+=1
                        self.logger.debug('after putting updates on queues')
                    else:
                        self.logger.debug('skipped sending recognition')
                    if WRITE_PICTURE_DEBUG:
                        self.draw_rois(frame,rois)
                        imwrite_rgb(self.pic_output_path(str(frame_id)+'_detect'), frame)
                        frame_id+=1
                    trackers = create_trackers(frame, rois)
                    # detection is slow: track through every frame that queued
                    # up meanwhile so the published ROIs are current
                    frame_available = True
                    frame_cnt = 0
                    while frame_available:
                        try:
                            frame = img_queue.get_nowait()
                            self.update_trackers(trackers, frame)
                            rois=[drectangle_to_tuple(tracker.get_position()) for tracker in trackers]
                            if WRITE_PICTURE_DEBUG:
                                self.draw_rois(frame,rois)
                                imwrite_rgb(self.pic_output_path(frame_id), frame)
                                frame_id+=1
                            frame_cnt +=1
                        except Queue.Empty:
                            self.logger.debug('all image catched up! # images {}'.format(frame_cnt))
                            frame_available = False
                    faces=[]
                    for idx, tracker in enumerate(trackers):
                        new_roi = tracker.get_position()
                        cur_roi = (int(new_roi.left()),
                                   int(new_roi.top()),
                                   int(new_roi.right()),
                                   int(new_roi.bottom()))
                        # names are filled in later by the recognition pipeline
                        name = ""
                        face = FaceROI(cur_roi, name=name)
                        faces.append(face)
                    tracker_updates = {'frame':frame, 'faces':faces}
                    trackers_queue.put(tracker_updates)
                    sync_face_event.set()
            # wake thread up for terminating
            sync_face_event.set()
        except Exception as e:
            traceback.print_exc()
            print()
            raise e
def update_trackers(self, trackers, frame, rgb_to_hsv=True):
if rgb_to_hsv:
hsv_frame = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
for idx, tracker in enumerate(trackers):
tracker.update(hsv_frame, is_hsv=True)
    def track_faces(self, frame, faces):
        """Advance every face's tracker on `frame`, refresh each face's ROI
        and pixel crop, and drop faces whose ROI became invalid/too small.

        Returns the surviving list of FaceROI objects.
        """
        self.logger.debug('# faces tracking {} '.format(len(faces)))
        to_be_removed_face = []
        # cvtColor is an expensive operation
        hsv_frame = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
        if (len(faces) == 0):
            # nothing to track: brief sleep (5 ms) to avoid a busy loop
            time.sleep(0.005)
        else:
            for idx, face in enumerate(faces):
                tracker = face.tracker
                if DEBUG:
                    start = time.time()
                (x1,y1,x2,y2)=face.roi
                tracker.update(hsv_frame, is_hsv=True)
                new_roi = tracker.get_position()
                if DEBUG:
                    end = time.time()
                    self.logger.debug('tracker run: {}'.format((end-start)*1000))
                (x1,y1,x2,y2) = (int(new_roi.left()),
                                 int(new_roi.top()),
                                 int(new_roi.right()),
                                 int(new_roi.bottom()))
                face.roi = (x1,y1,x2,y2)
                if (self.is_small_face(face.roi)):
                    # ROI left the frame or shrank below threshold: drop it
                    to_be_removed_face.append(face)
                else:
                    face.data = np.copy(frame[y1:y2+1, x1:x2+1])
        faces = [face for face in faces if face not in to_be_removed_face]
        return faces
def get_large_faces(self, rois, trackers):
# find small faces
small_face_idx=self.find_small_face_idx(rois)
large_rois = rois
large_rois_trackers = trackers
if ( len(small_face_idx) > 0):
large_rois = [ rois[i] for i in xrange(len(rois)) if i not in set(small_face_idx) ]
large_rois_trackers = [ trackers[i] for i in xrange(len(trackers)) if i not in set(small_face_idx) ]
return large_rois, large_rois_trackers
    def swap_face(self,frame):
        """Per-frame entry point: track existing faces on `frame`, queue the
        frame for the detection process, and return the JSON snippets
        (ROI + pixels) for every currently tracked face."""
        # leaving training mode the first time a swap frame arrives
        if self.training:
            self.logger.debug('main-process stopped openface training!')
            self.training=False
            self.openface_client.setTraining(False)
        self.image_width=frame.shape[1]
        self.logger.debug('image width: {}'.format(self.image_width))
        # forward img to DetectionProcess
        # preprocessing to grey scale can reduce run time for detection process only handle greyscale
        # openface need rgb images
        grey_frame = frame
        faces=self.track_faces(frame, self.faces)
        self.faces_lock.acquire()
        self.faces=faces
        self.faces_lock.release()
        return_faces = self.faces
        face_snippets = []
        for face in return_faces:
            try:
                face_json = face.get_json(send_data=True)
                face_snippets.append(face_json)
            except ValueError:
                # face pixels could not be serialized; skip this face
                pass
        self.img_queue.put(grey_frame)
        self.logger.debug('# faces in image: {}'.format(len(self.faces)))
        return face_snippets
def get_FaceROI_from_string(self, faces, name):
for face in faces:
if face.name == name:
return face
return None
# shuffle faces
def shuffle_roi(self, faces, face_table):
to_people = []
for idx, face in enumerate(faces):
if face.name in face_table:
(roi_x1, roi_y1, roi_x2, roi_y2) = face.roi
to_person = self.get_FaceROI_from_string(faces, face_table[face.name])
if (None == to_person):
continue
to_people.append(to_person)
nxt_face = to_person.data
cur_face = face.data
dim = (cur_face.shape[1],cur_face.shape[0])
try:
nxt_face_resized = cv2.resize(nxt_face, dim, interpolation = cv2.INTER_AREA)
except:
print 'error: face resize failed!'
face.swap_tmp_data=nxt_face_resized
to_people = set(to_people)
faces = [face for face in faces if face not in to_people]
# faces = [face in faces if face in face_table and face not in to_people]
# faces.extend([ face in faces if face in face_table and face in to_people])
for face in faces:
if None != face.swap_tmp_data:
face.data = face.swap_tmp_data
return faces
def is_small_face(self, roi):
(x1,y1,x2,y2) = roi
# has negative number
if ( x1<0 or y1<0 or x2<0 or y2<0):
return True
# region too small
if ( abs(x2-x1) < MIN_WIDTH_THRESHOLD or abs(y2-y1) < MIN_HEIGHT_THRESHOLD):
self.logger.info('face too small discard')
return True
def find_small_face_idx(self, dets):
idx=[]
for i, d in enumerate(dets):
if ( self.is_small_face(d) ):
idx.append(i)
return idx
def rm_small_face(self, dets):
filtered_dets=[]
for i, d in enumerate(dets):
if not self.is_small_face(d):
filtered_dets.append(d)
return filtered_dets
    # enlarge means enlarge the area a bit
    def detect_faces(self, frame, detector, largest_only=False):
        """Run *detector* on *frame* and return sorted (x1, y1, x2, y2) ROIs.

        When *largest_only* is True, only the detection with the biggest
        area is kept.  Small/invalid detections are filtered out.
        """
        if DEBUG:
            start = time.time()
        # upsampling will take a lot of time
        dets = detector(frame)
        if DEBUG:
            end = time.time()
            self.logger.debug('detector run: {}'.format((end-start)*1000))
        if largest_only:
            if (len(dets) > 0):
                max_det = max(dets, key=lambda rect: rect.width() * rect.height())
                dets = [max_det]
        # Convert detector rectangles to plain int tuples.
        # NOTE(review): relies on Python 2 `map` returning a list.
        dets=map(lambda d: (int(d.left()), int(d.top()), int(d.right()), int(d.bottom())), dets)
        rois=self.rm_small_face(dets)
        self.logger.debug('# detected : {}'.format(len(rois)))
        rois=sorted(rois)
        return rois
def addPerson(self, name):
return self.openface_client.addPerson(name)
    # frame is a numpy array
    def train(self, frame, name):
        """Consume one frame as training data for the person *name*.

        Enters training mode on the first call, detects the single largest
        face in *frame*, and forwards its pixels (as a JPEG data URL) to the
        openface server.  Returns ``(training_cnt, face_json_or_None)``.
        """
        # Enter training mode on the first training frame.
        if self.training == False:
            self.training = True
            self.training_cnt = 0
            self.openface_client.setTraining(True)
        # detect the largest face
        rois = self.detect_faces(frame, self.detector, largest_only=True)
        # only the largest face counts
        if (len(rois) > 1):
            self.logger.info("more than 1 faces detected in training frame. abandon frame")
            return self.training_cnt, None
        if (len(rois) == 0):
            self.logger.debug("No faces detected in training frame. abandon frame")
            return self.training_cnt, None
        self.logger.info("training: sucesss - detected 1 face. add frame")
        # NOTE(review): len(rois) is always exactly 1 here, so this guard is
        # redundant; kept as-is.
        if 1 == len(rois) :
            (x1,y1,x2,y2) = rois[0]
            # Crop the face pixels out of the frame (end indices inclusive).
            face_pixels = np.copy(frame[y1:y2+1, x1:x2+1])
            face = FaceROI(rois[0], data=face_pixels, name="training")
            face_string = self.np_array_to_jpeg_data_url(face_pixels)
            # face_img = Image.fromarray(face)
            # sio = StringIO.StringIO()
            # face_img.save(sio, 'JPEG')
            # jpeg_img = sio.getvalue()
            # if DEBUG:
            #     face_img.save('training.jpg')
            # face_string = base64.b64encode(jpeg_img)
            # face_string = "data:image/jpeg;base64," + face_string
            resp = self.openface_client.addFrame(face_string, name)
            resp = json.loads(resp)
            success = resp['success']
            # Only count the frame if the openface server accepted it.
            if success:
                self.training_cnt +=1
        return self.training_cnt, face.get_json()
| Jamesjue/FaceSwap-server | server/face_swap.py | Python | apache-2.0 | 28,570 |
# Copyright (C) 2001,2002 Python Software Foundation
# csv package unit tests
import copy
import sys
import unittest
from io import StringIO
from tempfile import TemporaryFile
import csv
import gc
import pickle
from test import support
from itertools import permutations
from textwrap import dedent
from collections import OrderedDict
class Test_Csv(unittest.TestCase):
    """
    Test the underlying C csv parser in ways that are not appropriate
    from the high level interface. Further tests of this nature are done
    in TestDialectRegistry.
    """
    def _test_arg_valid(self, ctor, arg):
        # Constructor argument validation shared by reader and writer tests.
        self.assertRaises(TypeError, ctor)
        self.assertRaises(TypeError, ctor, None)
        self.assertRaises(TypeError, ctor, arg, bad_attr = 0)
        self.assertRaises(TypeError, ctor, arg, delimiter = 0)
        self.assertRaises(TypeError, ctor, arg, delimiter = 'XX')
        self.assertRaises(csv.Error, ctor, arg, 'foo')
        self.assertRaises(TypeError, ctor, arg, delimiter=None)
        self.assertRaises(TypeError, ctor, arg, delimiter=1)
        self.assertRaises(TypeError, ctor, arg, quotechar=1)
        self.assertRaises(TypeError, ctor, arg, lineterminator=None)
        self.assertRaises(TypeError, ctor, arg, lineterminator=1)
        self.assertRaises(TypeError, ctor, arg, quoting=None)
        self.assertRaises(TypeError, ctor, arg,
                          quoting=csv.QUOTE_ALL, quotechar='')
        self.assertRaises(TypeError, ctor, arg,
                          quoting=csv.QUOTE_ALL, quotechar=None)

    def test_reader_arg_valid(self):
        self._test_arg_valid(csv.reader, [])

    def test_writer_arg_valid(self):
        self._test_arg_valid(csv.writer, StringIO())

    def _test_default_attrs(self, ctor, *args):
        # Dialect attributes must have documented defaults and be read-only.
        obj = ctor(*args)
        # Check defaults
        self.assertEqual(obj.dialect.delimiter, ',')
        self.assertEqual(obj.dialect.doublequote, True)
        self.assertEqual(obj.dialect.escapechar, None)
        self.assertEqual(obj.dialect.lineterminator, "\r\n")
        self.assertEqual(obj.dialect.quotechar, '"')
        self.assertEqual(obj.dialect.quoting, csv.QUOTE_MINIMAL)
        self.assertEqual(obj.dialect.skipinitialspace, False)
        self.assertEqual(obj.dialect.strict, False)
        # Try deleting or changing attributes (they are read-only)
        self.assertRaises(AttributeError, delattr, obj.dialect, 'delimiter')
        self.assertRaises(AttributeError, setattr, obj.dialect, 'delimiter', ':')
        self.assertRaises(AttributeError, delattr, obj.dialect, 'quoting')
        self.assertRaises(AttributeError, setattr, obj.dialect,
                          'quoting', None)

    def test_reader_attrs(self):
        self._test_default_attrs(csv.reader, [])

    def test_writer_attrs(self):
        self._test_default_attrs(csv.writer, StringIO())

    def _test_kw_attrs(self, ctor, *args):
        # Now try with alternate options
        kwargs = dict(delimiter=':', doublequote=False, escapechar='\\',
                      lineterminator='\r', quotechar='*',
                      quoting=csv.QUOTE_NONE, skipinitialspace=True,
                      strict=True)
        obj = ctor(*args, **kwargs)
        self.assertEqual(obj.dialect.delimiter, ':')
        self.assertEqual(obj.dialect.doublequote, False)
        self.assertEqual(obj.dialect.escapechar, '\\')
        self.assertEqual(obj.dialect.lineterminator, "\r")
        self.assertEqual(obj.dialect.quotechar, '*')
        self.assertEqual(obj.dialect.quoting, csv.QUOTE_NONE)
        self.assertEqual(obj.dialect.skipinitialspace, True)
        self.assertEqual(obj.dialect.strict, True)

    def test_reader_kw_attrs(self):
        self._test_kw_attrs(csv.reader, [])

    def test_writer_kw_attrs(self):
        self._test_kw_attrs(csv.writer, StringIO())

    def _test_dialect_attrs(self, ctor, *args):
        # Now try with dialect-derived options
        class dialect:
            delimiter='-'
            doublequote=False
            escapechar='^'
            lineterminator='$'
            quotechar='#'
            quoting=csv.QUOTE_ALL
            skipinitialspace=True
            strict=False
        args = args + (dialect,)
        obj = ctor(*args)
        self.assertEqual(obj.dialect.delimiter, '-')
        self.assertEqual(obj.dialect.doublequote, False)
        self.assertEqual(obj.dialect.escapechar, '^')
        self.assertEqual(obj.dialect.lineterminator, "$")
        self.assertEqual(obj.dialect.quotechar, '#')
        self.assertEqual(obj.dialect.quoting, csv.QUOTE_ALL)
        self.assertEqual(obj.dialect.skipinitialspace, True)
        self.assertEqual(obj.dialect.strict, False)

    def test_reader_dialect_attrs(self):
        self._test_dialect_attrs(csv.reader, [])

    def test_writer_dialect_attrs(self):
        self._test_dialect_attrs(csv.writer, StringIO())

    def _write_test(self, fields, expect, **kwargs):
        # Write one row and compare the raw file contents with *expect*.
        with TemporaryFile("w+", newline='') as fileobj:
            writer = csv.writer(fileobj, **kwargs)
            writer.writerow(fields)
            fileobj.seek(0)
            self.assertEqual(fileobj.read(),
                             expect + writer.dialect.lineterminator)

    def _write_error_test(self, exc, fields, **kwargs):
        # Writing *fields* must raise *exc* and leave the file untouched.
        with TemporaryFile("w+", newline='') as fileobj:
            writer = csv.writer(fileobj, **kwargs)
            with self.assertRaises(exc):
                writer.writerow(fields)
            fileobj.seek(0)
            self.assertEqual(fileobj.read(), '')

    def test_write_arg_valid(self):
        self._write_error_test(csv.Error, None)
        self._write_test((), '')
        self._write_test([None], '""')
        self._write_error_test(csv.Error, [None], quoting = csv.QUOTE_NONE)
        # Check that exceptions are passed up the chain
        class BadList:
            def __len__(self):
                return 10
            def __getitem__(self, i):
                if i > 2:
                    raise OSError
        self._write_error_test(OSError, BadList())
        class BadItem:
            def __str__(self):
                raise OSError
        self._write_error_test(OSError, [BadItem()])

    def test_write_bigfield(self):
        # This exercises the buffer realloc functionality
        bigstring = 'X' * 50000
        self._write_test([bigstring,bigstring], '%s,%s' % \
                         (bigstring, bigstring))

    def test_write_quoting(self):
        self._write_test(['a',1,'p,q'], 'a,1,"p,q"')
        self._write_error_test(csv.Error, ['a',1,'p,q'],
                               quoting = csv.QUOTE_NONE)
        self._write_test(['a',1,'p,q'], 'a,1,"p,q"',
                         quoting = csv.QUOTE_MINIMAL)
        self._write_test(['a',1,'p,q'], '"a",1,"p,q"',
                         quoting = csv.QUOTE_NONNUMERIC)
        self._write_test(['a',1,'p,q'], '"a","1","p,q"',
                         quoting = csv.QUOTE_ALL)
        self._write_test(['a\nb',1], '"a\nb","1"',
                         quoting = csv.QUOTE_ALL)

    def test_write_escape(self):
        self._write_test(['a',1,'p,q'], 'a,1,"p,q"',
                         escapechar='\\')
        self._write_error_test(csv.Error, ['a',1,'p,"q"'],
                               escapechar=None, doublequote=False)
        self._write_test(['a',1,'p,"q"'], 'a,1,"p,\\"q\\""',
                         escapechar='\\', doublequote = False)
        self._write_test(['"'], '""""',
                         escapechar='\\', quoting = csv.QUOTE_MINIMAL)
        self._write_test(['"'], '\\"',
                         escapechar='\\', quoting = csv.QUOTE_MINIMAL,
                         doublequote = False)
        self._write_test(['"'], '\\"',
                         escapechar='\\', quoting = csv.QUOTE_NONE)
        self._write_test(['a',1,'p,q'], 'a,1,p\\,q',
                         escapechar='\\', quoting = csv.QUOTE_NONE)

    def test_write_iterable(self):
        self._write_test(iter(['a', 1, 'p,q']), 'a,1,"p,q"')
        self._write_test(iter(['a', 1, None]), 'a,1,')
        self._write_test(iter([]), '')
        self._write_test(iter([None]), '""')
        self._write_error_test(csv.Error, iter([None]), quoting=csv.QUOTE_NONE)
        self._write_test(iter([None, None]), ',')

    def test_writerows(self):
        class BrokenFile:
            def write(self, buf):
                raise OSError
        writer = csv.writer(BrokenFile())
        self.assertRaises(OSError, writer.writerows, [['a']])

        with TemporaryFile("w+", newline='') as fileobj:
            writer = csv.writer(fileobj)
            self.assertRaises(TypeError, writer.writerows, None)
            writer.writerows([['a', 'b'], ['c', 'd']])
            fileobj.seek(0)
            self.assertEqual(fileobj.read(), "a,b\r\nc,d\r\n")

    def test_writerows_with_none(self):
        with TemporaryFile("w+", newline='') as fileobj:
            writer = csv.writer(fileobj)
            writer.writerows([['a', None], [None, 'd']])
            fileobj.seek(0)
            self.assertEqual(fileobj.read(), "a,\r\n,d\r\n")

        with TemporaryFile("w+", newline='') as fileobj:
            writer = csv.writer(fileobj)
            writer.writerows([[None], ['a']])
            fileobj.seek(0)
            self.assertEqual(fileobj.read(), '""\r\na\r\n')

        with TemporaryFile("w+", newline='') as fileobj:
            writer = csv.writer(fileobj)
            writer.writerows([['a'], [None]])
            fileobj.seek(0)
            self.assertEqual(fileobj.read(), 'a\r\n""\r\n')

    @support.cpython_only
    def test_writerows_legacy_strings(self):
        import _testcapi
        c = _testcapi.unicode_legacy_string('a')
        with TemporaryFile("w+", newline='') as fileobj:
            writer = csv.writer(fileobj)
            writer.writerows([[c]])
            fileobj.seek(0)
            self.assertEqual(fileobj.read(), "a\r\n")

    def _read_test(self, input, expect, **kwargs):
        # Parse *input* lines and compare the resulting rows with *expect*.
        reader = csv.reader(input, **kwargs)
        result = list(reader)
        self.assertEqual(result, expect)

    def test_read_oddinputs(self):
        self._read_test([], [])
        self._read_test([''], [[]])
        self.assertRaises(csv.Error, self._read_test,
                          ['"ab"c'], None, strict = 1)
        # cannot handle null bytes for the moment
        self.assertRaises(csv.Error, self._read_test,
                          ['ab\0c'], None, strict = 1)
        self._read_test(['"ab"c'], [['abc']], doublequote = 0)

        self.assertRaises(csv.Error, self._read_test,
                          [b'ab\0c'], None)

    def test_read_eol(self):
        self._read_test(['a,b'], [['a','b']])
        self._read_test(['a,b\n'], [['a','b']])
        self._read_test(['a,b\r\n'], [['a','b']])
        self._read_test(['a,b\r'], [['a','b']])
        self.assertRaises(csv.Error, self._read_test, ['a,b\rc,d'], [])
        self.assertRaises(csv.Error, self._read_test, ['a,b\nc,d'], [])
        self.assertRaises(csv.Error, self._read_test, ['a,b\r\nc,d'], [])

    def test_read_eof(self):
        self._read_test(['a,"'], [['a', '']])
        self._read_test(['"a'], [['a']])
        self._read_test(['^'], [['\n']], escapechar='^')
        self.assertRaises(csv.Error, self._read_test, ['a,"'], [], strict=True)
        self.assertRaises(csv.Error, self._read_test, ['"a'], [], strict=True)
        self.assertRaises(csv.Error, self._read_test,
                          ['^'], [], escapechar='^', strict=True)

    def test_read_escape(self):
        self._read_test(['a,\\b,c'], [['a', 'b', 'c']], escapechar='\\')
        self._read_test(['a,b\\,c'], [['a', 'b,c']], escapechar='\\')
        self._read_test(['a,"b\\,c"'], [['a', 'b,c']], escapechar='\\')
        self._read_test(['a,"b,\\c"'], [['a', 'b,c']], escapechar='\\')
        self._read_test(['a,"b,c\\""'], [['a', 'b,c"']], escapechar='\\')
        self._read_test(['a,"b,c"\\'], [['a', 'b,c\\']], escapechar='\\')

    def test_read_quoting(self):
        self._read_test(['1,",3,",5'], [['1', ',3,', '5']])
        self._read_test(['1,",3,",5'], [['1', '"', '3', '"', '5']],
                        quotechar=None, escapechar='\\')
        self._read_test(['1,",3,",5'], [['1', '"', '3', '"', '5']],
                        quoting=csv.QUOTE_NONE, escapechar='\\')
        # will this fail where locale uses comma for decimals?
        self._read_test([',3,"5",7.3, 9'], [['', 3, '5', 7.3, 9]],
                        quoting=csv.QUOTE_NONNUMERIC)
        self._read_test(['"a\nb", 7'], [['a\nb', ' 7']])
        self.assertRaises(ValueError, self._read_test,
                          ['abc,3'], [[]],
                          quoting=csv.QUOTE_NONNUMERIC)

    def test_read_bigfield(self):
        # This exercises the buffer realloc functionality and field size
        # limits.
        limit = csv.field_size_limit()
        try:
            size = 50000
            bigstring = 'X' * size
            bigline = '%s,%s' % (bigstring, bigstring)
            self._read_test([bigline], [[bigstring, bigstring]])
            csv.field_size_limit(size)
            self._read_test([bigline], [[bigstring, bigstring]])
            self.assertEqual(csv.field_size_limit(), size)
            csv.field_size_limit(size-1)
            self.assertRaises(csv.Error, self._read_test, [bigline], [])
            self.assertRaises(TypeError, csv.field_size_limit, None)
            self.assertRaises(TypeError, csv.field_size_limit, 1, None)
        finally:
            # Always restore the process-wide limit.
            csv.field_size_limit(limit)

    def test_read_linenum(self):
        r = csv.reader(['line,1', 'line,2', 'line,3'])
        self.assertEqual(r.line_num, 0)
        next(r)
        self.assertEqual(r.line_num, 1)
        next(r)
        self.assertEqual(r.line_num, 2)
        next(r)
        self.assertEqual(r.line_num, 3)
        self.assertRaises(StopIteration, next, r)
        self.assertEqual(r.line_num, 3)

    # Renamed from test_roundtrip_quoteed_newlines (typo fix).
    def test_roundtrip_quoted_newlines(self):
        with TemporaryFile("w+", newline='') as fileobj:
            writer = csv.writer(fileobj)
            self.assertRaises(TypeError, writer.writerows, None)
            rows = [['a\nb','b'],['c','x\r\nd']]
            writer.writerows(rows)
            fileobj.seek(0)
            for i, row in enumerate(csv.reader(fileobj)):
                self.assertEqual(row, rows[i])

    def test_roundtrip_escaped_unquoted_newlines(self):
        with TemporaryFile("w+", newline='') as fileobj:
            writer = csv.writer(fileobj,quoting=csv.QUOTE_NONE,escapechar="\\")
            rows = [['a\nb','b'],['c','x\r\nd']]
            writer.writerows(rows)
            fileobj.seek(0)
            for i, row in enumerate(csv.reader(fileobj,quoting=csv.QUOTE_NONE,escapechar="\\")):
                self.assertEqual(row,rows[i])
class TestDialectRegistry(unittest.TestCase):
    """Tests for the module-level dialect registry API:
    register_dialect, unregister_dialect, get_dialect, list_dialects."""
    def test_registry_badargs(self):
        # Invalid argument counts/types and unknown dialect names.
        self.assertRaises(TypeError, csv.list_dialects, None)
        self.assertRaises(TypeError, csv.get_dialect)
        self.assertRaises(csv.Error, csv.get_dialect, None)
        self.assertRaises(csv.Error, csv.get_dialect, "nonesuch")
        self.assertRaises(TypeError, csv.unregister_dialect)
        self.assertRaises(csv.Error, csv.unregister_dialect, None)
        self.assertRaises(csv.Error, csv.unregister_dialect, "nonesuch")
        self.assertRaises(TypeError, csv.register_dialect, None)
        self.assertRaises(TypeError, csv.register_dialect, None, None)
        self.assertRaises(TypeError, csv.register_dialect, "nonesuch", 0, 0)
        self.assertRaises(TypeError, csv.register_dialect, "nonesuch",
                          badargument=None)
        self.assertRaises(TypeError, csv.register_dialect, "nonesuch",
                          quoting=None)
        self.assertRaises(TypeError, csv.register_dialect, [])
    def test_registry(self):
        # Registering a dialect class makes it visible in list_dialects.
        class myexceltsv(csv.excel):
            delimiter = "\t"
        name = "myexceltsv"
        expected_dialects = csv.list_dialects() + [name]
        expected_dialects.sort()
        csv.register_dialect(name, myexceltsv)
        self.addCleanup(csv.unregister_dialect, name)
        self.assertEqual(csv.get_dialect(name).delimiter, '\t')
        got_dialects = sorted(csv.list_dialects())
        self.assertEqual(expected_dialects, got_dialects)
    def test_register_kwargs(self):
        # register_dialect also accepts dialect attributes as keywords.
        name = 'fedcba'
        csv.register_dialect(name, delimiter=';')
        self.addCleanup(csv.unregister_dialect, name)
        self.assertEqual(csv.get_dialect(name).delimiter, ';')
        self.assertEqual([['X', 'Y', 'Z']], list(csv.reader(['X;Y;Z'], name)))
    def test_incomplete_dialect(self):
        # A Dialect subclass missing required attributes cannot instantiate.
        class myexceltsv(csv.Dialect):
            delimiter = "\t"
        self.assertRaises(csv.Error, myexceltsv)
    def test_space_dialect(self):
        class space(csv.excel):
            delimiter = " "
            quoting = csv.QUOTE_NONE
            escapechar = "\\"
        with TemporaryFile("w+") as fileobj:
            fileobj.write("abc def\nc1ccccc1 benzene\n")
            fileobj.seek(0)
            reader = csv.reader(fileobj, dialect=space())
            self.assertEqual(next(reader), ["abc", "def"])
            self.assertEqual(next(reader), ["c1ccccc1", "benzene"])
    def compare_dialect_123(self, expected, *writeargs, **kwwriteargs):
        # Helper: write the row [1, 2, 3] and compare the raw file text.
        with TemporaryFile("w+", newline='', encoding="utf-8") as fileobj:
            writer = csv.writer(fileobj, *writeargs, **kwwriteargs)
            writer.writerow([1,2,3])
            fileobj.seek(0)
            self.assertEqual(fileobj.read(), expected)
    def test_dialect_apply(self):
        # Dialects may be passed positionally, by keyword, as an instance,
        # or by registered name; explicit keywords override the dialect.
        class testA(csv.excel):
            delimiter = "\t"
        class testB(csv.excel):
            delimiter = ":"
        class testC(csv.excel):
            delimiter = "|"
        class testUni(csv.excel):
            delimiter = "\u039B"
        csv.register_dialect('testC', testC)
        try:
            self.compare_dialect_123("1,2,3\r\n")
            self.compare_dialect_123("1\t2\t3\r\n", testA)
            self.compare_dialect_123("1:2:3\r\n", dialect=testB())
            self.compare_dialect_123("1|2|3\r\n", dialect='testC')
            self.compare_dialect_123("1;2;3\r\n", dialect=testA,
                                     delimiter=';')
            self.compare_dialect_123("1\u039B2\u039B3\r\n",
                                     dialect=testUni)
        finally:
            csv.unregister_dialect('testC')
    def test_bad_dialect(self):
        # Unknown parameter
        self.assertRaises(TypeError, csv.reader, [], bad_attr = 0)
        # Bad values
        self.assertRaises(TypeError, csv.reader, [], delimiter = None)
        self.assertRaises(TypeError, csv.reader, [], quoting = -1)
        self.assertRaises(TypeError, csv.reader, [], quoting = 100)
    def test_copy(self):
        # Registry dialect instances refuse copying...
        for name in csv.list_dialects():
            dialect = csv.get_dialect(name)
            self.assertRaises(TypeError, copy.copy, dialect)
    def test_pickle(self):
        # ...and pickling, under every protocol.
        for name in csv.list_dialects():
            dialect = csv.get_dialect(name)
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                self.assertRaises(TypeError, pickle.dumps, dialect, proto)
class TestCsvBase(unittest.TestCase):
    """Shared round-trip helpers; subclasses set a `dialect` attribute."""

    def readerAssertEqual(self, input, expected_result):
        """Parse *input* under self.dialect and compare the parsed rows."""
        with TemporaryFile("w+", newline='') as fileobj:
            fileobj.write(input)
            fileobj.seek(0)
            rows = list(csv.reader(fileobj, dialect=self.dialect))
            self.assertEqual(rows, expected_result)

    def writerAssertEqual(self, input, expected_result):
        """Write the rows in *input* under self.dialect and compare the raw text."""
        with TemporaryFile("w+", newline='') as fileobj:
            csv.writer(fileobj, dialect=self.dialect).writerows(input)
            fileobj.seek(0)
            self.assertEqual(fileobj.read(), expected_result)
class TestDialectExcel(TestCsvBase):
    """Round-trip behaviour of the built-in 'excel' dialect."""
    dialect = 'excel'

    def test_single(self):
        self.readerAssertEqual('abc', [['abc']])

    def test_simple(self):
        self.readerAssertEqual('1,2,3,4,5', [['1','2','3','4','5']])

    def test_blankline(self):
        self.readerAssertEqual('', [])

    def test_empty_fields(self):
        self.readerAssertEqual(',', [['', '']])

    def test_singlequoted(self):
        self.readerAssertEqual('""', [['']])

    def test_singlequoted_left_empty(self):
        self.readerAssertEqual('"",', [['','']])

    def test_singlequoted_right_empty(self):
        self.readerAssertEqual(',""', [['','']])

    def test_single_quoted_quote(self):
        self.readerAssertEqual('""""', [['"']])

    def test_quoted_quotes(self):
        self.readerAssertEqual('""""""', [['""']])

    def test_inline_quote(self):
        self.readerAssertEqual('a""b', [['a""b']])

    def test_inline_quotes(self):
        self.readerAssertEqual('a"b"c', [['a"b"c']])

    def test_quotes_and_more(self):
        # Excel would never write a field containing '"a"b', but when
        # reading one, it will return 'ab'.
        self.readerAssertEqual('"a"b', [['ab']])

    def test_lone_quote(self):
        self.readerAssertEqual('a"b', [['a"b']])

    def test_quote_and_quote(self):
        # Excel would never write a field containing '"a" "b"', but when
        # reading one, it will return 'a "b"'.
        self.readerAssertEqual('"a" "b"', [['a "b"']])

    def test_space_and_quote(self):
        self.readerAssertEqual(' "a"', [[' "a"']])

    def test_quoted(self):
        self.readerAssertEqual('1,2,3,"I think, therefore I am",5,6',
                               [['1', '2', '3',
                                 'I think, therefore I am',
                                 '5', '6']])

    def test_quoted_quote(self):
        self.readerAssertEqual('1,2,3,"""I see,"" said the blind man","as he picked up his hammer and saw"',
                               [['1', '2', '3',
                                 '"I see," said the blind man',
                                 'as he picked up his hammer and saw']])

    def test_quoted_nl(self):
        # Newlines embedded in quoted fields must survive parsing.
        input = '''\
1,2,3,"""I see,""
said the blind man","as he picked up his
hammer and saw"
9,8,7,6'''
        self.readerAssertEqual(input,
                               [['1', '2', '3',
                                   '"I see,"\nsaid the blind man',
                                   'as he picked up his\nhammer and saw'],
                                ['9','8','7','6']])

    def test_dubious_quote(self):
        self.readerAssertEqual('12,12,1",', [['12', '12', '1"', '']])

    def test_null(self):
        self.writerAssertEqual([], '')

    def test_single_writer(self):
        self.writerAssertEqual([['abc']], 'abc\r\n')

    def test_simple_writer(self):
        self.writerAssertEqual([[1, 2, 'abc', 3, 4]], '1,2,abc,3,4\r\n')

    def test_quotes(self):
        self.writerAssertEqual([[1, 2, 'a"bc"', 3, 4]], '1,2,"a""bc""",3,4\r\n')

    def test_quote_fieldsep(self):
        self.writerAssertEqual([['abc,def']], '"abc,def"\r\n')

    def test_newlines(self):
        self.writerAssertEqual([[1, 2, 'a\nbc', 3, 4]], '1,2,"a\nbc",3,4\r\n')
class EscapedExcel(csv.excel):
    """Excel variant that never quotes and escapes with a backslash."""
    escapechar = '\\'
    quoting = csv.QUOTE_NONE
class TestEscapedExcel(TestCsvBase):
    """Round-trips for the escape-based (QUOTE_NONE) excel variant."""
    dialect = EscapedExcel()

    def test_escape_fieldsep(self):
        self.writerAssertEqual([['abc,def']], 'abc\\,def\r\n')

    def test_read_escape_fieldsep(self):
        self.readerAssertEqual('abc\\,def\r\n', [['abc,def']])
class TestDialectUnix(TestCsvBase):
    """Round-trips for the built-in 'unix' dialect (quotes all fields)."""
    dialect = 'unix'

    def test_simple_writer(self):
        self.writerAssertEqual([[1, 'abc def', 'abc']], '"1","abc def","abc"\n')

    def test_simple_reader(self):
        self.readerAssertEqual('"1","abc def","abc"\n', [['1', 'abc def', 'abc']])
class QuotedEscapedExcel(csv.excel):
    """Excel variant quoting non-numeric fields, with backslash escaping."""
    escapechar = '\\'
    quoting = csv.QUOTE_NONNUMERIC
class TestQuotedEscapedExcel(TestCsvBase):
    """Round-trips for the QUOTE_NONNUMERIC + escapechar excel variant."""
    dialect = QuotedEscapedExcel()

    def test_write_escape_fieldsep(self):
        self.writerAssertEqual([['abc,def']], '"abc,def"\r\n')

    def test_read_escape_fieldsep(self):
        self.readerAssertEqual('"abc\\,def"\r\n', [['abc,def']])
class TestDictFields(unittest.TestCase):
    """Tests for DictReader/DictWriter field mapping behaviour."""
    ### "long" means the row is longer than the number of fieldnames
    ### "short" means there are fewer elements in the row than fieldnames
    def test_write_simple_dict(self):
        with TemporaryFile("w+", newline='') as fileobj:
            writer = csv.DictWriter(fileobj, fieldnames = ["f1", "f2", "f3"])
            writer.writeheader()
            fileobj.seek(0)
            self.assertEqual(fileobj.readline(), "f1,f2,f3\r\n")
            # Missing keys are written as empty fields.
            writer.writerow({"f1": 10, "f3": "abc"})
            fileobj.seek(0)
            fileobj.readline() # header
            self.assertEqual(fileobj.read(), "10,,abc\r\n")
    def test_write_multiple_dict_rows(self):
        fileobj = StringIO()
        writer = csv.DictWriter(fileobj, fieldnames=["f1", "f2", "f3"])
        writer.writeheader()
        self.assertEqual(fileobj.getvalue(), "f1,f2,f3\r\n")
        writer.writerows([{"f1": 1, "f2": "abc", "f3": "f"},
                          {"f1": 2, "f2": 5, "f3": "xyz"}])
        self.assertEqual(fileobj.getvalue(),
                         "f1,f2,f3\r\n1,abc,f\r\n2,5,xyz\r\n")
    def test_write_no_fields(self):
        # fieldnames is a required argument for DictWriter.
        fileobj = StringIO()
        self.assertRaises(TypeError, csv.DictWriter, fileobj)
    def test_write_fields_not_in_fieldnames(self):
        with TemporaryFile("w+", newline='') as fileobj:
            writer = csv.DictWriter(fileobj, fieldnames = ["f1", "f2", "f3"])
            # Of special note is the non-string key (issue 19449)
            with self.assertRaises(ValueError) as cx:
                writer.writerow({"f4": 10, "f2": "spam", 1: "abc"})
            exception = str(cx.exception)
            self.assertIn("fieldnames", exception)
            self.assertIn("'f4'", exception)
            self.assertNotIn("'f2'", exception)
            self.assertIn("1", exception)
    def test_typo_in_extrasaction_raises_error(self):
        # extrasaction must be "raise" or "ignore".
        fileobj = StringIO()
        self.assertRaises(ValueError, csv.DictWriter, fileobj, ['f1', 'f2'],
                          extrasaction="raised")
    def test_write_field_not_in_field_names_raise(self):
        fileobj = StringIO()
        writer = csv.DictWriter(fileobj, ['f1', 'f2'], extrasaction="raise")
        dictrow = {'f0': 0, 'f1': 1, 'f2': 2, 'f3': 3}
        self.assertRaises(ValueError, csv.DictWriter.writerow, writer, dictrow)
    def test_write_field_not_in_field_names_ignore(self):
        fileobj = StringIO()
        writer = csv.DictWriter(fileobj, ['f1', 'f2'], extrasaction="ignore")
        dictrow = {'f0': 0, 'f1': 1, 'f2': 2, 'f3': 3}
        csv.DictWriter.writerow(writer, dictrow)
        self.assertEqual(fileobj.getvalue(), "1,2\r\n")
    def test_read_dict_fields(self):
        with TemporaryFile("w+") as fileobj:
            fileobj.write("1,2,abc\r\n")
            fileobj.seek(0)
            reader = csv.DictReader(fileobj,
                                    fieldnames=["f1", "f2", "f3"])
            self.assertEqual(next(reader), {"f1": '1', "f2": '2', "f3": 'abc'})
    def test_read_dict_no_fieldnames(self):
        # Without explicit fieldnames, the first row becomes the header.
        with TemporaryFile("w+") as fileobj:
            fileobj.write("f1,f2,f3\r\n1,2,abc\r\n")
            fileobj.seek(0)
            reader = csv.DictReader(fileobj)
            self.assertEqual(next(reader), {"f1": '1', "f2": '2', "f3": 'abc'})
            self.assertEqual(reader.fieldnames, ["f1", "f2", "f3"])
    # Two test cases to make sure existing ways of implicitly setting
    # fieldnames continue to work. Both arise from discussion in issue3436.
    def test_read_dict_fieldnames_from_file(self):
        with TemporaryFile("w+") as fileobj:
            fileobj.write("f1,f2,f3\r\n1,2,abc\r\n")
            fileobj.seek(0)
            reader = csv.DictReader(fileobj,
                                    fieldnames=next(csv.reader(fileobj)))
            self.assertEqual(reader.fieldnames, ["f1", "f2", "f3"])
            self.assertEqual(next(reader), {"f1": '1', "f2": '2', "f3": 'abc'})
    def test_read_dict_fieldnames_chain(self):
        import itertools
        with TemporaryFile("w+") as fileobj:
            fileobj.write("f1,f2,f3\r\n1,2,abc\r\n")
            fileobj.seek(0)
            reader = csv.DictReader(fileobj)
            first = next(reader)
            for row in itertools.chain([first], reader):
                self.assertEqual(reader.fieldnames, ["f1", "f2", "f3"])
                self.assertEqual(row, {"f1": '1', "f2": '2', "f3": 'abc'})
    def test_read_long(self):
        # Extra row values land under the None key by default.
        with TemporaryFile("w+") as fileobj:
            fileobj.write("1,2,abc,4,5,6\r\n")
            fileobj.seek(0)
            reader = csv.DictReader(fileobj,
                                    fieldnames=["f1", "f2"])
            self.assertEqual(next(reader), {"f1": '1', "f2": '2',
                                            None: ["abc", "4", "5", "6"]})
    def test_read_long_with_rest(self):
        with TemporaryFile("w+") as fileobj:
            fileobj.write("1,2,abc,4,5,6\r\n")
            fileobj.seek(0)
            reader = csv.DictReader(fileobj,
                                    fieldnames=["f1", "f2"], restkey="_rest")
            self.assertEqual(next(reader), {"f1": '1', "f2": '2',
                                            "_rest": ["abc", "4", "5", "6"]})
    def test_read_long_with_rest_no_fieldnames(self):
        with TemporaryFile("w+") as fileobj:
            fileobj.write("f1,f2\r\n1,2,abc,4,5,6\r\n")
            fileobj.seek(0)
            reader = csv.DictReader(fileobj, restkey="_rest")
            self.assertEqual(reader.fieldnames, ["f1", "f2"])
            self.assertEqual(next(reader), {"f1": '1', "f2": '2',
                                            "_rest": ["abc", "4", "5", "6"]})
    def test_read_short(self):
        # Missing row values are filled with restval.
        with TemporaryFile("w+") as fileobj:
            fileobj.write("1,2,abc,4,5,6\r\n1,2,abc\r\n")
            fileobj.seek(0)
            reader = csv.DictReader(fileobj,
                                    fieldnames="1 2 3 4 5 6".split(),
                                    restval="DEFAULT")
            self.assertEqual(next(reader), {"1": '1', "2": '2', "3": 'abc',
                                            "4": '4', "5": '5', "6": '6'})
            self.assertEqual(next(reader), {"1": '1', "2": '2', "3": 'abc',
                                            "4": 'DEFAULT', "5": 'DEFAULT',
                                            "6": 'DEFAULT'})
    def test_read_multi(self):
        sample = [
            '2147483648,43.0e12,17,abc,def\r\n',
            '147483648,43.0e2,17,abc,def\r\n',
            '47483648,43.0,170,abc,def\r\n'
            ]

        reader = csv.DictReader(sample,
                                fieldnames="i1 float i2 s1 s2".split())
        self.assertEqual(next(reader), {"i1": '2147483648',
                                        "float": '43.0e12',
                                        "i2": '17',
                                        "s1": 'abc',
                                        "s2": 'def'})
    def test_read_with_blanks(self):
        # Blank lines are skipped by DictReader.
        reader = csv.DictReader(["1,2,abc,4,5,6\r\n","\r\n",
                                 "1,2,abc,4,5,6\r\n"],
                                fieldnames="1 2 3 4 5 6".split())
        self.assertEqual(next(reader), {"1": '1', "2": '2', "3": 'abc',
                                        "4": '4', "5": '5', "6": '6'})
        self.assertEqual(next(reader), {"1": '1', "2": '2', "3": 'abc',
                                        "4": '4', "5": '5', "6": '6'})
    def test_read_semi_sep(self):
        reader = csv.DictReader(["1;2;abc;4;5;6\r\n"],
                                fieldnames="1 2 3 4 5 6".split(),
                                delimiter=';')
        self.assertEqual(next(reader), {"1": '1', "2": '2', "3": 'abc',
                                        "4": '4', "5": '5', "6": '6'})
class TestArrayWrites(unittest.TestCase):
    """Writing array.array rows should behave like writing plain sequences.

    The original four tests each repeated the same write-then-compare
    boilerplate; it is factored into _assert_array_written.
    """

    def _assert_array_written(self, a, expected):
        """Write *a* as a single excel-dialect row and compare the raw text."""
        with TemporaryFile("w+", newline='') as fileobj:
            writer = csv.writer(fileobj, dialect="excel")
            writer.writerow(a)
            fileobj.seek(0)
            self.assertEqual(fileobj.read(), expected)

    def test_int_write(self):
        import array
        contents = [(20-i) for i in range(20)]
        a = array.array('i', contents)
        self._assert_array_written(a, ",".join([str(i) for i in a]) + "\r\n")

    def test_double_write(self):
        import array
        contents = [(20-i)*0.1 for i in range(20)]
        a = array.array('d', contents)
        self._assert_array_written(a, ",".join([str(i) for i in a]) + "\r\n")

    def test_float_write(self):
        import array
        contents = [(20-i)*0.1 for i in range(20)]
        a = array.array('f', contents)
        self._assert_array_written(a, ",".join([str(i) for i in a]) + "\r\n")

    def test_char_write(self):
        # NOTE(review): the 'u' typecode is deprecated (removed in 3.13);
        # this test only runs on versions that still provide it.
        import array, string
        a = array.array('u', string.ascii_letters)
        self._assert_array_written(a, ",".join(a) + "\r\n")
class TestDialectValidity(unittest.TestCase):
    """Validation of Dialect attribute types and values at instantiation.

    NOTE(review): the exact exception message strings asserted below are
    produced by CPython's _csv extension and may differ between versions.
    """
    def test_quoting(self):
        class mydialect(csv.Dialect):
            delimiter = ";"
            escapechar = '\\'
            doublequote = False
            skipinitialspace = True
            lineterminator = '\r\n'
            quoting = csv.QUOTE_NONE
        d = mydialect()
        self.assertEqual(d.quoting, csv.QUOTE_NONE)

        mydialect.quoting = None
        self.assertRaises(csv.Error, mydialect)

        mydialect.doublequote = True
        mydialect.quoting = csv.QUOTE_ALL
        mydialect.quotechar = '"'
        d = mydialect()
        self.assertEqual(d.quoting, csv.QUOTE_ALL)
        self.assertEqual(d.quotechar, '"')
        self.assertTrue(d.doublequote)

        # quotechar must be exactly one character...
        mydialect.quotechar = "''"
        with self.assertRaises(csv.Error) as cm:
            mydialect()
        self.assertEqual(str(cm.exception),
                         '"quotechar" must be a 1-character string')

        # ...and must be a string.
        mydialect.quotechar = 4
        with self.assertRaises(csv.Error) as cm:
            mydialect()
        self.assertEqual(str(cm.exception),
                         '"quotechar" must be string, not int')
    def test_delimiter(self):
        class mydialect(csv.Dialect):
            delimiter = ";"
            escapechar = '\\'
            doublequote = False
            skipinitialspace = True
            lineterminator = '\r\n'
            quoting = csv.QUOTE_NONE
        d = mydialect()
        self.assertEqual(d.delimiter, ";")

        mydialect.delimiter = ":::"
        with self.assertRaises(csv.Error) as cm:
            mydialect()
        self.assertEqual(str(cm.exception),
                         '"delimiter" must be a 1-character string')

        mydialect.delimiter = ""
        with self.assertRaises(csv.Error) as cm:
            mydialect()
        self.assertEqual(str(cm.exception),
                         '"delimiter" must be a 1-character string')

        mydialect.delimiter = b","
        with self.assertRaises(csv.Error) as cm:
            mydialect()
        self.assertEqual(str(cm.exception),
                         '"delimiter" must be string, not bytes')

        mydialect.delimiter = 4
        with self.assertRaises(csv.Error) as cm:
            mydialect()
        self.assertEqual(str(cm.exception),
                         '"delimiter" must be string, not int')
    def test_lineterminator(self):
        class mydialect(csv.Dialect):
            delimiter = ";"
            escapechar = '\\'
            doublequote = False
            skipinitialspace = True
            lineterminator = '\r\n'
            quoting = csv.QUOTE_NONE
        d = mydialect()
        self.assertEqual(d.lineterminator, '\r\n')

        # lineterminator may be any length, but must be a string.
        mydialect.lineterminator = ":::"
        d = mydialect()
        self.assertEqual(d.lineterminator, ":::")

        mydialect.lineterminator = 4
        with self.assertRaises(csv.Error) as cm:
            mydialect()
        self.assertEqual(str(cm.exception),
                         '"lineterminator" must be a string')
    def test_invalid_chars(self):
        # Single-character dialect attributes reject empty, multi-char,
        # bytes, and non-string values.
        def create_invalid(field_name, value):
            class mydialect(csv.Dialect):
                pass
            setattr(mydialect, field_name, value)
            d = mydialect()
        for field_name in ("delimiter", "escapechar", "quotechar"):
            with self.subTest(field_name=field_name):
                self.assertRaises(csv.Error, create_invalid, field_name, "")
                self.assertRaises(csv.Error, create_invalid, field_name, "abc")
                self.assertRaises(csv.Error, create_invalid, field_name, b'x')
                self.assertRaises(csv.Error, create_invalid, field_name, 5)
class TestSniffer(unittest.TestCase):
    """Tests for csv.Sniffer: dialect detection and header detection."""

    # Comma-delimited, unquoted sample with a space after each delimiter.
    sample1 = """\
Harry's, Arlington Heights, IL, 2/1/03, Kimi Hayes
Shark City, Glendale Heights, IL, 12/28/02, Prezence
Tommy's Place, Blue Island, IL, 12/28/02, Blue Sunday/White Crow
Stonecutters Seafood and Chop House, Lemont, IL, 12/19/02, Week Back
"""
    # Colon-delimited, single-quoted sample using doubled quotes as escapes.
    sample2 = """\
'Harry''s':'Arlington Heights':'IL':'2/1/03':'Kimi Hayes'
'Shark City':'Glendale Heights':'IL':'12/28/02':'Prezence'
'Tommy''s Place':'Blue Island':'IL':'12/28/02':'Blue Sunday/White Crow'
'Stonecutters ''Seafood'' and Chop House':'Lemont':'IL':'12/19/02':'Week Back'
"""
    header1 = '''\
"venue","city","state","date","performers"
'''
    # All three rows are identical and '?'-delimited; exercises ambiguous
    # delimiter guessing.
    sample3 = '''\
05/05/03?05/05/03?05/05/03?05/05/03?05/05/03?05/05/03
05/05/03?05/05/03?05/05/03?05/05/03?05/05/03?05/05/03
05/05/03?05/05/03?05/05/03?05/05/03?05/05/03?05/05/03
'''
    sample4 = '''\
2147483648;43.0e12;17;abc;def
147483648;43.0e2;17;abc;def
47483648;43.0;170;abc;def
'''
    sample5 = "aaa\tbbb\r\nAAA\t\r\nBBB\t\r\n"
    sample6 = "a|b|c\r\nd|e|f\r\n"
    sample7 = "'a'|'b'|'c'\r\n'd'|e|f\r\n"

    # Issue 18155: Use a delimiter that is a special char to regex:

    header2 = '''\
"venue"+"city"+"state"+"date"+"performers"
'''
    sample8 = """\
Harry's+ Arlington Heights+ IL+ 2/1/03+ Kimi Hayes
Shark City+ Glendale Heights+ IL+ 12/28/02+ Prezence
Tommy's Place+ Blue Island+ IL+ 12/28/02+ Blue Sunday/White Crow
Stonecutters Seafood and Chop House+ Lemont+ IL+ 12/19/02+ Week Back
"""
    sample9 = """\
'Harry''s'+ Arlington Heights'+ 'IL'+ '2/1/03'+ 'Kimi Hayes'
'Shark City'+ Glendale Heights'+' IL'+ '12/28/02'+ 'Prezence'
'Tommy''s Place'+ Blue Island'+ 'IL'+ '12/28/02'+ 'Blue Sunday/White Crow'
'Stonecutters ''Seafood'' and Chop House'+ 'Lemont'+ 'IL'+ '12/19/02'+ 'Week Back'
"""

    def test_has_header(self):
        """has_header() is False without header1 and True with it."""
        sniffer = csv.Sniffer()
        self.assertEqual(sniffer.has_header(self.sample1), False)
        self.assertEqual(sniffer.has_header(self.header1 + self.sample1),
                         True)

    def test_has_header_regex_special_delimiter(self):
        """has_header() works when the delimiter is a regex special char."""
        sniffer = csv.Sniffer()
        self.assertEqual(sniffer.has_header(self.sample8), False)
        self.assertEqual(sniffer.has_header(self.header2 + self.sample8),
                         True)

    def test_guess_quote_and_delimiter(self):
        """sniff() finds quotechar/delimiter regardless of field position."""
        sniffer = csv.Sniffer()
        for header in (";'123;4';", "'123;4';", ";'123;4'", "'123;4'"):
            with self.subTest(header):
                dialect = sniffer.sniff(header, ",;")
                self.assertEqual(dialect.delimiter, ';')
                self.assertEqual(dialect.quotechar, "'")
                self.assertIs(dialect.doublequote, False)
                self.assertIs(dialect.skipinitialspace, False)

    def test_sniff(self):
        """sniff() detects delimiter, quotechar and skipinitialspace."""
        sniffer = csv.Sniffer()
        dialect = sniffer.sniff(self.sample1)
        self.assertEqual(dialect.delimiter, ",")
        self.assertEqual(dialect.quotechar, '"')
        self.assertEqual(dialect.skipinitialspace, True)

        dialect = sniffer.sniff(self.sample2)
        self.assertEqual(dialect.delimiter, ":")
        self.assertEqual(dialect.quotechar, "'")
        self.assertEqual(dialect.skipinitialspace, False)

    def test_delimiters(self):
        """Delimiter detection across samples, with/without a candidate set."""
        sniffer = csv.Sniffer()
        dialect = sniffer.sniff(self.sample3)
        # given that all three lines in sample3 are equal,
        # I think that any character could have been 'guessed' as the
        # delimiter, depending on dictionary order
        self.assertIn(dialect.delimiter, self.sample3)
        # Restricting the candidate delimiters forces a specific choice.
        dialect = sniffer.sniff(self.sample3, delimiters="?,")
        self.assertEqual(dialect.delimiter, "?")
        dialect = sniffer.sniff(self.sample3, delimiters="/,")
        self.assertEqual(dialect.delimiter, "/")
        dialect = sniffer.sniff(self.sample4)
        self.assertEqual(dialect.delimiter, ";")
        dialect = sniffer.sniff(self.sample5)
        self.assertEqual(dialect.delimiter, "\t")
        dialect = sniffer.sniff(self.sample6)
        self.assertEqual(dialect.delimiter, "|")
        dialect = sniffer.sniff(self.sample7)
        self.assertEqual(dialect.delimiter, "|")
        self.assertEqual(dialect.quotechar, "'")
        dialect = sniffer.sniff(self.sample8)
        self.assertEqual(dialect.delimiter, '+')
        dialect = sniffer.sniff(self.sample9)
        self.assertEqual(dialect.delimiter, '+')
        self.assertEqual(dialect.quotechar, "'")

    def test_doublequote(self):
        """doublequote is detected only when quotes are escaped by doubling."""
        sniffer = csv.Sniffer()
        dialect = sniffer.sniff(self.header1)
        self.assertFalse(dialect.doublequote)
        dialect = sniffer.sniff(self.header2)
        self.assertFalse(dialect.doublequote)
        dialect = sniffer.sniff(self.sample2)
        self.assertTrue(dialect.doublequote)
        dialect = sniffer.sniff(self.sample8)
        self.assertFalse(dialect.doublequote)
        dialect = sniffer.sniff(self.sample9)
        self.assertTrue(dialect.doublequote)
class NUL:
    """A write-only sink (like /dev/null) used as a target for csv.writer.

    csv.writer only requires its output object to expose a callable
    ``write``; both ``write`` and ``writelines`` discard their arguments.
    """
    def write(self, *args):
        # Intentionally discard everything.
        pass
    writelines = write
@unittest.skipUnless(hasattr(sys, "gettotalrefcount"),
                     'requires sys.gettotalrefcount()')
class TestLeaks(unittest.TestCase):
    """Reference-leak regression tests for the _csv extension module.

    Only meaningful on debug builds, where sys.gettotalrefcount() exists.
    Each test repeats an operation and checks that the total-refcount
    delta between consecutive iterations stays below the leak threshold.

    The original assertions were written as ``assertEqual(delta < N, True)``,
    which on failure only reports ``False != True``; ``assertLess`` reports
    the offending delta itself.
    """

    def test_create_read(self):
        """Creating csv.reader objects must not leak references."""
        delta = 0
        lastrc = sys.gettotalrefcount()
        for i in range(20):
            gc.collect()
            self.assertEqual(gc.garbage, [])
            rc = sys.gettotalrefcount()
            csv.reader(["a,b,c\r\n"])
            csv.reader(["a,b,c\r\n"])
            csv.reader(["a,b,c\r\n"])
            delta = rc-lastrc
            lastrc = rc
        # if csv.reader() leaks, last delta should be 3 or more
        self.assertLess(delta, 3)

    def test_create_write(self):
        """Creating csv.writer objects must not leak references."""
        delta = 0
        lastrc = sys.gettotalrefcount()
        s = NUL()
        for i in range(20):
            gc.collect()
            self.assertEqual(gc.garbage, [])
            rc = sys.gettotalrefcount()
            csv.writer(s)
            csv.writer(s)
            csv.writer(s)
            delta = rc-lastrc
            lastrc = rc
        # if csv.writer() leaks, last delta should be 3 or more
        self.assertLess(delta, 3)

    def test_read(self):
        """Iterating a csv.reader must not leak references per row."""
        delta = 0
        rows = ["a,b,c\r\n"]*5
        lastrc = sys.gettotalrefcount()
        for i in range(20):
            gc.collect()
            self.assertEqual(gc.garbage, [])
            rc = sys.gettotalrefcount()
            rdr = csv.reader(rows)
            for row in rdr:
                pass
            delta = rc-lastrc
            lastrc = rc
        # if reader leaks during read, delta should be 5 or more
        self.assertLess(delta, 5)

    def test_write(self):
        """Writing rows through csv.writer must not leak references."""
        delta = 0
        rows = [[1,2,3]]*5
        s = NUL()
        lastrc = sys.gettotalrefcount()
        for i in range(20):
            gc.collect()
            self.assertEqual(gc.garbage, [])
            rc = sys.gettotalrefcount()
            writer = csv.writer(s)
            for row in rows:
                writer.writerow(row)
            delta = rc-lastrc
            lastrc = rc
        # if writer leaks during write, last delta should be 5 or more
        self.assertLess(delta, 5)
class TestUnicode(unittest.TestCase):
    """Round-trip non-ASCII field values through csv reader and writer."""

    names = ["Martin von Löwis",
             "Marc André Lemburg",
             "Guido van Rossum",
             "François Pinard"]

    def test_unicode_read(self):
        # Write one CSV row containing the names, rewind, and parse it back.
        row_text = ",".join(self.names) + "\r\n"
        with TemporaryFile("w+", newline='', encoding="utf-8") as stream:
            stream.write(row_text)
            stream.seek(0)
            parsed_rows = [row for row in csv.reader(stream)]
            self.assertEqual(parsed_rows, [self.names])

    def test_unicode_write(self):
        # Emit the names via csv.writer and compare the raw file contents.
        with TemporaryFile("w+", newline='', encoding="utf-8") as stream:
            csv.writer(stream).writerow(self.names)
            stream.seek(0)
            self.assertEqual(stream.read(), ",".join(self.names) + "\r\n")
class KeyOrderingTest(unittest.TestCase):
    """DictWriter/DictReader must preserve field-name ordering."""

    def test_ordering_for_the_dict_reader_and_writer(self):
        # Round-trip every permutation of five field names through a
        # DictWriter header and back through a DictReader.
        collected = set()
        for keys in permutations("abcde"):
            with TemporaryFile('w+', newline='', encoding="utf-8") as stream:
                writer = csv.DictWriter(stream, keys)
                writer.writeheader()
                stream.seek(0)
                round_tripped = tuple(csv.DictReader(stream).fieldnames)
                self.assertEqual(keys, round_tripped)
                collected.add(round_tripped)
        # Final sanity check: were all permutations unique?
        self.assertEqual(len(collected), 120, "Key ordering: some key permutations not collected (expected 120)")

    def test_ordered_dict_reader(self):
        # Rows with extra fields overflow into restkey (default None);
        # short rows pad the missing fields with None.
        data = dedent('''\
            FirstName,LastName
            Eric,Idle
            Graham,Chapman,Over1,Over2
            Under1
            John,Cleese
            ''').splitlines()

        eric = OrderedDict([('FirstName', 'Eric'), ('LastName', 'Idle')])
        under = OrderedDict([('FirstName', 'Under1'), ('LastName', None)])
        john = OrderedDict([('FirstName', 'John'), ('LastName', 'Cleese')])

        graham_default = OrderedDict([('FirstName', 'Graham'),
                                      ('LastName', 'Chapman'),
                                      (None, ['Over1', 'Over2'])])
        self.assertEqual(list(csv.DictReader(data)),
                         [eric, graham_default, under, john])

        graham_restkey = OrderedDict([('FirstName', 'Graham'),
                                      ('LastName', 'Chapman'),
                                      ('OtherInfo', ['Over1', 'Over2'])])
        self.assertEqual(list(csv.DictReader(data, restkey='OtherInfo')),
                         [eric, graham_restkey, under, john])

        del data[0]  # Remove the header row
        expected = [
            OrderedDict([('fname', 'Eric'), ('lname', 'Idle')]),
            OrderedDict([('fname', 'Graham'), ('lname', 'Chapman'),
                         (None, ['Over1', 'Over2'])]),
            OrderedDict([('fname', 'Under1'), ('lname', None)]),
            OrderedDict([('fname', 'John'), ('lname', 'Cleese')]),
        ]
        self.assertEqual(
            list(csv.DictReader(data, fieldnames=['fname', 'lname'])),
            expected)
class MiscTestCase(unittest.TestCase):
    """Miscellaneous module-level checks for the csv package."""

    def test__all__(self):
        # __doc__ and __version__ are exported names that are deliberately
        # not required to appear in csv.__all__.
        extra = {'__doc__', '__version__'}
        support.check__all__(self, csv, ('csv', '_csv'), extra=extra)
if __name__ == '__main__':
    # Run the full test suite when this module is executed as a script.
    unittest.main()
| FFMG/myoddweb.piger | monitor/api/python/Python-3.7.2/Lib/test/test_csv.py | Python | gpl-2.0 | 48,189 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities in tf.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import shutil
import tempfile
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class SessionDebugTestBase(test_util.TensorFlowTestCase):
"""Base class for unit tests of tfdbg running with tf.Session."""
@classmethod
def setUpClass(cls):
  """Record expected device/partition-graph counts for this machine.

  With a GPU available, each Session.run() produces two partition graphs
  (CPU and GPU) and the main device is the GPU; otherwise everything runs
  on the single CPU device.
  """
  if test.is_gpu_available():
    cls._expected_partition_graph_count = 2
    cls._expected_num_devices = 2
    cls._main_device = "/job:localhost/replica:0/task:0/gpu:0"
  else:
    cls._expected_partition_graph_count = 1
    cls._expected_num_devices = 1
    cls._main_device = "/job:localhost/replica:0/task:0/cpu:0"
@classmethod
def tearDownClass(cls):
  # No class-level resources to release; present for symmetry with
  # setUpClass and for subclasses to override.
  pass
def setUp(self):
  # Fresh temporary directory to receive the debug dumps of each test.
  self._dump_root = tempfile.mkdtemp()
def tearDown(self):
  # Start the next test from a clean default graph.
  ops.reset_default_graph()

  # Tear down temporary dump directory.
  if os.path.isdir(self._dump_root):
    shutil.rmtree(self._dump_root)
def _debug_urls(self, run_number=None):
  """Return the debug URLs to watch; must be overridden by subclasses."""
  raise NotImplementedError(
      "_debug_urls() method is not implemented in the base test class.")
def _debug_dump_dir(self, run_number=None):
  """Return the dump directory for a run; must be overridden by subclasses."""
  raise NotImplementedError(
      "_debug_dump_dir() method is not implemented in the base test class.")
def _generate_dump_from_simple_addition_graph(self):
  """Run w = matmul(u, v) with watches on u/read and v/read.

  Returns:
    A SimpleAddResults namedtuple bundling the initial values, the graph
    elements, their names, and the resulting DebugDumpDir.
  """
  with session.Session() as sess:
    u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
    v_init_val = np.array([[2.0], [-1.0]])

    # Use node names with overlapping namespace (i.e., parent directory) to
    # test concurrent, non-racing directory creation.
    u_name = "u"
    v_name = "v"
    w_name = "w"

    u_init = constant_op.constant(u_init_val, shape=[2, 2])
    u = variables.Variable(u_init, name=u_name)
    v_init = constant_op.constant(v_init_val, shape=[2, 1])
    v = variables.Variable(v_init, name=v_name)

    w = math_ops.matmul(u, v, name=w_name)

    u.initializer.run()
    v.initializer.run()

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_urls = "file://%s" % self._dump_root

    # Add debug tensor watch for u.
    debug_utils.add_debug_tensor_watch(
        run_options, "%s/read" % u_name, 0, debug_urls=debug_urls)
    # Add debug tensor watch for v.
    debug_utils.add_debug_tensor_watch(
        run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)

    run_metadata = config_pb2.RunMetadata()

    # Invoke Session.run().
    sess.run(w, options=run_options, run_metadata=run_metadata)

    self.assertEqual(self._expected_partition_graph_count,
                     len(run_metadata.partition_graphs))

    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)

  simple_add_results = collections.namedtuple("SimpleAddResults", [
      "u_init_val", "v_init_val", "u", "v", "w", "u_name", "v_name", "w_name",
      "dump"
  ])
  return simple_add_results(u_init_val, v_init_val, u, v, w, u_name, v_name,
                            w_name, dump)
def testConcurrentDumpingToPathsWithOverlappingParentDirsWorks(self):
  """Dumps for u/read and v/read succeed despite shared parent dirs."""
  results = self._generate_dump_from_simple_addition_graph()
  self.assertTrue(results.dump.loaded_partition_graphs())

  # Verify the dumped tensor values for u and v.
  self.assertEqual(2, results.dump.size)
  self.assertAllClose([results.u_init_val],
                      results.dump.get_tensors("%s/read" % results.u_name, 0,
                                               "DebugIdentity"))
  self.assertAllClose([results.v_init_val],
                      results.dump.get_tensors("%s/read" % results.v_name, 0,
                                               "DebugIdentity"))

  # Relative timestamps must be non-negative.
  self.assertGreaterEqual(
      results.dump.get_rel_timestamps("%s/read" % results.u_name, 0,
                                      "DebugIdentity")[0], 0)
  self.assertGreaterEqual(
      results.dump.get_rel_timestamps("%s/read" % results.v_name, 0,
                                      "DebugIdentity")[0], 0)

  # The dump files themselves must be non-empty.
  self.assertGreater(
      results.dump.get_dump_sizes_bytes("%s/read" % results.u_name, 0,
                                        "DebugIdentity")[0], 0)
  self.assertGreater(
      results.dump.get_dump_sizes_bytes("%s/read" % results.v_name, 0,
                                        "DebugIdentity")[0], 0)
def testGetOpTypeWorks(self):
  """node_op_type() reports op types; unknown node names raise ValueError."""
  results = self._generate_dump_from_simple_addition_graph()

  self.assertEqual(results.u.op.type,
                   results.dump.node_op_type(results.u_name))
  self.assertIn(results.v.op.type, results.dump.node_op_type(results.v_name))
  self.assertIn(results.w.op.type, results.dump.node_op_type(results.w_name))

  with self.assertRaisesRegexp(
      ValueError, "Node 'foo_bar' does not exist in partition graphs."):
    results.dump.node_op_type("foo_bar")
def testDumpStringTensorsWorks(self):
  """String tensors can be dumped and read back losslessly."""
  with session.Session() as sess:
    str1_init_val = np.array(b"abc")
    str2_init_val = np.array(b"def")

    str1_init = constant_op.constant(str1_init_val)
    str2_init = constant_op.constant(str2_init_val)

    str1_name = "str1"
    str2_name = "str2"
    str1 = variables.Variable(str1_init, name=str1_name)
    str2 = variables.Variable(str2_init, name=str2_name)
    # Concatenate str1 and str2
    str_concat = math_ops.add(str1, str2, name="str_concat")

    str1.initializer.run()
    str2.initializer.run()

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_urls = self._debug_urls()

    # Add debug tensor watch for str1/read.
    debug_utils.add_debug_tensor_watch(
        run_options, "%s/read" % str1_name, 0, debug_urls=debug_urls)
    # Add debug tensor watch for str2/read.
    debug_utils.add_debug_tensor_watch(
        run_options, "%s/read" % str2_name, 0, debug_urls=debug_urls)

    run_metadata = config_pb2.RunMetadata()
    sess.run(str_concat, options=run_options, run_metadata=run_metadata)

    # String ops are located on CPU.
    self.assertEqual(1, len(run_metadata.partition_graphs))

    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)

    self.assertIn(str1_name, dump.nodes())
    self.assertIn(str2_name, dump.nodes())

    self.assertEqual(2, dump.size)

    # Dumped values must round-trip exactly.
    self.assertEqual([str1_init_val],
                     dump.get_tensors("%s/read" % str1_name, 0,
                                      "DebugIdentity"))
    self.assertEqual([str2_init_val],
                     dump.get_tensors("%s/read" % str2_name, 0,
                                      "DebugIdentity"))

    # Timestamps are non-negative and dump files are non-empty.
    self.assertGreaterEqual(
        dump.get_rel_timestamps("%s/read" % str1_name, 0, "DebugIdentity")[0],
        0)
    self.assertGreaterEqual(
        dump.get_rel_timestamps("%s/read" % str2_name, 0, "DebugIdentity")[0],
        0)

    self.assertGreater(
        dump.get_dump_sizes_bytes("%s/read" % str1_name, 0,
                                  "DebugIdentity")[0], 0)
    self.assertGreater(
        dump.get_dump_sizes_bytes("%s/read" % str2_name, 0,
                                  "DebugIdentity")[0], 0)
def testDumpUninitializedVariable(self):
  """Watching uninitialized variables yields None tensor values in the dump."""
  op_namespace = "testDumpUninitializedVariable"
  with session.Session() as sess:
    u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
    s_init_val = b"str1"

    u_name = "%s/u" % op_namespace
    s_name = "%s/s" % op_namespace

    u_init = constant_op.constant(u_init_val, shape=[2, 2])
    u = variables.Variable(u_init, name=u_name)
    s_init = constant_op.constant(s_init_val)
    s = variables.Variable(s_init, name=s_name)

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_urls = self._debug_urls()

    # Add debug tensor watches for u and s (the variable nodes themselves,
    # not their /read outputs).
    debug_utils.add_debug_tensor_watch(
        run_options, "%s" % u_name, 0, debug_urls=debug_urls)
    debug_utils.add_debug_tensor_watch(
        run_options, "%s" % s_name, 0, debug_urls=debug_urls)

    run_metadata = config_pb2.RunMetadata()

    # Initialize u and s.
    sess.run(variables.global_variables_initializer(),
             options=run_options,
             run_metadata=run_metadata)

    # Verify the dump file for the uninitialized value of u.
    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)

    self.assertEqual(2, dump.size)
    self.assertEqual(self._expected_partition_graph_count,
                     len(run_metadata.partition_graphs))

    # Verify that the variable is properly initialized by the run() call.
    # Uninitialized variables dump as None values.
    u_vals = dump.get_tensors(u_name, 0, "DebugIdentity")
    s_vals = dump.get_tensors(s_name, 0, "DebugIdentity")
    self.assertEqual(1, len(u_vals))
    self.assertIsNone(u_vals[0])
    self.assertEqual(1, len(s_vals))
    self.assertIsNone(s_vals[0])

    # Call run() again, to check that u is initialized properly.
    self.assertAllClose(u_init_val, sess.run(u))
    self.assertEqual(s_init_val, sess.run(s))
def testDebugWhileLoopGeneratesMultipleDumps(self):
  """Each iteration of a watched while_loop produces its own dump entry."""
  with session.Session() as sess:
    num_iter = 10

    # "u" is the Variable being updated in the loop.
    u_name = "testDumpToFileWhileLoop/u"
    u_namespace = u_name.split("/")[0]

    u_init_val = np.array(11.0)
    u_init = constant_op.constant(u_init_val)
    u = variables.Variable(u_init, name=u_name)

    # "v" is the increment.
    v_name = "testDumpToFileWhileLoop/v"
    v_namespace = v_name.split("/")[0]

    v_init_val = np.array(2.0)
    v_init = constant_op.constant(v_init_val)
    v = variables.Variable(v_init, name=v_name)

    u.initializer.run()
    v.initializer.run()

    i = constant_op.constant(0, name="testDumpToFileWhileLoop/i")

    def cond(i):
      # Loop while the counter is below num_iter.
      return math_ops.less(i, num_iter)

    def body(i):
      # Each iteration: u += v, then increment the counter, with the
      # increment made dependent on the assign so both always execute.
      new_u = state_ops.assign_add(u, v)
      new_i = math_ops.add(i, 1)
      op = control_flow_ops.group(new_u)
      new_i = control_flow_ops.with_dependencies([op], new_i)
      return [new_i]

    loop = control_flow_ops.while_loop(cond, body, [i], parallel_iterations=1)

    # Create RunOptions for debug-watching tensors
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_urls = self._debug_urls()

    # Add debug tensor watch for u.
    debug_utils.add_debug_tensor_watch(
        run_options, u_name, 0, debug_urls=debug_urls)
    # Add debug tensor watch for v.
    debug_utils.add_debug_tensor_watch(
        run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
    # Add debug tensor watch for while/Identity.
    debug_utils.add_debug_tensor_watch(
        run_options, "while/Identity", 0, debug_urls=debug_urls)
    # Add debug tensor watch for while/Add/y.
    debug_utils.add_debug_tensor_watch(
        run_options, "while/Add/y", 0, debug_urls=debug_urls)

    run_metadata = config_pb2.RunMetadata()
    r = sess.run(loop, options=run_options, run_metadata=run_metadata)

    self.assertEqual(self._expected_partition_graph_count,
                     len(run_metadata.partition_graphs))

    self.assertEqual(num_iter, r)

    u_val_final = sess.run(u)
    self.assertAllClose(u_init_val + num_iter * v_init_val, u_val_final)

    # Verify dump files
    self.assertTrue(os.path.isdir(self._dump_root))
    self.assertTrue(os.path.isdir(os.path.join(self._dump_root, u_namespace)))
    self.assertTrue(
        os.path.isdir(os.path.join(self._dump_root, v_namespace, "v")))

    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)

    # Expected dumped tensors: u, v/read, 10 iterations of while/Identity,
    # and 10 iterations of while/Add/y.
    self.assertEqual(1 + 1 + num_iter + num_iter, dump.size)

    # Verify tensor values.
    self.assertAllClose([u_init_val],
                        dump.get_tensors(u_name, 0, "DebugIdentity"))
    self.assertAllClose([v_init_val],
                        dump.get_tensors("%s/read" % v_name, 0,
                                         "DebugIdentity"))

    # The loop counter must have been dumped once per iteration: 0..9.
    while_id_tensors = dump.get_tensors("while/Identity", 0, "DebugIdentity")
    self.assertEqual(10, len(while_id_tensors))
    for k in xrange(len(while_id_tensors)):
      self.assertAllClose(np.array(k), while_id_tensors[k])

    # Verify ascending timestamps from the while loops.
    while_id_rel_timestamps = dump.get_rel_timestamps("while/Identity", 0,
                                                      "DebugIdentity")
    while_id_dump_sizes_bytes = dump.get_dump_sizes_bytes("while/Identity", 0,
                                                          "DebugIdentity")
    self.assertEqual(10, len(while_id_rel_timestamps))
    prev_rel_time = 0
    prev_dump_size_bytes = while_id_dump_sizes_bytes[0]
    for rel_time, dump_size_bytes in zip(while_id_rel_timestamps,
                                         while_id_dump_sizes_bytes):
      self.assertGreaterEqual(rel_time, prev_rel_time)
      self.assertEqual(dump_size_bytes, prev_dump_size_bytes)
      prev_rel_time = rel_time
      prev_dump_size_bytes = dump_size_bytes

    # Test querying debug watch keys from node name.
    watch_keys = dump.debug_watch_keys("while/Identity")
    self.assertEqual(["while/Identity:0:DebugIdentity"], watch_keys)

    # Test querying debug datum instances from debug watch key.
    self.assertEqual(10, len(dump.watch_key_to_data(watch_keys[0])))
    self.assertEqual([], dump.watch_key_to_data("foo"))
def testFindNodesWithBadTensorValues(self):
  """dump.find() flags tensors containing inf or nan, in graph order."""
  with session.Session() as sess:
    u_name = "testFindNodesWithBadTensorValues/u"
    v_name = "testFindNodesWithBadTensorValues/v"
    w_name = "testFindNodesWithBadTensorValues/w"
    x_name = "testFindNodesWithBadTensorValues/x"
    y_name = "testFindNodesWithBadTensorValues/y"
    z_name = "testFindNodesWithBadTensorValues/z"

    u_init = constant_op.constant([2.0, 4.0])
    u = variables.Variable(u_init, name=u_name)
    v_init = constant_op.constant([2.0, 1.0])
    v = variables.Variable(v_init, name=v_name)

    # Expected output: [0.0, 3.0]
    w = math_ops.subtract(u, v, name=w_name)

    # Expected output: [inf, 1.3333]
    x = math_ops.div(u, w, name=x_name)

    # Expected output: [nan, 4.0]
    y = math_ops.multiply(w, x, name=y_name)

    z = math_ops.multiply(y, y, name=z_name)

    u.initializer.run()
    v.initializer.run()

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls=self._debug_urls())

    run_metadata = config_pb2.RunMetadata()
    sess.run(z, options=run_options, run_metadata=run_metadata)

    self.assertEqual(self._expected_partition_graph_count,
                     len(run_metadata.partition_graphs))

    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)

    def has_bad_value(_, tensor):
      # Predicate: the tensor contains at least one nan or inf entry.
      return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))

    # Find all "offending tensors".
    bad_data = dump.find(has_bad_value)

    # Verify that the nodes with bad values are caught through running find
    # on the debug dump.
    self.assertEqual(3, len(bad_data))
    self.assertEqual(x_name, bad_data[0].node_name)
    self.assertEqual(y_name, bad_data[1].node_name)
    self.assertEqual(z_name, bad_data[2].node_name)

    # Test first_n kwarg of find(): Find the first offending tensor.
    first_bad_datum = dump.find(has_bad_value, first_n=1)

    self.assertEqual(1, len(first_bad_datum))
    self.assertEqual(x_name, first_bad_datum[0].node_name)
def _session_run_for_graph_structure_lookup(self):
  """Run w = (u+u) + (u+u) with all nodes watched.

  Returns:
    (u_name, v_name, w_name, dump): node names of the chain and the
    resulting DebugDumpDir for graph-structure queries.
  """
  with session.Session() as sess:
    u_name = "testDumpGraphStructureLookup/u"
    v_name = "testDumpGraphStructureLookup/v"
    w_name = "testDumpGraphStructureLookup/w"

    u_init = constant_op.constant([2.0, 4.0])
    u = variables.Variable(u_init, name=u_name)
    v = math_ops.add(u, u, name=v_name)
    w = math_ops.add(v, v, name=w_name)

    u.initializer.run()

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls=self._debug_urls())

    run_metadata = config_pb2.RunMetadata()
    sess.run(w, options=run_options, run_metadata=run_metadata)

    self.assertEqual(self._expected_partition_graph_count,
                     len(run_metadata.partition_graphs))

    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)

  return u_name, v_name, w_name, dump
def testGraphStructureLookupGivesDevicesAndNodesInfo(self):
  """Device listing and per-node device/existence lookups on the dump."""
  u_name, _, _, dump = self._session_run_for_graph_structure_lookup()

  # Test num_devices().
  self.assertEqual(self._expected_num_devices, len(dump.devices()))

  # Test node_device().
  self.assertEqual(self._main_device, dump.node_device(u_name))

  with self.assertRaisesRegexp(ValueError,
                               "does not exist in partition graphs"):
    dump.node_device(u_name + "foo")

  # Test node_exists().
  self.assertTrue(dump.node_exists(u_name))
  self.assertTrue(dump.node_exists(u_name + "/read"))
  self.assertFalse(dump.node_exists(u_name + "/read" + "/foo"))
def testGraphStructureLookupGivesNodesAndAttributes(self):
  """Node listing and attribute (dtype, shape) queries on the dump."""
  u_name, _, _, dump = self._session_run_for_graph_structure_lookup()

  u_read_name = u_name + "/read"

  # Test node name list lookup of the DebugDumpDir object.
  node_names = dump.nodes()
  self.assertTrue(u_name in node_names)
  self.assertTrue(u_read_name in node_names)

  # Test querying node attributes: u is a float32 vector of length 2.
  u_attr = dump.node_attributes(u_name)
  self.assertEqual(dtypes.float32, u_attr["dtype"].type)
  self.assertEqual(1, len(u_attr["shape"].shape.dim))
  self.assertEqual(2, u_attr["shape"].shape.dim[0].size)

  with self.assertRaisesRegexp(ValueError, "No node named \"foo\" exists"):
    dump.node_attributes("foo")
def testGraphStructureLookupGivesDebugWatchKeys(self):
  """debug_watch_keys() and watch_key_to_data() lookups on the dump."""
  u_name, v_name, w_name, dump = (
      self._session_run_for_graph_structure_lookup())

  # Test querying the debug watch keys with node names.
  self.assertEqual(["%s:0:DebugIdentity" % u_name],
                   dump.debug_watch_keys(u_name))
  self.assertEqual(["%s:0:DebugIdentity" % v_name],
                   dump.debug_watch_keys(v_name))
  self.assertEqual(["%s:0:DebugIdentity" % w_name],
                   dump.debug_watch_keys(w_name))
  # Unknown node names return an empty list, not an error.
  self.assertEqual([], dump.debug_watch_keys("foo"))

  # Test querying debug datum instances from debug watch.
  u_data = dump.watch_key_to_data(dump.debug_watch_keys(u_name)[0])
  self.assertEqual(1, len(u_data))
  self.assertEqual(u_name, u_data[0].node_name)
  self.assertEqual(0, u_data[0].output_slot)
  self.assertEqual("DebugIdentity", u_data[0].debug_op)
  self.assertGreaterEqual(u_data[0].timestamp, 0)

  self.assertEqual([], dump.watch_key_to_data("foo"))
def testGraphStructureLookupGivesNodeInputsAndRecipients(self):
  """Input, recipient and transitive-input queries on the dump."""
  u_name, v_name, w_name, dump = (
      self._session_run_for_graph_structure_lookup())

  u_read_name = u_name + "/read"

  # Test the inputs lookup of the DebugDumpDir object.
  # The chain is: u -> u/read -> v = add(u/read, u/read) -> w = add(v, v).
  self.assertEqual([], dump.node_inputs(u_name))
  self.assertEqual([u_name], dump.node_inputs(u_read_name))
  self.assertEqual([u_read_name] * 2, dump.node_inputs(v_name))
  self.assertEqual([v_name] * 2, dump.node_inputs(w_name))

  # No node in this graph has control inputs.
  self.assertEqual([], dump.node_inputs(u_name, is_control=True))
  self.assertEqual([], dump.node_inputs(u_read_name, is_control=True))
  self.assertEqual([], dump.node_inputs(v_name, is_control=True))
  self.assertEqual([], dump.node_inputs(w_name, is_control=True))

  # Test the outputs recipient lookup of the DebugDumpDir object.
  self.assertTrue(u_read_name in dump.node_recipients(u_name))
  self.assertEqual(2, dump.node_recipients(u_read_name).count(v_name))
  self.assertEqual(2, dump.node_recipients(v_name).count(w_name))

  self.assertEqual([], dump.node_recipients(u_name, is_control=True))
  self.assertEqual([], dump.node_recipients(u_read_name, is_control=True))
  self.assertEqual([], dump.node_recipients(v_name, is_control=True))
  self.assertEqual([], dump.node_recipients(w_name, is_control=True))

  # Test errors raised on invalid node names.
  with self.assertRaisesRegexp(ValueError,
                               "does not exist in partition graphs"):
    dump.node_inputs(u_name + "foo")

  with self.assertRaisesRegexp(ValueError,
                               "does not exist in partition graphs"):
    dump.node_recipients(u_name + "foo")

  # Test transitive_inputs().
  self.assertEqual([], dump.transitive_inputs(u_name))
  self.assertEqual([u_name], dump.transitive_inputs(u_read_name))
  self.assertEqual(
      set([u_name, u_read_name]), set(dump.transitive_inputs(v_name)))
  self.assertEqual(
      set([u_name, u_read_name, v_name]), set(dump.transitive_inputs(w_name)))

  with self.assertRaisesRegexp(ValueError,
                               "does not exist in partition graphs"):
    dump.transitive_inputs(u_name + "foo")
def testGraphStructureLookupWithoutPartitionGraphsDoesNotErrorOut(self):
  """Partition graphs can be reloaded from the dump directory itself."""
  _, _, _, dump = self._session_run_for_graph_structure_lookup()

  # Now load the dump again, without the partition graphs, so we can check
  # errors are not raised because the partition graphs are loaded from the
  # dump directory.
  dump = debug_data.DebugDumpDir(self._dump_root, validate=False)
  self.assertTrue(dump.loaded_partition_graphs())
def testCausalityCheckOnDumpsDetectsWrongTemporalOrder(self):
  """Timestamp-swapped dump files violate causality unless validate=False."""
  with session.Session() as sess:
    u_name = "testDumpCausalityCheck/u"
    v_name = "testDumpCausalityCheck/v"
    w_name = "testDumpCausalityCheck/w"

    u_init = constant_op.constant([2.0, 4.0])
    u = variables.Variable(u_init, name=u_name)
    v = math_ops.add(u, u, name=v_name)
    w = math_ops.add(v, v, name=w_name)

    u.initializer.run()

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls=self._debug_urls())

    run_metadata = config_pb2.RunMetadata()
    sess.run(w, options=run_options, run_metadata=run_metadata)

    self.assertEqual(self._expected_partition_graph_count,
                     len(run_metadata.partition_graphs))

    # First, loading the original dump without supplying the
    # partition_graphs should not cause a LookupError, validation occurs
    # only with partition_graphs loaded.
    debug_data.DebugDumpDir(self._dump_root)

    # Now, loading the original dump with partition graphs supplied should
    # succeed. The validation should pass quietly.
    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)

    # Get the dump file names and compute their timestamps.
    self.assertEqual(
        1, len(dump.get_tensor_file_paths(u_name, 0, "DebugIdentity")))
    u_file_path = dump.get_tensor_file_paths(u_name, 0, "DebugIdentity")[0]

    self.assertEqual(
        1, len(dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")))
    v_file_path = dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")[0]

    # The timestamp is encoded as the suffix after the last "_" in the
    # dump file name.
    u_timestamp = int(u_file_path[u_file_path.rindex("_") + 1:])
    v_timestamp = int(v_file_path[v_file_path.rindex("_") + 1:])

    # Swap the time stamps
    new_u_file_path = u_file_path[:u_file_path.rindex(
        "_")] + "_%d" % v_timestamp
    new_v_file_path = v_file_path[:v_file_path.rindex(
        "_")] + "_%d" % u_timestamp

    os.rename(u_file_path, new_u_file_path)
    os.rename(v_file_path, new_v_file_path)

    # Load the dump directory again. Now a ValueError is expected to be
    # raised due to the timestamp swap.
    with self.assertRaisesRegexp(ValueError, "Causality violated"):
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)

    # Loading the dump directory with kwarg "validate" set explicitly to
    # False should get rid of the error.
    dump = debug_data.DebugDumpDir(
        self._dump_root,
        partition_graphs=run_metadata.partition_graphs,
        validate=False)
def testWatchingOnlyOneOfTwoOutputSlotsDoesNotLeadToCausalityFailure(self):
  """Watching one of two output slots must not trip causality validation."""
  with session.Session() as sess:
    x_name = "oneOfTwoSlots/x"
    u_name = "oneOfTwoSlots/u"
    v_name = "oneOfTwoSlots/v"
    w_name = "oneOfTwoSlots/w"
    y_name = "oneOfTwoSlots/y"

    x = variables.Variable([1, 3, 3, 7], dtype=dtypes.int32, name=x_name)
    sess.run(x.initializer)

    # unique_with_counts produces multiple outputs; we will watch only
    # slot 0 (the unique values).
    unique_x, indices, _ = array_ops.unique_with_counts(x, name=u_name)

    v = math_ops.add(unique_x, unique_x, name=v_name)
    w = math_ops.add(indices, indices, name=w_name)
    y = math_ops.add(w, w, name=y_name)

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    # Watch only the first output slot of u, even though it has two output
    # slots.
    debug_utils.add_debug_tensor_watch(
        run_options, u_name, 0, debug_urls=self._debug_urls())
    debug_utils.add_debug_tensor_watch(
        run_options, w_name, 0, debug_urls=self._debug_urls())
    debug_utils.add_debug_tensor_watch(
        run_options, y_name, 0, debug_urls=self._debug_urls())

    run_metadata = config_pb2.RunMetadata()
    sess.run([v, y], options=run_options, run_metadata=run_metadata)

    # validate=True must succeed despite the partially-watched node.
    dump = debug_data.DebugDumpDir(
        self._dump_root,
        partition_graphs=run_metadata.partition_graphs,
        validate=True)

    self.assertAllClose([1, 3, 7],
                        dump.get_tensors(u_name, 0, "DebugIdentity")[0])
  def testOutputSlotWithoutOutgoingEdgeCanBeWatched(self):
    """Test watching output slots not attached to any outgoing edges."""
    with session.Session() as sess:
      u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
      u = constant_op.constant(u_init_val, shape=[2, 2], name="u")
      # Create a control edge from a node with an output: From u to z.
      # Node u will get executed only because of the control edge. The output
      # tensor u:0 is not attached to any outgoing edge in the graph. This test
      # checks that the debugger can watch such a tensor.
      with ops.control_dependencies([u]):
        z = control_flow_ops.no_op(name="z")
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls=self._debug_urls())
      run_metadata = config_pb2.RunMetadata()
      sess.run(z, options=run_options, run_metadata=run_metadata)
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      # Assert that the DebugIdentity watch on u works properly.
      # u:0 is the only tensor produced here, so exactly one dump is expected.
      self.assertEqual(1, len(dump.dumped_tensor_data))
      datum = dump.dumped_tensor_data[0]
      self.assertEqual("u", datum.node_name)
      self.assertEqual(0, datum.output_slot)
      self.assertEqual("DebugIdentity", datum.debug_op)
      self.assertAllClose([[5.0, 3.0], [-1.0, 0.0]], datum.get_tensor())
  def testWatchingVariableUpdateOpsSeesUpdatedValues(self):
    """Watch output slots on Variable-updating ops, with no emitted edges."""
    with session.Session() as sess:
      u_init = constant_op.constant(10.0)
      u = variables.Variable(u_init, name="gdo/u")
      v_init = constant_op.constant(20.0)
      v = variables.Variable(v_init, name="gdo/v")
      w = math_ops.multiply(u, v, name="gdo/w")
      # gdo stands for GradientDescentOptimizer.
      train_op = gradient_descent.GradientDescentOptimizer(
          learning_rate=0.1).minimize(
              w, name="gdo/train")
      u.initializer.run()
      v.initializer.run()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls=self._debug_urls())
      run_metadata = config_pb2.RunMetadata()
      sess.run(train_op, options=run_options, run_metadata=run_metadata)
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      # Watch keys take the form <node_name>:<output_slot>:<debug_op>.
      update_u_data = dump.watch_key_to_data(
          "gdo/train/update_gdo/u/ApplyGradientDescent:0:DebugIdentity")
      self.assertEqual(1, len(update_u_data))
      # Gradient descent on u: w = u * v, so dw / du = v.
      # Updated value of u should be:
      # 10.0 - learning_rate * v = 10.0 - 0.1 * 20.0 = 8.0
      self.assertAllClose(8.0, update_u_data[0].get_tensor())
      update_v_data = dump.watch_key_to_data(
          "gdo/train/update_gdo/v/ApplyGradientDescent:0:DebugIdentity")
      self.assertEqual(1, len(update_v_data))
      # Gradient descent on v: w = u * v, so dw / dv = u.
      # Updated value of v should be:
      # 20.0 - learning_rate * u = 20.0 - 0.1 * 10.0 = 19.0
      self.assertAllClose(19.0, update_v_data[0].get_tensor())
      # Verify that the Variables u and v are updated properly.
      self.assertAllClose(8.0, sess.run(u))
      self.assertAllClose(19.0, sess.run(v))
  def testAllowsWatchingUnconnectedOutputTensor(self):
    """Watch an output slot not emitting any edges.
    (Not even control edges from the node.)
    """
    with session.Session() as sess:
      x_init = constant_op.constant([2, 2, 3, 5, 5])
      x = variables.Variable(x_init, name="unconnected/x")
      # The UniqueOp (tf.unique) has two output slots. Use only slot 0 in the
      # graph. Let the debugger watch the unused slot 1.
      unique_x, _ = array_ops.unique(x, name="unconnected/unique_x")
      y = math_ops.add(unique_x, [0, 1, 2], name="unconnected/y")
      x.initializer.run()
      # Verify that only slot 0 of unique_x has recipients, while slot 1 of the
      # same node does not have recipients.
      unique_x_slot_0_recipients = []
      unique_x_slot_1_recipients = []
      for op in sess.graph.get_operations():
        for inp in op.inputs:
          if inp.name == "unconnected/unique_x:0":
            unique_x_slot_0_recipients.append(op.name)
          elif inp.name == "unconnected/unique_x:1":
            unique_x_slot_1_recipients.append(op.name)
      self.assertEqual(["unconnected/y"], unique_x_slot_0_recipients)
      self.assertEqual([], unique_x_slot_1_recipients)
      # watch_graph watches the whole graph, including both slots of unique_x.
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls=self._debug_urls())
      run_metadata = config_pb2.RunMetadata()
      result = sess.run(y, options=run_options, run_metadata=run_metadata)
      self.assertAllClose([2, 4, 7], result)
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      # Assert that the connected slot (slot 0) is dumped properly.
      unique_x_slot_0_dumps = dump.watch_key_to_data(
          "unconnected/unique_x:0:DebugIdentity")
      self.assertEqual(1, len(unique_x_slot_0_dumps))
      self.assertEqual("unconnected/unique_x",
                       unique_x_slot_0_dumps[0].node_name)
      self.assertEqual(0, unique_x_slot_0_dumps[0].output_slot)
      self.assertAllClose([2, 3, 5], unique_x_slot_0_dumps[0].get_tensor())
      # Assert that the unconnected slot (slot 1) is dumped properly.
      unique_x_slot_1_dumps = dump.watch_key_to_data(
          "unconnected/unique_x:1:DebugIdentity")
      self.assertEqual(1, len(unique_x_slot_1_dumps))
      self.assertEqual("unconnected/unique_x",
                       unique_x_slot_1_dumps[0].node_name)
      self.assertEqual(1, unique_x_slot_1_dumps[0].output_slot)
      self.assertAllClose([0, 0, 1, 2, 2],
                          unique_x_slot_1_dumps[0].get_tensor())
  def testDebuggingDuringOpError(self):
    """Test the debug tensor dumping when error occurs in graph runtime."""
    with session.Session() as sess:
      ph = array_ops.placeholder(dtypes.float32, name="mismatch/ph")
      x = array_ops.transpose(ph, name="mismatch/x")
      m = constant_op.constant(
          np.array(
              [[1.0, 2.0]], dtype=np.float32), name="mismatch/m")
      y = math_ops.matmul(m, x, name="mismatch/y")
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls=self._debug_urls())
      # Feeding a 2x1 value makes x (its transpose) 1x2, so
      # matmul(m [1x2], x [1x2]) has mismatched inner dims and must raise.
      with self.assertRaises(errors.OpError):
        sess.run(y,
                 options=run_options,
                 feed_dict={ph: np.array([[-3.0], [0.0]])})
      dump = debug_data.DebugDumpDir(self._dump_root)
      # Despite the fact that the run() call errored out and partition_graphs
      # are not available via run_metadata, the partition graphs should still
      # have been loaded from the dump directory.
      self.assertTrue(dump.loaded_partition_graphs())
      m_dumps = dump.watch_key_to_data("mismatch/m:0:DebugIdentity")
      self.assertEqual(1, len(m_dumps))
      self.assertAllClose(np.array([[1.0, 2.0]]), m_dumps[0].get_tensor())
      x_dumps = dump.watch_key_to_data("mismatch/x:0:DebugIdentity")
      self.assertEqual(1, len(x_dumps))
      self.assertAllClose(np.array([[-3.0, 0.0]]), x_dumps[0].get_tensor())
  def testDebugNumericSummaryOnInitializedTensorGivesCorrectResult(self):
    """DebugNumericSummary on an initialized tensor yields full statistics."""
    with session.Session() as sess:
      a = variables.Variable(
          [
              np.nan, np.nan, 0.0, 0.0, 0.0, -1.0, -3.0, 3.0, 7.0, -np.inf,
              -np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.nan, np.nan
          ],
          dtype=np.float32,
          name="numeric_summary/a")
      b = variables.Variable(
          [0.0] * 18, dtype=np.float32, name="numeric_summary/b")
      c = math_ops.add(a, b, name="numeric_summary/c")
      sess.run(variables.global_variables_initializer())
      run_metadata = config_pb2.RunMetadata()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugNumericSummary"],
          debug_urls=self._debug_urls())
      sess.run(c, options=run_options, run_metadata=run_metadata)
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      self.assertTrue(dump.loaded_partition_graphs())
      # Matching the 18-element fixture above, the expected vector appears to
      # be: [is_initialized, element_count, then counts in the order
      # -inf, negative, zero, positive, +inf, NaN, then min, max, mean,
      # variance over the finite elements (6/7 = 0.857..., 62.857.../7)].
      # NOTE(review): count ordering inferred from the fixture; confirm
      # against this TF version's DebugNumericSummary op documentation.
      self.assertAllClose([[
          1.0, 18.0, 2.0, 2.0, 3.0, 2.0, 5.0, 4.0, -3.0, 7.0, 0.85714286,
          8.97959184
      ]], dump.get_tensors("numeric_summary/a/read", 0, "DebugNumericSummary"))
  def testDebugNumericSummaryOnUninitializedTensorGivesCorrectResult(self):
    """DebugNumericSummary on an uninitialized tensor reports empty stats."""
    with session.Session() as sess:
      a = variables.Variable(
          [42], dtype=np.float32, name="numeric_summary_uninit/a")
      run_metadata = config_pb2.RunMetadata()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugNumericSummary"],
          debug_urls=self._debug_urls())
      sess.run(a.initializer, options=run_options, run_metadata=run_metadata)
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      self.assertTrue(dump.loaded_partition_graphs())
      # DebugNumericSummary output should reflect the uninitialized state of
      # the watched tensor.
      numeric_summary = dump.get_tensors("numeric_summary_uninit/a", 0,
                                         "DebugNumericSummary")[0]
      self.assertAllClose([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                          numeric_summary[0:8])
      # With zero elements: element [8] is +inf, [9] is -inf, and [10]/[11]
      # are NaN (the asserts below pin exactly this).
      self.assertTrue(np.isinf(numeric_summary[8]))
      self.assertGreater(numeric_summary[8], 0.0)
      self.assertTrue(np.isinf(numeric_summary[9]))
      self.assertLess(numeric_summary[9], 0.0)
      self.assertTrue(np.isnan(numeric_summary[10]))
      self.assertTrue(np.isnan(numeric_summary[11]))
  def testLookUpNodePythonTracebackWorks(self):
    """node_traceback lookups work once the Python graph has been set."""
    with session.Session() as sess:
      u_init = constant_op.constant(10.0)
      u = variables.Variable(u_init, name="traceback/u")
      v_init = constant_op.constant(20.0)
      v = variables.Variable(v_init, name="traceback/v")
      w = math_ops.multiply(u, v, name="traceback/w")
      sess.run(variables.global_variables_initializer())
      run_metadata = config_pb2.RunMetadata()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options, sess.graph, debug_urls=self._debug_urls())
      sess.run(w, options=run_options, run_metadata=run_metadata)
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      # Prior to setting the Python graph, attempts to do traceback lookup
      # should lead to exceptions.
      with self.assertRaisesRegexp(
          LookupError, "Python graph is not available for traceback lookup"):
        dump.node_traceback("traceback/w")
      dump.set_python_graph(sess.graph)
      # After setting the Python graph, attempts to look up nonexistent nodes
      # should lead to exceptions.
      with self.assertRaisesRegexp(KeyError,
                                   r"Cannot find node \"foo\" in Python graph"):
        dump.node_traceback("foo")
      # Lookup should work with node name input.
      traceback = dump.node_traceback("traceback/w")
      self.assertIsInstance(traceback, list)
      self.assertGreater(len(traceback), 0)
      for trace in traceback:
        self.assertIsInstance(trace, tuple)
      # Lookup should also work with tensor name input.
      traceback = dump.node_traceback("traceback/w:0")
      self.assertIsInstance(traceback, list)
      self.assertGreater(len(traceback), 0)
      for trace in traceback:
        self.assertIsInstance(trace, tuple)
# Run the suite via TensorFlow's googletest wrapper when executed directly.
if __name__ == "__main__":
  googletest.main()
| jjas0nn/solvem | tensorflow/lib/python2.7/site-packages/tensorflow/python/debug/lib/session_debug_testlib.py | Python | mit | 40,584 |
'''
patent,Cit_Date,Cit_Name,Cit_Kind,Cit_Country,Citation,Category,CitSeq
06009555,1980-03-01,FAY,"",,04192017,,
06009555,1985-10-01,"BROWN ET AL.","",,04547903,,
'''
import csv
import re
# NOTE(review): binary "rb"/"wb" modes with the csv module are Python 2
# semantics; under Python 3 these opens must use newline='' text mode.
with open("patent.csv","rb") as source:
    rdr= csv.reader(source)
    with open("patent-processed.csv","wb") as result:
        wtr= csv.writer(result, delimiter=",")
        for r in rdr:
            # Emit: patent number stripped to digits, plus two extra columns.
            # NOTE(review): with the 8-column layout documented above, valid
            # indices are 0-7 (r[6]=Category, r[7]=CitSeq); r[8] would raise
            # IndexError on the first row -- confirm the real input has >= 9
            # columns or this index is a typo for r[7].
            wtr.writerow((re.sub(r'\D', '', str(r[0])), r[6], r[8]))
| mbartoli/patentprocessing | ETL/preprocess-patent.py | Python | mit | 452 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import csv
import logging
from datetime import datetime
from tempfile import NamedTemporaryFile
from mock import patch
from udata.models import Dataset, PeriodicTask
from udata.tests import TestCase, DBTestMixin
from udata.core.organization.factories import OrganizationFactory
from udata.core.user.factories import UserFactory
from udata.core.dataset.factories import DatasetFactory
from udata.utils import faker
from .factories import (
HarvestSourceFactory, HarvestJobFactory,
mock_initialize, mock_process, DEFAULT_COUNT as COUNT
)
from ..models import (
HarvestSource, HarvestJob, HarvestError,
VALIDATION_PENDING, VALIDATION_ACCEPTED, VALIDATION_REFUSED
)
from ..backends import BaseBackend
from .. import actions, signals
log = logging.getLogger(__name__)
class HarvestActionsTest(DBTestMixin, TestCase):
    """Unit tests for the harvest ``actions`` API: source listing and
    lifecycle, the validation workflow, job lookup, (un)scheduling,
    purging, and CSV-driven dataset attachment."""
    def test_list_backends(self):
        for backend in actions.list_backends():
            self.assertTrue(issubclass(backend, BaseBackend))
    def test_list_sources(self):
        self.assertEqual(actions.list_sources(), [])
        sources = HarvestSourceFactory.create_batch(3)
        result = actions.list_sources()
        self.assertEqual(len(result), len(sources))
        for source in sources:
            self.assertIn(source, result)
    def test_list_sources_deleted(self):
        # Soft-deleted sources must be excluded from the listing.
        self.assertEqual(actions.list_sources(), [])
        now = datetime.now()
        sources = HarvestSourceFactory.create_batch(3)
        deleted_sources = HarvestSourceFactory.create_batch(2, deleted=now)
        result = actions.list_sources()
        self.assertEqual(len(result), len(sources))
        for source in sources:
            self.assertIn(source, result)
        for source in deleted_sources:
            self.assertNotIn(source, result)
    def test_list_sources_for_owner(self):
        owner = UserFactory()
        self.assertEqual(actions.list_sources(owner), [])
        sources = HarvestSourceFactory.create_batch(3, owner=owner)
        HarvestSourceFactory()
        result = actions.list_sources(owner)
        self.assertEqual(len(result), len(sources))
        for source in sources:
            self.assertIn(source, result)
    def test_list_sources_for_org(self):
        org = OrganizationFactory()
        self.assertEqual(actions.list_sources(org), [])
        sources = HarvestSourceFactory.create_batch(3, organization=org)
        HarvestSourceFactory()
        result = actions.list_sources(org)
        self.assertEqual(len(result), len(sources))
        for source in sources:
            self.assertIn(source, result)
    def test_create_source(self):
        # A freshly created source starts unowned, active, pending validation.
        source_url = faker.url()
        with self.assert_emit(signals.harvest_source_created):
            source = actions.create_source('Test source',
                                           source_url,
                                           'factory')
        self.assertEqual(source.name, 'Test source')
        self.assertEqual(source.slug, 'test-source')
        self.assertEqual(source.url, source_url)
        self.assertEqual(source.backend, 'factory')
        self.assertEqual(source.frequency, 'manual')
        self.assertTrue(source.active)
        self.assertIsNone(source.owner)
        self.assertIsNone(source.organization)
        self.assertEqual(source.validation.state, VALIDATION_PENDING)
        self.assertIsNone(source.validation.on)
        self.assertIsNone(source.validation.by)
        self.assertIsNone(source.validation.comment)
    @patch('udata.harvest.actions.launch')
    def test_validate_source(self, mock):
        # Accepting a source triggers an immediate harvest launch.
        source = HarvestSourceFactory()
        actions.validate_source(source.id)
        source.reload()
        self.assertEqual(source.validation.state, VALIDATION_ACCEPTED)
        self.assertIsNotNone(source.validation.on)
        self.assertIsNone(source.validation.by)
        self.assertIsNone(source.validation.comment)
        mock.assert_called_once_with(source.id)
    @patch('udata.harvest.actions.launch')
    def test_validate_source_with_comment(self, mock):
        source = HarvestSourceFactory()
        actions.validate_source(source.id, 'comment')
        source.reload()
        self.assertEqual(source.validation.state, VALIDATION_ACCEPTED)
        self.assertIsNotNone(source.validation.on)
        self.assertIsNone(source.validation.by)
        self.assertEqual(source.validation.comment, 'comment')
        mock.assert_called_once_with(source.id)
    def test_reject_source(self):
        source = HarvestSourceFactory()
        actions.reject_source(source.id, 'comment')
        source.reload()
        self.assertEqual(source.validation.state, VALIDATION_REFUSED)
        self.assertIsNotNone(source.validation.on)
        self.assertIsNone(source.validation.by)
        self.assertEqual(source.validation.comment, 'comment')
    def test_get_source_by_slug(self):
        source = HarvestSourceFactory()
        self.assertEqual(actions.get_source(source.slug), source)
    def test_get_source_by_id(self):
        source = HarvestSourceFactory()
        self.assertEqual(actions.get_source(str(source.id)), source)
    def test_get_source_by_objectid(self):
        source = HarvestSourceFactory()
        self.assertEqual(actions.get_source(source.id), source)
    def test_delete_source_by_slug(self):
        # Deletion is soft: the source is flagged, not removed.
        source = HarvestSourceFactory()
        with self.assert_emit(signals.harvest_source_deleted):
            deleted_source = actions.delete_source(source.slug)
        self.assertIsNotNone(deleted_source.deleted)
        self.assertEqual(deleted_source.id, source.id)
        deleted_sources = HarvestSource.objects(deleted__exists=True)
        self.assertEqual(len(deleted_sources), 1)
    def test_delete_source_by_id(self):
        source = HarvestSourceFactory()
        with self.assert_emit(signals.harvest_source_deleted):
            deleted_source = actions.delete_source(str(source.id))
        self.assertIsNotNone(deleted_source.deleted)
        self.assertEqual(deleted_source.id, source.id)
        deleted_sources = HarvestSource.objects(deleted__exists=True)
        self.assertEqual(len(deleted_sources), 1)
    def test_delete_source_by_objectid(self):
        source = HarvestSourceFactory()
        with self.assert_emit(signals.harvest_source_deleted):
            deleted_source = actions.delete_source(source.id)
        self.assertIsNotNone(deleted_source.deleted)
        self.assertEqual(deleted_source.id, source.id)
        deleted_sources = HarvestSource.objects(deleted__exists=True)
        self.assertEqual(len(deleted_sources), 1)
    def test_get_job_by_id(self):
        job = HarvestJobFactory()
        self.assertEqual(actions.get_job(str(job.id)), job)
    def test_get_job_by_objectid(self):
        job = HarvestJobFactory()
        self.assertEqual(actions.get_job(job.id), job)
    def test_schedule(self):
        # Scheduling creates a daily crontab entry at the requested hour.
        source = HarvestSourceFactory()
        with self.assert_emit(signals.harvest_source_scheduled):
            actions.schedule(str(source.id), hour=0)
        source.reload()
        self.assertEqual(len(PeriodicTask.objects), 1)
        periodic_task = PeriodicTask.objects.first()
        self.assertEqual(source.periodic_task, periodic_task)
        self.assertEqual(periodic_task.args, [str(source.id)])
        self.assertEqual(periodic_task.crontab.hour, '0')
        self.assertEqual(periodic_task.crontab.minute, '*')
        self.assertEqual(periodic_task.crontab.day_of_week, '*')
        self.assertEqual(periodic_task.crontab.day_of_month, '*')
        self.assertEqual(periodic_task.crontab.month_of_year, '*')
        self.assertTrue(periodic_task.enabled)
        self.assertEqual(periodic_task.name, 'Harvest {0}'.format(source.name))
    def test_unschedule(self):
        periodic_task = PeriodicTask.objects.create(
            task='harvest',
            name=faker.name(),
            description=faker.sentence(),
            enabled=True,
            crontab=PeriodicTask.Crontab()
        )
        source = HarvestSourceFactory(periodic_task=periodic_task)
        with self.assert_emit(signals.harvest_source_unscheduled):
            actions.unschedule(str(source.id))
        source.reload()
        self.assertEqual(len(PeriodicTask.objects), 0)
        self.assertIsNone(source.periodic_task)
    def test_purge_sources(self):
        # Purge permanently removes soft-deleted sources only.
        now = datetime.now()
        to_delete = HarvestSourceFactory.create_batch(3, deleted=now)
        to_keep = HarvestSourceFactory.create_batch(2)
        result = actions.purge_sources()
        self.assertEqual(result, len(to_delete))
        self.assertEqual(len(HarvestSource.objects), len(to_keep))
    def test_attach(self):
        """Attach datasets to a harvest domain from a local/remote CSV map."""
        datasets = DatasetFactory.create_batch(3)
        with NamedTemporaryFile() as csvfile:
            writer = csv.DictWriter(csvfile,
                                    fieldnames=['local', 'remote'],
                                    delimiter=b';',
                                    quotechar=b'"')
            writer.writeheader()
            for index, dataset in enumerate(datasets):
                writer.writerow({
                    'local': str(dataset.id),
                    'remote': str(index)
                })
            csvfile.flush()
            result = actions.attach('test.org', csvfile.name)
        self.assertEqual(result.success, len(datasets))
        self.assertEqual(result.errors, 0)
        for index, dataset in enumerate(datasets):
            dataset.reload()
            self.assertEqual(dataset.extras['harvest:domain'], 'test.org')
            self.assertEqual(dataset.extras['harvest:remote_id'], str(index))
    def test_attach_does_not_duplicate(self):
        """Re-attaching replaces existing harvest markers instead of adding."""
        attached_datasets = []
        for i in range(2):
            dataset = DatasetFactory.build()
            dataset.extras['harvest:domain'] = 'test.org'
            dataset.extras['harvest:remote_id'] = str(i)
            dataset.last_modified = datetime.now()
            dataset.save()
            attached_datasets.append(dataset)
        datasets = DatasetFactory.create_batch(3)
        with NamedTemporaryFile() as csvfile:
            writer = csv.DictWriter(csvfile,
                                    fieldnames=['local', 'remote'],
                                    delimiter=b';',
                                    quotechar=b'"')
            writer.writeheader()
            for index, dataset in enumerate(datasets):
                writer.writerow({
                    'local': str(dataset.id),
                    'remote': str(index)
                })
            csvfile.flush()
            result = actions.attach('test.org', csvfile.name)
        dbcount = Dataset.objects(**{
            'extras__harvest:remote_id__exists': True
        }).count()
        self.assertEqual(result.success, len(datasets))
        self.assertEqual(dbcount, result.success)
        for index, dataset in enumerate(datasets):
            dataset.reload()
            self.assertEqual(dataset.extras['harvest:domain'], 'test.org')
            self.assertEqual(dataset.extras['harvest:remote_id'], str(index))
    def test_attach_skip_not_found(self):
        """Unknown local ids are counted as errors, not fatal failures."""
        datasets = DatasetFactory.create_batch(3)
        with NamedTemporaryFile() as csvfile:
            writer = csv.DictWriter(csvfile,
                                    fieldnames=['local', 'remote'],
                                    delimiter=b';',
                                    quotechar=b'"')
            writer.writeheader()
            writer.writerow({
                'local': 'not-found',
                'remote': '42'
            })
            for index, dataset in enumerate(datasets):
                writer.writerow({
                    'local': str(dataset.id),
                    'remote': str(index)
                })
            csvfile.flush()
            result = actions.attach('test.org', csvfile.name)
        self.assertEqual(result.success, len(datasets))
        self.assertEqual(result.errors, 1)
class ExecutionTestMixin(DBTestMixin):
    """Shared end-to-end harvest scenarios.

    Subclasses provide ``action()`` (e.g. wrapping ``actions.launch`` or
    ``actions.run``) so the same assertions cover both execution paths.
    """
    def test_default(self):
        org = OrganizationFactory()
        source = HarvestSourceFactory(backend='factory', organization=org)
        with self.assert_emit(signals.before_harvest_job,
                              signals.after_harvest_job):
            self.action(source.slug)
        source.reload()
        self.assertEqual(len(HarvestJob.objects(source=source)), 1)
        job = source.get_last_job()
        self.assertEqual(job.status, 'done')
        self.assertEqual(job.errors, [])
        self.assertIsNotNone(job.started)
        self.assertIsNotNone(job.ended)
        self.assertEqual(len(job.items), COUNT)
        for item in job.items:
            self.assertEqual(item.status, 'done')
            self.assertEqual(item.errors, [])
            self.assertIsNotNone(item.started)
            self.assertIsNotNone(item.ended)
            self.assertIsNotNone(item.dataset)
            dataset = item.dataset
            self.assertIsNotNone(Dataset.objects(id=dataset.id).first())
            self.assertEqual(dataset.organization, org)
            self.assertIn('harvest:remote_id', dataset.extras)
            self.assertIn('harvest:last_update', dataset.extras)
            self.assertIn('harvest:source_id', dataset.extras)
        self.assertEqual(len(HarvestJob.objects), 1)
        self.assertEqual(len(Dataset.objects), COUNT)
    def test_error_on_initialize(self):
        # A failure in backend initialization fails the whole job: no items,
        # no datasets.
        def init(self):
            raise ValueError('test')
        source = HarvestSourceFactory(backend='factory')
        with self.assert_emit(signals.before_harvest_job),\
                mock_initialize.connected_to(init):
            self.action(source.slug)
        source.reload()
        self.assertEqual(len(HarvestJob.objects(source=source)), 1)
        job = source.get_last_job()
        self.assertEqual(job.status, 'failed')
        self.assertEqual(len(job.errors), 1)
        error = job.errors[0]
        self.assertIsInstance(error, HarvestError)
        self.assertIsNotNone(job.started)
        self.assertIsNotNone(job.ended)
        self.assertEqual(len(job.items), 0)
        self.assertEqual(len(HarvestJob.objects), 1)
        self.assertEqual(len(Dataset.objects), 0)
    def test_error_on_item(self):
        # A failure on a single item marks only that item as failed; the job
        # ends in 'done-errors' and the remaining datasets are created.
        def process(self, item):
            if item.remote_id == '1':
                raise ValueError('test')
        source = HarvestSourceFactory(backend='factory')
        with self.assert_emit(signals.before_harvest_job,
                              signals.after_harvest_job), \
                mock_process.connected_to(process):
            self.action(source.slug)
        source.reload()
        self.assertEqual(len(HarvestJob.objects(source=source)), 1)
        job = source.get_last_job()
        self.assertEqual(job.status, 'done-errors')
        self.assertIsNotNone(job.started)
        self.assertIsNotNone(job.ended)
        self.assertEqual(len(job.errors), 0)
        self.assertEqual(len(job.items), COUNT)
        items_ok = filter(lambda i: not len(i.errors), job.items)
        self.assertEqual(len(items_ok), COUNT - 1)
        for item in items_ok:
            self.assertIsNotNone(item.started)
            self.assertIsNotNone(item.ended)
            self.assertEqual(item.status, 'done')
            self.assertEqual(item.errors, [])
        item_ko = filter(lambda i: len(i.errors), job.items)[0]
        self.assertIsNotNone(item_ko.started)
        self.assertIsNotNone(item_ko.ended)
        self.assertEqual(item_ko.status, 'failed')
        self.assertEqual(len(item_ko.errors), 1)
        error = item_ko.errors[0]
        self.assertIsInstance(error, HarvestError)
        self.assertEqual(len(HarvestJob.objects), 1)
        self.assertEqual(len(Dataset.objects), COUNT - 1)
class HarvestLaunchTest(ExecutionTestMixin, TestCase):
    """Run the shared execution scenarios through ``actions.launch``."""
    def action(self, *args, **kwargs):
        return actions.launch(*args, **kwargs)
class HarvestRunTest(ExecutionTestMixin, TestCase):
    """Run the shared execution scenarios through ``actions.run``."""
    def action(self, *args, **kwargs):
        return actions.run(*args, **kwargs)
class HarvestPreviewTest(DBTestMixin, TestCase):
    """Tests for ``actions.preview``: a dry-run harvest that must persist
    neither jobs nor datasets."""
    def test_preview(self):
        org = OrganizationFactory()
        source = HarvestSourceFactory(backend='factory', organization=org)
        job = actions.preview(source.slug)
        self.assertEqual(job.status, 'done')
        self.assertEqual(job.errors, [])
        self.assertIsNotNone(job.started)
        self.assertIsNotNone(job.ended)
        self.assertEqual(len(job.items), COUNT)
        for item in job.items:
            self.assertEqual(item.status, 'done')
            self.assertEqual(item.errors, [])
            self.assertIsNotNone(item.started)
            self.assertIsNotNone(item.ended)
            self.assertIsNotNone(item.dataset)
            dataset = item.dataset
            self.assertEqual(dataset.organization, org)
            self.assertIn('harvest:remote_id', dataset.extras)
            self.assertIn('harvest:last_update', dataset.extras)
            self.assertIn('harvest:source_id', dataset.extras)
        # Nothing is persisted by a preview.
        self.assertEqual(len(HarvestJob.objects), 0)
        self.assertEqual(len(Dataset.objects), 0)
    def test_preview_max_items(self):
        # HARVEST_PREVIEW_MAX_ITEMS caps the number of previewed items.
        org = OrganizationFactory()
        source = HarvestSourceFactory(backend='factory',
                                      organization=org,
                                      config={'count': 10})
        self.app.config['HARVEST_PREVIEW_MAX_ITEMS'] = 5
        job = actions.preview(source.slug)
        self.assertEqual(len(job.items), 5)
    def test_preview_with_error_on_initialize(self):
        def init(self):
            raise ValueError('test')
        source = HarvestSourceFactory(backend='factory')
        with mock_initialize.connected_to(init):
            job = actions.preview(source.slug)
        self.assertEqual(job.status, 'failed')
        self.assertEqual(len(job.errors), 1)
        error = job.errors[0]
        self.assertIsInstance(error, HarvestError)
        self.assertIsNotNone(job.started)
        self.assertIsNotNone(job.ended)
        self.assertEqual(len(job.items), 0)
        self.assertEqual(len(HarvestJob.objects), 0)
        self.assertEqual(len(Dataset.objects), 0)
    def test_preview_with_error_on_item(self):
        def process(self, item):
            if item.remote_id == '1':
                raise ValueError('test')
        source = HarvestSourceFactory(backend='factory')
        with mock_process.connected_to(process):
            job = actions.preview(source.slug)
        self.assertEqual(job.status, 'done-errors')
        self.assertIsNotNone(job.started)
        self.assertIsNotNone(job.ended)
        self.assertEqual(len(job.errors), 0)
        self.assertEqual(len(job.items), COUNT)
        items_ok = filter(lambda i: not len(i.errors), job.items)
        self.assertEqual(len(items_ok), COUNT - 1)
        for item in items_ok:
            self.assertIsNotNone(item.started)
            self.assertIsNotNone(item.ended)
            self.assertEqual(item.status, 'done')
            self.assertEqual(item.errors, [])
        item_ko = filter(lambda i: len(i.errors), job.items)[0]
        self.assertIsNotNone(item_ko.started)
        self.assertIsNotNone(item_ko.ended)
        self.assertEqual(item_ko.status, 'failed')
        self.assertEqual(len(item_ko.errors), 1)
        error = item_ko.errors[0]
        self.assertIsInstance(error, HarvestError)
        self.assertEqual(len(HarvestJob.objects), 0)
        self.assertEqual(len(Dataset.objects), 0)
| jphnoel/udata | udata/harvest/tests/test_actions.py | Python | agpl-3.0 | 19,718 |
import json
import contextlib
import requests
from . import exceptions
RECEIPT_PRODUCTION_VALIDATION_URL = "https://buy.itunes.apple.com/verifyReceipt"
RECEIPT_SANDBOX_VALIDATION_URL = "https://sandbox.itunes.apple.com/verifyReceipt"
class Request(object):
    """Validation request with raw receipt. Receipt must be base64 encoded string.

    Use the `verify` method to attempt validation and get a Receipt or an
    exception describing the failure.
    """
    def __init__(self, receipt, password=None, **kwargs):
        self.receipt = receipt
        self.password = password
        # Endpoint flags: try production first, sandbox on demand.
        self.use_production = kwargs.get('use_production', True)
        self.use_sandbox = kwargs.get('use_sandbox', False)
        self.timeout = kwargs.get('timeout')

    def __repr__(self):
        return u'<Request(data:{}...)>'.format(self.receipt[:20])

    def verify_from(self, url):
        """POST the receipt to `url` and return the decoded response dict.

        Raises exceptions.ConnectionError when the HTTP request fails,
        exceptions.ItunesNotAvailable on non-200 or undecodable responses,
        and exceptions.InvalidReceipt when Apple reports a bad status.
        """
        payload = {
            'receipt-data': self.receipt
        }
        if self.password:
            payload['password'] = self.password
        try:
            response = requests.post(url, json.dumps(payload), timeout=self.timeout, verify=True)
        except requests.RequestException as e:
            raise exceptions.ConnectionError('failed to request %s: %s' % (url, e))
        if response.status_code != 200:
            raise exceptions.ItunesNotAvailable(response.status_code, response.content)
        try:
            result = json.loads(response.content.decode('utf-8'))
            status = result['status']
        except (KeyError, ValueError):
            raise exceptions.ItunesNotAvailable('invalid response', response.content)
        if status not in (0, 21006):  # ignore expired ios6 receipts
            raise exceptions.InvalidReceipt(result.get('receipt'), status=status)
        return result

    def verify(self):
        """Try verification with settings. Returns a Receipt object if successful.

        When both endpoints are enabled, production is tried first and the
        sandbox result takes precedence. Raises the last InvalidReceipt when
        every enabled endpoint rejects the receipt; connection/service errors
        propagate as-is.
        """
        receipt = None
        exc = None
        if not (self.use_production or self.use_sandbox):
            raise TypeError('use_production=%s use_sandbox=%s' % (self.use_production, self.use_sandbox))
        if self.use_production:
            try:
                receipt = self.verify_from(RECEIPT_PRODUCTION_VALIDATION_URL)
            except exceptions.InvalidReceipt as e:
                exc = e
        if self.use_sandbox:
            try:
                receipt = self.verify_from(RECEIPT_SANDBOX_VALIDATION_URL)
            except exceptions.InvalidReceipt as e:
                exc = e
        if receipt:
            return Receipt(receipt)
        raise exc

    @contextlib.contextmanager
    def verification_mode(self, use_production=None, use_sandbox=None):
        """Temporarily override the endpoint flags within a `with` block."""
        restore = self.use_production, self.use_sandbox
        if use_production is not None:
            self.use_production = use_production
        if use_sandbox is not None:
            self.use_sandbox = use_sandbox
        try:
            yield
        finally:
            # Restore even when the managed block raises; without the
            # try/finally the original left the overridden flags in place
            # whenever an exception escaped the `with` body.
            self.use_production, self.use_sandbox = restore
class Receipt(dict):
    """A dict-like view of a decoded receipt response.

    The mapping itself exposes the ``receipt`` payload of the response,
    while the raw response and its status code remain available through
    the ``data`` and ``status`` attributes.
    """
    def __init__(self, data):
        # Keep the full raw response for callers that need more than the
        # receipt payload.
        self.data = data
        self.status = data['status']
        super(Receipt, self).__init__(data['receipt'])

    def __repr__(self):
        payload = dict.__repr__(self)
        return u'<Receipt(status:{0}, {1})>'.format(self.status, payload)
| Eksmo/itunes-iap | itunesiap/core.py | Python | bsd-2-clause | 3,513 |
""" This is a module to interface to libewf.
This needs to be tested with the windows port.
"""
from ctypes import *
import ctypes.util
# Candidate library basenames covering the different libewf packagings.
possible_names = ['libewf-1', 'ewf',]
# Probe the dynamic linker for the first resolvable name.
for name in possible_names:
    resolved = ctypes.util.find_library(name)
    if resolved:
        break
try:
    # NOTE(review): `resolved == None` would be more idiomatic as `is None`.
    if resolved == None:
        raise ImportError("libewf not found")
    libewf = CDLL(resolved)
    # A handle without a name indicates the library failed to load usefully.
    if not libewf._name: raise OSError()
except OSError:
    raise ImportError("libewf not found")
class ewffile:
    """ A file like object to provide access to the ewf file """
    def __init__(self, volumes):
        """Open an EWF volume set.

        volumes may be a single path or a list of segment paths. Raises
        RuntimeError when libewf refuses to open the volumes.
        """
        if isinstance(volumes, str):
            volumes = [volumes,]
        volume_array = c_char_p * len(volumes)
        self.handle = libewf.libewf_open(volume_array(*volumes), c_int(len(volumes)),
                                         c_int(1))
        if self.handle == 0:
            raise RuntimeError("Unable to open ewf file")
        self.readptr = 0
        size_p = pointer(c_ulonglong(0))
        libewf.libewf_get_media_size(self.handle, size_p)
        self.size = size_p.contents.value

    def seek(self, offset, whence=0):
        """Reposition the read pointer (whence semantics as in file.seek)."""
        if whence == 0:
            self.readptr = offset
        elif whence == 1:
            self.readptr += offset
        elif whence == 2:
            self.readptr = self.size + offset
        # Clamp into [0, size]: the original only clamped the upper bound, so
        # a negative position could be handed straight to libewf_read_random.
        self.readptr = max(0, min(self.readptr, self.size))

    def tell(self):
        """Return the current read position."""
        return self.readptr

    def read(self, length):
        """Read up to `length` bytes from the current position."""
        buf = create_string_buffer(length)
        count = libewf.libewf_read_random(self.handle, buf,
                                          c_ulong(length),
                                          c_ulonglong(self.readptr))
        if count > 0:
            # Advance the pointer so sequential read() calls make progress;
            # the original never advanced it, returning the same data forever.
            self.readptr += count
        return buf.raw[:count]

    def close(self):
        """Release the underlying libewf handle."""
        libewf.libewf_close(self.handle)

    def get_headers(self):
        """Return a dict of acquiry metadata parsed from the EWF headers."""
        # Identifiers as defined by the libewf header-value API.
        # Fix: the original queried "examinier_name" (typo), which libewf
        # does not know, so the examiner name could never be retrieved.
        properties = ["case_number", "description", "examiner_name",
                      "evidence_number", "notes", "acquiry_date",
                      "system_date", "acquiry_operating_system",
                      "acquiry_software_version", "password",
                      "compression_type", "model", "serial_number",]
        ## Make sure we parsed all headers
        libewf.libewf_parse_header_values(self.handle, c_int(4))
        result = {'size': self.size}
        buf = create_string_buffer(1024)
        for p in properties:
            libewf.libewf_get_header_value(self.handle, p, buf, 1024)
            result[p] = buf.value
        ## Get the hash
        if libewf.libewf_get_md5_hash(self.handle, buf, 16) == 1:
            result['md5'] = buf.raw[:16]
        return result
def ewf_open(volumes):
    """Convenience constructor: open *volumes* and return an ewffile."""
    return ewffile(volumes)
if __name__=="__main__":
fd = ewffile("pyflag_stdimage_0.5.e01")
print fd.get_headers()
fd.seek(0x8E4B88)
print "%r" % fd.read(100)
| naototty/pyflag | src/plugins_old/aff4/ewf.py | Python | gpl-2.0 | 2,856 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import optparse
import os
import pexpect
import shutil
import signal
import subprocess
import tempfile
import time
import xmlrpclib
from contextlib import contextmanager
from glob import glob
from os.path import abspath, dirname, join
from sys import stdout
from tempfile import NamedTemporaryFile
# apt-get install rsync python-pexpect debhelper python-setuptools
#----------------------------------------------------------
# Utils
#----------------------------------------------------------
# release.py defines, among others, `version` and `nt_service_name`, which
# are injected into this module's globals (Python 2 execfile).
execfile(join(dirname(__file__), '..', 'odoo', 'release.py'))
version = version.split('-')[0]  # strip any -suffix (e.g. packaging qualifiers)
docker_version = version.replace('+', '')  # '+' is not valid in docker tags
timestamp = time.strftime("%Y%m%d", time.gmtime())
# GPG signing credentials come from the environment (nightly build host).
GPGPASSPHRASE = os.getenv('GPGPASSPHRASE')
GPGID = os.getenv('GPGID')
# Maps a build "type" to its publication sub-directory under --pub.
PUBLISH_DIRS = {
    'debian': 'deb',
    'redhat': 'rpm',
    'tarball': 'src',
    'windows': 'exe',
}
# Addon directory names to keep out of the published packages.
ADDONS_NOT_TO_PUBLISH = [
]
def mkdir(d):
    """Create directory *d* (including parents) unless it already exists."""
    if os.path.isdir(d):
        return
    os.makedirs(d)
def system(l, chdir=None):
    """Run a command and return its exit status (Python 2).

    `l` may be a list (spawned directly via spawnvp) or a string (run through
    `sh -c`).  When *chdir* is given, the command runs in that directory and
    the previous cwd is restored afterwards.
    NOTE(review): if `l` is neither list nor str, `rc` stays unbound and the
    final return raises NameError.
    """
    print l
    if chdir:
        cwd = os.getcwd()
        os.chdir(chdir)
    if isinstance(l, list):
        rc = os.spawnvp(os.P_WAIT, l[0], l)
    elif isinstance(l, str):
        tmp = ['sh', '-c', l]
        rc = os.spawnvp(os.P_WAIT, tmp[0], tmp)
    if chdir:
        os.chdir(cwd)
    return rc
def _rpc_count_modules(addr='http://127.0.0.1', port=8069, dbname='mycompany'):
    """Sanity-check a freshly installed Odoo instance over XML-RPC.

    Raises Exception when fewer than two modules are installed, or when some
    modules are still stuck in the 'to install' state.
    """
    time.sleep(5)  # give the freshly started server time to come up
    modules = xmlrpclib.ServerProxy('%s:%s/xmlrpc/object' % (addr, port)).execute(
        dbname, 1, 'admin', 'ir.module.module', 'search', [('state', '=', 'installed')]
    )
    if modules and len(modules) > 1:
        time.sleep(1)
        toinstallmodules = xmlrpclib.ServerProxy('%s:%s/xmlrpc/object' % (addr, port)).execute(
            dbname, 1, 'admin', 'ir.module.module', 'search', [('state', '=', 'to install')]
        )
        if toinstallmodules:
            print("Package test: FAILED. Not able to install dependencies of base.")
            raise Exception("Installation of package failed")
        else:
            print("Package test: successfuly installed %s modules" % len(modules))
    else:
        print("Package test: FAILED. Not able to install base.")
        raise Exception("Installation of package failed")
def publish(o, type, extensions):
    """Move built artifacts into the publication tree and refresh 'latest' links.

    For each extension, the first o.build_dir/odoo_*.<ext> match is moved into
    o.pub/<PUBLISH_DIRS[type]>/ and a timestamp->'latest' symlink is repointed
    at it.  Returns the list of published file paths.
    """
    def _publish(o, release):
        arch = ''  # NOTE(review): unused
        filename = release.split(os.path.sep)[-1]
        release_dir = PUBLISH_DIRS[type]
        release_path = join(o.pub, release_dir, filename)
        system('mkdir -p %s' % join(o.pub, release_dir))
        shutil.move(join(o.build_dir, release), release_path)
        # Latest/symlink handler: replace the timestamp with 'latest' and
        # repoint the symlink at the file we just published.
        release_abspath = abspath(release_path)
        latest_abspath = release_abspath.replace(timestamp, 'latest')
        if os.path.islink(latest_abspath):
            os.unlink(latest_abspath)
        os.symlink(release_abspath, latest_abspath)
        return release_path
    published = []
    for extension in extensions:
        release = glob("%s/odoo_*.%s" % (o.build_dir, extension))[0]
        published.append(_publish(o, release))
    return published
class OdooDocker(object):
    """Drive a docker container through an interactive bash session (pexpect)."""
    def __init__(self):
        # Keep the container's bash transcript so it can be dumped on failure.
        self.log_file = NamedTemporaryFile(mode='w+b', prefix="bash", suffix=".txt", delete=False)
        self.port = 8069  # TODO sle: reliable way to get a free port?
        # Bash prompt inside the container; used as the expect() delimiter.
        self.prompt_re = '[root@nightly-tests] # '
        self.timeout = 600
    def system(self, command):
        # Send one shell line and block until the prompt comes back.
        self.docker.sendline(command)
        self.docker.expect_exact(self.prompt_re)
    def start(self, docker_image, build_dir, pub_dir):
        """Start the container with build_dir mounted at /opt/release."""
        self.build_dir = build_dir
        self.pub_dir = pub_dir
        self.docker = pexpect.spawn(
            'docker run -v %s:/opt/release -p 127.0.0.1:%s:8069'
            ' -t -i %s /bin/bash --noediting' % (self.build_dir, self.port, docker_image),
            timeout=self.timeout,
            searchwindowsize=len(self.prompt_re) + 1,
        )
        time.sleep(2)  # let the bash start
        self.docker.logfile_read = self.log_file
        # Remember the container id so end() can force-remove it.
        self.id = subprocess.check_output('docker ps -l -q', shell=True)
    def end(self):
        """Verify the install over RPC, dump the log on failure, then clean up."""
        try:
            _rpc_count_modules(port=str(self.port))
        except Exception, e:
            print('Exception during docker execution: %s:' % str(e))
            print('Error during docker execution: printing the bash output:')
            with open(self.log_file.name) as f:
                print '\n'.join(f.readlines())
            raise
        finally:
            # Always tear down the container and the transcript file.
            self.docker.close()
            system('docker rm -f %s' % self.id)
            self.log_file.close()
            os.remove(self.log_file.name)
@contextmanager
def docker(docker_image, build_dir, pub_dir):
    """Context manager yielding a started OdooDocker bound to *docker_image*.

    The container is always cleaned up via OdooDocker.end(), even when
    start() or the managed block raises.  The previous inner
    ``except Exception, e: raise`` was a no-op (it only re-raised) and used
    Python-2-only except syntax; it has been removed.
    """
    _docker = OdooDocker()
    try:
        _docker.start(docker_image, build_dir, pub_dir)
        yield _docker
    finally:
        _docker.end()
class KVM(object):
    """Run a job inside a KVM Windows VM reached over forwarded localhost ports.

    Port 10022 -> guest 22 (ssh), 18069 -> 8069 (Odoo), 15432 -> 5432 (postgres).
    Subclasses override run() with the actual work.
    """
    def __init__(self, o, image, ssh_key='', login='openerp'):
        self.o = o
        self.image = image
        self.ssh_key = ssh_key
        self.login = login
    def timeout(self, signum, frame):
        # SIGALRM handler: the VM job ran too long, kill the VM process.
        print "vm timeout kill", self.pid
        os.kill(self.pid, 15)
    def start(self):
        """Boot the VM on a throwaway snapshot, run() under a 40-min alarm, kill it."""
        l = "kvm -net nic,model=rtl8139 -net user,hostfwd=tcp:127.0.0.1:10022-:22,hostfwd=tcp:127.0.0.1:18069-:8069,hostfwd=tcp:127.0.0.1:15432-:5432 -drive".split(" ")
        #l.append('file=%s,if=virtio,index=0,boot=on,snapshot=on'%self.image)
        l.append('file=%s,snapshot=on' % self.image)  # snapshot=on: disk changes discarded
        #l.extend(['-vnc','127.0.0.1:1'])
        l.append('-nographic')
        print " ".join(l)
        self.pid = os.spawnvp(os.P_NOWAIT, l[0], l)
        time.sleep(10)  # give the VM time to boot before connecting
        signal.alarm(2400)
        signal.signal(signal.SIGALRM, self.timeout)
        try:
            self.run()
        finally:
            # Disarm the handler and always terminate the VM process.
            signal.signal(signal.SIGALRM, signal.SIG_DFL)
            os.kill(self.pid, 15)
            time.sleep(10)
    def ssh(self, cmd):
        # Host-key checking disabled: the VM is a throwaway snapshot.
        l = ['ssh', '-o', 'UserKnownHostsFile=/dev/null', '-o', 'StrictHostKeyChecking=no', '-p', '10022', '-i', self.ssh_key, '%s@127.0.0.1' % self.login, cmd]
        system(l)
    def rsync(self, args, options='--delete --exclude .bzrignore'):
        # rsync over the same forwarded ssh port.
        cmd = 'rsync -rt -e "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 10022 -i %s" %s %s' % (self.ssh_key, options, args)
        system(cmd)
    def run(self):
        # Hook overridden by subclasses with the actual VM-side job.
        pass
class KVMWinBuildExe(KVM):
    """Build the Windows all-in-one installer inside the Windows VM."""
    def run(self):
        # Generate the Makefile fragments consumed by setup/win32.
        with open(join(self.o.build_dir, 'setup/win32/Makefile.version'), 'w') as f:
            f.write("VERSION=%s\n" % version)
        with open(join(self.o.build_dir, 'setup/win32/Makefile.python'), 'w') as f:
            f.write("PYTHON_VERSION=%s\n" % self.o.vm_winxp_python_version.replace('.', ''))
        with open(join(self.o.build_dir, 'setup/win32/Makefile.servicename'), 'w') as f:
            # nt_service_name is injected by the execfile of release.py above.
            f.write("SERVICENAME=%s\n" % nt_service_name)
        # Push the sources in, build, and pull the release artifacts back out.
        self.ssh("mkdir -p build")
        self.rsync('%s/ %s@127.0.0.1:build/server/' % (self.o.build_dir, self.login))
        self.ssh("cd build/server/setup/win32;time make allinone;")
        self.rsync('%s@127.0.0.1:build/server/setup/win32/release/ %s/' % (self.login, self.o.build_dir), '')
        print "KVMWinBuildExe.run(): done"
class KVMWinTestExe(KVM):
    """Install the built .exe inside the Windows VM and smoke-test it over RPC."""
    def run(self):
        setuppath = glob("%s/openerp-server-setup-*.exe" % self.o.build_dir)[0]
        setupfile = setuppath.split('/')[-1]
        # Version is embedded in the installer filename.
        setupversion = setupfile.split('openerp-server-setup-')[1].split('.exe')[0]
        self.rsync('"%s" %s@127.0.0.1:' % (setuppath, self.login))
        self.ssh("TEMP=/tmp ./%s /S" % setupfile)  # /S = silent install
        self.ssh('PGPASSWORD=openpgpwd /cygdrive/c/"Program Files"/"Odoo %s"/PostgreSQL/bin/createdb.exe -e -U openpg mycompany' % setupversion)
        self.ssh('/cygdrive/c/"Program Files"/"Odoo %s"/server/odoo-bin.exe -d mycompany -i base --stop-after-init' % setupversion)
        self.ssh('net start %s' % nt_service_name)
        # Verify through the forwarded Odoo port.
        _rpc_count_modules(port=18069)
#----------------------------------------------------------
# Stage: building
#----------------------------------------------------------
def _prepare_build_dir(o, win32=False):
    """rsync the source tree into o.build_dir and fold addons/ into odoo/addons.

    setup/win32 is excluded unless a Windows build was requested.
    """
    cmd = ['rsync', '-a', '--exclude', '.git', '--exclude', '*.pyc', '--exclude', '*.pyo']
    if not win32:
        cmd += ['--exclude', 'setup/win32']
    system(cmd + ['%s/' % o.odoo_dir, o.build_dir])
    try:
        for addon_path in glob(join(o.build_dir, 'addons/*')):
            if addon_path.split(os.path.sep)[-1] not in ADDONS_NOT_TO_PUBLISH:
                shutil.move(addon_path, join(o.build_dir, 'odoo/addons'))
    except shutil.Error:
        # Thrown when the add-on is already in odoo/addons (if _prepare_build_dir
        # has already been called once)
        pass
def build_tgz(o):
    """Build source tarball + zip via setup.py sdist, renaming each artifact
    to odoo_<version>.<timestamp>.<ext> inside the build directory."""
    system(['python2', 'setup.py', 'sdist', '--quiet', '--formats=gztar,zip'], o.build_dir)
    for ext in ('tar.gz', 'zip'):
        built = glob('%s/dist/odoo-*.%s' % (o.build_dir, ext))[0]
        system(['mv', built, '%s/odoo_%s.%s.%s' % (o.build_dir, version, timestamp, ext)])
def build_deb(o):
    """Build the signed Debian package set with dpkg-buildpackage."""
    # Append timestamp to version for the .dsc to refer the right .tar.gz
    cmd = ['sed', '-i', '1s/^.*$/odoo (%s.%s) stable; urgency=low/' % (version, timestamp), 'debian/changelog']
    subprocess.call(cmd, cwd=o.build_dir)
    deb = pexpect.spawn('dpkg-buildpackage -rfakeroot -k%s' % GPGID, cwd=o.build_dir)
    deb.logfile = stdout
    if GPGPASSPHRASE:
        # Signing prompts twice (presumably the .dsc then the .changes file).
        deb.expect_exact('Enter passphrase: ', timeout=1200)
        deb.send(GPGPASSPHRASE + '\r\n')
        deb.expect_exact('Enter passphrase: ')
        deb.send(GPGPASSPHRASE + '\r\n')
    deb.expect(pexpect.EOF, timeout=1200)
    # dpkg-buildpackage writes its artifacts to the parent dir; pull them back.
    system(['mv', glob('%s/../odoo_*.deb' % o.build_dir)[0], '%s' % o.build_dir])
    system(['mv', glob('%s/../odoo_*.dsc' % o.build_dir)[0], '%s' % o.build_dir])
    system(['mv', glob('%s/../odoo_*_amd64.changes' % o.build_dir)[0], '%s' % o.build_dir])
    system(['mv', glob('%s/../odoo_*.tar.gz' % o.build_dir)[0], '%s' % o.build_dir])
def build_rpm(o):
    """Build the noarch RPM via setup.py bdist_rpm and stamp it version/date."""
    system(['python2', 'setup.py', '--quiet', 'bdist_rpm'], o.build_dir)
    system(['mv', glob('%s/dist/odoo-*.noarch.rpm' % o.build_dir)[0], '%s/odoo_%s.%s.noarch.rpm' % (o.build_dir, version, timestamp)])
def build_exe(o):
    """Build the Windows installer inside the KVM VM, then stamp the result."""
    KVMWinBuildExe(o, o.vm_winxp_image, o.vm_winxp_ssh_key, o.vm_winxp_login).start()
    system(['cp', glob('%s/openerp*.exe' % o.build_dir)[0], '%s/odoo_%s.%s.exe' % (o.build_dir, version, timestamp)])
#----------------------------------------------------------
# Stage: testing
#----------------------------------------------------------
def _prepare_testing(o):
    """Build the docker images used by test_tgz/test_deb/test_rpm.

    One image per package flavour, built from the package.df* Dockerfiles
    shipped in setup/.  Skipped flavours follow the --no-* options.
    """
    if not o.no_tarball:
        subprocess.call(["mkdir", "docker_src"], cwd=o.build_dir)
        subprocess.call(["cp", "package.dfsrc", os.path.join(o.build_dir, "docker_src", "Dockerfile")],
                        cwd=os.path.join(o.odoo_dir, "setup"))
        # Use rsync to copy requirements.txt in order to keep original permissions
        subprocess.call(["rsync", "-a", "requirements.txt", os.path.join(o.build_dir, "docker_src")],
                        cwd=os.path.join(o.odoo_dir))
        subprocess.call(["docker", "build", "-t", "odoo-%s-src-nightly-tests" % docker_version, "."],
                        cwd=os.path.join(o.build_dir, "docker_src"))
    if not o.no_debian:
        subprocess.call(["mkdir", "docker_debian"], cwd=o.build_dir)
        subprocess.call(["cp", "package.dfdebian", os.path.join(o.build_dir, "docker_debian", "Dockerfile")],
                        cwd=os.path.join(o.odoo_dir, "setup"))
        # Use rsync to copy requirements.txt in order to keep original permissions
        subprocess.call(["rsync", "-a", "requirements.txt", os.path.join(o.build_dir, "docker_debian")],
                        cwd=os.path.join(o.odoo_dir))
        subprocess.call(["docker", "build", "-t", "odoo-%s-debian-nightly-tests" % docker_version, "."],
                        cwd=os.path.join(o.build_dir, "docker_debian"))
    if not o.no_rpm:
        subprocess.call(["mkdir", "docker_fedora"], cwd=o.build_dir)
        subprocess.call(["cp", "package.dffedora", os.path.join(o.build_dir, "docker_fedora", "Dockerfile")],
                        cwd=os.path.join(o.odoo_dir, "setup"))
        subprocess.call(["docker", "build", "-t", "odoo-%s-fedora-nightly-tests" % docker_version, "."],
                        cwd=os.path.join(o.build_dir, "docker_fedora"))
def test_tgz(o):
    """Pip-install the source tarball inside the src test image and boot it."""
    with docker('odoo-%s-src-nightly-tests' % docker_version, o.build_dir, o.pub) as wheezy:
        wheezy.release = '*.tar.gz'
        wheezy.system("service postgresql start")
        wheezy.system('pip install /opt/release/%s' % wheezy.release)
        wheezy.system("useradd --system --no-create-home odoo")
        wheezy.system('su postgres -s /bin/bash -c "createuser -s odoo"')
        wheezy.system('su postgres -s /bin/bash -c "createdb mycompany"')
        wheezy.system('mkdir /var/lib/odoo')
        wheezy.system('chown odoo:odoo /var/lib/odoo')
        # Install base, then leave a server running; the docker() context
        # manager verifies it over RPC on exit.
        wheezy.system('su odoo -s /bin/bash -c "odoo --addons-path=/usr/local/lib/python2.7/dist-packages/odoo/addons -d mycompany -i base --stop-after-init"')
        wheezy.system('su odoo -s /bin/bash -c "odoo --addons-path=/usr/local/lib/python2.7/dist-packages/odoo/addons -d mycompany &"')
def test_deb(o):
    """dpkg-install the .deb inside the debian test image and boot it."""
    with docker('odoo-%s-debian-nightly-tests' % docker_version, o.build_dir, o.pub) as wheezy:
        wheezy.release = '*.deb'
        wheezy.system("service postgresql start")
        wheezy.system('su postgres -s /bin/bash -c "createdb mycompany"')
        wheezy.system('/usr/bin/dpkg -i /opt/release/%s' % wheezy.release)
        # dpkg leaves dependencies unresolved; apt-get -f pulls them in.
        wheezy.system('/usr/bin/apt-get install -f -y')
        wheezy.system('su odoo -s /bin/bash -c "odoo -c /etc/odoo/odoo.conf -d mycompany -i base --stop-after-init"')
        wheezy.system('su odoo -s /bin/bash -c "odoo -c /etc/odoo/odoo.conf -d mycompany &"')
def test_rpm(o):
    """dnf-install the .rpm inside the fedora test image and boot it."""
    with docker('odoo-%s-fedora-nightly-tests' % docker_version, o.build_dir, o.pub) as fedora24:
        fedora24.release = '*.noarch.rpm'
        # Start postgresql
        fedora24.system('su postgres -c "/usr/bin/pg_ctl -D /var/lib/postgres/data start"')
        fedora24.system('sleep 5')
        fedora24.system('su postgres -c "createdb mycompany"')
        # Odoo install
        fedora24.system('dnf install -d 0 -e 0 /opt/release/%s -y' % fedora24.release)
        fedora24.system('su odoo -s /bin/bash -c "odoo -c /etc/odoo/odoo.conf -d mycompany -i base --stop-after-init"')
        fedora24.system('su odoo -s /bin/bash -c "odoo -c /etc/odoo/odoo.conf -d mycompany &"')
def test_exe(o):
    """Run the Windows installer smoke test inside the KVM VM."""
    KVMWinTestExe(o, o.vm_winxp_image, o.vm_winxp_ssh_key, o.vm_winxp_login).start()
#---------------------------------------------------------
# Generates Packages, Sources and Release files of debian package
#---------------------------------------------------------
def gen_deb_package(o, published_files):
    """Regenerate the apt metadata (Packages/Sources/Release[.gpg]) in o.pub/deb."""
    # Executes command to produce file_name in path, and moves it to o.pub/deb
    # NOTE: Python-2-only tuple-parameter unpacking in the signature below.
    def _gen_file(o, (command, file_name), path):
        cur_tmp_file_path = os.path.join(path, file_name)
        with open(cur_tmp_file_path, 'w') as out:
            subprocess.call(command, stdout=out, cwd=path)
        system(['cp', cur_tmp_file_path, os.path.join(o.pub, 'deb', file_name)])
    # Copy files to a temp directory (required because the working directory must contain only the
    # files of the last release)
    temp_path = tempfile.mkdtemp(suffix='debPackages')
    for pub_file_path in published_files:
        system(['cp', pub_file_path, temp_path])
    commands = [
        (['dpkg-scanpackages', '.'], "Packages"),  # Generate Packages file
        (['dpkg-scansources', '.'], "Sources"),  # Generate Sources file
        (['apt-ftparchive', 'release', '.'], "Release")  # Generate Release file
    ]
    # Generate files
    for command in commands:
        _gen_file(o, command, temp_path)
    # Remove temp directory
    shutil.rmtree(temp_path)
    # Generate Release.gpg (= signed Release)
    # Options -abs: -a (Create ASCII armored output), -b (Make a detach signature), -s (Make a signature)
    subprocess.call(['gpg', '--default-key', GPGID, '--passphrase', GPGPASSPHRASE, '--yes', '-abs', '--no-tty', '-o', 'Release.gpg', 'Release'], cwd=os.path.join(o.pub, 'deb'))
#---------------------------------------------------------
# Generates an RPM repo
#---------------------------------------------------------
def gen_rpm_repo(o, file_name):
    """Sign the RPM and rebuild the yum repodata under o.pub/rpm."""
    # Sign the RPM; rpm --resign prompts interactively for the passphrase.
    rpmsign = pexpect.spawn('/bin/bash', ['-c', 'rpm --resign %s' % file_name], cwd=os.path.join(o.pub, 'rpm'))
    rpmsign.expect_exact('Enter pass phrase: ')
    rpmsign.send(GPGPASSPHRASE + '\r\n')
    rpmsign.expect(pexpect.EOF)
    # Removes the old repodata
    subprocess.call(['rm', '-rf', os.path.join(o.pub, 'rpm', 'repodata')])
    # Copy files to a temp directory (required because the working directory must contain only the
    # files of the last release)
    temp_path = tempfile.mkdtemp(suffix='rpmPackages')
    subprocess.call(['cp', file_name, temp_path])
    subprocess.call(['createrepo', temp_path])  # creates a repodata folder in temp_path
    subprocess.call(['cp', '-r', os.path.join(temp_path, "repodata"), os.path.join(o.pub, 'rpm')])
    # Remove temp directory
    shutil.rmtree(temp_path)
#----------------------------------------------------------
# Options and Main
#----------------------------------------------------------
def options():
    """Parse command-line options and derive the working paths.

    Returns the optparse values object extended with odoo_dir, pkg, work
    and work_addons paths.
    """
    op = optparse.OptionParser()
    root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    build_dir = "%s-%s" % (root, timestamp)
    op.add_option("-b", "--build-dir", default=build_dir, help="build directory (%default)", metavar="DIR")
    op.add_option("-p", "--pub", default=None, help="pub directory (%default)", metavar="DIR")
    op.add_option("", "--no-testing", action="store_true", help="don't test the builded packages")
    op.add_option("", "--no-debian", action="store_true", help="don't build the debian package")
    op.add_option("", "--no-rpm", action="store_true", help="don't build the rpm package")
    op.add_option("", "--no-tarball", action="store_true", help="don't build the tarball")
    op.add_option("", "--no-windows", action="store_true", help="don't build the windows package")
    # Windows VM
    op.add_option("", "--vm-winxp-image", default='/home/odoo/vm/winxp27/winxp27.vdi', help="%default")
    op.add_option("", "--vm-winxp-ssh-key", default='/home/odoo/vm/winxp27/id_rsa', help="%default")
    op.add_option("", "--vm-winxp-login", default='Naresh', help="Windows login (%default)")
    op.add_option("", "--vm-winxp-python-version", default='2.7', help="Windows Python version installed in the VM (default: %default)")
    (o, args) = op.parse_args()
    # derive other options
    o.odoo_dir = root
    o.pkg = join(o.build_dir, 'pkg')
    o.work = join(o.build_dir, 'openerp-%s' % version)
    o.work_addons = join(o.work, 'odoo', 'addons')
    return o
def main():
o = options()
_prepare_build_dir(o)
if not o.no_testing:
_prepare_testing(o)
try:
if not o.no_tarball:
build_tgz(o)
try:
if not o.no_testing:
test_tgz(o)
published_files = publish(o, 'tarball', ['tar.gz', 'zip'])
except Exception, e:
print("Won't publish the tgz release.\n Exception: %s" % str(e))
if not o.no_debian:
build_deb(o)
try:
if not o.no_testing:
test_deb(o)
published_files = publish(o, 'debian', ['deb', 'dsc', 'changes', 'tar.gz'])
gen_deb_package(o, published_files)
except Exception, e:
print("Won't publish the deb release.\n Exception: %s" % str(e))
if not o.no_rpm:
build_rpm(o)
try:
if not o.no_testing:
test_rpm(o)
published_files = publish(o, 'redhat', ['noarch.rpm'])
gen_rpm_repo(o, published_files[0])
except Exception, e:
print("Won't publish the rpm release.\n Exception: %s" % str(e))
if not o.no_windows:
_prepare_build_dir(o, win32=True)
build_exe(o)
try:
if not o.no_testing:
test_exe(o)
published_files = publish(o, 'windows', ['exe'])
except Exception, e:
print("Won't publish the exe release.\n Exception: %s" % str(e))
except:
pass
finally:
shutil.rmtree(o.build_dir)
print('Build dir %s removed' % o.build_dir)
if not o.no_testing:
system("docker rm -f `docker ps -a | awk '{print $1 }'` 2>>/dev/null")
print('Remaining dockers removed')
if __name__ == '__main__':
main()
| kosgroup/odoo | setup/package.py | Python | gpl-3.0 | 21,097 |
import datetime
from robocompdsl.templates.common.templatedict import TemplateDict
from robocompdsl.templates.templateCPP.plugins.base.functions import function_utils as utils
INTERFACE_METHOD_STR = """
${ret} ${interface_name}I::${method_name}(${input_params})
{
${to_return}worker->${interface_name}_${method_name}(${param_str});
}
"""
class SERVANT_H(TemplateDict):
    """Template context for generating the <Interface>I servant header (.h).

    Fills in interface/module identification fields plus the C++ method
    declarations for every method of the requested interface.
    """

    def __init__(self, component, interface_name):
        super(SERVANT_H, self).__init__()
        self.component = component
        module = self.component.idsl_pool.module_providing_interface(interface_name)
        self['year'] = str(datetime.date.today().year)
        self['interface_name'] = interface_name
        self['interface_name_upper'] = interface_name.upper()
        self['filename_without_extension'] = module['filename'].split('/')[-1].split('.')[0]
        self['module_name'] = module['name']
        self['interface_methods_definition'] = self.interface_methods_definition(module,
                                                                                 interface_name)

    def interface_methods_definition(self, module, interface_name):
        """Return one 'ret name(params);' declaration line per interface method."""
        declarations = []
        for interface in module['interfaces']:
            if interface['name'] != interface_name:
                continue
            for method in interface['methods'].values():
                return_type = utils.get_type_string(method['return'], module['name'])
                params = utils.get_parameters_string(method, module['name'],
                                                     self.component.language)
                # Every Ice servant method receives a trailing Ice::Current.
                if params:
                    params = f"{params}, const Ice::Current&"
                else:
                    params = "const Ice::Current&"
                declarations.append(f"{return_type} {method['name']}({params});\n")
        return "".join(declarations)
| robocomp/robocomp | tools/cli/robocompdsl/robocompdsl/templates/templateCPP/plugins/base/functions/SERVANT_H.py | Python | gpl-3.0 | 1,893 |
# -*- coding: utf-8 -*-
import gevent.pool
import gevent.queue
from gevent import monkey
monkey.patch_all()
class asynchronizer():
    """Run queued callables concurrently on a bounded gevent pool.

    Work items sit in a PriorityQueue; lower ``priority`` values run first.

    Fixes over the previous implementation:
      * entries carry a monotonically increasing sequence number as a
        tie-breaker, so two items with equal priority never fall through to
        comparing function objects (a TypeError on Python 3);
      * the reserved 'priority' keyword is removed from kwargs before the
        target function is invoked instead of leaking into its call.
    """

    def __init__(self, workers):
        # workers defines how many functions may run concurrently
        self.workers = workers
        self.pool = gevent.pool.Pool(self.workers)
        self.pqueue = gevent.queue.PriorityQueue()
        self._seq = 0  # tie-breaker keeping queue entries orderable

    def add(self, func, *args, **kwargs):
        """Queue func(*args, **kwargs); 'priority' kwarg (default 1) orders it."""
        priority = kwargs.pop('priority', 1)
        self._seq += 1
        self.pqueue.put((priority, self._seq, func, args, kwargs))
        self.startWorkers()

    def updateWorkers(self, workers):
        """Resize the pool; only effective before anything has been added."""
        self.workers = workers
        self.pool = gevent.pool.Pool(self.workers)

    def worker(self):
        # Runs inside a greenlet: drain the queue until it is empty.
        while True:
            try:
                _priority, _seq, func, args, kwargs = self.pqueue.get_nowait()
            except gevent.queue.Empty:
                return
            func(*args, **kwargs)

    def startWorkers(self):
        """Spawn up to min(pending items, free pool slots) worker greenlets."""
        for _ in range(0, min(self.pqueue.qsize(), self.pool.free_count())):
            self.pool.spawn(self.worker)

    def wait(self):
        """Block until every spawned worker greenlet has finished."""
        self.pool.join()
# Module-level shared pool used by the decorator API below (32 workers).
a = asynchronizer(32)
def asynchronize(async_func):
    """Decorator: calling the wrapped function enqueues it on the shared pool."""
    def converted_func(*args, **kwargs):
        # Drop the internal 'async_func' key if a caller passed it; it is
        # only meaningful to add().
        kwargs.pop('async_func', None)
        a.add(async_func, *args, **kwargs)
    return converted_func
def Wait():
    # wrapper for asynchronizer.wait()
    # Blocks until every queued function on the shared pool has completed.
    a.wait()
def setWorkers(workers):
    # wrapper for asynchronizer.updateWorkers()
    # Only effective before any work has been queued (see updateWorkers).
    a.updateWorkers(workers)
| Arsh23/asynchronizer | asynchronizer/__init__.py | Python | mit | 2,045 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import collections
# Declarative fixture for the log-miner "rejected ALTER" scenario: each
# TestCase pairs one input commit statement with the entries/SQL the
# processor is expected to emit (here: nothing, as the ALTER is rejected).
TestCase = collections.namedtuple('TestCase', "description input_table_name input_commit_statement input_primary_key_fields expected_entries expected_sql")
tests = [
    TestCase(
        description="Rejected",
        input_table_name="ALS2",
        input_commit_statement="""ALTER TABLE ALS2 shrink space check""",
        input_primary_key_fields=None,
        expected_entries=[],
        expected_sql=''
    )
]
| iagcl/data_pipeline | tests/processor/data_logminer_rejected_alters.py | Python | apache-2.0 | 1,201 |
from flask import Flask
from flask import jsonify
from flask_jwt_extended import create_access_token
from flask_jwt_extended import jwt_required
from flask_jwt_extended import JWTManager
from flask_jwt_extended import set_access_cookies
from flask_jwt_extended import unset_jwt_cookies
app = Flask(__name__)
# Here you can globally configure all the ways you want to allow JWTs to
# be sent to your web application. By default, this will be only headers.
app.config["JWT_TOKEN_LOCATION"] = ["headers", "cookies", "json", "query_string"]
# If true this will only allow the cookies that contain your JWTs to be sent
# over https. In production, this should always be set to True
app.config["JWT_COOKIE_SECURE"] = False
# Change this in your code!
app.config["JWT_SECRET_KEY"] = "super-secret"
# Wires token creation/verification into the app's request cycle.
jwt = JWTManager(app)
@app.route("/login_without_cookies", methods=["POST"])
def login_without_cookies():
access_token = create_access_token(identity="example_user")
return jsonify(access_token=access_token)
@app.route("/login_with_cookies", methods=["POST"])
def login_with_cookies():
response = jsonify({"msg": "login successful"})
access_token = create_access_token(identity="example_user")
set_access_cookies(response, access_token)
return response
@app.route("/logout_with_cookies", methods=["POST"])
def logout_with_cookies():
response = jsonify({"msg": "logout successful"})
unset_jwt_cookies(response)
return response
@app.route("/protected", methods=["GET", "POST"])
@jwt_required()
def protected():
return jsonify(foo="bar")
@app.route("/only_headers")
@jwt_required(locations=["headers"])
def only_headers():
return jsonify(foo="baz")
if __name__ == "__main__":
app.run()
| vimalloc/flask-jwt-extended | examples/jwt_locations.py | Python | mit | 1,740 |
from __future__ import absolute_import, unicode_literals
from tests.mpd import protocol
class AuthenticationActiveTest(protocol.BaseTestCase):
    """MPD protocol behaviour when a password IS configured."""

    def get_config(self):
        # Enable password protection for every test in this class.
        config = super(AuthenticationActiveTest, self).get_config()
        config['mpd']['password'] = 'topsecret'
        return config

    def test_authentication_with_valid_password_is_accepted(self):
        self.send_request('password "topsecret"')
        self.assertTrue(self.dispatcher.authenticated)
        self.assertInResponse('OK')

    def test_authentication_with_invalid_password_is_not_accepted(self):
        self.send_request('password "secret"')
        self.assertFalse(self.dispatcher.authenticated)
        self.assertEqualResponse('ACK [3@0] {password} incorrect password')

    def test_authentication_without_password_fails(self):
        # Missing argument is a protocol error, not an auth failure.
        self.send_request('password')
        self.assertFalse(self.dispatcher.authenticated)
        self.assertEqualResponse(
            'ACK [2@0] {password} wrong number of arguments for "password"')

    def test_anything_when_not_authenticated_should_fail(self):
        self.send_request('any request at all')
        self.assertFalse(self.dispatcher.authenticated)
        self.assertEqualResponse(
            u'ACK [4@0] {any} you don\'t have permission for "any"')

    # The following commands are exempt from authentication by the protocol.

    def test_close_is_allowed_without_authentication(self):
        self.send_request('close')
        self.assertFalse(self.dispatcher.authenticated)

    def test_commands_is_allowed_without_authentication(self):
        self.send_request('commands')
        self.assertFalse(self.dispatcher.authenticated)
        self.assertInResponse('OK')

    def test_notcommands_is_allowed_without_authentication(self):
        self.send_request('notcommands')
        self.assertFalse(self.dispatcher.authenticated)
        self.assertInResponse('OK')

    def test_ping_is_allowed_without_authentication(self):
        self.send_request('ping')
        self.assertFalse(self.dispatcher.authenticated)
        self.assertInResponse('OK')
class AuthenticationInactiveTest(protocol.BaseTestCase):
    """MPD protocol behaviour when NO password is configured."""

    def test_authentication_with_anything_when_password_check_turned_off(self):
        # Every connection is implicitly authenticated.
        self.send_request('any request at all')
        self.assertTrue(self.dispatcher.authenticated)
        self.assertEqualResponse('ACK [5@0] {} unknown command "any"')

    def test_any_password_is_not_accepted_when_password_check_turned_off(self):
        self.send_request('password "secret"')
        self.assertEqualResponse('ACK [3@0] {password} incorrect password')
| priestd09/mopidy | tests/mpd/protocol/test_authentication.py | Python | apache-2.0 | 2,557 |
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
"""A plugin that ensures that given features are present."""
import dnf.cli
from dnf.i18n import _
from dnf.cli.option_parser import OptionParser
# The parent class allows registration to the CLI manager.
class Command(dnf.cli.Command):
    """A command that ensures that given features are present.

    Accepts package specs, @group specs and local rpm file names; resolves
    and installs them in one transaction.
    """
    # An alias is needed to invoke the command from command line.
    aliases = ['foo']  # <-- SET YOUR ALIAS HERE.

    def configure(self):
        """Setup the demands."""
        # Repositories are needed if we want to install anything.
        self.cli.demands.available_repos = True
        # A sack is required by marking methods and dependency resolving.
        self.cli.demands.sack_activation = True
        # Resolving performs a transaction that installs the packages.
        self.cli.demands.resolving = True
        # Based on the system, privileges are required to do an installation.
        self.cli.demands.root_user = True  # <-- SET YOUR FLAG HERE.

    @staticmethod
    def set_argparser(parser):
        """Parse command line arguments.

        ParseSpecGroupFileCallback splits the positional arguments into
        opts.pkg_specs, opts.grp_specs and opts.filenames.
        """
        parser.add_argument('package', nargs='+', metavar=_('PACKAGE'),
                            action=OptionParser.ParseSpecGroupFileCallback,
                            help=_('Package to install'))

    def run(self):
        """Run the command."""
        # Feature marking methods set the user request.
        for ftr_spec in self.opts.pkg_specs:
            try:
                self.base.install(ftr_spec)
            except dnf.exceptions.MarkingError:
                raise dnf.exceptions.Error('feature(s) not found: ' + ftr_spec)
        # Package marking methods set the user request.
        # strict=False tolerates individually unresolvable remote rpms.
        for pkg in self.base.add_remote_rpms(self.opts.filenames, strict=False):
            try:
                self.base.package_install(pkg, strict=False)
            except dnf.exceptions.MarkingError as e:
                raise dnf.exceptions.Error(e)
        # Comps data reading initializes the base.comps attribute.
        if self.opts.grp_specs:
            self.base.read_comps(arch_filter=True)
        # Group marking methods set the user request.
        for grp_spec in self.opts.grp_specs:
            group = self.base.comps.group_by_pattern(grp_spec)
            if not group:
                raise dnf.exceptions.Error('group not found: ' + grp_spec)
            self.base.group_install(group.id, ['mandatory', 'default'])
# Every plugin must be a subclass of dnf.Plugin.
class Plugin(dnf.Plugin):
    """A plugin that registers our custom command."""

    # Every plugin must provide its name.
    name = 'foo'  # <-- SET YOUR NAME HERE.

    # Every plugin must provide its own initialization function.
    def __init__(self, base, cli):
        """Initialize the plugin."""
        super(Plugin, self).__init__(base, cli)
        # cli is None when dnf runs without a command line (API use);
        # only register the command when a CLI actually exists.
        if cli:
            cli.register_command(Command)
| dmach/dnf | doc/examples/install_plugin.py | Python | gpl-2.0 | 3,836 |
#!/usr/bin/env python
import pantilthat
import time
import sys
import math
import servo_ranges
def tick():
    """Pause 10 ms between incremental servo moves."""
    time.sleep(0.01)
class Shelf(object):
    """One shelf of records with its measured servo-angle limits."""

    def __init__(self, num, start, end, tilt):
        self.count = num        # number of records on this shelf
        self.pan_start = start  # pan angle (degrees) at the first record
        self.pan_end = end      # pan angle (degrees) at the last record
        self.tilt_pos = tilt    # tilt angle (degrees) for this shelf

    def map_pos_to_angles(self, pos):
        """Return the pan angle (degrees) that points at record *pos* (1-based).

        Out-of-range positions map to 0.  Uses a naive linear interpolation
        across the pan range, which works well enough in practice.
        """
        if not 0 < pos <= self.count:
            return 0
        span = abs(self.pan_start) + abs(self.pan_end)
        step = float(span) / self.count
        return int(self.pan_start - pos * step)
max_shelves = 5
# Calibration table, one Shelf per physical shelf:
# (record count, pan start deg, pan end deg, tilt deg) — measured by hand.
shelves = [
    Shelf(42, 24, -29, -68),
    Shelf(68, 24, -28, -40),
    Shelf(80, 26, -25, 0),
    Shelf(88, 25, -26, +40),
    Shelf(68, 26, -26, +65)
]
# sanity checks
if len(sys.argv) != 3:
    print "Usage: <shelf id> <shelf pos>\n"
    exit()
# setup
servo_ranges.calibrate()
# read last cmd
orig_pan = pantilthat.get_pan()
orig_tilt = pantilthat.get_tilt()
print "found pan: %i; tilt: %i" % (orig_pan, orig_tilt)
# get args
in_id = int(sys.argv[1])
# Modulo also wraps out-of-range shelf ids instead of failing.
in_id = (in_id - 1) % max_shelves  # convert to C array notation
in_pos = int(sys.argv[2])
print "searching: %i %i" % (in_id, in_pos)
# find the target angles for the requested shelf/record
new_pan = shelves[in_id].map_pos_to_angles(in_pos)
new_tilt = shelves[in_id].tilt_pos
# debug
print "output: %i %i" % (new_pan, new_tilt)
#exit()
# start laser
pantilthat.light_mode(pantilthat.PWM)
pantilthat.brightness(128)
# do the requests: step 1 degree per tick() so the servos move smoothly
pan = orig_pan
pan_incr = 1 if new_pan > orig_pan else -1
while pan != new_pan:
    pan = pan + pan_incr
    #print pan
    pantilthat.pan(pan)
    tick()
tilt = orig_tilt
tilt_incr = 1 if new_tilt > orig_tilt else -1
while tilt != new_tilt:
    tilt = tilt + tilt_incr
    #print tilt
    pantilthat.tilt(tilt)
    tick()
# because the servos are so shit
# do a dance to hide the horrible inaccuracy
# (sweep the pan +-5 degrees in a sine pattern around the target)
a = 0.
while a < (12 * math.pi):
    a += math.pi / 20.
    r = int(math.sin(a) * 5.)
    pantilthat.pan(new_pan + r)
    time.sleep(0.005)
# sec; to allow the servos to move before they are auto shut down on exit
print "waiting:"
for t in range(0, 3):
    time.sleep(1)
    print "."
# turn off the laser on the way out
pantilthat.brightness(0)
| valentingalea/vinyl-shelf-finder | pantilthat/finder.py | Python | mit | 2,511 |
from __future__ import absolute_import, unicode_literals
import pickle
import sys
from collections import defaultdict
from kombu import Connection, Consumer, Producer, Exchange, Queue
from kombu.exceptions import MessageStateError
from kombu.utils import ChannelPromise
from kombu.utils import json
from .case import Case, Mock, patch
from .mocks import Transport
class test_Producer(Case):
    """Unit tests for kombu.messaging.Producer.

    Uses the in-memory mock ``Transport`` from ``.mocks``; the mock channel
    records every AMQP method call by name, so tests assert with
    ``assertIn('<method>', channel)``.
    """
    def setup(self):
        # Fresh direct exchange and mock-transport connection per test.
        self.exchange = Exchange('foo', 'direct')
        self.connection = Connection(transport=Transport)
        self.connection.connect()
        self.assertTrue(self.connection.connection.connected)
        self.assertFalse(self.exchange.is_bound)
    def test_repr(self):
        p = Producer(self.connection)
        self.assertTrue(repr(p))
    def test_pickle(self):
        chan = Mock()
        producer = Producer(chan, serializer='pickle')
        p2 = pickle.loads(pickle.dumps(producer))
        self.assertEqual(p2.serializer, producer.serializer)
    def test_no_channel(self):
        p = Producer(None)
        self.assertFalse(p._channel)
    @patch('kombu.messaging.maybe_declare')
    def test_maybe_declare(self, maybe_declare):
        p = self.connection.Producer()
        q = Queue('foo')
        p.maybe_declare(q)
        maybe_declare.assert_called_with(q, p.channel, False)
    @patch('kombu.common.maybe_declare')
    def test_maybe_declare_when_entity_false(self, maybe_declare):
        p = self.connection.Producer()
        p.maybe_declare(None)
        maybe_declare.assert_not_called()
    def test_auto_declare(self):
        channel = self.connection.channel()
        p = Producer(channel, self.exchange, auto_declare=True)
        self.assertIsNot(p.exchange, self.exchange,
                         'creates Exchange clone at bind')
        self.assertTrue(p.exchange.is_bound)
        self.assertIn('exchange_declare', channel,
                      'auto_declare declares exchange')
    def test_manual_declare(self):
        channel = self.connection.channel()
        p = Producer(channel, self.exchange, auto_declare=False)
        self.assertTrue(p.exchange.is_bound)
        self.assertNotIn('exchange_declare', channel,
                         'auto_declare=False does not declare exchange')
        p.declare()
        self.assertIn('exchange_declare', channel,
                      'p.declare() declares exchange')
    def test_prepare(self):
        message = {'the quick brown fox': 'jumps over the lazy dog'}
        channel = self.connection.channel()
        p = Producer(channel, self.exchange, serializer='json')
        m, ctype, cencoding = p._prepare(message, headers={})
        self.assertDictEqual(message, json.loads(m))
        self.assertEqual(ctype, 'application/json')
        self.assertEqual(cencoding, 'utf-8')
    def test_prepare_compression(self):
        message = {'the quick brown fox': 'jumps over the lazy dog'}
        channel = self.connection.channel()
        p = Producer(channel, self.exchange, serializer='json')
        headers = {}
        m, ctype, cencoding = p._prepare(message, compression='zlib',
                                         headers=headers)
        self.assertEqual(ctype, 'application/json')
        self.assertEqual(cencoding, 'utf-8')
        self.assertEqual(headers['compression'], 'application/x-gzip')
        import zlib
        # The payload must round-trip: decompress then JSON-decode.
        self.assertEqual(
            json.loads(zlib.decompress(m).decode('utf-8')),
            message,
        )
    def test_prepare_custom_content_type(self):
        message = 'the quick brown fox'.encode('utf-8')
        channel = self.connection.channel()
        p = Producer(channel, self.exchange, serializer='json')
        m, ctype, cencoding = p._prepare(message, content_type='custom')
        self.assertEqual(m, message)
        self.assertEqual(ctype, 'custom')
        self.assertEqual(cencoding, 'binary')
        m, ctype, cencoding = p._prepare(message, content_type='custom',
                                         content_encoding='alien')
        self.assertEqual(m, message)
        self.assertEqual(ctype, 'custom')
        self.assertEqual(cencoding, 'alien')
    def test_prepare_is_already_unicode(self):
        message = 'the quick brown fox'
        channel = self.connection.channel()
        p = Producer(channel, self.exchange, serializer='json')
        m, ctype, cencoding = p._prepare(message, content_type='text/plain')
        self.assertEqual(m, message.encode('utf-8'))
        self.assertEqual(ctype, 'text/plain')
        self.assertEqual(cencoding, 'utf-8')
        m, ctype, cencoding = p._prepare(message, content_type='text/plain',
                                         content_encoding='utf-8')
        self.assertEqual(m, message.encode('utf-8'))
        self.assertEqual(ctype, 'text/plain')
        self.assertEqual(cencoding, 'utf-8')
    def test_publish_with_Exchange_instance(self):
        p = self.connection.Producer()
        p.channel = Mock()
        p.publish('hello', exchange=Exchange('foo'), delivery_mode='transient')
        self.assertEqual(
            p._channel.basic_publish.call_args[1]['exchange'], 'foo',
        )
    def test_publish_with_expiration(self):
        p = self.connection.Producer()
        p.channel = Mock()
        p.publish('hello', exchange=Exchange('foo'), expiration=10)
        # expiration is forwarded as a string of milliseconds
        properties = p._channel.prepare_message.call_args[0][5]
        self.assertEqual(properties['expiration'], '10000')
    def test_publish_with_reply_to(self):
        p = self.connection.Producer()
        p.channel = Mock()
        p.publish('hello', exchange=Exchange('foo'), reply_to=Queue('foo'))
        properties = p._channel.prepare_message.call_args[0][5]
        self.assertEqual(properties['reply_to'], 'foo')
    def test_set_on_return(self):
        chan = Mock()
        chan.events = defaultdict(Mock)
        p = Producer(ChannelPromise(lambda: chan), on_return='on_return')
        # accessing .channel resolves the promise and registers the handler
        p.channel
        chan.events['basic_return'].add.assert_called_with('on_return')
    def test_publish_retry_calls_ensure(self):
        p = Producer(Mock())
        p._connection = Mock()
        ensure = p.connection.ensure = Mock()
        p.publish('foo', exchange='foo', retry=True)
        ensure.assert_called()
    def test_publish_retry_with_declare(self):
        p = self.connection.Producer()
        p.maybe_declare = Mock()
        p.connection.ensure = Mock()
        ex = Exchange('foo')
        p._publish('hello', 0, '', '', {}, {}, 'rk', 0, 0, ex, declare=[ex])
        p.maybe_declare.assert_called_with(ex)
    def test_revive_when_channel_is_connection(self):
        p = self.connection.Producer()
        p.exchange = Mock()
        new_conn = Connection('memory://')
        defchan = new_conn.default_channel
        p.revive(new_conn)
        self.assertIs(p.channel, defchan)
        p.exchange.revive.assert_called_with(defchan)
    def test_enter_exit(self):
        p = self.connection.Producer()
        p.release = Mock()
        self.assertIs(p.__enter__(), p)
        p.__exit__()
        p.release.assert_called_with()
    def test_connection_property_handles_AttributeError(self):
        p = self.connection.Producer()
        p.channel = object()
        p.__connection__ = None
        self.assertIsNone(p.connection)
    def test_publish(self):
        channel = self.connection.channel()
        p = Producer(channel, self.exchange, serializer='json')
        message = {'the quick brown fox': 'jumps over the lazy dog'}
        ret = p.publish(message, routing_key='process')
        self.assertIn('prepare_message', channel)
        self.assertIn('basic_publish', channel)
        m, exc, rkey = ret
        self.assertDictEqual(message, json.loads(m['body']))
        self.assertDictContainsSubset({'content_type': 'application/json',
                                       'content_encoding': 'utf-8',
                                       'priority': 0}, m)
        self.assertDictContainsSubset({'delivery_mode': 2}, m['properties'])
        self.assertEqual(exc, p.exchange.name)
        self.assertEqual(rkey, 'process')
    def test_no_exchange(self):
        chan = self.connection.channel()
        p = Producer(chan)
        self.assertFalse(p.exchange.name)
    def test_revive(self):
        chan = self.connection.channel()
        p = Producer(chan)
        chan2 = self.connection.channel()
        p.revive(chan2)
        self.assertIs(p.channel, chan2)
        self.assertIs(p.exchange.channel, chan2)
    def test_on_return(self):
        chan = self.connection.channel()
        def on_return(exception, exchange, routing_key, message):
            pass
        p = Producer(chan, on_return=on_return)
        self.assertIn(on_return, chan.events['basic_return'])
        self.assertTrue(p.on_return)
class test_Consumer(Case):
    """Unit tests for kombu.messaging.Consumer.

    Uses the in-memory mock ``Transport`` from ``.mocks``; the mock channel
    records every AMQP method call by name, so tests assert with
    ``assertIn('<method>', channel)`` and ``channel.called.count(...)``.
    """
    def setup(self):
        # Fresh mock-transport connection and direct exchange per test.
        self.connection = Connection(transport=Transport)
        self.connection.connect()
        self.assertTrue(self.connection.connection.connected)
        self.exchange = Exchange('foo', 'direct')
    def test_accept(self):
        a = Consumer(self.connection)
        self.assertIsNone(a.accept)
        b = Consumer(self.connection, accept=['json', 'pickle'])
        # serializer aliases are expanded to full content types
        self.assertSetEqual(
            b.accept,
            {'application/json', 'application/x-python-serialize'},
        )
        c = Consumer(self.connection, accept=b.accept)
        self.assertSetEqual(b.accept, c.accept)
    def test_enter_exit_cancel_raises(self):
        # __exit__ must swallow errors raised by cancel()
        c = Consumer(self.connection)
        c.cancel = Mock(name='Consumer.cancel')
        c.cancel.side_effect = KeyError('foo')
        with c:
            pass
        c.cancel.assert_called_with()
    def test_receive_callback_accept(self):
        message = Mock(name='Message')
        message.errors = []
        callback = Mock(name='on_message')
        c = Consumer(self.connection, accept=['json'], on_message=callback)
        c.on_decode_error = None
        c.channel = Mock(name='channel')
        c.channel.message_to_python = None
        c._receive_callback(message)
        callback.assert_called_with(message)
        self.assertSetEqual(message.accept, c.accept)
    def test_accept__content_disallowed(self):
        conn = Connection('memory://')
        q = Queue('foo', exchange=self.exchange)
        p = conn.Producer()
        p.publish(
            {'complex': object()},
            declare=[q], exchange=self.exchange, serializer='pickle',
        )
        callback = Mock(name='callback')
        # default accept does not include pickle -> ContentDisallowed
        with conn.Consumer(queues=[q], callbacks=[callback]) as consumer:
            with self.assertRaises(consumer.ContentDisallowed):
                conn.drain_events(timeout=1)
        callback.assert_not_called()
    def test_accept__content_allowed(self):
        conn = Connection('memory://')
        q = Queue('foo', exchange=self.exchange)
        p = conn.Producer()
        p.publish(
            {'complex': object()},
            declare=[q], exchange=self.exchange, serializer='pickle',
        )
        callback = Mock(name='callback')
        with conn.Consumer(queues=[q], accept=['pickle'],
                           callbacks=[callback]):
            conn.drain_events(timeout=1)
        callback.assert_called()
        body, message = callback.call_args[0]
        self.assertTrue(body['complex'])
    def test_set_no_channel(self):
        c = Consumer(None)
        self.assertIsNone(c.channel)
        c.revive(Mock())
        self.assertTrue(c.channel)
    def test_set_no_ack(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True, no_ack=True)
        self.assertTrue(consumer.no_ack)
    def test_add_queue_when_auto_declare(self):
        consumer = self.connection.Consumer(auto_declare=True)
        q = Mock()
        q.return_value = q
        consumer.add_queue(q)
        self.assertIn(q, consumer.queues)
        q.declare.assert_called_with()
    def test_add_queue_when_not_auto_declare(self):
        consumer = self.connection.Consumer(auto_declare=False)
        q = Mock()
        q.return_value = q
        consumer.add_queue(q)
        self.assertIn(q, consumer.queues)
        self.assertFalse(q.declare.call_count)
    def test_consume_without_queues_returns(self):
        consumer = self.connection.Consumer()
        consumer.queues[:] = []
        self.assertIsNone(consumer.consume())
    def test_consuming_from(self):
        # consuming_from requires both a matching queue AND an active tag
        consumer = self.connection.Consumer()
        consumer.queues[:] = [Queue('a'), Queue('b'), Queue('d')]
        consumer._active_tags = {'a': 1, 'b': 2}
        self.assertFalse(consumer.consuming_from(Queue('c')))
        self.assertFalse(consumer.consuming_from('c'))
        self.assertFalse(consumer.consuming_from(Queue('d')))
        self.assertFalse(consumer.consuming_from('d'))
        self.assertTrue(consumer.consuming_from(Queue('a')))
        self.assertTrue(consumer.consuming_from(Queue('b')))
        self.assertTrue(consumer.consuming_from('b'))
    def test_receive_callback_without_m2p(self):
        channel = self.connection.channel()
        c = channel.Consumer()
        m2p = getattr(channel, 'message_to_python')
        channel.message_to_python = None
        try:
            message = Mock()
            message.errors = []
            message.decode.return_value = 'Hello'
            recv = c.receive = Mock()
            c._receive_callback(message)
            recv.assert_called_with('Hello', message)
        finally:
            channel.message_to_python = m2p
    def test_receive_callback__message_errors(self):
        channel = self.connection.channel()
        channel.message_to_python = None
        c = channel.Consumer()
        message = Mock()
        try:
            raise KeyError('foo')
        except KeyError:
            message.errors = [sys.exc_info()]
        message._reraise_error.side_effect = KeyError()
        with self.assertRaises(KeyError):
            c._receive_callback(message)
    def test_set_callbacks(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        callbacks = [lambda x, y: x,
                     lambda x, y: x]
        consumer = Consumer(channel, queue, auto_declare=True,
                            callbacks=callbacks)
        self.assertEqual(consumer.callbacks, callbacks)
    def test_auto_declare(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        consumer.consume()
        consumer.consume()  # twice is a noop
        self.assertIsNot(consumer.queues[0], queue)
        self.assertTrue(consumer.queues[0].is_bound)
        self.assertTrue(consumer.queues[0].exchange.is_bound)
        self.assertIsNot(consumer.queues[0].exchange, self.exchange)
        for meth in ('exchange_declare',
                     'queue_declare',
                     'queue_bind',
                     'basic_consume'):
            self.assertIn(meth, channel)
        self.assertEqual(channel.called.count('basic_consume'), 1)
        self.assertTrue(consumer._active_tags)
        consumer.cancel_by_queue(queue.name)
        consumer.cancel_by_queue(queue.name)
        self.assertFalse(consumer._active_tags)
    def test_consumer_tag_prefix(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, tag_prefix='consumer_')
        consumer.consume()
        self.assertTrue(
            consumer._active_tags[queue.name].startswith('consumer_'),
        )
    def test_manual_declare(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=False)
        self.assertIsNot(consumer.queues[0], queue)
        self.assertTrue(consumer.queues[0].is_bound)
        self.assertTrue(consumer.queues[0].exchange.is_bound)
        self.assertIsNot(consumer.queues[0].exchange, self.exchange)
        for meth in ('exchange_declare',
                     'queue_declare',
                     'basic_consume'):
            self.assertNotIn(meth, channel)
        consumer.declare()
        for meth in ('exchange_declare',
                     'queue_declare',
                     'queue_bind'):
            self.assertIn(meth, channel)
        self.assertNotIn('basic_consume', channel)
        consumer.consume()
        self.assertIn('basic_consume', channel)
    def test_consume__cancel(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        consumer.consume()
        consumer.cancel()
        self.assertIn('basic_cancel', channel)
        self.assertFalse(consumer._active_tags)
    def test___enter____exit__(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        context = consumer.__enter__()
        self.assertIs(context, consumer)
        self.assertTrue(consumer._active_tags)
        res = consumer.__exit__(None, None, None)
        self.assertFalse(res)
        self.assertIn('basic_cancel', channel)
        self.assertFalse(consumer._active_tags)
    def test_flow(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        consumer.flow(False)
        self.assertIn('flow', channel)
    def test_qos(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        consumer.qos(30, 10, False)
        self.assertIn('basic_qos', channel)
    def test_purge(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        b2 = Queue('qname2', self.exchange, 'rkey')
        b3 = Queue('qname3', self.exchange, 'rkey')
        b4 = Queue('qname4', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1, b2, b3, b4], auto_declare=True)
        consumer.purge()
        self.assertEqual(channel.called.count('queue_purge'), 4)
    def test_multiple_queues(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        b2 = Queue('qname2', self.exchange, 'rkey')
        b3 = Queue('qname3', self.exchange, 'rkey')
        b4 = Queue('qname4', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1, b2, b3, b4])
        consumer.consume()
        self.assertEqual(channel.called.count('exchange_declare'), 4)
        self.assertEqual(channel.called.count('queue_declare'), 4)
        self.assertEqual(channel.called.count('queue_bind'), 4)
        self.assertEqual(channel.called.count('basic_consume'), 4)
        self.assertEqual(len(consumer._active_tags), 4)
        consumer.cancel()
        self.assertEqual(channel.called.count('basic_cancel'), 4)
        self.assertFalse(len(consumer._active_tags))
    def test_receive_callback(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        received = []
        def callback(message_data, message):
            received.append(message_data)
            message.ack()
            message.payload  # trigger cache
        consumer.register_callback(callback)
        consumer._receive_callback({'foo': 'bar'})
        self.assertIn('basic_ack', channel)
        self.assertIn('message_to_python', channel)
        self.assertEqual(received[0], {'foo': 'bar'})
    def test_basic_ack_twice(self):
        # a message may only be acked once
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        def callback(message_data, message):
            message.ack()
            message.ack()
        consumer.register_callback(callback)
        with self.assertRaises(MessageStateError):
            consumer._receive_callback({'foo': 'bar'})
    def test_basic_reject(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        def callback(message_data, message):
            message.reject()
        consumer.register_callback(callback)
        consumer._receive_callback({'foo': 'bar'})
        self.assertIn('basic_reject', channel)
    def test_basic_reject_twice(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        def callback(message_data, message):
            message.reject()
            message.reject()
        consumer.register_callback(callback)
        with self.assertRaises(MessageStateError):
            consumer._receive_callback({'foo': 'bar'})
        self.assertIn('basic_reject', channel)
    def test_basic_reject__requeue(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        def callback(message_data, message):
            message.requeue()
        consumer.register_callback(callback)
        consumer._receive_callback({'foo': 'bar'})
        self.assertIn('basic_reject:requeue', channel)
    def test_basic_reject__requeue_twice(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        def callback(message_data, message):
            message.requeue()
            message.requeue()
        consumer.register_callback(callback)
        with self.assertRaises(MessageStateError):
            consumer._receive_callback({'foo': 'bar'})
        self.assertIn('basic_reject:requeue', channel)
    def test_receive_without_callbacks_raises(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        with self.assertRaises(NotImplementedError):
            consumer.receive(1, 2)
    def test_decode_error(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        consumer.channel.throw_decode_error = True
        with self.assertRaises(ValueError):
            consumer._receive_callback({'foo': 'bar'})
    def test_on_decode_error_callback(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        thrown = []
        def on_decode_error(msg, exc):
            thrown.append((msg.body, exc))
        consumer = Consumer(channel, [b1], on_decode_error=on_decode_error)
        consumer.channel.throw_decode_error = True
        consumer._receive_callback({'foo': 'bar'})
        self.assertTrue(thrown)
        m, exc = thrown[0]
        self.assertEqual(json.loads(m), {'foo': 'bar'})
        self.assertIsInstance(exc, ValueError)
    def test_recover(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        consumer.recover()
        self.assertIn('basic_recover', channel)
    def test_revive(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        channel2 = self.connection.channel()
        consumer.revive(channel2)
        self.assertIs(consumer.channel, channel2)
        self.assertIs(consumer.queues[0].channel, channel2)
        self.assertIs(consumer.queues[0].exchange.channel, channel2)
    def test_revive__with_prefetch_count(self):
        channel = Mock(name='channel')
        b1 = Queue('qname1', self.exchange, 'rkey')
        Consumer(channel, [b1], prefetch_count=14)
        channel.basic_qos.assert_called_with(0, 14, False)
    def test__repr__(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        self.assertTrue(repr(Consumer(channel, [b1])))
    def test_connection_property_handles_AttributeError(self):
        p = self.connection.Consumer()
        p.channel = object()
        self.assertIsNone(p.connection)
| Elastica/kombu | kombu/tests/test_messaging.py | Python | bsd-3-clause | 24,631 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.utils.translation import ugettext as _
from taiga.base import exceptions as exc
from taiga.base import filters
from taiga.base import response
from taiga.base.api import ModelCrudViewSet
from taiga.base.api import ModelListViewSet
from taiga.base.api.mixins import BlockedByProjectMixin
from taiga.base.api.utils import get_object_or_404
from taiga.base.decorators import list_route
from taiga.mdrender.service import render as mdrender
from taiga.projects.history.mixins import HistoryResourceMixin
from taiga.projects.history.services import take_snapshot
from taiga.projects.models import Project
from taiga.projects.notifications.mixins import WatchedResourceMixin
from taiga.projects.notifications.mixins import WatchersViewSetMixin
from taiga.projects.notifications.services import analize_object_for_watchers
from taiga.projects.notifications.services import send_notifications
from taiga.projects.occ import OCCResourceMixin
from . import models
from . import permissions
from . import serializers
from . import validators
from . import utils as wiki_utils
class WikiViewSet(OCCResourceMixin, HistoryResourceMixin, WatchedResourceMixin,
                  BlockedByProjectMixin, ModelCrudViewSet):
    """CRUD API for wiki pages.

    Mixins add optimistic concurrency control, history snapshots,
    watcher notifications and blocked-project checks.
    """
    model = models.WikiPage
    serializer_class = serializers.WikiPageSerializer
    validator_class = validators.WikiPageValidator
    permission_classes = (permissions.WikiPagePermission,)
    filter_backends = (filters.CanViewWikiPagesFilterBackend,)
    filter_fields = ("project", "slug")
    queryset = models.WikiPage.objects.all()
    def get_queryset(self):
        """Return the base queryset annotated with extra per-user info."""
        qs = super().get_queryset()
        qs = wiki_utils.attach_extra_info(qs, user=self.request.user)
        return qs
    @list_route(methods=["GET"])
    def by_slug(self, request):
        """Retrieve a wiki page by ``?slug=...&project=...`` instead of pk."""
        slug = request.QUERY_PARAMS.get("slug", None)
        project_id = request.QUERY_PARAMS.get("project", None)
        wiki_page = get_object_or_404(models.WikiPage, slug=slug, project_id=project_id)
        return self.retrieve(request, pk=wiki_page.pk)
    @list_route(methods=["POST"])
    def render(self, request, **kwargs):
        """Render markdown ``content`` in the context of ``project_id``.

        Raises:
            exc.WrongArguments: when ``content`` or ``project_id`` is missing.
        """
        content = request.DATA.get("content", None)
        project_id = request.DATA.get("project_id", None)
        if not content:
            raise exc.WrongArguments({"content": _("'content' parameter is mandatory")})
        if not project_id:
            raise exc.WrongArguments({"project_id": _("'project_id' parameter is mandatory")})
        project = get_object_or_404(Project, pk=project_id)
        self.check_permissions(request, "render", project)
        data = mdrender(project, content)
        return response.Ok({"data": data})
    def pre_save(self, obj):
        """Stamp owner (on first save) and last_modifier before saving."""
        if not obj.owner:
            obj.owner = self.request.user
        obj.last_modifier = self.request.user
        super().pre_save(obj)
class WikiWatchersViewSet(WatchersViewSetMixin, ModelListViewSet):
    """API exposing the watchers of a wiki page."""
    permission_classes = (permissions.WikiPageWatchersPermission,)
    resource_model = models.WikiPage
class WikiLinkViewSet(BlockedByProjectMixin, ModelCrudViewSet):
    """CRUD API for wiki links (entries pointing at wiki pages)."""
    model = models.WikiLink
    serializer_class = serializers.WikiLinkSerializer
    validator_class = validators.WikiLinkValidator
    permission_classes = (permissions.WikiLinkPermission,)
    filter_backends = (filters.CanViewWikiPagesFilterBackend,)
    filter_fields = ["project"]

    def post_save(self, obj, created=False):
        """On creation, also auto-create the target wiki page if missing.

        NOTE: previously this called ``super().pre_save(obj)``, which re-ran
        the pre-save hooks instead of the parent's post-save logic; it now
        forwards to ``post_save`` with the ``created`` flag.
        """
        if created:
            self._create_wiki_page_when_create_wiki_link_if_not_exist(self.request, obj)
        super().post_save(obj, created=created)

    def _create_wiki_page_when_create_wiki_link_if_not_exist(self, request, wiki_link):
        """Create the linked wiki page too, when the user is allowed to."""
        try:
            self.check_permissions(request, "create_wiki_page", wiki_link)
        except exc.PermissionDenied:
            # Create only the wiki link because the user doesn't have permission.
            pass
        else:
            # Create the wiki link and the wiki page if not exist.
            wiki_page, created = models.WikiPage.objects.get_or_create(
                slug=wiki_link.href,
                project=wiki_link.project,
                defaults={"owner": self.request.user, "last_modifier": self.request.user})
            if created:
                # Create the new history entry, set watcher for the new wiki page
                # and send notifications about the new page created
                history = take_snapshot(wiki_page, user=self.request.user)
                analize_object_for_watchers(wiki_page, history.comment, history.owner)
                send_notifications(wiki_page, history=history)
| taigaio/taiga-back | taiga/projects/wiki/api.py | Python | agpl-3.0 | 5,324 |
from systemdlogger.log import log
from systemdlogger.journal import JournalExporter
from systemdlogger.cloudwatch import CloudwatchLogger
from systemdlogger.elasticsearch import ElasticsearchLogger
import json
import os
from string import Template
class Runner:
    """Wire a systemd journal exporter to one or more logging backends."""

    # Registry of supported backend names -> logger classes.
    LOGGERS = {
        'cloudwatch': CloudwatchLogger,
        'elasticsearch': ElasticsearchLogger
    }

    def __init__(
        self,
        config_path
    ):
        """Load config, build the journal exporter and backend loggers.

        Args:
            config_path: path to a JSON config file whose values may contain
                ``string.Template`` placeholders substituted from os.environ.
        """
        self.config = self.load_config(config_path)
        self.journal = JournalExporter(**self.config['systemd'])
        # Always assign self.loggers: the previous code skipped this when
        # 'backends' was empty, so save()/run() crashed with AttributeError.
        self.loggers = [
            Runner.LOGGERS[backend](**self.config['backends'][backend])
            for backend in self.config['backends']
        ]

    @staticmethod
    def load_config(config_path):
        """Read ``config_path`` and substitute $VARS from the environment."""
        with open(config_path, 'r') as config_file:
            tmpl = Template(config_file.read())
            return json.loads(tmpl.substitute(**os.environ))

    def save(self, entries):
        """Write ``entries`` to every backend logger.

        Raises:
            RuntimeError: when the cloudwatch response lacks
                ``nextSequenceToken`` (the write did not succeed).  The
                previous bare ``raise`` here had no active exception and
                itself failed with "No active exception to re-raise".
        """
        for logger in self.loggers:
            res = logger.save(entries)
            # verify cloudwatch logger succeeded
            if isinstance(logger, CloudwatchLogger):
                if 'nextSequenceToken' in res:
                    logger.set_last_token(res['nextSequenceToken'])
                else:
                    raise RuntimeError(
                        'did not write to logger successfully: %r' % (res,))

    def run(self):
        """Export new journal entries, persist them, then advance the cursor."""
        try:
            entries = self.journal.get_entries()
            if entries:
                self.save(entries)
                # if all backends succeed
                self.journal.set_cursor(entries)
            else:
                print('no new entries')
        except Exception as e:
            log('e', e)
            raise
| techjacker/systemdlogger | systemdlogger/runner.py | Python | mit | 1,781 |
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import logging
import signal
import time
import sys
from traitlets.config import catch_config_error
from traitlets import (
Instance, Dict, Unicode, Bool, List, CUnicode, Any, Float
)
from jupyter_core.application import (
JupyterApp, base_flags, base_aliases
)
from . import __version__
from .consoleapp import JupyterConsoleApp, app_aliases, app_flags
try:
import queue
except ImportError:
import Queue as queue
# seconds to wait for each execute_interactive() reply
OUTPUT_TIMEOUT = 10
# copy flags from mixin:
flags = dict(base_flags)
# start with mixin frontend flags:
frontend_flags = dict(app_flags)
# update full dict with frontend flags:
flags.update(frontend_flags)
# copy flags from mixin
aliases = dict(base_aliases)
# start with mixin frontend flags
frontend_aliases = dict(app_aliases)
# load updated frontend flags into full dict
aliases.update(frontend_aliases)
# get flags&aliases into sets, and remove a couple that
# shouldn't be scrubbed from backend flags:
frontend_aliases = set(frontend_aliases.keys())
frontend_flags = set(frontend_flags.keys())
class RunApp(JupyterApp, JupyterConsoleApp):
    """Application that executes files (or stdin) on a Jupyter kernel."""
    version = __version__
    name = "jupyter run"
    description = """Run Jupyter kernel code."""
    flags = Dict(flags)
    aliases = Dict(aliases)
    frontend_aliases = Any(frontend_aliases)
    frontend_flags = Any(frontend_flags)
    kernel_timeout = Float(60, config=True,
        help="""Timeout for giving up on a kernel (in seconds).
        On first connect and restart, the console tests whether the
        kernel is running and responsive by sending kernel_info_requests.
        This sets the timeout in seconds for how long the kernel can take
        before being presumed dead.
        """
    )

    def parse_command_line(self, argv=None):
        """Split app/kernel args; remaining positional args are files to run."""
        super(RunApp, self).parse_command_line(argv)
        self.build_kernel_argv(self.extra_args)
        self.filenames_to_run = self.extra_args[:]

    @catch_config_error
    def initialize(self, argv=None):
        """Initialize the app, install the SIGINT handler, probe the kernel."""
        self.log.debug("jupyter run: initialize...")
        super(RunApp, self).initialize(argv)
        JupyterConsoleApp.initialize(self)
        signal.signal(signal.SIGINT, self.handle_sigint)
        self.init_kernel_info()

    def handle_sigint(self, *args):
        """Forward Ctrl-C to the kernel when we own it."""
        if self.kernel_manager:
            self.kernel_manager.interrupt_kernel()
        else:
            # Fix: this branch used to call an undefined ``error(...)``,
            # raising NameError instead of reporting the problem.
            print("", file=sys.stderr)
            self.log.error("Cannot interrupt kernels we didn't start.\n")

    def init_kernel_info(self):
        """Wait for a kernel to be ready, and store kernel info.

        Raises:
            RuntimeError: if no kernel_info reply arrives within
                ``kernel_timeout`` seconds.
        """
        timeout = self.kernel_timeout
        tic = time.time()
        self.kernel_client.hb_channel.unpause()
        msg_id = self.kernel_client.kernel_info()
        while True:
            try:
                reply = self.kernel_client.get_shell_msg(timeout=1)
            except queue.Empty:
                if (time.time() - tic) > timeout:
                    raise RuntimeError("Kernel didn't respond to kernel_info_request")
            else:
                # Only accept the reply matching our request.
                if reply['parent_header'].get('msg_id') == msg_id:
                    self.kernel_info = reply['content']
                    return

    def _execute(self, code, label):
        """Run ``code`` on the kernel; raise if the execution errored."""
        reply = self.kernel_client.execute_interactive(code, timeout=OUTPUT_TIMEOUT)
        if reply['content']['status'] != 'ok':
            raise Exception("jupyter-run error running '%s'" % label)

    def start(self):
        """Execute each given file on the kernel, or stdin if none given."""
        self.log.debug("jupyter run: starting...")
        super(RunApp, self).start()
        if self.filenames_to_run:
            for filename in self.filenames_to_run:
                self.log.debug("jupyter run: executing `%s`" % filename)
                with open(filename) as fp:
                    code = fp.read()
                self._execute(code, filename)
        else:
            self._execute(sys.stdin.read(), 'stdin')
# Module-level entry points for console scripts and `python -m`.
main = launch_new_instance = RunApp.launch_instance
if __name__ == '__main__':
    main()
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/jupyter_client/runapp.py | Python | bsd-2-clause | 4,322 |
import os, datetime, json
from LogController import LogController
from django.http import HttpResponse
from ..models import Asset,Tag, TagMetadata, Language
from django.shortcuts import render,redirect
from django.http import JsonResponse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
class TagController(object):
def index(self, request):
try:
if not request.user.is_authenticated:
lc = LogController()
return redirect(lc.login_view(request))
registros = Tag.objects.all().order_by('-id')
paginator = Paginator(registros, 25)
page = request.GET.get('page')
try:
registros = paginator.page(page)
except PageNotAnInteger:
registros = paginator.page(1)
except EmptyPage:
registros = paginator.page(paginator.num_pages)
context ={
'registros': registros
}
return render(request, 'cawas/tags/index.html', context=context)
except Exception as e:
request.session['index_tags_message'] = e.message
return render(request, 'cawas/tags/index.html', context=context)
    def add(self, request):
        """Create a Tag (plus optional per-language metadata) from an AJAX POST.

        Non-AJAX (or non-POST) requests just render the empty add form.
        Expected JSON body: ``nombre`` (required), ``nombre_es`` and
        ``nombre_pt`` (optional translations).
        """
        if request.is_ajax():
            if request.method == 'POST':
                try:
                    # Parse the JSON request body.
                    json_data = json.loads(request.body)
                    print (str(json_data))
                    # Validation is done on the front end with jQuery.
                    nombre = json_data['nombre']
                    nombre_pt = ''
                    nombre_es = ''
                    if 'nombre_es' in json_data:
                        nombre_es = json_data['nombre_es']
                    if 'nombre_pt' in json_data:
                        nombre_pt = json_data['nombre_pt']
                    print (str(nombre))
                    print (str(nombre_es))
                    # Create the tag itself.
                    newtag = Tag()
                    newtag.name = nombre
                    newtag.save()
                    # Optional Spanish translation metadata.
                    if nombre_es !="":
                        lang = Language.objects.get(code='es')
                        newtagmetadata = TagMetadata()
                        newtagmetadata.tag = newtag
                        newtagmetadata.language =lang
                        newtagmetadata.name = nombre_es
                        newtagmetadata.save()
                    # Optional Portuguese translation metadata.
                    if nombre_pt !="":
                        lang = Language.objects.get(code='pt')
                        newtagmetadata = TagMetadata()
                        newtagmetadata.tag = newtag
                        newtagmetadata.language = lang
                        newtagmetadata.name = nombre_pt
                        newtagmetadata.save()
                    return JsonResponse({'code': 200, 'message': 'Guardado Correctamente'})
                except Asset.DoesNotExist as e:
                    # NOTE(review): catching Asset.DoesNotExist looks wrong --
                    # this block never queries Asset; Language.objects.get
                    # raises Language.DoesNotExist. Confirm the intended type.
                    return JsonResponse({'code': 500, 'message': e.message})
        return render(request, 'cawas/tags/add.html', None)
def edit(self, request, tag_id):
if request.is_ajax():
if request.method == 'POST':
try:
# Leer el Json
json_data = json.loads(request.body)
# La validacion se hace en el Front End con Jquery
tag_id = json_data['tag_id']
tag = Tag.objects.get(tag_id=tag_id)
nombre = json_data['nombre']
nombre_pt = ''
nombre_es = ''
if 'nombre_es' in json_data:
nombre_es = json_data['nombre_es']
if 'nombre_pt' in json_data:
nombre_pt = json_data['nombre_pt']
tag.name = nombre
tag.save()
if nombre_es != '':
lang = Language.objects.get(code='es')
tagmetadata = TagMetadata.objects.get(language=lang, tag=tag)
tagmetadata.name = nombre_es
tagmetadata.save()
if nombre_pt != '':
lang = Language.objects.get(code='pt')
tagmetadata = TagMetadata.objects.get(language=lang, tag=tag)
print 'tag:' + str(tag)
tagmetadata.name = nombre_pt
tagmetadata.save()
return JsonResponse({'code': 200, 'message': 'Guardado Correctamente'})
except Tag.DoesNotExist as e:
return JsonResponse({'code': 500, 'message': e.message})
except TagMetadata.DoesNotExist as e:
return JsonResponse({'code': 500, 'message': e.message})
except Exception as e:
return JsonResponse({'code': 500, 'message': e.message})
#GET
tag = Tag.objects.get(tag_id=tag_id)
lang_es = Language.objects.get(code='es')
lang_pt = Language.objects.get(code='pt')
item_metadata_es=''
item_metadata_pt=''
if TagMetadata.objects.filter(tag=tag, language=lang_es).count() > 0:
item_metadata_es = TagMetadata.objects.get(tag=tag, language=lang_es)
if TagMetadata.objects.filter(tag=tag, language=lang_pt)>0:
item_metadata_pt = TagMetadata.objects.get(tag=tag, language=lang_pt)
context = {'item': tag, 'item_metadata_es': item_metadata_es, 'item_metadata_pt': item_metadata_pt}
return render(request, 'cawas/tags/edit.html', context)
def delete(self, request):
if request.is_ajax():
if request.method == 'POST':
try:
json_data = json.loads(request.body)
id = json_data['id']
Tag.objects.filter(id=id).delete()
data = {'code': 200, 'message': 'Eliminado Correctamente'}
return JsonResponse({'code': 200, 'message': 'Eliminado Correctamente'})
except Exception as e:
return JsonResponse({'code': 500, 'message': e.message})
return JsonResponse({'code': 500, 'message': ''})
def findAll(self):
data =[
{'id': 1, 'description': 'Tag 1'},
{'id': 2, 'description': 'Tag 2'},
]
return HttpResponse(json.dumps(data), None, 200)
| emilianobilli/backend | dam/cawas/Controller/TagController.py | Python | gpl-3.0 | 6,601 |
"""
Enhanced subprocess.Popen subclass, supporting:
* .communicate() with timeout
* kill/terminate/send_signal (like in Py 2.6) for Py 2.4 / 2.5
Sample usage:
out, err = Popen(...).communicate(input, timeout=300)
"""
import os
import subprocess
import threading
import signal
if subprocess.mswindows:
try:
# Python >= 2.6 should have this:
from _subprocess import TerminateProcess
except ImportError:
# otherwise you need win32 extensions:
from win32process import TerminateProcess
else:
import select
import errno
class Popen(subprocess.Popen):
    """subprocess.Popen enhanced with communicate(input, timeout=...) that
    kills the child when the timeout expires and returns whatever output was
    collected so far, plus send_signal/terminate/kill backports so the same
    API is available on Python 2.4/2.5."""
    # send_signal, terminate, kill copied from Python 2.6
    # (we want to support Python >= 2.4)
    if subprocess.mswindows:
        def send_signal(self, sig):
            """Send a signal to the process
            """
            # Windows has no signals; only SIGTERM is emulated (as terminate).
            if sig == signal.SIGTERM:
                self.terminate()
            else:
                raise ValueError("Only SIGTERM is supported on Windows")
        def terminate(self):
            """Terminates the process
            """
            # Hard kill via the Win32 API; the child's exit code becomes 1.
            TerminateProcess(self._handle, 1)
        kill = terminate
    else: # POSIX
        def send_signal(self, sig):
            """Send a signal to the process
            """
            os.kill(self.pid, sig)
        def terminate(self):
            """Terminate the process with SIGTERM
            """
            self.send_signal(signal.SIGTERM)
        def kill(self):
            """Kill the process with SIGKILL
            """
            self.send_signal(signal.SIGKILL)
    def communicate(self, input=None, timeout=None):
        """Interact with process: Send data to stdin. Read data from
        stdout and stderr, until end-of-file is reached. Wait for
        process to terminate. The optional input argument should be a
        string to be sent to the child process, or None, if no data
        should be sent to the child.
        communicate() returns a tuple (stdout, stderr)."""
        # The timeout (in seconds) is consumed by _communicate below; on
        # expiry the child is killed and partial output is returned.
        self.timeout = timeout
        # Optimization: If we are only using one pipe, or no pipe at
        # all, using select() or threads is unnecessary.
        if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
            stdout = None
            stderr = None
            if self.stdin:
                if input:
                    # NOTE(review): _fo_write_no_intr/_fo_read_no_intr are not
                    # defined in this class and not visible in this file chunk;
                    # presumably EINTR-safe I/O helpers defined elsewhere --
                    # confirm they exist before relying on this fast path.
                    self._fo_write_no_intr(self.stdin, input)
                self.stdin.close()
            elif self.stdout:
                stdout = self._fo_read_no_intr(self.stdout)
                self.stdout.close()
            elif self.stderr:
                stderr = self._fo_read_no_intr(self.stderr)
                self.stderr.close()
            self.wait()
            return (stdout, stderr)
        return self._communicate(input)
    if subprocess.mswindows:
        def _communicate(self, input):
            # Windows strategy: one daemon reader thread per output pipe;
            # a thread still alive after join(self.timeout) means we timed out.
            stdout = None # Return
            stderr = None # Return
            if self.stdout:
                stdout = []
                stdout_thread = threading.Thread(target=self._readerthread,
                                                 args=(self.stdout, stdout))
                stdout_thread.setDaemon(True)
                stdout_thread.start()
            if self.stderr:
                stderr = []
                stderr_thread = threading.Thread(target=self._readerthread,
                                                 args=(self.stderr, stderr))
                stderr_thread.setDaemon(True)
                stderr_thread.start()
            if self.stdin:
                if input is not None:
                    self.stdin.write(input)
                self.stdin.close()
            if self.stdout:
                stdout_thread.join(self.timeout)
            if self.stderr:
                stderr_thread.join(self.timeout)
            # if the threads are still alive, that means the thread join timed out
            timed_out = (self.stdout and stdout_thread.isAlive() or
                         self.stderr and stderr_thread.isAlive())
            if timed_out:
                self.kill()
            else:
                self.wait()
            # All data exchanged. Translate lists into strings.
            if stdout is not None:
                stdout = stdout[0]
            if stderr is not None:
                stderr = stderr[0]
            # Translate newlines, if requested. We cannot let the file
            # object do the translation: It is based on stdio, which is
            # impossible to combine with select (unless forcing no
            # buffering).
            if self.universal_newlines and hasattr(file, 'newlines'):
                if stdout:
                    stdout = self._translate_newlines(stdout)
                if stderr:
                    stderr = self._translate_newlines(stderr)
            return (stdout, stderr)
    else: # POSIX
        def _communicate(self, input):
            # POSIX strategy: multiplex all pipes through select(); an empty
            # select() result (nothing ready within self.timeout) is the
            # timeout signal.
            timed_out = False
            read_set = []
            write_set = []
            stdout = None # Return
            stderr = None # Return
            if self.stdin:
                # Flush stdio buffer. This might block, if the user has
                # been writing to .stdin in an uncontrolled fashion.
                self.stdin.flush()
                if input:
                    write_set.append(self.stdin)
                else:
                    self.stdin.close()
            if self.stdout:
                read_set.append(self.stdout)
                stdout = []
            if self.stderr:
                read_set.append(self.stderr)
                stderr = []
            input_offset = 0
            while read_set or write_set:
                try:
                    rlist, wlist, xlist = select.select(read_set, write_set, [], self.timeout)
                except select.error, e:
                    # Retry on EINTR (signal delivered during select).
                    if e.args[0] == errno.EINTR:
                        continue
                    raise
                timed_out = not (rlist or wlist or xlist)
                if timed_out:
                    break
                if self.stdin in wlist:
                    # When select has indicated that the file is writable,
                    # we can write up to PIPE_BUF bytes without risk
                    # blocking. POSIX defines PIPE_BUF >= 512
                    chunk = input[input_offset:input_offset + 512]
                    bytes_written = os.write(self.stdin.fileno(), chunk)
                    input_offset += bytes_written
                    if input_offset >= len(input):
                        self.stdin.close()
                        write_set.remove(self.stdin)
                if self.stdout in rlist:
                    data = os.read(self.stdout.fileno(), 1024)
                    if data == "":
                        # EOF on stdout: stop watching it.
                        self.stdout.close()
                        read_set.remove(self.stdout)
                    stdout.append(data)
                if self.stderr in rlist:
                    data = os.read(self.stderr.fileno(), 1024)
                    if data == "":
                        # EOF on stderr: stop watching it.
                        self.stderr.close()
                        read_set.remove(self.stderr)
                    stderr.append(data)
            # All data exchanged. Translate lists into strings.
            if stdout is not None:
                stdout = ''.join(stdout)
            if stderr is not None:
                stderr = ''.join(stderr)
            # Translate newlines, if requested. We cannot let the file
            # object do the translation: It is based on stdio, which is
            # impossible to combine with select (unless forcing no
            # buffering).
            if self.universal_newlines and hasattr(file, 'newlines'):
                if stdout:
                    stdout = self._translate_newlines(stdout)
                if stderr:
                    stderr = self._translate_newlines(stderr)
            if timed_out:
                self.kill()
            else:
                self.wait()
            # make sure all files are closed:
            for f in [self.stdin, self.stdout, self.stderr]:
                try:
                    f.close()
                except:
                    pass
            return (stdout, stderr)
def exec_cmd(cmd, input=None, timeout=None):
    """Run *cmd* through the shell, feed *input* (if any) to its stdin and
    return the tuple (stdout, stderr, returncode).  When *timeout* seconds
    elapse before the child finishes, it is killed and partial output is
    returned."""
    pipe = subprocess.PIPE
    proc = Popen(
        cmd,
        shell=True,
        close_fds=not subprocess.mswindows,
        bufsize=1024,
        stdin=pipe,
        stdout=pipe,
        stderr=pipe,
    )
    out, err = proc.communicate(input, timeout=timeout)
    return out, err, proc.returncode
# Self-test (Python 2 print statements; this module targets Python >= 2.4):
# the first command should be killed by the 10s timeout, the second is
# allowed to run to completion.
if __name__ == '__main__':
    print exec_cmd("python", "import time ; time.sleep(20) ; print 'never!' ;", timeout=10)
    print exec_cmd("python", "import time ; time.sleep(20) ; print '20s gone' ;")
| Glottotopia/aagd | moin/local/moin/build/lib.linux-x86_64-2.6/MoinMoin/util/SubProcess.py | Python | mit | 9,069 |
# -*- coding: utf-8 -*-
import click
from ..models import Post
@click.command()
@click.option('--title', default=None, help='Title of the Post')
def cli(title):
    "Prints a list of posts"
    # (Docstring kept verbatim: click shows it as the command's --help text.)
    # Start from the full manager; narrow only when a truthy title was given.
    query = Post.objects
    if title:
        query = query(title=title)
    for entry in query:
        click.echo(entry)
| seraphln/wheel | wheel/modules/posts/commands/listposts.py | Python | gpl-3.0 | 316 |
"""Support for Apple HomeKit."""
import asyncio
import ipaddress
import logging
import os
from aiohttp import web
from pyhap.const import CATEGORY_CAMERA, CATEGORY_TELEVISION, STANDALONE_AID
import voluptuous as vol
from homeassistant.components import zeroconf
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_BATTERY_CHARGING,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_OCCUPANCY,
DOMAIN as BINARY_SENSOR_DOMAIN,
)
from homeassistant.components.camera import DOMAIN as CAMERA_DOMAIN
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.humidifier import DOMAIN as HUMIDIFIER_DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
ATTR_BATTERY_CHARGING,
ATTR_BATTERY_LEVEL,
ATTR_ENTITY_ID,
CONF_IP_ADDRESS,
CONF_NAME,
CONF_PORT,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_HUMIDITY,
EVENT_HOMEASSISTANT_STARTED,
EVENT_HOMEASSISTANT_STOP,
SERVICE_RELOAD,
)
from homeassistant.core import CoreState, HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady, Unauthorized
from homeassistant.helpers import device_registry, entity_registry
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entityfilter import BASE_FILTER_SCHEMA, FILTER_SCHEMA
from homeassistant.helpers.reload import async_integration_yaml_config
from homeassistant.loader import IntegrationNotFound, async_get_integration
from homeassistant.util import get_local_ip
# pylint: disable=unused-import
from . import ( # noqa: F401
type_cameras,
type_covers,
type_fans,
type_humidifiers,
type_lights,
type_locks,
type_media_players,
type_security_systems,
type_sensors,
type_switches,
type_thermostats,
)
from .accessories import HomeBridge, HomeDriver, get_accessory
from .aidmanager import AccessoryAidStorage
from .const import (
AID_STORAGE,
ATTR_INTERGRATION,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_SOFTWARE_VERSION,
BRIDGE_NAME,
BRIDGE_SERIAL_NUMBER,
CONF_ADVERTISE_IP,
CONF_AUTO_START,
CONF_ENTITY_CONFIG,
CONF_ENTRY_INDEX,
CONF_FILTER,
CONF_HOMEKIT_MODE,
CONF_LINKED_BATTERY_CHARGING_SENSOR,
CONF_LINKED_BATTERY_SENSOR,
CONF_LINKED_DOORBELL_SENSOR,
CONF_LINKED_HUMIDITY_SENSOR,
CONF_LINKED_MOTION_SENSOR,
CONF_SAFE_MODE,
CONF_ZEROCONF_DEFAULT_INTERFACE,
CONFIG_OPTIONS,
DEFAULT_AUTO_START,
DEFAULT_HOMEKIT_MODE,
DEFAULT_PORT,
DEFAULT_SAFE_MODE,
DOMAIN,
HOMEKIT,
HOMEKIT_MODE_ACCESSORY,
HOMEKIT_MODES,
HOMEKIT_PAIRING_QR,
HOMEKIT_PAIRING_QR_SECRET,
MANUFACTURER,
SERVICE_HOMEKIT_RESET_ACCESSORY,
SERVICE_HOMEKIT_START,
SHUTDOWN_TIMEOUT,
UNDO_UPDATE_LISTENER,
)
from .util import (
dismiss_setup_message,
get_persist_fullpath_for_entry_id,
port_is_available,
remove_state_files_for_entry_id,
show_setup_message,
validate_entity_config,
)
_LOGGER = logging.getLogger(__name__)
# Maximum accessories per bridge (the bridge itself counts as one);
# enforced in HomeKit.add_bridge_accessory below.
MAX_DEVICES = 150
# #### Driver Status ####
# Lifecycle of the accessory driver: READY -> WAIT (starting) -> RUNNING -> STOPPED.
STATUS_READY = 0
STATUS_RUNNING = 1
STATUS_STOPPED = 2
STATUS_WAIT = 3
def _has_all_unique_names_and_ports(bridges):
    """Validate that no two configured bridges share a name or a port.

    Raises voluptuous.Invalid on a duplicate; returns the config unchanged.
    """
    unique = vol.Schema(vol.Unique())
    unique([conf[CONF_NAME] for conf in bridges])
    unique([conf[CONF_PORT] for conf in bridges])
    return bridges
# Schema for a single bridge/accessory entry in YAML.
# CONF_ZEROCONF_DEFAULT_INTERFACE and CONF_SAFE_MODE are still accepted but
# flagged as deprecated.
BRIDGE_SCHEMA = vol.All(
    cv.deprecated(CONF_ZEROCONF_DEFAULT_INTERFACE),
    cv.deprecated(CONF_SAFE_MODE),
    vol.Schema(
        {
            vol.Optional(CONF_HOMEKIT_MODE, default=DEFAULT_HOMEKIT_MODE): vol.In(
                HOMEKIT_MODES
            ),
            vol.Optional(CONF_NAME, default=BRIDGE_NAME): vol.All(
                cv.string, vol.Length(min=3, max=25)
            ),
            vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
            vol.Optional(CONF_IP_ADDRESS): vol.All(ipaddress.ip_address, cv.string),
            vol.Optional(CONF_ADVERTISE_IP): vol.All(ipaddress.ip_address, cv.string),
            vol.Optional(CONF_AUTO_START, default=DEFAULT_AUTO_START): cv.boolean,
            vol.Optional(CONF_SAFE_MODE, default=DEFAULT_SAFE_MODE): cv.boolean,
            vol.Optional(CONF_FILTER, default={}): BASE_FILTER_SCHEMA,
            vol.Optional(CONF_ENTITY_CONFIG, default={}): validate_entity_config,
            vol.Optional(CONF_ZEROCONF_DEFAULT_INTERFACE): cv.boolean,
        },
        extra=vol.ALLOW_EXTRA,
    ),
)
# Top-level YAML schema: a list of bridge entries whose names and ports must
# be pairwise unique.
CONFIG_SCHEMA = vol.Schema(
    {DOMAIN: vol.All(cv.ensure_list, [BRIDGE_SCHEMA], _has_all_unique_names_and_ports)},
    extra=vol.ALLOW_EXTRA,
)
# Payload schema for the homekit.reset_accessory service call.
RESET_ACCESSORY_SERVICE_SCHEMA = vol.Schema(
    {vol.Required(ATTR_ENTITY_ID): cv.entity_ids}
)
def _async_get_entries_by_name(current_entries):
    """Return a dict of the entries by name."""
    # For backwards compat the first bridge may still carry the default
    # bridge name, hence the fallback.
    mapping = {}
    for entry in current_entries:
        mapping[entry.data.get(CONF_NAME, BRIDGE_NAME)] = entry
    return mapping
async def async_setup(hass: HomeAssistant, config: dict):
    """Set up the HomeKit integration from YAML."""
    hass.data.setdefault(DOMAIN, {})
    _async_register_events_and_services(hass)
    if DOMAIN not in config:
        return True
    existing = hass.config_entries.async_entries(DOMAIN)
    entries_by_name = _async_get_entries_by_name(existing)
    for idx, bridge_conf in enumerate(config[DOMAIN]):
        # Known YAML-sourced entries are updated in place; any new bridge
        # is routed through the import config flow instead.
        if not _async_update_config_entry_if_from_yaml(
            hass, entries_by_name, bridge_conf
        ):
            bridge_conf[CONF_ENTRY_INDEX] = idx
            hass.async_create_task(
                hass.config_entries.flow.async_init(
                    DOMAIN,
                    context={"source": SOURCE_IMPORT},
                    data=bridge_conf,
                )
            )
    return True
@callback
def _async_update_config_entry_if_from_yaml(hass, entries_by_name, conf):
    """Update a config entry with the latest yaml.

    Returns True if a matching config entry was found
    Returns False if there is no matching config entry
    """
    entry = entries_by_name.get(conf[CONF_NAME])
    # Only entries that originally came from YAML import are updated.
    if entry is None or entry.source != SOURCE_IMPORT:
        return False
    # If they alter the yaml config we import the changes
    # since there currently is no practical way to support
    # all the options in the UI at this time.
    data = conf.copy()
    options = {key: data.pop(key) for key in CONFIG_OPTIONS}
    hass.config_entries.async_update_entry(entry, data=data, options=options)
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up HomeKit from a config entry."""
    _async_import_options_from_data_if_missing(hass, entry)
    conf = entry.data
    options = entry.options
    name = conf[CONF_NAME]
    port = conf[CONF_PORT]
    _LOGGER.debug("Begin setup HomeKit for %s", name)
    # Persistent entity_id -> accessory-id mapping so accessories keep their
    # identity across restarts.
    aid_storage = AccessoryAidStorage(hass, entry.entry_id)
    await aid_storage.async_initialize()
    # ip_address and advertise_ip are yaml only
    ip_address = conf.get(CONF_IP_ADDRESS)
    advertise_ip = conf.get(CONF_ADVERTISE_IP)
    homekit_mode = options.get(CONF_HOMEKIT_MODE, DEFAULT_HOMEKIT_MODE)
    # Copy: HomeKit mutates this dict (pops per-entity config) during setup.
    entity_config = options.get(CONF_ENTITY_CONFIG, {}).copy()
    auto_start = options.get(CONF_AUTO_START, DEFAULT_AUTO_START)
    entity_filter = FILTER_SCHEMA(options.get(CONF_FILTER, {}))
    homekit = HomeKit(
        hass,
        name,
        port,
        ip_address,
        entity_filter,
        entity_config,
        homekit_mode,
        advertise_ip,
        entry.entry_id,
    )
    zeroconf_instance = await zeroconf.async_get_instance(hass)
    # If the previous instance hasn't cleaned up yet
    # we need to wait a bit
    try:
        await hass.async_add_executor_job(homekit.setup, zeroconf_instance)
    except (OSError, AttributeError) as ex:
        # NOTE(review): AttributeError is treated the same as a port conflict
        # here -- presumably it can surface from pyhap during setup; confirm.
        _LOGGER.warning(
            "%s could not be setup because the local port %s is in use", name, port
        )
        raise ConfigEntryNotReady from ex
    undo_listener = entry.add_update_listener(_async_update_listener)
    hass.data[DOMAIN][entry.entry_id] = {
        AID_STORAGE: aid_storage,
        HOMEKIT: homekit,
        UNDO_UPDATE_LISTENER: undo_listener,
    }
    # Start immediately if HA is already running; otherwise defer to the
    # STARTED event (only when auto_start is enabled).
    if hass.state == CoreState.running:
        await homekit.async_start()
    elif auto_start:
        hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STARTED, homekit.async_start)
    return True
async def _async_update_listener(hass: HomeAssistant, entry: ConfigEntry):
    """Handle options update."""
    # YAML-imported entries are reloaded through the YAML reload service,
    # not through the options flow.
    if entry.source != SOURCE_IMPORT:
        await hass.config_entries.async_reload(entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Unload a config entry.

    Stops the accessory driver when it is running and waits up to
    SHUTDOWN_TIMEOUT seconds for the HomeKit server to release its TCP port
    before dropping the entry's runtime data.
    """
    dismiss_setup_message(hass, entry.entry_id)
    hass.data[DOMAIN][entry.entry_id][UNDO_UPDATE_LISTENER]()
    homekit = hass.data[DOMAIN][entry.entry_id][HOMEKIT]
    if homekit.status == STATUS_RUNNING:
        await homekit.async_stop()
    for _ in range(SHUTDOWN_TIMEOUT):
        # Fix: exit the wait loop as soon as the port is free; previously the
        # loop kept scheduling an executor job every iteration for the full
        # timeout even after the server had already shut down.
        if await hass.async_add_executor_job(
            port_is_available, entry.data[CONF_PORT]
        ):
            break
        _LOGGER.info("Waiting for the HomeKit server to shutdown")
        await asyncio.sleep(1)
    hass.data[DOMAIN].pop(entry.entry_id)
    return True
async def async_remove_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Remove a config entry."""
    # File removal is blocking I/O, so it runs in the executor.
    removed = await hass.async_add_executor_job(
        remove_state_files_for_entry_id, hass, entry.entry_id
    )
    return removed
@callback
def _async_import_options_from_data_if_missing(hass: HomeAssistant, entry: ConfigEntry):
    """Move option keys stored in entry.data into entry.options."""
    options = dict(entry.options)
    data = dict(entry.data)
    # Keys that belong in options but were persisted under data by older
    # versions are migrated here.
    moved = [
        key
        for key in CONFIG_OPTIONS
        if key in entry.data and key not in entry.options
    ]
    for key in moved:
        options[key] = data.pop(key)
    if moved:
        hass.config_entries.async_update_entry(entry, data=data, options=options)
@callback
def _async_register_events_and_services(hass: HomeAssistant):
    """Register events and services for HomeKit."""
    hass.http.register_view(HomeKitPairingQRView)
    def handle_homekit_reset_accessory(service):
        """Handle the homekit.reset_accessory service call."""
        # Fan out to every loaded HomeKit instance; each one resets only the
        # accessories it actually bridges.
        for entry_id in hass.data[DOMAIN]:
            if HOMEKIT not in hass.data[DOMAIN][entry_id]:
                continue
            homekit = hass.data[DOMAIN][entry_id][HOMEKIT]
            if homekit.status != STATUS_RUNNING:
                _LOGGER.warning(
                    "HomeKit is not running. Either it is waiting to be "
                    "started or has been stopped"
                )
                continue
            entity_ids = service.data.get("entity_id")
            homekit.reset_accessories(entity_ids)
    hass.services.async_register(
        DOMAIN,
        SERVICE_HOMEKIT_RESET_ACCESSORY,
        handle_homekit_reset_accessory,
        schema=RESET_ACCESSORY_SERVICE_SCHEMA,
    )
    async def async_handle_homekit_service_start(service):
        """Handle the homekit.start service call."""
        tasks = []
        for entry_id in hass.data[DOMAIN]:
            if HOMEKIT not in hass.data[DOMAIN][entry_id]:
                continue
            homekit = hass.data[DOMAIN][entry_id][HOMEKIT]
            if homekit.status == STATUS_RUNNING:
                _LOGGER.debug("HomeKit is already running")
                continue
            if homekit.status != STATUS_READY:
                _LOGGER.warning(
                    "HomeKit is not ready. Either it is already starting up or has "
                    "been stopped"
                )
                continue
            tasks.append(homekit.async_start())
        # Start all eligible instances concurrently.
        await asyncio.gather(*tasks)
    hass.services.async_register(
        DOMAIN, SERVICE_HOMEKIT_START, async_handle_homekit_service_start
    )
    async def _handle_homekit_reload(service):
        """Handle the homekit reload service call (re-read YAML and reload entries)."""
        config = await async_integration_yaml_config(hass, DOMAIN)
        if not config or DOMAIN not in config:
            return
        current_entries = hass.config_entries.async_entries(DOMAIN)
        entries_by_name = _async_get_entries_by_name(current_entries)
        # Sync YAML changes into the matching imported entries first ...
        for conf in config[DOMAIN]:
            _async_update_config_entry_if_from_yaml(hass, entries_by_name, conf)
        # ... then reload every entry so the changes take effect.
        reload_tasks = [
            hass.config_entries.async_reload(entry.entry_id)
            for entry in current_entries
        ]
        await asyncio.gather(*reload_tasks)
    hass.helpers.service.async_register_admin_service(
        DOMAIN,
        SERVICE_RELOAD,
        _handle_homekit_reload,
    )
class HomeKit:
    """Class to handle all actions between HomeKit and Home Assistant.

    Wraps a pyhap accessory driver: builds the bridge (or single accessory),
    tracks its STATUS_* lifecycle, and wires Home Assistant entities into
    HomeKit accessories.
    """
    def __init__(
        self,
        hass,
        name,
        port,
        ip_address,
        entity_filter,
        entity_config,
        homekit_mode,
        advertise_ip=None,
        entry_id=None,
    ):
        """Initialize a HomeKit object."""
        self.hass = hass
        self._name = name
        self._port = port
        self._ip_address = ip_address
        self._filter = entity_filter
        self._config = entity_config
        self._advertise_ip = advertise_ip
        self._entry_id = entry_id
        self._homekit_mode = homekit_mode
        # Lifecycle: READY -> WAIT (starting) -> RUNNING -> STOPPED.
        self.status = STATUS_READY
        # bridge stays None in accessory mode; driver is created in setup().
        self.bridge = None
        self.driver = None
    def setup(self, zeroconf_instance):
        """Set up bridge and accessory driver."""
        self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, self.async_stop)
        ip_addr = self._ip_address or get_local_ip()
        persist_file = get_persist_fullpath_for_entry_id(self.hass, self._entry_id)
        self.driver = HomeDriver(
            self.hass,
            self._entry_id,
            self._name,
            loop=self.hass.loop,
            address=ip_addr,
            port=self._port,
            persist_file=persist_file,
            advertised_address=self._advertise_ip,
            zeroconf_instance=zeroconf_instance,
        )
        # If we do not load the mac address will be wrong
        # as pyhap uses a random one until state is restored
        if os.path.exists(persist_file):
            self.driver.load()
        else:
            self.driver.persist()
    def reset_accessories(self, entity_ids):
        """Reset the accessory to load the latest configuration."""
        if not self.bridge:
            # Accessory mode: no per-entity removal possible, just push a
            # config change so controllers refresh.
            self.driver.config_changed()
            return
        aid_storage = self.hass.data[DOMAIN][self._entry_id][AID_STORAGE]
        removed = []
        for entity_id in entity_ids:
            aid = aid_storage.get_or_allocate_aid_for_entity_id(entity_id)
            if aid not in self.bridge.accessories:
                continue
            _LOGGER.info(
                "HomeKit Bridge %s will reset accessory with linked entity_id %s",
                self._name,
                entity_id,
            )
            acc = self.remove_bridge_accessory(aid)
            removed.append(acc)
        if not removed:
            # No matched accessories, probably on another bridge
            return
        # Two config_changed notifications: one after removal, one after
        # re-adding, so controllers pick up the rebuilt accessories.
        self.driver.config_changed()
        for acc in removed:
            self.bridge.add_accessory(acc)
        self.driver.config_changed()
    def add_bridge_accessory(self, state):
        """Try adding accessory to bridge if configured beforehand."""
        if not self._filter(state.entity_id):
            return
        # The bridge itself counts as an accessory
        if len(self.bridge.accessories) + 1 >= MAX_DEVICES:
            _LOGGER.warning(
                "Cannot add %s as this would exceed the %d device limit. Consider using the filter option",
                state.entity_id,
                MAX_DEVICES,
            )
            return
        aid = self.hass.data[DOMAIN][self._entry_id][
            AID_STORAGE
        ].get_or_allocate_aid_for_entity_id(state.entity_id)
        conf = self._config.pop(state.entity_id, {})
        # If an accessory cannot be created or added due to an exception
        # of any kind (usually in pyhap) it should not prevent
        # the rest of the accessories from being created
        try:
            acc = get_accessory(self.hass, self.driver, state, aid, conf)
            if acc is not None:
                # Cameras and TVs work but degrade bridge reliability; nudge
                # the user toward dedicated accessory-mode instances.
                if acc.category == CATEGORY_CAMERA:
                    _LOGGER.warning(
                        "The bridge %s has camera %s. For best performance, "
                        "and to prevent unexpected unavailability, create and "
                        "pair a separate HomeKit instance in accessory mode for "
                        "each camera.",
                        self._name,
                        acc.entity_id,
                    )
                elif acc.category == CATEGORY_TELEVISION:
                    _LOGGER.warning(
                        "The bridge %s has tv %s. For best performance, "
                        "and to prevent unexpected unavailability, create and "
                        "pair a separate HomeKit instance in accessory mode for "
                        "each tv media player.",
                        self._name,
                        acc.entity_id,
                    )
                self.bridge.add_accessory(acc)
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception(
                "Failed to create a HomeKit accessory for %s", state.entity_id
            )
    def remove_bridge_accessory(self, aid):
        """Remove and return the bridged accessory with the given aid, if present."""
        acc = None
        if aid in self.bridge.accessories:
            acc = self.bridge.accessories.pop(aid)
        return acc
    async def async_start(self, *args):
        """Start the accessory driver."""
        if self.status != STATUS_READY:
            return
        self.status = STATUS_WAIT
        ent_reg = await entity_registry.async_get_registry(self.hass)
        dev_reg = await device_registry.async_get_registry(self.hass)
        # Pre-computed lookup: device_id -> (domain, device_class) -> entity_id,
        # used to auto-link battery/motion/doorbell/humidity sensors below.
        device_lookup = ent_reg.async_get_device_class_lookup(
            {
                (BINARY_SENSOR_DOMAIN, DEVICE_CLASS_BATTERY_CHARGING),
                (BINARY_SENSOR_DOMAIN, DEVICE_CLASS_MOTION),
                (BINARY_SENSOR_DOMAIN, DEVICE_CLASS_OCCUPANCY),
                (SENSOR_DOMAIN, DEVICE_CLASS_BATTERY),
                (SENSOR_DOMAIN, DEVICE_CLASS_HUMIDITY),
            }
        )
        bridged_states = []
        for state in self.hass.states.async_all():
            entity_id = state.entity_id
            if not self._filter(entity_id):
                continue
            ent_reg_ent = ent_reg.async_get(entity_id)
            if ent_reg_ent:
                await self._async_set_device_info_attributes(
                    ent_reg_ent, dev_reg, entity_id
                )
                self._async_configure_linked_sensors(ent_reg_ent, device_lookup, state)
            bridged_states.append(state)
        self._async_register_bridge(dev_reg)
        await self._async_start(bridged_states)
        _LOGGER.debug("Driver start for %s", self._name)
        self.hass.add_job(self.driver.start_service)
        self.status = STATUS_RUNNING
    @callback
    def _async_register_bridge(self, dev_reg):
        """Register the bridge as a device so homekit_controller and exclude it from discovery."""
        formatted_mac = device_registry.format_mac(self.driver.state.mac)
        # Connections and identifiers are both used here.
        #
        # connections exists so homekit_controller can know the
        # virtual mac address of the bridge and know to not offer
        # it via discovery.
        #
        # identifiers is used as well since the virtual mac may change
        # because it will not survive manual pairing resets (deleting state file)
        # which we have trained users to do over the past few years
        # because this was the way you had to fix homekit when pairing
        # failed.
        #
        connection = (device_registry.CONNECTION_NETWORK_MAC, formatted_mac)
        identifier = (DOMAIN, self._entry_id, BRIDGE_SERIAL_NUMBER)
        self._async_purge_old_bridges(dev_reg, identifier, connection)
        is_accessory_mode = self._homekit_mode == HOMEKIT_MODE_ACCESSORY
        hk_mode_name = "Accessory" if is_accessory_mode else "Bridge"
        dev_reg.async_get_or_create(
            config_entry_id=self._entry_id,
            identifiers={identifier},
            connections={connection},
            manufacturer=MANUFACTURER,
            name=self._name,
            model=f"Home Assistant HomeKit {hk_mode_name}",
        )
    @callback
    def _async_purge_old_bridges(self, dev_reg, identifier, connection):
        """Purge bridges that exist from failed pairing or manual resets."""
        devices_to_purge = []
        for entry in dev_reg.devices.values():
            # A device tied to this entry whose identifier OR connection no
            # longer matches is a stale bridge registration.
            if self._entry_id in entry.config_entries and (
                identifier not in entry.identifiers
                or connection not in entry.connections
            ):
                devices_to_purge.append(entry.id)
        for device_id in devices_to_purge:
            dev_reg.async_remove_device(device_id)
    async def _async_start(self, entity_states):
        """Start the accessory."""
        if self._homekit_mode == HOMEKIT_MODE_ACCESSORY:
            # Accessory mode exposes exactly one entity (the first match).
            state = entity_states[0]
            conf = self._config.pop(state.entity_id, {})
            acc = get_accessory(self.hass, self.driver, state, STANDALONE_AID, conf)
            self.driver.add_accessory(acc)
        else:
            self.bridge = HomeBridge(self.hass, self.driver, self._name)
            for state in entity_states:
                self.add_bridge_accessory(state)
            acc = self.bridge
        await self.hass.async_add_executor_job(self.driver.add_accessory, acc)
        if not self.driver.state.paired:
            # Surface the pairing PIN/QR in the UI until paired.
            show_setup_message(
                self.hass,
                self._entry_id,
                self._name,
                self.driver.state.pincode,
                self.driver.accessory.xhm_uri(),
            )
    async def async_stop(self, *args):
        """Stop the accessory driver."""
        if self.status != STATUS_RUNNING:
            return
        self.status = STATUS_STOPPED
        _LOGGER.debug("Driver stop for %s", self._name)
        await self.driver.async_stop()
        if self.bridge:
            for acc in self.bridge.accessories.values():
                acc.async_stop()
        else:
            self.driver.accessory.async_stop()
    @callback
    def _async_configure_linked_sensors(self, ent_reg_ent, device_lookup, state):
        """Auto-link battery / motion / doorbell / humidity sensors from the same device."""
        if (
            ent_reg_ent is None
            or ent_reg_ent.device_id is None
            or ent_reg_ent.device_id not in device_lookup
            or ent_reg_ent.device_class
            in (DEVICE_CLASS_BATTERY_CHARGING, DEVICE_CLASS_BATTERY)
        ):
            return
        # setdefault throughout: explicit user configuration always wins over
        # these auto-detected links.
        if ATTR_BATTERY_CHARGING not in state.attributes:
            battery_charging_binary_sensor_entity_id = device_lookup[
                ent_reg_ent.device_id
            ].get((BINARY_SENSOR_DOMAIN, DEVICE_CLASS_BATTERY_CHARGING))
            if battery_charging_binary_sensor_entity_id:
                self._config.setdefault(state.entity_id, {}).setdefault(
                    CONF_LINKED_BATTERY_CHARGING_SENSOR,
                    battery_charging_binary_sensor_entity_id,
                )
        if ATTR_BATTERY_LEVEL not in state.attributes:
            battery_sensor_entity_id = device_lookup[ent_reg_ent.device_id].get(
                (SENSOR_DOMAIN, DEVICE_CLASS_BATTERY)
            )
            if battery_sensor_entity_id:
                self._config.setdefault(state.entity_id, {}).setdefault(
                    CONF_LINKED_BATTERY_SENSOR, battery_sensor_entity_id
                )
        if state.entity_id.startswith(f"{CAMERA_DOMAIN}."):
            motion_binary_sensor_entity_id = device_lookup[ent_reg_ent.device_id].get(
                (BINARY_SENSOR_DOMAIN, DEVICE_CLASS_MOTION)
            )
            if motion_binary_sensor_entity_id:
                self._config.setdefault(state.entity_id, {}).setdefault(
                    CONF_LINKED_MOTION_SENSOR,
                    motion_binary_sensor_entity_id,
                )
            doorbell_binary_sensor_entity_id = device_lookup[ent_reg_ent.device_id].get(
                (BINARY_SENSOR_DOMAIN, DEVICE_CLASS_OCCUPANCY)
            )
            if doorbell_binary_sensor_entity_id:
                self._config.setdefault(state.entity_id, {}).setdefault(
                    CONF_LINKED_DOORBELL_SENSOR,
                    doorbell_binary_sensor_entity_id,
                )
        if state.entity_id.startswith(f"{HUMIDIFIER_DOMAIN}."):
            current_humidity_sensor_entity_id = device_lookup[
                ent_reg_ent.device_id
            ].get((SENSOR_DOMAIN, DEVICE_CLASS_HUMIDITY))
            if current_humidity_sensor_entity_id:
                self._config.setdefault(state.entity_id, {}).setdefault(
                    CONF_LINKED_HUMIDITY_SENSOR,
                    current_humidity_sensor_entity_id,
                )
    async def _async_set_device_info_attributes(self, ent_reg_ent, dev_reg, entity_id):
        """Set attributes that will be used for homekit device info."""
        ent_cfg = self._config.setdefault(entity_id, {})
        if ent_reg_ent.device_id:
            dev_reg_ent = dev_reg.async_get(ent_reg_ent.device_id)
            if dev_reg_ent is not None:
                # Handle missing devices
                if dev_reg_ent.manufacturer:
                    ent_cfg[ATTR_MANUFACTURER] = dev_reg_ent.manufacturer
                if dev_reg_ent.model:
                    ent_cfg[ATTR_MODEL] = dev_reg_ent.model
                if dev_reg_ent.sw_version:
                    ent_cfg[ATTR_SOFTWARE_VERSION] = dev_reg_ent.sw_version
        if ATTR_MANUFACTURER not in ent_cfg:
            # Fall back to the integration name as the "manufacturer".
            try:
                integration = await async_get_integration(
                    self.hass, ent_reg_ent.platform
                )
                ent_cfg[ATTR_INTERGRATION] = integration.name
            except IntegrationNotFound:
                ent_cfg[ATTR_INTERGRATION] = ent_reg_ent.platform
class HomeKitPairingQRView(HomeAssistantView):
    """Display the homekit pairing code at a protected url."""
    url = "/api/homekit/pairingqr"
    name = "api:homekit:pairingqr"
    requires_auth = False
    async def get(self, request):
        """Retrieve the pairing QRCode image."""
        # The query string itself carries the credentials: "<entry_id>-<secret>".
        if not request.query_string:
            raise Unauthorized()
        entry_id, secret = request.query_string.split("-")
        domain_data = request.app["hass"].data[DOMAIN]
        if entry_id not in domain_data:
            raise Unauthorized()
        entry_data = domain_data[entry_id]
        if secret != entry_data[HOMEKIT_PAIRING_QR_SECRET]:
            raise Unauthorized()
        return web.Response(
            body=entry_data[HOMEKIT_PAIRING_QR],
            content_type="image/svg+xml",
        )
| turbokongen/home-assistant | homeassistant/components/homekit/__init__.py | Python | apache-2.0 | 27,666 |
""" demonstrating some great Python features via unit tests """
from functools import partial
import operator
import os
import tempfile
import unittest
class testRandomFeatures(unittest.TestCase):
    """ testing random language features """

    def testMultipleAssignent(self):
        """ tests the shorthand syntax of assigning values to multiple variables """
        x, y = 1, 2
        self.assertEqual(1, x)
        self.assertEqual(2, y)
        # from list indices, with * capturing the remainder
        x, y, *z = [1, 2, 3, 4]
        self.assertEqual(1, x)
        self.assertEqual(2, y)
        self.assertEqual([3, 4], z, "all other values!")
        # in-place value swapping
        y, x = x, y
        self.assertEqual(2, x)
        self.assertEqual(1, y)

        def returns_multiple_values():
            return (1, 2)

        x, y = returns_multiple_values()
        self.assertEqual(1, x)
        self.assertEqual(2, y)

    def testForElse(self):
        """ tests that an "else" statement is entered if a for loop is not exited """
        for i in range(0, 5):
            if i > 5:
                break
        else:
            # reached because the loop completed without break
            return
        self.fail()

    def testWith(self):
        """ tests unmanaged resources are closed via the "with" keyword """
        filename = ""
        with tempfile.NamedTemporaryFile() as file_handle:
            filename = file_handle.name
            self.assertTrue(os.path.exists(filename))
        # leaving the "with" block closes (and here deletes) the file
        self.assertFalse(os.path.exists(filename))

    def testChainedConditions(self):
        """ tests the much-loved feature of chaining conditions """
        self.assertTrue(1 < 2 < 3)
        self.assertTrue(1 < 3 > 2)

    def testStringTemplating(self):
        """ shows the simple yet powerful ability to template strings """
        from string import Template
        xml_template = Template('<${tag}>${content}</${tag}>')
        actual = xml_template.substitute(tag='h1', content='Hello, world!')
        expected = "<h1>Hello, world!</h1>"
        self.assertEqual(expected, actual)

    def testDecorators(self):
        """
        tests the very useful ability to decorate functions with other functions
        classes can also be used
        """
        def paragraph_me(func):
            def _paragraph_me(args):
                return "<p>" + func(args) + "</p>"
            return _paragraph_me

        @paragraph_me
        def to_paragraph(value):
            return value

        self.assertEqual(to_paragraph("Hello, world!"), "<p>Hello, world!</p>")

    def testBewareMutableFunctionArguments(self):
        """
        default function argument values are stored in a tuple as part of the
        function, so use a sentinel value to denote "not given" and replace with
        the mutable as a default
        """
        def mutates_default_arg(arg=[]):
            arg.append(1)
            return arg

        self.assertEqual(len(mutates_default_arg.__defaults__), 1)
        self.assertEqual(mutates_default_arg(), [1])
        self.assertEqual(mutates_default_arg.__defaults__[0], [1])
        # the default list is shared across calls and keeps growing
        self.assertEqual(mutates_default_arg(), [1, 1])
        self.assertEqual(mutates_default_arg.__defaults__[0], [1, 1])

        def does_not_mutate_default_arg(arg=None):
            arg = [] if arg is None else arg
            arg.append(1)
            return arg

        self.assertEqual(len(does_not_mutate_default_arg.__defaults__), 1)
        self.assertEqual(does_not_mutate_default_arg(), [1])
        self.assertEqual(does_not_mutate_default_arg.__defaults__[0], None)
        self.assertEqual(does_not_mutate_default_arg(), [1])
        self.assertEqual(does_not_mutate_default_arg.__defaults__[0], None)

    def testInterestingOperatorUsage(self):
        """ why restrict yourself to the usual use of operators? """
        self.assertEqual("nom" * 2, "nomnom")
        self.assertEqual(2 * "nom", "nomnom")
        self.assertEqual([1, 2] * 2, [1, 2, 1, 2])
        self.assertEqual("Keep me!" * True, "Keep me!")
        self.assertEqual("Keep me!" * False, "")
        self.assertEqual([1, 2] + [3, 4], [1, 2, 3, 4])

    def testMembership(self):
        """ using the "in" keyword to test memberships """
        self.assertTrue("b" in "abc")
        self.assertFalse("b" in "efg")
        self.assertTrue(2 in [1, 2, 3])
        self.assertFalse(2 in [4, 5, 6])

    def testZip(self):
        """ showing how lists can be iterated together, and how to tranpose them """
        for zipped in zip(["one"], [2]):
            self.assertEqual(zipped, ("one", 2))
        transpose_me = [(1, 2), (3, 4), (5, 6)]
        self.assertEqual(list(zip(*transpose_me)), [(1, 3, 5), (2, 4, 6)])

    def testEllipsisOperator(self):
        """ demonstrates how the ellipsis operator can be used """
        class TestEllipsis(object):
            def __getitem__(self, item):
                if item is Ellipsis:
                    return "Returning all items"
                return "return %r items" % item

        test_ellipsis = TestEllipsis()
        self.assertEqual(test_ellipsis[2], "return 2 items")
        self.assertEqual(test_ellipsis[...], "Returning all items")

    def testFunctools(self):
        """ testing the binding of arguments to functions for late evaluation """
        bound_func = partial(range, 0, 4)
        self.assertEqual(list(bound_func()), [0, 1, 2, 3])
        self.assertEqual(list(bound_func(2)), [0, 2])

    def functionsAsFirstClassObjects(self):
        """ passing functions around as they are first-class objects """
        # NOTE: not prefixed with "test", so unittest discovery skips this
        # method; call it explicitly to run it.
        called_back = False

        def callback():
            # "nonlocal" is required to rebind the flag in the enclosing
            # scope; without it the assignment creates a new local variable
            # and the assertion below fails.
            nonlocal called_back
            called_back = True

        def use_callback(func):
            func()

        use_callback(callback)
        self.assertTrue(called_back)
class testExceptions(unittest.TestCase):
    """ showcasing the interesting additions to exception handling """

    def testTryExceptElseFinally(self):
        """ tests the flow of an exception-handling block """
        try:
            0 / 0
        except ZeroDivisionError:
            pass
        else:
            # only reached when the try block raised nothing
            self.fail()
        finally:
            # always executed; returning here means the trailing fail()
            # below can never run
            return
        self.fail()

    def testReRaiseException(self):
        """ shows how an exception can be reraised to preserve its original traceback """
        try:
            try:
                1 // 0
            except ZeroDivisionError:
                # a bare "raise" rethrows the active exception unchanged
                raise
        except ZeroDivisionError:
            return
        self.fail()
class testIterating(unittest.TestCase):
    """ demonstrates the support for different uses of iterators """

    def testOperatorSorting(self):
        """ demonstrates sorting objects on any of their keys without custom functions """
        class Custom:
            def __init__(self, value):
                self.id = value

        customs = [Custom(5), Custom(3)]
        customs.sort(key=operator.attrgetter('id'))
        self.assertEqual(3, customs[0].id)

    def testGenerators(self):
        """ shows the awesome ability to yield values as they are needed """
        def generate_to(max_value):
            for i in range(0, max_value):
                yield i

        for i in generate_to(3):
            self.assertEqual(i, 0)
            # stop after the first yielded value; the rest are never produced
            return

    def testSendingValuesToGenerators(self):
        """ demonstrates how values can be received by generators """
        def generate_value(value):
            while True:
                received_value = (yield value)
                if received_value is not None:
                    value = received_value

        generator = generate_value(5)
        self.assertEqual(next(generator), 5)
        self.assertEqual(next(generator), 5)
        self.assertEqual(generator.send(7), 7)
        self.assertEqual(next(generator), 7)

    def arraySlicing(self):
        """ slicing arrays in many different ways """
        # NOTE: not prefixed with "test", so unittest discovery skips this
        # method; call it explicitly to run it.
        values = [1, 2, 3, 4, 5]
        self.assertEqual(values[2:], [3, 4, 5], "from an index onwards")
        # fixed: a single negative index yields the element itself, not a list
        self.assertEqual(values[-1], 5,
                         "using a negative index to access indices from end of the array")
        self.assertEqual(values[:2], [1, 2], "up to an index")
        self.assertEqual(values[2:4], [3, 4], "between indices")
        # fixed: a step of 2 from the start selects elements 1, 3 and 5
        self.assertEqual(values[::2], [1, 3, 5], "using a custom interval")
        self.assertEqual(values[::-1], [5, 4, 3, 2, 1], "reversing")
class testSets(unittest.TestCase):
    """ shows the support for sets and comparisons of them """

    def setUp(self):
        """ initialise the sets used throughout the tests """
        self.one = set(' abcde ')
        self.two = set(' b d f')

    def testDifference(self):
        """ shows the members missing from the second set that are in the first """
        self.assertEqual(self.one - self.two, self.one.difference(self.two))
        self.assertEqual(sorted(self.one - self.two), ["a", "c", "e"])

    def testUnion(self):
        """ tests the union of two sets """
        self.assertEqual(self.one | self.two, self.one.union(self.two))
        self.assertEqual(sorted(self.one | self.two),
                         [" ", "a", "b", "c", "d", "e", "f"])

    def testIntersection(self):
        """ finds the common members of two sets """
        self.assertEqual(self.one & self.two, self.one.intersection(self.two))
        self.assertEqual(sorted(self.one & self.two), [" ", "b", "d"])

    def testSymmetricDifference(self):
        """ shows all the values that are members of one set and not the other """
        self.assertEqual(self.one ^ self.two,
                         self.one.symmetric_difference(self.two))
        self.assertEqual(sorted(self.one ^ self.two), ["a", "c", "e", "f"])
class testListComprehension(unittest.TestCase):
    """ why loop when you can comprehend?! """

    def testProjection(self):
        """ tests projecting the items from one list to another """
        doubled = [n * 2 for n in [1, 2, 3]]
        self.assertEqual([2, 4, 6], doubled)

    def testNested(self):
        """ tests nested projection to transpose nested lists """
        matrix = [
            [1, 2, 3],
            [4, 5, 6],
            [7, 8, 9]
        ]
        transposed = [[row[i] for row in matrix] for i in range(len(matrix))]
        self.assertEqual(
            [
                [1, 4, 7],
                [2, 5, 8],
                [3, 6, 9]
            ],
            transposed)

    def testRestriction(self):
        """ tests restricting a list and finding the odd numbers """
        odds = [n for n in [1, 2, 3] if n % 2 == 1]
        self.assertEqual([1, 3], odds)
class testUnpacking(unittest.TestCase):
    """ tests the neat feature of variable unpacking """

    @staticmethod
    def rectangle_area(height=1, width=1):
        """ simple function that multiplies named parameters """
        return height * width

    def testArray(self):
        """
        tests unpacking an array so that each
        element is used as an argument to a function
        """
        sides = [2, 4]
        self.assertEqual(self.rectangle_area(*sides), sides[0] * sides[1])

    def testDictionary(self):
        """
        tests unpacking a dictionary so that each
        key-value pair is used as a named argument to a function
        """
        dimensions = {
            "height": 2,
            "width": 4
        }
        self.assertEqual(self.rectangle_area(**dimensions),
                         dimensions["height"] * dimensions["width"])

    def testTuple(self):
        """
        tests unpacking a tuple into individual variables
        and an array of unknown length
        """
        my_tuple = (1, 2, 3, 4)
        first, second, *others = my_tuple
        self.assertEqual(first, my_tuple[0])
        self.assertEqual(second, my_tuple[1])
        self.assertEqual(others, list(my_tuple[2:4]))
# Allow the demo suite to be run directly: `python main.py`.
if __name__ == "__main__":
    unittest.main()
| WhatIsHeDoing/DidYouKnow | python/main.py | Python | mit | 12,214 |
#!/usr/bin/env python
# This file is Copyright David Francos Cuartero, licensed under the GPL2 license.
from distutils.core import setup
# Package metadata for airdrop-ng, installed via distutils.
setup(
    name='airdrop-ng',
    version='1.1',
    description='Rule based Deauth Tool',
    author='TheX1le',
    url='https://aircrack-ng.org',
    license='GPL2',
    classifiers=['Development Status :: 4 - Beta'],
    packages=['airdrop'],
    scripts=['airdrop-ng'],
    # NOTE(review): "console" is not a distutils option (py2exe-style entry
    # point?); plain distutils warns about and ignores unknown options.
    console=[{"script": "airdrop-ng"}],
)
| creaktive/aircrack-ng | scripts/airdrop-ng/setup.py | Python | gpl-2.0 | 479 |
import numpy as np
import warnings
# Types treated as "scalar": Python numeric scalars plus NumPy's numeric
# scalar hierarchy (np.number covers all NumPy int/float/complex scalars).
SCALARTYPES = (complex, float, int, np.number)


def is_scalar(f):
    """Determine if the input argument is a scalar.

    The function **is_scalar** returns *True* if the input is an integer,
    float or complex number. The function returns *False* otherwise.

    Parameters
    ----------
    f :
        Any input quantity

    Returns
    -------
    bool :
        - *True* if the input argument is an integer, float or complex number
        - *False* otherwise
    """
    if isinstance(f, SCALARTYPES):
        return True
    if isinstance(f, np.ndarray) and f.size == 1:
        # Use ``flat`` so 0-d arrays (np.array(2.0)) and single-element
        # multi-dimensional arrays (np.array([[2.0]])) are handled as well;
        # plain ``f[0]`` raises IndexError on 0-d arrays and returns a
        # sub-array (not a scalar) for multi-dimensional input.
        return isinstance(f.flat[0], SCALARTYPES)
    return False
def as_array_n_by_dim(pts, dim):
    """Ensures the given array will have *dim* columns.

    The function **as_array_n_by_dim** will examine the *pts* array,
    and coerce it to be at least if the number of columns is equal to *dim*.

    This is similar to the :func:`numpy.atleast_2d`, except that it ensures that then
    input has *dim* columns, and it appends a :data:`numpy.newaxis` to 1D arrays
    instead of prepending.

    Parameters
    ----------
    pts : array_like
        array to check. Lists and tuples are converted to arrays.
    dim : int
        The number of columns which *pts* should have

    Returns
    -------
    (n_pts, dim) numpy.ndarray
        verified array
    """
    # isinstance (rather than ``type(pts) == list``) also accepts list
    # subclasses, and tuples are supported as a natural generalization.
    if isinstance(pts, (list, tuple)):
        pts = np.array(pts)
    if not isinstance(pts, np.ndarray):
        raise TypeError("pts must be a numpy array")

    if dim > 1:
        pts = np.atleast_2d(pts)
    elif len(pts.shape) == 1:
        # promote a 1D array to a single-column 2D array
        pts = pts[:, np.newaxis]

    if pts.shape[1] != dim:
        raise ValueError(
            "pts must be a column vector of shape (nPts, {0:d}) not ({1:d}, {2:d})".format(
                *((dim,) + pts.shape)
            )
        )

    return pts
def requires(modules):
    """Decorator to wrap functions with soft dependencies.

    This function was inspired by the `requires` function of pysal,
    which is released under the 'BSD 3-Clause "New" or "Revised" License'.

    https://github.com/pysal/pysal/blob/master/pysal/lib/common.py

    Parameters
    ----------
    modules : dict
        Dictionary containing soft dependencies, e.g.,
        {'matplotlib': matplotlib}. A value of ``False`` marks a
        dependency as missing.

    Returns
    -------
    decorated_function : function
        Original function if all soft dependencies are met, otherwise
        it returns an empty function which prints why it is not running.
    """
    # Collect the names of all dependencies flagged as unavailable.
    missing = [name for name, module in modules.items() if module is False]

    def decorated_function(function):
        """Wrap function."""
        if missing:
            def passer(*args, **kwargs):
                print(("Missing dependencies: {d}.".format(d=missing)))
                print(("Not running `{}`.".format(function.__name__)))
            return passer
        return function

    return decorated_function
def deprecate_class(removal_version=None, new_location=None, future_warn=False):
    """Decorate a subclass as a deprecated alias of its (first) base class.

    The returned decorator patches ``__init__`` so each instantiation emits a
    deprecation warning pointing at the replacement class.

    Parameters
    ----------
    removal_version : str, optional
        Version of discretize in which the class will be removed.
    new_location : str, optional
        Module that now holds the replacement class.
    future_warn : bool, optional
        If ``True`` warn with ``FutureWarning`` instead of ``DeprecationWarning``.

    Returns
    -------
    callable
        Class decorator.
    """
    # Named warning_category so the builtin ``Warning`` is not shadowed.
    warning_category = FutureWarning if future_warn else DeprecationWarning

    def decorator(cls):
        my_name = cls.__name__
        parent_name = cls.__bases__[0].__name__
        message = f"{my_name} has been deprecated, please use {parent_name}."
        if removal_version is not None:
            message += (
                f" It will be removed in version {removal_version} of discretize."
            )
        else:
            message += " It will be removed in a future version of discretize."

        # stash the original initialization of the class
        cls._old__init__ = cls.__init__

        def __init__(self, *args, **kwargs):
            warnings.warn(message, warning_category)
            self._old__init__(*args, **kwargs)

        cls.__init__ = __init__
        if new_location is not None:
            parent_name = f"{new_location}.{parent_name}"
        cls.__doc__ = f""" This class has been deprecated, see `{parent_name}` for documentation"""
        return cls

    return decorator
def deprecate_module(old_name, new_name, removal_version=None, future_warn=False):
    """Issue a deprecation warning for an entire renamed module.

    Intended to be called at import time from the old module's body.

    Parameters
    ----------
    old_name : str
        Name of the deprecated module.
    new_name : str
        Name of the replacement module.
    removal_version : str, optional
        Version of discretize in which the module will be removed.
    future_warn : bool, optional
        If ``True`` warn with ``FutureWarning`` instead of ``DeprecationWarning``.
    """
    # Named warning_category so the builtin ``Warning`` is not shadowed.
    warning_category = FutureWarning if future_warn else DeprecationWarning
    message = f"The {old_name} module has been deprecated, please use {new_name}."
    if removal_version is not None:
        message += f" It will be removed in version {removal_version} of discretize"
    else:
        message += " It will be removed in a future version of discretize."
    message += " Please update your code accordingly."
    warnings.warn(message, warning_category)
def deprecate_property(new_name, old_name, removal_version=None, future_warn=False):
    """Create a property that forwards to a renamed attribute with a warning.

    Both reading and writing the returned property warn, then delegate to
    ``new_name`` on the same instance.

    Parameters
    ----------
    new_name : str
        Name of the replacement attribute/property.
    old_name : str
        Deprecated name being kept as an alias.
    removal_version : str, optional
        Version of discretize in which the alias will be removed.
    future_warn : bool, optional
        If ``True`` warn with ``FutureWarning`` instead of ``DeprecationWarning``.

    Returns
    -------
    property
        Property object implementing the deprecated alias.
    """
    # Named warning_category so the builtin ``Warning`` is not shadowed.
    warning_category = FutureWarning if future_warn else DeprecationWarning
    if removal_version is not None:
        tag = f" It will be removed in version {removal_version} of discretize."
    else:
        tag = " It will be removed in a future version of discretize."

    def _warn(instance):
        # Shared by getter and setter; the class name is resolved at call
        # time so subclasses report their own name.
        class_name = type(instance).__name__
        message = (
            f"{class_name}.{old_name} has been deprecated, please use {class_name}.{new_name}."
            + tag
        )
        warnings.warn(message, warning_category)

    def get_dep(self):
        _warn(self)
        return getattr(self, new_name)

    def set_dep(self, other):
        _warn(self)
        setattr(self, new_name, other)

    doc = f"""
    `{old_name}` has been deprecated. See `{new_name}` for documentation.

    See Also
    --------
    {new_name}
    """
    return property(get_dep, set_dep, None, doc)
def deprecate_method(new_name, old_name, removal_version=None, future_warn=False):
    """Create a method that forwards to a renamed method with a warning.

    Parameters
    ----------
    new_name : str
        Name of the replacement method on the same class.
    old_name : str
        Deprecated method name being kept as an alias.
    removal_version : str, optional
        Version of discretize in which the alias will be removed.
    future_warn : bool, optional
        If ``True`` warn with ``FutureWarning`` instead of ``DeprecationWarning``.

    Returns
    -------
    callable
        Wrapper method that warns, then delegates to ``new_name``.
    """
    # Named warning_category so the builtin ``Warning`` is not shadowed.
    warning_category = FutureWarning if future_warn else DeprecationWarning
    if removal_version is not None:
        tag = f" It will be removed in version {removal_version} of discretize."
    else:
        tag = " It will be removed in a future version of discretize."

    def new_method(self, *args, **kwargs):
        class_name = type(self).__name__
        warnings.warn(
            f"{class_name}.{old_name} has been deprecated, please use {class_name}.{new_name}."
            + tag,
            warning_category,
        )
        return getattr(self, new_name)(*args, **kwargs)

    doc = f"""
    `{old_name}` has been deprecated. See `{new_name}` for documentation

    See Also
    --------
    {new_name}
    """
    new_method.__doc__ = doc
    return new_method
def deprecate_function(new_function, old_name, removal_version=None, future_warn=False):
    """Create a deprecated alias that forwards to a renamed function.

    Parameters
    ----------
    new_function : callable
        The replacement function; its ``__name__`` is used in the warning.
    old_name : str
        Deprecated function name being kept as an alias.
    removal_version : str, optional
        Version of discretize in which the alias will be removed.
    future_warn : bool, optional
        If ``True`` warn with ``FutureWarning`` instead of ``DeprecationWarning``.

    Returns
    -------
    callable
        Wrapper that warns, then calls ``new_function``.
    """
    # Named warning_category so the builtin ``Warning`` is not shadowed.
    warning_category = FutureWarning if future_warn else DeprecationWarning
    new_name = new_function.__name__
    if removal_version is not None:
        tag = f" It will be removed in version {removal_version} of discretize."
    else:
        tag = " It will be removed in a future version of discretize."

    def dep_function(*args, **kwargs):
        warnings.warn(
            f"{old_name} has been deprecated, please use {new_name}." + tag,
            warning_category,
        )
        return new_function(*args, **kwargs)

    doc = f"""
    `{old_name}` has been deprecated. See `{new_name}` for documentation

    See Also
    --------
    {new_name}
    """
    dep_function.__doc__ = doc
    return dep_function
# DEPRECATIONS
# Old camelCase names kept as thin deprecated wrappers around the snake_case
# functions above; they warn on use and are slated for removal in 1.0.0.
isScalar = deprecate_function(is_scalar, "isScalar", removal_version="1.0.0", future_warn=False)
asArray_N_x_Dim = deprecate_function(
    as_array_n_by_dim, "asArray_N_x_Dim", removal_version="1.0.0", future_warn=False
)
| simpeg/discretize | discretize/utils/code_utils.py | Python | mit | 7,861 |
from datetime import datetime
from asynctest import patch
import bot # noqa: F401
from .base_cache_store import TestCacheStore
from lib.cache import CacheStore
from lib.api.twitch import TwitchCommunity
class TestCacheTwitchApiCreatedDate(TestCacheStore):
    """Tests for the cached CacheStore.twitch_created_date lookup."""
    async def setUp(self):
        await super().setUp()
        # Patch the Twitch API helper so no real network request is made.
        patcher = patch('lib.api.twitch.created_date')
        self.addCleanup(patcher.stop)
        self.mock_created = patcher.start()
    async def test(self,):
        user = 'megotsthis'
        key = f'twitch:{user}:created'
        dt = datetime(2000, 1, 1)
        self.mock_created.return_value = dt
        # First call misses the cache and hits the (mocked) API.
        self.assertEqual(await self.data.twitch_created_date('megotsthis'), dt)
        self.assertTrue(self.mock_created.called)
        self.mock_created.reset_mock()
        # Second call must be served from the cache, without an API call.
        self.assertEqual(await self.data.twitch_created_date('megotsthis'), dt)
        self.assertFalse(self.mock_created.called)
        self.assertIsNotNone(await self.redis.get(key))
class TestCacheTwitchApiNumFollowers(TestCacheStore):
    """Tests for the cached CacheStore.twitch_num_followers lookup."""
    async def setUp(self):
        await super().setUp()
        # Patch the Twitch API helper so no real network request is made.
        patcher = patch('lib.api.twitch.num_followers')
        self.addCleanup(patcher.stop)
        self.mock_followers = patcher.start()
    async def test(self,):
        user = 'megotsthis'
        key = f'twitch:{user}:following'
        self.mock_followers.return_value = 1
        # First call misses the cache and hits the (mocked) API.
        self.assertEqual(await self.data.twitch_num_followers('megotsthis'), 1)
        self.assertTrue(self.mock_followers.called)
        self.mock_followers.reset_mock()
        # Second call must be served from the cache, without an API call.
        self.assertEqual(await self.data.twitch_num_followers('megotsthis'), 1)
        self.assertFalse(self.mock_followers.called)
        self.assertIsNotNone(await self.redis.get(key))
class TestCacheTwitchApiId(TestCacheStore):
    """Tests for the cached Twitch user-name <-> user-id mapping."""
    async def setUp(self):
        await super().setUp()
        # Patch the bulk id lookup so no real Twitch API request is made.
        patcher = patch('lib.api.twitch.getTwitchIds')
        self.addCleanup(patcher.stop)
        self.mock_ids = patcher.start()
    async def test_load_id(self):
        self.mock_ids.return_value = {'botgotsthis': '0'}
        self.assertIs(await self.data.twitch_load_id('botgotsthis'),
                      True)
        self.assertTrue(self.mock_ids.called)
        self.mock_ids.reset_mock()
        # Second load is served from the cache; the API is not called again.
        self.assertIs(await self.data.twitch_load_id('botgotsthis'),
                      True)
        self.assertFalse(self.mock_ids.called)
        self.assertIsNotNone(
            await self.redis.get(self.data._twitchIdUserKey('botgotsthis')))
        self.assertIsNotNone(
            await self.redis.get(self.data._twitchIdIdKey('0')))
    async def test_load_id_no_load(self):
        # None from the API means the lookup failed; nothing may be cached.
        self.mock_ids.return_value = None
        self.assertIs(await self.data.twitch_load_id('botgotsthis'), False)
        self.assertTrue(self.mock_ids.called)
        self.mock_ids.reset_mock()
        self.assertIs(await self.data.twitch_load_id('botgotsthis'), False)
        self.assertTrue(self.mock_ids.called)
        self.assertIsNone(
            await self.redis.get(self.data._twitchIdUserKey('botgotsthis')))
    async def test_load_id_no_id(self):
        # Empty dict: the user has no id; the negative result is still cached.
        self.mock_ids.return_value = {}
        self.assertIs(await self.data.twitch_load_id('botgotsthis'), True)
        self.assertIsNotNone(
            await self.redis.get(self.data._twitchIdUserKey('botgotsthis')))
    async def test_load_ids(self):
        self.mock_ids.return_value = {'botgotsthis': '0'}
        self.assertIs(await self.data.twitch_load_ids(['botgotsthis']),
                      True)
        self.assertTrue(self.mock_ids.called)
        self.mock_ids.reset_mock()
        # Second load is served from the cache; the API is not called again.
        self.assertIs(await self.data.twitch_load_ids(['botgotsthis']),
                      True)
        self.assertFalse(self.mock_ids.called)
        self.assertIsNotNone(
            await self.redis.get(self.data._twitchIdUserKey('botgotsthis')))
        self.assertIsNotNone(
            await self.redis.get(self.data._twitchIdIdKey('0')))
    async def test_load_ids_no_load(self):
        # None from the API means the lookup failed; nothing may be cached.
        self.mock_ids.return_value = None
        self.assertIs(await self.data.twitch_load_ids(['botgotsthis']), False)
        self.assertTrue(self.mock_ids.called)
        self.mock_ids.reset_mock()
        self.assertIs(await self.data.twitch_load_ids(['botgotsthis']), False)
        self.assertTrue(self.mock_ids.called)
        self.assertIsNone(
            await self.redis.get(self.data._twitchIdUserKey('botgotsthis')))
    async def test_load_ids_no_id(self):
        self.mock_ids.return_value = {}
        self.assertIs(await self.data.twitch_load_ids(['botgotsthis']), True)
        self.assertIsNotNone(
            await self.redis.get(self.data._twitchIdUserKey('botgotsthis')))
    async def test_load_ids_multiple(self):
        # Users missing from the API response still get a (None) cache entry.
        self.mock_ids.return_value = {'botgotsthis': '0'}
        self.assertIs(
            await self.data.twitch_load_ids(['botgotsthis', 'megotsthis']),
            True)
        self.assertIsNotNone(
            await self.redis.get(self.data._twitchIdUserKey('botgotsthis')))
        self.assertIsNotNone(
            await self.redis.get(self.data._twitchIdUserKey('megotsthis')))
        self.assertIsNotNone(
            await self.redis.get(self.data._twitchIdIdKey('0')))
    async def test_save_id(self):
        self.assertIs(await self.data.twitch_save_id('0', 'botgotsthis'),
                      True)
        self.assertIsNotNone(
            await self.redis.get(self.data._twitchIdUserKey('botgotsthis')))
        self.assertIsNotNone(
            await self.redis.get(self.data._twitchIdIdKey('0')))
    async def test_save_id_no_id(self):
        # Saving a None id only writes the user-side (negative) entry.
        self.assertIs(await self.data.twitch_save_id(None, 'botgotsthis'),
                      True)
        self.assertIsNotNone(
            await self.redis.get(self.data._twitchIdUserKey('botgotsthis')))
    async def test_is_valid_user(self):
        self.mock_ids.return_value = {'botgotsthis': '0'}
        self.assertIs(await self.data.twitch_is_valid_user('botgotsthis'),
                      True)
        self.assertTrue(self.mock_ids.called)
        self.mock_ids.reset_mock()
        self.assertIs(await self.data.twitch_is_valid_user('botgotsthis'),
                      True)
        self.assertFalse(self.mock_ids.called)
    async def test_is_valid_user_false(self):
        # An empty API response marks the user invalid, and that is cached.
        self.mock_ids.return_value = {}
        self.assertIs(await self.data.twitch_is_valid_user('botgotsthis'),
                      False)
        self.assertTrue(self.mock_ids.called)
        self.mock_ids.reset_mock()
        self.assertIs(await self.data.twitch_is_valid_user('botgotsthis'),
                      False)
        self.assertFalse(self.mock_ids.called)
    async def test_is_valid_user_no_load(self):
        # API failure (None) returns None and is retried every time.
        self.mock_ids.return_value = None
        self.assertIsNone(await self.data.twitch_is_valid_user('botgotsthis'))
        self.assertTrue(self.mock_ids.called)
        self.mock_ids.reset_mock()
        self.assertIsNone(await self.data.twitch_is_valid_user('botgotsthis'))
        self.assertTrue(self.mock_ids.called)
    async def test_get_id(self):
        await self.data.twitch_save_id('0', 'botgotsthis')
        self.assertEqual(await self.data.twitch_get_id('botgotsthis'), '0')
    async def test_get_id_none(self):
        await self.data.twitch_save_id(None, 'botgotsthis')
        self.assertIsNone(await self.data.twitch_get_id('botgotsthis'))
    async def test_get_id_empty(self):
        self.assertIsNone(await self.data.twitch_get_id('botgotsthis'))
    async def test_get_ids(self):
        await self.data.twitch_save_id('0', 'botgotsthis')
        self.assertEqual(await self.data.twitch_get_ids({'botgotsthis'}),
                         {'botgotsthis': '0'})
    async def test_get_ids_none(self):
        await self.data.twitch_save_id(None, 'botgotsthis')
        self.assertEqual(await self.data.twitch_get_ids({'botgotsthis'}),
                         {'botgotsthis': None})
    async def test_get_ids_empty(self):
        self.assertEqual(await self.data.twitch_get_ids({'botgotsthis'}),
                         {'botgotsthis': None})
    async def test_get_user(self):
        await self.data.twitch_save_id('0', 'botgotsthis')
        self.assertEqual(await self.data.twitch_get_user('0'), 'botgotsthis')
    async def test_get_user_empty(self):
        self.assertIsNone(await self.data.twitch_get_user('0'))
class TestCacheTwitchApiCommunity(TestCacheStore):
    """Tests for the cached Twitch community name <-> id mapping."""
    async def setUp(self):
        await super().setUp()
        # Patch both community API helpers so no real requests are made.
        patcher = patch('lib.api.twitch.get_community_by_id')
        self.addCleanup(patcher.stop)
        self.mock_community_id = patcher.start()
        patcher = patch('lib.api.twitch.get_community')
        self.addCleanup(patcher.stop)
        self.mock_community_name = patcher.start()
    async def test_load_id(self):
        self.mock_community_id.return_value = TwitchCommunity(
            '0', 'botgotsthis')
        self.assertIs(await self.data.twitch_load_community_id('0'), True)
        self.assertTrue(self.mock_community_id.called)
        self.mock_community_id.reset_mock()
        # Second load is served from the cache; the API is not called again.
        self.assertIs(await self.data.twitch_load_community_id('0'), True)
        self.assertFalse(self.mock_community_id.called)
        self.assertIsNotNone(
            await self.redis.get(self.data._twitchCommunityNameKey(
                'botgotsthis')))
        self.assertIsNotNone(
            await self.redis.get(self.data._twitchCommunityIdKey('0')))
    async def test_load_id_no_load(self):
        # None from the API means the lookup failed; nothing may be cached.
        self.mock_community_id.return_value = None
        self.assertIs(await self.data.twitch_load_community_id('0'),
                      False)
        self.assertTrue(self.mock_community_id.called)
        self.mock_community_id.reset_mock()
        self.assertIs(await self.data.twitch_load_community_id('0'),
                      False)
        self.assertTrue(self.mock_community_id.called)
        self.assertIsNone(
            await self.redis.get(self.data._twitchCommunityNameKey(
                'botgotsthis')))
        self.assertIsNone(
            await self.redis.get(self.data._twitchCommunityIdKey('0')))
    async def test_load_ids(self):
        self.mock_community_id.return_value = TwitchCommunity(
            '0', 'botgotsthis')
        self.assertIs(await self.data.twitch_load_community_ids({'0'}), True)
        self.assertTrue(self.mock_community_id.called)
        self.mock_community_id.reset_mock()
        self.assertIs(await self.data.twitch_load_community_ids({'0'}), True)
        self.assertFalse(self.mock_community_id.called)
        self.assertIsNotNone(
            await self.redis.get(self.data._twitchCommunityNameKey(
                'botgotsthis')))
        self.assertIsNotNone(
            await self.redis.get(self.data._twitchCommunityIdKey('0')))
    async def test_load_ids_no_load(self):
        # An empty id set succeeds trivially; failed lookups still return
        # True overall but cache nothing and re-hit the API each time.
        self.assertIs(await self.data.twitch_load_community_ids(set()), True)
        self.mock_community_id.return_value = None
        self.assertIs(await self.data.twitch_load_community_ids({'0'}),
                      True)
        self.assertTrue(self.mock_community_id.called)
        self.mock_community_id.reset_mock()
        self.assertIs(await self.data.twitch_load_community_ids({'0'}),
                      True)
        self.assertTrue(self.mock_community_id.called)
        self.assertIsNone(
            await self.redis.get(self.data._twitchCommunityNameKey(
                'botgotsthis')))
        self.assertIsNone(
            await self.redis.get(self.data._twitchCommunityIdKey('0')))
    async def test_load_name(self):
        self.mock_community_name.return_value = TwitchCommunity(
            '0', 'botgotsthis')
        self.assertIs(
            await self.data.twitch_load_community_name('botgotsthis'), True)
        self.assertTrue(self.mock_community_name.called)
        self.mock_community_name.reset_mock()
        # Second load is served from the cache; the API is not called again.
        self.assertIs(
            await self.data.twitch_load_community_name('botgotsthis'), True)
        self.assertFalse(self.mock_community_name.called)
        self.assertIsNotNone(
            await self.redis.get(self.data._twitchCommunityNameKey(
                'botgotsthis')))
        self.assertIsNotNone(
            await self.redis.get(self.data._twitchCommunityIdKey('0')))
    async def test_load_name_no_load(self):
        # None from the API means the lookup failed; nothing may be cached.
        self.mock_community_name.return_value = None
        self.assertIs(
            await self.data.twitch_load_community_name('botgotsthis'), False)
        self.assertTrue(self.mock_community_name.called)
        self.mock_community_name.reset_mock()
        self.assertIs(
            await self.data.twitch_load_community_name('botgotsthis'), False)
        self.assertTrue(self.mock_community_name.called)
        self.assertIsNone(
            await self.redis.get(self.data._twitchCommunityNameKey(
                'botgotsthis')))
        self.assertIsNone(
            await self.redis.get(self.data._twitchCommunityIdKey('0')))
    async def test_save(self):
        self.assertIs(
            await self.data.twitch_save_community('0', 'botgotsthis'), True)
        self.assertIsNotNone(
            await self.redis.get(self.data._twitchCommunityNameKey(
                'botgotsthis')))
        self.assertIsNotNone(
            await self.redis.get(self.data._twitchCommunityIdKey('0')))
    async def test_save_no_id(self):
        # A missing id only writes the name-side (negative) entry.
        self.assertIs(
            await self.data.twitch_save_community(None, 'botgotsthis'),
            True)
        self.assertIsNotNone(
            await self.redis.get(self.data._twitchCommunityNameKey(
                'botgotsthis')))
    async def test_save_no_name(self):
        # A missing name only writes the id-side (negative) entry.
        self.assertIs(await self.data.twitch_save_community('0', None), True)
        self.assertIsNotNone(
            await self.redis.get(self.data._twitchCommunityIdKey('0')))
    async def test_get_id(self):
        await self.data.twitch_save_community('0', 'botgotsthis')
        self.assertEqual(
            await self.data.twitch_get_community_id('botgotsthis'), '0')
    async def test_get_id_none(self):
        await self.data.twitch_save_community(None, 'botgotsthis')
        self.assertIsNone(
            await self.data.twitch_get_community_id('botgotsthis'))
    async def test_get_id_empty(self):
        self.assertIsNone(
            await self.data.twitch_get_community_id('botgotsthis'))
    async def test_get_user(self):
        await self.data.twitch_save_community('0', 'botgotsthis')
        self.assertEqual(await self.data.twitch_get_community_name('0'),
                         'botgotsthis')
    async def test_get_name_none(self):
        await self.data.twitch_save_community('0', None)
        self.assertIsNone(await self.data.twitch_get_community_name('0'))
    async def test_get_user_empty(self):
        self.assertIsNone(await self.data.twitch_get_community_name('0'))
class TestCacheTwitchApiEmotes(TestCacheStore):
async def setUp(self):
await super().setUp()
patcher = patch('lib.api.twitch.twitch_emotes')
self.addCleanup(patcher.stop)
self.mock_emotes = patcher.start()
self.mock_emotes.return_value = {25: ('Kappa', 0)}
CacheStore._lastEmoteSet = None
async def test_load(self):
self.assertIs(await self.data.twitch_load_emotes({0}), True)
self.assertTrue(self.mock_emotes.called)
self.mock_emotes.reset_mock()
self.assertIs(await self.data.twitch_load_emotes({0}), True)
self.assertFalse(self.mock_emotes.called)
self.assertIsNotNone(await self.redis.get(self.data._twitchEmoteKey()))
self.assertIsNotNone(
await self.redis.get(self.data._twitchEmoteSetKey()))
async def test_load_background(self):
self.assertIs(await self.data.twitch_load_emotes({0}, background=True),
True)
self.assertTrue(self.mock_emotes.called)
self.data.redis.expire(self.data._twitchEmoteKey(), 5)
self.mock_emotes.reset_mock()
self.assertIs(await self.data.twitch_load_emotes({0}, background=True),
True)
self.assertTrue(self.mock_emotes.called)
self.assertIsNotNone(await self.redis.get(self.data._twitchEmoteKey()))
self.assertIsNotNone(
await self.redis.get(self.data._twitchEmoteSetKey()))
async def test_load_none(self):
self.assertIs(await self.data.twitch_load_emotes(set()), False)
self.assertFalse(self.mock_emotes.called)
self.assertIsNone(await self.redis.get(self.data._twitchEmoteKey()))
self.assertIsNone(await self.redis.get(self.data._twitchEmoteSetKey()))
self.mock_emotes.return_value = None
self.assertIs(await self.data.twitch_load_emotes({0}), False)
self.assertTrue(self.mock_emotes.called)
self.mock_emotes.reset_mock()
self.assertIs(await self.data.twitch_load_emotes({0}), False)
self.assertTrue(self.mock_emotes.called)
self.assertIsNone(await self.redis.get(self.data._twitchEmoteKey()))
self.assertIsNotNone(
await self.redis.get(self.data._twitchEmoteSetKey()))
async def test_save_set(self):
self.assertIs(
await self.data.twitch_save_emote_set({0}), True)
self.assertIsNotNone(
await self.redis.get(self.data._twitchEmoteSetKey()))
async def test_save_emotes(self):
self.assertIs(
await self.data.twitch_save_emotes({25: ('Kappa', 0)}), True)
self.assertIsNotNone(await self.redis.get(self.data._twitchEmoteKey()))
async def test_get_set(self):
await self.data.twitch_save_emote_set({0})
self.assertEqual(await self.data.twitch_get_bot_emote_set(), {0})
async def test_get_set_none(self):
self.assertIsNone(await self.data.twitch_get_bot_emote_set())
async def test_get_set_expired(self):
await self.data.twitch_save_emote_set({0})
self.assertEqual(await self.data.twitch_get_bot_emote_set(), {0})
self.redis.flushdb()
self.assertEqual(await self.data.twitch_get_bot_emote_set(), {0})
async def test_get_emotes(self):
await self.data.twitch_save_emotes({25: ('Kappa', 0)})
self.assertEqual(await self.data.twitch_get_emotes(), {25: 'Kappa'})
async def test_get_emotes_empty(self):
self.assertEqual(await self.data.twitch_get_emotes(), None)
async def test_get_emotes_sets(self):
    """Saved emotes also expose an id-to-emote-set mapping."""
    await self.data.twitch_save_emotes({25: ('Kappa', 0)})
    fetched = await self.data.twitch_get_emote_sets()
    self.assertEqual(fetched, {25: 0})
async def test_get_emotes_sets_empty(self):
    """With nothing cached, twitch_get_emote_sets yields None."""
    fetched = await self.data.twitch_get_emote_sets()
    self.assertEqual(fetched, None)
| MeGotsThis/BotGotsThis | tests/cache/test_twitch_api.py | Python | gpl-3.0 | 18,708 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Widen ``Payment.fee`` from (max_digits=9, decimal_places=2) to
    (max_digits=16, decimal_places=8)."""

    def forwards(self, orm):
        # Changing field 'Payment.fee'
        db.alter_column('core_payment', 'fee', self.gf('django.db.models.fields.DecimalField')(max_digits=16, decimal_places=8))

    def backwards(self, orm):
        # Changing field 'Payment.fee' (restore the previous precision)
        db.alter_column('core_payment', 'fee', self.gf('django.db.models.fields.DecimalField')(max_digits=9, decimal_places=2))

    # Frozen ORM snapshot generated by South for this migration; it is
    # auto-generated and should not be edited by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'core.issue': {
            'Meta': {'object_name': 'Issue'},
            'createdByUser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_feedback': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_public_suggestion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Project']", 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
            'trackerURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'updatedDate': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
        },
        'core.issuecomment': {
            'Meta': {'object_name': 'IssueComment'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'content': ('django.db.models.fields.TextField', [], {}),
            'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Issue']"})
        },
        'core.issuecommenthistevent': {
            'Meta': {'object_name': 'IssueCommentHistEvent'},
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.IssueComment']"}),
            'content': ('django.db.models.fields.TextField', [], {}),
            'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'core.issuewatch': {
            'Meta': {'object_name': 'IssueWatch'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Issue']"}),
            'reason': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'core.offer': {
            'Meta': {'object_name': 'Offer'},
            'acceptanceCriteria': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
            'currency': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'expirationDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Issue']"}),
            'lastChangeDate': ('django.db.models.fields.DateTimeField', [], {}),
            'no_forking': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
            'require_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'sponsor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
        },
        'core.offercomment': {
            'Meta': {'object_name': 'OfferComment'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'content': ('django.db.models.fields.TextField', [], {}),
            'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Offer']"})
        },
        'core.offercommenthistevent': {
            'Meta': {'object_name': 'OfferCommentHistEvent'},
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.OfferComment']"}),
            'content': ('django.db.models.fields.TextField', [], {}),
            'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'core.offerhistevent': {
            'Meta': {'object_name': 'OfferHistEvent'},
            'acceptanceCriteria': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
            'expirationDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'no_forking': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Offer']"}),
            'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
            'require_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
        },
        'core.offerwatch': {
            'Meta': {'object_name': 'OfferWatch'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Offer']"}),
            'reason': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'core.payment': {
            'Meta': {'object_name': 'Payment'},
            'confirm_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
            'currency': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            # NOTE(review): the snapshot records max_digits '9' for 'fee'
            # while forwards() alters the column to 16 — auto-generated,
            # but looks inconsistent; confirm against the next migration.
            'fee': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '8'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lastChangeDate': ('django.db.models.fields.DateTimeField', [], {}),
            'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Offer']"}),
            'paykey': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'total': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'})
        },
        'core.paymenthistevent': {
            'Meta': {'object_name': 'PaymentHistEvent'},
            'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'payment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Payment']"}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
        },
        'core.paymentpart': {
            'Meta': {'object_name': 'PaymentPart'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'payment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Payment']"}),
            'paypalEmail': ('django.db.models.fields.EmailField', [], {'max_length': '256', 'null': 'True'}),
            'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
            'programmer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'realprice': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
            'solution': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Solution']"})
        },
        'core.project': {
            'Meta': {'object_name': 'Project'},
            'createdByUser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
            'homeURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'trackerURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        },
        'core.solution': {
            'Meta': {'object_name': 'Solution'},
            'accepting_payments': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Issue']"}),
            'lastChangeDate': ('django.db.models.fields.DateTimeField', [], {}),
            'programmer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
        },
        'core.solutionhistevent': {
            'Meta': {'object_name': 'SolutionHistEvent'},
            'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'solution': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Solution']"}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
        },
        'core.userinfo': {
            'Meta': {'object_name': 'UserInfo'},
            'about': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'brazilianPaypal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'hide_from_userlist': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_paypal_email_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_primary_email_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'paypalEmail': ('django.db.models.fields.EmailField', [], {'max_length': '256'}),
            'preferred_language_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'realName': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'receiveAllEmail': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'screenName': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'website': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['core']
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Fall back to the base settings module when the caller has not set one.
    if os.environ.get('DJANGO_SETTINGS_MODULE') is None:
        os.environ['DJANGO_SETTINGS_MODULE'] = 'settings.base'

    # When using an on-disk database for the test suite,
    # Django asks us if we want to delete the database.
    # We do.
    if 'test' in sys.argv[0:3]:
        # Catch warnings in tests and redirect them to be handled by the
        # test runner.  Otherwise build results are too noisy to be of
        # much use.
        import logging
        logging.captureWarnings(True)

        # --noinput answers the delete-database prompt automatically;
        # --logging-clear-handlers keeps captured log output readable.
        sys.argv.append('--noinput')
        sys.argv.append('--logging-clear-handlers')

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
| edx/edx-ora2 | manage.py | Python | agpl-3.0 | 762 |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para Repelis - Por Kampanita-2015
# ( con ayuda de neno1978, DrZ3r0, y robalo )
# 4/9/2015
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# -----------------------------------------------------------------
import re
import urlparse
from core import config
from core import logger
from core import scrapertools
from core import servertools
from core.item import Item
# Main list manual
def mainlist(item):
    """Build the channel's top-level menu (hand-written entries)."""
    logger.info()
    itemlist = []
    item.url = "http://www.repelis.tv/pag/1"
    # Shared fanart image for all menu entries.
    mifan="http://www.psicocine.com/wp-content/uploads/2013/08/Bad_Robot_Logo.jpg"
    itemlist.append( Item(channel=item.channel, action="menupelis", title="Peliculas", url="http://www.repelis.tv/pag/1" , thumbnail="http://www.gaceta.es/sites/default/files/styles/668x300/public/metro_goldwyn_mayer_1926-web.png?itok=-lRSR9ZC", fanart=mifan) )
    itemlist.append( Item(channel=item.channel, action="menuestre", title="Estrenos", url="http://www.repelis.tv/archivos/estrenos/pag/1" , thumbnail="http://t0.gstatic.com/images?q=tbn:ANd9GcS4g68rmeLQFuX7iCrPwd00FI_OlINZXCYXEFrJHTZ0VSHefIIbaw", fanart=mifan) )
    itemlist.append( Item(channel=item.channel, action="menudesta", title="Destacadas", url="http://www.repelis.tv/pag/1" , thumbnail="http://img.irtve.es/v/1074982/", fanart=mifan) )
    itemlist.append( Item(channel=item.channel, action="todaspelis", title="Proximos estrenos", url="http://www.repelis.tv/archivos/proximos-estrenos/pag/1", thumbnail="https://encrypted-tbn3.gstatic.com/images?q=tbn:ANd9GcTpsRC-GTYzCqhor2gIDfAB61XeymwgXWSVBHoRAKs2c5HAn29f&reload=on", fanart=mifan))
    itemlist.append( Item(channel=item.channel, action="todaspelis", title="Todas las Peliculas", url="http://www.repelis.tv/pag/1" , thumbnail="https://freaksociety.files.wordpress.com/2012/02/logos-cine.jpg", fanart=mifan) )
    # Adult section only when adult mode is enabled in the settings.
    if config.get_setting("adult_mode") != 0:
        itemlist.append( Item(channel=item.channel, action="search", title="Eroticas +18", url="http://www.repelis.tv/genero/eroticas/pag/1" , thumbnail="http://www.topkamisetas.com/catalogo/images/TB0005.gif", fanart="http://www.topkamisetas.com/catalogo/images/TB0005.gif") )
    # Original note: the by-year search was meant to be hidden when adult
    # mode is off because erotic links cannot be filtered there.
    # NOTE(review): the entry below is currently added unconditionally —
    # confirm whether it should live inside the adult_mode check.
    itemlist.append( Item(channel=item.channel, action="poranyo", title="Por Año", url="http://www.repelis.tv/anio/2016", thumbnail="http://t3.gstatic.com/images?q=tbn:ANd9GcSkxiYXdBcI0cvBLsb_nNlz_dWXHRl2Q-ER9dPnP1gNUudhrqlR", fanart=mifan))
    # By-category does filter out the erotic category (see porcateg).
    itemlist.append( Item(channel=item.channel, action="porcateg", title="Por Categoria", url="http://www.repelis.tv/genero/accion/pag/1", thumbnail="http://www.logopro.it/blog/wp-content/uploads/2013/07/categoria-sigaretta-elettronica.png", fanart=mifan))
    itemlist.append( Item(channel=item.channel, action="search", title="Buscar...", url="http://www.repelis.tv/search/?s=", thumbnail="http://thumbs.dreamstime.com/x/buscar-pistas-13159747.jpg", fanart=mifan))
    return itemlist
#Peliculas recien agregadas ( quitamos las de estreno del slide-bar en el top
def menupelis(item):
    """List recently-added movies on the page, plus a "next page" entry."""
    logger.info(item.url)
    itemlist = []
    # Site serves latin-1; re-encode to UTF-8 for the regexes and labels.
    data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8')
    # Grab only the "Recently Added Movies" section of the page.
    patronenlaces= '<h3>Películas Recién Agregadas</h3>.*?>(.*?)</section>'
    matchesenlaces = re.compile(patronenlaces,re.DOTALL).findall(data)
    logger.info("begin ----------")
    scrapertools.printMatches(matchesenlaces)
    logger.info("end ----------")
    for bloque_enlaces in matchesenlaces:
        # One poster card per movie: link, title and thumbnail.
        patron = '<div class="poster-media-card">.*?'
        patron += '<a href="(.*?)".*?title="(.*?)".*?'
        patron +='<img src="(.*?)"'
        matches = re.compile(patron,re.DOTALL).findall(bloque_enlaces)
        scrapertools.printMatches(matches)
        for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
            logger.info("He encontrado el segundo bloque")
            title = scrapertools.remove_show_from_title(scrapedtitle,"Ver Película")
            title = title.replace("Online","");
            url = urlparse.urljoin(item.url,scrapedurl)
            thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
            itemlist.append( Item(channel=item.channel, action="findvideos", title=title, fulltitle=title , url=url , thumbnail=thumbnail, fanart=thumbnail) )
    ## Pagination
    #<span class="current">2</span><a href="http://www.repelis.tv/page/3"
    # If the marker is missing, the ">> next page" entry is simply omitted.
    try:
        next_page = scrapertools.get_match(data,'<span class="current">\d+</span><a href="([^"]+)"')
        title= "[COLOR red][B]Pagina siguiente »[/B][/COLOR]"
        itemlist.append( Item(channel=item.channel, title=title, url=next_page, action="menupelis", thumbnail=item.thumbnail, fanart=item.fanart, folder=True) )
    except: pass
    return itemlist
#Todas las peliculas
def todaspelis(item):
    """List every movie on a generic listing page (all movies, by year,
    by category, upcoming releases), plus a "next page" entry.

    Fix: removed a stray Python-2 ``print data`` statement that dumped the
    entire fetched HTML page to stdout on every call.
    """
    logger.info(item.url)
    itemlist = []
    # Site serves latin-1; re-encode to UTF-8 for the regexes and labels.
    data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8')
    # The listing body sits between the page <h1> and the closing </section>.
    patronenlaces= '<h1>.*?</h1>.*?>(.*?)</section>'
    matchesenlaces = re.compile(patronenlaces,re.DOTALL).findall(data)
    for bloque_enlaces in matchesenlaces:
        # One poster card per movie: link, title and thumbnail.
        patron = '<div class="poster-media-card">.*?'
        patron += '<a href="(.*?)".*?title="(.*?)".*?'
        patron +='<img src="(.*?)"'
        matches = re.compile(patron,re.DOTALL).findall(bloque_enlaces)
        scrapertools.printMatches(matches)
        for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
            title = scrapertools.remove_show_from_title(scrapedtitle,"Ver Película")
            title = title.replace("Online","")
            url = urlparse.urljoin(item.url,scrapedurl)
            thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
            itemlist.append( Item(channel=item.channel, action="findvideos", title=title, fulltitle=title , url=url , thumbnail=thumbnail, fanart=thumbnail) )
    ## Pagination: if the marker is missing, no "next page" entry is added.
    try:
        next_page = scrapertools.get_match(data,'<span class="current">\d+</span><a href="([^"]+)"')
        title= "[COLOR red][B]Pagina siguiente »[/B][/COLOR]"
        itemlist.append( Item(channel=item.channel, title=title, url=next_page, action="todaspelis", folder=True) )
    except: pass
    return itemlist
#Peliculas Destacadas
def menudesta(item):
    """List the "Featured" (Destacadas) movies from the home page."""
    logger.info(item.url)
    itemlist = []
    # Site serves latin-1; re-encode to UTF-8 for the regexes and labels.
    data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8')
    # Featured block sits between the "Destacadas" <h3> and the next <h3>.
    patronenlaces= '<h3>.*?Destacadas.*?>(.*?)<h3>'
    matchesenlaces = re.compile(patronenlaces,re.DOTALL).findall(data)
    for bloque_enlaces in matchesenlaces:
        #patron = '<a href="([^"]+)" title="([^"]+)"> <div class="poster".*?<img src="([^"]+)"'
        patron = '<div class="poster-media-card">.*?'
        patron += '<a href="(.*?)".*?title="(.*?)".*?'
        patron +='<img src="(.*?)"'
        matches = re.compile(patron,re.DOTALL).findall(bloque_enlaces)
        scrapertools.printMatches(matches)
        for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
            title = scrapertools.remove_show_from_title(scrapedtitle,"Ver Película")
            title = title.replace("Online","");
            url = urlparse.urljoin(item.url,scrapedurl)
            thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
            itemlist.append( Item(channel=item.channel, action="findvideos", title=title, fulltitle=title , url=url , thumbnail=thumbnail, fanart=thumbnail) )
    return itemlist
#Peliculas de Estreno
def menuestre(item):
    """List new-release (Estrenos) movies, plus a "next page" entry."""
    logger.info(item.url)
    itemlist = []
    # Site serves latin-1; re-encode to UTF-8 for the regexes and labels.
    data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8')
    # Releases listing sits between the "Estrenos" <h1> and </section>.
    patronenlaces= '<h1>Estrenos</h1>(.*?)</section>'
    matchesenlaces = re.compile(patronenlaces,re.DOTALL).findall(data)
    for bloque_enlaces in matchesenlaces:
        #patron = '<a href="([^"]+)" title="([^"]+)"> <div class="poster".*?<img src="([^"]+)"'
        patron = '<div class="poster-media-card">.*?'
        patron += '<a href="(.*?)".*?title="(.*?)".*?'
        patron +='<img src="(.*?)"'
        matches = re.compile(patron,re.DOTALL).findall(bloque_enlaces)
        scrapertools.printMatches(matches)
        for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
            title = scrapertools.remove_show_from_title(scrapedtitle,"Ver Película")
            title = title.replace("Online","");
            url = urlparse.urljoin(item.url,scrapedurl)
            thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
            itemlist.append( Item(channel=item.channel, action="findvideos", title=title, fulltitle=title , url=url , thumbnail=thumbnail, fanart=thumbnail) )
    ## Pagination
    #<span class="current">2</span><a href="http://www.repelis.tv/page/3"
    # If the marker is missing, the ">> next page" entry is simply omitted.
    try:
        next_page = scrapertools.get_match(data,'<span class="current">\d+</span><a href="([^"]+)"')
        title= "[COLOR red][B]Pagina siguiente »[/B][/COLOR]"
        itemlist.append( Item(channel=item.channel, title=title, url=next_page, action="menuestre", folder=True) )
    except: pass
    return itemlist
def findvideos(item):
    """Build the list of playable links for one movie detail page.

    Scrapes the synopsis/info panel into a Kodi-markup plot string, then
    parses the link table (host, language, quality) into play items.
    """
    logger.info(item.url)
    itemlist = []
    # Site serves latin-1; re-encode to UTF-8 for the regexes and labels.
    data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8')
    '''<h2>Sinopsis</2><p>(.*?)</p>
    <div id="informacion" class="tab-pane">
    <h2>Titulo en Español</h2>
    <p>Abzurdah</p>
    <h2>Titulo Original</h2>
    <p>Abzurdah</p>
    <h2>Año de Lanzamiento</h2>
    <p>2015</p>
    <h2>Generos</h2>
    <p>Romance</p>
    <h2>Idioma</h2>
    <p>Latino</p>
    <h2>Calidad</h2>
    <p>DVD-Rip</p>
    '''
    # Data for the plot/info panel: synopsis text plus the movie title.
    patron = '<h2>Sinopsis</h2>.*?<p>(.*?)</p>.*?<div id="informacion".*?</h2>.*?<p>(.*?)</p>' #title
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    # NOTE(review): `sinopsis` and `title` leak out of this loop and are
    # reused below; with no match they would be undefined — confirm the
    # page always contains the info panel.
    for sinopsis,title in matches:
        title = "[COLOR white][B]" + title + "[/B][/COLOR]"
    patron = '<div id="informacion".*?>(.*?)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedplot in matches:
        splot = title + "\n\n"
        plot = scrapedplot
        # Convert the HTML info block into Kodi color markup.
        plot = re.sub('<h2>',"[COLOR red][B]",plot)
        plot = re.sub('</h2>',"[/B][/COLOR] : ",plot)
        plot = re.sub('<p>',"[COLOR green]",plot)
        plot = re.sub('</p>',"[/COLOR]\n",plot)
        plot = re.sub('<[^>]+>',"",plot)
        splot += plot + "\n[COLOR red][B] Sinopsis[/B][/COLOR]\n " + sinopsis
    # Link-table data.
    '''
    <a rel="nofollow" href="(.*?)".*?<td><img.*?</td><td>(.*?)</td><td>(.*?)</td></tr>
    ">Vimple</td>
    '''
    patron='<tbody>(.*?)</tbody>'
    matchesx = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matchesx)
    for bloq in matchesx:
        # Each row: link URL, server name, language, quality.
        patron='href="(.*?)".*?0 0">(.*?)</.*?<td>(.*?)</.*?<td>(.*?)<'
        matches = re.compile(patron,re.DOTALL).findall(bloq)
        #scrapertools.printMatches(matches)
        for scrapedurl,scrapedserver,scrapedlang,scrapedquality in matches:
            url = urlparse.urljoin(item.url,scrapedurl)
            logger.info("Lang:["+scrapedlang+"] Quality["+scrapedquality+"] URL["+url+"]")
            # Extract the host name from the link URL for the label.
            patronenlaces= '.*?://(.*?)/'
            matchesenlaces = re.compile(patronenlaces,re.DOTALL).findall(scrapedurl)
            scrapertools.printMatches(matchesenlaces)
            scrapedtitle = ""
            for scrapedenlace in matchesenlaces:
                scrapedtitle = title + " [COLOR white][ [/COLOR]" +"[COLOR green]" +scrapedquality+"[/COLOR]" +"[COLOR white] ][/COLOR]" + " [COLOR red] [" + scrapedlang +"][/COLOR] » " +scrapedserver
            itemlist.append( Item(channel=item.channel, action="play" , title=scrapedtitle , extra=title, url=url, fanart=item.thumbnail, thumbnail=item.thumbnail, plot=splot, folder=False))
    return itemlist
def play(item):
    """Resolve the final video: the link page embeds the real source in an
    <iframe>, which servertools then maps to a known video server."""
    logger.info("url="+item.url)
    #itemlist = servertools.find_video_items(data=item.url)
    url = scrapertools.find_single_match(scrapertools.cache_page(item.url),'<iframe src="([^"]+)"')
    itemlist = servertools.find_video_items(data=url)
    return itemlist
def search(item, texto):
    """Search the site for ``texto`` and return matching movie items.

    Fix: removed a leftover Python-2 debug ``print`` statement that wrote
    to stdout on every search.
    """
    logger.info(item.url)
    # The site expects '+'-separated terms in the query string.
    texto = texto.replace(" ", "+")
    item.url = 'http://www.repelis.tv/buscar/?s=%s' % (texto)
    logger.info(item.url)
    # Site serves latin-1; re-encode to UTF-8 for the regexes and labels.
    data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8')
    logger.info("data: "+data)
    # Each result row: link, title and poster thumbnail.
    patron = '<div class="col-xs-2">.*?'
    patron+= '<div class="row">.*?'
    patron+= '<a href="(.*?)" title="(.*?)">.*?'
    patron+= '<img src="(.*?)"'
    logger.info(patron)
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    itemlist = []
    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
        title = scrapertools.remove_show_from_title(scrapedtitle,"Ver Película")
        title = title.replace("Online","")
        url = urlparse.urljoin(item.url,scrapedurl)
        thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
        logger.info(url)
        itemlist.append( Item(channel=item.channel, action="findvideos", title=title, fulltitle=title , url=url , thumbnail=thumbnail, fanart=thumbnail) )
    return itemlist
#Por año, aquà está difÃcil filtrar las "eroticas" asà que quito la opcion si no esta el adultmode enabled
def poranyo(item):
    """Build the year-selection menu from the site's year <option> list."""
    logger.info(item.url)
    itemlist = []
    # Site serves latin-1; re-encode to UTF-8 for the regexes and labels.
    data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8')
    # Each <option> holds the year-page URL and its label.
    patron = '<option value="([^"]+)">(.*?)</option>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle in matches:
        title = scrapertools.remove_show_from_title(scrapedtitle,"Ver Película")
        title = title.replace("Online","")
        url = urlparse.urljoin(item.url,scrapedurl)
        itemlist.append( Item(channel=item.channel, action="todaspelis", title=title, fulltitle=title , url=url, fanart=item.fanart ) )
    return itemlist
#Aqui si que se filtran las eroticas
def porcateg(item):
    """Build the category menu; the erotic category is hidden unless adult
    mode is enabled.

    Fixes: removed a duplicate ``itemlist = []`` initialization and replaced
    the bare ``except:`` control-flow filter (a regex match that appended
    items only when it *failed*) with an explicit substring test.
    """
    logger.info(item.url)
    # Site serves latin-1; re-encode to UTF-8 for the regexes and labels.
    data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8')
    # Each category entry in the sidebar: link and label.
    patron = '<li class="cat-item cat-item-3">.*?<a href="([^"]+)" title="([^"]+)">'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    itemlist = []
    adult_allowed = config.get_setting("adult_mode") != 0
    for scrapedurl,scrapedtitle in matches:
        title = scrapertools.remove_show_from_title(scrapedtitle,"Ver Película")
        title = title.replace("Online","")
        url = urlparse.urljoin(item.url,scrapedurl)
        logger.info(url)
        # Skip adult categories (URLs containing "/erotic") unless allowed;
        # same condition as the original '.*?/erotic.*?' regex match.
        if not adult_allowed and "/erotic" in scrapedurl:
            continue
        itemlist.append( Item(channel=item.channel, action="todaspelis", title=title, fulltitle=title , url=url, fanart=item.fanart ) )
    return itemlist
| neno1978/pelisalacarta | python/main-classic/channels/repelis.py | Python | gpl-3.0 | 16,038 |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['deprecated'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: ios_template
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage Cisco IOS device configurations over SSH
description:
- Manages Cisco IOS network device configurations over SSH. This module
allows implementers to work with the device running-config. It
provides a way to push a set of commands onto a network device
by evaluating the current running-config and only pushing configuration
commands that are not already configured. The config source can
be a set of commands or a template.
deprecated: Deprecated in 2.2. Use ios_config instead
extends_documentation_fragment: ios
options:
src:
description:
- The path to the config source. The source can be either a
file with config or a template that will be merged during
runtime. By default the task will first search for the source
file in role or playbook root folder in templates unless a full
path to the file is given.
required: true
force:
description:
- The force argument instructs the module not to consider the
current device running-config. When set to true, this will
cause the module to push the contents of I(src) into the device
without first checking if already configured.
required: false
default: false
choices: [ "true", "false" ]
include_defaults:
description:
- The module, by default, will collect the current device
running-config to use as a base for comparison to the commands
in I(src). Setting this value to true will cause the command
issued to add any necessary flags to collect all defaults as
well as the device configuration. If the destination device
does not support such a flag, this argument is silently ignored.
required: true
choices: [ "true", "false" ]
backup:
description:
- When this argument is configured true, the module will backup
the running-config from the node prior to making any changes.
The backup file will be written to backup_{{ hostname }} in
the root of the playbook directory.
required: false
default: false
choices: [ "true", "false" ]
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task. The I(config) argument allows the implementer to
pass in the configuration to use as the base config for
comparison.
required: false
default: null
"""
EXAMPLES = """
- name: push a configuration onto the device
ios_template:
host: hostname
username: foo
src: config.j2
- name: forceable push a configuration onto the device
ios_template:
host: hostname
username: foo
src: config.j2
force: yes
- name: provide the base configuration for comparison
ios_template:
host: hostname
username: foo
src: candidate_config.txt
config: current_config.txt
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['...', '...']
responses:
description: The set of responses from issuing the commands on the device
returned: when not check_mode
type: list
sample: ['...', '...']
"""
import ansible.module_utils.ios
from ansible.module_utils.netcfg import NetworkConfig, dumps
from ansible.module_utils.ios import NetworkModule
def get_config(module):
    """Return the configuration text used as the comparison base.

    An explicitly supplied ``config`` parameter wins; otherwise, unless a
    forced (unconditional) push was requested, the device's running-config
    is retrieved through the module's connection.
    """
    include_defaults = module.params['include_defaults']
    base = module.params['config'] or dict()
    if not base and not module.params['force']:
        base = module.config.get_config(include_defaults=include_defaults)
    return base
def main():
    """ main entry point for module execution
    """
    # Accepted module parameters; ``config`` may not be combined with
    # ``backup`` or ``force`` (see mutually_exclusive below).
    argument_spec = dict(
        src=dict(),
        force=dict(default=False, type='bool'),
        include_defaults=dict(default=True, type='bool'),
        backup=dict(default=False, type='bool'),
        config=dict(),
    )
    mutually_exclusive = [('config', 'backup'), ('config', 'force')]
    module = NetworkModule(argument_spec=argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)
    result = dict(changed=False)
    # Candidate configuration rendered from the ``src`` parameter.
    candidate = NetworkConfig(contents=module.params['src'], indent=1)
    contents = get_config(module)
    if contents:
        config = NetworkConfig(contents=contents, indent=1)
        # NOTE(review): the backup is recorded unconditionally here rather
        # than only when the ``backup`` option is set -- confirm intended.
        result['_backup'] = str(contents)
    # NOTE(review): when ``contents`` is empty and ``force`` is false,
    # ``config`` below is unbound and this raises NameError -- verify a
    # base config is always available on this path.
    if not module.params['force']:
        commands = candidate.difference(config)
        commands = dumps(commands, 'commands').split('\n')
        commands = [str(c) for c in commands if c]
    else:
        # Forced push: send the whole candidate, no diffing.
        commands = str(candidate).split('\n')
    if commands:
        if not module.check_mode:
            # Apply the commands on the device and record its responses.
            response = module.config(commands)
            result['responses'] = response
        result['changed'] = True
    result['updates'] = commands
    module.exit_json(**result)
| sivel/ansible-modules-core | network/ios/_ios_template.py | Python | gpl-3.0 | 6,070 |
from readers.dependency_instance import DependencyInstance
class Conll07Reader:
    """Sequential reader for CoNLL-style dependency treebank files.

    The column layout is detected per sentence from the number of
    tab-separated fields: CoNLL 2009 (>= 12 columns), CoNLL-X/2007
    (10 columns), or a reduced 8-column format.
    """
    # ## read Conll 2007 data: reusing https://github.com/bplank/myconllutils
    ### http://ilk.uvt.nl/conll/index.html#dataformat

    def __init__(self, filename):
        self.filename = filename
        self.startReading()

    def __iter__(self):
        # Yield DependencyInstance objects until the file is exhausted.
        i = self.getNext()
        while i:
            yield i
            i = self.getNext()

    def startReading(self):
        # (Re)open the underlying file; the handle lives in self.FILE.
        self.FILE = open(self.filename, "r")

    def getNext(self):
        """Read one sentence; return a DependencyInstance, or None at EOF."""
        # return next instance or None
        line = self.FILE.readline()
        line = line.strip()
        lineList = line.split("\t")

        # Per-token column accumulators for the current sentence.
        ids = []
        form = []
        lemma = []
        cpos = []
        pos = []
        feats = []
        head = []
        deprel = []
        phead = []
        pdeprel = []

        if len(lineList) >= 12: #CONLL 2009 format
            while len(lineList) >= 12:
                ids.append(int(lineList[0]))
                form.append(lineList[1])
                lemma.append(lineList[2])
                cpos.append(lineList[5])
                pos.append(lineList[4])
                feats.append(lineList[6])
                head.append(int(lineList[8]))
                deprel.append(lineList[10])
                phead.append(lineList[9])
                pdeprel.append(lineList[11])
                line = self.FILE.readline()
                line = line.strip()
                lineList = line.split("\t")
        elif len(lineList) == 10:
            # contains all cols, also phead/pdeprel
            while len(lineList) == 10:
                ids.append(int(lineList[0]))
                form.append(lineList[1])
                lemma.append(lineList[2])
                cpos.append(lineList[3])
                pos.append(lineList[4])
                feats.append(lineList[5])
                head.append(int(lineList[6]))
                deprel.append(lineList[7])
                phead.append(lineList[8])
                pdeprel.append(lineList[9])
                line = self.FILE.readline()
                line = line.strip()
                lineList = line.split("\t")
        elif len(lineList) == 8:
            # Reduced format without phead/pdeprel columns.
            while len(lineList) == 8:
                # NOTE(review): ids are kept as strings here, while the
                # other branches convert them with int() -- confirm intended.
                ids.append(lineList[0])
                form.append(lineList[1])
                lemma.append(lineList[2])
                cpos.append(lineList[3])
                pos.append(lineList[4])
                feats.append(lineList[5])
                head.append(int(lineList[6]))
                deprel.append(lineList[7])
                phead.append("_")
                pdeprel.append("_")
                line = self.FILE.readline()
                line = line.strip()
                lineList = line.split("\t")
        elif len(lineList) > 1:
            raise Exception("not in right format!")

        if len(form) > 0:
            return DependencyInstance(ids, form, lemma, cpos, pos, feats, head, deprel, phead, pdeprel)
        else:
            return None

    def getInstances(self):
        """Read and return all remaining instances as a list."""
        instance = self.getNext()
        instances = []
        while instance:
            instances.append(instance)
            instance = self.getNext()
        return instances

    def getSentences(self):
        """ return sentences as list of lists """
        instances = self.getInstances()
        sents = []
        for i in instances:
            sents.append(i.form)
        return sents

    def getStrings(self, wordform="form"):
        """ sentence is one space-separated string in a list """
        if wordform == "lemma":
            return (" ".join(instance.lemma) for instance in self)
        else:
            return (" ".join(instance.form) for instance in self)

    def writeStrings(self, filepath, wordform="form"):
        """ write form to output. """
        with open(filepath, "w") as out:
            for i in self.getStrings(wordform=wordform):
                out.write("{}\n".format(i))

    def getVocabulary(self, n_sent=float("Inf"), add_root=True, lemmas=False):
        """
        vocabulary with frequencies
        :param n_sent: max number of sentences to consider
        :param add_root: add artificial symbol *root* to vocab
        :param lemmas: use lemma instead of form
        """
        from collections import defaultdict
        vocab = defaultdict(int)
        instance = self.getNext()
        c = 1
        if lemmas:
            while instance and (c <= n_sent):
                for w in instance.getSentenceLemmas():
                    vocab[w] += 1
                # One *root* per sentence.
                vocab["*root*"] += 1
                instance = self.getNext()
                c += 1
        else:
            while instance and (c <= n_sent):
                for w in instance.getSentence():
                    vocab[w] += 1
                vocab["*root*"] += 1
                instance = self.getNext()
                c += 1
        return vocab

    def getRelationVocabulary(self, n_sent=float("Inf")):
        """
        vocabulary of relation labels
        :param n_sent: max number of sentences to consider
        """
        vocab = set()
        instance = self.getNext()
        c = 1
        while instance and (c <= n_sent):
            vocab.update(instance.deprel)
            instance = self.getNext()
            c += 1
        return vocab

    def getCorpusTriples(self, wordform="form"):
        """ gets counts of head_w\tdep_w occurences """
        from collections import defaultdict
        counts = defaultdict(int)
        if wordform == "form":
            for instance in self:
                for i in instance.getBareFormTriples():
                    counts[i] += 1
        elif wordform == "lemma":
            for instance in self:
                for i in instance.getBareLemmaTriples():
                    counts[i] += 1
        return counts

    def getkCorpusTriples(self, k):
        """ gets counts of head_w\tdep_w occurences for k instances """
        from collections import defaultdict
        counts = defaultdict(int)
        for instance in self:
            if k > 0:
                for i in instance.getBareFormTriples():
                    counts[i] += 1
                k -= 1
            else:
                break
        return counts
def writeCorpusTriples(counts, filepath):
    """Dump head/dependent pair counts as 'head<TAB>dep<TAB>count' lines."""
    with open(filepath, "w") as sink:
        for pair, freq in counts.items():
            fields = pair.split("\t")
            sink.write("{}\t{}\t{}\n".format(fields[0], fields[1], freq))
def filter_freq(s, f, vocab, lemma=False):
    """Replace tokens rarer than *f* (per *vocab*) with '*unk*', in place.

    Operates on *s.lemma* when ``lemma`` is true, otherwise on *s.form*.
    Returns the mutated instance, or None when *s* is None.
    """
    if s is None:
        return None
    words = s.lemma if lemma else s.form
    for idx in range(len(s)):
        if vocab[words[idx]] < f:
            words[idx] = "*unk*"
    return s
def filter_len(s, min_len, max_len):
    """Return *s* when its length lies strictly between the bounds, else None."""
    if s is None:
        return None
    return s if min_len < len(s) < max_len else None
| rug-compling/hmm-reps | readers/conll07_reader.py | Python | mit | 7,024 |
import urllib.parse
from dateutil import parser
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import transaction
from django.db.models import F, OuterRef, Q, Subquery
from kuma.core.urlresolvers import reverse
from kuma.wiki.models import Document, Revision
class Command(BaseCommand):
    """
    See https://bugzilla.mozilla.org/show_bug.cgi?id=1551999 for more
    background.
    This script tries to solve this but it tries to be as generic as possible
    because the problem could re-appear.
    Generally we hope to not have to run this (manually) on a recurring basis.
    The original commit that started this problem landed on master
    on April 16 2019.
    https://github.com/mdn/kuma/commit/3177d761775c1a14244f144845c5045f05891b38
    If there are other documents before then, it might be outside the scope
    of this.
    """

    help = (
        "Due to a bug, at some point certain edits were made where a new "
        "revision was created but the document's 'current_revision' wasn't "
        "moved with it. This script attempts to rectify that."
    )

    def add_arguments(self, parser):
        # By default the command only reports; --edit makes it write.
        parser.add_argument(
            "--edit",
            action="store_true",
            default=False,
            help="Actually edit the current revision of documents found",
        )
        parser.add_argument(
            "--include-archive",
            action="store_true",
            default=False,
            help="By default /docs/Archive are excluded. This includes them.",
        )
        parser.add_argument(
            "--baseurl", help="Base URL to site", default=settings.SITE_URL
        )
        parser.add_argument(
            "--start-date",
            help=(
                "Start date for document.modified (default: not set). "
                "Note that the possible bug that caused all of this was "
                "landed on April 16 2019."
            ),
        )
        parser.add_argument(
            "--end-date",
            help=(
                "End date for document.modified (default: not set) "
                "Note that the problem that started all of this landed in "
                "master on April 22 2019"
            ),
        )
        # Optional substring filters on document slugs.
        parser.add_argument("slugsearch", nargs="*")

    @transaction.atomic()
    def handle(self, *args, **options):
        actually_edit = options["edit"]

        def get_url(doc, name="wiki.document", *args):
            # Build an absolute, percent-encoded URL for the document view.
            return "{}{}".format(
                options["baseurl"],
                urllib.parse.quote(
                    reverse(name, locale=doc.locale, args=(doc.slug,) + args)
                ),
            )

        documents = Document.objects.all()
        if not options["include_archive"]:
            documents = documents.exclude(slug__startswith="Archive/")
        if options["slugsearch"]:
            # OR together all slug substring filters.
            q = Q()
            for slugsearch in options["slugsearch"]:
                q |= Q(slug__contains=slugsearch)
            documents = documents.filter(q)
            self.stdout.write(
                "Found slugs: {!r}".format(
                    list(documents.values_list("slug", flat=True))
                )
            )
        # Optionally narrow the revisions considered "newest" by date range.
        revisions = Revision.objects.all()
        if options["start_date"]:
            start_date = parser.parse(options["start_date"])
            revisions = revisions.filter(created__gte=start_date)
            self.stdout.write("Filtering revisions modified >= {}\n".format(start_date))
        if options["end_date"]:
            end_date = parser.parse(options["end_date"])
            revisions = revisions.filter(created__lte=end_date)
            self.stdout.write("Filtering revisions modified <= {}\n".format(end_date))
        # Annotate every document with the id of its newest revision
        # (within the optional date window) via a correlated subquery.
        newest = revisions.filter(document=OuterRef("pk")).order_by("-created")
        documents = documents.annotate(
            newest_revision_id=Subquery(newest.values("pk")[:1])
        )
        count_found = 0
        # Broken documents: current_revision does not point at the newest.
        for document in documents.exclude(current_revision_id=F("newest_revision_id")):
            self.stdout.write(
                "DOCUMENT: {} (last modified {})".format(
                    get_url(document), document.modified.strftime("%Y-%m-%d")
                )
            )
            self.stdout.write(
                "\tHistory: {}".format(get_url(document, "wiki.document_revisions"))
            )
            first = True
            # List ALL revisions of this document (not date-filtered), newest
            # first, down to and including the current one.
            revisions = Revision.objects.filter(document=document)
            for revision in revisions.order_by("-created"):
                current = revision.id == document.current_revision_id
                self.stdout.write(
                    "\tRevision: {} of {}: {} created {}".format(
                        revision.id,
                        ("CURRENT" if current else "NOT CURRENT").ljust(11),
                        get_url(document, "wiki.revision", revision.id),
                        revision.created,
                    )
                )
                if first:
                    if actually_edit:
                        # Repoint current_revision at the newest revision.
                        document.make_current()
                    else:
                        self.stderr.write("\tNOT editing at the moment!")
                if current:
                    break
                first = False
            self.stdout.write("\n")
            count_found += 1
        self.stdout.write("\nFound {:,} documents in total.".format(count_found))
| Elchi3/kuma | kuma/wiki/management/commands/correct_current_revision_documents.py | Python | mpl-2.0 | 5,448 |
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from django.conf.urls.defaults import *
from . import views
# URL routes for the locations app; the optional ``location_uid`` path
# segment (e.g. "province:12") selects a single location.
# NOTE(review): ``patterns()`` and ``django.conf.urls.defaults`` exist only
# in very old Django releases (removed in 1.6/1.8) -- confirm the pinned
# Django version before touching this.
urlpatterns = patterns('',
    url(r'^(?:(?P<location_uid>[a-z\-]+:\d+)/)?$',
        views.locations,
        name="locations")
)
| dimagi/rapidsms | lib/rapidsms/contrib/locations/urls.py | Python | bsd-3-clause | 246 |
import os
import sys
from collections import Counter, defaultdict, namedtuple
import six
from six import StringIO
from conans import ConanFile, Options
from conans.client.output import ConanOutput
from conans.client.userio import UserIO
from conans.model.env_info import DepsEnvInfo, EnvInfo, EnvValues
from conans.model.options import PackageOptions
from conans.model.user_info import DepsUserInfo
class LocalDBMock(object):
    """In-memory substitute for the local credentials DB used in tests."""

    def __init__(self, user=None, access_token=None, refresh_token=None):
        self._remember(user, access_token, refresh_token)

    def _remember(self, user, access_token, refresh_token):
        # Single place where the three credential fields are written.
        self.user = user
        self.access_token = access_token
        self.refresh_token = refresh_token

    def get_login(self, _):
        """Return the stored (user, access_token, refresh_token) triple."""
        return self.user, self.access_token, self.refresh_token

    def get_username(self, _):
        """Return only the stored user name."""
        return self.user

    def store(self, user, access_token, refresh_token, _):
        """Overwrite the stored credentials; the remote argument is ignored."""
        self._remember(user, access_token, refresh_token)
class MockedUserIO(UserIO):
    """
    Mock for testing. If get_username or get_password is requested will raise
    an exception except we have a value to return.
    """

    def __init__(self, logins, ins=sys.stdin, out=None):
        """
        logins is a dict of {remote: list(user, password)}
        will return sequentially
        """
        assert isinstance(logins, dict)
        self.logins = logins
        # Tracks, per remote, which (user, password) tuple is consumed next.
        self.login_index = Counter()
        UserIO.__init__(self, ins, out)

    def get_username(self, remote_name):
        # Environment variable override wins over the canned logins.
        username_env = self._get_env_username(remote_name)
        if username_env:
            return username_env

        self._raise_if_non_interactive()
        sub_dict = self.logins[remote_name]
        index = self.login_index[remote_name]
        if len(sub_dict) - 1 < index:
            raise Exception("Bad user/password in testing framework, "
                            "provide more tuples or input the right ones")
        # The index is advanced by get_password(), not here, so a
        # username/password pair is consumed together.
        return sub_dict[index][0]

    def get_password(self, remote_name):
        """Overridable for testing purpose"""
        password_env = self._get_env_password(remote_name)
        if password_env:
            return password_env

        self._raise_if_non_interactive()
        sub_dict = self.logins[remote_name]
        index = self.login_index[remote_name]
        tmp = sub_dict[index][1]
        # Advance to the next canned (user, password) tuple for this remote.
        self.login_index.update([remote_name])
        return tmp
class MockSettings(object):
    """Minimal settings stub backed by a plain dict."""

    def __init__(self, values):
        self.values = values

    def get_safe(self, value):
        """Mimic Settings.get_safe: the stored value, or None when absent."""
        return self.values.get(value)
class MockCppInfo(object):
    """Empty cpp_info stub: every path/flag collection starts as a fresh list."""

    def __init__(self):
        for attr in ("bin_paths", "lib_paths", "include_paths", "libs",
                     "cflags", "cppflags", "defines", "frameworks",
                     "framework_paths"):
            setattr(self, attr, [])
class MockDepsCppInfo(defaultdict):
    """deps_cpp_info stand-in: unknown dependency names materialize as fresh
    MockCppInfo entries, while the aggregated fields start out empty."""

    def __init__(self):
        super(MockDepsCppInfo, self).__init__(MockCppInfo)
        for name in ("include_paths", "lib_paths", "libs", "defines",
                     "cflags", "cxxflags", "sharedlinkflags", "exelinkflags",
                     "frameworks", "framework_paths", "system_libs"):
            setattr(self, name, [])
        self.sysroot = ""

    @property
    def deps(self):
        """Names of the mocked dependencies (the dict keys)."""
        return self.keys()
class MockConanfile(ConanFile):
    """ConanFile stub with canned settings/options and an optional runner,
    used to exercise generators and build helpers without a real recipe."""

    def __init__(self, settings, options=None, runner=None):
        self.deps_cpp_info = MockDepsCppInfo()
        self.settings = settings
        self.runner = runner
        self.options = options or MockOptions({})
        self.generators = []
        self.output = TestBufferConanOutput()

        self.should_configure = True
        self.should_build = True
        self.should_install = True
        self.should_test = True
        self.package_folder = None

    def run(self, *args, **kwargs):
        # Delegate to the injected runner (if any), forcing the output
        # kwarg to None so nothing is written to a real stream.
        if self.runner:
            kwargs["output"] = None
            self.runner(*args, **kwargs)
class ConanFileMock(ConanFile):
    """ConanFile stub whose run() records the command and the environment
    instead of executing anything."""

    def __init__(self, shared=None, options=None, options_values=None):
        options = options or ""
        # Last command passed to run(), and the PATH seen at that moment.
        self.command = None
        self.path = None
        self.source_folder = self.build_folder = "."
        self.settings = None
        self.options = Options(PackageOptions.loads(options))
        if options_values:
            for var, value in options_values.items():
                self.options._data[var] = value
        self.deps_cpp_info = MockDepsCppInfo()  # ("deps_cpp_info", "sysroot")("/path/to/sysroot")
        self.deps_cpp_info.sysroot = "/path/to/sysroot"
        self.output = TestBufferConanOutput()
        self.in_local_cache = False
        self.install_folder = "myinstallfolder"
        if shared is not None:
            # Replace the options object with a bare namedtuple exposing
            # only ``shared``.
            self.options = namedtuple("options", "shared")(shared)
        self.should_configure = True
        self.should_build = True
        self.should_install = True
        self.should_test = True
        self.generators = []
        self.captured_env = {}
        self.deps_env_info = DepsEnvInfo()
        self.env_info = EnvInfo()
        self.deps_user_info = DepsUserInfo()
        self._conan_env_values = EnvValues()

    def run(self, command):
        # Record rather than execute: tests inspect command/path/captured_env.
        self.command = command
        self.path = os.environ["PATH"]
        self.captured_env = {key: value for key, value in os.environ.items()}
MockOptions = MockSettings
class TestBufferConanOutput(ConanOutput):
    """ wraps the normal output of the application, captures it into an stream
    and gives it operators similar to string, so it can be compared in tests
    """

    def __init__(self):
        # Capture into an in-memory stream; disable ANSI color codes so the
        # captured text can be compared literally.
        ConanOutput.__init__(self, StringIO(), color=False)

    def __repr__(self):
        # FIXME: I'm sure there is a better approach. Look at six docs.
        if six.PY2:
            return str(self._stream.getvalue().encode("ascii", "ignore"))
        else:
            return self._stream.getvalue()

    def __str__(self, *args, **kwargs):
        return self.__repr__()

    def __eq__(self, value):
        # Compare as the captured text, e.g. ``output == "expected"``.
        return self.__repr__() == value

    def __ne__(self, value):
        return not self.__eq__(value)

    def __contains__(self, value):
        # Support ``"substring" in output``.
        return value in self.__repr__()
# cli2.0
class RedirectedTestOutput(StringIO):
    """StringIO capture that compares and prints as its buffered text."""

    def __init__(self):
        super(RedirectedTestOutput, self).__init__()

    def __repr__(self):
        return self.getvalue()

    def __str__(self, *args, **kwargs):
        return self.getvalue()

    def __eq__(self, value):
        # ``output == "expected text"``
        return self.getvalue() == value

    def __ne__(self, value):
        return not self.__eq__(value)

    def __contains__(self, value):
        # ``"substring" in output``
        return value in self.getvalue()
| conan-io/conan-package-tools | cpt/test/utils/mocks.py | Python | mit | 6,729 |
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the `iris.cube.CubeList` class."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import collections
import collections.abc
import iris.tests as tests
import iris.tests.stock
from unittest import mock
from cf_units import Unit
import numpy as np
from iris import Constraint
from iris.cube import Cube, CubeList
from iris.coords import AuxCoord, DimCoord
import iris.coord_systems
import iris.exceptions
from iris.fileformats.pp import STASH
class Test_concatenate_cube(tests.IrisTest):
    """Tests for CubeList.concatenate_cube (must yield exactly one cube)."""

    def setUp(self):
        self.units = Unit(
            "days since 1970-01-01 00:00:00", calendar="gregorian"
        )
        self.cube1 = Cube([1, 2, 3], "air_temperature", units="K")
        self.cube1.add_dim_coord(
            DimCoord([0, 1, 2], "time", units=self.units), 0
        )

    def test_pass(self):
        # Contiguous time coords -> concatenation succeeds.
        self.cube2 = Cube([1, 2, 3], "air_temperature", units="K")
        self.cube2.add_dim_coord(
            DimCoord([3, 4, 5], "time", units=self.units), 0
        )
        result = CubeList([self.cube1, self.cube2]).concatenate_cube()
        self.assertIsInstance(result, Cube)

    def test_fail(self):
        # Different time epochs make the cubes incompatible.
        units = Unit("days since 1970-01-02 00:00:00", calendar="gregorian")
        cube2 = Cube([1, 2, 3], "air_temperature", units="K")
        cube2.add_dim_coord(DimCoord([0, 1, 2], "time", units=units), 0)
        with self.assertRaises(iris.exceptions.ConcatenateError):
            CubeList([self.cube1, cube2]).concatenate_cube()

    def test_empty(self):
        exc_regexp = "can't concatenate an empty CubeList"
        with self.assertRaisesRegex(ValueError, exc_regexp):
            CubeList([]).concatenate_cube()
class Test_extract_overlapping(tests.IrisTest):
    """Tests for CubeList.extract_overlapping over one or more coords."""

    def setUp(self):
        # 3-D (time, latitude, longitude) cube of sequential ints.
        shape = (6, 14, 19)
        n_time, n_lat, n_lon = shape
        n_data = n_time * n_lat * n_lon
        cube = Cube(np.arange(n_data, dtype=np.int32).reshape(shape))
        coord = iris.coords.DimCoord(
            points=np.arange(n_time),
            standard_name="time",
            units="hours since epoch",
        )
        cube.add_dim_coord(coord, 0)
        cs = iris.coord_systems.GeogCS(6371229)
        coord = iris.coords.DimCoord(
            points=np.linspace(-90, 90, n_lat),
            standard_name="latitude",
            units="degrees",
            coord_system=cs,
        )
        cube.add_dim_coord(coord, 1)
        coord = iris.coords.DimCoord(
            points=np.linspace(-180, 180, n_lon),
            standard_name="longitude",
            units="degrees",
            coord_system=cs,
        )
        cube.add_dim_coord(coord, 2)
        self.cube = cube

    def test_extract_one_str_dim(self):
        # Overlap of times [2:] and [:4] is [2:4].
        cubes = iris.cube.CubeList([self.cube[2:], self.cube[:4]])
        a, b = cubes.extract_overlapping("time")
        self.assertEqual(a.coord("time"), self.cube.coord("time")[2:4])
        self.assertEqual(b.coord("time"), self.cube.coord("time")[2:4])

    def test_extract_one_list_dim(self):
        # Same as above but the coord is given as a single-element list.
        cubes = iris.cube.CubeList([self.cube[2:], self.cube[:4]])
        a, b = cubes.extract_overlapping(["time"])
        self.assertEqual(a.coord("time"), self.cube.coord("time")[2:4])
        self.assertEqual(b.coord("time"), self.cube.coord("time")[2:4])

    def test_extract_two_dims(self):
        cubes = iris.cube.CubeList([self.cube[2:, 5:], self.cube[:4, :10]])
        a, b = cubes.extract_overlapping(["time", "latitude"])
        self.assertEqual(a.coord("time"), self.cube.coord("time")[2:4])
        self.assertEqual(
            a.coord("latitude"), self.cube.coord("latitude")[5:10]
        )
        self.assertEqual(b.coord("time"), self.cube.coord("time")[2:4])
        self.assertEqual(
            b.coord("latitude"), self.cube.coord("latitude")[5:10]
        )

    def test_different_orders(self):
        # One cube has time reversed; each keeps its own point order.
        cubes = iris.cube.CubeList([self.cube[::-1][:4], self.cube[:4]])
        a, b = cubes.extract_overlapping("time")
        self.assertEqual(a.coord("time"), self.cube[::-1].coord("time")[2:4])
        self.assertEqual(b.coord("time"), self.cube.coord("time")[2:4])
class Test_merge_cube(tests.IrisTest):
    """Tests for CubeList.merge_cube (must yield exactly one cube)."""

    def setUp(self):
        self.cube1 = Cube([1, 2, 3], "air_temperature", units="K")
        self.cube1.add_aux_coord(AuxCoord([0], "height", units="m"))

    def test_pass(self):
        # Differing scalar height values merge into a new dimension.
        cube2 = self.cube1.copy()
        cube2.coord("height").points = [1]
        result = CubeList([self.cube1, cube2]).merge_cube()
        self.assertIsInstance(result, Cube)

    def test_fail(self):
        # Different names cannot merge into a single cube.
        cube2 = self.cube1.copy()
        cube2.rename("not air temperature")
        with self.assertRaises(iris.exceptions.MergeError):
            CubeList([self.cube1, cube2]).merge_cube()

    def test_empty(self):
        with self.assertRaises(ValueError):
            CubeList([]).merge_cube()

    def test_single_cube(self):
        # A one-element list returns an equal but distinct cube.
        result = CubeList([self.cube1]).merge_cube()
        self.assertEqual(result, self.cube1)
        self.assertIsNot(result, self.cube1)

    def test_repeated_cube(self):
        # Identical duplicates are a merge error, not a no-op.
        with self.assertRaises(iris.exceptions.MergeError):
            CubeList([self.cube1, self.cube1]).merge_cube()
class Test_merge__time_triple(tests.IrisTest):
    """Merging over (forecast_period, forecast_reference_time, time)
    triples, with and without a realization coordinate."""

    @staticmethod
    def _make_cube(fp, rt, t, realization=None):
        # 4x5 cube carrying the given scalar time triple (and optionally
        # a realization) as aux coords.
        cube = Cube(np.arange(20).reshape(4, 5))
        cube.add_dim_coord(DimCoord(np.arange(5), long_name="x"), 1)
        cube.add_dim_coord(DimCoord(np.arange(4), long_name="y"), 0)
        cube.add_aux_coord(DimCoord(fp, standard_name="forecast_period"))
        cube.add_aux_coord(
            DimCoord(rt, standard_name="forecast_reference_time")
        )
        cube.add_aux_coord(DimCoord(t, standard_name="time"))
        if realization is not None:
            cube.add_aux_coord(
                DimCoord(realization, standard_name="realization")
            )
        return cube

    def test_orthogonal_with_realization(self):
        # => fp: 2; rt: 2; t: 2; realization: 2
        triples = (
            (0, 10, 1),
            (0, 10, 2),
            (0, 11, 1),
            (0, 11, 2),
            (1, 10, 1),
            (1, 10, 2),
            (1, 11, 1),
            (1, 11, 2),
        )
        en1_cubes = [
            self._make_cube(*triple, realization=1) for triple in triples
        ]
        en2_cubes = [
            self._make_cube(*triple, realization=2) for triple in triples
        ]
        cubes = CubeList(en1_cubes) + CubeList(en2_cubes)
        (cube,) = cubes.merge()
        self.assertCML(cube, checksum=False)

    def test_combination_with_realization(self):
        # => fp, rt, t: 8; realization: 2
        triples = (
            (0, 10, 1),
            (0, 10, 2),
            (0, 11, 1),
            (0, 11, 3),  # This '3' breaks the pattern.
            (1, 10, 1),
            (1, 10, 2),
            (1, 11, 1),
            (1, 11, 2),
        )
        en1_cubes = [
            self._make_cube(*triple, realization=1) for triple in triples
        ]
        en2_cubes = [
            self._make_cube(*triple, realization=2) for triple in triples
        ]
        cubes = CubeList(en1_cubes) + CubeList(en2_cubes)
        (cube,) = cubes.merge()
        self.assertCML(cube, checksum=False)

    def test_combination_with_extra_realization(self):
        # => fp, rt, t, realization: 17
        triples = (
            (0, 10, 1),
            (0, 10, 2),
            (0, 11, 1),
            (0, 11, 2),
            (1, 10, 1),
            (1, 10, 2),
            (1, 11, 1),
            (1, 11, 2),
        )
        en1_cubes = [
            self._make_cube(*triple, realization=1) for triple in triples
        ]
        en2_cubes = [
            self._make_cube(*triple, realization=2) for triple in triples
        ]
        # Add extra that is a duplicate of one of the time triples
        # but with a different realisation.
        en3_cubes = [self._make_cube(0, 10, 2, realization=3)]
        cubes = CubeList(en1_cubes) + CubeList(en2_cubes) + CubeList(en3_cubes)
        (cube,) = cubes.merge()
        self.assertCML(cube, checksum=False)

    def test_combination_with_extra_triple(self):
        # => fp, rt, t, realization: 17
        triples = (
            (0, 10, 1),
            (0, 10, 2),
            (0, 11, 1),
            (0, 11, 2),
            (1, 10, 1),
            (1, 10, 2),
            (1, 11, 1),
            (1, 11, 2),
        )
        en1_cubes = [
            self._make_cube(*triple, realization=1) for triple in triples
        ]
        # Add extra time triple on the end.
        en2_cubes = [
            self._make_cube(*triple, realization=2)
            for triple in triples + ((1, 11, 3),)
        ]
        cubes = CubeList(en1_cubes) + CubeList(en2_cubes)
        (cube,) = cubes.merge()
        self.assertCML(cube, checksum=False)
class Test_xml(tests.IrisTest):
    """Tests for the ``byteorder`` flag of CubeList.xml."""

    def setUp(self):
        self.cubes = CubeList([Cube(np.arange(3)), Cube(np.arange(3))])

    def test_byteorder_default(self):
        self.assertIn("byteorder", self.cubes.xml())

    def test_byteorder_false(self):
        self.assertNotIn("byteorder", self.cubes.xml(byteorder=False))

    def test_byteorder_true(self):
        self.assertIn("byteorder", self.cubes.xml(byteorder=True))
class Test_extract(tests.IrisTest):
    """Tests for CubeList.extract with scalar cubes."""

    def setUp(self):
        # 20 scalar cubes: values 0..4, each under names 'a'..'d'.
        self.scalar_cubes = CubeList()
        for i in range(5):
            for letter in "abcd":
                self.scalar_cubes.append(Cube(i, long_name=letter))

    def test_scalar_cube_name_constraint(self):
        # Test the name based extraction of a CubeList containing scalar cubes.
        res = self.scalar_cubes.extract("a")
        expected = CubeList([Cube(i, long_name="a") for i in range(5)])
        self.assertEqual(res, expected)

    def test_scalar_cube_data_constraint(self):
        # Test the extraction of a CubeList containing scalar cubes
        # when using a cube_func.
        val = 2
        constraint = iris.Constraint(cube_func=lambda c: c.data == val)
        res = self.scalar_cubes.extract(constraint)
        expected = CubeList([Cube(val, long_name=letter) for letter in "abcd"])
        self.assertEqual(res, expected)
class ExtractMixin:
    """Shared fixtures/assertions for extract_cube / extract_cubes tests."""
    # Choose "which" extract method to test.
    # Effectively "abstract" -- inheritor must define this property :
    #   method_name = 'extract_cube' / 'extract_cubes'

    def setUp(self):
        self.cube_x = Cube(0, long_name="x")
        self.cube_y = Cube(0, long_name="y")
        self.cons_x = Constraint("x")
        self.cons_y = Constraint("y")
        # Constraints matching everything / nothing, respectively.
        self.cons_any = Constraint(cube_func=lambda cube: True)
        self.cons_none = Constraint(cube_func=lambda cube: False)

    def check_extract(self, cubes, constraints, expected):
        # Check that extracting a cubelist with the given arguments has the
        # expected result.
        # 'expected' and the operation results can be:
        #   * None
        #   * a single cube
        #   * a list of cubes --> cubelist (with cubes matching)
        #   * string --> a ConstraintMatchException matching the string
        cubelist = CubeList(cubes)
        method = getattr(cubelist, self.method_name)
        if isinstance(expected, str):
            with self.assertRaisesRegex(
                iris.exceptions.ConstraintMismatchError, expected
            ):
                method(constraints)
        else:
            result = method(constraints)
            if expected is None:
                self.assertIsNone(result)
            elif isinstance(expected, Cube):
                self.assertIsInstance(result, Cube)
                self.assertEqual(result, expected)
            elif isinstance(expected, list):
                self.assertIsInstance(result, CubeList)
                self.assertEqual(result, expected)
            else:
                msg = (
                    'Unhandled usage in "check_extract" call: '
                    '"expected" arg has type {}, value {}.'
                )
                raise ValueError(msg.format(type(expected), expected))
class Test_extract_cube(ExtractMixin, tests.IrisTest):
    """extract_cube must match exactly one cube, or raise."""

    method_name = "extract_cube"

    def test_empty(self):
        self.check_extract([], self.cons_x, "Got 0 cubes .* expecting 1")

    def test_single_cube_ok(self):
        self.check_extract([self.cube_x], self.cons_x, self.cube_x)

    def test_single_cube_fail__too_few(self):
        self.check_extract(
            [self.cube_x], self.cons_y, "Got 0 cubes .* expecting 1"
        )

    def test_single_cube_fail__too_many(self):
        self.check_extract(
            [self.cube_x, self.cube_y],
            self.cons_any,
            "Got 2 cubes .* expecting 1",
        )

    def test_string_as_constraint(self):
        # Check that we can use a string, that converts to a constraint
        # ( via "as_constraint" ).
        self.check_extract([self.cube_x], "x", self.cube_x)

    def test_none_as_constraint(self):
        # Check that we can use a None, that converts to a constraint
        # ( via "as_constraint" ).
        self.check_extract([self.cube_x], None, self.cube_x)

    def test_constraint_in_list__fail(self):
        # Check that we *cannot* use [constraint]
        msg = "cannot be cast to a constraint"
        with self.assertRaisesRegex(TypeError, msg):
            self.check_extract([], [self.cons_x], [])

    def test_multi_cube_ok(self):
        self.check_extract(
            [self.cube_x, self.cube_y], self.cons_x, self.cube_x
        )  # NOTE: returns a cube

    def test_multi_cube_fail__too_few(self):
        self.check_extract(
            [self.cube_x, self.cube_y],
            self.cons_none,
            "Got 0 cubes .* expecting 1",
        )

    def test_multi_cube_fail__too_many(self):
        self.check_extract(
            [self.cube_x, self.cube_y],
            self.cons_any,
            "Got 2 cubes .* expecting 1",
        )
class ExtractCubesMixin(ExtractMixin):
    """ExtractMixin specialised to exercise ``extract_cubes``."""

    method_name = "extract_cubes"
class Test_extract_cubes__noconstraint(ExtractCubesMixin, tests.IrisTest):
    """Test with an empty list of constraints."""

    # With no constraints the result is always an empty CubeList.
    def test_empty(self):
        self.check_extract([], [], [])

    def test_single_cube(self):
        self.check_extract([self.cube_x], [], [])

    def test_multi_cubes(self):
        self.check_extract([self.cube_x, self.cube_y], [], [])
class ExtractCubesSingleConstraintMixin(ExtractCubesMixin):
    """
    Common code for testing extract_cubes with a single constraint.
    Generalised, so that we can do the same tests for a "bare" constraint,
    and a list containing a single [constraint].
    """
    # Effectively "abstract" -- inheritor must define this property :
    #   wrap_test_constraint_as_list_of_one = True / False

    def check_extract(self, cubes, constraint, result):
        # Overload standard test operation.
        if self.wrap_test_constraint_as_list_of_one:
            constraint = [constraint]
        super().check_extract(cubes, constraint, result)

    def test_empty(self):
        self.check_extract([], self.cons_x, "Got 0 cubes .* expecting 1")

    def test_single_cube_ok(self):
        self.check_extract(
            [self.cube_x], self.cons_x, [self.cube_x]
        )  # NOTE: always returns list NOT cube

    def test_single_cube__fail_mismatch(self):
        self.check_extract(
            [self.cube_x], self.cons_y, "Got 0 cubes .* expecting 1"
        )

    def test_multi_cube_ok(self):
        self.check_extract(
            [self.cube_x, self.cube_y], self.cons_x, [self.cube_x]
        )  # NOTE: always returns list NOT cube

    def test_multi_cube__fail_too_few(self):
        self.check_extract(
            [self.cube_x, self.cube_y],
            self.cons_none,
            "Got 0 cubes .* expecting 1",
        )

    def test_multi_cube__fail_too_many(self):
        self.check_extract(
            [self.cube_x, self.cube_y],
            self.cons_any,
            "Got 2 cubes .* expecting 1",
        )
class Test_extract_cubes__bare_single_constraint(
    ExtractCubesSingleConstraintMixin, tests.IrisTest
):
    """Testing with a single constraint as the argument."""

    wrap_test_constraint_as_list_of_one = False
class Test_extract_cubes__list_single_constraint(
    ExtractCubesSingleConstraintMixin, tests.IrisTest
):
    """Testing with a list of one constraint as the argument."""

    wrap_test_constraint_as_list_of_one = True
class Test_extract_cubes__multi_constraints(ExtractCubesMixin, tests.IrisTest):
    """
    Testing when the 'constraints' arg is a list of multiple constraints.
    """

    def test_empty(self):
        # Always fails.
        self.check_extract(
            [], [self.cons_x, self.cons_any], "Got 0 cubes .* expecting 1"
        )

    def test_single_cube_ok(self):
        # Possible if the one cube matches all the constraints.
        self.check_extract(
            [self.cube_x],
            [self.cons_x, self.cons_any],
            [self.cube_x, self.cube_x],
        )

    def test_single_cube__fail_too_few(self):
        self.check_extract(
            [self.cube_x],
            [self.cons_x, self.cons_y],
            "Got 0 cubes .* expecting 1",
        )

    def test_multi_cube_ok(self):
        # Results come back in constraint order, not cube order.
        self.check_extract(
            [self.cube_x, self.cube_y],
            [self.cons_y, self.cons_x],  # N.B. reverse order !
            [self.cube_y, self.cube_x],
        )

    def test_multi_cube_castable_constraint_args(self):
        # Check with args that *aren't* constraints, but can be converted
        # ( via "as_constraint" ).
        self.check_extract(
            [self.cube_x, self.cube_y],
            ["y", "x", self.cons_y],
            [self.cube_y, self.cube_x, self.cube_y],
        )
        # NOTE: not bothering to check we can cast a 'None', as it will anyway
        # fail with multiple input cubes.

    def test_multi_cube__fail_too_few(self):
        self.check_extract(
            [self.cube_x, self.cube_y],
            [self.cons_x, self.cons_y, self.cons_none],
            "Got 0 cubes .* expecting 1",
        )

    def test_multi_cube__fail_too_many(self):
        self.check_extract(
            [self.cube_x, self.cube_y],
            [self.cons_x, self.cons_y, self.cons_any],
            "Got 2 cubes .* expecting 1",
        )
class Test_iteration(tests.IrisTest):
    """Check that a CubeList iterates like a plain sequence of cubes."""

    def setUp(self):
        # 20 scalar cubes: names 'a'..'d' repeated five times, in order.
        self.scalar_cubes = CubeList()
        for i in range(5):
            for letter in "abcd":
                self.scalar_cubes.append(Cube(i, long_name=letter))

    def test_iterable(self):
        # ``collections.Iterable`` was removed in Python 3.10; the ABC
        # lives in ``collections.abc``.
        self.assertIsInstance(self.scalar_cubes, collections.abc.Iterable)

    def test_iteration(self):
        # Iteration order matches insertion order.
        letters = "abcd" * 5
        for i, cube in enumerate(self.scalar_cubes):
            self.assertEqual(cube.long_name, letters[i])
class TestPrint(tests.IrisTest):
    """Tests for the one-line-per-cube str() summary of a CubeList."""

    def setUp(self):
        self.cubes = CubeList([iris.tests.stock.lat_lon_cube()])

    def test_summary(self):
        expected = (
            "0: unknown / (unknown) "
            " (latitude: 3; longitude: 4)"
        )
        self.assertEqual(str(self.cubes), expected)

    def test_summary_name_unit(self):
        self.cubes[0].long_name = "aname"
        self.cubes[0].units = "1"
        expected = (
            "0: aname / (1) "
            " (latitude: 3; longitude: 4)"
        )
        self.assertEqual(str(self.cubes), expected)

    def test_summary_stash(self):
        # A STASH attribute is used as the displayed name when set.
        self.cubes[0].attributes["STASH"] = STASH.from_msi("m01s00i004")
        expected = (
            "0: m01s00i004 / (unknown) "
            " (latitude: 3; longitude: 4)"
        )
        self.assertEqual(str(self.cubes), expected)
class TestRealiseData(tests.IrisTest):
    def test_realise_data(self):
        # CubeList.realise_data should simply delegate to
        # iris._lazy_data.co_realise_cubes, passing its cubes as *args.
        cubes = [mock.Mock(ident=n) for n in range(3)]
        co_realise = self.patch("iris._lazy_data.co_realise_cubes")
        CubeList(cubes).realise_data()
        # Exactly one call, with the cubes expanded as positional args.
        self.assertEqual(co_realise.call_args_list, [mock.call(*cubes)])
if __name__ == "__main__":
    # Run these unit tests when the module is executed directly.
    tests.main()
| pp-mo/iris | lib/iris/tests/unit/cube/test_CubeList.py | Python | lgpl-3.0 | 20,583 |
# testing configuration
# Flask/SQLAlchemy settings used only by the PostgreSQL test suite.
DEBUG = True
TESTING = True
# Dummy secret: never used outside of tests.
SECRET_KEY = "dummy"
# Silence Flask-Cache's warning about the null cache backend.
CACHE_NO_NULL_WARNING = True
# NOTE(review): SQLAlchemy >= 1.4 rejects the "postgres://" scheme in
# favour of "postgresql://" -- confirm the pinned SQLAlchemy version
# before upgrading this dependency.
SQLALCHEMY_DATABASE_URI = "postgres://localhost/seamless_karma_test"
| singingwolfboy/seamless-karma | seamless_karma/config/test_postgres.py | Python | mit | 172 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright 2014 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
#
__author__ = 'gjp'
from django.core.exceptions import ValidationError
from django.conf import settings
import pika
import fiware_cloto.cloto.information as information
from fiware_cloto.cloto.models import TenantInfo, ServerInfo
from fiware_cloto.cloto.utils.log import logger
class InfoManager():
    """Provides access to server information and to the per-tenant
    windowsize setting, and publishes windowsize changes to RabbitMQ.
    """

    def __init__(self):
        # Default to the real Django model classes; tests may replace
        # them via setInformations().
        self.tenantInfo = self.get_tenant_information()
        self.serverInfo = self.get_server_information()

    def get_server_information(self):
        """Return the model class holding information about this server."""
        return ServerInfo

    def get_tenant_information(self):
        """Return the model class holding per-tenant information."""
        return TenantInfo

    def get_information(self, tenantId):
        """Return server information combined with the tenant's windowsize.

        :param str tenantId: id of the tenant to look up.
        :return: an information object built from both DB records.
        """
        s_query = self.get_server_information().objects.get(id__exact='1')
        t_query = self.get_tenant_information().objects.get(tenantId__exact=tenantId)
        # Plain attribute access replaces the original __getattribute__
        # calls -- they are equivalent and far more readable.
        return information.information(s_query.owner, t_query.windowsize,
                                       s_query.version, s_query.runningfrom,
                                       s_query.doc)

    def updateWindowSize(self, tenantId, newSize):
        """Update the windowsize of the specified tenant, persist it and
        broadcast the change through RabbitMQ.

        :param str tenantId: id of the tenant to update.
        :param newSize: new windowsize; validated by checkSize().
        :return: the updated tenant record.
        :raises ValidationError: if newSize is out of range or not numeric.
        """
        self.checkSize(newSize)
        t = self.tenantInfo.objects.get(tenantId__exact=tenantId)
        t.windowsize = newSize
        t.save()
        message = tenantId + " " + str(newSize)
        logger.info("%s", message)
        self.publish_message(message)
        logger.info("%s windowsize updated to %d", tenantId, newSize)
        return t

    def setInformations(self, sInfo, tInfo):
        """Inject alternative server/tenant model classes (used by tests)."""
        self.tenantInfo = tInfo
        self.serverInfo = sInfo

    def checkSize(self, newSize):
        """Validate that newSize is an integer in [1, MAX_WINDOW_SIZE].

        :raises ValidationError: when out of range, or (fix) when the value
            cannot be converted to int at all -- previously a non-numeric
            value escaped as a bare ValueError.
        """
        max_size = int(settings.MAX_WINDOW_SIZE)
        try:
            size = int(newSize)
        except (TypeError, ValueError):
            raise ValidationError("New size is not an integer between 1 and %d" % max_size)
        if size <= 0 or size > max_size:
            raise ValidationError("New size is not an integer between 1 and %d" % max_size)

    def init_information(self):
        """Create the initial ServerInfo row in the database."""
        import datetime
        from django.utils import timezone
        from fiware_cloto.cloto.models import ServerInfo

        runningfrom = datetime.datetime.now(tz=timezone.get_default_timezone())
        # Creating initial data
        s = ServerInfo(id=1, owner=settings.OWNER, version=settings.VERSION,
                       runningfrom=runningfrom, doc=settings.API_INFO_URL)
        s.save()

    def publish_message(self, message):
        """Publish a windowsize message to the "windowsizes" exchange of the
        RabbitMQ server configured in settings, then close the connection.

        A fresh connection is opened per call and is also closed on the
        failure paths (fix: the original leaked the connection when the
        publish step raised).

        :param str message: the well-formatted message to send.
        :raises Exception: if the connection, channel or publish fails.
        """
        rabbitmq = settings.RABBITMQ_URL
        self.connection = None
        self.channel = None
        # Open a remote connection to RabbitMQ on the configured host.
        # "except ... as" replaces the Python-2-only "except ..., err"
        # syntax (valid on 2.6+ and required on 3.x).
        try:
            self.connection = pika.BlockingConnection(pika.ConnectionParameters(
                host=rabbitmq))
            # Open the channel
            self.channel = self.connection.channel()
        except (pika.exceptions.AMQPConnectionError, pika.exceptions.AMQPChannelError) as error:
            # Fix: the original passed `error` as a second Exception arg,
            # so the "%s" placeholder was never filled in.
            raise Exception("Error with Rabbit connection %s" % error)
        try:
            if self.channel:
                self.channel.exchange_declare(exchange="windowsizes",
                                              exchange_type='direct')
                try:
                    # Send the message
                    self.channel.basic_publish(exchange="windowsizes",
                                               routing_key="windowsizes",
                                               body=message)
                except (pika.exceptions.AMQPConnectionError, pika.exceptions.AMQPChannelError):
                    raise Exception("AMQP Connection failed.")
            else:
                raise Exception("AMQP channel not properly created...")
        except Exception:
            # Fix: do not leak the open connection on a failed publish.
            if self.connection and self.connection.is_open:
                self.connection.close()
            raise
        # Success path: close the connection, exactly as before.
        if self.connection and self.connection.is_open:
            self.connection.close()
        else:
            raise Exception("AMQP connection not properly created...")
| geonexus/fiware-cloto | fiware_cloto/cloto/manager/InfoManager.py | Python | apache-2.0 | 5,472 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.