text stringlengths 4 1.02M | meta dict |
|---|---|
"""
Copyright (c) 2015 SONATA-NFV and Paderborn University
ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
"""
from requests import get, put, delete
from tabulate import tabulate
import pprint
import argparse
import json
from subprocess import Popen
# Shared pretty-printer used by all commands to display API responses.
pp = pprint.PrettyPrinter(indent=4)
class RestApiClient():
    """Thin CLI client for the son-emu compute REST API.

    Each public method implements one CLI sub-command; ``execute_command``
    dispatches to the method whose name equals ``args['command']``.
    """

    def __init__(self):
        self.cmds = {}

    def execute_command(self, args):
        """Dispatch ``args['command']`` to the method of the same name.

        :param args: dict of parsed CLI arguments (from ``vars(parse_args)``)
        """
        # Use getattr with a default: without it a missing command raises
        # AttributeError and the fallback message below is unreachable.
        handler = getattr(self, args["command"], None)
        if handler is not None:
            handler(args)
        else:
            print("Command not implemented.")

    def start(self, args):
        """Start a compute instance (PUT /restapi/compute/<dc>/<name>)."""
        req = {'image': args.get("image"),
               'command': args.get("docker_command"),
               'network': args.get("network")}
        response = put("%s/restapi/compute/%s/%s" %
                       (args.get("endpoint"),
                        args.get("datacenter"),
                        args.get("name")),
                       json=req)
        pp.pprint(response.json())

    def stop(self, args):
        """Stop a compute instance (DELETE /restapi/compute/<dc>/<name>)."""
        response = delete("%s/restapi/compute/%s/%s" %
                          (args.get("endpoint"),
                           args.get("datacenter"),
                           args.get("name")))
        pp.pprint(response.json())

    def list(self, args):
        """Print a table of all compute instances in a datacenter.

        NOTE: the method name shadows the builtin ``list`` but is fixed by
        the CLI 'command' choices, so it cannot be renamed.
        """
        containers = get('%s/restapi/compute/%s' %
                         (args.get("endpoint"), args.get('datacenter'))).json()
        table = []
        for c in containers:
            # for each container add a line to the output table
            if len(c) > 1:
                name = c[0]
                status = c[1]
                # Guard against a missing/None 'network' entry instead of
                # crashing on iteration.
                networks = status.get("network") or []
                netw_list = [netw_dict['intf_name'] for netw_dict in networks]
                dc_if_list = [netw_dict['dc_portname'] for netw_dict in networks]
                table.append([status.get("datacenter"),
                              name,
                              status.get("image"),
                              ','.join(netw_list),
                              ','.join(dc_if_list)])
        headers = ["Datacenter",
                   "Container",
                   "Image",
                   "Interface list",
                   "Datacenter interfaces"]
        print(tabulate(table, headers=headers, tablefmt="grid"))

    def status(self, args):
        """Pretty-print the status JSON of a single compute instance."""
        result = get("%s/restapi/compute/%s/%s" %
                     (args.get("endpoint"),
                      args.get("datacenter"),
                      args.get("name"))).json()
        pp.pprint(result)

    def xterm(self, args):
        """Open an xterm running 'docker exec' for each given VNF name."""
        vnf_names = args.get("vnf_names")
        for vnf_name in vnf_names:
            Popen(['xterm', '-xrm', 'XTerm.vt100.allowTitleOps: false', '-T', vnf_name,
                   '-e', "docker exec -it mn.{0} /bin/bash".format(vnf_name)])
# Module-level CLI parser; main() feeds its parsed result to RestApiClient.
parser = argparse.ArgumentParser(description="""son-emu-cli compute
Examples:
- son-emu-cli compute start -d dc2 -n client -i sonatanfv/sonata-iperf3-vnf
- son-emu-cli list
- son-emu-cli compute status -d dc2 -n client
""", formatter_class=argparse.RawTextHelpFormatter)
# Positional: sub-command name, must match a RestApiClient method.
parser.add_argument(
    "command",
    choices=['start', 'stop', 'list', 'status', 'xterm'],
    help="Action to be executed.")
parser.add_argument(
    "vnf_names",
    nargs='*',
    help="vnf names to open an xterm for")
parser.add_argument(
    "--datacenter", "-d", dest="datacenter",
    help="Data center to which the command should be applied.")
parser.add_argument(
    "--name", "-n", dest="name",
    help="Name of compute instance e.g. 'vnf1'.")
parser.add_argument(
    "--image", "-i", dest="image",
    help="Name of container image to be used e.g. 'ubuntu:trusty'")
parser.add_argument(
    "--dcmd", "-c", dest="docker_command",
    help="Startup command of the container e.g. './start.sh'")
parser.add_argument(
    "--net", dest="network",
    help="Network properties of a compute instance e.g. \
'(id=input,ip=10.0.10.3/24),(id=output,ip=10.0.10.4/24)' for multiple interfaces.")
parser.add_argument(
    "--endpoint", "-e", dest="endpoint",
    default="http://127.0.0.1:5001",
    help="REST API endpoint of son-emu (default:http://127.0.0.1:5001)")
def main(argv):
    """Parse ``argv`` with the module-level parser and dispatch the command."""
    parsed_args = vars(parser.parse_args(argv))
    client = RestApiClient()
    client.execute_command(parsed_args)
| {
"content_hash": "1cb48f4197023add0d7a3f594e03cbfd",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 99,
"avg_line_length": 35.66013071895425,
"alnum_prop": 0.592008797653959,
"repo_name": "stevenvanrossem/son-emu",
"id": "fdfc11bb6fc62b670c8721f488c669a177b61454",
"size": "5456",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/emuvim/cli/rest/compute.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "625"
},
{
"name": "Dockerfile",
"bytes": "2644"
},
{
"name": "HTML",
"bytes": "6268"
},
{
"name": "JavaScript",
"bytes": "13979"
},
{
"name": "Python",
"bytes": "792173"
},
{
"name": "Shell",
"bytes": "5708"
}
],
"symlink_target": ""
} |
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from mock import Mock
from cassandra import ProtocolVersion
from cassandra.protocol import PrepareMessage, QueryMessage, ExecuteMessage
class MessageTest(unittest.TestCase):
    """Unit tests for the wire format written by protocol messages."""

    def _check_calls(self, io, expected):
        """Assert ``io.write`` received exactly ``expected`` args, in order."""
        actual_calls = io.write.mock_calls
        self.assertEqual(len(actual_calls), len(expected))
        for actual, wanted in zip(actual_calls, expected):
            self.assertEqual(actual[1], wanted)

    def test_prepare_message(self):
        """
        Test to check the appropriate calls are made
        @since 3.9
        @jira_ticket PYTHON-713
        @expected_result the values are correctly written
        @test_category connection
        """
        msg = PrepareMessage("a")
        buf = Mock()
        msg.send_body(buf, 4)
        self._check_calls(buf, [(b'\x00\x00\x00\x01',), (b'a',)])
        buf.reset_mock()
        msg.send_body(buf, 5)
        self._check_calls(buf, [(b'\x00\x00\x00\x01',), (b'a',),
                                (b'\x00\x00\x00\x00',)])

    def test_execute_message(self):
        """Check the bytes written by ExecuteMessage under v4 and v5."""
        msg = ExecuteMessage('1', [], 4)
        buf = Mock()
        msg.send_body(buf, 4)
        self._check_calls(buf, [(b'\x00\x01',), (b'1',), (b'\x00\x04',),
                                (b'\x01',), (b'\x00\x00',)])
        buf.reset_mock()
        msg.send_body(buf, 5)
        self._check_calls(buf, [(b'\x00\x01',), (b'1',), (b'\x00\x04',),
                                (b'\x00\x00\x00\x01',), (b'\x00\x00',)])

    def test_query_message(self):
        """
        Test to check the appropriate calls are made
        @since 3.9
        @jira_ticket PYTHON-713
        @expected_result the values are correctly written
        @test_category connection
        """
        msg = QueryMessage("a", 3)
        buf = Mock()
        msg.send_body(buf, 4)
        self._check_calls(buf, [(b'\x00\x00\x00\x01',), (b'a',),
                                (b'\x00\x03',), (b'\x00',)])
        buf.reset_mock()
        msg.send_body(buf, 5)
        self._check_calls(buf, [(b'\x00\x00\x00\x01',), (b'a',),
                                (b'\x00\x03',), (b'\x00\x00\x00\x00',)])

    def test_prepare_flag(self):
        """
        Test to check the prepare flag is properly set, This should only happen for V5 at the moment.
        @since 3.9
        @jira_ticket PYTHON-713
        @expected_result the values are correctly written
        @test_category connection
        """
        msg = PrepareMessage("a")
        buf = Mock()
        for version in ProtocolVersion.SUPPORTED_VERSIONS:
            msg.send_body(buf, version)
            # Versions with prepare flags write one extra flags field.
            # This should pass after PYTHON-696
            if ProtocolVersion.uses_prepare_flags(version):
                self.assertEqual(len(buf.write.mock_calls), 3)
            else:
                self.assertEqual(len(buf.write.mock_calls), 2)
            buf.reset_mock()
| {
"content_hash": "ebe27af5731fa48713b5f965cab99e62",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 114,
"avg_line_length": 32.455555555555556,
"alnum_prop": 0.5737761040739473,
"repo_name": "coldeasy/python-driver",
"id": "7e6a5b3702a2080dcb8f11f87a82a64d967ae80f",
"size": "2922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/test_protocol.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "28924"
},
{
"name": "PowerShell",
"bytes": "3817"
},
{
"name": "Python",
"bytes": "2238540"
}
],
"symlink_target": ""
} |
import wpilib
class ButtonDebouncer:
    """Useful utility class for debouncing buttons: a held button only
    registers as pressed once per debounce period."""

    def __init__(self, joystick, buttonnum, period=0.5):
        """
        :param joystick: Joystick object
        :type joystick: :class:`wpilib.Joystick`
        :param buttonnum: Number of button to retrieve
        :type buttonnum: int
        :param period: Period of time (in seconds) to wait before allowing new button
                       presses. Defaults to 0.5 seconds.
        :type period: float
        """
        self.joystick = joystick
        self.buttonnum = buttonnum
        self.latest = 0
        self.debounce_period = float(period)
        # Stores the Timer class itself (not an instance); getFPGATimestamp
        # is looked up on it in get().
        self.timer = wpilib.Timer

    def set_debounce_period(self, period):
        """Set number of seconds to wait before returning True for the
        button again"""
        self.debounce_period = float(period)

    def get(self):
        """Returns the value of the joystick button. If the button is held down, then
        True will only be returned once every ``debounce_period`` seconds"""
        now = self.timer.getFPGATimestamp()
        if not self.joystick.getRawButton(self.buttonnum):
            return False
        if now - self.latest <= self.debounce_period:
            return False
        self.latest = now
        return True

    __bool__ = get
| {
"content_hash": "67e2a23ab465e8b69748b556c09457a1",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 88,
"avg_line_length": 34.205128205128204,
"alnum_prop": 0.6019490254872564,
"repo_name": "robotpy/robotpy-wpilib-utilities",
"id": "2af487510bf4db2399717497b0e98aa106cb767f",
"size": "1334",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "robotpy_ext/control/button_debouncer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "148916"
},
{
"name": "Shell",
"bytes": "536"
}
],
"symlink_target": ""
} |
from itertools import repeat, product
from .features import render_features
from .inputs import render_slotlist, render_slot, render_head
from .paradigms import is_transitive, para_logs
from .tools import pairwise
from .vis import render_exponent, render_vi
def render_log(paradigms, worklog, rules, readjustments):
    """Render the full derivation worklog as one newline-joined LaTeX string."""
    report_lines = lines(paradigms, worklog, rules, readjustments)
    return '\n'.join(report_lines)
def lines(paradigms, worklog, rules, readjustments):
    """Yield the LaTeX derivation report line by line.

    For each paradigm cell, five sections are emitted: input (with the
    rules that changed it), matches, inserts, output (with applied
    readjustments) and spellout.
    """
    yield '\\footnotesize'
    for paradigm, logs in para_logs(paradigms, worklog):
        yield '\\subsection{%s}' % paradigm['name']
        # Transitive paradigms cross subject and object headers; otherwise
        # only the first header row is used with an empty object slot.
        if is_transitive(paradigm):
            sub_obj = product(*paradigm['headers'])
        else:
            sub_obj = zip(paradigm['headers'][0], repeat(''))
        for log, (sub, obj) in zip(logs, sub_obj):
            if obj:
                yield '\\minisec{%s:%s}' % (sub, obj)
            else:
                yield '\\minisec{%s}' % sub
            yield 'Input (Rule applied)'
            yield '\\begin{itemize}'
            yield '\\item %s' % render_slotlist(log['input_pre'])
            # NOTE(review): assumes rules is index-aligned with the
            # pairwise input steps (rules[i] caused step i) — confirm.
            for i, (pre, pst) in enumerate(pairwise([log['input_pre']] + log['input_pro'])):
                # Identity check: only list a step if the rule changed it.
                if pst is not pre:
                    rule = rules[i]
                    yield '\\item %s %s' % (render_slotlist(pst), rule['ref'])
            yield '\\end{itemize}\n'
            if not log['inserts']:
                continue
            yield 'Matches'
            yield '\\begin{itemize}'
            # Nested itemize: slot -> head -> candidate -> matching VIs.
            for slt, slt_match in zip(log['input_pst'], log['matches']):
                yield '\\item %s' % render_slot(slt)
                yield '\\begin{itemize}'
                for hd, hd_match in zip(slt, slt_match):
                    yield '\\item %s' % render_head(hd)
                    yield '\\begin{itemize}'
                    for match in hd_match:
                        yield '\\item %s' % render_features(match['head'])
                        if not match['matches']:
                            continue
                        yield '\\begin{itemize}'
                        for m in match['matches']:
                            yield '\\item %s %s' % (render_vi(m), m['ref'])
                        yield '\\end{itemize}'
                    yield '\\end{itemize}'
                yield '\\end{itemize}'
            yield '\\end{itemize}\n'
            yield 'Inserts'
            yield '\\begin{itemize}'
            yield '\\item %s' % insertlist(log['inserts'])
            yield '\\end{itemize}\n'
            yield 'Output (Readjustment applied)'
            yield '\\begin{itemize}'
            yield '\\item %s' % vilist(log['output_pre'])
            # Same index-alignment assumption for readjustments as for rules.
            for i, (pre, pst) in enumerate(pairwise([log['output_pre']] + log['output_pro'])):
                if pst is not pre:
                    readjustment = readjustments[i]
                    yield '\\item %s %s' % (vilist(pst), readjustment['ref'])
            yield '\\end{itemize}\n'
            yield 'Spellout'
            yield '\\begin{itemize}'
            yield '\\item %s' % render_exponent(log['spellout'])
            yield '\\end{itemize}\n'
def explist(exponents):
    """Render the given exponents side by side, separated by \\quad."""
    rendered = [render_exponent(e) for e in exponents]
    return '\\quad '.join(rendered)
def insertlist(slots):
    """Render slots of inserted VIs as \\#...\\# groups separated by \\quad."""
    rendered_slots = []
    for slot in slots:
        parts = ['%s %s' % (render_exponent(vi['exponent']), vi['ref'].rstrip())
                 for vi in slot]
        rendered_slots.append('\\#%s\\#' % ','.join(parts))
    return '\\quad '.join(rendered_slots)
def vilist(vis):
    """Render a list of VIs by their exponents, separated by \\quad."""
    exponents = (vi['exponent'] for vi in vis)
    return explist(exponents)
| {
"content_hash": "8fddac75529fad40668e67e9e4e9d722",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 94,
"avg_line_length": 40.3448275862069,
"alnum_prop": 0.5028490028490028,
"repo_name": "xflr6/dmengine",
"id": "314215b2f5da862ddcfdc7093454222ba7c4e00d",
"size": "3510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dmengine/reporting/log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "84937"
},
{
"name": "TeX",
"bytes": "1717"
}
],
"symlink_target": ""
} |
"""Artman smoke tests.
It generates both grpc and gapic client libraries for Google APIs in googleapis
repo (except those being blacklisted), and fails if any generation fails.
The test currently assumes that artman and googleapis folders are under the
same parent folder.
"""
import argparse
import fnmatch
import os
import re
import subprocess
import sys
# Artman configs that are skipped by the smoke test (known failures).
ARTMAN_CONFIG_BLACKLIST = [
    'artman_spanner_admin_instance.yaml',
    'artman_spanner_admin_database.yaml',
    'artman_bigtable_admin.yaml'
]
# Languages probed for in each artman config file's content.
SUPPORTED_LANGS = ['python', 'java', 'ruby', 'nodejs', 'php', 'go', 'csharp']
def run_smoke_test(apis):
    """Generate grpc and gapic libraries for every matching artman config.

    Walks ../googleapis/gapic/api for artman_*.yaml configs and runs the
    grpc and gapic pipelines for every supported language mentioned in each
    config. Exits the process with a failure summary if any generation fails.

    :param apis: comma-delimited API names to restrict the test to, or a
        falsy value to test all non-blacklisted APIs.
    """
    artman_config_dir = '../googleapis/gapic/api'
    failure = []
    artman_config_list = []
    if apis:
        for api in apis.split(','):
            artman_config_list.append('artman_%s.yaml' % api)
    for root, dirs, files in os.walk(artman_config_dir):
        for f in fnmatch.filter(files, 'artman_*.yaml'):
            if f in ARTMAN_CONFIG_BLACKLIST:
                # Do not run generation tests for those in the blacklist.
                continue
            if artman_config_list and f not in artman_config_list:
                # If apis list is given, only test those in the list
                continue
            # Raw string: '\.' is an invalid escape in a normal literal.
            api_name = re.search(r'artman_(.*)\.yaml', f).group(1)
            filename = os.path.join(root, f)
            # Close the config file promptly instead of leaking the handle.
            with open(filename) as config_file:
                content = config_file.read()
            for lang in SUPPORTED_LANGS:
                if '%s:' % lang in content:
                    if generate_grpc_library(filename, lang):
                        failure.append('Failed to generate grpc %s library '
                                       'for %s' % (lang, api_name))
                    if generate_gapic_library(filename, lang):
                        failure.append('Failed to generate gapic %s library '
                                       'for %s' % (lang, api_name))
    if failure:
        sys.exit('Smoke test failed:\n%s' % '\n'.join(failure))
def generate_grpc_library(artman_config, lang):
    """Run the artman grpc client pipeline for one config and language.

    Returns artman's exit status (0 on success, non-zero on failure).
    """
    command = ['artman']
    command += ['--config', '%s,../googleapis/gapic/lang/common.yaml' % artman_config]
    command += ['--language', lang]
    command += ['--pipeline', 'GrpcClientPipeline']
    command += ['--publish', 'noop']
    return subprocess.call(command, stdout=subprocess.PIPE)
def generate_gapic_library(artman_config, lang):
    """Run the artman gapic (default) pipeline for one config and language.

    Returns artman's exit status (0 on success, non-zero on failure).
    """
    command = ['artman']
    command += ['--config', '%s,../googleapis/gapic/lang/common.yaml' % artman_config]
    command += ['--language', lang]
    command += ['--publish', 'noop']
    return subprocess.call(command, stdout=subprocess.PIPE)
def parse_args(*args):
    """Parse smoke-test command-line flags from the given argument strings."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--apis',
        type=str,
        default=None,
        help='Comma-delimited list of apis to test against. The artman config '
             'of the API must be available in googleapis/googleapis github '
             'repo in order for smoketest to run properly. If not specified, '
             'all APIs will be tested. APIs in the blacklist will not be '
             'tested.')
    return parser.parse_args(args=args)
if __name__ == "__main__":
    # Parse CLI flags, then run the smoke test over the selected APIs.
    flags = parse_args(*sys.argv[1:])
    run_smoke_test(flags.apis)
| {
"content_hash": "d8683cfcb3ac9d3330a76979e5a5aeca",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 79,
"avg_line_length": 34.680851063829785,
"alnum_prop": 0.5932515337423313,
"repo_name": "shinfan/artman",
"id": "7cda2d83debba7a5aae5a57ff9d3258c5d69e68f",
"size": "3876",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/smoketest_artman.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Protocol Buffer",
"bytes": "40829"
},
{
"name": "Python",
"bytes": "316278"
}
],
"symlink_target": ""
} |
import decimal
from wsme import types as wtypes
from cloudkitty.api.v1 import types as ck_types
from cloudkitty.rating.hash.datamodels import mapping as mapping_models
class Threshold(wtypes.Base):
    """Type describing a Threshold.

    A threshold is used to apply rating rules based on a level, if the parent
    is a field then the level is checked against a metadata. If it's a service
    then it's the quantity of the resource that is checked.
    """

    threshold_id = wtypes.wsattr(ck_types.UuidType(), mandatory=False)
    """UUID of the threshold."""

    level = wtypes.wsattr(decimal.Decimal,
                          mandatory=True,
                          default=decimal.Decimal('0'))
    """Level of the threshold."""

    # Exposed as 'type' in the API payload (see name=); values come from the
    # shared mapping MAP_TYPE enumeration.
    map_type = wtypes.wsattr(mapping_models.MAP_TYPE,
                             default='flat',
                             name='type')
    """Type of the threshold."""

    cost = wtypes.wsattr(decimal.Decimal, mandatory=True)
    """Value of the threshold."""

    # Optional links to the parent service/field and owning group.
    service_id = wtypes.wsattr(ck_types.UuidType(),
                               mandatory=False)
    """UUID of the service."""

    field_id = wtypes.wsattr(ck_types.UuidType(),
                             mandatory=False)
    """UUID of the field."""

    group_id = wtypes.wsattr(ck_types.UuidType(),
                             mandatory=False)
    """UUID of the hashmap group."""

    tenant_id = wtypes.wsattr(wtypes.text, mandatory=False, default=None)
    """ID of the hashmap tenant."""

    @classmethod
    def sample(cls):
        """Return an example Threshold instance (used for API samples)."""
        sample = cls(threshold_id='39dbd39d-f663-4444-a795-fb19d81af136',
                     field_id='ac55b000-a05b-4832-b2ff-265a034886ab',
                     level=decimal.Decimal('1024'),
                     map_type='flat',
                     cost=decimal.Decimal('4.2'),
                     tenant_id='7977999e-2e25-11e6-a8b2-df30b233ffcb')
        return sample
class ThresholdCollection(wtypes.Base):
    """Type describing a list of thresholds.
    """

    thresholds = [Threshold]
    """List of thresholds."""

    @classmethod
    def sample(cls):
        """Return an example collection holding one sample Threshold."""
        sample = Threshold.sample()
        return cls(thresholds=[sample])
| {
"content_hash": "c0afcb7823480950bf5c65a4f4ab7c4d",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 78,
"avg_line_length": 31.442857142857143,
"alnum_prop": 0.591094956837801,
"repo_name": "openstack/cloudkitty",
"id": "0e86948a4fc8f109b32495a5c7e1631eaf33773e",
"size": "2833",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cloudkitty/rating/hash/datamodels/threshold.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "4904"
},
{
"name": "Python",
"bytes": "1046196"
},
{
"name": "Shell",
"bytes": "16361"
}
],
"symlink_target": ""
} |
"""
Example Airflow DAG that shows how to use Google Analytics 360.
"""
import os
from airflow import models
from airflow.providers.google.marketing_platform.operators.analytics import (
GoogleAnalyticsDataImportUploadOperator,
GoogleAnalyticsDeletePreviousDataUploadsOperator,
GoogleAnalyticsGetAdsLinkOperator,
GoogleAnalyticsListAccountsOperator,
GoogleAnalyticsModifyFileHeadersDataImportOperator,
GoogleAnalyticsRetrieveAdsLinksListOperator,
)
from airflow.utils import dates
# Demo configuration, read from the environment with placeholder defaults.
ACCOUNT_ID = os.environ.get("GA_ACCOUNT_ID", "123456789")
BUCKET = os.environ.get("GMP_ANALYTICS_BUCKET", "test-airflow-analytics-bucket")
BUCKET_FILENAME = "data.csv"
WEB_PROPERTY_ID = os.environ.get("GA_WEB_PROPERTY", "UA-12345678-1")
WEB_PROPERTY_AD_WORDS_LINK_ID = os.environ.get("GA_WEB_PROPERTY_AD_WORDS_LINK_ID", "rQafFTPOQdmkx4U-fxUfhj")
DATA_ID = "kjdDu3_tQa6n8Q1kXFtSmg"

with models.DAG(
    "example_google_analytics",
    schedule_interval=None,  # Override to match your needs,
    start_date=dates.days_ago(1),
) as dag:
    # [START howto_marketing_platform_list_accounts_operator]
    list_account = GoogleAnalyticsListAccountsOperator(task_id="list_account")
    # [END howto_marketing_platform_list_accounts_operator]

    # [START howto_marketing_platform_get_ads_link_operator]
    get_ad_words_link = GoogleAnalyticsGetAdsLinkOperator(
        web_property_ad_words_link_id=WEB_PROPERTY_AD_WORDS_LINK_ID,
        web_property_id=WEB_PROPERTY_ID,
        account_id=ACCOUNT_ID,
        task_id="get_ad_words_link",
    )
    # [END howto_marketing_platform_get_ads_link_operator]

    # [START howto_marketing_platform_retrieve_ads_links_list_operator]
    list_ad_words_link = GoogleAnalyticsRetrieveAdsLinksListOperator(
        task_id="list_ad_link", account_id=ACCOUNT_ID, web_property_id=WEB_PROPERTY_ID
    )
    # [END howto_marketing_platform_retrieve_ads_links_list_operator]

    # Upload a CSV object from GCS into the GA custom data source.
    upload = GoogleAnalyticsDataImportUploadOperator(
        task_id="upload",
        storage_bucket=BUCKET,
        storage_name_object=BUCKET_FILENAME,
        account_id=ACCOUNT_ID,
        web_property_id=WEB_PROPERTY_ID,
        custom_data_source_id=DATA_ID,
    )

    # Remove earlier uploads from the same custom data source.
    delete = GoogleAnalyticsDeletePreviousDataUploadsOperator(
        task_id="delete",
        account_id=ACCOUNT_ID,
        web_property_id=WEB_PROPERTY_ID,
        custom_data_source_id=DATA_ID,
    )

    transform = GoogleAnalyticsModifyFileHeadersDataImportOperator(
        task_id="transform",
        storage_bucket=BUCKET,
        storage_name_object=BUCKET_FILENAME,
    )

    # 'upload' runs first; 'delete' and 'transform' then run in parallel.
    upload >> [delete, transform]
| {
"content_hash": "1b2921743c10b919dee13b2c1c769acb",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 108,
"avg_line_length": 36.774647887323944,
"alnum_prop": 0.7261585599387208,
"repo_name": "DinoCow/airflow",
"id": "851132d429590a69e6df11badeba092dc4b3371a",
"size": "3396",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "airflow/providers/google/marketing_platform/example_dags/example_analytics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "56963"
},
{
"name": "HTML",
"bytes": "140781"
},
{
"name": "JavaScript",
"bytes": "1370838"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "1473771"
},
{
"name": "Shell",
"bytes": "18638"
}
],
"symlink_target": ""
} |
from esengine.bases.py3 import * # noqa
from esengine.fields import StringField
from esengine.exceptions import ValidationError
import warnings
from six import iteritems
class BaseDocument(object):
    """Base class for ES documents: field defaults, validation and
    (de)serialization between Python objects and Elasticsearch dicts.

    Subclasses are expected to define ``_doctype``, ``_index`` and a
    ``_fields`` mapping of field name -> field instance (presumably set up
    by a metaclass elsewhere in the package — confirm).
    """

    # When True, values assigned to fields are stored as-is instead of being
    # coerced through the field's from_dict().
    _strict = False
    # Optional iterable of callables run by validate().
    _validators = None
    # Populated from an ES hit's 'fields' entry in from_es().
    _query_fields = None

    def _initialize_defaults_fields(self, ignore=None):
        """Assign its default value to every declared field not in ``ignore``."""
        ignore = ignore or []
        for key, field_instance in iteritems(self.__class__._fields):
            if key not in ignore:
                default = self.get_default_value_for_field(field_instance)
                setattr(self, key, default)

    def get_default_value_for_field(self, field_instance):
        """Resolve a field's default, calling it if it is callable.

        Callable defaults are first tried with (document, field) arguments;
        if that raises TypeError the default is retried with no arguments.
        """
        default = field_instance._default
        if callable(default):
            try:
                default = field_instance._default(self, field_instance)
            except TypeError:
                default = field_instance._default()
        return default

    def __init__(self, *args, **kwargs):
        """Build a document from keyword field values.

        :raises ValueError: if the subclass lacks ``_doctype`` or ``_index``.
        """
        klass = self.__class__.__name__
        if not hasattr(self, '_doctype'):
            raise ValueError('{} have no _doctype attribute'.format(klass))
        if not hasattr(self, '_index'):
            raise ValueError('{} have no _index attribute'.format(klass))
        # Warn early about non-string id fields, which cause mapping issues.
        id_field = self.__class__._fields.get("id")
        if id_field and not isinstance(id_field, StringField):
            warnings.warn(
                'To avoid mapping problems, '
                'it is recommended to define the id field as a StringField'
            )
        # Explicit kwargs first; remaining fields fall back to defaults.
        for key, value in iteritems(kwargs):
            setattr(self, key, value)
        self._initialize_defaults_fields(ignore=kwargs.keys())

    def __setattr__(self, key, value):
        """Reject unknown public attributes and coerce field values.

        Private (underscore-prefixed) attributes bypass the field check.
        """
        if (not key.startswith('_')) and key not in self._fields:
            raise KeyError('`{}` is an invalid field'.format(key))
        field_instance = self._fields.get(key)
        # Unless strict, run the value through the field's deserializer.
        if field_instance and not self._strict:
            value = field_instance.from_dict(value)
        super(BaseDocument, self).__setattr__(key, value)

    def to_dict(self, validate=True, only=None, exclude=None):
        """
        Transform value from Python to Dict to be saved in E.S
        :param validate: If should validate before transform
        :param only: if specified only those fields will be included
        :param exclude: fields to exclude from dict
        :return: dict
        """
        if validate:
            self.validate()
        # 'only' takes precedence over 'exclude'; default is every field.
        if only:
            fields = {
                k: v for k, v in iteritems(self._fields)
                if k in only
            }
        elif exclude:
            fields = {
                k: v for k, v in iteritems(self._fields)
                if k not in exclude
            }
        else:
            fields = self._fields
        return {
            field_name: field_instance.to_dict(
                getattr(self, field_name), validate=validate
            )
            for field_name, field_instance in iteritems(fields)
        }

    @classmethod
    def from_dict(cls, dct):
        """
        Transform data read from E.S to Python Document Object
        :param dct: Result from E.S (hits, source as dict)
        :return: Instance of Document
        """
        params = {}
        for field_name, field_instance in iteritems(cls._fields):
            # Missing keys deserialize from None (field decides the default).
            serialized = dct.get(field_name)
            value = field_instance.from_dict(serialized)
            params[field_name] = value
        return cls(**params)

    @classmethod
    def from_es(cls, hit):
        """
        Takes E.S hit element containing
        [u'_score', u'_type', u'_id', u'_source', u'_index']
        :param hit: E.S hit
        :return: Document instance
        """
        instance = cls.from_dict(dct=hit.get('_source', {}))
        # Mirror the ES id on both the private and the public attribute.
        instance._id = instance.id = hit.get('_id')
        instance._score = hit.get('_score')
        instance._query_fields = hit.get('fields', None)
        return instance

    def validate(self):
        """Run the registered validators against this document.

        :raises ValidationError: if any validator returns a truthy value.
        """
        if self._validators:
            for validator in self._validators:
                """
                Functions in self._validators receives document instance
                should return None or
                raise Exception (ValidationError) or return any value
                """
                val = validator(self)
                if val:
                    raise ValidationError("Invalid: %s" % val)
| {
"content_hash": "c889979bebed313b24e502c35c1e661b",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 75,
"avg_line_length": 35.395161290322584,
"alnum_prop": 0.5618591934381408,
"repo_name": "catholabs/esengine",
"id": "66010bb96c3ab779f06ab0ef04e5b70072dc1865",
"size": "4389",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "esengine/bases/document.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "630"
},
{
"name": "Python",
"bytes": "116256"
}
],
"symlink_target": ""
} |
from cupy import elementwise
from cupy.math import ufunc
# Trigonometric ufuncs mirroring the numpy API.  create_math_ufunc args are:
# device math function name, arity, cupy ufunc name, and docstring.
sin = ufunc.create_math_ufunc(
    'sin', 1, 'cupy_sin',
    '''Elementwise sine function.
.. seealso:: :data:`numpy.sin`
''')

cos = ufunc.create_math_ufunc(
    'cos', 1, 'cupy_cos',
    '''Elementwise cosine function.
.. seealso:: :data:`numpy.cos`
''')

tan = ufunc.create_math_ufunc(
    'tan', 1, 'cupy_tan',
    '''Elementwise tangent function.
.. seealso:: :data:`numpy.tan`
''')

arcsin = ufunc.create_math_ufunc(
    'asin', 1, 'cupy_arcsin',
    '''Elementwise inverse-sine function (a.k.a. arcsine function).
.. seealso:: :data:`numpy.arcsin`
''')

arccos = ufunc.create_math_ufunc(
    'acos', 1, 'cupy_arccos',
    '''Elementwise inverse-cosine function (a.k.a. arccosine function).
.. seealso:: :data:`numpy.arccos`
''')

arctan = ufunc.create_math_ufunc(
    'atan', 1, 'cupy_arctan',
    '''Elementwise inverse-tangent function (a.k.a. arctangent function).
.. seealso:: :data:`numpy.arctan`
''')

# Binary ufuncs (arity 2).
hypot = ufunc.create_math_ufunc(
    'hypot', 2, 'cupy_hypot',
    '''Computes the hypoteneous of orthogonal vectors of given length.
This is equivalent to ``sqrt(x1 **2 + x2 ** 2)``, while this function is
more efficient.
.. seealso:: :data:`numpy.hypot`
''')

arctan2 = ufunc.create_math_ufunc(
    'atan2', 2, 'cupy_arctan2',
    '''Elementwise inverse-tangent of the ratio of two arrays.
.. seealso:: :data:`numpy.arctan2`
''')

# Degree/radian conversions implemented as elementwise kernels over
# half/float/double dtypes.
deg2rad = elementwise.create_ufunc(
    'cupy_deg2rad',
    ('e->e', 'f->f', 'd->d'),
    'out0 = in0 * (out0_type)(M_PI / 180)',
    doc='''Converts angles from degrees to radians elementwise.
.. seealso:: :data:`numpy.deg2rad`, :data:`numpy.radians`
''')

rad2deg = elementwise.create_ufunc(
    'cupy_rad2deg',
    ('e->e', 'f->f', 'd->d'),
    'out0 = in0 * (out0_type)(180 / M_PI)',
    doc='''Converts angles from radians to degrees elementwise.
.. seealso:: :data:`numpy.rad2deg`, :data:`numpy.degrees`
''')

# TODO(okuta): Implement unwrap
# numpy-compatible aliases.
degrees = rad2deg
radians = deg2rad
| {
"content_hash": "67ccfe8c8473643cca651abf21718fa2",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 76,
"avg_line_length": 19.92452830188679,
"alnum_prop": 0.6022727272727273,
"repo_name": "ytoyama/yans_chainer_hackathon",
"id": "cb068feee6430ecd82580a48b749ce6c4a4f0855",
"size": "2112",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "cupy/math/trigonometric.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cuda",
"bytes": "5986"
},
{
"name": "Python",
"bytes": "879615"
}
],
"symlink_target": ""
} |
import argparse
import os
import shlex
import sys
def show_notification(args):
    """Display a macOS notification via osascript.

    :param args: namespace with ``text`` (required), and optional ``title``
        and ``sound`` attributes.
    """
    script = 'display notification "{0}"'.format(_applescript_escape(args.text))
    if args.title:
        script += ' with title "{0}"'.format(_applescript_escape(args.title))
    if args.sound:
        script += ' sound name "{0}"'.format(_applescript_escape(args.sound))
    # shlex.quote prevents user input from breaking out of the shell
    # argument (the previous single-quote wrapping allowed injection).
    os.system('osascript -e ' + shlex.quote(script))


def _applescript_escape(value):
    # Escape backslashes and double quotes so the value is safe inside an
    # AppleScript double-quoted string literal.
    return str(value).replace('\\', '\\\\').replace('"', '\\"')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Positional body text is required; title and sound are optional flags.
    parser.add_argument('text', type=str, help='the body text of the notification')
    parser.add_argument('-s', '--sound', help='system sound to play when notification shows (if not found, a default is used)', type=str, nargs='?', const='asdf')
    parser.add_argument('-t', '--title', help='the title of the notification', type=str)
    args = parser.parse_args()
    show_notification(args)
| {
"content_hash": "b9a99a07d766b59bb5b9a0e01c7f9276",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 159,
"avg_line_length": 33,
"alnum_prop": 0.6679841897233202,
"repo_name": "blakek/my-files",
"id": "385ddc116c1f793560de2b4e6b5b37416b6e60b7",
"size": "783",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "temp/bin/notify.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "4200"
},
{
"name": "Python",
"bytes": "17070"
},
{
"name": "Ruby",
"bytes": "592"
},
{
"name": "Shell",
"bytes": "160674"
}
],
"symlink_target": ""
} |
# Sphinx configuration for the mciutil documentation build.
import sys
import os

# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)

# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)

import mciutil

# -- General configuration ---------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Mastercard file utilities'
copyright = u'2015, Anthony Delosa'

# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = mciutil.__version__
# The full version, including alpha/beta/rc tags.
release = mciutil.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False

# -- Options for HTML output -------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
#html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'mciutildoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'mciutil.tex',
u'Mastercard file utilities Documentation',
u'Anthony Delosa', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mciutil',
u'Mastercard file utilities Documentation',
[u'Anthony Delosa'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'mciutil',
     u'Mastercard file utilities Documentation',
     u'Anthony Delosa',
     'mciutil',
     # TODO(review): template placeholder left from sphinx-quickstart —
     # replace with a real one-line project description.
     'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "50936f467325bad90168f8c025c727d3",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 76,
"avg_line_length": 30.846743295019156,
"alnum_prop": 0.7038877158117004,
"repo_name": "adelosa/mciutil",
"id": "b6e7f0559af53a373b2073bab5a8be4db34e654c",
"size": "8493",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "docs/conf.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1991"
},
{
"name": "Python",
"bytes": "141614"
}
],
"symlink_target": ""
} |
import json
import os
import urllib2
import urllib
import html2text
from unidecode import unidecode
import time
import urllib
import logging
import os
import os.path
import sys
import threading
# Fetch and unpack the OpenRecipes dump once; skip the download when the
# unpacked file is already present from a previous run.
if not os.path.isfile('recipeitems-latest.json'):
    os.system('wget http://openrecipes.s3.amazonaws.com/recipeitems-latest.json.gz')
    os.system('gunzip recipeitems-latest.json.gz')
# Ensure the output directory for scraped markdown exists.
if not os.path.exists('recipes'):
    os.makedirs('recipes')
# Shared progress/troubleshooting log for all worker threads; appended
# across runs (filemode='a') so restarts keep the history.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                    datefmt='%m-%d %H:%M:%S',
                    filename='log',
                    filemode='a')
def get_url_markdown(baseurl, start, increment):
    """Fetch *baseurl* and return its content converted to plain-text
    markdown, or None when the page cannot be retrieved.

    The None return is part of the contract: worker() checks
    ``recipeMD is not None`` and records a failed fetch instead of
    crashing.  *start* and *increment* are unused but kept for interface
    compatibility (an earlier wget-based implementation needed them).
    """
    try:
        urlHandler = urllib2.urlopen(baseurl)
        data = urlHandler.read()
    except Exception:
        # Network/HTTP failure: previously this propagated and killed the
        # calling worker thread; report "no content" instead.
        return None
    h = html2text.HTML2Text()
    h.ignore_links = True
    h.ignore_images = True
    h.body_width = 10000
    # Lenient unicode round-trip so html2text never chokes on odd encodings.
    data = h.handle(unidecode(unicode(data, errors='ignore')))
    return unidecode(data)
def worker(start, increment):
    """Thread worker: scrape every *increment*-th recipe (offset *start*)
    from the JSON dump and save it as markdown.

    Resumes from a per-thread index file whose rows are
    ``<lineNum>\t<url>\t<name or 'None'>``.
    """
    logger = logging.getLogger('worker' + str(start) + "_" + str(increment))
    # Parenthesized print works identically under Python 2 and 3
    # (was a py2-only print statement).
    print('Worker: %s/%s' % (start, increment))
    indexFile = 'recipes/index' + str(start) + "_" + str(increment) + '.txt'
    lastLine = ""
    if os.path.isfile(indexFile):
        # Resume: the last indexed line number is where this thread stopped.
        with open(indexFile, 'rb') as f:
            for line in f:
                lastLine = line
        lastfileNum = int(lastLine.split()[0])
    else:
        lastfileNum = -1
    fileNum = 0
    t = time.time()
    with open('recipeitems-latest.json', 'rb') as f:
        for line in f:
            fileNum = fileNum + 1
            # Shard the work: this thread owns lines where
            # lineNum % increment == start.
            if fileNum % increment == start:
                # Bucket output roughly 500 files per directory.
                folderSave = str(int(fileNum / 500))
                if not os.path.exists('recipes/' + folderSave):
                    os.makedirs('recipes/' + folderSave)
                if fileNum > lastfileNum:
                    recipe = json.loads(line)
                    logger.info(str(fileNum) + "\t" + recipe['url'] + '\t' + recipe['name'])
                    t = time.time()
                    recipeMD = get_url_markdown(recipe['url'], start, increment)
                    logger.info('%s seconds' % str(round(time.time() - t, 1)))
                    if recipeMD is not None:
                        with open('recipes/' + folderSave + '/' + str(fileNum) + '.md', 'wb') as g:
                            g.write(recipeMD)
                        # os.system('bzip2 ' + 'recipes/' + folderSave + '/' + str(fileNum) + '.md')
                        with open(indexFile, 'a') as g:
                            g.write(str(fileNum) + "\t" + recipe['url'] + '\t' + unidecode(recipe['name']) + '\n')
                    else:
                        # Failed fetch: still advance the index so we don't retry forever.
                        with open(indexFile, 'a') as g:
                            g.write(str(fileNum) + "\t" + recipe['url'] + '\t' + 'None' + '\n')
    return
threads = []
numThreads = 15
for i in range(numThreads):
t = threading.Thread(target=worker, args=(i,numThreads,))
threads.append(t)
t.start() | {
"content_hash": "e0286a9b1017901748ec6b80e655a3d2",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 107,
"avg_line_length": 32.79207920792079,
"alnum_prop": 0.5694444444444444,
"repo_name": "schollz/extract_recipe",
"id": "48bf96798a4540c19d0551a619884a348035a113",
"size": "3312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "get_recipes/downloadRecipes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "878"
},
{
"name": "Go",
"bytes": "1801"
},
{
"name": "HTML",
"bytes": "1571"
},
{
"name": "Python",
"bytes": "151504"
},
{
"name": "Shell",
"bytes": "401"
}
],
"symlink_target": ""
} |
"""
This function receives events from the word list DynamoDB table
stream.
"""
from __future__ import print_function
import json
import boto3, botocore
def lambda_handler(event, context):
    """Lambda entry point: log the incoming event and process each
    DynamoDB stream record it carries."""
    print("Event: {}".format(json.dumps(event)))
    records = event["Records"]
    for record in records:
        handle_dynamodb_stream_record(record)
def handle_dynamodb_stream_record(record):
print("Record: {}".format(json.dumps(record))) | {
"content_hash": "c48d833942714adb8f9eba7237830be8",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 68,
"avg_line_length": 27.4375,
"alnum_prop": 0.6970387243735763,
"repo_name": "moduspwnens/unique-word-generator-api",
"id": "9325b0d5b28b3798889e17a7d69b099dd174f369",
"size": "439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "functions/word_list_table_trigger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "9228"
},
{
"name": "Python",
"bytes": "18012"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from past.builtins import basestring
import pytest
from flexget.plugins.parsers.parser_internal import ParserInternal
from flexget.plugins.parsers.parser_guessit import ParserGuessit
class TestSeriesParser(object):
    # Parametrized over both parser implementations, so every test in this
    # class runs once per parser backend.
    @pytest.fixture(scope='class', params=(ParserInternal, ParserGuessit), ids=['internal', 'guessit'])
    def parse(self, request):
        p = request.param()

        def parse(data, name=None, **kwargs):
            # Thin wrapper: these tests only exercise series parsing.
            return p.parse_series(data, name=name, **kwargs)
        return parse
    @pytest.fixture(scope='class')
    def parse_invalid(self, parse):
        def parse_invalid(name, data, **kwargs):
            """Makes sure either ParseWarning is raised, or return is invalid."""
            # NOTE(review): the `parse` fixture expects (data, name=...), but
            # this passes `name` as the data positional — the arguments look
            # swapped; verify the invalid-result tests aren't passing vacuously.
            r = parse(name, data, **kwargs)
            assert not r.valid, '{data} should not be valid'.format(data=data)
        return parse_invalid
    def test_proper(self, parse):
        """SeriesParser: proper"""
        s = parse(name='Something Interesting', data='Something.Interesting.S01E02.Proper-FlexGet')
        assert s.season == 1
        assert s.episode == 2
        assert s.quality.name == 'unknown'
        assert s.proper, 'did not detect proper from %s' % s.data
        # 'proper' must also be detected when mixed with quality tokens.
        s = parse(name='foobar', data='foobar 720p proper s01e01')
        assert s.proper, 'did not detect proper from %s' % s.data
    def test_non_proper(self, parse):
        """SeriesParser: non-proper"""
        # A release without the Proper tag must not set the proper flag.
        s = parse(name='Something Interesting', data='Something.Interesting.S01E02-FlexGet')
        assert s.season == 1
        assert s.episode == 2
        assert s.quality.name == 'unknown'
        assert not s.proper, 'detected proper'
    def test_anime_proper(self, parse):
        """SeriesParser: anime fansub style proper (13v2)"""
        # A 'vN' suffix maps to proper_count N-1 (v2 -> 1, v3 -> 2).
        s = parse(name='Anime', data='[aoeu] Anime 19v2 [23BA98]')
        assert s.identifier == 19
        assert s.proper_count == 1
        s = parse(name='Anime', data='Anime_-_19v3')
        assert s.identifier == 19
        assert s.proper_count == 2
    def test_basic(self, parse, parse_invalid):
        """SeriesParser: basic parsing"""
        # Extra leading words before the series name must not match.
        parse_invalid(name='Something Interesting', data='The.Something.Interesting.S01E02-FlexGet')
        # A purely numeric series name still parses.
        s = parse(name='25', data='25.And.More.S01E02-FlexGet')
        assert s.valid, 'Fix the implementation, should be valid'
        assert s.identifier == 'S01E02', 'identifier broken'
    def test_confusing_date(self, parse):
        """SeriesParser: confusing (invalid) numbering scheme"""
        # 2008x12.13 must resolve as a date id, not a season/episode pair.
        s = parse(name='Something', data='Something.2008x12.13-FlexGet')
        assert not s.episode, 'Should not have episode'
        assert not s.season, 'Should not have season'
        assert s.id_type == 'date'
        assert s.identifier == '2008-12-13', 'invalid id'
        assert s.valid, 'should be valid'
    def test_unwanted_disc(self, parse_invalid):
        """SeriesParser: unwanted disc releases"""
        # SxxDyy (disc numbering) is not an episode id.
        parse_invalid(name='Something', data='Something.S01D2.DVDR-FlexGet')
    def test_season_x_ep(self, parse):
        """SeriesParser: 01x02"""
        s = parse(name='Something', data='Something.01x02-FlexGet')
        assert (s.season == 1 and s.episode == 2), 'failed to parse 01x02'
        # Spaces around the 'x' are tolerated.
        s = parse(name='Something', data='Something 1 x 2-FlexGet')
        assert (s.season == 1 and s.episode == 2), 'failed to parse 1 x 2'
        # Ticket #732
        s = parse(name='Something', data='Something - This is the Subtitle 14x9 [Group-Name]')
        assert (s.season == 14 and s.episode == 9), 'failed to parse %s' % s.data
    # Skipped: bracketed season/episode parsing is tracked in #402.
    @pytest.mark.skip(reason='FIX: #402 .. a bit hard to do')
    def test_ep_in_square_brackets(self, parse):
        """SeriesParser: [S01] [E02] NOT IMPLEMENTED"""
        # FIX: #402 .. a bit hard to do
        s = parse(name='Something', data='Something [S01] [E02]')
        assert (s.season == 1 and s.episode == 2), 'failed to parse %s' % s
def test_ep_in_parenthesis(self, parse):
"""SeriesParser: test ep in parenthesis"""
s = parse(name='Something', data='Something (S01E02)')
assert (s.season == 1 and s.episode == 2), 'failed to parse %s' % s
def test_season_episode(self, parse):
"""SeriesParser: season X, episode Y"""
s = parse(name='Something', data='Something - Season 3, Episode 2')
assert (s.season == 3 and s.episode == 2), 'failed to parse %s' % s
s = parse(name='Something', data='Something - Season2, Episode2')
assert (s.season == 2 and s.episode == 2), 'failed to parse %s' % s
s = parse(name='Something', data='Something - Season2 Episode2')
assert (s.season == 2 and s.episode == 2), 'failed to parse %s' % s
def test_series_episode(self, parse):
"""SeriesParser: series X, episode Y"""
s = parse(name='Something', data='Something - Series 2, Episode 2')
assert (s.season == 2 and s.episode == 2), 'failed to parse %s' % s
s = parse(name='Something', data='Something - Series3, Episode2')
assert (s.season == 3 and s.episode == 2), 'failed to parse %s' % s
s = parse(name='Something', data='Something - Series4 Episode2')
assert (s.season == 4 and s.episode == 2), 'failed to parse %s' % s
def test_episode(self, parse):
"""SeriesParser: episode X (assume season 1)"""
s = parse(name='Something', data='Something - Episode2')
assert (s.season == 1 and s.episode == 2), 'failed to parse %s' % s
s = parse(name='Something', data='Something - Episode 2')
assert (s.season == 1 and s.episode == 2), 'failed to parse %s' % s
s = parse(name='Something', data='Something - Episode VIII')
assert (s.season == 1 and s.episode == 8), 'failed to parse %s' % s
def test_ep(self, parse):
"""SeriesParser: ep X (assume season 1)"""
s = parse(name='Something', data='Something - Ep2')
assert (s.season == 1 and s.episode == 2), 'failed to parse %s' % s
s = parse(name='Something', data='Something - Ep 2')
assert (s.season == 1 and s.episode == 2), 'failed to parse %s' % s
s = parse(name='Something', data='Something - Ep VIII')
assert (s.season == 1 and s.episode == 8), 'failed to parse %s' % s
def test_season_episode_of_total(self, parse):
"""SeriesParser: season X YofZ"""
s = parse(name='Something', data='Something Season 2 2of12')
assert (s.season == 2 and s.episode == 2), 'failed to parse %s' % s
s = parse(name='Something', data='Something Season 2, 2 of 12')
assert (s.season == 2 and s.episode == 2), 'failed to parse %s' % s
def test_episode_of_total(self, parse):
"""SeriesParser: YofZ (assume season 1)"""
s = parse(name='Something', data='Something 2of12')
assert (s.season == 1 and s.episode == 2), 'failed to parse %s' % s
s = parse(name='Something', data='Something 2 of 12')
assert (s.season == 1 and s.episode == 2), 'failed to parse %s' % s
    def test_part(self, parse):
        """SeriesParser: test parsing part numeral (assume season 1)"""
        # Roman numerals, digits and spelled-out numbers are all accepted
        # after 'Pt.'/'Part'.
        s = parse(name='Test', data='Test.Pt.I.720p-FlexGet')
        assert (s.season == 1 and s.episode == 1), 'failed to parse %s' % s
        s = parse(name='Test', data='Test.Pt.VI.720p-FlexGet')
        assert (s.season == 1 and s.episode == 6), 'failed to parse %s' % s
        s = parse(name='Test', data='Test.Part.2.720p-FlexGet')
        assert (s.season == 1 and s.episode == 2), 'failed to parse %s' % s
        assert s.identifier == 'S01E02'
        s = parse(name='Test', data='Test.Part3.720p-FlexGet')
        assert (s.season == 1 and s.episode == 3), 'failed to parse %s' % s
        # Explicit season + part combination.
        s = parse(name='Test', data='Test.Season.3.Part.IV')
        assert (s.season == 3 and s.episode == 4), 'failed to parse %s' % s
        s = parse(name='Test', data='Test.Part.One')
        assert (s.season == 1 and s.episode == 1), 'failed to parse %s' % s
def test_digits(self, parse):
"""SeriesParser: digits (UID)"""
s = parse(name='Something', data='Something 01 FlexGet')
assert (s.id == 1), 'failed to parse %s' % s.data
assert s.id_type == 'sequence'
s = parse(name='Something', data='Something-121.H264.FlexGet')
assert (s.id == 121), 'failed to parse %s' % s.data
assert s.id_type == 'sequence'
s = parse(name='Something', data='Something 1 AC3')
assert (s.id == 1), 'failed to parse %s' % s.data
assert s.id_type == 'sequence'
s = parse(name='Something', data='[TheGroup] Something - 12 1280x720 x264-Hi10P')
assert (s.id == 12), 'failed to parse %s' % s.data
assert s.id_type == 'sequence'
    def test_quality(self, parse):
        """SeriesParser: quality"""
        s = parse(name='Foo Bar', data='Foo.Bar.S01E01.720p.HDTV.x264-FlexGet')
        assert (s.season == 1 and s.episode == 1), 'failed to parse episodes from %s' % s.data
        assert (s.quality.name == '720p hdtv h264'), 'failed to parse quality from %s' % s.data
        s = parse(name='Test', data='Test.S01E01.720p-FlexGet')
        assert s.quality.name == '720p', 'failed to parse quality from %s' % s.data
        s = parse(name='30 Suck', data='30 Suck 4x4 [HDTV - FlexGet]')
        assert s.quality.name == 'hdtv', 'failed to parse quality %s' % s.data
        # Multi-component quality: resolution + source + codec + audio.
        s = parse(name='ShowB', data='ShowB.S04E19.Name of Ep.720p.WEB-DL.DD5.1.H.264')
        assert s.quality.name == '720p webdl h264 dd5.1', 'failed to parse quality %s' % s.data
def test_quality_parenthesis(self, parse):
"""SeriesParser: quality in parenthesis"""
s = parse(name='Foo Bar', data='Foo.Bar.S01E01.[720p].HDTV.x264-FlexGet')
assert (s.season == 1 and s.episode == 1), 'failed to parse episodes from %s' % s.data
assert (s.quality.name == '720p hdtv h264'), 'failed to parse quality from %s' % s.data
s = parse(name='Foo Bar', data='Foo.Bar.S01E01.(720p).HDTV.x264-FlexGet')
assert (s.season == 1 and s.episode == 1), 'failed to parse episodes from %s' % s.data
assert (s.quality.name == '720p hdtv h264'), 'failed to parse quality from %s' % s.data
s = parse(name='Foo Bar', data='[720p]Foo.Bar.S01E01.HDTV.x264-FlexGet')
assert (s.season == 1 and s.episode == 1), 'failed to parse episodes from %s' % s.data
assert (s.quality.name == '720p hdtv h264'), 'failed to parse quality from %s' % s.data
def test_numeric_names(self, parse):
"""SeriesParser: numeric names (24)"""
s = parse(name='24', data='24.1x2-FlexGet')
assert (s.season == 1 and s.episode == 2), 'failed to parse %s' % s.data
s = parse(name='90120', data='90120.1x2-FlexGet')
assert (s.season == 1 and s.episode == 2), 'failed to parse %s' % s.data
def test_group_prefix(self, parse):
"""SeriesParser: [group] before name"""
s = parse(name='Foo Bar', data='[l.u.l.z] Foo Bar - 11 (H.264) [5235532D].mkv')
assert (s.id == 11), 'failed to parse %s' % s.data
s = parse(name='Foo Bar', data='[7.1.7.5] Foo Bar - 11 (H.264) [5235532D].mkv')
assert (s.id == 11), 'failed to parse %s' % s.data
def test_hd_prefix(self, parse):
"""SeriesParser: HD 720p before name"""
s = parse(name='Foo Bar', data='HD 720p: Foo Bar - 11 (H.264) [5235532D].mkv')
assert (s.id == 11), 'failed to parse %s' % s.data
assert (s.quality.name == '720p h264'), 'failed to pick up quality'
    def test_partially_numeric(self, parse):
        """SeriesParser: partially numeric names"""
        # A year inside the series name must not be mistaken for an id.
        s = parse(name='Foo 2009', data='Foo.2009.S02E04.HDTV.XviD-2HD[FlexGet]')
        assert (s.season == 2 and s.episode == 4), 'failed to parse %s' % s.data
        assert (s.quality.name == 'hdtv xvid'), 'failed to parse quality from %s' % s.data
def test_ignore_seasonpacks(self, parse, parse_invalid):
"""SeriesParser: ignoring season packs"""
# parse_invalid(name='The Foo', data='The.Foo.S04.1080p.FlexGet.5.1')
parse_invalid(name='The Foo', data='The Foo S05 720p BluRay DTS x264-FlexGet')
parse_invalid(name='The Foo', data='The Foo S05 720p BluRay DTS x264-FlexGet')
parse_invalid(name='Something', data='Something S02 Pack 720p WEB-DL-FlexGet')
parse_invalid(name='Something', data='Something S06 AC3-CRAPL3SS')
parse_invalid(name='Something', data='Something SEASON 1 2010 540p BluRay QEBS AAC ANDROID IPAD MP4 FASM')
parse_invalid(name='Something', data='Something.1x0.Complete.Season-FlexGet')
parse_invalid(name='Something', data='Something.1xAll.Season.Complete-FlexGet')
parse_invalid(name='Something', data='Something Seasons 1 & 2 - Complete')
parse_invalid(name='Something', data='Something Seasons 4 Complete')
parse_invalid(name='Something', data='Something Seasons 1 2 3 4')
parse_invalid(name='Something', data='Something S6 E1-4')
parse_invalid(name='Something', data='Something_Season_1_Full_Season_2_EP_1-7_HD')
parse_invalid(name='Something', data='Something - Season 10 - FlexGet')
parse_invalid(name='Something', data='Something_ DISC_1_OF_2 MANofKENT INVICTA RG')
# Make sure no false positives
assert parse(name='Something', data='Something S01E03 Full Throttle').valid
    def test_similar(self, parse):
        """SeriesParser: strict mode rejects titles that merely start with the name."""
        s = parse(name='Foo Bar', data='Foo.Bar:Doppelganger.S02E04.HDTV.FlexGet', strict_name=True)
        assert not s.valid, 'should not have parser Foo.Bar:Doppelganger'
        s = parse(name='Foo Bar', data='Foo.Bar.Doppelganger.S02E04.HDTV.FlexGet', strict_name=True)
        assert not s.valid, 'should not have parser Foo.Bar.Doppelganger'
    def test_idiotic_numbering(self, parse):
        """SeriesParser: idiotic 101, 102, 103, .. numbering"""
        # 706 must be read as season 7 episode 6 in 'ep' mode.
        s = parse('Test.706.720p-FlexGet', name='test', identified_by='ep')
        assert s.season == 7, 'didn\'t pick up season'
        assert s.episode == 6, 'didn\'t pick up episode'
    def test_idiotic_numbering_with_zero(self, parse):
        """SeriesParser: idiotic 0101, 0102, 0103, .. numbering"""
        # Leading zero variant of the SSEE packing.
        s = parse('Test.0706.720p-FlexGet', name='test', identified_by='ep')
        assert s.season == 7, 'season missing'
        assert s.episode == 6, 'episode missing'
        assert s.identifier == 'S07E06', 'identifier broken'
    def test_idiotic_invalid(self, parse):
        """SeriesParser: idiotic confused by invalid"""
        # Large numbers in the release (5190458) must not be split into
        # plausible season/episode pairs.
        s = parse('Test.Revealed.WS.PDTV.XviD-aAF.5190458.TPB.torrent', name='test', identified_by='ep')
        # assert_raises(ParseWarning, s.parse)
        assert not s.season == 5, 'confused, got season'
        assert not s.season == 4, 'confused, got season'
        assert not s.episode == 19, 'confused, got episode'
        assert not s.episode == 58, 'confused, got episode'
    def test_zeroes(self, parse):
        """SeriesParser: test zeroes as a season, episode"""
        for data in ['Test.S00E00-FlexGet', 'Test.S00E01-FlexGet', 'Test.S01E00-FlexGet']:
            s = parse(name='Test', data=data)
            # NOTE: `id` shadows the builtin here; it holds the identifier string.
            id = s.identifier
            assert s.valid, 'parser not a valid for %s' % data
            assert isinstance(id, basestring), 'id is not a string for %s' % data
            assert isinstance(s.season, int), 'season is not a int for %s' % data
            assert isinstance(s.episode, int), 'season is not a int for %s' % data
    def test_exact_name(self, parse):
        """SeriesParser: test exact/strict name parsing"""
        s = parse('Test.Foobar.S01E02.720p-FlexGet', name='test')
        assert s.valid, 'normal failed'
        # In strict mode extra words after the name invalidate the match...
        s = parse('Test.A.S01E02.720p-FlexGet', name='test', strict_name=True)
        assert not s.valid, 'strict A failed'
        # ...but an exact multi-word name still matches.
        s = parse('Test.AB.S01E02.720p-FlexGet', name='Test AB', strict_name=True)
        assert s.valid, 'strict AB failed'
        s = parse('Red Tomato (US) S01E02 720p-FlexGet', name='Red Tomato', strict_name=True)
        assert not s.valid, 'Red Tomato (US) should not match Red Tomato in exact mode'
    def test_name_word_boundries(self, parse):
        """SeriesParser: the name must end on a word boundary."""
        name = 'test'
        s = parse('Test.S01E02.720p-FlexGet', name=name)
        assert s.valid, 'normal failed'
        # In non-exact mode these should match
        s = parse('Test.crap.S01E02.720p-FlexGet', name=name)
        assert s.valid, 'normal failed'
        s = parse('Test_crap.S01E02.720p-FlexGet', name=name)
        assert s.valid, 'underscore failed'
        # However if the title ends mid-word, it should not match
        s = parse('Testing.S01E02.720p-FlexGet', name=name)
        assert not s.valid, 'word border failed'
def test_quality_as_ep(self, parse):
"""SeriesParser: test that qualities are not picked as ep"""
from flexget.utils import qualities
for quality in qualities.all_components():
parse('FooBar %s XviD-FlexGet' % quality.name, name='FooBar')
def test_sound_as_ep(self, parse):
"""SeriesParser: test that sound infos are not picked as ep"""
sounds = ['AC3', 'DD5.1', 'DTS']
for sound in sounds:
parse(data='FooBar %s XViD-FlexGet' % sound, name='FooBar')
    def test_ep_as_quality(self, parse):
        """SeriesParser: test that eps are not picked as qualities"""
        from flexget.utils import qualities
        # Cross product of every quality pair: the digits of quality1 are
        # used as a fake episode number, quality2 is the real quality token.
        for quality1 in qualities.all_components():
            # Attempt to create an episode number out of quality
            mock_ep1 = ''.join(list(filter(str.isdigit, quality1.name)))
            if not mock_ep1:
                continue
            for quality2 in qualities.all_components():
                mock_ep2 = ''.join(list(filter(str.isdigit, quality2.name)))
                if not mock_ep2:
                    continue
                # 720i, 1080i, etc. are failing because
                # e.g the 720 in 720i can always be taken to mean 720p,
                # which is a higher priority quality.
                # Moreover, 1080 as an ep number is always failing because
                # sequence regexps support at most 3 digits at the moment.
                # Luckily, all of these cases are discarded by the following,
                # which also discards the failing cases when episode number
                # (e.g. 720) is greater or equal than quality number (e.g. 480p).
                # There's nothing that can be done with those failing cases with the
                # current
                # "grab leftmost occurrence of highest quality-like thing" algorithm.
                if int(mock_ep1) >= int(mock_ep2):
                    continue
                s = parse('FooBar - %s %s-FlexGet' % (mock_ep1, quality2.name), name='FooBar')
                assert s.episode == int(mock_ep1), "confused episode %s with quality %s" % \
                    (mock_ep1, quality2.name)
                # Also test with reversed relative order of episode and quality
                s = parse('[%s] FooBar - %s [FlexGet]' % (quality2.name, mock_ep1), name='FooBar')
                assert s.episode == int(mock_ep1), "confused episode %s with quality %s" % \
                    (mock_ep1, quality2.name)
def test_name_with_number(self, parse):
"""SeriesParser: test number in a name"""
parse('Storage 13 no ep number', name='Storage 13')
    def test_name_uncorrupted(self, parse):
        """SeriesParser: test name doesn't get corrupted when cleaned"""
        # Dots in the release must round-trip back to the original name.
        s = parse(name='The New Adventures of Old Christine',
                  data='The.New.Adventures.of.Old.Christine.S05E16.HDTV.XviD-FlexGet')
        assert s.name == 'The New Adventures of Old Christine'
        assert s.season == 5
        assert s.episode == 16
        assert s.quality.name == 'hdtv xvid'
def test_from_groups(self, parse):
"""SeriesParser: test from groups"""
s = parse('Test.S01E01-Group', name='Test', allow_groups=['xxxx', 'group'])
assert s.group == 'group', 'did not get group'
def test_group_dashes(self, parse):
"""SeriesParser: group name around extra dashes"""
s = parse('Test.S01E01-FooBar-Group', name='Test', allow_groups=['xxxx', 'group'])
assert s.group == 'group', 'did not get group with extra dashes'
def test_id_and_hash(self, parse):
"""SeriesParser: Series with confusing hash"""
s = parse(name='Something', data='Something 63 [560D3414]')
assert (s.id == 63), 'failed to parse %s' % s.data
s = parse(name='Something', data='Something 62 [293A8395]')
assert (s.id == 62), 'failed to parse %s' % s.data
def test_ticket_700(self, parse):
"""SeriesParser: confusing name (#700)"""
s = parse(name='Something', data='Something 9x02 - Episode 2')
assert s.season == 9, 'failed to parse season'
assert s.episode == 2, 'failed to parse episode'
    def test_date_id(self, parse):
        """SeriesParser: Series with dates"""
        # All common date spellings normalize to ISO YYYY-MM-DD.
        s = parse(name='Something', data='Something.2010.10.25')
        assert (s.identifier == '2010-10-25'), 'failed to parse %s' % s.data
        assert s.id_type == 'date'
        s = parse(name='Something', data='Something 2010-10-25')
        assert (s.identifier == '2010-10-25'), 'failed to parse %s' % s.data
        assert s.id_type == 'date'
        s = parse(name='Something', data='Something 10/25/2010')
        assert (s.identifier == '2010-10-25'), 'failed to parse %s' % s.data
        assert s.id_type == 'date'
        s = parse(name='Something', data='Something 25.10.2010')
        assert (s.identifier == '2010-10-25'), 'failed to parse %s' % s.data
        assert s.id_type == 'date'
        # February 1 is picked rather than January 2 because it is closer to now
        s = parse(name='Something', data='Something 1.2.11')
        assert s.identifier == '2011-02-01', 'failed to parse %s' % s.data
        assert s.id_type == 'date'
        # Future dates should not be considered dates
        s = parse(name='Something', data='Something 01.02.32')
        assert s.id_type != 'date'
        # Dates with parts used to be parsed as episodes.
        s = parse(name='Something', data='Something.2010.10.25, Part 2')
        assert (s.identifier == '2010-10-25'), 'failed to parse %s' % s.data
        assert s.id_type == 'date'
        # Text based dates
        s = parse(name='Something', data='Something (18th july 2013)')
        assert (s.identifier == '2013-07-18'), 'failed to parse %s' % s.data
        assert s.id_type == 'date'
        s = parse(name='Something', data='Something 2 mar 2013)')
        assert (s.identifier == '2013-03-02'), 'failed to parse %s' % s.data
        assert s.id_type == 'date'
        s = parse(name='Something', data='Something 1st february 1993)')
        assert (s.identifier == '1993-02-01'), 'failed to parse %s' % s.data
        assert s.id_type == 'date'
    def test_date_options(self, parse):
        """SeriesParser: date_yearfirst/date_dayfirst steer ambiguous dates."""
        # By default we should pick the latest interpretation
        s = parse(name='Something', data='Something 01-02-03')
        assert (s.identifier == '2003-02-01'), 'failed to parse %s' % s.data
        # Test it still works with both options specified
        s = parse(name='Something', data='Something 01-02-03', date_yearfirst=False, date_dayfirst=True)
        assert (s.identifier == '2003-02-01'), 'failed to parse %s' % s.data
        # If we specify yearfirst yes it should force another interpretation
        s = parse(name='Something', data='Something 01-02-03', date_yearfirst=True)
        assert (s.identifier == '2001-03-02'), 'failed to parse %s' % s.data
        # If we specify dayfirst no it should force the third interpretation
        s = parse(name='Something', data='Something 01-02-03', date_dayfirst=False)
        assert (s.identifier == '2003-01-02'), 'failed to parse %s' % s.data
def test_season_title_episode(self, parse):
"""SeriesParser: Series with title between season and episode"""
s = parse(name='Something', data='Something.S5.Drunk.Santa.Part1')
assert s.season == 5, 'failed to parse season'
assert s.episode == 1, 'failed to parse episode'
def test_specials(self, parse):
"""SeriesParser: Special episodes with no id"""
s = parse(name='The Show', data='The Show 2005 A Christmas Carol 2010 Special 720p HDTV x264')
assert s.valid, 'Special episode should be valid'
def test_double_episodes(self, parse):
s = parse(name='Something', data='Something.S04E05-06')
assert s.season == 4, 'failed to parse season'
assert s.episode == 5, 'failed to parse episode'
assert s.episodes == 2, 'failed to parse episode range'
s = parse(name='Something', data='Something.S04E05-E06')
assert s.season == 4, 'failed to parse season'
assert s.episode == 5, 'failed to parse episode'
assert s.episodes == 2, 'failed to parse episode range'
s = parse(name='Something', data='Something.S04E05E06')
assert s.season == 4, 'failed to parse season'
assert s.episode == 5, 'failed to parse episode'
assert s.episodes == 2, 'failed to parse episode range'
s = parse(name='Something', data='Something.4x05-06')
assert s.season == 4, 'failed to parse season'
assert s.episode == 5, 'failed to parse episode'
assert s.episodes == 2, 'failed to parse episode range'
# Test that too large a range is not accepted
s = parse(name='Something', data='Something.S04E05-09')
assert not s.valid, 'large episode range should not be valid'
# Make sure regular identifier doesn't have end_episode
s = parse(name='Something', data='Something.S04E05')
assert s.episodes == 1, 'should not have detected end_episode'
def test_and_replacement(self, parse):
titles = ['Alpha.&.Beta.S01E02.hdtv', 'alpha.and.beta.S01E02.hdtv', 'alpha&beta.S01E02.hdtv']
for title in titles:
s = parse(name='Alpha & Beta', data=title)
assert s.valid
s = parse(name='Alpha and Beta', data=title)
assert s.valid
# Test 'and' isn't replaced within a word
s = parse(name='Sandy Dunes', data='S&y Dunes.S01E01.hdtv')
assert not s.valid
def test_unicode(self, parse):
s = parse(name=u'abc äää abc', data=u'abc.äää.abc.s01e02')
assert s.season == 1
assert s.episode == 2
def test_parentheticals(self, parse):
s = parse('The Show (US)', name="The Show (US)")
# Make sure US is ok outside of parentheses
s = parse('The.Show.US.S01E01', name="The Show (US)")
assert s.valid
# Make sure US is ok inside parentheses
s = parse('The Show (US) S01E01', name="The Show (US)")
assert s.valid
# Make sure it works without US
s = parse('The.Show.S01E01', name="The Show (US)")
assert s.valid
# Make sure it doesn't work with a different country
s = parse('The Show (UK) S01E01', name="The Show (US)")
assert not s.valid
def test_id_regexps(self, parse):
id_regexps = ['(dog)?e(cat)?']
s = parse('The Show dogecat', name='The Show', id_regexps=id_regexps)
assert s.valid
assert s.id == 'dog-cat'
s = parse('The Show doge', name='The Show', id_regexps=id_regexps)
assert s.valid
assert s.id == 'dog'
s = parse('The Show ecat', name='The Show', id_regexps=id_regexps)
assert s.valid
assert s.id == 'cat'
# assert_raises(ParseWarning, s.parse, 'The Show e')
def test_apostrophe(self, parse):
s = parse(name=u"FlexGet's show", data=u"FlexGet's show s01e01")
assert s.valid
s = parse(name=u"FlexGet's show", data=u"FlexGets show s01e01")
assert s.valid
s = parse(name=u"FlexGet's show", data=u"FlexGet s show s01e01")
assert s.valid
s = parse(name=u"FlexGet's show", data=u"FlexGet show s01e01")
assert not s.valid
# bad data with leftover escaping
s = parse(name=u"FlexGet's show", data=u"FlexGet\\'s show s01e01")
assert s.valid
def test_alternate_names(self, parse):
name = 'The Show'
alternate_names = ['Show', 'Completely Different']
s = parse('The Show S01E01', name=name, alternate_names=alternate_names)
assert s.valid
s = parse('Show S01E01', name=name, alternate_names=alternate_names)
assert s.valid
s = parse('Completely.Different.S01E01', name=name, alternate_names=alternate_names)
assert s.valid
s = parse('Not The Show S01E01', name=name, alternate_names=alternate_names)
assert not s.valid
def test_long_season(self, parse):
"""SeriesParser: long season ID Ticket #2197"""
s = parse(name='FlexGet', data='FlexGet.US.S2013E14.Title.Here.720p.HDTV.AAC5.1.x264-NOGRP')
assert s.season == 2013
assert s.episode == 14
assert s.quality.name == '720p hdtv h264 aac'
assert not s.proper, 'detected proper'
s = parse(name='FlexGet', data='FlexGet.Series.2013.14.of.21.Title.Here.720p.HDTV.AAC5.1.x264-NOGRP')
assert s.season == 2013
assert s.episode == 14
assert s.quality.name == '720p hdtv h264 aac'
assert not s.proper, 'detected proper'
| {
"content_hash": "f2b820aac81b7f6f856d7bc319ae5758",
"timestamp": "",
"source": "github",
"line_count": 605,
"max_line_length": 114,
"avg_line_length": 49.06942148760331,
"alnum_prop": 0.6067639033920571,
"repo_name": "jacobmetrick/Flexget",
"id": "0df3eb748f679e24c4bc92de776a87ab1d1e8f0b",
"size": "29717",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "flexget/tests/test_seriesparser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "HTML",
"bytes": "78933"
},
{
"name": "JavaScript",
"bytes": "261421"
},
{
"name": "Python",
"bytes": "3090372"
},
{
"name": "SRecode Template",
"bytes": "3"
}
],
"symlink_target": ""
} |
"""
This script contains unit test of the :mod: 'rmgpy.transport' module and :mod: 'rmgpy.data.transport' module
"""
import unittest
import os
import rmgpy.constants as constants
from rmgpy import settings
from rmgpy.species import Species
from rmgpy.molecule.molecule import Molecule
from rmgpy.quantity import DipoleMoment, Length, Volume, Energy
from rmgpy.transport import TransportData
from rmgpy.data.transport import CriticalPointGroupContribution, TransportDatabase
#################################################################################
class TestTransportData(unittest.TestCase):
    """
    Contains unit tests of the :class:`TransportData` class.
    """

    def setUp(self):
        """
        A function run before each unit test in this class.
        """
        self.shapeIndex = 1
        self.epsilon = Energy(2.104, 'kJ/mol')
        self.sigma = Length(3.402, 'angstroms')
        self.dipoleMoment = DipoleMoment(1.000, 'C*m')
        self.polarizability = Volume(0.134, 'angstroms^3')
        self.rotrelaxcollnum = 0.000
        self.comment = 'test'
        self.transport = TransportData(
            shapeIndex=self.shapeIndex,
            epsilon=self.epsilon,
            sigma=self.sigma,
            dipoleMoment=self.dipoleMoment,
            polarizability=self.polarizability,
            rotrelaxcollnum=self.rotrelaxcollnum,
            comment=self.comment,
        )

    def test_shapeIndex(self):
        """
        Test that the TransportData shapeIndex property was properly set.
        """
        self.assertAlmostEqual(self.transport.shapeIndex, self.shapeIndex, 6)

    def test_epsilon(self):
        """
        Test that the TransportData epsilon property was properly set.
        """
        self.assertAlmostEqual(self.transport.epsilon.value_si, self.epsilon.value_si, 6)

    def test_sigma(self):
        """
        Test that the TransportData sigma property was properly set.
        """
        # Compare in angstrom-scale units so the tolerance is meaningful.
        self.assertAlmostEqual(self.transport.sigma.value_si * 1e10, self.sigma.value_si * 1e10, 6)

    def test_dipoleMoment(self):
        """
        Test that the TransportData dipoleMoment property was properly set.
        """
        self.assertAlmostEqual(self.transport.dipoleMoment.value_si, self.dipoleMoment.value_si, 6)

    def test_polarizability(self):
        """
        Test that the TransportData polarizability property was properly set.
        """
        self.assertAlmostEqual(self.transport.polarizability.value_si, self.polarizability.value_si, 6)

    def test_rotrelaxcollnum(self):
        """
        Test that the TransportData rotrelaxcollnum property was properly set.
        """
        self.assertAlmostEqual(self.transport.rotrelaxcollnum, self.rotrelaxcollnum, 6)

    def test_comment(self):
        """
        Test that the TransportData comment property was properly set.
        """
        self.assertEqual(self.transport.comment, self.comment)

    def test_getCollisionFrequency(self):
        """
        Test the LennardJones.getCollisionFrequency() method.
        """
        T = 1000
        P = 1.0e5
        M = P / constants.R / T  # ideal-gas molar concentration, mol/m^3
        mu = 1.0
        omega = self.transport.getCollisionFrequency(T, M, mu)
        self.assertAlmostEqual(omega / 1.17737e10, 1.0, 4)

    def test_pickle(self):
        """
        Test that a TransportData object can be pickled and unpickled with no loss of information.
        """
        # cPickle only exists on Python 2; fall back to pickle on Python 3.
        try:
            import cPickle as pickle
        except ImportError:
            import pickle
        transport = pickle.loads(pickle.dumps(self.transport, -1))
        self.assertAlmostEqual(self.transport.shapeIndex, transport.shapeIndex, 4)
        self.assertAlmostEqual(self.transport.epsilon.value_si, transport.epsilon.value_si, 4)
        self.assertAlmostEqual(self.transport.sigma.value_si, transport.sigma.value_si, 4)
        self.assertAlmostEqual(self.transport.dipoleMoment.value_si, transport.dipoleMoment.value_si, 4)
        self.assertAlmostEqual(self.transport.polarizability.value_si, transport.polarizability.value_si, 4)
        self.assertAlmostEqual(self.transport.rotrelaxcollnum, transport.rotrelaxcollnum, 4)
        self.assertEqual(self.transport.comment, transport.comment)

    def test_repr(self):
        """
        Test that a TransportData object can be reconstructed from its repr() output with no loss of information
        """
        # eval() works on both Python 2 and 3; exec() of an assignment does
        # not rebind a function local on Python 3, which would leave
        # ``transport`` as None and make the assertions meaningless.
        transport = eval(repr(self.transport))
        self.assertAlmostEqual(self.transport.shapeIndex, transport.shapeIndex, 4)
        self.assertAlmostEqual(self.transport.epsilon.value_si, transport.epsilon.value_si, 4)
        self.assertAlmostEqual(self.transport.sigma.value_si, transport.sigma.value_si, 4)
        self.assertAlmostEqual(self.transport.dipoleMoment.value_si, transport.dipoleMoment.value_si, 4)
        self.assertAlmostEqual(self.transport.polarizability.value_si, transport.polarizability.value_si, 4)
        self.assertAlmostEqual(self.transport.rotrelaxcollnum, transport.rotrelaxcollnum, 4)
        self.assertEqual(self.transport.comment, transport.comment)
class TestCriticalPointGroupContribution(unittest.TestCase):
    """
    Contains unit tests of the :class:`CriticalPointGroupContribution` class.
    """

    def setUp(self):
        """
        A function run before each unit test in this class.
        """
        self.Tc = 0.0141
        self.Pc = -.0012
        self.Vc = 65
        self.Tb = 23.58
        self.structureIndex = 1
        self.criticalPointContribution = CriticalPointGroupContribution(
            Tc=self.Tc,
            Pc=self.Pc,
            Vc=self.Vc,
            Tb=self.Tb,
            structureIndex=self.structureIndex,
        )

    def test_Tc(self):
        """
        Test that the CriticalPointGroupContribution Tc property was properly set.
        """
        self.assertAlmostEqual(self.criticalPointContribution.Tc, self.Tc, 6)

    def test_Pc(self):
        """
        Test that the CriticalPointGroupContribution Pc property was properly set.
        """
        self.assertAlmostEqual(self.criticalPointContribution.Pc, self.Pc, 6)

    def test_Vc(self):
        """
        Test that the CriticalPointGroupContribution Vc property was properly set.
        """
        self.assertAlmostEqual(self.criticalPointContribution.Vc, self.Vc, 6)

    def test_Tb(self):
        """
        Test that the CriticalPointGroupContribution Tb property was properly set.
        """
        self.assertAlmostEqual(self.criticalPointContribution.Tb, self.Tb, 6)

    def test_structureIndex(self):
        """
        Test that the CriticalPointGroupContribution structureIndex property was properly set.
        """
        self.assertAlmostEqual(self.criticalPointContribution.structureIndex, self.structureIndex, 6)

    def test_pickle(self):
        """
        Test that a CriticalPointGroupContribution object can be pickled and unpickled with no loss of information.
        """
        # cPickle only exists on Python 2; fall back to pickle on Python 3.
        try:
            import cPickle as pickle
        except ImportError:
            import pickle
        criticalPointContribution = pickle.loads(
            pickle.dumps(self.criticalPointContribution, -1))
        self.assertAlmostEqual(self.criticalPointContribution.Tc, criticalPointContribution.Tc, 4)
        self.assertAlmostEqual(self.criticalPointContribution.Pc, criticalPointContribution.Pc, 4)
        self.assertAlmostEqual(self.criticalPointContribution.Vc, criticalPointContribution.Vc, 4)
        self.assertAlmostEqual(self.criticalPointContribution.Tb, criticalPointContribution.Tb, 4)
        self.assertAlmostEqual(self.criticalPointContribution.structureIndex, criticalPointContribution.structureIndex, 4)

    def test_repr(self):
        """
        Test that a CriticalPointGroupContribution object can be reconstructed from its repr() output with no loss of information
        """
        # eval() works on both Python 2 and 3; exec() of an assignment does
        # not rebind a function local on Python 3, which would leave the
        # variable as None and make the assertions meaningless.
        criticalPointContribution = eval(repr(self.criticalPointContribution))
        self.assertAlmostEqual(self.criticalPointContribution.Tc, criticalPointContribution.Tc, 4)
        self.assertAlmostEqual(self.criticalPointContribution.Pc, criticalPointContribution.Pc, 4)
        self.assertAlmostEqual(self.criticalPointContribution.Vc, criticalPointContribution.Vc, 4)
        self.assertAlmostEqual(self.criticalPointContribution.Tb, criticalPointContribution.Tb, 4)
        self.assertAlmostEqual(self.criticalPointContribution.structureIndex, criticalPointContribution.structureIndex, 4)
class TestTransportDatabase(unittest.TestCase):
    """
    Contains unit tests of the :class:`TransportDatabase` class.
    """

    def setUp(self):
        """
        A function run before each unit test in this class.
        """
        self.libraries = ['GRI-Mech', 'PrimaryTransportLibrary']
        self.groups = ['ring', 'nonring']
        # NOTE(review): set but never read in this test case; kept for
        # backward compatibility in case subclasses rely on it.
        self.libraryOrder = []
        path = os.path.join(settings['database.directory'], 'transport')
        self.transportdb = TransportDatabase()
        self.transportdb.load(path, self.libraries)

    def testJoback(self):
        """Test transport property estimation via Joback group contributions."""
        # Expected values calculated from Joback's estimations. ``None``
        # means "no expectation": we only check the call does not crash.
        test_cases = [
            ['acetone', 'CC(=O)C', Length(5.36421, 'angstroms'), Energy(3.20446, 'kJ/mol'),
             "Epsilon & sigma estimated with Tc=500.53 K, Pc=47.11 bar (from Joback method)"],
            ['cyclopenta-1,2-diene', 'C1=C=CCC1', None, None, None],
            ['benzene', 'c1ccccc1', None, None, None],
        ]
        for name, smiles, sigma, epsilon, comment in test_cases:
            molecule = Molecule(SMILES=smiles)
            species = Species(molecule=[molecule])
            transportData, blank, blank2 = self.transportdb.getTransportPropertiesViaGroupEstimates(species)
            # Check Joback worked; assertEqual gives a useful diff on failure.
            if comment:
                self.assertEqual(transportData.comment, comment)
            if sigma:
                self.assertAlmostEqual(transportData.sigma.value_si * 1e10, sigma.value_si * 1e10, 4)
            if epsilon:
                self.assertAlmostEqual(transportData.epsilon.value_si, epsilon.value_si, 1)

    def testJobackOnBenzeneBonds(self):
        """Test Joback doesn't crash on the Cb description of benzene."""
        adjlist = """
1 C u0 p0 {2,D} {6,S} {7,S}
2 C u0 p0 {1,D} {3,S} {8,S}
3 C u0 p0 {2,S} {4,D} {9,S}
4 C u0 p0 {3,D} {5,S} {10,S}
5 C u0 p0 {4,S} {6,D} {11,S}
6 C u0 p0 {1,S} {5,D} {12,S}
7 H u0 p0 {1,S}
8 H u0 p0 {2,S}
9 H u0 p0 {3,S}
10 H u0 p0 {4,S}
11 H u0 p0 {5,S}
12 H u0 p0 {6,S}
"""
        m = Molecule().fromAdjacencyList(adjlist)
        species = Species(molecule=[m])
        transportData, blank, blank2 = self.transportdb.getTransportPropertiesViaGroupEstimates(species)
        self.assertIsNotNone(transportData)
# Allow running this test module directly with verbose per-test output.
if __name__ == '__main__':
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| {
"content_hash": "2267b05cb7d2a73c371a7db5803c4079",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 173,
"avg_line_length": 43.13513513513514,
"alnum_prop": 0.6504654493376298,
"repo_name": "enochd/RMG-Py",
"id": "e979a9659ed43c399581dadd41a4f9d32488f6d6",
"size": "12616",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rmgpy/transportDataTest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3650"
},
{
"name": "Makefile",
"bytes": "3781"
},
{
"name": "Python",
"bytes": "3139323"
},
{
"name": "Shell",
"bytes": "8634"
}
],
"symlink_target": ""
} |
# Declare this package as a namespace package: extend_path() merges in any
# other installed distributions that provide submodules under the same
# package name, so they can live in separate directories on sys.path.
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
| {
"content_hash": "5308efd3db8318418541a5d73c5350a2",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 42,
"avg_line_length": 37.5,
"alnum_prop": 0.6533333333333333,
"repo_name": "aroth-arsoft/arsoft-trac-commitupdater",
"id": "ce0807f222efad092116666464644efc289d883f",
"size": "226",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "arsoft/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "45710"
}
],
"symlink_target": ""
} |
"""Add missing constraints for threats
Revision ID: 62f26762d0a
Revises: 2837682ad516
Create Date: 2015-11-12 16:10:29.579969
"""
from alembic import op
# from ggrc.migrations.utils.resolve_duplicates import resolve_duplicates
# from ggrc_risks.models import Threat
# revision identifiers, used by Alembic.
revision = '62f26762d0a'
down_revision = '2837682ad516'
def upgrade():
    """Add a uniqueness constraint on threat slugs."""
    # resolve_duplicates(Threat, 'slug')
    op.create_unique_constraint(
        constraint_name='uq_threats',
        table_name='threats',
        columns=['slug'],
    )
def downgrade():
    """Drop the uniqueness constraint on threat slugs."""
    # The third positional argument of drop_constraint is type_, so this is
    # equivalent to the positional 'unique' argument.
    op.drop_constraint('uq_threats', 'threats', type_='unique')
| {
"content_hash": "ead4f76e38751496eb6b40ecb3e2bdf8",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 73,
"avg_line_length": 23.458333333333332,
"alnum_prop": 0.7460035523978685,
"repo_name": "AleksNeStu/ggrc-core",
"id": "f3e633817d8e9166c4d875c999bd731f1d7d5da1",
"size": "676",
"binary": false,
"copies": "1",
"ref": "refs/heads/release/0.10-Raspberry",
"path": "src/ggrc_risks/migrations/versions/20151112161029_62f26762d0a_add_missing_constraints_for_threats.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "221201"
},
{
"name": "HTML",
"bytes": "1055542"
},
{
"name": "JavaScript",
"bytes": "1872353"
},
{
"name": "Makefile",
"bytes": "7044"
},
{
"name": "Mako",
"bytes": "4320"
},
{
"name": "Python",
"bytes": "2700938"
},
{
"name": "Shell",
"bytes": "31273"
}
],
"symlink_target": ""
} |
'''
Code for handling files that are band interleaved by pixel (BIP).
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import array
import logging
import numpy as np
import os
import sys
import spectral as spy
from .spyfile import SpyFile, MemmapFile
from spectral.utilities.python23 import typecode, tobytes, frombytes
byte_typecode = typecode('b')
class BipFile(SpyFile, MemmapFile):
    '''
    A class to interface image files stored with bands interleaved by pixel.
    '''
    def __init__(self, params, metadata=None):
        self.interleave = spy.BIP
        if metadata is None:
            metadata = {}
        SpyFile.__init__(self, params, metadata)
        self._memmap = self._open_memmap('r')

    def _open_memmap(self, mode):
        '''Return a numpy memmap over the image data, or None if unavailable.'''
        logger = logging.getLogger('spectral')
        if os.path.getsize(self.filename) < sys.maxsize:
            try:
                # Unpacking validates that the image shape is 3-dimensional
                # before handing it to np.memmap.
                (R, C, B) = self.shape
                return np.memmap(self.filename, dtype=self.dtype, mode=mode,
                                 offset=self.offset, shape=self.shape)
            except Exception:
                # Fall back to direct file reads; a bare ``except`` here
                # previously swallowed even KeyboardInterrupt/SystemExit.
                logger.debug('Unable to create memmap interface.')
                return None
        else:
            return None

    def read_band(self, band, use_memmap=True):
        '''Reads a single band from the image.

        Arguments:

            `band` (int):

                Index of band to read.

            `use_memmap` (bool, default True):

                Specifies whether the file's memmap interface should be used
                to read the data. Setting this arg to True only has an effect
                if a memmap is being used (i.e., if `img.using_memmap` is True).

        Returns:

           :class:`numpy.ndarray`

                An `MxN` array of values for the specified band.
        '''
        if self._memmap is not None and use_memmap is True:
            data = np.array(self._memmap[:, :, band])
            if self.scale_factor != 1:
                data = data / float(self.scale_factor)
            return data

        vals = array.array(byte_typecode)
        # Bytes to skip between consecutive samples of the same band.
        delta = self.sample_size * (self.nbands - 1)
        nVals = self.nrows * self.ncols
        sample_size = self.sample_size

        f = self.fid
        f.seek(self.offset + self.sample_size * band, 0)

        # Pixel format is BIP: read one sample, then skip the other bands.
        # The final sample is read outside the loop so we never seek past EOF.
        for _ in range(nVals - 1):
            vals.fromfile(f, sample_size)
            f.seek(delta, 1)
        vals.fromfile(f, sample_size)

        arr = np.frombuffer(tobytes(vals), dtype=self.dtype)
        arr = arr.reshape(self.nrows, self.ncols)

        if self.scale_factor != 1:
            return arr / float(self.scale_factor)
        return arr

    def read_bands(self, bands, use_memmap=True):
        '''Reads multiple bands from the image.

        Arguments:

            `bands` (list of ints):

                Indices of bands to read.

            `use_memmap` (bool, default True):

                Specifies whether the file's memmap interface should be used
                to read the data. Setting this arg to True only has an effect
                if a memmap is being used (i.e., if `img.using_memmap` is True).

        Returns:

           :class:`numpy.ndarray`

                An `MxNxL` array of values for the specified bands. `M` and `N`
                are the number of rows & columns in the image and `L` equals
                len(`bands`).
        '''
        if self._memmap is not None and use_memmap is True:
            data = np.array(self._memmap[:, :, bands])
            if self.scale_factor != 1:
                data = data / float(self.scale_factor)
            return data

        vals = array.array(byte_typecode)
        offset = self.offset
        delta = self.sample_size * self.nbands
        nVals = self.nrows * self.ncols
        sample_size = self.sample_size

        # Byte offset of each requested band within a pixel record.
        delta_b = [b * self.sample_size for b in bands]

        f = self.fid

        # Pixel format is BIP: seek to each requested band of each pixel.
        for i in range(nVals):
            pixelOffset = offset + i * delta
            for db in delta_b:
                f.seek(pixelOffset + db, 0)  # Next band
                vals.fromfile(f, sample_size)

        arr = np.frombuffer(tobytes(vals), dtype=self.dtype)
        arr = arr.reshape(self.nrows, self.ncols, len(bands))

        if self.scale_factor != 1:
            return arr / float(self.scale_factor)
        return arr

    def read_pixel(self, row, col, use_memmap=True):
        '''Reads the pixel at position (row,col) from the file.

        Arguments:

            `row`, `col` (int):

                Indices of the row & column for the pixel

            `use_memmap` (bool, default True):

                Specifies whether the file's memmap interface should be used
                to read the data. Setting this arg to True only has an effect
                if a memmap is being used (i.e., if `img.using_memmap` is True).

        Returns:

           :class:`numpy.ndarray`

                A length-`B` array, where `B` is the number of image bands.
        '''
        if self._memmap is not None and use_memmap is True:
            data = np.array(self._memmap[row, col, :])
            if self.scale_factor != 1:
                data = data / float(self.scale_factor)
            return data

        vals = array.array(byte_typecode)
        f = self.fid
        f.seek(self.offset + self.sample_size
               * self.nbands * (row * self.ncols + col), 0)
        # Pixel format is BIP so read entire pixel.
        vals.fromfile(f, self.nbands * self.sample_size)

        pixel = np.frombuffer(tobytes(vals), dtype=self.dtype)

        if self.scale_factor != 1:
            return pixel / float(self.scale_factor)
        return pixel

    def read_subregion(self, row_bounds, col_bounds, bands=None,
                       use_memmap=True):
        '''
        Reads a contiguous rectangular sub-region from the image.

        Arguments:

            `row_bounds` (2-tuple of ints):

                (a, b) -> Rows a through b-1 will be read.

            `col_bounds` (2-tuple of ints):

                (a, b) -> Columnss a through b-1 will be read.

            `bands` (list of ints):

                Optional list of bands to read.  If not specified, all bands
                are read.

            `use_memmap` (bool, default True):

                Specifies whether the file's memmap interface should be used
                to read the data. Setting this arg to True only has an effect
                if a memmap is being used (i.e., if `img.using_memmap` is True).

        Returns:

           :class:`numpy.ndarray`

                An `MxNxL` array.
        '''
        if self._memmap is not None and use_memmap is True:
            if bands is None:
                data = np.array(self._memmap[row_bounds[0]: row_bounds[1],
                                             col_bounds[0]: col_bounds[1], :])
            else:
                data = np.array(self._memmap[row_bounds[0]: row_bounds[1],
                                             col_bounds[0]: col_bounds[1],
                                             bands])
            if self.scale_factor != 1:
                data = data / float(self.scale_factor)
            return data

        offset = self.offset
        nbands = self.nbands
        nSubRows = row_bounds[1] - row_bounds[0]  # Rows in sub-image
        nSubCols = col_bounds[1] - col_bounds[0]  # Cols in sub-image
        d_row = self.sample_size * self.ncols * self.nbands
        colStartPos = col_bounds[0] * self.sample_size * self.nbands
        vals = array.array(byte_typecode)
        sample_size = self.sample_size

        # Byte offset of each requested band within a pixel record.
        if bands is not None:
            allBands = 0
            nSubBands = len(bands)
            delta_b = [b * self.sample_size for b in bands]
        else:
            allBands = 1
            nSubBands = self.nbands

        f = self.fid

        # Pixel format is BIP
        for i in range(row_bounds[0], row_bounds[1]):
            f.seek(offset + i * d_row + colStartPos, 0)
            rowPos = f.tell()

            if allBands:
                # This is the simple one: whole pixels are contiguous.
                vals.fromfile(f, nSubCols * nbands * sample_size)
            else:
                # Need to pull out specific bands for each column.
                for j in range(nSubCols):
                    f.seek(rowPos + j * self.sample_size * self.nbands, 0)
                    pixelPos = f.tell()
                    for db in delta_b:
                        f.seek(pixelPos + db, 0)  # Next band
                        vals.fromfile(f, sample_size)

        arr = np.frombuffer(tobytes(vals), dtype=self.dtype)
        arr = arr.reshape(nSubRows, nSubCols, nSubBands)

        if self.scale_factor != 1:
            return arr / float(self.scale_factor)
        return arr

    def read_subimage(self, rows, cols, bands=None, use_memmap=False):
        '''
        Reads arbitrary rows, columns, and bands from the image.

        Arguments:

            `rows` (list of ints):

                Indices of rows to read.

            `cols` (list of ints):

                Indices of columns to read.

            `bands` (list of ints):

                Optional list of bands to read.  If not specified, all bands
                are read.

            `use_memmap` (bool, default False):

                Specifies whether the file's memmap interface should be used
                to read the data. Setting this arg to True only has an effect
                if a memmap is being used (i.e., if `img.using_memmap` is True).

        Returns:

           :class:`numpy.ndarray`

                An `MxNxL` array, where `M` = len(`rows`), `N` = len(`cols`),
                and `L` = len(bands) (or # of image bands if `bands` == None).
        '''
        if self._memmap is not None and use_memmap is True:
            if bands is None:
                data = np.array(self._memmap.take(rows, 0).take(cols, 1))
            else:
                data = np.array(
                    self._memmap.take(rows, 0).take(cols, 1).take(bands, 2))
            if self.scale_factor != 1:
                data = data / float(self.scale_factor)
            return data

        offset = self.offset
        nSubRows = len(rows)  # Rows in sub-image
        nSubCols = len(cols)  # Cols in sub-image
        d_band = self.sample_size
        d_col = d_band * self.nbands
        d_row = d_col * self.ncols
        vals = array.array(byte_typecode)
        sample_size = self.sample_size

        if bands is not None:
            allBands = 0
            nSubBands = len(bands)
        else:
            allBands = 1
            bands = list(range(self.nbands))
            nSubBands = self.nbands

        f = self.fid

        # Pixel format is BIP: seek to each (row, col[, band]) individually.
        for i in rows:
            for j in cols:
                if allBands:
                    f.seek(offset + i * d_row + j * d_col, 0)
                    vals.fromfile(f, nSubBands * sample_size)
                else:
                    for k in bands:
                        f.seek(offset +
                               i * d_row +
                               j * d_col +
                               k * d_band, 0)
                        vals.fromfile(f, sample_size)

        arr = np.frombuffer(tobytes(vals), dtype=self.dtype)
        arr = arr.reshape(nSubRows, nSubCols, nSubBands)

        if self.scale_factor != 1:
            return arr / float(self.scale_factor)
        return arr

    def read_datum(self, i, j, k, use_memmap=True):
        '''Reads the band `k` value for pixel at row `i` and column `j`.

        Arguments:

            `i`, `j`, `k` (integer):

                Row, column and band index, respectively.

            `use_memmap` (bool, default True):

                Specifies whether the file's memmap interface should be used
                to read the data. Setting this arg to True only has an effect
                if a memmap is being used (i.e., if `img.using_memmap` is True).

        Using this function is not an efficient way to iterate over bands or
        pixels. For such cases, use readBands or readPixel instead.
        '''
        if self._memmap is not None and use_memmap is True:
            datum = self._memmap[i, j, k]
            if self.scale_factor != 1:
                datum /= float(self.scale_factor)
            return datum

        vals = array.array(byte_typecode)
        f = self.fid
        f.seek(self.offset + self.sample_size
               * (self.nbands * (i * self.ncols + j) + k), 0)
        # Pixel format is BIP so read entire pixel.
        vals.fromfile(f, self.sample_size)
        arr = np.frombuffer(tobytes(vals), dtype=self.dtype)
        # NOTE(review): unlike the memmap branch above, this path always
        # divides by scale_factor (even when it is 1), so it always returns
        # a float -- preserved for backward compatibility.
        return arr.tolist()[0] / float(self.scale_factor)
| {
"content_hash": "5204377de3ae459a1a0da8bb23900849",
"timestamp": "",
"source": "github",
"line_count": 395,
"max_line_length": 82,
"avg_line_length": 33.87341772151899,
"alnum_prop": 0.523542600896861,
"repo_name": "spectralpython/spectral",
"id": "9ee92e1f91080fbec20b391ed7a09a70448556da",
"size": "13380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spectral/io/bipfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PLSQL",
"bytes": "10976"
},
{
"name": "Python",
"bytes": "628229"
}
],
"symlink_target": ""
} |
"""Tests for the ironic driver."""
from ironicclient import exc as ironic_exception
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_utils import uuidutils
import six
from testtools.matchers import HasLength
from nova.api.metadata import base as instance_metadata
from nova.compute import power_state as nova_states
from nova.compute import task_states
from nova.compute import vm_states
from nova import context as nova_context
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit import utils
from nova.tests.unit.virt.ironic import utils as ironic_utils
from nova.virt import configdrive
from nova.virt import driver
from nova.virt import fake
from nova.virt import firewall
from nova.virt import hardware
from nova.virt.ironic import client_wrapper as cw
from nova.virt.ironic import driver as ironic_driver
from nova.virt.ironic import ironic_states
CONF = cfg.CONF

# Config overrides applied to every test case via self.flags().
IRONIC_FLAGS = dict(
    api_version=1,
    group='ironic',
)

# Single fake Ironic client instance shared by all tests in this module.
FAKE_CLIENT = ironic_utils.FakeClient()
class FakeClientWrapper(cw.IronicClientWrapper):
    """Client wrapper that always hands back the shared fake Ironic client."""
    def _get_client(self):
        return FAKE_CLIENT
class FakeLoopingCall(object):
    """Stand-in for an oslo_service looping call: start() returns the fake
    itself so chained ``.start(...).wait()`` calls work."""
    def __init__(self):
        self.wait = mock.MagicMock()
        self.start = mock.MagicMock(return_value=self)
def _get_properties():
return {'cpus': 2,
'memory_mb': 512,
'local_gb': 10,
'cpu_arch': 'x86_64',
'capabilities': None}
def _get_stats():
return {'cpu_arch': 'x86_64'}
FAKE_CLIENT_WRAPPER = FakeClientWrapper()
@mock.patch.object(cw, 'IronicClientWrapper', lambda *_: FAKE_CLIENT_WRAPPER)
class IronicDriverTestCase(test.NoDBTestCase):
@mock.patch.object(cw, 'IronicClientWrapper',
                   lambda *_: FAKE_CLIENT_WRAPPER)
def setUp(self):
    """Build an IronicDriver wired to the fake client wrapper."""
    super(IronicDriverTestCase, self).setUp()
    self.flags(**IRONIC_FLAGS)

    # set client log config to exercise the code that manipulates it
    CONF.set_override('client_log_level', 'DEBUG', group='ironic')

    self.driver = ironic_driver.IronicDriver(None)
    self.driver.virtapi = fake.FakeVirtAPI()
    self.ctx = nova_context.get_admin_context()
    self.instance_uuid = uuidutils.generate_uuid()

    # mock retries configs to avoid sleeps and make tests run quicker
    CONF.set_default('api_max_retries', default=1, group='ironic')
    CONF.set_default('api_retry_interval', default=0, group='ironic')
def test_public_api_signatures(self):
    """The driver's public methods match the ComputeDriver base signatures."""
    self.assertPublicAPISignatures(driver.ComputeDriver(None), self.driver)
def test_validate_driver_loading(self):
    """setUp produced an actual IronicDriver instance."""
    self.assertIsInstance(self.driver, ironic_driver.IronicDriver)
def test_driver_capabilities(self):
    """The driver advertises neither image cache nor recreate support."""
    # Loop over the capabilities that must be disabled; the original
    # hand-rolled failure messages were missing a space before 'is invalid'.
    for capability in ('has_imagecache', 'supports_recreate'):
        self.assertFalse(
            self.driver.capabilities[capability],
            "Driver capabilities for '%s' is invalid" % capability)
def test__get_hypervisor_type(self):
    """Hypervisor type is the fixed string 'ironic'."""
    self.assertEqual('ironic', self.driver._get_hypervisor_type())
def test__get_hypervisor_version(self):
    """Hypervisor version is the fixed value 1."""
    self.assertEqual(1, self.driver._get_hypervisor_version())
@mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
def test__validate_instance_and_node(self, mock_gbiui):
    """Looking up by instance uuid returns the node owning that instance."""
    node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
    node = ironic_utils.get_test_node(uuid=node_uuid,
                                      instance_uuid=self.instance_uuid)
    instance = fake_instance.fake_instance_obj(self.ctx,
                                               uuid=self.instance_uuid)
    ironicclient = cw.IronicClientWrapper()

    mock_gbiui.return_value = node
    result = ironic_driver._validate_instance_and_node(ironicclient,
                                                       instance)
    self.assertEqual(result.uuid, node_uuid)
@mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
def test__validate_instance_and_node_failed(self, mock_gbiui):
    """A NotFound from Ironic is translated to InstanceNotFound."""
    ironicclient = cw.IronicClientWrapper()
    mock_gbiui.side_effect = ironic_exception.NotFound()
    instance = fake_instance.fake_instance_obj(self.ctx,
                                               uuid=self.instance_uuid)
    self.assertRaises(exception.InstanceNotFound,
                      ironic_driver._validate_instance_and_node,
                      ironicclient, instance)
@mock.patch.object(objects.Instance, 'refresh')
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
def test__wait_for_active_pass(self, fake_validate, fake_refresh):
    """While the node is still DEPLOYING, one poll iteration just refreshes."""
    instance = fake_instance.fake_instance_obj(self.ctx,
                                               uuid=uuidutils.generate_uuid())
    node = ironic_utils.get_test_node(
        provision_state=ironic_states.DEPLOYING)

    fake_validate.return_value = node
    self.driver._wait_for_active(FAKE_CLIENT, instance)
    fake_validate.assert_called_once_with(FAKE_CLIENT, instance)
    fake_refresh.assert_called_once_with()
@mock.patch.object(objects.Instance, 'refresh')
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
def test__wait_for_active_done(self, fake_validate, fake_refresh):
    """An ACTIVE node ends the poll loop by raising LoopingCallDone."""
    instance = fake_instance.fake_instance_obj(self.ctx,
                                               uuid=uuidutils.generate_uuid())
    node = ironic_utils.get_test_node(
        provision_state=ironic_states.ACTIVE)

    fake_validate.return_value = node
    self.assertRaises(loopingcall.LoopingCallDone,
                      self.driver._wait_for_active,
                      FAKE_CLIENT, instance)
    fake_validate.assert_called_once_with(FAKE_CLIENT, instance)
    fake_refresh.assert_called_once_with()
    @mock.patch.object(objects.Instance, 'refresh')
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    def test__wait_for_active_fail(self, fake_validate, fake_refresh):
        """A DEPLOYFAIL provision state aborts the wait with
        InstanceDeployFailure."""
        instance = fake_instance.fake_instance_obj(self.ctx,
                uuid=uuidutils.generate_uuid())
        node = ironic_utils.get_test_node(
                provision_state=ironic_states.DEPLOYFAIL)
        fake_validate.return_value = node
        self.assertRaises(exception.InstanceDeployFailure,
                self.driver._wait_for_active,
                FAKE_CLIENT, instance)
        fake_validate.assert_called_once_with(FAKE_CLIENT, instance)
        fake_refresh.assert_called_once_with()
    @mock.patch.object(objects.Instance, 'refresh')
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    def _wait_for_active_abort(self, instance_params, fake_validate,
                               fake_refresh):
        """Shared helper: an instance whose refreshed task/vm state shows
        it is being (or has been) deleted aborts _wait_for_active before
        any Ironic node lookup happens.

        Note: the mock.patch decorators inject fake_validate/fake_refresh
        even though this is called directly by the test_* wrappers below.
        """
        instance = fake_instance.fake_instance_obj(self.ctx,
                uuid=uuidutils.generate_uuid(),
                **instance_params)
        self.assertRaises(exception.InstanceDeployFailure,
                self.driver._wait_for_active,
                FAKE_CLIENT, instance)
        # Assert _validate_instance_and_node wasn't called
        self.assertFalse(fake_validate.called)
        fake_refresh.assert_called_once_with()
    def test__wait_for_active_abort_deleting(self):
        """Abort the deploy wait when the task state is DELETING."""
        self._wait_for_active_abort({'task_state': task_states.DELETING})
    def test__wait_for_active_abort_deleted(self):
        """Abort the deploy wait when the vm state is DELETED."""
        self._wait_for_active_abort({'vm_state': vm_states.DELETED})
    def test__wait_for_active_abort_error(self):
        """Abort the deploy wait when the vm state is ERROR."""
        self._wait_for_active_abort({'vm_state': vm_states.ERROR})
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    def test__wait_for_power_state_pass(self, fake_validate):
        """While a target power state is still pending, _wait_for_power_state
        returns so the loop keeps polling."""
        instance = fake_instance.fake_instance_obj(self.ctx,
                uuid=uuidutils.generate_uuid())
        node = ironic_utils.get_test_node(
                target_power_state=ironic_states.POWER_OFF)
        fake_validate.return_value = node
        self.driver._wait_for_power_state(
                FAKE_CLIENT, instance, 'fake message')
        self.assertTrue(fake_validate.called)
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    def test__wait_for_power_state_ok(self, fake_validate):
        """A NOSTATE target power state means the transition finished, so
        the loop ends via LoopingCallDone."""
        instance = fake_instance.fake_instance_obj(self.ctx,
                uuid=uuidutils.generate_uuid())
        node = ironic_utils.get_test_node(
                target_power_state=ironic_states.NOSTATE)
        fake_validate.return_value = node
        self.assertRaises(loopingcall.LoopingCallDone,
                self.driver._wait_for_power_state,
                FAKE_CLIENT, instance, 'fake message')
        self.assertTrue(fake_validate.called)
def test__node_resource(self):
node_uuid = uuidutils.generate_uuid()
props = _get_properties()
stats = _get_stats()
node = ironic_utils.get_test_node(uuid=node_uuid,
instance_uuid=self.instance_uuid,
properties=props)
result = self.driver._node_resource(node)
wantkeys = ["hypervisor_hostname", "hypervisor_type",
"hypervisor_version", "cpu_info",
"vcpus", "vcpus_used",
"memory_mb", "memory_mb_used",
"local_gb", "local_gb_used",
"disk_available_least",
"supported_instances",
"stats"]
wantkeys.sort()
gotkeys = result.keys()
gotkeys.sort()
self.assertEqual(wantkeys, gotkeys)
self.assertEqual(props['cpus'], result['vcpus'])
self.assertEqual(props['cpus'], result['vcpus_used'])
self.assertEqual(props['memory_mb'], result['memory_mb'])
self.assertEqual(props['memory_mb'], result['memory_mb_used'])
self.assertEqual(props['local_gb'], result['local_gb'])
self.assertEqual(props['local_gb'], result['local_gb_used'])
self.assertEqual(node_uuid, result['hypervisor_hostname'])
self.assertEqual(stats, jsonutils.loads(result['stats']))
    def test__node_resource_canonicalizes_arch(self):
        """The cpu arch is canonicalized (i386 -> i686) in
        supported_instances, while stats keep the raw value."""
        node_uuid = uuidutils.generate_uuid()
        props = _get_properties()
        props['cpu_arch'] = 'i386'
        node = ironic_utils.get_test_node(uuid=node_uuid, properties=props)
        result = self.driver._node_resource(node)
        self.assertEqual('i686',
                         jsonutils.loads(result['supported_instances'])[0][0])
        self.assertEqual('i386',
                         jsonutils.loads(result['stats'])['cpu_arch'])
    def test__node_resource_unknown_arch(self):
        """A node without a cpu_arch property advertises no supported
        instance types."""
        node_uuid = uuidutils.generate_uuid()
        props = _get_properties()
        del props['cpu_arch']
        node = ironic_utils.get_test_node(uuid=node_uuid, properties=props)
        result = self.driver._node_resource(node)
        self.assertEqual([], jsonutils.loads(result['supported_instances']))
    def test__node_resource_exposes_capabilities(self):
        """'key:value' capability pairs are flattened into individual
        stats entries rather than kept under a 'capabilities' key."""
        props = _get_properties()
        props['capabilities'] = 'test:capability'
        node = ironic_utils.get_test_node(properties=props)
        result = self.driver._node_resource(node)
        stats = jsonutils.loads(result['stats'])
        self.assertIsNone(stats.get('capabilities'))
        self.assertEqual('capability', stats.get('test'))
    def test__node_resource_no_capabilities(self):
        """A None capabilities property leaves no 'capabilities' stat."""
        props = _get_properties()
        props['capabilities'] = None
        node = ironic_utils.get_test_node(properties=props)
        result = self.driver._node_resource(node)
        self.assertIsNone(jsonutils.loads(result['stats']).get('capabilities'))
    def test__node_resource_malformed_capabilities(self):
        """Malformed capability entries (missing key or value) are skipped
        while well-formed pairs are still exposed."""
        props = _get_properties()
        props['capabilities'] = 'test:capability,:no_key,no_val:'
        node = ironic_utils.get_test_node(properties=props)
        result = self.driver._node_resource(node)
        stats = jsonutils.loads(result['stats'])
        self.assertEqual('capability', stats.get('test'))
    def test__node_resource_available(self):
        """An AVAILABLE node with no instance reports its full capacity
        and zero used resources."""
        node_uuid = uuidutils.generate_uuid()
        props = _get_properties()
        stats = _get_stats()
        node = ironic_utils.get_test_node(
                uuid=node_uuid,
                instance_uuid=None,
                power_state=ironic_states.POWER_OFF,
                properties=props,
                provision_state=ironic_states.AVAILABLE)
        result = self.driver._node_resource(node)
        self.assertEqual(props['cpus'], result['vcpus'])
        self.assertEqual(0, result['vcpus_used'])
        self.assertEqual(props['memory_mb'], result['memory_mb'])
        self.assertEqual(0, result['memory_mb_used'])
        self.assertEqual(props['local_gb'], result['local_gb'])
        self.assertEqual(0, result['local_gb_used'])
        self.assertEqual(node_uuid, result['hypervisor_hostname'])
        self.assertEqual(stats, jsonutils.loads(result['stats']))
    @mock.patch.object(ironic_driver.IronicDriver,
                       '_node_resources_unavailable')
    def test__node_resource_unavailable_node_res(self, mock_res_unavail):
        """When the node's resources are unavailable, every capacity and
        usage figure is reported as zero (stats are still exposed)."""
        mock_res_unavail.return_value = True
        node_uuid = uuidutils.generate_uuid()
        props = _get_properties()
        stats = _get_stats()
        node = ironic_utils.get_test_node(uuid=node_uuid,
                                          instance_uuid=None,
                                          properties=props)
        result = self.driver._node_resource(node)
        self.assertEqual(0, result['vcpus'])
        self.assertEqual(0, result['vcpus_used'])
        self.assertEqual(0, result['memory_mb'])
        self.assertEqual(0, result['memory_mb_used'])
        self.assertEqual(0, result['local_gb'])
        self.assertEqual(0, result['local_gb_used'])
        self.assertEqual(node_uuid, result['hypervisor_hostname'])
        self.assertEqual(stats, jsonutils.loads(result['stats']))
    @mock.patch.object(ironic_driver.IronicDriver,
                       '_node_resources_used')
    def test__node_resource_used_node_res(self, mock_res_used):
        """A node flagged as 'used' reports used == total for every
        resource dimension."""
        mock_res_used.return_value = True
        node_uuid = uuidutils.generate_uuid()
        props = _get_properties()
        stats = _get_stats()
        node = ironic_utils.get_test_node(
                uuid=node_uuid,
                instance_uuid=uuidutils.generate_uuid(),
                provision_state=ironic_states.ACTIVE,
                properties=props)
        result = self.driver._node_resource(node)
        self.assertEqual(props['cpus'], result['vcpus'])
        self.assertEqual(props['cpus'], result['vcpus_used'])
        self.assertEqual(props['memory_mb'], result['memory_mb'])
        self.assertEqual(props['memory_mb'], result['memory_mb_used'])
        self.assertEqual(props['local_gb'], result['local_gb'])
        self.assertEqual(props['local_gb'], result['local_gb_used'])
        self.assertEqual(node_uuid, result['hypervisor_hostname'])
        self.assertEqual(stats, jsonutils.loads(result['stats']))
    @mock.patch.object(ironic_driver.LOG, 'warning')
    def test__parse_node_properties(self, mock_warning):
        """Well-formed node properties are parsed verbatim (plus the
        raw_cpu_arch echo) without logging any warnings."""
        props = _get_properties()
        node = ironic_utils.get_test_node(
            uuid=uuidutils.generate_uuid(),
            properties=props)
        # raw_cpu_arch is included because extra_specs filters do not
        # canonicalized the arch
        props['raw_cpu_arch'] = props['cpu_arch']
        parsed = self.driver._parse_node_properties(node)
        self.assertEqual(props, parsed)
        # Assert we didn't log any warning since all properties are
        # correct
        self.assertFalse(mock_warning.called)
    @mock.patch.object(ironic_driver.LOG, 'warning')
    def test__parse_node_properties_bad_values(self, mock_warning):
        """Unparseable property values fall back to 0 (or None for the
        arch), with one warning logged per bad property."""
        props = _get_properties()
        props['cpus'] = 'bad-value'
        props['memory_mb'] = 'bad-value'
        props['local_gb'] = 'bad-value'
        props['cpu_arch'] = 'bad-value'
        node = ironic_utils.get_test_node(
            uuid=uuidutils.generate_uuid(),
            properties=props)
        # raw_cpu_arch is included because extra_specs filters do not
        # canonicalized the arch
        props['raw_cpu_arch'] = props['cpu_arch']
        parsed = self.driver._parse_node_properties(node)
        expected_props = props.copy()
        expected_props['cpus'] = 0
        expected_props['memory_mb'] = 0
        expected_props['local_gb'] = 0
        expected_props['cpu_arch'] = None
        self.assertEqual(expected_props, parsed)
        # One warning for each of the four bad values above.
        self.assertEqual(4, mock_warning.call_count)
    @mock.patch.object(ironic_driver.LOG, 'warning')
    def test__parse_node_properties_canonicalize_cpu_arch(self, mock_warning):
        """A valid but non-canonical arch (amd64) is canonicalized to
        x86_64 without any warning."""
        props = _get_properties()
        props['cpu_arch'] = 'amd64'
        node = ironic_utils.get_test_node(
            uuid=uuidutils.generate_uuid(),
            properties=props)
        # raw_cpu_arch is included because extra_specs filters do not
        # canonicalized the arch
        props['raw_cpu_arch'] = props['cpu_arch']
        parsed = self.driver._parse_node_properties(node)
        expected_props = props.copy()
        # Make sure it cpu_arch was canonicalized
        expected_props['cpu_arch'] = 'x86_64'
        self.assertEqual(expected_props, parsed)
        # Assert we didn't log any warning since all properties are
        # correct
        self.assertFalse(mock_warning.called)
    @mock.patch.object(firewall.NoopFirewallDriver, 'prepare_instance_filter',
                       create=True)
    @mock.patch.object(firewall.NoopFirewallDriver, 'setup_basic_filtering',
                       create=True)
    @mock.patch.object(firewall.NoopFirewallDriver, 'apply_instance_filter',
                       create=True)
    def test__start_firewall(self, mock_aif, mock_sbf, mock_pif):
        """_start_firewall() delegates to the firewall driver's prepare,
        setup and apply hooks, each exactly once."""
        fake_inst = 'fake-inst'
        fake_net_info = utils.get_test_network_info()
        self.driver._start_firewall(fake_inst, fake_net_info)
        mock_aif.assert_called_once_with(fake_inst, fake_net_info)
        mock_sbf.assert_called_once_with(fake_inst, fake_net_info)
        mock_pif.assert_called_once_with(fake_inst, fake_net_info)
@mock.patch.object(firewall.NoopFirewallDriver, 'unfilter_instance',
create=True)
def test__stop_firewall(self, mock_ui):
fake_inst = 'fake-inst'
fake_net_info = utils.get_test_network_info()
self.driver._stop_firewall(fake_inst, fake_net_info)
mock_ui.assert_called_once_with(fake_inst, fake_net_info)
    @mock.patch.object(cw.IronicClientWrapper, 'call')
    def test_instance_exists(self, mock_call):
        """instance_exists() is True when Ironic finds a node associated
        with the instance's UUID."""
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   uuid=self.instance_uuid)
        self.assertTrue(self.driver.instance_exists(instance))
        mock_call.assert_called_once_with('node.get_by_instance_uuid',
                                          self.instance_uuid)
@mock.patch.object(cw.IronicClientWrapper, 'call')
def test_instance_exists_fail(self, mock_call):
mock_call.side_effect = ironic_exception.NotFound
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=self.instance_uuid)
self.assertFalse(self.driver.instance_exists(instance))
mock_call.assert_called_once_with('node.get_by_instance_uuid',
self.instance_uuid)
    @mock.patch.object(cw.IronicClientWrapper, 'call')
    @mock.patch.object(objects.Instance, 'get_by_uuid')
    def test_list_instances(self, mock_inst_by_uuid, mock_call):
        """list_instances() returns the instance *names* of every node
        that has an associated instance."""
        nodes = []
        instances = []
        for i in range(2):
            uuid = uuidutils.generate_uuid()
            instances.append(fake_instance.fake_instance_obj(self.ctx,
                                                             id=i,
                                                             uuid=uuid))
            nodes.append(ironic_utils.get_test_node(instance_uuid=uuid))
        mock_inst_by_uuid.side_effect = instances
        mock_call.return_value = nodes
        response = self.driver.list_instances()
        mock_call.assert_called_with("node.list", associated=True, limit=0)
        expected_calls = [mock.call(mock.ANY, instances[0].uuid),
                          mock.call(mock.ANY, instances[1].uuid)]
        mock_inst_by_uuid.assert_has_calls(expected_calls)
        # Names derive from the instance ids (0 and 1) assigned above.
        self.assertEqual(['instance-00000000', 'instance-00000001'],
                         sorted(response))
    @mock.patch.object(cw.IronicClientWrapper, 'call')
    @mock.patch.object(objects.Instance, 'get_by_uuid')
    def test_list_instances_fail(self, mock_inst_by_uuid, mock_call):
        """If the node listing fails, list_instances() returns an empty
        list and never hits the instance DB."""
        mock_call.side_effect = exception.NovaException
        response = self.driver.list_instances()
        mock_call.assert_called_with("node.list", associated=True, limit=0)
        self.assertFalse(mock_inst_by_uuid.called)
        self.assertThat(response, HasLength(0))
@mock.patch.object(cw.IronicClientWrapper, 'call')
def test_list_instance_uuids(self, mock_call):
num_nodes = 2
nodes = []
for n in range(num_nodes):
nodes.append(ironic_utils.get_test_node(
instance_uuid=uuidutils.generate_uuid()))
mock_call.return_value = nodes
uuids = self.driver.list_instance_uuids()
mock_call.assert_called_with('node.list', associated=True, limit=0)
expected = [n.instance_uuid for n in nodes]
self.assertEqual(sorted(expected), sorted(uuids))
    @mock.patch.object(FAKE_CLIENT.node, 'list')
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    def test_node_is_available_empty_cache_empty_list(self, mock_get,
                                                      mock_list):
        """With an empty cache and empty node list, node_is_available()
        falls back to node.get; a NotFound there yields False."""
        node = ironic_utils.get_test_node()
        mock_get.return_value = node
        mock_list.return_value = []
        self.assertTrue(self.driver.node_is_available(node.uuid))
        mock_get.assert_called_with(node.uuid)
        mock_list.assert_called_with(detail=True, limit=0)
        mock_get.side_effect = ironic_exception.NotFound
        self.assertFalse(self.driver.node_is_available(node.uuid))
    @mock.patch.object(FAKE_CLIENT.node, 'list')
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    def test_node_is_available_empty_cache(self, mock_get, mock_list):
        """When the node shows up in node.list, node_is_available() never
        needs the per-node get call."""
        node = ironic_utils.get_test_node()
        mock_get.return_value = node
        mock_list.return_value = [node]
        self.assertTrue(self.driver.node_is_available(node.uuid))
        mock_list.assert_called_with(detail=True, limit=0)
        self.assertEqual(0, mock_get.call_count)
    @mock.patch.object(FAKE_CLIENT.node, 'list')
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    def test_node_is_available_with_cache(self, mock_get, mock_list):
        """A warm node cache answers node_is_available() without any
        further Ironic API calls."""
        node = ironic_utils.get_test_node()
        mock_get.return_value = node
        mock_list.return_value = [node]
        # populate the cache
        self.driver.get_available_nodes(refresh=True)
        # prove that zero calls are made after populating cache
        mock_list.reset_mock()
        self.assertTrue(self.driver.node_is_available(node.uuid))
        self.assertEqual(0, mock_list.call_count)
        self.assertEqual(0, mock_get.call_count)
    def test__node_resources_unavailable(self):
        """_node_resources_unavailable() is True for nodes in maintenance,
        with bad power states, or in transient/terminal provision states,
        and False for powered-off AVAILABLE/NOSTATE nodes."""
        node_dicts = [
            # a node in maintenance /w no instance and power OFF
            {'uuid': uuidutils.generate_uuid(),
             'maintenance': True,
             'power_state': ironic_states.POWER_OFF,
             'provision_state': ironic_states.AVAILABLE},
            # a node in maintenance /w no instance and ERROR power state
            {'uuid': uuidutils.generate_uuid(),
             'maintenance': True,
             'power_state': ironic_states.ERROR,
             'provision_state': ironic_states.AVAILABLE},
            # a node not in maintenance /w no instance and bad power state
            {'uuid': uuidutils.generate_uuid(),
             'power_state': ironic_states.NOSTATE,
             'provision_state': ironic_states.AVAILABLE},
            # a node not in maintenance or bad power state, bad provision state
            {'uuid': uuidutils.generate_uuid(),
             'power_state': ironic_states.POWER_ON,
             'provision_state': ironic_states.MANAGEABLE},
            # a node in cleaning
            {'uuid': uuidutils.generate_uuid(),
             'power_state': ironic_states.POWER_ON,
             'provision_state': ironic_states.CLEANING},
            # a node in cleaning, waiting for a clean step to finish
            {'uuid': uuidutils.generate_uuid(),
             'power_state': ironic_states.POWER_ON,
             'provision_state': ironic_states.CLEANWAIT},
            # a node in deleting
            {'uuid': uuidutils.generate_uuid(),
             'power_state': ironic_states.POWER_ON,
             'provision_state': ironic_states.DELETING},
            # a node in deleted
            {'uuid': uuidutils.generate_uuid(),
             'power_state': ironic_states.POWER_ON,
             'provision_state': ironic_states.DELETED}
        ]
        for n in node_dicts:
            node = ironic_utils.get_test_node(**n)
            self.assertTrue(self.driver._node_resources_unavailable(node))
        for ok_state in (ironic_states.AVAILABLE, ironic_states.NOSTATE):
            # these are both ok and should present as available
            avail_node = ironic_utils.get_test_node(
                            power_state=ironic_states.POWER_OFF,
                            provision_state=ok_state)
            unavailable = self.driver._node_resources_unavailable(avail_node)
            self.assertFalse(unavailable)
def test__node_resources_used(self):
node_dicts = [
# a node in maintenance /w instance and active
{'uuid': uuidutils.generate_uuid(),
'instance_uuid': uuidutils.generate_uuid(),
'provision_state': ironic_states.ACTIVE},
]
for n in node_dicts:
node = ironic_utils.get_test_node(**n)
self.assertTrue(self.driver._node_resources_used(node))
unused_node = ironic_utils.get_test_node(
instance_uuid=None,
provision_state=ironic_states.AVAILABLE)
self.assertFalse(self.driver._node_resources_used(unused_node))
    @mock.patch.object(FAKE_CLIENT.node, 'list')
    def test_get_available_nodes(self, mock_list):
        """get_available_nodes() reports every node UUID, including nodes
        in maintenance or with bad power states."""
        node_dicts = [
            # a node in maintenance /w no instance and power OFF
            {'uuid': uuidutils.generate_uuid(),
             'maintenance': True,
             'power_state': ironic_states.POWER_OFF},
            # a node /w instance and power ON
            {'uuid': uuidutils.generate_uuid(),
             'instance_uuid': self.instance_uuid,
             'power_state': ironic_states.POWER_ON},
            # a node not in maintenance /w no instance and bad power state
            {'uuid': uuidutils.generate_uuid(),
             'power_state': ironic_states.ERROR},
        ]
        nodes = [ironic_utils.get_test_node(**n) for n in node_dicts]
        mock_list.return_value = nodes
        available_nodes = self.driver.get_available_nodes()
        expected_uuids = [n['uuid'] for n in node_dicts]
        self.assertEqual(sorted(expected_uuids), sorted(available_nodes))
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    @mock.patch.object(FAKE_CLIENT.node, 'list')
    @mock.patch.object(ironic_driver.IronicDriver, '_node_resource')
    def test_get_available_resource(self, mock_nr, mock_list, mock_get):
        """A cache miss in get_available_resource() falls back to a direct
        node.get before building the resource dict."""
        node = ironic_utils.get_test_node()
        node_2 = ironic_utils.get_test_node(uuid=uuidutils.generate_uuid())
        fake_resource = 'fake-resource'
        mock_get.return_value = node
        # ensure cache gets populated without the node we want
        mock_list.return_value = [node_2]
        mock_nr.return_value = fake_resource
        result = self.driver.get_available_resource(node.uuid)
        self.assertEqual(fake_resource, result)
        mock_nr.assert_called_once_with(node)
        mock_get.assert_called_once_with(node.uuid)
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    @mock.patch.object(FAKE_CLIENT.node, 'list')
    @mock.patch.object(ironic_driver.IronicDriver, '_node_resource')
    def test_get_available_resource_with_cache(self, mock_nr, mock_list,
                                               mock_get):
        """With a warm cache, get_available_resource() serves the node
        without any further Ironic API calls."""
        node = ironic_utils.get_test_node()
        fake_resource = 'fake-resource'
        mock_list.return_value = [node]
        mock_nr.return_value = fake_resource
        # populate the cache
        self.driver.get_available_nodes(refresh=True)
        mock_list.reset_mock()
        result = self.driver.get_available_resource(node.uuid)
        self.assertEqual(fake_resource, result)
        self.assertEqual(0, mock_list.call_count)
        self.assertEqual(0, mock_get.call_count)
        mock_nr.assert_called_once_with(node)
    @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
    def test_get_info(self, mock_gbiu):
        """get_info() maps the Ironic power state and node properties onto
        a nova InstanceInfo (memory reported in KiB)."""
        properties = {'memory_mb': 512, 'cpus': 2}
        power_state = ironic_states.POWER_ON
        node = ironic_utils.get_test_node(instance_uuid=self.instance_uuid,
                                          properties=properties,
                                          power_state=power_state)
        mock_gbiu.return_value = node
        # ironic_states.POWER_ON should be mapped to
        # nova_states.RUNNING
        memory_kib = properties['memory_mb'] * 1024
        instance = fake_instance.fake_instance_obj('fake-context',
                                                   uuid=self.instance_uuid)
        result = self.driver.get_info(instance)
        self.assertEqual(hardware.InstanceInfo(state=nova_states.RUNNING,
                                               max_mem_kb=memory_kib,
                                               mem_kb=memory_kib,
                                               num_cpu=properties['cpus']),
                         result)
@mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
def test_get_info_http_not_found(self, mock_gbiu):
mock_gbiu.side_effect = ironic_exception.NotFound()
instance = fake_instance.fake_instance_obj(
self.ctx, uuid=uuidutils.generate_uuid())
result = self.driver.get_info(instance)
self.assertEqual(hardware.InstanceInfo(state=nova_states.NOSTATE),
result)
    @mock.patch.object(FAKE_CLIENT, 'node')
    def test_macs_for_instance(self, mock_node):
        """macs_for_instance() collects the MAC addresses of the node's
        ports into a set."""
        node = ironic_utils.get_test_node()
        port = ironic_utils.get_test_port()
        mock_node.get.return_value = node
        mock_node.list_ports.return_value = [port]
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node.uuid)
        result = self.driver.macs_for_instance(instance)
        self.assertEqual(set([port.address]), result)
        mock_node.list_ports.assert_called_once_with(node.uuid)
@mock.patch.object(FAKE_CLIENT.node, 'get')
def test_macs_for_instance_http_not_found(self, mock_get):
mock_get.side_effect = ironic_exception.NotFound()
instance = fake_instance.fake_instance_obj(
self.ctx, node=uuidutils.generate_uuid())
result = self.driver.macs_for_instance(instance)
self.assertIsNone(result)
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
    @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    def _test_spawn(self, mock_sf, mock_pvifs, mock_adf, mock_wait_active,
                    mock_node, mock_looping, mock_save):
        """Shared happy-path spawn scenario: validates the node, sets the
        driver fields, plugs VIFs, starts the firewall, requests the
        'active' provision state and polls via a looping call.

        Note: the mock.patch decorators inject the mocks even though this
        helper is called directly by the test_spawn* wrappers.
        """
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        fake_flavor = objects.Flavor(ephemeral_gb=0)
        instance.flavor = fake_flavor
        mock_node.get.return_value = node
        mock_node.validate.return_value = ironic_utils.get_test_validation()
        mock_node.get_by_instance_uuid.return_value = node
        mock_node.set_provision_state.return_value = mock.MagicMock()
        fake_looping_call = FakeLoopingCall()
        mock_looping.return_value = fake_looping_call
        self.driver.spawn(self.ctx, instance, None, [], None)
        mock_node.get.assert_called_once_with(node_uuid)
        mock_node.validate.assert_called_once_with(node_uuid)
        mock_adf.assert_called_once_with(node, instance, None, fake_flavor)
        mock_pvifs.assert_called_once_with(node, instance, None)
        mock_sf.assert_called_once_with(instance, None)
        mock_node.set_provision_state.assert_called_once_with(node_uuid,
                                                'active', configdrive=mock.ANY)
        # With ephemeral_gb=0 no ephemeral device is set, so nothing to save.
        self.assertIsNone(instance.default_ephemeral_device)
        self.assertFalse(mock_save.called)
        mock_looping.assert_called_once_with(mock_wait_active,
                                             FAKE_CLIENT_WRAPPER,
                                             instance)
        fake_looping_call.start.assert_called_once_with(
            interval=CONF.ironic.api_retry_interval)
        fake_looping_call.wait.assert_called_once_with()
    @mock.patch.object(ironic_driver.IronicDriver, '_generate_configdrive')
    @mock.patch.object(configdrive, 'required_by')
    def test_spawn(self, mock_required_by, mock_configdrive):
        """Spawn without a configdrive requirement never generates one."""
        mock_required_by.return_value = False
        self._test_spawn()
        # assert configdrive was not generated
        self.assertFalse(mock_configdrive.called)
    @mock.patch.object(ironic_driver.IronicDriver, '_generate_configdrive')
    @mock.patch.object(configdrive, 'required_by')
    def test_spawn_with_configdrive(self, mock_required_by, mock_configdrive):
        """Spawn generates a configdrive when one is required."""
        mock_required_by.return_value = True
        self._test_spawn()
        # assert configdrive was generated
        mock_configdrive.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
                                                 extra_md={}, files=[])
    @mock.patch.object(configdrive, 'required_by')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, 'destroy')
    @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
    @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    def test_spawn_destroyed_after_failure(self, mock_sf, mock_pvifs, mock_adf,
                                           mock_wait_active, mock_destroy,
                                           mock_node, mock_looping,
                                           mock_required_by):
        """If the deploy wait fails, spawn() destroys the instance before
        re-raising the deploy failure."""
        mock_required_by.return_value = False
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        fake_flavor = objects.Flavor(ephemeral_gb=0)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        instance.flavor = fake_flavor
        mock_node.get.return_value = node
        mock_node.validate.return_value = ironic_utils.get_test_validation()
        mock_node.get_by_instance_uuid.return_value = node
        mock_node.set_provision_state.return_value = mock.MagicMock()
        fake_looping_call = FakeLoopingCall()
        mock_looping.return_value = fake_looping_call
        deploy_exc = exception.InstanceDeployFailure('foo')
        fake_looping_call.wait.side_effect = deploy_exc
        self.assertRaises(
            exception.InstanceDeployFailure,
            self.driver.spawn, self.ctx, instance, None, [], None)
        mock_destroy.assert_called_once_with(self.ctx, instance, None)
    @mock.patch.object(FAKE_CLIENT.node, 'update')
    def test__add_driver_fields_good(self, mock_update):
        """_add_driver_fields() patches instance_info and instance_uuid
        onto the node in a single update call."""
        node = ironic_utils.get_test_node(driver='fake')
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node.uuid)
        image_meta = ironic_utils.get_test_image_meta()
        flavor = ironic_utils.get_test_flavor()
        self.driver._add_driver_fields(node, instance, image_meta, flavor)
        expected_patch = [{'path': '/instance_info/image_source', 'op': 'add',
                           'value': image_meta['id']},
                          {'path': '/instance_info/root_gb', 'op': 'add',
                           'value': str(instance.root_gb)},
                          {'path': '/instance_info/swap_mb', 'op': 'add',
                           'value': str(flavor['swap'])},
                          {'path': '/instance_info/display_name',
                           'value': instance.display_name, 'op': 'add'},
                          {'path': '/instance_uuid', 'op': 'add',
                           'value': instance.uuid}]
        mock_update.assert_called_once_with(node.uuid, expected_patch)
    @mock.patch.object(FAKE_CLIENT.node, 'update')
    def test__add_driver_fields_fail(self, mock_update):
        """A BadRequest from node.update is surfaced as
        InstanceDeployFailure."""
        mock_update.side_effect = ironic_exception.BadRequest()
        node = ironic_utils.get_test_node(driver='fake')
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node.uuid)
        image_meta = ironic_utils.get_test_image_meta()
        flavor = ironic_utils.get_test_flavor()
        self.assertRaises(exception.InstanceDeployFailure,
                          self.driver._add_driver_fields,
                          node, instance, image_meta, flavor)
    @mock.patch.object(FAKE_CLIENT.node, 'update')
    def test__cleanup_deploy_good_with_flavor(self, mock_update):
        """_cleanup_deploy() with an explicit flavor removes the
        instance_uuid from the node."""
        node = ironic_utils.get_test_node(driver='fake',
                                          instance_uuid=self.instance_uuid)
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node.uuid)
        flavor = ironic_utils.get_test_flavor(extra_specs={})
        self.driver._cleanup_deploy(self.ctx, node, instance, None,
                                    flavor=flavor)
        expected_patch = [{'path': '/instance_uuid', 'op': 'remove'}]
        mock_update.assert_called_once_with(node.uuid, expected_patch)
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    @mock.patch.object(FAKE_CLIENT.node, 'update')
    def test__cleanup_deploy_instance_already_removed(self, mock_update,
                                                      mock_validate):
        """If the instance is already gone, _cleanup_deploy() skips the
        node update entirely."""
        mock_validate.side_effect = exception.InstanceNotFound(
            instance_id='fake-instance')
        node = ironic_utils.get_test_node(driver='fake',
                                          instance_uuid=self.instance_uuid)
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node.uuid)
        flavor = ironic_utils.get_test_flavor(extra_specs={})
        self.driver._cleanup_deploy(self.ctx, node, instance, None,
                                    flavor=flavor)
        # assert node.update is not called
        self.assertFalse(mock_update.called)
        mock_validate.assert_called_once_with(mock.ANY, instance)
    @mock.patch.object(FAKE_CLIENT.node, 'update')
    def test__cleanup_deploy_without_flavor(self, mock_update):
        """Without an explicit flavor, _cleanup_deploy() falls back to
        instance.flavor and still removes the instance_uuid."""
        node = ironic_utils.get_test_node(driver='fake',
                                          instance_uuid=self.instance_uuid)
        flavor = ironic_utils.get_test_flavor(extra_specs={})
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node.uuid)
        instance.flavor = flavor
        self.driver._cleanup_deploy(self.ctx, node, instance, None)
        expected_patch = [{'path': '/instance_uuid', 'op': 'remove'}]
        mock_update.assert_called_once_with(node.uuid, expected_patch)
    @mock.patch.object(FAKE_CLIENT.node, 'update')
    def test__cleanup_deploy_fail(self, mock_update):
        """A BadRequest from node.update during cleanup becomes
        InstanceTerminationFailure."""
        mock_update.side_effect = ironic_exception.BadRequest()
        node = ironic_utils.get_test_node(driver='fake',
                                          instance_uuid=self.instance_uuid)
        flavor = ironic_utils.get_test_flavor(extra_specs={})
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node.uuid)
        instance.flavor = flavor
        self.assertRaises(exception.InstanceTerminationFailure,
                          self.driver._cleanup_deploy,
                          self.ctx, node, instance, None)
    @mock.patch.object(configdrive, 'required_by')
    @mock.patch.object(FAKE_CLIENT, 'node')
    def test_spawn_node_driver_validation_fail(self, mock_node,
                                               mock_required_by):
        """A failed Ironic driver validation (power and deploy interfaces)
        aborts spawn() with ValidationError."""
        mock_required_by.return_value = False
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        flavor = ironic_utils.get_test_flavor()
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        instance.flavor = flavor
        mock_node.validate.return_value = ironic_utils.get_test_validation(
            power=False, deploy=False)
        mock_node.get.return_value = node
        image_meta = ironic_utils.get_test_image_meta()
        self.assertRaises(exception.ValidationError, self.driver.spawn,
                          self.ctx, instance, image_meta, [], None)
        mock_node.get.assert_called_once_with(node_uuid)
        mock_node.validate.assert_called_once_with(node_uuid)
    @mock.patch.object(configdrive, 'required_by')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
    def test_spawn_node_prepare_for_deploy_fail(self, mock_cleanup_deploy,
                                                mock_pvifs, mock_sf,
                                                mock_node, mock_required_by):
        """A failure while preparing for deploy (here: starting the
        firewall) re-raises and triggers _cleanup_deploy."""
        mock_required_by.return_value = False
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        flavor = ironic_utils.get_test_flavor()
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        instance.flavor = flavor
        mock_node.get.return_value = node
        mock_node.validate.return_value = ironic_utils.get_test_validation()
        image_meta = ironic_utils.get_test_image_meta()

        class TestException(Exception):
            pass

        mock_sf.side_effect = TestException()
        self.assertRaises(TestException, self.driver.spawn,
                          self.ctx, instance, image_meta, [], None)
        mock_node.get.assert_called_once_with(node_uuid)
        mock_node.validate.assert_called_once_with(node_uuid)
        mock_cleanup_deploy.assert_called_with(self.ctx, node, instance, None,
                                               flavor=flavor)
    @mock.patch.object(configdrive, 'required_by')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
    def test_spawn_node_trigger_deploy_fail(self, mock_cleanup_deploy,
                                            mock_pvifs, mock_sf,
                                            mock_node, mock_required_by):
        """A NovaException from set_provision_state re-raises and triggers
        _cleanup_deploy."""
        mock_required_by.return_value = False
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        flavor = ironic_utils.get_test_flavor()
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        instance.flavor = flavor
        image_meta = ironic_utils.get_test_image_meta()
        mock_node.get.return_value = node
        mock_node.validate.return_value = ironic_utils.get_test_validation()
        mock_node.set_provision_state.side_effect = exception.NovaException()
        self.assertRaises(exception.NovaException, self.driver.spawn,
                          self.ctx, instance, image_meta, [], None)
        mock_node.get.assert_called_once_with(node_uuid)
        mock_node.validate.assert_called_once_with(node_uuid)
        mock_cleanup_deploy.assert_called_once_with(self.ctx, node,
                                                    instance, None,
                                                    flavor=flavor)
    @mock.patch.object(configdrive, 'required_by')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
    def test_spawn_node_trigger_deploy_fail2(self, mock_cleanup_deploy,
                                             mock_pvifs, mock_sf,
                                             mock_node, mock_required_by):
        """An Ironic BadRequest from set_provision_state re-raises as-is
        and still triggers _cleanup_deploy."""
        mock_required_by.return_value = False
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        flavor = ironic_utils.get_test_flavor()
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        instance.flavor = flavor
        image_meta = ironic_utils.get_test_image_meta()
        mock_node.get.return_value = node
        mock_node.validate.return_value = ironic_utils.get_test_validation()
        mock_node.set_provision_state.side_effect = ironic_exception.BadRequest
        self.assertRaises(ironic_exception.BadRequest,
                          self.driver.spawn,
                          self.ctx, instance, image_meta, [], None)
        mock_node.get.assert_called_once_with(node_uuid)
        mock_node.validate.assert_called_once_with(node_uuid)
        mock_cleanup_deploy.assert_called_once_with(self.ctx, node,
                                                    instance, None,
                                                    flavor=flavor)
    @mock.patch.object(configdrive, 'required_by')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, 'destroy')
    def test_spawn_node_trigger_deploy_fail3(self, mock_destroy,
                                             mock_pvifs, mock_sf,
                                             mock_node, mock_looping,
                                             mock_required_by):
        """If waiting for the deploy to become active fails, spawn()
        re-raises and destroys the instance with the same network_info."""
        mock_required_by.return_value = False
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        flavor = ironic_utils.get_test_flavor()
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        instance.flavor = flavor
        image_meta = ironic_utils.get_test_image_meta()
        mock_node.get.return_value = node
        mock_node.validate.return_value = ironic_utils.get_test_validation()
        fake_looping_call = FakeLoopingCall()
        mock_looping.return_value = fake_looping_call
        # The failure surfaces while polling for the deploy to finish.
        fake_looping_call.wait.side_effect = ironic_exception.BadRequest
        fake_net_info = utils.get_test_network_info()
        self.assertRaises(ironic_exception.BadRequest,
                          self.driver.spawn, self.ctx, instance,
                          image_meta, [], None, fake_net_info)
        mock_destroy.assert_called_once_with(self.ctx, instance,
                                             fake_net_info)
    @mock.patch.object(configdrive, 'required_by')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    def test_spawn_sets_default_ephemeral_device(self, mock_sf, mock_pvifs,
                                                 mock_wait, mock_node,
                                                 mock_save, mock_looping,
                                                 mock_required_by):
        """A flavor with ephemeral_gb set makes spawn() record
        /dev/sda1 as the default ephemeral device and save the instance."""
        mock_required_by.return_value = False
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        # ephemeral_gb=1 is what triggers the default-device assignment.
        flavor = ironic_utils.get_test_flavor(ephemeral_gb=1)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        instance.flavor = flavor
        mock_node.get_by_instance_uuid.return_value = node
        mock_node.set_provision_state.return_value = mock.MagicMock()
        image_meta = ironic_utils.get_test_image_meta()
        self.driver.spawn(self.ctx, instance, image_meta, [], None)
        self.assertTrue(mock_save.called)
        self.assertEqual('/dev/sda1', instance.default_ephemeral_device)
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
    def test_destroy(self, mock_cleanup_deploy, mock_node):
        """Destroying an ACTIVE node triggers an undeploy ('deleted'
        provision state) and then cleans up the deploy."""
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        network_info = 'foo'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
                                          provision_state=ironic_states.ACTIVE)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        # Simulate ironic finishing the undeploy by clearing the state.
        def fake_set_provision_state(*_):
            node.provision_state = None
        mock_node.get_by_instance_uuid.return_value = node
        mock_node.set_provision_state.side_effect = fake_set_provision_state
        self.driver.destroy(self.ctx, instance, network_info, None)
        mock_node.set_provision_state.assert_called_once_with(node_uuid,
                                                              'deleted')
        mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
        mock_cleanup_deploy.assert_called_with(self.ctx, node,
                                               instance, network_info)
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
    def test_destroy_ignore_unexpected_state(self, mock_cleanup_deploy,
                                             mock_node):
        """A node already DELETING must not get another undeploy request;
        destroy() still performs the local cleanup."""
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        network_info = 'foo'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
                                          provision_state=ironic_states.DELETING)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        mock_node.get_by_instance_uuid.return_value = node
        self.driver.destroy(self.ctx, instance, network_info, None)
        self.assertFalse(mock_node.set_provision_state.called)
        mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
        mock_cleanup_deploy.assert_called_with(self.ctx, node, instance,
                                               network_info)
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
    def _test_destroy_cleaning(self, mock_cleanup_deploy, mock_node,
                               state=None):
        """Shared scenario: destroying a node in a cleaning-type provision
        state skips set_provision_state but still cleans up locally.

        :param state: the ironic provision state to put the node in
        """
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        network_info = 'foo'
        node = ironic_utils.get_test_node(
            driver='fake', uuid=node_uuid,
            provision_state=state)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        mock_node.get_by_instance_uuid.return_value = node
        self.driver.destroy(self.ctx, instance, network_info, None)
        self.assertFalse(mock_node.set_provision_state.called)
        mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
        mock_cleanup_deploy.assert_called_with(self.ctx, node, instance,
                                               network_info)
    def test_destroy_cleaning(self):
        """Destroy while the node is CLEANING skips a second undeploy."""
        self._test_destroy_cleaning(state=ironic_states.CLEANING)
    def test_destroy_cleanwait(self):
        """Destroy while the node is CLEANWAIT skips a second undeploy."""
        self._test_destroy_cleaning(state=ironic_states.CLEANWAIT)
    @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    def test_destroy_trigger_undeploy_fail(self, fake_validate, mock_sps):
        """If triggering the undeploy raises, destroy() propagates the
        exception to the caller."""
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
                                          provision_state=ironic_states.ACTIVE)
        fake_validate.return_value = node
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        mock_sps.side_effect = exception.NovaException()
        self.assertRaises(exception.NovaException, self.driver.destroy,
                          self.ctx, instance, None, None)
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    def _test__unprovision_instance(self, mock_validate_inst, state=None):
        """Shared scenario: _unprovision issues exactly one
        'deleted' provision-state call for a node in *state*.

        :param state: the ironic provision state to put the node in
        """
        fake_ironic_client = mock.Mock()
        node = ironic_utils.get_test_node(
            driver='fake',
            provision_state=state)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node.uuid)
        mock_validate_inst.return_value = node
        self.driver._unprovision(fake_ironic_client, instance, node)
        mock_validate_inst.assert_called_once_with(fake_ironic_client,
                                                   instance)
        fake_ironic_client.call.assert_called_once_with(
            "node.set_provision_state", node.uuid, "deleted")
    def test__unprovision_cleaning(self):
        """_unprovision of a CLEANING node still requests 'deleted'."""
        self._test__unprovision_instance(state=ironic_states.CLEANING)
    def test__unprovision_cleanwait(self):
        """_unprovision of a CLEANWAIT node still requests 'deleted'."""
        self._test__unprovision_instance(state=ironic_states.CLEANWAIT)
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    def test__unprovision_fail_max_retries(self, mock_validate_inst):
        """When the node never leaves ACTIVE, _unprovision gives up after
        api_max_retries polls and raises NovaException."""
        # Two retries -> exactly two validation polls below.
        CONF.set_default('api_max_retries', default=2, group='ironic')
        fake_ironic_client = mock.Mock()
        node = ironic_utils.get_test_node(
            driver='fake',
            provision_state=ironic_states.ACTIVE)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node.uuid)
        mock_validate_inst.return_value = node
        self.assertRaises(exception.NovaException, self.driver._unprovision,
                          fake_ironic_client, instance, node)
        expected_calls = (mock.call(mock.ANY, instance),
                          mock.call(mock.ANY, instance))
        mock_validate_inst.assert_has_calls(expected_calls)
        fake_ironic_client.call.assert_called_once_with(
            "node.set_provision_state", node.uuid, "deleted")
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    def test__unprovision_instance_not_found(self, mock_validate_inst):
        """InstanceNotFound while polling means the teardown already
        completed; _unprovision treats it as success."""
        fake_ironic_client = mock.Mock()
        node = ironic_utils.get_test_node(
            driver='fake', provision_state=ironic_states.DELETING)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node.uuid)
        mock_validate_inst.side_effect = exception.InstanceNotFound(
            instance_id='fake')
        self.driver._unprovision(fake_ironic_client, instance, node)
        mock_validate_inst.assert_called_once_with(fake_ironic_client,
                                                   instance)
        fake_ironic_client.call.assert_called_once_with(
            "node.set_provision_state", node.uuid, "deleted")
    @mock.patch.object(FAKE_CLIENT, 'node')
    def test_destroy_unassociate_fail(self, mock_node):
        """A failure while updating (unassociating) the node propagates,
        after the undeploy has already been requested."""
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
                                          provision_state=ironic_states.ACTIVE)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        mock_node.get_by_instance_uuid.return_value = node
        mock_node.update.side_effect = exception.NovaException()
        self.assertRaises(exception.NovaException, self.driver.destroy,
                          self.ctx, instance, None, None)
        mock_node.set_provision_state.assert_called_once_with(node_uuid,
                                                              'deleted')
        mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
@mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
def test_reboot(self, mock_sp, fake_validate, mock_looping):
node = ironic_utils.get_test_node()
fake_validate.side_effect = [node, node]
fake_looping_call = FakeLoopingCall()
mock_looping.return_value = fake_looping_call
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
self.driver.reboot(self.ctx, instance, None, None)
mock_sp.assert_called_once_with(node.uuid, 'reboot')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
@mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
def test_power_off(self, mock_sp, fake_validate, mock_looping):
self._test_power_on_off(mock_sp, fake_validate, mock_looping,
method_name='power_off')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
@mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
def test_power_on(self, mock_sp, fake_validate, mock_looping):
self._test_power_on_off(mock_sp, fake_validate, mock_looping,
method_name='power_on')
def _test_power_on_off(self, mock_sp, fake_validate, mock_looping,
method_name=None):
node = ironic_utils.get_test_node()
fake_validate.side_effect = [node, node]
fake_looping_call = FakeLoopingCall()
mock_looping.return_value = fake_looping_call
instance = fake_instance.fake_instance_obj(self.ctx,
node=self.instance_uuid)
# Call the method under test here
if method_name == 'power_on':
self.driver.power_on(self.ctx, instance,
utils.get_test_network_info())
mock_sp.assert_called_once_with(node.uuid, 'on')
elif method_name == 'power_off':
self.driver.power_off(instance)
mock_sp.assert_called_once_with(node.uuid, 'off')
    @mock.patch.object(FAKE_CLIENT.node, 'list_ports')
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
    def test_plug_vifs_with_port(self, mock_uvifs, mock_port_udt, mock_lp):
        """_plug_vifs patches the ironic port's extra/vif_port_id with the
        neutron VIF id after first unplugging any stale VIFs."""
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        port = ironic_utils.get_test_port()
        mock_lp.return_value = [port]
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        network_info = utils.get_test_network_info()
        port_id = six.text_type(network_info[0]['id'])
        # The JSON-patch document the driver is expected to send to ironic.
        expected_patch = [{'op': 'add',
                           'path': '/extra/vif_port_id',
                           'value': port_id}]
        self.driver._plug_vifs(node, instance, network_info)
        # asserts
        mock_uvifs.assert_called_once_with(node, instance, network_info)
        mock_lp.assert_called_once_with(node_uuid)
        mock_port_udt.assert_called_with(port.uuid, expected_patch)
@mock.patch.object(FAKE_CLIENT.node, 'get')
@mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
def test_plug_vifs(self, mock__plug_vifs, mock_get):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(uuid=node_uuid)
mock_get.return_value = node
instance = fake_instance.fake_instance_obj(self.ctx,
node=node_uuid)
network_info = utils.get_test_network_info()
self.driver.plug_vifs(instance, network_info)
mock_get.assert_called_once_with(node_uuid)
mock__plug_vifs.assert_called_once_with(node, instance, network_info)
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    @mock.patch.object(FAKE_CLIENT.node, 'list_ports')
    @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
    def test_plug_vifs_count_mismatch(self, mock_uvifs, mock_lp,
                                      mock_port_udt):
        """Requesting more VIFs than the node has ports raises a
        NovaException and leaves every port untouched."""
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        port = ironic_utils.get_test_port()
        mock_lp.return_value = [port]
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        # len(network_info) > len(ports)
        network_info = (utils.get_test_network_info() +
                        utils.get_test_network_info())
        self.assertRaises(exception.NovaException,
                          self.driver._plug_vifs, node, instance,
                          network_info)
        # asserts
        mock_uvifs.assert_called_once_with(node, instance, network_info)
        mock_lp.assert_called_once_with(node_uuid)
        # assert port.update() was not called
        self.assertFalse(mock_port_udt.called)
@mock.patch.object(FAKE_CLIENT.port, 'update')
@mock.patch.object(FAKE_CLIENT.node, 'list_ports')
@mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
def test_plug_vifs_no_network_info(self, mock_uvifs, mock_lp,
mock_port_udt):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(uuid=node_uuid)
port = ironic_utils.get_test_port()
mock_lp.return_value = [port]
instance = fake_instance.fake_instance_obj(self.ctx,
node=node_uuid)
network_info = []
self.driver._plug_vifs(node, instance, network_info)
# asserts
mock_uvifs.assert_called_once_with(node, instance, network_info)
mock_lp.assert_called_once_with(node_uuid)
# assert port.update() was not called
self.assertFalse(mock_port_udt.called)
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    @mock.patch.object(FAKE_CLIENT, 'node')
    def test_unplug_vifs(self, mock_node, mock_update):
        """unplug_vifs removes extra/vif_port_id from a port that has a
        VIF associated."""
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        port = ironic_utils.get_test_port(extra={'vif_port_id': 'fake-vif'})
        mock_node.get.return_value = node
        mock_node.list_ports.return_value = [port]
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        # The JSON-patch document that clears the VIF association.
        expected_patch = [{'op': 'remove', 'path':
                           '/extra/vif_port_id'}]
        self.driver.unplug_vifs(instance,
                                utils.get_test_network_info())
        # asserts
        mock_node.get.assert_called_once_with(node_uuid)
        mock_node.list_ports.assert_called_once_with(node_uuid, detail=True)
        mock_update.assert_called_once_with(port.uuid, expected_patch)
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    @mock.patch.object(FAKE_CLIENT, 'node')
    def test_unplug_vifs_port_not_associated(self, mock_node, mock_update):
        """A port with no vif_port_id in extra is left untouched by
        unplug_vifs."""
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        port = ironic_utils.get_test_port(extra={})
        mock_node.get.return_value = node
        mock_node.list_ports.return_value = [port]
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        self.driver.unplug_vifs(instance, utils.get_test_network_info())
        mock_node.get.assert_called_once_with(node_uuid)
        mock_node.list_ports.assert_called_once_with(node_uuid, detail=True)
        # assert port.update() was not called
        self.assertFalse(mock_update.called)
@mock.patch.object(FAKE_CLIENT.port, 'update')
def test_unplug_vifs_no_network_info(self, mock_update):
instance = fake_instance.fake_instance_obj(self.ctx)
network_info = []
self.driver.unplug_vifs(instance, network_info)
# assert port.update() was not called
self.assertFalse(mock_update.called)
@mock.patch.object(firewall.NoopFirewallDriver, 'unfilter_instance',
create=True)
def test_unfilter_instance(self, mock_ui):
instance = fake_instance.fake_instance_obj(self.ctx)
network_info = utils.get_test_network_info()
self.driver.unfilter_instance(instance, network_info)
mock_ui.assert_called_once_with(instance, network_info)
@mock.patch.object(firewall.NoopFirewallDriver, 'setup_basic_filtering',
create=True)
@mock.patch.object(firewall.NoopFirewallDriver, 'prepare_instance_filter',
create=True)
def test_ensure_filtering_rules_for_instance(self, mock_pif, mock_sbf):
instance = fake_instance.fake_instance_obj(self.ctx)
network_info = utils.get_test_network_info()
self.driver.ensure_filtering_rules_for_instance(instance,
network_info)
mock_sbf.assert_called_once_with(instance, network_info)
mock_pif.assert_called_once_with(instance, network_info)
@mock.patch.object(firewall.NoopFirewallDriver,
'refresh_instance_security_rules', create=True)
def test_refresh_instance_security_rules(self, mock_risr):
instance = fake_instance.fake_instance_obj(self.ctx)
self.driver.refresh_instance_security_rules(instance)
mock_risr.assert_called_once_with(instance)
@mock.patch.object(firewall.NoopFirewallDriver,
'refresh_provider_fw_rules', create=True)
def test_refresh_provider_fw_rules(self, mock_rpfr):
fake_instance.fake_instance_obj(self.ctx)
self.driver.refresh_provider_fw_rules()
mock_rpfr.assert_called_once_with()
@mock.patch.object(firewall.NoopFirewallDriver,
'refresh_security_group_members', create=True)
def test_refresh_security_group_members(self, mock_rsgm):
fake_group = 'fake-security-group-members'
self.driver.refresh_security_group_members(fake_group)
mock_rsgm.assert_called_once_with(fake_group)
    @mock.patch.object(firewall.NoopFirewallDriver,
                       'refresh_instance_security_rules', create=True)
    def test_refresh_security_group_rules(self, mock_risr):
        """Security-group-rule refresh ends up in the firewall's
        refresh_instance_security_rules."""
        # NOTE(review): despite the test name, this exercises
        # driver.refresh_instance_security_rules with a group argument --
        # possibly a copy/paste from the instance-rules test; confirm
        # against the driver's refresh_security_group_rules implementation.
        fake_group = 'fake-security-group-members'
        self.driver.refresh_instance_security_rules(fake_group)
        mock_risr.assert_called_once_with(fake_group)
    @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
    @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    @mock.patch.object(objects.Instance, 'save')
    def _test_rebuild(self, mock_save, mock_get, mock_driver_fields,
                      mock_set_pstate, mock_looping, mock_wait_active,
                      preserve=False):
        """Shared scenario: rebuild() re-adds driver fields, moves the node
        to REBUILD, and polls until the deploy is active again.

        :param preserve: value forwarded as preserve_ephemeral
        """
        node_uuid = uuidutils.generate_uuid()
        node = ironic_utils.get_test_node(uuid=node_uuid,
                                          instance_uuid=self.instance_uuid,
                                          instance_type_id=5)
        mock_get.return_value = node
        image_meta = ironic_utils.get_test_image_meta()
        flavor_id = 5
        flavor = objects.Flavor(flavor_id=flavor_id, name='baremetal')
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   uuid=self.instance_uuid,
                                                   node=node_uuid,
                                                   instance_type_id=flavor_id)
        instance.flavor = flavor
        fake_looping_call = FakeLoopingCall()
        mock_looping.return_value = fake_looping_call
        self.driver.rebuild(
            context=self.ctx, instance=instance, image_meta=image_meta,
            injected_files=None, admin_password=None, bdms=None,
            detach_block_devices=None, attach_block_devices=None,
            preserve_ephemeral=preserve)
        # The instance must be saved while still in the REBUILDING state.
        mock_save.assert_called_once_with(
            expected_task_state=[task_states.REBUILDING])
        mock_driver_fields.assert_called_once_with(node, instance, image_meta,
                                                   flavor, preserve)
        mock_set_pstate.assert_called_once_with(node_uuid,
                                                ironic_states.REBUILD)
        mock_looping.assert_called_once_with(mock_wait_active,
                                             FAKE_CLIENT_WRAPPER,
                                             instance)
        fake_looping_call.start.assert_called_once_with(
            interval=CONF.ironic.api_retry_interval)
        fake_looping_call.wait.assert_called_once_with()
    def test_rebuild_preserve_ephemeral(self):
        """Rebuild with preserve_ephemeral=True flows through unchanged."""
        self._test_rebuild(preserve=True)
    def test_rebuild_no_preserve_ephemeral(self):
        """Rebuild with preserve_ephemeral=False flows through unchanged."""
        self._test_rebuild(preserve=False)
    @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
    @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    @mock.patch.object(objects.Instance, 'save')
    def test_rebuild_failures(self, mock_save, mock_get, mock_driver_fields,
                              mock_set_pstate):
        """Any failure while triggering the REBUILD provision state is
        translated into InstanceDeployFailure."""
        node_uuid = uuidutils.generate_uuid()
        node = ironic_utils.get_test_node(uuid=node_uuid,
                                          instance_uuid=self.instance_uuid,
                                          instance_type_id=5)
        mock_get.return_value = node
        image_meta = ironic_utils.get_test_image_meta()
        flavor_id = 5
        flavor = objects.Flavor(flavor_id=flavor_id, name='baremetal')
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   uuid=self.instance_uuid,
                                                   node=node_uuid,
                                                   instance_type_id=flavor_id)
        instance.flavor = flavor
        # Nova-side and both ironic-side failure flavours must all map to
        # the same InstanceDeployFailure.
        exceptions = [
            exception.NovaException(),
            ironic_exception.BadRequest(),
            ironic_exception.InternalServerError(),
        ]
        for e in exceptions:
            mock_set_pstate.side_effect = e
            self.assertRaises(exception.InstanceDeployFailure,
                self.driver.rebuild,
                context=self.ctx, instance=instance, image_meta=image_meta,
                injected_files=None, admin_password=None, bdms=None,
                detach_block_devices=None, attach_block_devices=None)
@mock.patch.object(instance_metadata, 'InstanceMetadata')
@mock.patch.object(configdrive, 'ConfigDriveBuilder')
class IronicDriverGenerateConfigDriveTestCase(test.NoDBTestCase):
    """Tests for IronicDriver._generate_configdrive.

    The class-level patches inject mock_cd_builder / mock_instance_meta
    into every test method (in reverse decorator order).
    """
    @mock.patch.object(cw, 'IronicClientWrapper',
                       lambda *_: FAKE_CLIENT_WRAPPER)
    def setUp(self):
        """Build a driver wired to the fake ironic client plus one test
        node/instance pair."""
        super(IronicDriverGenerateConfigDriveTestCase, self).setUp()
        self.flags(**IRONIC_FLAGS)
        self.driver = ironic_driver.IronicDriver(None)
        self.driver.virtapi = fake.FakeVirtAPI()
        self.ctx = nova_context.get_admin_context()
        node_uuid = uuidutils.generate_uuid()
        self.node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        self.instance = fake_instance.fake_instance_obj(self.ctx,
                                                        node=node_uuid)
        self.network_info = utils.get_test_network_info()
    def test_generate_configdrive(self, mock_cd_builder, mock_instance_meta):
        """Happy path: the builder is fed the generated instance metadata."""
        mock_instance_meta.return_value = 'fake-instance'
        mock_make_drive = mock.MagicMock(make_drive=lambda *_: None)
        mock_cd_builder.return_value.__enter__.return_value = mock_make_drive
        self.driver._generate_configdrive(self.instance, self.node,
                                          self.network_info)
        mock_cd_builder.assert_called_once_with(instance_md='fake-instance')
        mock_instance_meta.assert_called_once_with(self.instance,
            network_info=self.network_info, extra_md={}, content=None)
    def test_generate_configdrive_fail(self, mock_cd_builder,
                                       mock_instance_meta):
        """A ConfigDriveMountFailed from the builder propagates unchanged."""
        mock_cd_builder.side_effect = exception.ConfigDriveMountFailed(
            operation='foo', error='error')
        mock_instance_meta.return_value = 'fake-instance'
        mock_make_drive = mock.MagicMock(make_drive=lambda *_: None)
        mock_cd_builder.return_value.__enter__.return_value = mock_make_drive
        self.assertRaises(exception.ConfigDriveMountFailed,
                          self.driver._generate_configdrive,
                          self.instance, self.node, self.network_info)
        mock_cd_builder.assert_called_once_with(instance_md='fake-instance')
        mock_instance_meta.assert_called_once_with(self.instance,
            network_info=self.network_info, extra_md={}, content=None)
| {
"content_hash": "21ec7b88726c50144ce3bdb3351f8394",
"timestamp": "",
"source": "github",
"line_count": 1579,
"max_line_length": 79,
"avg_line_length": 47.86763774540849,
"alnum_prop": 0.6103356574891179,
"repo_name": "shail2810/nova",
"id": "69c87de3fe09239f36555e5b31a6ec16eafd30cc",
"size": "76248",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/unit/virt/ironic/test_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16525734"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "285480"
}
],
"symlink_target": ""
} |
"""
From TinyDB
https://github.com/msiemens/tinydb/tree/master/tests
"""
from tinydb import where
from tinydb.operations import delete, increment, decrement
from conftest import db_middleware_populated_withkeylist, db_middleware_populated
import pytest
@pytest.mark.parametrize('db', [db_middleware_populated_withkeylist(), db_middleware_populated()])
def test_delete(db):
    """delete('int') removes the 'int' field from every matching document."""
    cond = where('char') == 'a'
    db.update(delete('int'), cond)
    assert 'int' not in db.get(cond)
@pytest.mark.parametrize('db', [db_middleware_populated_withkeylist(), db_middleware_populated()])
def test_increment(db):
    """increment('int') bumps the stored value from 1 to 2."""
    cond = where('char') == 'a'
    db.update(increment('int'), cond)
    assert db.get(cond)['int'] == 2
@pytest.mark.parametrize('db', [db_middleware_populated_withkeylist(), db_middleware_populated()])
def test_decrement(db):
    """decrement('int') drops the stored value from 1 to 0."""
    cond = where('char') == 'a'
    db.update(decrement('int'), cond)
    assert db.get(cond)['int'] == 0
| {
"content_hash": "9b14ce04753ac1516b9a94f3148c4530",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 98,
"avg_line_length": 36.19230769230769,
"alnum_prop": 0.6928799149840595,
"repo_name": "veedo/tinydb-keystore",
"id": "23f3abde7ab9722d39bd315a9714b86d9026197a",
"size": "941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_operations.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30720"
}
],
"symlink_target": ""
} |
from msrest.service_client import ServiceClient
from msrest import Configuration, Serializer, Deserializer
from .version import VERSION
from .operations.paths import Paths
from .operations.queries import Queries
from .operations.path_items import PathItems
from . import models
class AutoRestUrlTestServiceConfiguration(Configuration):
    """Configuration for AutoRestUrlTestService.

    Every parameter passed in is kept as an instance attribute so it can
    be read back by the operation groups.

    :param global_string_path: A string value 'globalItemStringPath' that
     appears in the path
    :type global_string_path: str
    :param global_string_query: should contain value null
    :type global_string_query: str
    :param str base_url: Service URL
    :param str filepath: Existing config
    """

    def __init__(
            self, global_string_path, global_string_query=None, base_url=None, filepath=None):

        # Validate caller-supplied parameters before initialising the base
        # Configuration (order of checks determines which error wins).
        if global_string_path is None:
            raise ValueError("Parameter 'global_string_path' must not be None.")
        if not isinstance(global_string_path, str):
            raise TypeError("Parameter 'global_string_path' must be str.")
        if global_string_query is not None and not isinstance(global_string_query, str):
            raise TypeError("Optional parameter 'global_string_query' must be str.")
        # Fall back to the test-server default when no URL is given.
        base_url = base_url or 'http://localhost'

        super(AutoRestUrlTestServiceConfiguration, self).__init__(base_url, filepath)

        self.add_user_agent('autoresturltestservice/{}'.format(VERSION))

        self.global_string_path = global_string_path
        self.global_string_query = global_string_query
class AutoRestUrlTestService(object):
    """Test Infrastructure for AutoRest.

    :param config: Configuration for client.
    :type config: AutoRestUrlTestServiceConfiguration

    :ivar paths: Paths operations
    :vartype paths: .operations.Paths
    :ivar queries: Queries operations
    :vartype queries: .operations.Queries
    :ivar path_items: PathItems operations
    :vartype path_items: .operations.PathItems
    """

    def __init__(self, config):
        self._client = ServiceClient(None, config)

        # Hand only the actual model classes to the deserializer.
        client_models = {
            name: obj for name, obj in models.__dict__.items()
            if isinstance(obj, type)}
        self._serialize = Serializer()
        self._deserialize = Deserializer(client_models)

        self.config = config
        # Each operation group shares the same client/serialization plumbing.
        shared = (self._client, self.config,
                  self._serialize, self._deserialize)
        self.paths = Paths(*shared)
        self.queries = Queries(*shared)
        self.path_items = PathItems(*shared)
| {
"content_hash": "8d7607ff3e113feb4b4f471cf41277be",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 94,
"avg_line_length": 37.80555555555556,
"alnum_prop": 0.692505510653931,
"repo_name": "BurtBiel/autorest",
"id": "34efde192fd6b78364837458e9a5c4c66c3a8920",
"size": "3196",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/Url/autoresturltestservice/auto_rest_url_test_service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "819"
},
{
"name": "C#",
"bytes": "10060202"
},
{
"name": "CSS",
"bytes": "110"
},
{
"name": "HTML",
"bytes": "274"
},
{
"name": "Java",
"bytes": "3946482"
},
{
"name": "JavaScript",
"bytes": "4531237"
},
{
"name": "PowerShell",
"bytes": "5703"
},
{
"name": "Python",
"bytes": "2038698"
},
{
"name": "Ruby",
"bytes": "223391"
},
{
"name": "Shell",
"bytes": "142"
},
{
"name": "TypeScript",
"bytes": "173662"
}
],
"symlink_target": ""
} |
DESCRIPTION="This script applies clustering"
import numpy as np
import argparse
import glob2
import logging
import re
from os.path import basename
from os.path import dirname
import sys
sys.path.append(dirname(__file__))
from my_target_counter import TargetCounter
logger = logging.getLogger(__file__)
# HOW TO ADD A NEW ALGORITHM
# 1. Add a branch for your algorithm to 'get_model'.
# 2. Add a branch to 'my_fit_predict' if the algorithm has an outlier
#    cluster and its outlier label is not 0.
#from memory_profiler import profile
#@profile
def main(args):
    """Run the selected clustering algorithm on every pending matrix.

    For each source file ``W_###.csv`` under ``args.src_dir`` that has no
    corresponding ``y_###.dat`` in ``args.dest_dir`` yet, load the matrix,
    cluster it, and write the integer labels to the destination file.

    :param args: parsed command-line arguments (algorithm, src_dir,
                 dest_dir, clustering options, count_targets flag)
    """
    src_dir = args.src_dir
    dest_dir = args.dest_dir
    # Raw string: "\d" in a plain literal is an invalid escape sequence
    # (DeprecationWarning today, an error in future Python versions).
    src_pat = r"W_(\d{3}).csv$"
    tar_template = "y_%s.dat"
    tc = TargetCounter(src_pat, tar_template, src_dir, dest_dir)
    target_ids, src_files = tc.listup_targets()
    n_targets = len(target_ids)
    if args.count_targets:
        print(n_targets)
        sys.exit()
    if n_targets == 0:
        # Logger.warn is deprecated; warning() is the documented spelling.
        logger.warning("There are no before-process src files in '%s'" % src_dir)
        sys.exit()
    model = get_model(args)
    epsilon = 0.1 ** 100  # tiny positive floor (1e-100)
    for target_id, src_file in zip(target_ids, src_files):
        dest_file = "%s/%s" % (args.dest_dir, tc.id2destfile(target_id))
        print(src_file)
        W = np.loadtxt(src_file, delimiter=",")
        if 'affinity' in src_dir:
            # Clamp zeros so algorithms on the affinity matrix never see
            # exactly-zero entries.
            W[W < epsilon] = epsilon
        y = my_fit_predict(model, W, args)
        np.savetxt(dest_file, y, fmt="%d")
def my_fit_predict(model, X, args):
    """Fit *model* on X and return labels normalised across algorithms.

    DBSCAN marks outliers with the label -1; shifting its labels up by one
    makes 0 the outlier label, consistent with the other algorithms.

    :param model: clustering model exposing ``fit_predict``
    :param X: (pre-computed) affinity/distance matrix
    :param args: parsed arguments; only ``args.algorithm`` is consulted
    :return: per-sample cluster labels
    """
    labels = model.fit_predict(X)
    if args.algorithm == 'DBSCAN':
        labels = labels + 1
    return labels
def get_model(args):
    """Build and return an unfitted clustering model for ``args.algorithm``.

    Imports happen lazily per branch so only the library required by the
    chosen algorithm needs to be installed. Exits the process with a
    warning for unknown or not-yet-implemented algorithms.

    :param args: parsed arguments (algorithm plus per-algorithm options)
    :return: a model object exposing ``fit_predict``
    """
    alg = args.algorithm
    if alg == 'SC':
        from sklearn.cluster import SpectralClustering
        model = SpectralClustering(
            n_clusters=args.n_clusters,
            eigen_solver='arpack',
            random_state=None,
            affinity='precomputed',
            assign_labels='discretize',
            n_jobs=1)
    elif alg == 'IDC':
        from isolated_dense_clustering import IsolatedDenseClustering
        search_range = range(args.min_clusters, args.max_clusters)
        model = IsolatedDenseClustering(
            search_range=search_range,
            affinity='precomputed',
            assign_labels='discretize',
            n_jobs=1,
            eigen_solver='arpack',
            random_state=None)
    elif alg == 'SG':
        import spectral_gap
        model = spectral_gap.SpectralClusteringSG(
            max_clusters=args.max_clusters,
            eigen_solver='arpack',
            random_state=None,
            affinity='precomputed',
            assign_labels='discretize')
    elif alg == 'STSC':
        import stsc_wrapper
        model = stsc_wrapper.SelfTuningSpectralClustering(
            n_clusters_max=args.max_clusters)
    elif alg in ('MODULARITY', 'SEA'):
        # BUG FIX: these branches used to be bare `pass`, so `model` was
        # never bound and `return model` raised UnboundLocalError. Fail
        # with an explicit message until the algorithms are implemented.
        logger.warning("Algorithm '%s' is not implemented yet." % alg)
        sys.exit()
    elif alg == 'DBSCAN':
        from sklearn.cluster import DBSCAN
        model = DBSCAN(
            eps=args.eps,
            min_samples=args.min_samples,
            metric="precomputed")
    else:
        # Logger.warn is deprecated; warning() is the documented spelling.
        logger.warning("Unknown Algorithm '%s' is directed." % alg)
        sys.exit()
    return model
# Command-line interface.  The keyword arguments that previously spelled out
# argparse's own defaults (action='store', nargs=None, const=None,
# choices=None, metavar=None) are omitted; behavior is unchanged.
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument(
    'algorithm', type=str,
    help='Clustering algorithm (SC|IDC|SG|STSC|MODULARITY|SEA|DBSCAN).')
parser.add_argument(
    'src_dir', type=str,
    help='Directory path where the source data are located.')
parser.add_argument(
    'dest_dir', type=str,
    help='Directory path where the formatted data will be located.')
parser.add_argument(
    '--n_clusters', type=int, default=None,
    help='Number of clusters.')
parser.add_argument(
    '--min_clusters', type=int, default=3,
    help='Minimum number of clusters to set the search range.')
parser.add_argument(
    '--max_clusters', type=int, default=20,
    help='Maximum number of clusters to set the search range.')
parser.add_argument(
    '--eps', type=float, default=0.5,
    help='eps for DBSCAN.')
parser.add_argument(
    '--min_samples', type=int, default=3,
    help='min_samples for DBSCAN.')
parser.add_argument(
    '--count_targets', action="store_true", default=False,
    help='count processing targets, and exit.')

if __name__ == '__main__':
    cli_args = parser.parse_args()
    # Log everything to stderr while running from the command line.
    logger.setLevel(logging.DEBUG)
    stream_handler = logging.StreamHandler()
    logger.addHandler(stream_handler)
    main(cli_args)
| {
"content_hash": "a2ce33f882023caf274f1f58350d90b0",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 107,
"avg_line_length": 28.738916256157637,
"alnum_prop": 0.5541652382584847,
"repo_name": "AtsushiHashimoto/exp_idc",
"id": "1f8e439c402baf9ccd7211f607870939eccbc746",
"size": "5873",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/do_clustering.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Shell",
"bytes": "17449"
}
],
"symlink_target": ""
} |
import click
from fabric.api import (
run,
env,
cd
)
from fabric.contrib.project import rsync_project
from .. import app, config, logger, cache
from .bootstrap import init_project
from ..init import run_app
# Root command group; chain=True lets several subcommands run in one
# invocation (e.g. `cli upload ... deploy`).  Subcommands are attached
# below via cli.add_command().
@click.group(chain=True)
def cli():
    pass
@click.command('init')
@click.argument('name')
def init(name):
    """
    Init service
    """
    # Scaffold a new project; the license is currently hard-coded to MIT.
    init_project(name, license="mit")
@click.command('shell')
def start_shell():
    """
    Start shell
    """
    # 'http' is imported (though unused here) so it is available by name
    # inside the embedded IPython session's namespace.
    from luna.clients import http
    from IPython import embed
    embed()
@click.command('serve')
def start_server():
    """
    Start server
    """
    # NOTE(review): the 'imp' module is deprecated since Python 3.4;
    # consider migrating to importlib.
    from imp import load_package
    name, host, port = app.name, app.config["HOST"], app.config["PORT"]
    if name != "default":
        # Load the service package by its own name; presumably its import
        # side effects register routes on the app -- confirm against run_app.
        load_package(name, name)
    logger.info("Server starting on http://{}:{}".format(host, port))
    run_app()
@click.command('upload')
@click.argument('source')
@click.argument('dest')
@click.option('--user', default="runner", help="user")
@click.option('--host', default=config["deploy"]["host"], help="host")
def upload_files(source, dest, user, host):
    """
    Upload files
    """
    # Point fabric at the requested remote host for this invocation.
    env.user = user
    env.host_string = host
    env.use_ssh_config = True
    excluded = (".git", ".idea", "node_modules", "build")
    rsync_project(local_dir=source, remote_dir=dest, exclude=excluded)
@click.command('deploy')
@click.option('--user', default="runner", help="user")
@click.option('--host', default=config["deploy"]["host"], help="host")
def deploy(user, host):
    """
    Deploy service
    """
    # Point fabric at the target host for this run.
    env.user = user
    env.host_string = host
    env.use_ssh_config = True
    name = config['name']
    remote_dir = "/srv/{}".format(name)
    rsync_project(local_dir=".", remote_dir=remote_dir,
                  exclude=(".git", ".idea", "node_modules", "build"))
    with cd(remote_dir):
        # Bug fix: the original called .format(name) on a string that has no
        # '{}' placeholder -- a confusing no-op.  The command is unchanged.
        run("sudo /srv/venv/luna/bin/pip install .")
        run("sudo supervisorctl restart {}".format(name))
# Register every subcommand on the root group.
cli.add_command(init)
cli.add_command(start_shell)
cli.add_command(start_server)
cli.add_command(upload_files)
cli.add_command(deploy)
| {
"content_hash": "547827f68742a474c27da3801793e34a",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 107,
"avg_line_length": 23.53932584269663,
"alnum_prop": 0.6348448687350835,
"repo_name": "docloud/luna",
"id": "71b1d1a20616f979fe9804e26b2555bd49c3ac20",
"size": "2110",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "luna/commands/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "687"
},
{
"name": "Python",
"bytes": "24893"
}
],
"symlink_target": ""
} |
from jsonrpc import ServiceProxy

# Connect to the local wallet's JSON-RPC service and rotate its passphrase.
# (Python 2 script: raw_input is intentional.)
proxy = ServiceProxy("http://127.0.0.1:9755")
old_passphrase = raw_input("Enter old wallet passphrase: ")
new_passphrase = raw_input("Enter new wallet passphrase: ")
proxy.walletpassphrasechange(old_passphrase, new_passphrase)
| {
"content_hash": "e45f3788de3b65e63da5581b2e5ba59e",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 49,
"avg_line_length": 44,
"alnum_prop": 0.759090909090909,
"repo_name": "hobbitcoin-team/hobbitcoin",
"id": "100b79d073af62b0bc9e4f8893dfcf1e8ec12a35",
"size": "220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/wallettools/walletchangepass.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "103297"
},
{
"name": "C++",
"bytes": "2527091"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "IDL",
"bytes": "14722"
},
{
"name": "Objective-C",
"bytes": "5864"
},
{
"name": "Python",
"bytes": "69728"
},
{
"name": "Shell",
"bytes": "9702"
},
{
"name": "TypeScript",
"bytes": "5244670"
}
],
"symlink_target": ""
} |
from rpython.jit.codewriter.flatten import Register, ListOfKind, Label, TLabel
from rpython.jit.codewriter.jitcode import SwitchDictDescr
# Some instructions require liveness information (the ones that can end up
# in generate_guard() in pyjitpl.py). This is done by putting special
# space operations called '-live-' in the graph. They turn into '-live-'
# operation in the ssarepr. Then the present module expands the arguments
# of the '-live-' operations to also include all values that are alive at
# this point (written to before, and read afterwards). You can also force
# extra variables to be alive by putting them as args of the '-live-'
# operation in the first place.
# For this to work properly, a special operation called '---' must be
# used to mark unreachable places (e.g. just after a 'goto').
# ____________________________________________________________
def compute_liveness(ssarepr):
    """Run the backward liveness pass over ssarepr until the per-label
    alive-sets reach a fixed point."""
    label2alive = {}
    keep_going = True
    while keep_going:
        keep_going = _compute_liveness_must_continue(ssarepr, label2alive)
def _compute_liveness_must_continue(ssarepr, label2alive):
    """One backward sweep over ssarepr.insns, updating label2alive and
    expanding '-live-' markers in place.  Returns True when some label's
    alive-set grew, i.e. another sweep is needed to reach the fixed point."""
    alive = set()   # registers read by some instruction after the current one
    must_continue = False
    def follow_label(lbl):
        # Everything alive at the jump target is alive before the jump.
        alive_at_point = label2alive.get(lbl.name, ())
        alive.update(alive_at_point)
    # Walk backwards so 'alive' always describes "read later than here".
    for i in range(len(ssarepr.insns)-1, -1, -1):
        insn = ssarepr.insns[i]
        if isinstance(insn[0], Label):
            # Merge the current alive set into this label's set; if it grew,
            # jumps targeting this label may now see more live registers, so
            # the caller must iterate again.
            alive_at_point = label2alive.setdefault(insn[0].name, set())
            prevlength = len(alive_at_point)
            alive_at_point.update(alive)
            if prevlength != len(alive_at_point):
                must_continue = True
            continue
        if insn[0] == '-live-':
            # Expand the marker: keep explicit TLabel args, add any Register
            # args to the alive set, then rewrite the instruction so it lists
            # every currently-alive register followed by the labels.
            labels = []
            for x in insn[1:]:
                if isinstance(x, Register):
                    alive.add(x)
                elif isinstance(x, TLabel):
                    follow_label(x)
                    labels.append(x)
            ssarepr.insns[i] = insn[:1] + tuple(alive) + tuple(labels)
            continue
        if insn[0] == '---':
            # Unreachable point (e.g. just after a 'goto'): nothing is alive.
            alive = set()
            continue
        args = insn[1:]
        #
        if len(args) >= 2 and args[-2] == '->':
            # 'op args -> reg': the result register is written here, hence it
            # is not alive before this instruction.
            reg = args[-1]
            assert isinstance(reg, Register)
            alive.discard(reg)
            args = args[:-2]
        #
        # Every register this instruction reads (directly, inside a
        # ListOfKind, or via a jump/switch target) becomes alive.
        for x in args:
            if isinstance(x, Register):
                alive.add(x)
            elif isinstance(x, ListOfKind):
                for y in x:
                    if isinstance(y, Register):
                        alive.add(y)
            elif isinstance(x, TLabel):
                follow_label(x)
            elif isinstance(x, SwitchDictDescr):
                for key, label in x._labels:
                    follow_label(label)
    return must_continue
| {
"content_hash": "370387bc46932e7a3011d8701476ef58",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 78,
"avg_line_length": 35.25316455696203,
"alnum_prop": 0.5558348294434471,
"repo_name": "oblique-labs/pyVM",
"id": "6110df609879b4572cf9c05d7e5cfa7e72b5ef55",
"size": "2785",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rpython/jit/codewriter/liveness.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "161293"
},
{
"name": "Awk",
"bytes": "271"
},
{
"name": "Batchfile",
"bytes": "5289"
},
{
"name": "C",
"bytes": "771638"
},
{
"name": "C++",
"bytes": "12850"
},
{
"name": "Emacs Lisp",
"bytes": "3149"
},
{
"name": "HCL",
"bytes": "155"
},
{
"name": "M4",
"bytes": "12737"
},
{
"name": "Makefile",
"bytes": "35222"
},
{
"name": "Objective-C",
"bytes": "2224"
},
{
"name": "Python",
"bytes": "18329219"
},
{
"name": "Shell",
"bytes": "15396"
},
{
"name": "Vim script",
"bytes": "1107"
}
],
"symlink_target": ""
} |
secure_scheme_headers = {'X-FORWARDED-PROTO': 'https'}
| {
"content_hash": "039bd4e84e1a55e81ee77dca18c633ef",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 54,
"avg_line_length": 55,
"alnum_prop": 0.7090909090909091,
"repo_name": "isb-cgc/ISB-CGC-API",
"id": "08757196e3361c5ff848483afc6a4d196a3f435d",
"size": "72",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gunicorn.conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2792"
},
{
"name": "HTML",
"bytes": "5755"
},
{
"name": "Python",
"bytes": "244162"
},
{
"name": "Shell",
"bytes": "6003"
}
],
"symlink_target": ""
} |
"""Tests for keypair API."""
from oslo_config import cfg
import six
from nova.compute import api as compute_api
from nova import context
from nova import db
from nova import exception
from nova.objects import keypair as keypair_obj
from nova import quota
from nova.tests.unit.compute import test_compute
from nova.tests.unit import fake_crypto
from nova.tests.unit import fake_notifier
from nova.tests.unit.objects import test_keypair
# Module-level handles: oslo.config options and the nova quota engine.
CONF = cfg.CONF
QUOTAS = quota.QUOTAS
class KeypairAPITestCase(test_compute.BaseTestCase):
    """Base fixture for keypair API tests: replaces the nova.db keypair
    calls with an in-memory fake holding one pre-existing SSH key pair."""
    def setUp(self):
        super(KeypairAPITestCase, self).setUp()
        self.keypair_api = compute_api.KeypairAPI()
        self.ctxt = context.RequestContext('fake', 'fake')
        self._keypair_db_call_stubs()
        # The single key pair the fake DB layer knows about.
        self.existing_key_name = 'fake existing key name'
        self.pub_key = ('ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLnVkqJu9WVf'
                        '/5StU3JCrBR2r1s1j8K1tux+5XeSvdqaM8lMFNorzbY5iyoBbR'
                        'S56gy1jmm43QsMPJsrpfUZKcJpRENSe3OxIIwWXRoiapZe78u/'
                        'a9xKwj0avFYMcws9Rk9iAB7W4K1nEJbyCPl5lRBoyqeHBqrnnu'
                        'XWEgGxJCK0Ah6wcOzwlEiVjdf4kxzXrwPHyi7Ea1qvnNXTziF8'
                        'yYmUlH4C8UXfpTQckwSwpDyxZUc63P8q+vPbs3Q2kw+/7vvkCK'
                        'HJAXVI+oCiyMMfffoTq16M1xfV58JstgtTqAXG+ZFpicGajREU'
                        'E/E3hO5MGgcHmyzIrWHKpe1n3oEGuz')
        self.fingerprint = '4e:48:c6:a0:4a:f9:dd:b5:4c:85:54:5a:af:43:47:5a'
        self.keypair_type = keypair_obj.KEYPAIR_TYPE_SSH
        self.key_destroyed = False
    def _keypair_db_call_stubs(self):
        """Install in-memory fakes for the four nova.db keypair calls."""
        def db_key_pair_get_all_by_user(context, user_id):
            return [dict(test_keypair.fake_keypair,
                         name=self.existing_key_name,
                         public_key=self.pub_key,
                         fingerprint=self.fingerprint)]
        def db_key_pair_create(context, keypair):
            return dict(test_keypair.fake_keypair, **keypair)
        def db_key_pair_destroy(context, user_id, name):
            # Only the pre-existing key can be "destroyed".
            if name == self.existing_key_name:
                self.key_destroyed = True
        def db_key_pair_get(context, user_id, name):
            # Lookups fail once the key has been destroyed.
            if name == self.existing_key_name and not self.key_destroyed:
                return dict(test_keypair.fake_keypair,
                            name=self.existing_key_name,
                            public_key=self.pub_key,
                            fingerprint=self.fingerprint)
            else:
                raise exception.KeypairNotFound(user_id=user_id, name=name)
        self.stubs.Set(db, "key_pair_get_all_by_user",
                       db_key_pair_get_all_by_user)
        self.stubs.Set(db, "key_pair_create",
                       db_key_pair_create)
        self.stubs.Set(db, "key_pair_destroy",
                       db_key_pair_destroy)
        self.stubs.Set(db, "key_pair_get",
                       db_key_pair_get)
    def _check_notifications(self, action='create', key_name='foo'):
        """Assert a start/end notification pair was emitted for `action`."""
        self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
        n1 = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual('INFO', n1.priority)
        self.assertEqual('keypair.%s.start' % action, n1.event_type)
        self.assertEqual('api.%s' % CONF.host, n1.publisher_id)
        self.assertEqual('fake', n1.payload['user_id'])
        self.assertEqual('fake', n1.payload['tenant_id'])
        self.assertEqual(key_name, n1.payload['key_name'])
        n2 = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual('INFO', n2.priority)
        self.assertEqual('keypair.%s.end' % action, n2.event_type)
        self.assertEqual('api.%s' % CONF.host, n2.publisher_id)
        self.assertEqual('fake', n2.payload['user_id'])
        self.assertEqual('fake', n2.payload['tenant_id'])
        self.assertEqual(key_name, n2.payload['key_name'])
class CreateImportSharedTestMixIn(object):
    """Tests shared between create and import_key.
    Mix-in pattern is used here so that these `test_*` methods aren't picked
    up by the test runner unless they are part of a 'concrete' test case.
    Concrete subclasses must set `func_name` to the KeypairAPI method name.
    """
    def assertKeypairRaises(self, exc_class, expected_message, name):
        """Call the API method named by self.func_name and assert it raises
        exc_class with exactly expected_message."""
        func = getattr(self.keypair_api, self.func_name)
        args = []
        if self.func_name == 'import_key_pair':
            # import_key_pair additionally takes the public key and key type.
            args.append(self.pub_key)
            args.append(self.keypair_type)
        exc = self.assertRaises(exc_class, func, self.ctxt, self.ctxt.user_id,
                                name, *args)
        self.assertEqual(expected_message, six.text_type(exc))
    def assertInvalidKeypair(self, expected_message, name):
        msg = 'Keypair data is invalid: %s' % expected_message
        self.assertKeypairRaises(exception.InvalidKeypair, msg, name)
    def test_name_too_short(self):
        msg = ('Keypair name must be string and between 1 '
               'and 255 characters long')
        self.assertInvalidKeypair(msg, '')
    def test_name_too_long(self):
        msg = ('Keypair name must be string and between 1 '
               'and 255 characters long')
        self.assertInvalidKeypair(msg, 'x' * 256)
    def test_invalid_chars(self):
        msg = "Keypair name contains unsafe characters"
        self.assertInvalidKeypair(msg, '* BAD CHARACTERS! *')
    def test_already_exists(self):
        def db_key_pair_create_duplicate(context, keypair):
            raise exception.KeyPairExists(key_name=keypair.get('name', ''))
        self.stubs.Set(db, "key_pair_create", db_key_pair_create_duplicate)
        msg = ("Key pair '%(key_name)s' already exists." %
               {'key_name': self.existing_key_name})
        self.assertKeypairRaises(exception.KeyPairExists, msg,
                                 self.existing_key_name)
    def test_quota_limit(self):
        def fake_quotas_count(self, context, resource, *args, **kwargs):
            # Report the quota as already fully consumed.
            return CONF.quota_key_pairs
        self.stubs.Set(QUOTAS, "count", fake_quotas_count)
        msg = "Maximum number of key pairs exceeded"
        self.assertKeypairRaises(exception.KeypairLimitExceeded, msg, 'foo')
class CreateKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn):
    """Exercise KeypairAPI.create_key_pair for both supported key types."""

    func_name = 'create_key_pair'

    def _check_success(self):
        """Create a key pair and verify name, type and notifications."""
        created, private_key = self.keypair_api.create_key_pair(
            self.ctxt, self.ctxt.user_id, 'foo', key_type=self.keypair_type)
        self.assertEqual('foo', created['name'])
        self.assertEqual(self.keypair_type, created['type'])
        self._check_notifications()

    def test_success_ssh(self):
        self._check_success()

    def test_success_x509(self):
        self.keypair_type = keypair_obj.KEYPAIR_TYPE_X509
        self._check_success()
class ImportKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn):
    """Exercise KeypairAPI.import_key_pair for both supported key types."""

    func_name = 'import_key_pair'

    def _check_success(self):
        """Import self.pub_key and verify every returned keypair field."""
        imported = self.keypair_api.import_key_pair(self.ctxt,
                                                    self.ctxt.user_id,
                                                    'foo',
                                                    self.pub_key,
                                                    self.keypair_type)
        self.assertEqual('foo', imported['name'])
        self.assertEqual(self.keypair_type, imported['type'])
        self.assertEqual(self.fingerprint, imported['fingerprint'])
        self.assertEqual(self.pub_key, imported['public_key'])
        self.assertEqual(self.keypair_type, imported['type'])
        self._check_notifications(action='import')

    def test_success_ssh(self):
        self._check_success()

    def test_success_x509(self):
        certif, fingerprint = fake_crypto.get_x509_cert_and_fingerprint()
        self.keypair_type = keypair_obj.KEYPAIR_TYPE_X509
        self.pub_key = certif
        self.fingerprint = fingerprint
        self._check_success()

    def test_bad_key_data(self):
        exc = self.assertRaises(exception.InvalidKeypair,
                                self.keypair_api.import_key_pair,
                                self.ctxt, self.ctxt.user_id, 'foo',
                                'bad key data')
        self.assertEqual(u'Keypair data is invalid: failed to generate fingerprint',
                         six.text_type(exc))
class GetKeypairTestCase(KeypairAPITestCase):
    """Fetching a single keypair by name."""

    def test_success(self):
        found = self.keypair_api.get_key_pair(
            self.ctxt, self.ctxt.user_id, self.existing_key_name)
        self.assertEqual(self.existing_key_name, found['name'])
class GetKeypairsTestCase(KeypairAPITestCase):
    """Listing all keypairs of a user."""

    def test_success(self):
        keypairs = self.keypair_api.get_key_pairs(self.ctxt, self.ctxt.user_id)
        names = [keypair['name'] for keypair in keypairs]
        self.assertEqual([self.existing_key_name], names)
class DeleteKeypairTestCase(KeypairAPITestCase):
    """Deleting a keypair and verifying it is gone afterwards."""

    def test_success(self):
        api = self.keypair_api
        # The key must exist before deletion...
        api.get_key_pair(self.ctxt, self.ctxt.user_id, self.existing_key_name)
        api.delete_key_pair(self.ctxt, self.ctxt.user_id,
                            self.existing_key_name)
        # ...and lookups must fail afterwards.
        self.assertRaises(exception.KeypairNotFound,
                          api.get_key_pair, self.ctxt, self.ctxt.user_id,
                          self.existing_key_name)
        self._check_notifications(action='delete',
                                  key_name=self.existing_key_name)
| {
"content_hash": "f3609813b2865bb08d40f89a8152c4da",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 79,
"avg_line_length": 41.108225108225106,
"alnum_prop": 0.6084667228306655,
"repo_name": "yanheven/nova",
"id": "6167d4fde521799644da1905ce021489060fc386",
"size": "10131",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/tests/unit/compute/test_keypairs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16094122"
},
{
"name": "Shell",
"bytes": "17729"
},
{
"name": "Smarty",
"bytes": "405276"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import logging
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponseForbidden
from ..oauth2_backends import OAuthLibCore
from ..exceptions import FatalClientError
from ..settings import oauth2_settings
# Package logger.
log = logging.getLogger("oauth2_provider")
# Methods that do not modify server state; requests using them are granted
# the read scope by ReadWriteScopedResourceMixin.dispatch (below).
SAFE_HTTP_METHODS = ['GET', 'HEAD', 'OPTIONS']
class OAuthLibMixin(object):
    """
    This mixin decouples Django OAuth Toolkit from OAuthLib.
    Users can configure the Server, Validator and OAuthlibCore
    classes used by this mixin by setting the following class
    variables:
    * server_class
    * validator_class
    * oauthlib_core_class  # TODO rename it as oauthlib_backend_class
    """
    server_class = None
    validator_class = None
    @classmethod
    def get_server_class(cls):
        """
        Return the OAuthlib server class to use
        """
        if cls.server_class is None:
            raise ImproperlyConfigured(
                "OAuthLibMixin requires either a definition of 'server_class'"
                " or an implementation of 'get_server_class()'")
        else:
            return cls.server_class
    @classmethod
    def get_validator_class(cls):
        """
        Return the RequestValidator implementation class to use
        """
        if cls.validator_class is None:
            raise ImproperlyConfigured(
                "OAuthLibMixin requires either a definition of 'validator_class'"
                " or an implementation of 'get_validator_class()'")
        else:
            return cls.validator_class
    @classmethod
    def get_oauthlib_core_class(cls):
        """
        Return the OAuthLibCore implementation class to use, silently
        defaults to OAuthLibCore class from oauth2_provider package
        # TODO rename this as get_oauthlib_backend_class
        """
        if not hasattr(cls, 'oauthlib_core_class'):
            return OAuthLibCore
        return cls.oauthlib_core_class
    @classmethod
    def get_server(cls):
        """
        Return an instance of `server_class` initialized with a `validator_class`
        object
        """
        server_class = cls.get_server_class()
        validator_class = cls.get_validator_class()
        return server_class(validator_class())
    @classmethod
    def get_oauthlib_core(cls):
        """
        Cache and return `OAuthlibCore` instance so it will be created only on first request
        """
        # Cached on the class, so it is shared by every instance of the view.
        if not hasattr(cls, '_oauthlib_core'):
            server = cls.get_server()
            core_class = cls.get_oauthlib_core_class()
            cls._oauthlib_core = core_class(server)
        return cls._oauthlib_core
    def validate_authorization_request(self, request):
        """
        A wrapper method that calls validate_authorization_request on `server_class` instance.
        :param request: The current django.http.HttpRequest object
        """
        core = self.get_oauthlib_core()
        return core.validate_authorization_request(request)
    def create_authorization_response(self, request, scopes, credentials, allow):
        """
        A wrapper method that calls create_authorization_response on `server_class`
        instance.
        :param request: The current django.http.HttpRequest object
        :param scopes: A space-separated string of provided scopes
        :param credentials: Authorization credentials dictionary containing
                           `client_id`, `state`, `redirect_uri`, `response_type`
        :param allow: True if the user authorize the client, otherwise False
        """
        # TODO: move this scopes conversion from and to string into a utils function
        scopes = scopes.split(" ") if scopes else []
        core = self.get_oauthlib_core()
        return core.create_authorization_response(request, scopes, credentials, allow)
    def create_token_response(self, request):
        """
        A wrapper method that calls create_token_response on `server_class` instance.
        :param request: The current django.http.HttpRequest object
        """
        core = self.get_oauthlib_core()
        return core.create_token_response(request)
    def verify_request(self, request):
        """
        A wrapper method that calls verify_request on `server_class` instance.
        :param request: The current django.http.HttpRequest object
        """
        core = self.get_oauthlib_core()
        return core.verify_request(request, scopes=self.get_scopes())
    def get_scopes(self):
        """
        This should return the list of scopes required to access the resources. By default it returns an empty list
        """
        return []
    def error_response(self, error, **kwargs):
        """
        Return an error to be displayed to the resource owner if anything goes awry.
        Returns a (redirect, error_response) tuple: `redirect` is False for
        fatal client errors (malicious redirect_uri / client_id).
        :param error: :attr:`OAuthToolkitError`
        """
        oauthlib_error = error.oauthlib_error
        error_response = {
            'error': oauthlib_error,
            'url': "{0}?{1}".format(oauthlib_error.redirect_uri, oauthlib_error.urlencoded)
        }
        error_response.update(kwargs)
        # If we got a malicious redirect_uri or client_id, we will *not* redirect back to the URL.
        if isinstance(error, FatalClientError):
            redirect = False
        else:
            redirect = True
        return redirect, error_response
class ScopedResourceMixin(object):
    """
    Helper mixin that implements "scopes handling" behaviour
    """
    required_scopes = None

    def get_scopes(self, *args, **kwargs):
        """
        Return the scopes needed to access the resource
        :param args: Support scopes injections from the outside (not yet implemented)
        """
        # Guard clause: fail loudly when the subclass forgot to configure scopes.
        if self.required_scopes is None:
            raise ImproperlyConfigured(
                "ProtectedResourceMixin requires either a definition of 'required_scopes'"
                " or an implementation of 'get_scopes()'")
        return self.required_scopes
class ProtectedResourceMixin(OAuthLibMixin):
    """
    Helper mixin that implements OAuth2 protection on request dispatch, specially useful for Django Generic Views
    """
    def dispatch(self, request, *args, **kwargs):
        # Reject immediately unless the OAuth2 token checks out.
        valid, r = self.verify_request(request)
        if not valid:
            return HttpResponseForbidden()
        # Expose the token's owner to downstream view code.
        request.resource_owner = r.user
        return super(ProtectedResourceMixin, self).dispatch(request, *args, **kwargs)
class ReadWriteScopedResourceMixin(ScopedResourceMixin, OAuthLibMixin):
    """
    Helper mixin that implements "read and write scopes" behavior
    """
    required_scopes = []
    read_write_scope = None

    def __new__(cls, *args, **kwargs):
        """Fail fast at instantiation if READ/WRITE scopes are not configured."""
        provided_scopes = oauth2_settings._SCOPES
        read_write_scopes = [oauth2_settings.READ_SCOPE, oauth2_settings.WRITE_SCOPE]
        if not set(read_write_scopes).issubset(set(provided_scopes)):
            raise ImproperlyConfigured(
                "ReadWriteScopedResourceMixin requires following scopes {0}"
                " to be in OAUTH2_PROVIDER['SCOPES'] list in settings".format(read_write_scopes)
            )
        # Bug fix: object.__new__ accepts no extra arguments in Python 3, so
        # forwarding *args/**kwargs raised TypeError whenever the view was
        # instantiated with arguments. The construction protocol still passes
        # them to __init__.
        return super(ReadWriteScopedResourceMixin, cls).__new__(cls)

    def dispatch(self, request, *args, **kwargs):
        """Select the read or write scope from the HTTP method, then dispatch."""
        if request.method.upper() in SAFE_HTTP_METHODS:
            self.read_write_scope = oauth2_settings.READ_SCOPE
        else:
            self.read_write_scope = oauth2_settings.WRITE_SCOPE
        return super(ReadWriteScopedResourceMixin, self).dispatch(request, *args, **kwargs)

    def get_scopes(self, *args, **kwargs):
        """Return the configured scopes plus the method-derived read/write scope."""
        scopes = super(ReadWriteScopedResourceMixin, self).get_scopes(*args, **kwargs)
        return scopes + [self.read_write_scope]  # this returns a copy so that self.required_scopes is not modified
| {
"content_hash": "c9fd7f019fb0c44449e81566886005e0",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 115,
"avg_line_length": 35.18666666666667,
"alnum_prop": 0.6422887457370215,
"repo_name": "svetlyak40wt/django-oauth-toolkit",
"id": "e91f6a0da1f83815bb033877fb2c21b0e0ff8725",
"size": "7917",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oauth2_provider/views/mixins.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "207679"
}
],
"symlink_target": ""
} |
from lxml import etree
import os
import sys
import conf
import g
from gwis import command
from gwis.exception.gwis_error import GWIS_Error
from util_ import misc
log = g.log.getLogger('cmd.user_hello')
class Op_Handler(command.Op_Handler):
   """Handle the GWIS 'user_hello' request: validate the user's password and
   return a fresh auth token together with the user's saved preferences."""
   # *** Constructor
   def __init__(self, req):
      command.Op_Handler.__init__(self, req)
   # ***
   #
   def __str__(self):
      selfie = 'user_hello'
      return selfie
   # ***
   #
   def pre_decode(self):
      """Check IP ban and credentials before the request body is decoded."""
      # Call the base class first, we raises if the user's host IP is banned.
      command.Op_Handler.pre_decode(self)
      # Validate the user password. If validated, checks that the user has
      # access to the branch, which raises an exception otherwise.
      self.req.client.user_validate_maybe('password')
   #
   def decode_request(self):
      # Not really anything to do; we already got the password, but we have to
      # tell the base class to not check for the token as a consequence.
      command.Op_Handler.decode_request(self, expect_no_token=True)
   #
   def fetch_n_save(self):
      """Reject anonymous requests: 'hello' requires real credentials."""
      command.Op_Handler.fetch_n_save(self)
      if (self.req.client.username == conf.anonymous_username):
         raise GWIS_Error('No credentials provided', 'nocreds')
   #
   def prepare_response(self):
      """Build the XML response: a new auth token plus user preferences."""
      g.assurt(self.req.client.username != conf.anonymous_username)
      # Skipping: BUG 2688: No need to use transaction_retryable,
      # since user_token_gen does it own retrying.
      e = etree.Element('token')
      e.text = self.req.client.user_token_generate(self.req.client.username)
      self.doc.append(e)
      # FIXME: Bug nnnn: Generate the session ID / sessid here, too...
      e = etree.Element('preferences')
      p = self.preferences_get()
      for (col, value) in p.items():
         misc.xa_set(e, col, value)
      self.doc.append(e)
      # Bug nnnn: do not piggyback user prefs on hello's token resp.
      #           This also means we cannot close self.req.db until after
      #           preparing the response -- all other prepare_response
      #           implementations do not use db.
   #
   def preferences_get(self):
      """Fetch the user's saved preference columns from the user_ table."""
      # NOTE: If client sends token, we never send this until they logon again.
      #       That is, if a user changes preferences from one browser, we
      #       won't reflect the changes in another browser until they
      #       explicitly logoff and then back on -- so people who save their
      #       tokens to logon automatically won't see preferences update
      #       across browsers automatically.
      g.assurt(self.req.client.username != conf.anonymous_username)
      # FIXME Why are quoting the string ourselves (with psycopg.QuotedString)
      #       rather than letting the connection cursor do it?
      sql = (
         """
         SELECT
            email,
            enable_watchers_email,
            enable_watchers_digest,
            route_viz,
            rf_planner AS rf_planner,
            rf_p1_priority AS p1_priority,
            rf_p2_transit_pref AS p2_txpref,
            rf_p3_weight_type AS p3_wgt,
            rf_p3_rating_pump AS p3_rgi,
            rf_p3_burden_pump AS p3_bdn,
            rf_p3_spalgorithm AS p3_alg,
            flashclient_settings AS fc_opts,
            routefinder_settings AS rf_opts
         FROM
            user_
         WHERE
            user_.username = %s
         """ % (self.req.db.quoted(self.req.client.username),))
      return self.req.db.sql(sql)[0]
   # ***
   # ***
| {
"content_hash": "7430ff2b3c7fe6e6281bdf8de4e79389",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 79,
"avg_line_length": 31.37837837837838,
"alnum_prop": 0.6155612977318403,
"repo_name": "lbouma/Cyclopath",
"id": "519ccc178345cb3e9ecfb3c54795c6e10611747c",
"size": "3736",
"binary": false,
"copies": "1",
"ref": "refs/heads/release",
"path": "pyserver/gwis/command_/user_hello.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "3369673"
},
{
"name": "ApacheConf",
"bytes": "46372"
},
{
"name": "C",
"bytes": "281248"
},
{
"name": "CSS",
"bytes": "36786"
},
{
"name": "Gnuplot",
"bytes": "14865"
},
{
"name": "HTML",
"bytes": "203213"
},
{
"name": "Java",
"bytes": "688800"
},
{
"name": "JavaScript",
"bytes": "60678"
},
{
"name": "M4",
"bytes": "35700"
},
{
"name": "Makefile",
"bytes": "8036"
},
{
"name": "PHP",
"bytes": "18399"
},
{
"name": "PLSQL",
"bytes": "451"
},
{
"name": "PLpgSQL",
"bytes": "1407944"
},
{
"name": "Perl",
"bytes": "669009"
},
{
"name": "Python",
"bytes": "5830046"
},
{
"name": "Shell",
"bytes": "639435"
}
],
"symlink_target": ""
} |
# Pre-bound formatter producing one "N. animal\n" line.
OUTPUT = '{}. {}\n'.format

def list_animals(animals):
    """Return the animals as a 1-based, numbered, newline-terminated listing."""
    lines = [OUTPUT(number, animal)
             for number, animal in enumerate(animals, start=1)]
    return ''.join(lines)
| {
"content_hash": "cdaba4239d3f6d277fb83df37f927953",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 72,
"avg_line_length": 25.8,
"alnum_prop": 0.627906976744186,
"repo_name": "the-zebulan/CodeWars",
"id": "3fbee348613c4b15e9bfc72d975e2cb0dd9cb87c",
"size": "129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "katas/beta/fix_the_loop.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1203000"
}
],
"symlink_target": ""
} |
"""Read the balance of your bank accounts via FinTS."""
from collections import namedtuple
from datetime import timedelta
import logging
from fints.client import FinTS3PinTanClient
from fints.dialog import FinTSDialogError
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import CONF_NAME, CONF_PIN, CONF_URL, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
# Module logger for this integration.
_LOGGER = logging.getLogger(__name__)
# Bank balances change rarely; poll only every few hours.
SCAN_INTERVAL = timedelta(hours=4)
ICON = "mdi:currency-eur"
# Connection parameters for one bank: BLZ, login, PIN and FinTS endpoint URL.
BankCredentials = namedtuple("BankCredentials", "blz login pin url")
# Configuration keys.
CONF_BIN = "bank_identification_number"
CONF_ACCOUNTS = "accounts"
CONF_HOLDINGS = "holdings"
CONF_ACCOUNT = "account"
# State attribute names.
ATTR_ACCOUNT = CONF_ACCOUNT
ATTR_BANK = "bank"
ATTR_ACCOUNT_TYPE = "account_type"
# One account entry: account identifier plus an optional friendly name.
SCHEMA_ACCOUNTS = vol.Schema(
    {
        vol.Required(CONF_ACCOUNT): cv.string,
        vol.Optional(CONF_NAME, default=None): vol.Any(None, cv.string),
    }
)
# Platform config: bank credentials plus optional account/holdings filters.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_BIN): cv.string,
        vol.Required(CONF_USERNAME): cv.string,
        vol.Required(CONF_PIN): cv.string,
        vol.Required(CONF_URL): cv.string,
        vol.Optional(CONF_NAME): cv.string,
        vol.Optional(CONF_ACCOUNTS, default=[]): cv.ensure_list(SCHEMA_ACCOUNTS),
        vol.Optional(CONF_HOLDINGS, default=[]): cv.ensure_list(SCHEMA_ACCOUNTS),
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the sensors.

    Login to the bank and get a list of existing accounts. Create a
    sensor for each account.
    """
    credentials = BankCredentials(
        config[CONF_BIN], config[CONF_USERNAME], config[CONF_PIN], config[CONF_URL]
    )
    fints_name = config.get(CONF_NAME, config[CONF_BIN])
    # Map account identifier -> optional user-configured friendly name.
    account_config = {
        entry[CONF_ACCOUNT]: entry[CONF_NAME] for entry in config[CONF_ACCOUNTS]
    }
    holdings_config = {
        entry[CONF_ACCOUNT]: entry[CONF_NAME] for entry in config[CONF_HOLDINGS]
    }
    client = FinTsClient(credentials, fints_name)
    balance_accounts, holdings_accounts = client.detect_accounts()
    sensors = []
    for account in balance_accounts:
        # An explicit accounts list acts as a whitelist.
        if config[CONF_ACCOUNTS] and account.iban not in account_config:
            _LOGGER.info("Skipping account %s for bank %s", account.iban, fints_name)
            continue
        sensor_name = account_config.get(account.iban) or f"{fints_name} - {account.iban}"
        sensors.append(FinTsAccount(client, account, sensor_name))
        _LOGGER.debug("Creating account %s for bank %s", account.iban, fints_name)
    for account in holdings_accounts:
        if config[CONF_HOLDINGS] and account.accountnumber not in holdings_config:
            _LOGGER.info(
                "Skipping holdings %s for bank %s", account.accountnumber, fints_name
            )
            continue
        sensor_name = (holdings_config.get(account.accountnumber)
                       or f"{fints_name} - {account.accountnumber}")
        sensors.append(FinTsHoldingsAccount(client, account, sensor_name))
        _LOGGER.debug(
            "Creating holdings %s for bank %s", account.accountnumber, fints_name
        )
    add_entities(sensors, True)
class FinTsClient:
    """Wrapper around the FinTS3PinTanClient.

    Use this class as Context Manager to get the FinTS3Client object.
    """

    def __init__(self, credentials: BankCredentials, name: str):
        """Initialize a FinTsClient."""
        self._credentials = credentials
        self.name = name

    @property
    def client(self):
        """Return a fresh client object for the bank connection.

        As the fints library is stateless, there is no benefit in caching
        the client objects. If that ever changes, consider caching the
        client object and also think about potential concurrency problems.
        """
        return FinTS3PinTanClient(
            self._credentials.blz,
            self._credentials.login,
            self._credentials.pin,
            self._credentials.url,
        )

    def detect_accounts(self):
        """Identify the accounts of the bank."""
        balance_accounts = []
        holdings_accounts = []

        for account in self.client.get_sepa_accounts():
            try:
                self.client.get_balance(account)
            except (IndexError, FinTSDialogError):
                # Account is not a balance account.
                pass
            else:
                balance_accounts.append(account)

            try:
                self.client.get_holdings(account)
            except FinTSDialogError:
                # Account is not a holdings account.
                pass
            else:
                holdings_accounts.append(account)

        return balance_accounts, holdings_accounts
class FinTsAccount(SensorEntity):
    """Sensor for a FinTS balance account.

    A balance account contains an amount of money (=balance). The amount
    may also be negative.
    """

    def __init__(self, client: FinTsClient, account, name: str) -> None:
        """Initialize a FinTs balance account."""
        self._client = client
        self._account = account
        self._name = name
        self._balance: float = None
        self._currency: str = None

    def update(self) -> None:
        """Get the current balance and currency for the account."""
        balance = self._client.client.get_balance(self._account)
        self._balance = balance.amount.amount
        self._currency = balance.amount.currency
        _LOGGER.debug("updated balance of account %s", self.name)

    @property
    def name(self) -> str:
        """Friendly name of the sensor."""
        return self._name

    @property
    def icon(self) -> str:
        """Icon shown for the sensor."""
        return ICON

    @property
    def state(self) -> float:
        """Current balance of the account."""
        return self._balance

    @property
    def unit_of_measurement(self) -> str:
        """Currency of the account balance."""
        return self._currency

    @property
    def extra_state_attributes(self) -> dict:
        """Additional attributes of the sensor."""
        bank_name = self._client.name
        attributes = {
            ATTR_ACCOUNT: self._account.iban,
            ATTR_ACCOUNT_TYPE: "balance",
        }
        if bank_name:
            attributes[ATTR_BANK] = bank_name
        return attributes
class FinTsHoldingsAccount(SensorEntity):
    """Sensor for a FinTS holdings account.

    A holdings account does not contain money but rather some financial
    instruments, e.g. stocks.
    """

    def __init__(self, client: FinTsClient, account, name: str) -> None:
        """Initialize a FinTs holdings account."""
        self._client = client
        self._name = name
        self._account = account
        self._holdings = []
        self._total: float = None

    def update(self) -> None:
        """Get the current holdings for the account."""
        holdings = self._client.client.get_holdings(self._account)
        self._holdings = holdings
        self._total = sum(holding.total_value for holding in holdings)

    @property
    def name(self) -> str:
        """Friendly name of the sensor."""
        return self._name

    @property
    def icon(self) -> str:
        """Icon shown for the sensor."""
        return ICON

    @property
    def state(self) -> float:
        """Total market value of all holdings."""
        return self._total

    @property
    def unit_of_measurement(self) -> str:
        """Get the unit of measurement.

        Hardcoded to EUR, as the library does not provide the currency for
        the holdings. And as FinTS is only used in Germany, most accounts
        will be in EUR anyways.
        """
        return "EUR"

    @property
    def extra_state_attributes(self) -> dict:
        """Additional attributes of the sensor.

        Lists each holding of the account with the current value.
        """
        attributes = {
            ATTR_ACCOUNT: self._account.accountnumber,
            ATTR_ACCOUNT_TYPE: "holdings",
        }
        if self._client.name:
            attributes[ATTR_BANK] = self._client.name
        for holding in self._holdings:
            attributes[f"{holding.name} total"] = holding.total_value
            attributes[f"{holding.name} pieces"] = holding.pieces
            attributes[f"{holding.name} price"] = holding.market_value
        return attributes
| {
"content_hash": "4f349822c8c8554658961a18483b3f69",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 85,
"avg_line_length": 31.868613138686133,
"alnum_prop": 0.624942739349519,
"repo_name": "adrienbrault/home-assistant",
"id": "e7faff46155c2e82e11fa383ca5611d88caf12db",
"size": "8732",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/fints/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from collections import defaultdict
import json
import logging
from django.conf import settings
from django.contrib import auth
from django.core import exceptions as django_exceptions
from django.core.validators import RegexValidator, MaxLengthValidator
from django.db import models as django_models, transaction
from django.db.models import Q
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
import pyvat
from rest_framework import exceptions, serializers
from rest_framework.reverse import reverse
import six
from waldur_core.core import (models as core_models, fields as core_fields, serializers as core_serializers,
utils as core_utils)
from waldur_core.core.fields import MappedChoiceField
from waldur_core.monitoring.serializers import MonitoringSerializerMixin
from waldur_core.quotas import serializers as quotas_serializers
from waldur_core.structure import (models, SupportedServices, ServiceBackendError, ServiceBackendNotImplemented,
executors)
from waldur_core.structure.managers import filter_queryset_for_user
# Active user model of the Django project.
User = auth.get_user_model()
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class IpCountValidator(MaxLengthValidator):
    """Validator limiting the number of IP addresses.

    Reuses MaxLengthValidator's comparison logic; only the message differs.
    """
    message = _('Only %(limit_value)s ip address is supported.')
class PermissionFieldFilteringMixin(object):
    """
    Mixin allowing to filter related fields.

    In order to constrain the list of entities that can be used
    as a value for the field:

    1. Make sure that the entity in question has corresponding
       Permission class defined.
    2. Implement `get_filtered_field_names()` method
       in the class that this mixin is mixed into and return
       the field in question from that method.
    """

    def get_fields(self):
        fields = super(PermissionFieldFilteringMixin, self).get_fields()

        try:
            user = self.context['request'].user
        except (KeyError, AttributeError):
            # No authenticated request available - leave fields unfiltered.
            return fields

        for name in self.get_filtered_field_names():
            # A field could be not required by the user.
            if name not in fields:
                continue
            field = fields[name]
            field.queryset = filter_queryset_for_user(field.queryset, user)

        return fields

    def get_filtered_field_names(self):
        raise NotImplementedError(
            'Implement get_filtered_field_names() '
            'to return list of filtered fields')
class PermissionListSerializer(serializers.ListSerializer):
    """
    Filters the rendered queryset by the requesting user.

    Counterpart of PermissionFieldFilteringMixin.
    In order to use it set Meta.list_serializer_class. Example:

    >>> class PermissionProjectSerializer(BasicProjectSerializer):
    >>>     class Meta(BasicProjectSerializer.Meta):
    >>>         list_serializer_class = PermissionListSerializer
    >>>
    >>> class CustomerSerializer(serializers.HyperlinkedModelSerializer):
    >>>     projects = PermissionProjectSerializer(many=True, read_only=True)
    """

    def to_representation(self, data):
        """Render only the objects the current user may see."""
        try:
            user = self.context['request'].user
        except (KeyError, AttributeError):
            # No request in context - render without filtering.
            return super(PermissionListSerializer, self).to_representation(data)

        if isinstance(data, (django_models.Manager, django_models.query.QuerySet)):
            data = filter_queryset_for_user(data.all(), user)
        return super(PermissionListSerializer, self).to_representation(data)
class BasicUserSerializer(serializers.HyperlinkedModelSerializer):
    """Compact user representation for nesting into other serializers."""
    class Meta(object):
        model = User
        fields = ('url', 'uuid', 'username', 'full_name', 'native_name',)
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }
class BasicProjectSerializer(core_serializers.BasicInfoSerializer):
    """Basic project representation (fields defined by BasicInfoSerializer)."""
    class Meta(core_serializers.BasicInfoSerializer.Meta):
        model = models.Project
class PermissionProjectSerializer(BasicProjectSerializer):
    """Basic project serializer whose list output is filtered by user permissions."""
    class Meta(BasicProjectSerializer.Meta):
        list_serializer_class = PermissionListSerializer
class NestedServiceProjectLinkSerializer(serializers.Serializer):
    """Compact service project link representation nested into project output."""

    uuid = serializers.ReadOnlyField(source='service.uuid')
    url = serializers.SerializerMethodField()
    service_project_link_url = serializers.SerializerMethodField()
    name = serializers.ReadOnlyField(source='service.settings.name')
    type = serializers.SerializerMethodField()
    state = serializers.SerializerMethodField()
    shared = serializers.SerializerMethodField()
    settings_uuid = serializers.ReadOnlyField(source='service.settings.uuid')
    settings = serializers.SerializerMethodField()
    validation_state = serializers.ChoiceField(
        choices=models.ServiceProjectLink.States.CHOICES,
        read_only=True,
        help_text=_('A state of service compliance with project requirements.'))
    validation_message = serializers.ReadOnlyField(
        help_text=_('An error message for a service that is non-compliant with project requirements.'))

    def get_settings(self, link):
        """Return URL of the service settings."""
        return reverse(
            'servicesettings-detail',
            kwargs={'uuid': link.service.settings.uuid},
            request=self.context['request'])

    def get_url(self, link):
        """Return URL of the service."""
        detail_view = SupportedServices.get_detail_view_for_model(link.service)
        return reverse(detail_view, kwargs={'uuid': link.service.uuid.hex}, request=self.context['request'])

    def get_service_project_link_url(self, link):
        """Return URL of the link object itself."""
        detail_view = SupportedServices.get_detail_view_for_model(link)
        return reverse(detail_view, kwargs={'pk': link.id}, request=self.context['request'])

    def get_type(self, link):
        """Return the registered name of the service type."""
        return SupportedServices.get_name_for_model(link.service)

    # XXX: SPL is intended to become stateless. For backward compatiblity we are returning here state from connected
    # service settings. To be removed once SPL becomes stateless.
    def get_state(self, link):
        return link.service.settings.get_state_display()

    def get_resources_count(self, link):
        """Count total number of all resources connected to link."""
        total = 0
        for resource_model in SupportedServices.get_service_resources(link.service):
            # Format query path from resource to service project link.
            root_field = resource_model.Permissions.project_path.split('__')[0]
            total += resource_model.objects.filter(**{root_field: link}).count()
        return total

    def get_shared(self, link):
        """Whether the underlying service settings are shared."""
        return link.service.settings.shared
class NestedServiceCertificationSerializer(core_serializers.AugmentedSerializerMixin,
                                           core_serializers.HyperlinkedRelatedModelSerializer):
    """Certification reference for nesting; descriptive fields are read-only."""
    class Meta(object):
        model = models.ServiceCertification
        fields = ('uuid', 'url', 'name', 'description', 'link')
        read_only_fields = ('name', 'description', 'link')
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }
class ProjectTypeSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for project types."""
    class Meta(object):
        model = models.ProjectType
        fields = ('uuid', 'url', 'name', 'description')
        extra_kwargs = {
            'url': {'lookup_field': 'uuid', 'view_name': 'project_type-detail'},
        }
class ProjectSerializer(core_serializers.RestrictedSerializerMixin,
                        PermissionFieldFilteringMixin,
                        core_serializers.AugmentedSerializerMixin,
                        serializers.HyperlinkedModelSerializer):
    """Serializer for projects with nested quotas, service links and certifications."""
    quotas = quotas_serializers.BasicQuotaSerializer(many=True, read_only=True)
    services = serializers.SerializerMethodField()
    certifications = NestedServiceCertificationSerializer(
        queryset=models.ServiceCertification.objects.all(),
        many=True, required=False)
    class Meta(object):
        model = models.Project
        fields = (
            'url', 'uuid',
            'name',
            'customer', 'customer_uuid', 'customer_name', 'customer_native_name', 'customer_abbreviation',
            'description',
            'quotas',
            'services',
            'created',
            'certifications',
            'type', 'type_name',
        )
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
            'customer': {'lookup_field': 'uuid'},
            'certifications': {'lookup_field': 'uuid'},
            'type': {'lookup_field': 'uuid', 'view_name': 'project_type-detail'},
        }
        related_paths = {
            'customer': ('uuid', 'name', 'native_name', 'abbreviation'),
            'type': ('name',),
        }
        # Certifications can only be supplied at creation time.
        protected_fields = ('certifications',)
    @staticmethod
    def eager_load(queryset):
        """Prefetch related objects so list views avoid N+1 queries."""
        related_fields = (
            'uuid',
            'name',
            'created',
            'description',
            'customer__uuid',
            'customer__name',
            'customer__native_name',
            'customer__abbreviation',
        )
        return queryset.select_related('customer').only(*related_fields) \
            .prefetch_related('quotas', 'certifications')
    def create(self, validated_data):
        """Create the project, then attach the requested certifications."""
        certifications = validated_data.pop('certifications', [])
        project = super(ProjectSerializer, self).create(validated_data)
        project.certifications.add(*certifications)
        return project
    def get_filtered_field_names(self):
        # PermissionFieldFilteringMixin hook: limit selectable customers
        # to those visible to the current user.
        return 'customer',
    def get_services(self, project):
        """Serialize the project's service links.

        The map of project id -> links is built once per serialization and
        cached in the context so list views do not recompute it per project.
        """
        if 'services' not in self.context:
            self.context['services'] = self.get_services_map()
        services = self.context['services'][project.pk]
        serializer = NestedServiceProjectLinkSerializer(
            services,
            many=True,
            read_only=True,
            context={'request': self.context['request']})
        return serializer.data
    def get_services_map(self):
        """Build a mapping of project id to its service project links."""
        services = defaultdict(list)
        related_fields = (
            'id',
            'service__settings__state',
            'project_id',
            'service__uuid',
            'service__settings__uuid',
            'service__settings__shared',
            'service__settings__name',
        )
        for link_model in models.ServiceProjectLink.get_all_models():
            links = (link_model.objects.all()
                     .select_related('service', 'service__settings')
                     .only(*related_fields)
                     .prefetch_related('service__settings__certifications'))
            # self.instance is a list when many projects are serialized at once.
            if isinstance(self.instance, list):
                links = links.filter(project__in=self.instance)
            else:
                links = links.filter(project=self.instance)
            for link in links:
                services[link.project_id].append(link)
        return services
class CustomerImageSerializer(serializers.ModelSerializer):
    """Serializer for uploading a customer logo image."""
    image = serializers.ImageField()
    class Meta:
        model = models.Customer
        fields = ['image']
class CustomerSerializer(core_serializers.RestrictedSerializerMixin,
                         core_serializers.AugmentedSerializerMixin,
                         serializers.HyperlinkedModelSerializer, ):
    """Serializer for customers with nested projects, users and quotas."""
    projects = PermissionProjectSerializer(many=True, read_only=True)
    owners = BasicUserSerializer(source='get_owners', many=True, read_only=True)
    support_users = BasicUserSerializer(source='get_support_users', many=True, read_only=True)
    image = serializers.SerializerMethodField()
    quotas = quotas_serializers.BasicQuotaSerializer(many=True, read_only=True)
    # Selectable countries may be restricted via the WALDUR_CORE['COUNTRIES'] setting.
    COUNTRIES = core_fields.CountryField.COUNTRIES
    if settings.WALDUR_CORE.get('COUNTRIES'):
        COUNTRIES = [item for item in COUNTRIES if item[0] in settings.WALDUR_CORE['COUNTRIES']]
    country = serializers.ChoiceField(required=False, choices=COUNTRIES, allow_blank=True)
    country_name = serializers.ReadOnlyField(source='get_country_display')
    class Meta(object):
        model = models.Customer
        fields = (
            'url',
            'uuid',
            'created',
            'name', 'native_name', 'abbreviation', 'contact_details',
            'agreement_number', 'email', 'phone_number', 'access_subnets',
            'projects',
            'owners', 'support_users',
            'registration_code',
            'quotas',
            'image',
            'country', 'country_name', 'vat_code', 'is_company',
            'type', 'postal', 'address', 'bank_name', 'bank_account',
            'default_tax_percent', 'accounting_start_date',
        )
        protected_fields = ('agreement_number',)
        read_only_fields = ('access_subnets', 'accounting_start_date', 'default_tax_percent')
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }
    def get_image(self, customer):
        """Return URL of the customer logo, or the configured default logo."""
        if not customer.image:
            return settings.WALDUR_CORE.get('DEFAULT_CUSTOMER_LOGO')
        return reverse('customer_image', kwargs={'uuid': customer.uuid}, request=self.context['request'])
    @staticmethod
    def eager_load(queryset):
        """Prefetch related objects so list views avoid N+1 queries."""
        return queryset.prefetch_related('quotas', 'projects')
    def validate(self, attrs):
        """Validate the VAT number format and verify it via pyvat when needed."""
        country = attrs.get('country')
        vat_code = attrs.get('vat_code')
        is_company = attrs.get('is_company')
        if vat_code:
            if not is_company:
                raise serializers.ValidationError({
                    'vat_code': _('VAT number is not supported for private persons.')})
            # Check VAT format
            if not pyvat.is_vat_number_format_valid(vat_code, country):
                raise serializers.ValidationError({'vat_code': _('VAT number has invalid format.')})
            # Check VAT number in EU VAT Information Exchange System
            # if customer is new or either VAT number or country of the customer has changed
            if not self.instance or self.instance.vat_code != vat_code or self.instance.country != country:
                check_result = pyvat.check_vat_number(vat_code, country)
                if check_result.is_valid:
                    attrs['vat_name'] = check_result.business_name
                    attrs['vat_address'] = check_result.business_address
                    if not attrs.get('contact_details'):
                        attrs['contact_details'] = attrs['vat_address']
                elif check_result.is_valid is False:
                    raise serializers.ValidationError({'vat_code': _('VAT number is invalid.')})
                else:
                    # is_valid is None: the check service gave no definite answer.
                    logger.debug('Unable to check VAT number %s for country %s. Error message: %s',
                                 vat_code, country, check_result.log_lines)
                    raise serializers.ValidationError({'vat_code': _('Unable to check VAT number.')})
        return attrs
class NestedProjectPermissionSerializer(serializers.ModelSerializer):
    """Project permission representation nested into user output."""
    url = serializers.HyperlinkedRelatedField(
        source='project',
        lookup_field='uuid',
        view_name='project-detail',
        queryset=models.Project.objects.all(),
    )
    uuid = serializers.ReadOnlyField(source='project.uuid')
    name = serializers.ReadOnlyField(source='project.name')
    permission = serializers.HyperlinkedRelatedField(
        source='pk',
        view_name='project_permission-detail',
        queryset=models.ProjectPermission.objects.all(),
    )
    class Meta:
        model = models.ProjectPermission
        fields = ['url', 'uuid', 'name', 'role', 'permission', 'expiration_time']
class CustomerUserSerializer(serializers.ModelSerializer):
    """User representation enriched with the user's role within a customer."""

    role = serializers.ReadOnlyField()
    expiration_time = serializers.ReadOnlyField(source='perm.expiration_time')
    permission = serializers.HyperlinkedRelatedField(
        source='perm.pk',
        view_name='customer_permission-detail',
        queryset=models.CustomerPermission.objects.all(),
    )
    projects = NestedProjectPermissionSerializer(many=True, read_only=True)

    class Meta:
        model = User
        fields = ['url', 'uuid', 'username', 'full_name', 'email', 'role', 'permission', 'projects',
                  'expiration_time']
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }

    def to_representation(self, user):
        """Attach customer-scoped permission data to the user before rendering."""
        customer = self.context['customer']
        permission = models.CustomerPermission.objects.filter(
            customer=customer, user=user, is_active=True).first()
        projects = models.ProjectPermission.objects.filter(
            project__customer=customer, user=user, is_active=True)
        user.perm = permission
        user.role = permission and permission.role
        user.projects = projects
        return super(CustomerUserSerializer, self).to_representation(user)
class ProjectUserSerializer(serializers.ModelSerializer):
    """User representation enriched with the user's role within a project."""

    role = serializers.ReadOnlyField()
    expiration_time = serializers.ReadOnlyField(source='perm.expiration_time')
    permission = serializers.HyperlinkedRelatedField(
        source='perm.pk',
        view_name='project_permission-detail',
        queryset=models.ProjectPermission.objects.all(),
    )

    class Meta:
        model = User
        fields = ['url', 'uuid', 'username', 'full_name', 'email', 'role', 'permission',
                  'expiration_time']
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }

    def to_representation(self, user):
        """Attach project-scoped permission data to the user before rendering."""
        project = self.context['project']
        permission = models.ProjectPermission.objects.filter(
            project=project, user=user, is_active=True).first()
        user.perm = permission
        user.role = permission and permission.role
        return super(ProjectUserSerializer, self).to_representation(user)
class BasePermissionSerializer(core_serializers.AugmentedSerializerMixin, serializers.HyperlinkedModelSerializer):
    """Base for permission serializers: exposes flattened user detail fields."""
    class Meta(object):
        fields = ('user', 'user_full_name', 'user_native_name', 'user_username', 'user_uuid', 'user_email')
        related_paths = {
            'user': ('username', 'full_name', 'native_name', 'uuid', 'email'),
        }
class BasicCustomerPermissionSerializer(BasePermissionSerializer):
    """Compact customer permission representation (no user detail fields)."""
    class Meta(BasePermissionSerializer.Meta):
        model = models.CustomerPermission
        fields = (
            'url', 'pk', 'role', 'customer_uuid', 'customer_name', 'customer_native_name', 'customer_abbreviation',
        )
        related_paths = dict(
            customer=('name', 'native_name', 'abbreviation', 'uuid'),
            **BasePermissionSerializer.Meta.related_paths
        )
        extra_kwargs = {
            'customer': {
                'view_name': 'customer-detail',
                'lookup_field': 'uuid',
                'queryset': models.Customer.objects.all(),
            }
        }
class CustomerPermissionSerializer(PermissionFieldFilteringMixin, BasePermissionSerializer):
    """Serializer for granting and listing customer-level permissions."""

    class Meta(BasePermissionSerializer.Meta):
        model = models.CustomerPermission
        fields = (
            'url', 'pk', 'role', 'created', 'expiration_time', 'created_by',
            'customer', 'customer_uuid', 'customer_name', 'customer_native_name', 'customer_abbreviation',
        ) + BasePermissionSerializer.Meta.fields
        related_paths = dict(
            customer=('name', 'native_name', 'abbreviation', 'uuid'),
            **BasePermissionSerializer.Meta.related_paths
        )
        protected_fields = (
            'customer', 'role', 'user', 'created_by', 'created'
        )
        extra_kwargs = {
            'user': {
                'view_name': 'user-detail',
                'lookup_field': 'uuid',
                'queryset': User.objects.all(),
            },
            'created_by': {
                'view_name': 'user-detail',
                'lookup_field': 'uuid',
                'read_only': True,
            },
            'customer': {
                'view_name': 'customer-detail',
                'lookup_field': 'uuid',
                'queryset': models.Customer.objects.all(),
            }
        }

    def validate(self, data):
        """Reject duplicate permissions for the same customer/user pair on creation."""
        if not self.instance:
            customer = data['customer']
            user = data['user']
            if customer.has_user(user):
                raise serializers.ValidationError(_('The fields customer and user must make a unique set.'))
        return data

    def create(self, validated_data):
        """Grant the permission on behalf of the requesting user."""
        customer = validated_data['customer']
        user = validated_data['user']
        role = validated_data['role']
        expiration_time = validated_data.get('expiration_time')
        created_by = self.context['request'].user
        # The second return value is the "created" flag; it must not be
        # bound to `_`, which would shadow the module-level gettext alias.
        permission, _created = customer.add_user(user, role, created_by, expiration_time)
        return permission

    def validate_expiration_time(self, value):
        """Ensure the expiration time, when given, lies in the future."""
        if value is not None and value < timezone.now():
            raise serializers.ValidationError(_('Expiration time should be greater than current time.'))
        return value

    def get_filtered_field_names(self):
        # Restrict selectable customers to those visible to the current user.
        return ('customer',)
class CustomerPermissionLogSerializer(CustomerPermissionSerializer):
    """Serializer for customer permission log entries (uses the log detail view)."""
    class Meta(CustomerPermissionSerializer.Meta):
        view_name = 'customer_permission_log-detail'
class ProjectPermissionSerializer(PermissionFieldFilteringMixin, BasePermissionSerializer):
    """Serializer for granting and listing project-level permissions."""
    customer_name = serializers.ReadOnlyField(source='project.customer.name')

    class Meta(BasePermissionSerializer.Meta):
        model = models.ProjectPermission
        fields = (
            'url', 'pk', 'role', 'created', 'expiration_time', 'created_by',
            'project', 'project_uuid', 'project_name', 'customer_name'
        ) + BasePermissionSerializer.Meta.fields
        related_paths = dict(
            project=('name', 'uuid'),
            **BasePermissionSerializer.Meta.related_paths
        )
        protected_fields = (
            'project', 'role', 'user', 'created_by', 'created'
        )
        extra_kwargs = {
            'user': {
                'view_name': 'user-detail',
                'lookup_field': 'uuid',
                'queryset': User.objects.all(),
            },
            'created_by': {
                'view_name': 'user-detail',
                'lookup_field': 'uuid',
                'read_only': True,
            },
            'project': {
                'view_name': 'project-detail',
                'lookup_field': 'uuid',
                'queryset': models.Project.objects.all(),
            }
        }

    def validate(self, data):
        """Reject duplicate permissions for the same project/user pair on creation."""
        if not self.instance:
            project = data['project']
            user = data['user']
            if project.has_user(user):
                raise serializers.ValidationError(_('The fields project and user must make a unique set.'))
        return data

    def create(self, validated_data):
        """Grant the permission on behalf of the requesting user."""
        project = validated_data['project']
        user = validated_data['user']
        role = validated_data['role']
        expiration_time = validated_data.get('expiration_time')
        created_by = self.context['request'].user
        # The second return value is the "created" flag; it must not be
        # bound to `_`, which would shadow the module-level gettext alias.
        permission, _created = project.add_user(user, role, created_by, expiration_time)
        return permission

    def validate_expiration_time(self, value):
        """Ensure the expiration time, when given, lies in the future."""
        if value is not None and value < timezone.now():
            raise serializers.ValidationError(_('Expiration time should be greater than current time.'))
        return value

    def get_filtered_field_names(self):
        # Restrict selectable projects to those visible to the current user.
        return ('project',)
class BasicProjectPermissionSerializer(BasePermissionSerializer):
    """Compact project permission representation (no user detail fields)."""
    class Meta(BasePermissionSerializer.Meta):
        model = models.ProjectPermission
        fields = (
            'url', 'pk', 'role', 'project_uuid', 'project_name',
        )
        related_paths = dict(
            project=('name', 'uuid'),
            **BasePermissionSerializer.Meta.related_paths
        )
        extra_kwargs = {
            'project': {
                'view_name': 'project-detail',
                'lookup_field': 'uuid',
                'queryset': models.Project.objects.all(),
            }
        }
class ProjectPermissionLogSerializer(ProjectPermissionSerializer):
    """Serializer for project permission log entries (uses the log detail view)."""
    class Meta(ProjectPermissionSerializer.Meta):
        view_name = 'project_permission_log-detail'
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for users; administrative fields are shown or frozen per viewer rights."""
    email = serializers.EmailField()
    agree_with_policy = serializers.BooleanField(write_only=True, required=False,
                                                 help_text=_('User must agree with the policy to register.'))
    preferred_language = serializers.ChoiceField(choices=settings.LANGUAGES, allow_blank=True, required=False)
    competence = serializers.ChoiceField(choices=settings.WALDUR_CORE.get('USER_COMPETENCE_LIST', []),
                                         allow_blank=True,
                                         required=False)
    token = serializers.ReadOnlyField(source='auth_token.key')
    customer_permissions = serializers.SerializerMethodField()
    project_permissions = serializers.SerializerMethodField()
    def get_customer_permissions(self, user):
        """Return the user's active customer permissions."""
        permissions = models.CustomerPermission.objects.filter(user=user, is_active=True).select_related('customer')
        serializer = BasicCustomerPermissionSerializer(instance=permissions, many=True,
                                                       context=self.context)
        return serializer.data
    def get_project_permissions(self, user):
        """Return the user's active project permissions."""
        permissions = models.ProjectPermission.objects.filter(user=user, is_active=True).select_related('project')
        serializer = BasicProjectPermissionSerializer(instance=permissions, many=True,
                                                      context=self.context)
        return serializer.data
    class Meta(object):
        model = User
        fields = (
            'url',
            'uuid', 'username',
            'full_name', 'native_name',
            'job_title', 'email', 'phone_number',
            'organization',
            'civil_number',
            'description',
            'is_staff', 'is_active', 'is_support',
            'token', 'token_lifetime',
            'registration_method',
            'date_joined',
            'agree_with_policy',
            'agreement_date',
            'preferred_language',
            'competence',
            'customer_permissions',
            'project_permissions',
        )
        read_only_fields = (
            'uuid',
            'civil_number',
            'registration_method',
            'date_joined',
            'agreement_date',
            'customer_permissions',
            'project_permissions',
        )
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }
    def get_fields(self):
        """Hide or freeze fields depending on the requesting user's rights."""
        fields = super(UserSerializer, self).get_fields()
        try:
            request = self.context['view'].request
            user = request.user
        except (KeyError, AttributeError):
            return fields
        if not user.is_staff and not user.is_support:
            # Administrative fields are visible only to staff and support.
            del fields['is_active']
            del fields['is_staff']
            del fields['description']
        if not self._can_see_token(user):
            del fields['token']
            del fields['token_lifetime']
        if request.method in ('PUT', 'PATCH'):
            # Username cannot be changed after creation.
            fields['username'].read_only = True
        return fields
    def _can_see_token(self, user):
        # Staff can see any token
        # User can see his own token either via details view or /api/users/?current
        if user.is_staff:
            return True
        elif isinstance(self.instance, list) and len(self.instance) == 1:
            return self.instance[0] == user
        else:
            return self.instance == user
    def validate(self, attrs):
        """Require policy agreement on first update and run model-level validation."""
        agree_with_policy = attrs.pop('agree_with_policy', False)
        if self.instance and not self.instance.agreement_date:
            if not agree_with_policy:
                raise serializers.ValidationError({'agree_with_policy': _('User must agree with the policy.')})
            else:
                attrs['agreement_date'] = timezone.now()
        # Convert validation error from Django to DRF
        # https://github.com/tomchristie/django-rest-framework/issues/2145
        try:
            user = User(id=getattr(self.instance, 'id', None), **attrs)
            user.clean()
        except django_exceptions.ValidationError as error:
            raise exceptions.ValidationError(error.message_dict)
        return attrs
class CreationTimeStatsSerializer(serializers.Serializer):
    """Aggregates project/customer creation counts into time segments."""

    MODEL_NAME_CHOICES = (('project', 'project'), ('customer', 'customer'),)
    MODEL_CLASSES = {'project': models.Project, 'customer': models.Customer}

    model_name = serializers.ChoiceField(choices=MODEL_NAME_CHOICES)
    start_timestamp = serializers.IntegerField(min_value=0)
    end_timestamp = serializers.IntegerField(min_value=0)
    segments_count = serializers.IntegerField(min_value=0)

    def get_stats(self, user):
        """Count creations per timestamp, restricted to objects visible to the user."""
        start = self.data['start_timestamp']
        end = self.data['end_timestamp']
        model = self.MODEL_CLASSES[self.data['model_name']]

        visible = filter_queryset_for_user(model.objects.all(), user)
        counted = (
            visible
            .filter(created__gte=core_utils.timestamp_to_datetime(start),
                    created__lte=core_utils.timestamp_to_datetime(end))
            .values('created')
            .annotate(count=django_models.Count('id', distinct=True)))

        pairs = [
            (core_utils.datetime_to_timestamp(item['created']), item['count'])
            for item in counted
        ]
        return core_utils.format_time_and_value_to_segment_list(
            pairs, self.data['segments_count'], start, end)
class PasswordSerializer(serializers.Serializer):
    """Validates a new password: minimum 7 characters, at least one digit and one latin letter."""

    password = serializers.CharField(min_length=7, validators=[
        RegexValidator(
            # Raw string: '\d' in a plain literal is an invalid escape
            # sequence (DeprecationWarning since Python 3.6).
            regex=r'\d',
            message=_('Ensure this field has at least one digit.'),
        ),
        RegexValidator(
            regex=r'[a-zA-Z]',
            message=_('Ensure this field has at least one latin letter.'),
        ),
    ])
class SshKeySerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for SSH public keys; the owner UUID is visible to staff only."""

    user_uuid = serializers.ReadOnlyField(source='user.uuid')

    class Meta(object):
        model = core_models.SshPublicKey
        fields = ('url', 'uuid', 'name', 'public_key', 'fingerprint', 'user_uuid', 'is_shared')
        read_only_fields = ('fingerprint', 'is_shared')
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }

    def validate_public_key(self, value):
        """Ensure the key is a single line and its fingerprint is unique."""
        key = value.strip()
        if len(key.splitlines()) > 1:
            raise serializers.ValidationError(_('Key is not valid: it should be single line.'))

        try:
            fingerprint = core_models.get_ssh_key_fingerprint(key)
        except (IndexError, TypeError):
            raise serializers.ValidationError(_('Key is not valid: cannot generate fingerprint from it.'))
        if core_models.SshPublicKey.objects.filter(fingerprint=fingerprint).exists():
            raise serializers.ValidationError(_('Key with same fingerprint already exists.'))

        return key

    def get_fields(self):
        fields = super(SshKeySerializer, self).get_fields()

        try:
            user = self.context['request'].user
        except (KeyError, AttributeError):
            return fields

        if not user.is_staff:
            # Hide the owner UUID from non-staff users.
            del fields['user_uuid']

        return fields
class ServiceCertificationsUpdateSerializer(serializers.Serializer):
    """Replaces the certification set of an instance in one atomic step."""

    certifications = NestedServiceCertificationSerializer(
        queryset=models.ServiceCertification.objects.all(),
        required=True,
        many=True)

    @transaction.atomic
    def update(self, instance, validated_data):
        """Drop existing certifications and attach the submitted ones."""
        new_certifications = validated_data.pop('certifications', None)
        instance.certifications.clear()
        instance.certifications.add(*new_certifications)
        return instance
class ServiceCertificationSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for service certifications."""
    class Meta(object):
        model = models.ServiceCertification
        fields = ('uuid', 'url', 'name', 'description', 'link')
        extra_kwargs = {
            'url': {'lookup_field': 'uuid', 'view_name': 'service-certification-detail'},
        }
class ServiceSettingsSerializer(PermissionFieldFilteringMixin,
                                core_serializers.AugmentedSerializerMixin,
                                serializers.HyperlinkedModelSerializer):
    """Serializer for service settings (provider credentials and metadata).

    Credential fields are write-only by default; get_fields() selectively
    re-exposes them to staff users and owners of the settings' customer.
    """
    customer_native_name = serializers.ReadOnlyField(source='customer.native_name')
    # Expose human-readable state names instead of raw integer codes.
    state = MappedChoiceField(
        choices=[(v, k) for k, v in core_models.StateMixin.States.CHOICES],
        choice_mappings={v: k for k, v in core_models.StateMixin.States.CHOICES},
        read_only=True)
    quotas = quotas_serializers.BasicQuotaSerializer(many=True, read_only=True)
    scope = core_serializers.GenericRelatedField(related_models=models.ResourceMixin.get_all_models(), required=False)
    certifications = NestedServiceCertificationSerializer(many=True, read_only=True)
    geolocations = serializers.JSONField(read_only=True)

    class Meta(object):
        model = models.ServiceSettings
        fields = (
            'url', 'uuid', 'name', 'type', 'state', 'error_message', 'shared',
            'backend_url', 'username', 'password', 'token', 'certificate',
            'customer', 'customer_name', 'customer_native_name',
            'homepage', 'terms_of_services', 'certifications',
            'quotas', 'scope', 'geolocations',
        )
        protected_fields = ('type', 'customer')
        read_only_fields = ('shared', 'state', 'error_message')
        related_paths = ('customer',)
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
            'customer': {'lookup_field': 'uuid'},
            'certifications': {'lookup_field': 'uuid'},
        }
        # Credentials must never leak through the API by default; mark each
        # one write-only in extra_kwargs (runs at class-definition time).
        write_only_fields = ('backend_url', 'username', 'token', 'password', 'certificate')
        for field in write_only_fields:
            field_params = extra_kwargs.setdefault(field, {})
            field_params['write_only'] = True

    def get_filtered_field_names(self):
        # PermissionFieldFilteringMixin hook: restrict 'customer' choices per user.
        return 'customer',

    @staticmethod
    def eager_load(queryset):
        """Prefetch relations rendered by this serializer to avoid N+1 queries."""
        return queryset.select_related('customer').prefetch_related('quotas', 'certifications')

    def get_fields(self):
        fields = super(ServiceSettingsSerializer, self).get_fields()
        request = self.context['request']
        if isinstance(self.instance, self.Meta.model):
            if self.can_see_extra_fields():
                # If user can change settings he should be able to see value
                for field in self.Meta.write_only_fields:
                    fields[field].write_only = False
                serializer = self.get_service_serializer()
                # Remove fields if they are not needed for service
                filter_fields = serializer.SERVICE_ACCOUNT_FIELDS
                if filter_fields is not NotImplemented:
                    for field in self.Meta.write_only_fields:
                        if field in filter_fields:
                            fields[field].help_text = filter_fields[field]
                        elif field in fields:
                            del fields[field]
                # Add extra fields stored in options dictionary
                extra_fields = serializer.SERVICE_ACCOUNT_EXTRA_FIELDS
                if extra_fields is not NotImplemented:
                    for field in extra_fields:
                        fields[field] = serializers.CharField(required=False,
                                                              source='options.' + field,
                                                              allow_blank=True,
                                                              help_text=extra_fields[field])
        if request.method == 'GET':
            # On reads, show the human-readable service type name.
            fields['type'] = serializers.ReadOnlyField(source='get_type_display')
        return fields

    def get_service_serializer(self):
        service = SupportedServices.get_service_models()[self.instance.type]['service']
        # Find service serializer by service type of settings object
        return next(cls for cls in BaseServiceSerializer.__subclasses__()
                    if cls.Meta.model == service)

    def can_see_extra_fields(self):
        """Staff and owners of the settings' customer may see credentials."""
        request = self.context['request']
        if request.user.is_staff:
            return True
        if not self.instance.customer:
            return False
        return self.instance.customer.has_user(request.user, models.CustomerRole.OWNER)

    def update(self, instance, validated_data):
        # Merge submitted options into stored options instead of replacing them.
        if 'options' in validated_data:
            new_options = dict.copy(instance.options)
            new_options.update(validated_data['options'])
            validated_data['options'] = new_options
        return super(ServiceSettingsSerializer, self).update(instance, validated_data)
class ServiceSerializerMetaclass(serializers.SerializerMetaclass):
    """ Build a list of supported services via serializers definition.
    See SupportedServices for details.
    """
    def __new__(cls, name, bases, attrs):
        # Register the model and its serializer as a side effect of class creation.
        model = attrs['Meta'].model
        SupportedServices.register_service(model)
        serializer_cls = super(ServiceSerializerMetaclass, cls).__new__(cls, name, bases, attrs)
        SupportedServices.register_service_serializer(model, serializer_cls)
        return serializer_cls
class BaseServiceSerializer(six.with_metaclass(ServiceSerializerMetaclass,
                                               PermissionFieldFilteringMixin,
                                               core_serializers.RestrictedSerializerMixin,
                                               core_serializers.AugmentedSerializerMixin,
                                               serializers.HyperlinkedModelSerializer)):
    """Base serializer for service models.

    Subclasses set Meta.model and may override SERVICE_ACCOUNT_FIELDS /
    SERVICE_ACCOUNT_EXTRA_FIELDS with {field_name: help_text} dicts to
    declare which credential fields the service accepts. When credentials
    are submitted on creation, validate() builds and saves a new
    ServiceSettings instance from them.
    """
    # NotImplemented sentinels: left as-is, the corresponding feature is off.
    SERVICE_ACCOUNT_FIELDS = NotImplemented
    SERVICE_ACCOUNT_EXTRA_FIELDS = NotImplemented

    projects = BasicProjectSerializer(many=True, read_only=True)
    customer_native_name = serializers.ReadOnlyField(source='customer.native_name')
    settings = serializers.HyperlinkedRelatedField(
        queryset=models.ServiceSettings.objects.filter(shared=True),
        view_name='servicesettings-detail',
        lookup_field='uuid',
        allow_null=True)
    # if project is defined service will be automatically connected to projects customer
    # and SPL between service and project will be created
    project = serializers.HyperlinkedRelatedField(
        queryset=models.Project.objects.all().select_related('customer'),
        view_name='project-detail',
        lookup_field='uuid',
        allow_null=True,
        required=False,
        write_only=True)
    backend_url = serializers.URLField(max_length=200, allow_null=True, write_only=True, required=False)
    username = serializers.CharField(max_length=100, allow_null=True, write_only=True, required=False)
    password = serializers.CharField(max_length=100, allow_null=True, write_only=True, required=False)
    domain = serializers.CharField(max_length=200, allow_null=True, write_only=True, required=False)
    token = serializers.CharField(allow_null=True, write_only=True, required=False)
    certificate = serializers.FileField(allow_null=True, write_only=True, required=False)
    resources_count = serializers.SerializerMethodField()
    service_type = serializers.SerializerMethodField()
    state = serializers.SerializerMethodField()
    scope = core_serializers.GenericRelatedField(related_models=models.ResourceMixin.get_all_models(), required=False)
    tags = serializers.SerializerMethodField()
    quotas = quotas_serializers.BasicQuotaSerializer(many=True, read_only=True)
    # Read-only fields proxied from the linked settings object.
    shared = serializers.ReadOnlyField(source='settings.shared')
    error_message = serializers.ReadOnlyField(source='settings.error_message')
    terms_of_services = serializers.ReadOnlyField(source='settings.terms_of_services')
    homepage = serializers.ReadOnlyField(source='settings.homepage')
    geolocations = serializers.JSONField(source='settings.geolocations', read_only=True)
    certifications = NestedServiceCertificationSerializer(many=True, read_only=True, source='settings.certifications')
    name = serializers.ReadOnlyField(source='settings.name')

    class Meta(object):
        model = NotImplemented
        fields = (
            'uuid', 'url', 'name', 'state', 'service_type', 'shared',
            'projects', 'project',
            'customer', 'customer_uuid', 'customer_name', 'customer_native_name', 'resources_count',
            'settings', 'settings_uuid', 'backend_url', 'username', 'password',
            'token', 'certificate', 'domain', 'terms_of_services', 'homepage',
            'certifications', 'geolocations', 'available_for_all', 'scope', 'tags', 'quotas',
        )
        settings_fields = ('backend_url', 'username', 'password', 'token', 'certificate', 'scope', 'domain')
        protected_fields = ('customer', 'settings', 'project') + settings_fields
        related_paths = ('customer', 'settings')
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
            'customer': {'lookup_field': 'uuid'},
            'settings': {'lookup_field': 'uuid'},
        }

    def __new__(cls, *args, **kwargs):
        # NOTE(review): this appends to Meta.fields on *every* instantiation,
        # accumulating duplicate names in the tuple; appears tolerated because
        # DRF builds fields into a dict -- confirm before relying on Meta.fields.
        if cls.SERVICE_ACCOUNT_EXTRA_FIELDS is not NotImplemented:
            cls.Meta.fields += tuple(cls.SERVICE_ACCOUNT_EXTRA_FIELDS.keys())
            cls.Meta.protected_fields += tuple(cls.SERVICE_ACCOUNT_EXTRA_FIELDS.keys())
        return super(BaseServiceSerializer, cls).__new__(cls, *args, **kwargs)

    @staticmethod
    def eager_load(queryset):
        """Prefetch relations rendered by this serializer to avoid N+1 queries."""
        queryset = queryset.select_related('customer', 'settings')
        projects = models.Project.objects.all().only('uuid', 'name')
        return queryset.prefetch_related(django_models.Prefetch('projects', queryset=projects), 'quotas')

    def get_tags(self, service):
        # Tags live on the settings object, not on the service itself.
        return service.settings.get_tags()

    def get_filtered_field_names(self):
        return 'customer',

    def get_fields(self):
        fields = super(BaseServiceSerializer, self).get_fields()
        # Limit selectable shared settings to this service's type.
        if self.Meta.model is not NotImplemented and 'settings' in fields:
            key = SupportedServices.get_model_key(self.Meta.model)
            fields['settings'].queryset = fields['settings'].queryset.filter(type=key)
        if self.SERVICE_ACCOUNT_FIELDS is not NotImplemented:
            # each service settings could be connected to scope
            self.SERVICE_ACCOUNT_FIELDS['scope'] = _('VM that contains service')
            # Keep only the credential fields this service declares; attach help text.
            for field in self.Meta.settings_fields:
                if field not in fields:
                    continue
                if field in self.SERVICE_ACCOUNT_FIELDS:
                    fields[field].help_text = self.SERVICE_ACCOUNT_FIELDS[field]
                else:
                    del fields[field]
        return fields

    def build_unknown_field(self, field_name, model_class):
        """Build CharFields for declared extra fields that have no model column."""
        if self.SERVICE_ACCOUNT_EXTRA_FIELDS is not NotImplemented:
            if field_name in self.SERVICE_ACCOUNT_EXTRA_FIELDS:
                backend = SupportedServices.get_service_backend(self.Meta.model)
                kwargs = {
                    'write_only': True,
                    'required': False,
                    'allow_blank': True,
                    'help_text': self.SERVICE_ACCOUNT_EXTRA_FIELDS[field_name],
                }
                # Advertise and pre-fill backend-provided defaults where available.
                if hasattr(backend, 'DEFAULTS') and field_name in backend.DEFAULTS:
                    kwargs['help_text'] += ' (default: %s)' % json.dumps(backend.DEFAULTS[field_name])
                    kwargs['initial'] = backend.DEFAULTS[field_name]
                return serializers.CharField, kwargs
        return super(BaseServiceSerializer, self).build_unknown_field(field_name, model_class)

    def validate_empty_values(self, data):
        # required=False is ignored for settings FK, deal with it here
        if 'settings' not in data:
            data = data.copy()
            data['settings'] = None
        return super(BaseServiceSerializer, self).validate_empty_values(data)

    def validate(self, attrs):
        """Check permissions and, on POST with credentials, create new settings.

        Raises ValidationError / PermissionDenied; on success, credential
        fields are consumed and replaced by a saved 'settings' instance.
        """
        user = self.context['request'].user
        customer = attrs.get('customer') or self.instance.customer
        project = attrs.get('project')
        if project and project.customer != customer:
            raise serializers.ValidationError(
                _('Service cannot be connected to project that does not belong to services customer.'))
        settings = attrs.get('settings')
        if not user.is_staff:
            if not customer.has_user(user, models.CustomerRole.OWNER):
                raise exceptions.PermissionDenied()
            if not self.instance and settings and not settings.shared:
                if attrs.get('customer') != settings.customer:
                    raise serializers.ValidationError(_('Customer must match settings customer.'))
        if self.context['request'].method == 'POST':
            name = self.initial_data.get('name')
            if not name or not name.strip():
                raise serializers.ValidationError({'name': 'Name cannot be empty'})
            # Make shallow copy to protect from mutations
            settings_fields = self.Meta.settings_fields[:]
            create_settings = any([attrs.get(f) for f in settings_fields])
            if not settings and not create_settings:
                raise serializers.ValidationError(
                    _('Either service settings or credentials must be supplied.'))
            extra_fields = tuple()
            if self.SERVICE_ACCOUNT_EXTRA_FIELDS is not NotImplemented:
                extra_fields += tuple(self.SERVICE_ACCOUNT_EXTRA_FIELDS.keys())
            if create_settings:
                # All Meta.required_fields must be present and non-null.
                required = getattr(self.Meta, 'required_fields', tuple())
                for field in settings_fields:
                    if field in required and (field not in attrs or attrs[field] is None):
                        error = self.fields[field].error_messages['required']
                        raise serializers.ValidationError({field: six.text_type(error)})
                args = {f: attrs.get(f) for f in settings_fields if f in attrs}
                if extra_fields:
                    args['options'] = {f: attrs[f] for f in extra_fields if f in attrs}
                name = self.initial_data.get('name')
                if name is None:
                    raise serializers.ValidationError({'name': _('Name field is required.')})
                settings = models.ServiceSettings(
                    type=SupportedServices.get_model_key(self.Meta.model),
                    name=name,
                    customer=customer,
                    **args)
                # Verify the credentials actually work before persisting them.
                try:
                    backend = settings.get_backend()
                    backend.ping(raise_exception=True)
                except ServiceBackendError as e:
                    raise serializers.ValidationError(_('Wrong settings: %s.') % e)
                except ServiceBackendNotImplemented:
                    pass
                self._validate_settings(settings)
                settings.save()
                executors.ServiceSettingsCreateExecutor.execute(settings)
                attrs['settings'] = settings
            # Credential fields were consumed above; remove them from attrs.
            for f in settings_fields + extra_fields:
                if f in attrs:
                    del attrs[f]
        return attrs

    def _validate_settings(self, settings):
        # Hook for subclasses to add service-specific settings validation.
        pass

    def get_resources_count(self, service):
        return self.get_resources_count_map[service.pk]

    @cached_property
    def get_resources_count_map(self):
        """Map service pk -> count of visible resources (computed once per serializer)."""
        resource_models = SupportedServices.get_service_resources(self.Meta.model)
        # Sub-resources are not counted separately.
        resource_models = set(resource_models) - set(models.SubResource.get_all_models())
        counts = defaultdict(lambda: 0)
        user = self.context['request'].user
        for model in resource_models:
            service_path = model.Permissions.service_path
            if isinstance(self.instance, list):
                query = {service_path + '__in': self.instance}
            else:
                query = {service_path: self.instance}
            queryset = filter_queryset_for_user(model.objects.all(), user)
            rows = queryset.filter(**query).values(service_path) \
                .annotate(count=django_models.Count('id'))
            for row in rows:
                service_id = row[service_path]
                counts[service_id] += row['count']
        return counts

    def get_service_type(self, obj):
        return SupportedServices.get_name_for_model(obj)

    def get_state(self, obj):
        # Service state is proxied from its settings.
        return obj.settings.get_state_display()

    def create(self, attrs):
        project = attrs.pop('project', None)
        service = super(BaseServiceSerializer, self).create(attrs)
        # If a project was supplied, link the new service to it via an SPL.
        spl_model = service.projects.through
        if project and not spl_model.objects.filter(project=project, service=service).exists():
            spl_model.objects.create(project=project, service=service)
        return service
class BaseServiceProjectLinkSerializer(PermissionFieldFilteringMixin,
                                       core_serializers.AugmentedSerializerMixin,
                                       serializers.HyperlinkedModelSerializer):
    """Base serializer for service <-> project links (SPLs).

    Subclasses set Meta.model and the service view_name.
    """
    project = serializers.HyperlinkedRelatedField(
        queryset=models.Project.objects.all(),
        view_name='project-detail',
        lookup_field='uuid')
    service_name = serializers.ReadOnlyField(source='service.settings.name')
    quotas = quotas_serializers.BasicQuotaSerializer(many=True, read_only=True)

    class Meta(object):
        model = NotImplemented
        fields = (
            'url',
            'project', 'project_name', 'project_uuid',
            'service', 'service_uuid', 'service_name', 'quotas',
        )
        related_paths = ('project', 'service')
        extra_kwargs = {
            'service': {'lookup_field': 'uuid', 'view_name': NotImplemented},
        }

    def get_filtered_field_names(self):
        # Restrict both endpoints of the link to objects visible to the user.
        return 'project', 'service'

    def validate(self, attrs):
        if attrs['service'].customer != attrs['project'].customer:
            raise serializers.ValidationError(_("Service customer doesn't match project customer."))
        # XXX: Consider adding unique key (service, project) to the model instead
        if self.Meta.model.objects.filter(service=attrs['service'], project=attrs['project']).exists():
            raise serializers.ValidationError(_('This service project link already exists.'))
        return attrs
class ResourceSerializerMetaclass(serializers.SerializerMetaclass):
    """ Build a list of supported resource via serializers definition.
    See SupportedServices for details.
    """
    def __new__(cls, name, bases, attrs):
        # Create the class first, then register it for its Meta.model.
        serializer_cls = super(ResourceSerializerMetaclass, cls).__new__(cls, name, bases, attrs)
        SupportedServices.register_resource_serializer(attrs['Meta'].model, serializer_cls)
        return serializer_cls
class BasicResourceSerializer(serializers.Serializer):
    """Minimal read-only view of any resource: uuid, name and type label."""
    uuid = serializers.ReadOnlyField()
    name = serializers.ReadOnlyField()
    resource_type = serializers.SerializerMethodField()

    def get_resource_type(self, resource):
        return SupportedServices.get_name_for_model(resource)
class ManagedResourceSerializer(BasicResourceSerializer):
    """Extends the basic resource view with its SPL's project and customer."""
    project_name = serializers.ReadOnlyField(source='service_project_link.project.name')
    project_uuid = serializers.ReadOnlyField(source='service_project_link.project.uuid')
    customer_uuid = serializers.ReadOnlyField(source='service_project_link.project.customer.uuid')
    customer_name = serializers.ReadOnlyField(source='service_project_link.project.customer.name')
class TagList(list):
    """
    List subclass whose string form is a JSON array, used as the last step
    of tag serialization.
    """
    def __str__(self):
        # Render the tags as a JSON list literal.
        return json.dumps(list(self))
class TagSerializer(serializers.Serializer):
    """
    This serializer updates tags field using django-taggit API.
    """
    def create(self, validated_data):
        """Create the instance, then apply submitted tags (if any) via taggit."""
        has_tags = 'tags' in validated_data
        tags = validated_data.pop('tags') if has_tags else None
        instance = super(TagSerializer, self).create(validated_data)
        if has_tags:
            instance.tags.set(*tags)
        return instance

    def update(self, instance, validated_data):
        """Update the instance, then apply submitted tags (if any) via taggit."""
        has_tags = 'tags' in validated_data
        tags = validated_data.pop('tags') if has_tags else None
        instance = super(TagSerializer, self).update(instance, validated_data)
        if has_tags:
            instance.tags.set(*tags)
        return instance
class TagListSerializerField(serializers.Field):
    """Serializer field accepting a JSON-encoded list of tag strings or a native list."""
    child = serializers.CharField()
    default_error_messages = {
        'not_a_list': _('Expected a list of items but got type "{input_type}".'),
        'invalid_json': _('Invalid json list. A tag list submitted in string form must be valid json.'),
        'not_a_str': _('All list items must be of string type.')
    }

    def to_internal_value(self, value):
        """Decode a string payload if needed, then validate every tag."""
        if isinstance(value, six.string_types):
            # An empty string is treated as an empty tag list.
            try:
                value = json.loads(value or '[]')
            except ValueError:
                self.fail('invalid_json')
        if not isinstance(value, list):
            self.fail('not_a_list', input_type=type(value).__name__)
        for tag in value:
            if not isinstance(tag, six.string_types):
                self.fail('not_a_str')
            self.child.run_validation(tag)
        return value

    def get_attribute(self, instance):
        """
        Fetch tags from cache defined in TagMixin.
        """
        return instance.get_tags()

    def to_representation(self, value):
        # Wrap in TagList so the final rendering is a JSON array.
        return value if isinstance(value, TagList) else TagList(value)
class BaseResourceSerializer(six.with_metaclass(ResourceSerializerMetaclass,
                                                core_serializers.RestrictedSerializerMixin,
                                                MonitoringSerializerMixin,
                                                PermissionFieldFilteringMixin,
                                                core_serializers.AugmentedSerializerMixin,
                                                TagSerializer,
                                                serializers.HyperlinkedModelSerializer)):
    """Base serializer for resources attached to a service project link (SPL).

    Subclasses set Meta.model and the SPL view names/querysets. On creation,
    'service_project_link' may be supplied directly, or it is resolved in
    validate() from the ('service_settings', 'project') pair.
    """
    state = serializers.ReadOnlyField(source='get_state_display')
    project = serializers.HyperlinkedRelatedField(
        queryset=models.Project.objects.all(),
        view_name='project-detail',
        lookup_field='uuid',
        allow_null=True,
        required=False,
    )
    project_name = serializers.ReadOnlyField(source='service_project_link.project.name')
    project_uuid = serializers.ReadOnlyField(source='service_project_link.project.uuid')
    service_project_link = serializers.HyperlinkedRelatedField(
        view_name=NotImplemented,
        queryset=NotImplemented,
        allow_null=True,
        required=False,
    )
    service = serializers.HyperlinkedRelatedField(
        source='service_project_link.service',
        view_name=NotImplemented,
        read_only=True,
        lookup_field='uuid')
    service_name = serializers.ReadOnlyField(source='service_project_link.service.settings.name')
    service_uuid = serializers.ReadOnlyField(source='service_project_link.service.uuid')
    service_settings = serializers.HyperlinkedRelatedField(
        queryset=models.ServiceSettings.objects.all(),
        view_name='servicesettings-detail',
        lookup_field='uuid',
        allow_null=True,
        required=False,
    )
    service_settings_uuid = serializers.ReadOnlyField(source='service_project_link.service.settings.uuid')
    service_settings_state = serializers.ReadOnlyField(
        source='service_project_link.service.settings.human_readable_state')
    service_settings_error_message = serializers.ReadOnlyField(
        source='service_project_link.service.settings.error_message')
    customer = serializers.HyperlinkedRelatedField(
        source='service_project_link.project.customer',
        view_name='customer-detail',
        read_only=True,
        lookup_field='uuid')
    customer_name = serializers.ReadOnlyField(source='service_project_link.project.customer.name')
    customer_abbreviation = serializers.ReadOnlyField(source='service_project_link.project.customer.abbreviation')
    customer_native_name = serializers.ReadOnlyField(source='service_project_link.project.customer.native_name')
    created = serializers.DateTimeField(read_only=True)
    resource_type = serializers.SerializerMethodField()
    tags = TagListSerializerField(required=False)
    access_url = serializers.SerializerMethodField()
    is_link_valid = serializers.BooleanField(
        source='service_project_link.is_valid',
        read_only=True,
        help_text=_('True if resource is originated from a service that satisfies an associated project requirements.'))

    class Meta(object):
        model = NotImplemented
        fields = MonitoringSerializerMixin.Meta.fields + (
            'url', 'uuid', 'name', 'description',
            'service', 'service_name', 'service_uuid',
            'service_settings', 'service_settings_uuid',
            'service_settings_state', 'service_settings_error_message',
            'project', 'project_name', 'project_uuid',
            'customer', 'customer_name', 'customer_native_name', 'customer_abbreviation',
            'tags', 'error_message',
            'resource_type', 'state', 'created', 'service_project_link', 'backend_id',
            'access_url', 'is_link_valid',
        )
        protected_fields = ('service', 'service_project_link', 'project', 'service_settings')
        read_only_fields = ('error_message', 'backend_id')
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }

    def get_filtered_field_names(self):
        return 'service_project_link',

    def get_resource_type(self, obj):
        return SupportedServices.get_name_for_model(obj)

    def get_resource_fields(self):
        """Names of the concrete model fields; used to strip virtual props on create."""
        return [f.name for f in self.Meta.model._meta.get_fields()]

    # an optional generic URL for accessing a resource
    def get_access_url(self, obj):
        return obj.get_access_url()

    @staticmethod
    def eager_load(queryset):
        """Prefetch the SPL chain rendered by this serializer to avoid N+1 queries."""
        return (
            queryset
            .select_related(
                'service_project_link',
                'service_project_link__service',
                'service_project_link__service__settings',
                'service_project_link__project',
                'service_project_link__project__customer',
            ).prefetch_related('service_project_link__service__settings__certifications',
                               'service_project_link__project__certifications')
        )

    def get_fields(self):
        fields = super(BaseResourceSerializer, self).get_fields()
        # Only on create: restrict selectable settings to this resource's type.
        if not self.instance:
            service_type = SupportedServices.get_model_key(self.Meta.model)
            queryset = fields['service_settings'].queryset.filter(type=service_type)
            fields['service_settings'].queryset = queryset
        return fields

    def validate(self, attrs):
        """Resolve the SPL from (service_settings, project) if not given directly."""
        # skip validation on object update
        if self.instance:
            return attrs
        service_settings = attrs.pop('service_settings', None)
        project = attrs.pop('project', None)
        service_project_link = attrs.get('service_project_link')
        if not service_project_link:
            if service_settings and project:
                spl_model = self.Meta.model.service_project_link.field.remote_field.model
                try:
                    service_project_link = spl_model.objects.get(
                        service__settings=service_settings,
                        project=project,
                    )
                    attrs['service_project_link'] = service_project_link
                except django_exceptions.ObjectDoesNotExist:
                    raise serializers.ValidationError(
                        _('You are not allowed to provision resource in current project using this provider. '
                          'Please specify another value for project and service_settings fields.')
                    )
            else:
                raise serializers.ValidationError(
                    _('Either service_project_link or service_settings and project should be specified.')
                )
        if not service_project_link.is_valid:
            raise serializers.ValidationError({
                'service_project_link': service_project_link.validation_message
            })
        return attrs

    @transaction.atomic
    def create(self, validated_data):
        data = validated_data.copy()
        fields = self.get_resource_fields()
        # Remove `virtual` properties which don't actually belong to the model.
        # Iterate over a snapshot of the keys: deleting from a dict while
        # iterating its live keys view raises RuntimeError on Python 3.
        for prop in list(data.keys()):
            if prop not in fields:
                del data[prop]
        resource = super(BaseResourceSerializer, self).create(data)
        resource.increase_backend_quotas_usage()
        return resource
class SummaryResourceSerializer(core_serializers.BaseSummarySerializer):
    """Polymorphic summary: delegates to the serializer registered for each resource model."""

    @classmethod
    def get_serializer(cls, model):
        return SupportedServices.get_resource_serializer(model)
class SummaryServiceSerializer(core_serializers.BaseSummarySerializer):
    """Polymorphic summary: delegates to the serializer registered for each service model."""

    @classmethod
    def get_serializer(cls, model):
        return SupportedServices.get_service_serializer(model)
class BaseResourceImportSerializer(PermissionFieldFilteringMixin,
                                   core_serializers.AugmentedSerializerMixin,
                                   serializers.HyperlinkedModelSerializer):
    """Base serializer for importing an existing backend resource.

    Subclasses set Meta.model. The target service project link is resolved
    from the 'service' passed in serializer context plus the submitted
    'project'.
    """
    backend_id = serializers.CharField(write_only=True)
    project = serializers.HyperlinkedRelatedField(
        queryset=models.Project.objects.all(),
        view_name='project-detail',
        lookup_field='uuid',
        write_only=True)
    state = serializers.ReadOnlyField(source='get_state_display')
    created = serializers.DateTimeField(read_only=True)
    import_history = serializers.BooleanField(
        default=True, write_only=True, help_text=_('Import historical resource usage.'))

    class Meta(object):
        model = NotImplemented
        fields = (
            'url', 'uuid', 'name', 'state', 'created',
            'backend_id', 'project', 'import_history'
        )
        read_only_fields = ('name',)
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }

    def get_filtered_field_names(self):
        return 'project',

    def get_fields(self):
        fields = super(BaseResourceImportSerializer, self).get_fields()
        # Context doesn't have service during schema generation
        if 'service' in self.context:
            # Only projects already linked to the service are valid targets.
            fields['project'].queryset = self.context['service'].projects.all()
        return fields

    def validate(self, attrs):
        # NOTE(review): uniqueness is checked across *all* settings; this assumes
        # backend ids are globally unique per resource model -- confirm.
        if self.Meta.model.objects.filter(backend_id=attrs['backend_id']).exists():
            raise serializers.ValidationError(
                {'backend_id': _('This resource is already linked to Waldur.')})
        spl_class = SupportedServices.get_related_models(self.Meta.model)['service_project_link']
        spl = spl_class.objects.get(service=self.context['service'], project=attrs['project'])
        attrs['service_project_link'] = spl
        return attrs

    def create(self, validated_data):
        # 'project' was only needed to resolve the SPL; drop it before model create.
        validated_data.pop('project')
        return super(BaseResourceImportSerializer, self).create(validated_data)
class VirtualMachineSerializer(BaseResourceSerializer):
    """Resource serializer for VM-like resources: IPs, sizing and an SSH key."""
    external_ips = serializers.ListField(
        child=serializers.IPAddressField(protocol='ipv4'),
        read_only=True,
    )
    internal_ips = serializers.ListField(
        child=serializers.IPAddressField(protocol='ipv4'),
        read_only=True,
    )
    ssh_public_key = serializers.HyperlinkedRelatedField(
        view_name='sshpublickey-detail',
        lookup_field='uuid',
        queryset=core_models.SshPublicKey.objects.all(),
        required=False,
        write_only=True)

    class Meta(BaseResourceSerializer.Meta):
        fields = BaseResourceSerializer.Meta.fields + (
            'start_time', 'cores', 'ram', 'disk', 'min_ram', 'min_disk',
            'ssh_public_key', 'user_data', 'external_ips', 'internal_ips',
            'latitude', 'longitude', 'key_name', 'key_fingerprint', 'image_name'
        )
        read_only_fields = BaseResourceSerializer.Meta.read_only_fields + (
            'start_time', 'cores', 'ram', 'disk', 'min_ram', 'min_disk',
            'external_ips', 'internal_ips',
            'latitude', 'longitude', 'key_name', 'key_fingerprint', 'image_name'
        )
        protected_fields = BaseResourceSerializer.Meta.protected_fields + (
            'user_data', 'ssh_public_key'
        )

    def get_fields(self):
        fields = super(VirtualMachineSerializer, self).get_fields()
        if 'request' in self.context:
            user = self.context['request'].user
            ssh_public_key = fields.get('ssh_public_key')
            if ssh_public_key:
                # Pre-filter key choices in the browsable API to the current user.
                ssh_public_key.query_params = {'user_uuid': user.uuid.hex}
                if not user.is_staff:
                    # Non-staff users may only attach their own or shared keys.
                    subquery = Q(user=user) | Q(is_shared=True)
                    ssh_public_key.queryset = ssh_public_key.queryset.filter(subquery)
        return fields

    def create(self, validated_data):
        # Denormalize the image name at creation time for later display.
        if 'image' in validated_data:
            validated_data['image_name'] = validated_data['image'].name
        return super(VirtualMachineSerializer, self).create(validated_data)
class PropertySerializerMetaclass(serializers.SerializerMetaclass):
    """ Build a list of supported properties via serializers definition.
    See SupportedServices for details.
    """
    def __new__(cls, name, bases, attrs):
        # Register the property model as a side effect of class creation.
        SupportedServices.register_property(attrs['Meta'].model)
        return super(PropertySerializerMetaclass, cls).__new__(cls, name, bases, attrs)
class BasePropertySerializer(six.with_metaclass(PropertySerializerMetaclass,
                                                core_serializers.AugmentedSerializerMixin,
                                                serializers.HyperlinkedModelSerializer)):
    """Base serializer for service properties; Meta.model is registered by the metaclass."""

    class Meta(object):
        model = NotImplemented
class AggregateSerializer(serializers.Serializer):
    """Validates an aggregation scope (customer or project, optionally one uuid)
    and resolves the matching querysets for the requesting user."""
    MODEL_NAME_CHOICES = (
        ('project', 'project'),
        ('customer', 'customer'),
    )
    MODEL_CLASSES = {
        'project': models.Project,
        'customer': models.Customer,
    }

    aggregate = serializers.ChoiceField(choices=MODEL_NAME_CHOICES, default='customer')
    uuid = serializers.CharField(allow_null=True, default=None)

    def get_aggregates(self, user):
        """Aggregate objects visible to ``user``, optionally narrowed to one uuid."""
        model_cls = self.MODEL_CLASSES[self.data['aggregate']]
        visible = filter_queryset_for_user(model_cls.objects.all(), user)
        uuid_value = self.data.get('uuid')
        if uuid_value:
            visible = visible.filter(uuid=uuid_value)
        return visible

    def get_projects(self, user):
        """Projects covered by the aggregate scope, filtered for ``user``."""
        scope = self.get_aggregates(user)
        if self.data['aggregate'] == 'project':
            return scope.all()
        projects = models.Project.objects.filter(customer__in=list(scope))
        return filter_queryset_for_user(projects, user)

    def get_service_project_links(self, user):
        """One queryset per SPL model, limited to the scope's projects."""
        projects = self.get_projects(user)
        return [spl_model.objects.filter(project__in=projects)
                for spl_model in models.ServiceProjectLink.get_all_models()]
class PrivateCloudSerializer(BaseResourceSerializer):
    """Resource serializer for private clouds; adds read-only extra configuration."""
    extra_configuration = serializers.JSONField(read_only=True)

    class Meta(BaseResourceSerializer.Meta):
        fields = BaseResourceSerializer.Meta.fields + ('extra_configuration',)
| {
"content_hash": "812dd578bc5ea65eb60a8bacbabb393f",
"timestamp": "",
"source": "github",
"line_count": 1694,
"max_line_length": 120,
"avg_line_length": 40.61570247933884,
"alnum_prop": 0.6199148292952342,
"repo_name": "opennode/nodeconductor",
"id": "f5bc9c5b4c454011741a1319947edf7dad0b3285",
"size": "68803",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "waldur_core/structure/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1877"
},
{
"name": "HTML",
"bytes": "17528"
},
{
"name": "JavaScript",
"bytes": "248900"
},
{
"name": "Python",
"bytes": "1254720"
}
],
"symlink_target": ""
} |
import acos_client.v21.base as base
from port import Port
class Server(base.BaseV21):
    """aXAPI v2.1 client for SLB real-server (slb.server.*) operations."""

    @staticmethod
    def _server_body(name, host, status):
        # create and update share an identical request payload shape.
        return {
            "server": {
                "name": name,
                "host": host,
                "status": status,
            }
        }

    def get(self, name, **kwargs):
        """Look up a server definition by name."""
        return self._post("slb.server.search", {'name': name}, **kwargs)

    def create(self, name, ip_address, **kwargs):
        """Create a server; 'status' kwarg defaults to 1."""
        body = self._server_body(name, ip_address, kwargs.get('status', 1))
        self._post("slb.server.create", body, **kwargs)

    def update(self, name, ip_address, **kwargs):
        """Update an existing server definition."""
        body = self._server_body(name, ip_address, kwargs.get('status', 1))
        self._post("slb.server.update", body, **kwargs)

    def fetchStatistics(self, name, **kwargs):
        """Fetch statistics using the bare-name payload shape."""
        return self._post("slb.server.fetchStatistics", {"name": name},
                          **kwargs)

    def delete(self, name, **kwargs):
        """Delete a single server by name."""
        self._post("slb.server.delete", {"server": {"name": name}}, **kwargs)

    def all(self, **kwargs):
        """Return every configured server."""
        return self._get('slb.server.getAll', **kwargs)

    def all_delete(self, **kwargs):
        """Delete every configured server."""
        self._get('slb.server.deleteAll', **kwargs)

    def stats(self, name, **kwargs):
        """Fetch statistics using the nested-server payload shape."""
        return self._post("slb.server.fetchStatistics",
                          {"server": {"name": name}}, **kwargs)

    def all_stats(self, **kwargs):
        """Fetch statistics for all servers."""
        return self._get('fetchAllStatistics', **kwargs)

    @property
    def port(self):
        """Port sub-client bound to the same underlying HTTP client."""
        return Port(self.client)
| {
"content_hash": "ab3e7a0c941162d9a6f9112c5cdbea49",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 77,
"avg_line_length": 29.22641509433962,
"alnum_prop": 0.5203357004519045,
"repo_name": "dougwig/acos-client",
"id": "52c5507417952843e22d6b71e2ab68c68190e476",
"size": "2172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "acos_client/v21/slb/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "240705"
}
],
"symlink_target": ""
} |
from tacker.common import exceptions
from tacker import context
from tacker.db.db_sqlalchemy.models import VnfResource
from tacker import objects
from tacker.objects import vnf_package_vnfd
from tacker.objects import vnf_resources
from tacker.tests.unit.db.base import SqlTestCase
from tacker.tests.unit.objects import fakes
from tacker.tests import uuidsentinel
class TestVnfResource(SqlTestCase):
    """Unit tests for the private DB helpers in tacker.objects.vnf_resources."""

    def setUp(self):
        super(TestVnfResource, self).setUp()
        self.context = context.get_admin_context()
        # Every test needs one persisted VNF instance plus one resource row.
        self.vnf_instance = self._create_vnf_instance()
        self.vnf_resource = self._create_vnf_resource()

    def _create_vnf_instance(self):
        """Persist package -> package VNFD -> VNF instance; return the instance."""
        vnf_package = objects.VnfPackage(context=self.context,
                                         **fakes.vnf_package_data)
        vnf_package.create()
        vnf_pack_vnfd = fakes.get_vnf_package_vnfd_data(
            vnf_package.id, uuidsentinel.vnfd_id)
        vnf_pack_vnfd_obj = vnf_package_vnfd.VnfPackageVnfd(
            context=self.context, **vnf_pack_vnfd)
        vnf_pack_vnfd_obj.create()
        # NOTE(review): attribute name looks odd -- presumably this was meant
        # to set the package's onboarding state; confirm against VnfPackage.
        vnf_package.vnf_package = "ONBOARDED"
        vnf_package.save()
        vnf_instance_data = fakes.get_vnf_instance_data(
            vnf_pack_vnfd_obj.vnfd_id)
        vnf_instance = objects.VnfInstance(context=self.context,
                                           **vnf_instance_data)
        vnf_instance.create()
        return vnf_instance

    def _create_vnf_resource(self):
        """Persist and return one VnfResource tied to the fixture VNF instance."""
        vnf_resource = vnf_resources.VnfResource(
            context=self.context,
            **fakes.fake_vnf_resource_data(self.vnf_instance.id))
        vnf_resource.create()
        return vnf_resource

    def test_vnf_resource_create(self):
        # _vnf_resource_create should persist the row and echo its fields back.
        resource_data = fakes.fake_vnf_resource_data(
            self.vnf_instance.id)
        resource_data.update({'id': uuidsentinel.id})
        result = vnf_resources._vnf_resource_create(
            self.context, resource_data)
        self.assertTrue(result.id)
        self.assertEqual('test', result.resource_name)

    def test_vnf_resource_get_by_id(self):
        result = vnf_resources._vnf_resource_get_by_id(
            self.context, self.vnf_resource.id)
        self.assertEqual(self.vnf_resource.id, result.id)

    def test_vnf_resource_update(self):
        update = {'resource_name': 'fake'}
        result = vnf_resources._vnf_resource_update(
            self.context, self.vnf_resource.id, update)
        self.assertEqual('fake', result.resource_name)

    def test_destroy_vnf_resource(self):
        # After destroy, lookups must raise VnfResourceNotFound.
        vnf_resources._destroy_vnf_resource(
            self.context, self.vnf_resource.id)
        self.assertRaises(
            exceptions.VnfResourceNotFound,
            vnf_resources.VnfResource.get_by_id, self.context,
            self.vnf_resource.id)

    def test_vnf_resource_list(self):
        result = vnf_resources._vnf_resource_list(
            self.context, self.vnf_instance.id)
        self.assertTrue(result[0].id)
        self.assertIsInstance(result[0], VnfResource)

    def test_make_vnf_resources_list(self):
        # DB rows should be wrapped into a typed VnfResourceList object.
        vnf_resource_db = vnf_resources._vnf_resource_list(
            self.context, self.vnf_instance.id)
        vnf_resource_list = vnf_resources._make_vnf_resources_list(
            self.context, vnf_resources.VnfResourceList(), vnf_resource_db)
        self.assertIsInstance(vnf_resource_list,
                              vnf_resources.VnfResourceList)
        self.assertTrue(vnf_resource_list.objects[0].id)
| {
"content_hash": "1b30250dd63093a93c4ddbe20e7c82da",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 75,
"avg_line_length": 38.51648351648352,
"alnum_prop": 0.642796005706134,
"repo_name": "openstack/tacker",
"id": "3290f6d1cb1643aaf75158686b9084d909fd89db",
"size": "4134",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tacker/tests/unit/db/test_vnf_resource.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "10809"
},
{
"name": "Mako",
"bytes": "1046"
},
{
"name": "Python",
"bytes": "7648075"
},
{
"name": "Ruby",
"bytes": "2841"
},
{
"name": "Shell",
"bytes": "61750"
},
{
"name": "Smarty",
"bytes": "3624"
}
],
"symlink_target": ""
} |
from string import Template
from datetime import date
# Regenerate the app bundle's Info.plist with the current version and year.
bitcoinDir = "./"

inFile = bitcoinDir + "/share/qt/Info.plist"
outFile = "Universecoin-Qt.app/Contents/Info.plist"

# Scrape "VERSION = x.y.z" out of the qmake project file.
version = "unknown"
fileForGrabbingVersion = bitcoinDir + "bitcoin-qt.pro"
# FIX: close the project file when done (the open() handle was leaked before).
with open(fileForGrabbingVersion) as versionFile:
    for line in versionFile:
        lineArr = line.replace(" ", "").split("=")
        if lineArr[0].startswith("VERSION"):
            version = lineArr[1].replace("\n", "")

# FIX: use context managers -- fIn/fOut were never closed, so the output
# file could be left unflushed on interpreter teardown.
with open(inFile, "r") as fIn:
    fileContent = fIn.read()

s = Template(fileContent)
newFileContent = s.substitute(VERSION=version, YEAR=date.today().year)

with open(outFile, "w") as fOut:
    fOut.write(newFileContent)

# Parenthesised print works under both Python 2 and 3 (this was a
# Python-2-only print statement before).
print("Info.plist fresh created")
| {
"content_hash": "c44356d1557429a0c1029005848e068d",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 69,
"avg_line_length": 27.416666666666668,
"alnum_prop": 0.7112462006079028,
"repo_name": "univesalcoin/universecoin-update",
"id": "439159699b7cc3ef0d374e4bea4029b09f73dac1",
"size": "905",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "share/qt/clean_mac_info_plist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32847"
},
{
"name": "C++",
"bytes": "2614694"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50615"
},
{
"name": "Makefile",
"bytes": "99781"
},
{
"name": "NSIS",
"bytes": "6210"
},
{
"name": "Objective-C",
"bytes": "1052"
},
{
"name": "Objective-C++",
"bytes": "5864"
},
{
"name": "Python",
"bytes": "3784"
},
{
"name": "QMake",
"bytes": "14732"
},
{
"name": "Shell",
"bytes": "11790"
}
],
"symlink_target": ""
} |
import random
def intro():
    """Print the game rules and the legend for the Hot/Warm/Cold clues."""
    print("""I am thinking of a 3-digit number. Try to guess what it is.
Here are some clues:
When I say:    That means:
  Cold    No digit is correct.
  Warm    One digit is correct but in the wrong position.
  Hot     One digit is correct and in the right position.
I have thought up a number. You have 10 guesses to get it.""")
def number_generator():
    """Return a tuple of three distinct random digits (0-9).

    Draws without replacement: each chosen digit is removed from the
    pool before the next draw.
    """
    pool = list(range(10))
    digits = []
    for _ in range(3):
        digit = random.choice(pool)
        pool.remove(digit)
        digits.append(digit)
    return tuple(digits)
def number_pick(the_number):
    """Prompt for up to 10 guesses of *the_number* (a tuple of 3 digits).

    Returns a 2-tuple ``(False, success)`` where ``success`` is "yes" when
    the player guessed the number and "no" when the guesses ran out.  The
    constant first element is kept for backward compatibility: ``main``
    reads ``result[1]``.
    """
    try_number = 1
    while True:
        if try_number > 10:
            print("\nYou tried but you failed.\n\n")
            return False, "no"
        guess = input("\nGuess #{}: ".format(try_number))
        if len(guess) != 3:
            print("\nOnly 3 numbers please!\n")
            continue
        try:
            guessed = tuple(int(digit) for digit in guess)
        except ValueError:
            # BUG FIX: previously a non-numeric 3-char guess was still
            # compared (using a partial digit tuple) and consumed a turn.
            print("only numbers please~")
            continue
        if guessed == the_number:
            return False, "yes"
        # BUG FIX: clues used `index - 1`, printing each clue for a position
        # rotated by one; each clue now refers to the digit it belongs to.
        for position, digit in enumerate(guessed):
            if digit == the_number[position]:
                print("Hot ", end="")
            elif digit in the_number:
                print("Warm ", end="")
            else:
                print("Cold ", end="")
        try_number += 1
def main():
    """Play one round of the guessing game, then optionally restart."""
    intro()
    outcome = number_pick(number_generator())
    if outcome[1] == "yes":
        print("You got it!\n")
    elif outcome[1] == "no":
        print("You have failed, how sad :,c")
    play_again = input("Do you want to play again? (yes or no) ")
    if play_again in ("yes", "y"):
        main()


if __name__ == '__main__':
    main()
| {
"content_hash": "d9ff078793dd76ad68697e200934dfdf",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 72,
"avg_line_length": 30.1875,
"alnum_prop": 0.537888198757764,
"repo_name": "Mdlkxzmcp/various_python",
"id": "d3562f763d85c9830518323347f0d90ee79ec22d",
"size": "2415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "random_old_stuff/Hot-Cold.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1713"
},
{
"name": "HTML",
"bytes": "10923"
},
{
"name": "JavaScript",
"bytes": "1931"
},
{
"name": "Python",
"bytes": "262310"
}
],
"symlink_target": ""
} |
"""Benchmark script for TensorFlow.
See the README for more information.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags as absl_flags
import tensorflow as tf
import benchmark_cnn
import cnn_util
import flags
import mlperf
from cnn_util import log_fn
# Register the benchmark's own flag definitions and expose them as key flags
# so absl parses and validates them alongside its built-ins.
flags.define_flags()
for name in flags.param_specs.keys():
  absl_flags.declare_key_flag(name)
absl_flags.DEFINE_boolean(
    'ml_perf_compliance_logging', False,
    'Print logs required to be compliant with MLPerf. If set, must clone the '
    'MLPerf training repo https://github.com/mlperf/training and add '
    'https://github.com/mlperf/training/tree/master/compliance to the '
    'PYTHONPATH')
def main(positional_arguments):
  """Build the benchmark from flags and run it.

  Command-line arguments like '--distortions False' are equivalent to
  '--distortions=True False', where False is a positional argument. To
  prevent this from silently running with distortions, we do not allow
  positional arguments.
  """
  assert len(positional_arguments) >= 1
  extra_arguments = positional_arguments[1:]
  if extra_arguments:
    raise ValueError('Received unknown positional arguments: %s'
                     % extra_arguments)
  params = benchmark_cnn.make_params_from_flags()
  with mlperf.mlperf_logger(absl_flags.FLAGS.ml_perf_compliance_logging,
                            params.model):
    params = benchmark_cnn.setup(params)
    bench = benchmark_cnn.BenchmarkCNN(params)
    version_tuple = cnn_util.tensorflow_version_tuple()
    log_fn('TensorFlow: %i.%i' % (version_tuple[0], version_tuple[1]))
    bench.print_info()
    bench.run()


if __name__ == '__main__':
  app.run(main)  # Raises error on invalid flags, unlike tf.app.run()
| {
"content_hash": "9bb6ae16994a41540f645b91bdaa4d59",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 80,
"avg_line_length": 30.68421052631579,
"alnum_prop": 0.707833047455689,
"repo_name": "annarev/benchmarks",
"id": "707dd60d058c970a4fd1761c9bdf28abc85ff36a",
"size": "2439",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "813002"
},
{
"name": "Shell",
"bytes": "5545"
}
],
"symlink_target": ""
} |
def pretty_print_review_and_label(i):
    """Print label i and the first 80 characters of review i."""
    snippet = reviews[i][:80]
    print("{}\t:\t{}...".format(labels[i], snippet))
# What we know! Load the review texts, one per line (trailing newline stripped).
with open('reviews.txt', 'r') as review_file:
    reviews = [line[:-1] for line in review_file.readlines()]
# What we WANT to know! Load the sentiment labels, upper-cased.
with open('labels.txt', 'r') as label_file:
    labels = [line[:-1].upper() for line in label_file.readlines()]
from collections import Counter
import numpy as np
import time
import sys
# NOTE: the following triple-quoted string is disabled exploratory code kept
# for reference; the same logic lives in SentimentNetwork.pre_process_data.
'''
# 定义词频计数器
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
# 统计不同结果词频
for i in range(len(reviews)):
    if (labels[i] == 'POSITIVE'):
        for word in reviews[i].split(" "):
            positive_counts[word] += 1
            total_counts[word] += 1
    else:
        for word in reviews[i].split(" "):
            negative_counts[word] += 1
            total_counts[word] += 1
# 定义好坏比例统计数器
pos_neg_ratios = Counter()
# 计算不同结果词频比率
for term, cnt in list(total_counts.most_common()):
    if (cnt > 100):
        pos_neg_ratio = positive_counts[term] / float(negative_counts[term] + 1)
        pos_neg_ratios[term] = pos_neg_ratio
# 标准化比率正态化
for word, ratio in pos_neg_ratios.most_common():
    if (ratio > 1):
        pos_neg_ratios[word] = np.log(ratio)
    else:
        pos_neg_ratios[word] = -np.log((1 / (ratio + 0.01)))
vocab = set(total_counts.keys())
vocab_size = len(vocab)
# 定义输入标准化的字典
layer_0 = np.zeros((1, vocab_size))
# 定义词的序列字典
word2index = {}
for i, word in enumerate(vocab):
    word2index[word] = i
# 输入文字根据word2index转词频
def update_input_layer(review):
    global layer_0
    # clear out previous state, reset the layer to be all 0s
    layer_0 *= 0
    for word in review.split(" "):
        layer_0[0][word2index[word]] += 1
# 定义输出数据转数字
def get_target_for_label(label):
    if (label == 'POSITIVE'):
        return 1
    else:
        return 0
'''
# Define the neural network
class SentimentNetwork:
    """A two-layer sentiment classifier trained directly with numpy.

    Input layer is a binary bag-of-words over a filtered vocabulary;
    the single output unit is a sigmoid giving P(POSITIVE).
    """
    def __init__(self, reviews, labels, min_count=10, polarity_cutoff=0.1, hidden_nodes=10, learning_rate=0.1):
        # set our random number generator
        np.random.seed(1)
        ## project6: pre-filter the vocabulary used as input features
        self.pre_process_data(reviews, polarity_cutoff, min_count)
        self.init_network(len(self.review_vocab), hidden_nodes, 1, learning_rate)
    # Build the word and label vocabularies (and their index dicts)
    def pre_process_data(self, reviews, polarity_cutoff, min_count):
        # project6: compute positive/negative frequency ratios per word
        positive_counts = Counter()
        negative_counts = Counter()
        total_counts = Counter()
        # NOTE(review): this loop reads the module-level ``labels`` global,
        # not a parameter -- it only works when *reviews* lines up with that
        # global; confirm intended.
        for i in range(len(reviews)):
            if (labels[i] == 'POSITIVE'):
                for word in reviews[i].split(" "):
                    positive_counts[word] += 1
                    total_counts[word] += 1
            else:
                for word in reviews[i].split(" "):
                    negative_counts[word] += 1
                    total_counts[word] += 1
        pos_neg_ratios = Counter()
        # Ratio > 1 means the word leans positive, < 1 negative.
        for term, cnt in list(total_counts.most_common()):
            if (cnt >= 50):
                pos_neg_ratio = positive_counts[term] / float(negative_counts[term] + 1)
                pos_neg_ratios[term] = pos_neg_ratio
        # Normalise ratios to a log scale symmetric around 0.
        for word, ratio in pos_neg_ratios.most_common():
            if (ratio > 1):
                pos_neg_ratios[word] = np.log(ratio)
            else:
                pos_neg_ratios[word] = -np.log((1 / (ratio + 0.01)))
        '''
        review_vocab = set()
        for review in reviews:
            for word in review.split(" "):
                review_vocab.add(word)
        self.review_vocab = list(review_vocab)
        '''
        # project6: feature selection
        review_vocab = set()
        for review in reviews:
            for word in review.split(" "):
                if (total_counts[word] > min_count):  # word is frequent enough
                    if (word in pos_neg_ratios.keys()):  # and its normalised ratio clears polarity_cutoff
                        if ((pos_neg_ratios[word] >= polarity_cutoff) or (pos_neg_ratios[word] <= -polarity_cutoff)):
                            review_vocab.add(word)  # accept as a feature
                    else:
                        review_vocab.add(word)
        self.review_vocab = list(review_vocab)
        label_vocab = set()
        for label in labels:
            label_vocab.add(label)
        self.label_vocab = list(label_vocab)
        self.review_vocab_size = len(self.review_vocab)
        self.label_vocab_size = len(self.label_vocab)
        self.word2index = {}
        for i, word in enumerate(self.review_vocab):
            self.word2index[word] = i
        self.label2index = {}
        for i, label in enumerate(self.label_vocab):
            self.label2index[label] = i
    # Initialise the network hyper-parameters and weight matrices
    def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        # Set number of nodes in input, hidden and output layers.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes
        # Initialize weights
        self.weights_0_1 = np.zeros((self.input_nodes, self.hidden_nodes))
        self.weights_1_2 = np.random.normal(0.0, self.output_nodes ** -0.5,
                                            (self.hidden_nodes, self.output_nodes))
        self.learning_rate = learning_rate
        self.layer_0 = np.zeros((1, input_nodes))
        # project5: pre-allocate layer_1 so train() can reuse it
        self.layer_1 = np.zeros((1, hidden_nodes))
    # Convert a review into the bag-of-words input vector
    def update_input_layer(self, review):
        # clear out previous state, reset the layer to be all 0s
        self.layer_0 *= 0
        for word in review.split(" "):
            if (word in self.word2index.keys()):
                self.layer_0[0][self.word2index[word]] = 1  # project4: noise reduction -- binary presence instead of counts
    # Convert a label into the 0/1 training target
    def get_target_for_label(self, label):
        if (label == 'POSITIVE'):
            return 1
        else:
            return 0
    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))
    def sigmoid_output_2_derivative(self, output):
        return output * (1 - output)
    def train(self, training_reviews_raw, training_labels):
        """One epoch of online SGD over the given reviews/labels."""
        # project5: record only the indices of known words in each review
        training_reviews = list()
        for review in training_reviews_raw:
            indices = set()
            for word in review.split(" "):
                if (word in self.word2index.keys()):
                    indices.add(self.word2index[word])
            training_reviews.append(list(indices))
        assert (len(training_reviews) == len(training_labels))
        correct_so_far = 0
        start = time.time()
        for i in range(len(training_reviews)):
            review = training_reviews[i]
            label = training_labels[i]
            #### Implement the forward pass here ####
            ### Forward pass ###
            # Input Layer
            # project5: input handling replaced by the index list above
            # self.update_input_layer(review)
            # Hidden layer
            # layer_1 = self.layer_0.dot(self.weights_0_1)
            # project5: sum only the weight rows of the words present
            self.layer_1 *= 0
            for index in review:
                self.layer_1 += self.weights_0_1[index]
            # Output layer
            layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2))
            #### Implement the backward pass here ####
            ### Backward pass ###
            layer_2_error = layer_2 - self.get_target_for_label(
                label)  # Output layer error is the difference between desired target and actual output.
            layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)
            layer_1_error = layer_2_delta.dot(self.weights_1_2.T)  # errors propagated to the hidden layer
            layer_1_delta = layer_1_error  # hidden layer gradients - no nonlinearity so it's the same as the error
            self.weights_1_2 -= self.layer_1.T.dot(
                layer_2_delta) * self.learning_rate  # update hidden-to-output weights with gradient descent step
            '''
            self.weights_0_1 -= self.layer_0.T.dot(
                layer_1_delta) * self.learning_rate  # update input-to-hidden weights with gradient descent step
            '''
            for index in review:
                self.weights_0_1[index] -= layer_1_delta[
                    0] * self.learning_rate  # update input-to-hidden weights with gradient descent step
            if (np.abs(layer_2_error) < 0.5):
                correct_so_far += 1
            reviews_per_second = i / float(time.time() - start)
            sys.stdout.write(
                "\rProgress:" + str(100 * i / float(len(training_reviews)))[:4] + "% Speed(reviews/sec):" + str(
                    reviews_per_second)[0:5] + " #Correct:" + str(correct_so_far) + " #Trained:" + str(
                    i + 1) + " Training Accuracy:" + str(correct_so_far * 100 / float(i + 1))[:4] + "%")
    def test(self, testing_reviews, testing_labels):
        """Report accuracy of run() over the given labelled reviews."""
        correct = 0
        start = time.time()
        for i in range(len(testing_reviews)):
            pred = self.run(testing_reviews[i])
            if (pred == testing_labels[i]):
                correct += 1
            reviews_per_second = i / float(time.time() - start)
            sys.stdout.write("\rProgress:" + str(100 * i / float(len(testing_reviews)))[:4] \
                             + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
                             + "% #Correct:" + str(correct) + " #Tested:" + str(i + 1) + " Testing Accuracy:" + str(
                correct * 100 / float(i + 1))[:4] + "%")
    def run(self, review):
        """Classify a single review; returns "POSITIVE" or "NEGATIVE"."""
        # Input Layer
        self.update_input_layer(review.lower())
        # Hidden layer
        layer_1 = self.layer_0.dot(self.weights_0_1)
        # Output layer
        layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
        if (layer_2[0] > 0.5):
            return "POSITIVE"
        else:
            return "NEGATIVE"
# Module-level driver: builds, trains and evaluates the network as a side
# effect of importing this file (training can take a while).
# NOTE(review): the constructor sees reviews[:-1000] while train() gets the
# smaller reviews[:-3000] slice -- presumably intentional; confirm.
mlp = SentimentNetwork(reviews[:-1000], labels[:-1000], min_count=20, polarity_cutoff=0.5, learning_rate=0.0001)
mlp.train(reviews[:-3000], labels[:-3000])
print('')
mlp.test(reviews[-1000:], labels[-1000:])
| {
"content_hash": "f07e5634b1abaa5db0ef83d5a26fbe5a",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 131,
"avg_line_length": 33.08524590163935,
"alnum_prop": 0.5539589733425825,
"repo_name": "zchq88/DLminiProject",
"id": "2882a889017be56fee71e544dbae595298708013",
"size": "10594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sentiment_network/Sentiment Classification.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "4752900"
},
{
"name": "Python",
"bytes": "10594"
}
],
"symlink_target": ""
} |
import io
import logging
import os
import re
import sys
from datetime import datetime
from datetime import timedelta
from django import db
from django.apps import apps
from django.conf import settings
import kolibri
from kolibri.core.deviceadmin.exceptions import IncompatibleDatabase
from kolibri.core.tasks.decorators import register_task
from kolibri.core.utils.lock import db_lock
from kolibri.utils.conf import KOLIBRI_HOME
from kolibri.utils.time_utils import local_now
# Import db instead of db.connections because we want to use an instance of
# connections that might be updated from outside.
logger = logging.getLogger(__name__)

# Keyword arguments handed to open()/io.open() for backup dump files.
# Use encoded text for Python 3 (doesn't work in Python 2!)
KWARGS_IO_READ = {"mode": "r", "encoding": "utf-8"}
KWARGS_IO_WRITE = {"mode": "w", "encoding": "utf-8"}
# Constant job_id for vacuum task
SCH_VACUUM_JOB_ID = "1"
# Use binary file mode for Python 2 (doesn't work in Python 3!)
if sys.version_info < (3,):
    KWARGS_IO_READ = {"mode": "rb"}
    KWARGS_IO_WRITE = {"mode": "wb"}
def default_backup_folder():
    """Return the default directory where database backups are stored."""
    backup_dir = os.path.join(KOLIBRI_HOME, "backups")
    return backup_dir
def get_dtm_from_backup_name(fname):
    """Return the ``YYYY-MM-DD HH:MM:SS`` timestamp encoded in an automated
    backup filename.

    :raises ValueError: when *fname* does not match the backup naming
        convention ``db-v<version>_<date>_<time>[...].dump``.
    """
    pattern = re.compile(r"^db\-v[^_]+_(?P<dtm>[\d\-_]+).*\.dump$")
    match = pattern.search(fname)
    if match is None:
        raise ValueError(
            "Tried to get date component of unparsed filename: {}".format(fname)
        )
    parts = match.group("dtm").split("_")
    # parts[0] is the date; parts[1] is the time with '-' separators.
    return "{date} {time}".format(date=parts[0], time=parts[1].replace("-", ":"))
def is_full_version(fname):
    """Tell whether *fname* was produced by this exact Kolibri version.

    Assumes version strings never contain underscores.
    """
    # Can contain suffixes denoting alpha, beta, post, dev etc.
    expected_prefix = "db-v{}_".format(kolibri.__version__)
    return fname.startswith(expected_prefix)
def dbbackup(old_version, dest_folder=None):
    """
    Sqlite3 only
    Backup database to dest_folder. Uses SQLite's built in iterdump():
    https://docs.python.org/3/library/sqlite3.html#sqlite3.Connection.iterdump
    Notice that it's important to add at least version and date to the path
    of the backup, otherwise you risk that upgrade activities carried out on
    the same date overwrite each other. It's also quite important for the user
    to know which version of Kolibri that a certain database should match.

    :param old_version: Version string embedded in the backup file name.
    :param: dest_folder: Default is ~/.kolibri/backups/db-[version]-[date].dump
    :returns: Path of new backup file
    :raises IncompatibleDatabase: if the default database is not sqlite3.
    """
    if "sqlite3" not in settings.DATABASES["default"]["ENGINE"]:
        raise IncompatibleDatabase()
    if not dest_folder:
        dest_folder = default_backup_folder()
    # This file name is a convention, used to figure out the latest backup
    # that was made (by the dbrestore command)
    fname = "db-v{version}_{dtm}.dump".format(
        version=old_version, dtm=datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    )
    if not os.path.exists(dest_folder):
        os.makedirs(dest_folder)
    backup_path = os.path.join(dest_folder, fname)
    # Setting encoding=utf-8: io.open() is Python 2 compatible
    # See: https://github.com/learningequality/kolibri/issues/2875
    with io.open(backup_path, **KWARGS_IO_WRITE) as f:
        # If the connection hasn't been opened yet, then open it
        if not db.connections["default"].connection:
            db.connections["default"].connect()
        # iterdump() yields the full SQL script recreating the database.
        for line in db.connections["default"].connection.iterdump():
            f.write(line)
    return backup_path
def dbrestore(from_file):
    """
    Sqlite3 only
    Restores the database given a special database dump file containing SQL
    statements.

    :param from_file: Path to a dump file produced by ``dbbackup``.
    :raises IncompatibleDatabase: if the default database is not sqlite3.
    """
    if "sqlite3" not in settings.DATABASES["default"]["ENGINE"]:
        raise IncompatibleDatabase()
    dst_file = settings.DATABASES["default"]["NAME"]
    # Close connections
    db.connections.close_all()
    # Wipe current database file
    if not db.connections["default"].is_in_memory_db():
        with open(dst_file, "w") as f:
            f.truncate()
    else:
        logger.info("In memory database, not truncating: {}".format(dst_file))
    # Setting encoding=utf-8: io.open() is Python 2 compatible
    # See: https://github.com/learningequality/kolibri/issues/2875
    # NOTE(review): built-in open() is used here rather than io.open as the
    # comment above suggests; equivalent on Python 3.
    with open(from_file, **KWARGS_IO_READ) as f:
        db.connections["default"].connect()
        db.connections["default"].connection.executescript(f.read())
    # Finally, it's okay to import models and open database connections.
    # We need this to avoid generating records with identical 'Instance ID'
    # and conflicting counters, in case the database we're overwriting had
    # already been synced with other devices.:
    from morango.models import DatabaseIDModel

    DatabaseIDModel.objects.create()
def search_latest(search_root, fallback_version):
    """Return the path of the newest backup dump in *search_root*, or None.

    Only files matching ``db-v<fallback_version>*.dump`` are considered.
    A backup from the exact current version always wins; otherwise the
    most recent timestamp (encoded in the file name) is chosen.
    """
    logger.info("Searching latest backup in {}...".format(search_root))
    # All file names have to be according to the fall back version.
    prefix = "db-v{}".format(fallback_version)
    # Dates in the filenames sort alphanumerically, so a plain sort walks
    # the candidates from oldest to newest without touching file metadata.
    candidates = sorted(
        entry
        for entry in os.listdir(search_root)
        if entry.endswith(".dump") and entry.startswith(prefix)
    )
    best_name = ""
    best_dtm = ""
    for candidate in candidates:
        try:
            dtm = get_dtm_from_backup_name(candidate)
        except ValueError:
            continue
        # Always pick the newest version
        if is_full_version(candidate) or dtm > best_dtm:
            best_dtm = dtm
            best_name = candidate
    if best_name:
        return os.path.join(search_root, best_name)
    return None
@register_task(job_id=SCH_VACUUM_JOB_ID)
def perform_vacuum(database=db.DEFAULT_DB_ALIAS, full=False):
    """Vacuum the given database (sqlite or postgresql vendors only).

    :param database: Django database alias to vacuum.
    :param full: Postgres only -- run ``VACUUM FULL ANALYZE`` on the two
        morango buffer tables instead of a plain ``VACUUM ANALYZE`` over
        all morango models.
    """
    connection = db.connections[database]
    if connection.vendor == "sqlite":
        try:
            # Sqlite VACUUM needs exclusive access: take the lock and close
            # every other connection before issuing it.
            with db_lock():
                db.close_old_connections()
                db.connections.close_all()
                cursor = connection.cursor()
                cursor.execute("vacuum;")
                connection.close()
        except Exception as e:
            logger.error(e)
            new_msg = (
                "Vacuum of database {db_name} couldn't be executed. Possible reasons:\n"
                " * There is an open transaction in the db.\n"
                " * There are one or more active SQL statements.\n"
                "The full error: {error_msg}"
            ).format(
                db_name=db.connections[database].settings_dict["NAME"], error_msg=e
            )
            logger.error(new_msg)
        else:
            logger.info("Sqlite database Vacuum finished.")
    elif connection.vendor == "postgresql":
        if full:
            morango_models = ("morango_recordmaxcounterbuffer", "morango_buffer")
        else:
            morango_models = [
                m
                for m in apps.get_models(include_auto_created=True)
                if "morango.models" in str(m)
            ]
        cursor = connection.cursor()
        for m in morango_models:
            if full:
                # NOTE(review): in the `full` branch `m` is a raw table-name
                # string; in the other branch it is a model class, hence the
                # `_meta.db_table` below. Confirm intended asymmetry.
                cursor.execute("vacuum full analyze {};".format(m))
            else:
                cursor.execute("vacuum analyze {};".format(m._meta.db_table))
        connection.close()
def schedule_vacuum():
    """Queue the vacuum task for the next 3 AM (local time), repeating daily."""
    now = local_now()
    next_run = now.replace(hour=3, minute=0, second=0, microsecond=0)
    if next_run < now:
        # If it is past 3AM, change the day to tomorrow.
        next_run = next_run + timedelta(days=1)
    # Repeat indefinitely
    perform_vacuum.enqueue_at(next_run, repeat=None, interval=24 * 60 * 60)
| {
"content_hash": "8b956f2a830796b699a7745b59475203",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 88,
"avg_line_length": 33.668067226890756,
"alnum_prop": 0.6420816173717708,
"repo_name": "indirectlylit/kolibri",
"id": "39ba918b1b17581b6600cb708e38409b747cdbe1",
"size": "8013",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "kolibri/core/deviceadmin/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2554964"
},
{
"name": "Dockerfile",
"bytes": "4114"
},
{
"name": "Gherkin",
"bytes": "365088"
},
{
"name": "HTML",
"bytes": "24294"
},
{
"name": "JavaScript",
"bytes": "1613945"
},
{
"name": "Makefile",
"bytes": "11953"
},
{
"name": "Python",
"bytes": "2860587"
},
{
"name": "SCSS",
"bytes": "5225"
},
{
"name": "Shell",
"bytes": "5245"
},
{
"name": "Vue",
"bytes": "1604613"
}
],
"symlink_target": ""
} |
from iota import Iota, TryteString, TransactionHash, TransactionTrytes, \
HttpAdapter, MockAdapter
from iota.adapter.wrappers import RoutingWrapper
from unittest import TestCase
import sys
from unittest.mock import MagicMock, patch
# Load mocked package on import from pow pkg.
# Therefore we can test without having to install it.
sys.modules['pow'] = MagicMock()
class LocalPowTestCase(TestCase):
    """
    Unit tests for `local_pow` feature using `pow` package
    from `ccurl.inteface.py`.
    """
    # We are only interested in if the ccurl interface is called.
    # Don't care about the values, and there is no actual PoW
    # calculation in these tests. Testing the functional correctness
    # of the PoW calculation is done in iotaledger/ccurl.interface.py.
    # Filters are thoroughly tested in `attach_to_tangle_test.py`.

    def setUp(self):
        """
        These values will be used in the tests.
        """
        # Will be padded to transaction length by TransactionTrytes()
        self.trytes1 ='CCLDVADBEACCWCTCEAZBCDFCE'
        # Will be padded to transaction length by TransactionTrytes()
        self.trytes2 ='CGDEAHDFDPCBDGDPCRCHDXCCDBDEAKDPC'
        # Will be padded to hash length by TransactionHash()
        self.trunk ='EWSQPV9AGXUQRYAZIUONVBXFNWRWIGVCFT'
        self.branch ='W9VELHQPPERYSG9ZLLAHQKDLJQBKYYZOS'
        # Minimum weight magnitude passed to every attach_to_tangle call.
        self.mwm = 14
        # Create real objects so that we pass the filters
        self.bundle = [TransactionTrytes(self.trytes1), TransactionTrytes(self.trytes2)]
        # ccurl_bundle is only needed to differentiate between response
        # from mocked pow and MockAdapter in some test cases.
        self.ccurl_bundle = [TransactionTrytes(self.trytes1)]
        self.trunk = TransactionHash(self.trunk)
        self.branch = TransactionHash(self.branch)

    def test_backward_compatibility(self):
        """
        Test that the local_pow feature is backward compatible.
        That is, if `local_pow` argument is omitted, it takes no
        effect and the pow extension package is not called.
        """
        with patch('pow.ccurl_interface.attach_to_tangle',
                   MagicMock(return_value=self.ccurl_bundle)) as mocked_ccurl:
            self.adapter = MockAdapter()
            self.adapter.seed_response('attachToTangle',{
                'trytes': self.bundle,
            })
            # No `local_pow` argument is passed to the api!
            api = Iota(self.adapter)
            result = api.attach_to_tangle(
                self.trunk,
                self.branch,
                self.bundle,
                self.mwm)
            # Ccurl interface was not called
            self.assertFalse(mocked_ccurl.called)
            # Result is the one returned by MockAdapter
            self.assertEqual(result['trytes'], self.bundle)
            # And not by mocked pow pkg
            self.assertNotEqual(result['trytes'], self.ccurl_bundle)

    def test_http_adapter(self):
        """
        Test if local_pow feature works with HttpAdapter.
        """
        # Note that we need correct return value to pass the
        # response filter.
        with patch('pow.ccurl_interface.attach_to_tangle',
                   MagicMock(return_value=self.bundle)) as mocked_ccurl:
            api = Iota(HttpAdapter('http://localhost:14265/'),local_pow=True)
            result = api.attach_to_tangle(
                self.trunk,
                self.branch,
                self.bundle,
                self.mwm)
            self.assertTrue(mocked_ccurl.called)
            self.assertEqual(result['trytes'], self.bundle)

    def test_mock_adapter(self):
        """
        Test if local_pow feature works with MockAdapter.
        """
        # Note that we need correct return value to pass the
        # response filter.
        with patch('pow.ccurl_interface.attach_to_tangle',
                   MagicMock(return_value=self.bundle)) as mocked_ccurl:
            api = Iota(MockAdapter(),local_pow=True)
            result = api.attach_to_tangle(
                self.trunk,
                self.branch,
                self.bundle,
                self.mwm)
            self.assertTrue(mocked_ccurl.called)
            self.assertEqual(result['trytes'], self.bundle)

    def test_routing_wrapper(self):
        """
        Test if local_pow feature works with RoutingWrapper.
        """
        # Note that we need correct return value to pass the
        # response filter.
        with patch('pow.ccurl_interface.attach_to_tangle',
                   MagicMock(return_value=self.bundle)) as mocked_ccurl:
            # We are trying to redirect `attach_to_tangle` calls to localhost
            # with a RoutingWrapper. However, if local_pow=true, the pow
            # request will not reach the adapter, but will be directed to
            # ccurl interface.
            api = Iota(RoutingWrapper('http://12.34.56.78:14265')
                       .add_route('attachToTangle', 'http://localhost:14265'),
                       local_pow=True)
            result = api.attach_to_tangle(
                self.trunk,
                self.branch,
                self.bundle,
                self.mwm)
            self.assertTrue(mocked_ccurl.called)
            self.assertEqual(result['trytes'], self.bundle)

    def test_set_local_pow(self):
        """
        Test if local_pow can be enabled/disabled dynamically.
        """
        with patch('pow.ccurl_interface.attach_to_tangle',
                   MagicMock(return_value=self.ccurl_bundle)) as mocked_ccurl:
            self.adapter = MockAdapter()
            self.adapter.seed_response('attachToTangle',{
                'trytes': self.bundle,
            })
            # First, we enable local_pow
            api = Iota(self.adapter, local_pow=True)
            result = api.attach_to_tangle(
                self.trunk,
                self.branch,
                self.bundle,
                self.mwm)
            # Ccurl was called
            self.assertTrue(mocked_ccurl.called)
            # Result comes from ccurl
            self.assertEqual(result['trytes'], self.ccurl_bundle)
            # Reset mock, this clears the called attribute
            mocked_ccurl.reset_mock()
            # Disable local_pow
            api.set_local_pow(local_pow=False)
            # Try again
            result = api.attach_to_tangle(
                self.trunk,
                self.branch,
                self.bundle,
                self.mwm)
            # Ccurl interface was not called
            self.assertFalse(mocked_ccurl.called)
            # Result is the one returned by MockAdapter
            self.assertEqual(result['trytes'], self.bundle)
            # And not by mocked pow pkg
            self.assertNotEqual(result['trytes'], self.ccurl_bundle)
"content_hash": "30cfad54d85fb15d47b68fde15a952bd",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 88,
"avg_line_length": 41.84756097560975,
"alnum_prop": 0.5912866093545097,
"repo_name": "iotaledger/iota.lib.py",
"id": "a3ee70d2a14d6749302d704242b4783ffec65881",
"size": "6863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/local_pow_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1053825"
}
],
"symlink_target": ""
} |
import os,sys
import utils as cu
import libDetection as det
import cPickle as pickle
import scipy.io
import numpy as np
params = cu.loadParams('dbDir relationsFile outputDir')
archive = {}
T = pickle.load( open(params['dbDir']+'/db.idx','rb') )
M = scipy.io.loadmat(params['dbDir']+'/db.cache')
archive['images'] = T.keys()
index = np.zeros( (len(T), 2), np.int )
for i in range(len(archive['images'])):
idx = T[archive['images'][i]]
index[i,0] = idx['s'] + 1
index[i,1] = idx['e']
archive['index'] = index
data = [x.split() for x in open(params['relationsFile'])]
categories = set()
labels = {}
for d in data:
r = [d[1]] + map(float,d[2:])
try: labels[d[0]].append( r )
except: labels[d[0]] = [ r ]
categories.add(d[1])
categories = list(categories)
categories.sort()
C = dict( [ (categories[c],c) for c in range(len(categories))] )
print 'Identifying labeled boxes'
L = np.zeros( (M['B'].shape[0], 60), np.int32 )
for img in labels.keys():
print img
idx = T[img]
for j in range(idx['s'],idx['e']):
box = M['B'][j,:].tolist()
for l in labels[img]:
iou = det.IoU(box,l[1:])
if iou == 1.0:
L[j,C[l[0]]] = 1
archive['labels'] = L
scipy.io.savemat( params['outputDir']+'/boxes.mat', {'boxes:':M['B']}, do_compression=True )
scipy.io.savemat( params['outputDir']+'/scores.mat', {'scores:':M['S']} )
scipy.io.savemat( params['outputDir']+'/labels.mat', {'labels:':L} )
scipy.io.savemat( params['outputDir']+'/index.mat', {'index:':index, 'images':T.keys()}, do_compression=True )
| {
"content_hash": "c0f876e9add327de5a7ec55da450801d",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 110,
"avg_line_length": 26.517241379310345,
"alnum_prop": 0.61703511053316,
"repo_name": "jccaicedo/localization-agent",
"id": "16a7e54651a80d7562dd0c32d800ec7e3eab725f",
"size": "1538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "detection/relations/makeRelationsLabelsMatrix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "619"
},
{
"name": "Python",
"bytes": "391367"
},
{
"name": "Shell",
"bytes": "18874"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import errno
import hashlib
import os
import time
from datetime import datetime
from werkzeug.utils import secure_filename as _secure_filename
from indico.util.string import to_unicode, unicode_to_ascii
def silentremove(filename):
    """Remove *filename*, tolerating the case where it does not exist.

    :param filename: Path of the file to delete.
    :return: ``True`` if the file was removed, ``False`` if it was
             already missing.
    """
    try:
        os.remove(filename)
    except OSError as exc:
        # Only "no such file" is silenced; any other failure propagates.
        if exc.errno != errno.ENOENT:
            raise
        return False
    return True
def secure_filename(filename, fallback):
    """Return a secure version of a filename.

    Possibly dangerous characters are removed and the name is converted
    to plain ASCII for maximum compatibility.

    :param filename: A filename.
    :param fallback: The filename to use if there were no safe chars
                     in the original filename.
    """
    if filename:
        ascii_name = unicode_to_ascii(to_unicode(filename))
        return _secure_filename(ascii_name) or fallback
    return fallback
def resolve_link(link):
    """Resolve a symlink to an absolute path.

    :param link: An absolute path to a symlink.
    """
    target = os.readlink(link)
    base = os.path.dirname(link)
    return os.path.normpath(os.path.join(base, target))
def removedirs(base, name):
    """Delete the leaf dir *name* and prune its now-empty parents.

    Behaves like ``rmdir -p`` / :func:`os.removedirs` except that it
    never ascends above *base*. The leaf itself must be empty; pruning
    stops at the first parent that is not.

    :param base: The base dir `name` is relative to.
    :param name: The path to the directory to be deleted.
    """
    os.rmdir(os.path.join(base, name))
    parent, leaf = os.path.split(name)
    if not leaf:
        # Trailing slash: split once more to reach the real leaf name.
        parent, leaf = os.path.split(parent)
    while parent and leaf:
        try:
            os.rmdir(os.path.join(base, parent))
        except OSError:
            return  # parent not empty (or already gone) - stop pruning
        parent, leaf = os.path.split(parent)
def cleanup_dir(path, min_age, dry_run=False, exclude=None):
    """Delete old files from a directory.

    Recurses into subdirectories and also deletes any subdirectory
    left empty afterwards.

    :param path: The directory to clean up.
    :param min_age: A timedelta specifying how old files need to be
                    so they are deleted.
    :param dry_run: If true, this function will not delete anything
                    but just return the files it would delete.
    :param exclude: A callable that is invoked with the subdirectory
                    (relative to `path`). If it returns ``True``, the
                    directory and all its subdirs will be ignored.
    :return: A set containing the deleted files.
    """
    if not path or path == '/':
        raise ValueError('Invalid path for cleanup: {}'.format(path))
    cutoff = int(time.mktime((datetime.now() - min_age).timetuple()))
    removed = set()
    for root, subdirs, filenames in os.walk(path):
        rel = os.path.relpath(root, path)
        if rel == '.':
            rel = ''
        if exclude is not None and exclude(rel):
            del subdirs[:]  # prune the walk below excluded dirs
            continue
        keep_dir = False
        for fname in filenames:
            full = os.path.join(root, fname)
            if os.path.getmtime(full) >= cutoff:
                keep_dir = True  # recent file - directory must stay
                continue
            if dry_run or silentremove(full):
                removed.add(os.path.relpath(full, path))
            else:
                keep_dir = True  # deletion failed - directory must stay
        if not dry_run and not keep_dir and not subdirs and rel:
            removedirs(path, rel)
    return removed
def chmod_umask(path, execute=False):
    """Change the permissions of a file to the umask-based default.

    :param path: The path to the file/directory.
    :param execute: Whether the x-bit may be set.
    """
    # The umask cannot be read without setting it, so set a temporary
    # restrictive value and restore the original right away. This is
    # not thread safe in theory, but the temporary 027 umask only makes
    # concurrently created files *more* restrictive (640/750), so the
    # race (extremely unlikely anyway) is harmless.
    current_umask = os.umask(0o027)
    os.umask(current_umask)
    base_mode = 0o777 if execute else 0o666
    os.chmod(path, base_mode & ~current_umask)
def get_file_checksum(fileobj, chunk_size=1024*1024, algorithm=hashlib.md5):
    """Compute the checksum of a file object, reading it in chunks.

    :param fileobj: A file-like object open for reading.
    :param chunk_size: How many bytes to read per iteration.
    :param algorithm: The hashlib constructor to use (MD5 by default).
    :return: The hex digest as a unicode string (Python 2 code).
    """
    hasher = algorithm()
    while True:
        block = fileobj.read(chunk_size)
        if not block:  # works for both b'' and '' at EOF
            break
        hasher.update(block)
    return unicode(hasher.hexdigest())
| {
"content_hash": "e1bca70af9391eb2a85b6f307e6ef9ce",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 83,
"avg_line_length": 33.93382352941177,
"alnum_prop": 0.6446370530877573,
"repo_name": "OmeGak/indico",
"id": "e9455e7e10d6fb959a1ddbe39991dfc02eae2d24",
"size": "4829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indico/util/fs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "547418"
},
{
"name": "HTML",
"bytes": "1366687"
},
{
"name": "JavaScript",
"bytes": "1678182"
},
{
"name": "Mako",
"bytes": "1340"
},
{
"name": "Python",
"bytes": "4488419"
},
{
"name": "Shell",
"bytes": "2724"
},
{
"name": "TeX",
"bytes": "23051"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import *
from .views import GlobalOverview
# URL routes for the syspanel overview dashboard: the root URL renders
# the class-based GlobalOverview view.
# NOTE(review): django.conf.urls.defaults and patterns() come from old
# Django (<1.6), matching the era of the surrounding project.
urlpatterns = patterns('',
    url(r'^$', GlobalOverview.as_view(), name='index'),
)
| {
"content_hash": "4bf713376637756b9afe01a0494d353e",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 55,
"avg_line_length": 23,
"alnum_prop": 0.7018633540372671,
"repo_name": "andrewsmedina/horizon",
"id": "382cd69abe9cc6edbbb6810f2002c0fdf901f061",
"size": "971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "horizon/horizon/dashboards/syspanel/overview/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from sqlalchemy import testing
from sqlalchemy.testing import fixtures
from sqlalchemy.util.compat import import_
class DatabaseRemovedTest(fixtures.TestBase):
    # Verifies that importing the legacy ``sqlalchemy.databases`` package
    # emits the 2.0 deprecation warning.
    def test_deprecate_databases(self):
        # expect_deprecated_20 fails the test unless a deprecation warning
        # matching the given message is raised inside the block.
        with testing.expect_deprecated_20(
            "The `database` package is deprecated and will be removed in v2.0 "
        ):
            import_("sqlalchemy.databases")
| {
"content_hash": "b7dde74e2bc42f451b19dc4b5b6358b0",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 79,
"avg_line_length": 34.54545454545455,
"alnum_prop": 0.7157894736842105,
"repo_name": "j5int/sqlalchemy",
"id": "b2ee708e2640b41b1a7e6476a76713b6a4f21717",
"size": "380",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/dialect/test_deprecations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "63151"
},
{
"name": "Python",
"bytes": "15339979"
}
],
"symlink_target": ""
} |
"""
Print stellar data (gaia ids, memberships etc.) for the paper.
Prepare two versions: One is the first few lines for the paper (stellar_memberships_example.tex),
and the other one is an online supplementary material.
Print only members of ScoCen. Background stars are not important!
"""
import numpy as np
from astropy.table import Table, unique, join
import astropy.units as u
from chronostar.component import SphereComponent
############################################
# Some things are the same for all the plotting scripts and we put
# this into a single library to avoid confusion.
import scocenlib as lib
data_filename = lib.data_filename
# Components considered reliable. NOTE(review): the inline comment says
# "No B and E", yet 'E' is present in the list -- confirm which is intended.
good_comps = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'T', 'U'] # No B and E
############################################
# Read data. The table is cached in `tab0` so re-running the script in an
# interactive session does not re-read the (large) input file.
try:
    tab = tab0
except NameError:  # bug fix: was a bare `except:`, which hid real errors
    tab0 = Table.read(data_filename)
    tab = tab0
#~ print('UNIQUE TABLE')
#~ tab = unique(tab, keys='source_id')
print(len(tab))
"""
Take only ScoCen members
"""
# Keep stars whose membership probability in any good component exceeds 0.5.
mask = [False]*len(tab)
for comp_ID in good_comps:
    m = tab['membership%s'%comp_ID]>0.5
    mask = mask | m
tab = tab[mask]
print('Number of stars in good comps with memb. prob. >0.5: ', len(tab))
# Rename some columns
tab.rename_column('bp_rp_extinction_corrected', '(Bp-Rp)0')
tab.rename_column('phot_g_mean_mag_extinction_corrected', 'G0')
tab.rename_column('best_component', 'comp')
tab.rename_column('best_component_membership', 'p')
tab.rename_column('EW(Li)_err', 'EW(Li)_error')
# Keys in the table (column order for the output file)
keys = ['source_id', '(Bp-Rp)0', 'G0', 'radial_velocity', 'radial_velocity_error', 'X', 'Y', 'Z', 'U', 'V', 'W', 'X_error', 'Y_error', 'Z_error', 'U_error', 'V_error', 'W_error', 'comp', 'p', 'EW(Li)', 'EW(Li)_error', 'Ref']
tab=tab[keys]
# Attach physical units so they are written into the FITS header.
tab['radial_velocity'].unit = u.km/u.s
tab['radial_velocity_error'].unit = u.km/u.s
tab['X'].unit = u.pc
tab['Y'].unit = u.pc
tab['Z'].unit = u.pc
tab['X_error'].unit = u.pc
tab['Y_error'].unit = u.pc
tab['Z_error'].unit = u.pc
tab['U'].unit = u.km/u.s
tab['V'].unit = u.km/u.s
tab['W'].unit = u.km/u.s
tab['U_error'].unit = u.km/u.s
tab['V_error'].unit = u.km/u.s
tab['W_error'].unit = u.km/u.s
tab['EW(Li)'].unit = u.angstrom
tab['EW(Li)_error'].unit = u.angstrom
print(tab)
tab.write('scocen_kinematics_and_membership_probabilities_gaiaDR2.fits', overwrite=True)
| {
"content_hash": "31ba8c9326613441cd861f783b126102",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 224,
"avg_line_length": 28.530864197530864,
"alnum_prop": 0.6343574210298571,
"repo_name": "mikeireland/chronostar",
"id": "ecedf00787d501a0b972ee9a74c781126ebf6509",
"size": "2311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "projects/scocen/print_stellar_memberships_BIG_table_for_paper_entire_fits_file_for_supplementary_material.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "308199"
},
{
"name": "C++",
"bytes": "2106"
},
{
"name": "Makefile",
"bytes": "1032"
},
{
"name": "Python",
"bytes": "1116075"
},
{
"name": "SWIG",
"bytes": "4608"
},
{
"name": "Shell",
"bytes": "1163"
}
],
"symlink_target": ""
} |
'''
Hello student. Thank you for downloading a CORGIS library. However, you do not need to open this library. Instead you should use the following:
import music
If you opened the file because you are curious how this library works, then well done! We hope that you find it a useful learning experience. However, you should know that this code is meant to solve somewhat esoteric pedagogical problems, so it is often not best practices.
'''
import sys as _sys
import os as _os
import json as _json
import sqlite3 as _sql
import difflib as _difflib
class _Constants(object):
    '''
    Global singleton object to hide some of the constants; some IDEs reveal internal module details very aggressively, and there's no other way to hide stuff.
    '''
    # HTTP headers used for any outgoing requests.
    _HEADER = {'User-Agent':
               'CORGIS Music library for educational purposes'}
    # True when running under Python 3.
    _PYTHON_3 = _sys.version_info >= (3, 0)
    # When True, the interface functions return only a limited data slice.
    _TEST = False
    # Row limit applied to queries when in test mode.
    _HARDWARE = 1000
# Import URL helpers from the location appropriate for this Python version.
if _Constants._PYTHON_3:
    import urllib.request as _request
    from urllib.parse import quote_plus as _quote_plus
    from urllib.error import HTTPError as _HTTPError
else:
    import urllib2 as _urllib2
    from urllib import quote_plus as _quote_plus
    from urllib2 import HTTPError as _HTTPError
class DatasetException(Exception):
    '''Raised when the local dataset (music.db) cannot be found or loaded.'''
    pass
_Constants._DATABASE_NAME = "music.db"
# Fail fast with a student-friendly message when the database file is
# missing or unreadable; merely warn when it is read-only.
if not _os.access(_Constants._DATABASE_NAME, _os.F_OK):
    raise DatasetException("Error! Could not find a \"{0}\" file. Make sure that there is a \"{0}\" in the same directory as \"{1}.py\"! Spelling is very important here.".format(_Constants._DATABASE_NAME, __name__))
elif not _os.access(_Constants._DATABASE_NAME, _os.R_OK):
    raise DatasetException("Error! Could not read the \"{0}\" file. Make sure that it readable by changing its permissions. You may need to get help from your instructor.".format(_Constants._DATABASE_NAME, __name__))
elif not _os.access(_Constants._DATABASE_NAME, _os.W_OK):
    # Bug fix: the original string had no "{0}" placeholder, so .format()
    # silently dropped the database name from the warning message.
    _sys.stderr.write('The local cache ("{0}") will not be updated. Make sure that it is writable by changing its permissions. You may need to get help from your instructor.\n'.format(_Constants._DATABASE_NAME))
    _sys.stderr.flush()
_Constants._DATABASE = _sql.connect(_Constants._DATABASE_NAME)
class _Auxiliary(object):
    @staticmethod
    def _parse_type(value, type_func):
        """
        Attempt to cast *value* into *type_func*, returning the type's
        zero value when *value* is ``None`` or the cast fails.
        """
        fallback = type_func(0)
        if value is None:
            return fallback
        try:
            return type_func(value)
        except ValueError:
            return fallback

    @staticmethod
    def _byteify(input):
        """
        Force the given input to only use `str` instead of `bytes` or
        `unicode`, recursing through dicts and lists.
        """
        if isinstance(input, dict):
            return {_Auxiliary._byteify(k): _Auxiliary._byteify(v)
                    for k, v in input.items()}
        if isinstance(input, list):
            return [_Auxiliary._byteify(item) for item in input]
        # Order matters: `unicode` may only be referenced on Python 2,
        # so the version check must short-circuit first.
        if _Constants._PYTHON_3 and isinstance(input, str):
            return str(input.encode('ascii', 'replace').decode('ascii'))
        if not _Constants._PYTHON_3 and isinstance(input, unicode):
            return str(input.encode('ascii', 'replace').decode('ascii'))
        return input

    @staticmethod
    def _guess_schema(input):
        """Return a nested skeleton of the types found in *input*."""
        if isinstance(input, dict):
            return {str(key.encode('ascii', 'replace').decode('ascii')):
                    _Auxiliary._guess_schema(value)
                    for key, value in input.items()}
        if isinstance(input, list):
            return [_Auxiliary._guess_schema(input[0])] if input else []
        return type(input)
################################################################################
# Domain Objects
################################################################################
################################################################################
# Interfaces
################################################################################
def get_song_by_name(title):
    """
    Given the title of a song, returns information about the song.

    :param title: The title of the song.
    :type title: str
    :raises DatasetException: If the title is not in the database; close
        matches are suggested in the message when available.
    """
    # Match it against recommended values so we can suggest alternatives.
    potentials = [r[0].lower() for r in _Constants._DATABASE.execute(
        "SELECT DISTINCT title FROM music").fetchall()]
    if title.lower() not in potentials:
        best_guesses = _difflib.get_close_matches(title, potentials)
        if best_guesses:
            raise DatasetException("Error, the given identifier could not be found. Perhaps you meant one of:\n\t{}".format('\n\t'.join(map('"{}"'.format, best_guesses))))
        else:
            raise DatasetException("Error, the given identifier could not be found. Please check to make sure you have the right spelling.")
    # Cleanup: removed a dead `if False:` branch and a no-op .format()
    # call on a query string that contains no placeholders.
    rows = _Constants._DATABASE.execute(
        "SELECT data FROM music WHERE title=? LIMIT 1", (title, ))
    data = [_Auxiliary._byteify(_json.loads(r[0])) for r in rows]
    return _Auxiliary._byteify(data[0])
def get_songs_by_artist(artist, test=False):
    """
    Given the name of an artist, returns all the songs by that artist in the database.

    :param artist: The name of the artist or band.
    :type artist: str
    :param test: If True (or the library is in test mode), the number of
        returned rows is capped at the configured limit.
    :raises DatasetException: If the artist is not in the database; close
        matches are suggested in the message when available.
    """
    # Match it against recommended values so we can suggest alternatives.
    potentials = [r[0].lower() for r in _Constants._DATABASE.execute(
        "SELECT DISTINCT artist FROM music").fetchall()]
    if artist.lower() not in potentials:
        best_guesses = _difflib.get_close_matches(artist, potentials)
        if best_guesses:
            raise DatasetException("Error, the given identifier could not be found. Perhaps you meant one of:\n\t{}".format('\n\t'.join(map('"{}"'.format, best_guesses))))
        else:
            raise DatasetException("Error, the given identifier could not be found. Please check to make sure you have the right spelling.")
    # The original branches differed only by a LIMIT clause; build the
    # query once instead of duplicating the fetch/decode logic.
    query = "SELECT data FROM music WHERE artist=?"
    if _Constants._TEST or test:
        query += " LIMIT {hardware}".format(hardware=_Constants._HARDWARE)
    rows = _Constants._DATABASE.execute(query, (artist, ))
    return _Auxiliary._byteify(
        [_Auxiliary._byteify(_json.loads(r[0])) for r in rows])
def get_songs(test=False):
    """
    Gets a list of all the songs in the database.

    :param test: If True (or the library is in test mode), the number of
        returned rows is capped at the configured limit.
    """
    # The original branches differed only by a LIMIT clause; build the
    # query once instead of duplicating the fetch/decode logic.
    query = "SELECT data FROM music"
    if _Constants._TEST or test:
        query += " LIMIT {hardware}".format(hardware=_Constants._HARDWARE)
    rows = _Constants._DATABASE.execute(query)
    return _Auxiliary._byteify(
        [_Auxiliary._byteify(_json.loads(r[0])) for r in rows])
################################################################################
# Internalized testing code
################################################################################
def _test_interfaces():
    """Smoke-test each public interface in production and test mode,
    printing results (or their guessed schemas) and timings."""
    from pprint import pprint as _pprint
    from timeit import default_timer as _default_timer

    def _run(label, call):
        # Announce and time one interface call; return (result, start time).
        print(label)
        started = _default_timer()
        return call(), started

    def _report(result, started):
        # Print entry count, guessed schema, and elapsed time (which, as
        # in the original, includes the printing itself).
        print("{} entries found.".format(len(result)))
        _pprint(_Auxiliary._guess_schema(result))
        print("Time taken: {}".format(_default_timer() - started))

    result, t0 = _run("Production get_song_by_name",
                      lambda: get_song_by_name("I Didn't Mean To"))
    _pprint(result)
    print("Time taken: {}".format(_default_timer() - t0))

    result, t0 = _run("Production get_songs_by_artist",
                      lambda: get_songs_by_artist("Aerosmith"))
    _report(result, t0)

    result, t0 = _run("Test get_songs_by_artist",
                      lambda: get_songs_by_artist("Aerosmith", test=True))
    _report(result, t0)

    result, t0 = _run("Production get_songs", get_songs)
    _report(result, t0)

    result, t0 = _run("Test get_songs", lambda: get_songs(test=True))
    _report(result, t0)
if __name__ == '__main__':
    from optparse import OptionParser as _OptionParser
    # Minimal CLI: `python music.py --test` runs the interface smoke tests.
    _parser = _OptionParser()
    _parser.add_option("-t", "--test", action="store_true",
                       default=False,
                       help="Execute the interfaces to test them.")
    (_options, _args) = _parser.parse_args()
    if _options.test:
_test_interfaces() | {
"content_hash": "d9bd219363583171691ca457d81520e9",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 275,
"avg_line_length": 38.434615384615384,
"alnum_prop": 0.5962173521465025,
"repo_name": "chandlercr/aima-python",
"id": "4d8ba7af27de078660ccf2f0bfac71a1f32369c1",
"size": "9993",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "submissions/Blue/music.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "856"
},
{
"name": "HTML",
"bytes": "9947"
},
{
"name": "JavaScript",
"bytes": "10165"
},
{
"name": "Jupyter Notebook",
"bytes": "1382354"
},
{
"name": "Makefile",
"bytes": "562"
},
{
"name": "Python",
"bytes": "1917198"
}
],
"symlink_target": ""
} |
import warnings
# This package is deprecated and otherwise empty: importing it only warns
# users to remove it from INSTALLED_APPS.
warnings.warn(
    "mezzanine.mobile has been deprecated. Please remove it from your "
    "INSTALLED_APPS.", FutureWarning, stacklevel=2
)
| {
"content_hash": "cfa4e9c8a5509bf988d0ab78dfa6d7f1",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 71,
"avg_line_length": 26.166666666666668,
"alnum_prop": 0.7452229299363057,
"repo_name": "christianwgd/mezzanine",
"id": "99341d15c78484192a1d1b6a48c40ed543c0a7f9",
"size": "157",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "mezzanine/mobile/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "59995"
},
{
"name": "HTML",
"bytes": "82168"
},
{
"name": "JavaScript",
"bytes": "459482"
},
{
"name": "Python",
"bytes": "709342"
}
],
"symlink_target": ""
} |
__author__ = 'albertlwohletz'
from API import models
from django.http import HttpResponse
import json
def add_char(request):
    """Create `count` characters from the GET parameters, numbering their
    names sequentially ("<name> 1", "<name> 2", ...)."""
    # Pull the character attributes from the query string.
    name = request.GET['name']
    image = request.GET['image']
    hp = request.GET['hp']
    ac = request.GET['ac']
    count = int(request.GET['count'])
    # Persist one row per requested copy.
    for n in range(1, count + 1):
        models.Chars(name='{} {}'.format(name, n), image=image,
                     hp=hp, ac=ac).save()
    return HttpResponse("Success")
def remove_char(request):
    """Delete the character whose primary key is given in the GET params."""
    char_id = request.GET['id']
    models.Chars.objects.filter(id=char_id).delete()
    return HttpResponse('Success')
# Returns json object of char for specified id.
def get_char(request):
    """Serialize one character's stats (hp, ac, img, name) as JSON."""
    char_id = int(request.GET['id'])
    char = list(models.Chars.objects.filter(id=char_id))[0]
    payload = {"hp": char.hp, "ac": char.ac, "img": char.image,
               "name": char.name}
    return HttpResponse(json.dumps(payload), content_type="text/json")
def edit_char(request):
    """Update an existing character's stats from the GET parameters."""
    # Collect the new values from the query string.
    char_id = int(request.GET['id'])
    new_ac = request.GET['ac']
    new_hp = request.GET['hp']
    new_name = request.GET['name']
    new_image = request.GET['image']
    # Load, mutate and persist the matching row.
    char = models.Chars.objects.get(pk=char_id)
    char.ac = new_ac
    char.hp = new_hp
    char.name = new_name
    char.image = new_image
    char.save()
    return HttpResponse("Success", content_type="text/html")
| {
"content_hash": "179445eafc210adeee513682b12a636c",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 88,
"avg_line_length": 27.78846153846154,
"alnum_prop": 0.6359861591695501,
"repo_name": "albertwohletz/combatmanager",
"id": "3837d5dedfc1461305232f238974eaaf30712844",
"size": "1445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "API/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "28719"
},
{
"name": "JavaScript",
"bytes": "9590"
},
{
"name": "PHP",
"bytes": "25856"
},
{
"name": "Python",
"bytes": "6022"
}
],
"symlink_target": ""
} |
import unittest
from redis import Redis
from webhooks import webhook
from webhooks.senders import async_redis
class SendersAsyncRedisCase(unittest.TestCase):
    # Integration test: requires a running local Redis server and network
    # access to httpbin.org.
    def test_redis_sender(self):
        redis_connection = Redis()
        # Second, test the sender, which handles the async components
        @webhook(sender_callable=async_redis.sender)
        def sender(url, language, connection, encoding):
            # The decorator posts this payload to `url` via the Redis-backed
            # async sender and augments the result with response metadata.
            return {"language": language, "url": url}
        response = sender(url="http://httpbin.org/post", language="python", connection=redis_connection, encoding='application/json')
        assert response['status_code'] == 200
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "efc7d1688ba5968a5cfc215bc3439bae",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 133,
"avg_line_length": 33,
"alnum_prop": 0.6868686868686869,
"repo_name": "pydanny/webhooks",
"id": "36641b8cb33846f7975d53c9ba56d27f8c470ed0",
"size": "740",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_senders_async_redis.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1233"
},
{
"name": "Python",
"bytes": "22216"
}
],
"symlink_target": ""
} |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import time
import struct
from .. import ivi
from .. import dmm
from .. import scpi
# Maps IVI measurement function names to the instrument's SCPI function names.
MeasurementFunctionMapping = {
    'dc_volts': 'volt',
    'ac_volts': 'volt:ac',
    'dc_current': 'curr',
    'ac_current': 'curr:ac',
    'two_wire_resistance': 'res',
    'four_wire_resistance': 'fres',
    'frequency': 'freq',
    'period': 'per',
    'continuity': 'cont',
    'diode': 'diod'}
# SCPI command stems for setting the measurement range, per function.
MeasurementRangeMapping = {
    'dc_volts': 'volt:dc:range',
    'ac_volts': 'volt:ac:range',
    'dc_current': 'curr:dc:range',
    'ac_current': 'curr:ac:range',
    'two_wire_resistance': 'res:range',
    'four_wire_resistance': 'fres:range'}
# SCPI command stems for toggling auto-ranging, per function.
MeasurementAutoRangeMapping = {
    'dc_volts': 'volt:dc:range:auto',
    'ac_volts': 'volt:ac:range:auto',
    'dc_current': 'curr:dc:range:auto',
    'ac_current': 'curr:ac:range:auto',
    'two_wire_resistance': 'res:range:auto',
    'four_wire_resistance': 'fres:range:auto'}
# SCPI command stems for setting the measurement resolution, per function.
MeasurementResolutionMapping = {
    'dc_volts': 'volt:dc:resolution',
    'ac_volts': 'volt:ac:resolution',
    'dc_current': 'curr:dc:resolution',
    'ac_current': 'curr:ac:resolution',
    'two_wire_resistance': 'res:resolution',
    'four_wire_resistance': 'fres:resolution'}
class agilent34401A(scpi.dmm.Base, scpi.dmm.MultiPoint, scpi.dmm.SoftwareTrigger):
    "Agilent 34401A IVI DMM driver"

    def __init__(self, *args, **kwargs):
        self.__dict__.setdefault('_instrument_id', '34401A')
        super(agilent34401A, self).__init__(*args, **kwargs)
        self._memory_size = 5
        # IVI identity metadata for this driver.
        self._identity_description = "Agilent 34401A IVI DMM driver"
        self._identity_identifier = ""
        self._identity_revision = ""
        self._identity_vendor = ""
        self._identity_instrument_manufacturer = "Agilent Technologies"
        self._identity_instrument_model = ""
        self._identity_instrument_firmware_revision = ""
        self._identity_specification_major_version = 4
        self._identity_specification_minor_version = 1
        self._identity_supported_instrument_models = ['34401A']

    def _initialize(self, resource = None, id_query = False, reset = False, **keywargs):
        "Opens an I/O session to the instrument."
        super(agilent34401A, self)._initialize(resource, id_query, reset, **keywargs)
        # interface clear
        if not self._driver_operation_simulate:
            self._clear()
        # check ID: the reported model must start with the expected ID
        if id_query and not self._driver_operation_simulate:
            id = self.identity.instrument_model
            id_check = self._instrument_id
            id_short = id[:len(id_check)]
            if id_short != id_check:
                # Bug fix: the message and its arguments were passed as
                # separate Exception args, so the %s placeholders were
                # never substituted into the message.
                raise Exception("Instrument ID mismatch, expecting %s, got %s" % (id_check, id_short))
        # reset
        if reset:
            self.utility.reset()
| {
"content_hash": "761e2919754dd6137984473da52d7d17",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 99,
"avg_line_length": 34.94915254237288,
"alnum_prop": 0.6357904946653734,
"repo_name": "margguo/python-ivi",
"id": "e94118fc4be69036eb900fd4355f16779a806d17",
"size": "4124",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "ivi/agilent/agilent34401A.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1738999"
}
],
"symlink_target": ""
} |
'''Algorithms for converting grammars to Chomsky Normal Form.'''
from cfg.core import ContextFreeGrammar, Terminal, Nonterminal, \
ProductionRule, SubscriptedNonterminal
from util.moreitertools import powerset
def is_cnf_rule(r, start):
    '''Return whether a production rule is in CNF. Must indicate the
    grammar's start variable.'''
    rhs = r.right_side
    # Case 1: A -> a (a single terminal).
    if len(rhs) == 1 and rhs[0].is_terminal():
        return True
    # Case 2: A -> B C (two nonterminals, neither of them the start).
    if len(rhs) == 2 and all(s.is_nonterminal() and s != start for s in rhs):
        return True
    # Case 3: S -> e (the empty rule, allowed only for the start variable).
    return r.left_side == start and not rhs
def is_cnf(G):
    '''Return whether a grammar is in CNF.'''
    # Every production must individually satisfy the CNF conditions.
    return all(is_cnf_rule(p, G.start) for p in G.productions)
def _first_rule_that(productions, pred):
    # Return the index of the first production satisfying *pred*,
    # or None when no production matches.
    for index, production in enumerate(productions):
        if pred(production):
            return index
    return None
def _first_empty_rule(productions, start):
    # Index of the first e-rule (empty right side) whose left side is not
    # the start variable; None when there is no such rule.
    def _is_removable_empty(p):
        return not p.right_side and p.left_side != start
    return _first_rule_that(productions, _is_removable_empty)
def _first_unit_rule(productions):
    # Index of the first unit rule (A -> B, a single nonterminal on the
    # right side); None when there is no such rule.
    def _is_unit(p):
        return len(p.right_side) == 1 and isinstance(p.right_side[0], Nonterminal)
    return _first_rule_that(productions, _is_unit)
def substitutions(sentence, production):
    '''Returns all of the distinct ways of applying a derivation rule to a
    sentence, including no change at all.'''
    # Positions where the rule's left side occurs in the sentence.
    occurrences = [i for i, sym in enumerate(sentence)
                   if sym == production.left_side]
    results = []
    # Every subset of occurrences is one way to apply (or skip) the rule;
    # the empty subset reproduces the original sentence.
    for chosen in powerset(occurrences):
        candidate = []
        for pos, sym in enumerate(sentence):
            if pos in chosen:
                candidate.extend(production.right_side)
            else:
                candidate.append(sym)
        # Different subsets can yield the same sentence; keep one copy each.
        if candidate not in results:
            results.append(candidate)
    return results
def chain(p, used_variables):
    '''Given a production rule p, return a list of equivalent rules such
    that the right side of each rule is no more than two symbols long.'''
    rhs = p.right_side
    if len(rhs) <= 2:
        return [p]
    # Split A -> X1 X2 ... Xn into A -> X1 N and N -> X2 ... Xn, where N
    # is a fresh subscripted variable named after the tail symbols.
    tail_name = ''.join(str(sym) for sym in rhs[1:])
    fresh = SubscriptedNonterminal.next_unused(tail_name, used_variables)
    head_rule = ProductionRule(p.left_side, (rhs[0], fresh))
    tail_rule = ProductionRule(fresh, rhs[1:])
    # Recurse on the tail rule until all right sides have length <= 2.
    return [head_rule] + chain(tail_rule, used_variables | set([fresh]))
def get_variables(productions):
    '''Return a set of all the variables which appear in a list of
    productions.'''
    variables = set()
    for production in productions:
        variables.add(production.left_side)
        # Only nonterminal symbols on the right side count as variables.
        variables.update(sym for sym in production.right_side
                         if isinstance(sym, Nonterminal))
    return variables
def replace_terminals(p, proxy_rules):
    '''Replace all the terminal symbols in a production rule with equivalent
    variables, given a mapping from terminals to proxy production rules.
    Return a pair containing the fixed rule and a list of the terminals
    replaced.'''
    rhs = p.right_side
    # Short rules and the proxy rules themselves are left untouched.
    # (Python 2 code: dict.itervalues().)
    if len(rhs) < 2 or p in proxy_rules.itervalues():
        return p, []
    new_rhs = []
    swapped = []
    for sym in rhs:
        if isinstance(sym, Terminal):
            new_rhs.append(proxy_rules[sym].left_side)
            swapped.append(sym)
        else:
            new_rhs.append(sym)
    return ProductionRule(p.left_side, new_rhs), swapped
def ChomskyNormalForm(G):
    '''Given a CFG G, return an equivalent CFG in Chomsky normal form.'''
    productions = list(G.productions)
    # Add a new start variable S0 and add the rule S0 -> S
    S0 = SubscriptedNonterminal(G.start.name, 0)
    productions[:0] = [ProductionRule(S0, [G.start])]
    # Remove e rules: repeatedly drop an empty rule A -> e (A != S0) and
    # add every way of omitting A from the other rules' right sides.
    removed_rules = []
    while True:
        i = _first_empty_rule(productions, S0)
        if i is None:
            break
        pe = productions[i]
        removed_rules.append(pe)
        del productions[i]
        new_rules = [ProductionRule(rule.left_side, sentence) \
                     for rule in productions[1:] \
                     for sentence in substitutions(rule.right_side, pe)]
        productions[1:] = [r for r in new_rules if r not in removed_rules]
    # Remove unit rules A -> B by replacing each with A -> (rhs of B).
    removed_rules = []
    while True:
        i = _first_unit_rule(productions)
        if i is None:
            break
        pu = productions[i]
        removed_rules.append(pu)
        new_rules = [ProductionRule(pu.left_side, p.right_side) \
                     for p in productions if p.left_side == pu.right_side[0]]
        productions[i:i+1] = [r for r in new_rules if r not in productions \
                              and r not in removed_rules]
    # Chain right sides of rules so no right side exceeds two symbols.
    i = 0
    while i < len(productions):
        new_rules = chain(productions[i], get_variables(productions))
        productions[i:i+1] = new_rules
        i += len(new_rules)
    # Replace terminal symbols with proxy variables in rules of length >= 2,
    # appending each proxy rule (V -> t) the first time its terminal is seen.
    terminals = G.terminals
    variables = get_variables(productions)
    proxy_rules = \
        {t : ProductionRule(
                SubscriptedNonterminal.next_unused(t.name.upper(), variables),
                [t]
             ) for t in terminals}
    added = {t : False for t in terminals}
    i = 0
    while i < len(productions):
        new_rule, replaced = replace_terminals(productions[i], proxy_rules)
        productions[i] = new_rule
        for t in replaced:
            if not added[t]:
                productions.append(proxy_rules[t])
                added[t] = True
        # Bug fix: this loop previously advanced by `len(new_rules)` -- a
        # stale value left over from the chaining loop above -- which
        # skipped rules and could leave terminals unreplaced.
        i += 1
    return ContextFreeGrammar(productions)
| {
"content_hash": "f91c54cbd1f08e261dfc779500eaec11",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 79,
"avg_line_length": 36.51923076923077,
"alnum_prop": 0.5961032122169563,
"repo_name": "bdusell/pycfg",
"id": "3e905f8e554a0c8dbf4c7580b3d746ab70676031",
"size": "5697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/cfg/cnf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "178533"
},
{
"name": "Shell",
"bytes": "752"
}
],
"symlink_target": ""
} |
'''
This module contains dependence structures for fitting models using
generalized estimating equations (GEE).
'''
# Re-export the covariance structures at package level.
from .covstruct import Independence, Exchangeable, GlobalOddsRatio,\
    Autoregressive, Nested, CovStruct
| {
"content_hash": "b675ba6f4a7abb8d560c6024c41d447c",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 68,
"avg_line_length": 32,
"alnum_prop": 0.7991071428571429,
"repo_name": "rgommers/statsmodels",
"id": "618b176741512c68959603c80751138d10f49bba",
"size": "224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "statsmodels/genmod/dependence_structures/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "C",
"bytes": "12092"
},
{
"name": "CSS",
"bytes": "30159"
},
{
"name": "JavaScript",
"bytes": "16353"
},
{
"name": "Python",
"bytes": "7397890"
},
{
"name": "R",
"bytes": "21637"
},
{
"name": "Shell",
"bytes": "5232"
},
{
"name": "Stata",
"bytes": "16079"
}
],
"symlink_target": ""
} |
from pyramid.traversal import find_root
from snovault import (
calculated_property,
collection,
load_schema,
)
from snovault.util import Path
from .base import (
paths_filtered_by_status
)
from .dataset import Dataset
from .shared_calculated_properties import (
CalculatedAssaySynonyms,
CalculatedAssayTermID,
CalculatedAssaySlims,
CalculatedAssayTitle,
CalculatedCategorySlims,
CalculatedTypeSlims,
CalculatedObjectiveSlims,
)
from .assay_data import assay_terms
from .biosample import construct_biosample_summary
from .shared_biosample import biosample_summary_information
@collection(
    name='transgenic-enhancer-experiments',
    unique_key='accession',
    properties={
        'title': 'Transgenic enhancer experiments',
        'description': 'Listing of Transgenic Enhancer Experiments',
    })
class TransgenicEnhancerExperiment(
    Dataset,
    CalculatedAssaySynonyms,
    CalculatedAssayTermID,
    CalculatedAssayTitle,
    CalculatedAssaySlims,
    CalculatedCategorySlims,
    CalculatedTypeSlims,
    CalculatedObjectiveSlims):
    """Dataset subtype for transgenic enhancer experiments.

    Inherits the Dataset behavior plus the shared calculated-property
    mixins (assay synonyms / term id / title / slims, etc.).
    """
    item_type = 'transgenic_enhancer_experiment'
    schema = load_schema('encoded:schemas/transgenic_enhancer_experiment.json')
    # Extra paths to embed on top of the base Dataset embedding.
    embedded = Dataset.embedded + [
        'biosample_ontology',
        'biosamples',
        'biosamples.donor.organism',
        'biosamples.biosample_ontology',
        'biosamples.organism',
        'biosamples.characterizations',
        'biosamples.treatments',
        'related_series',
        'possible_controls',
    ]
    audit_inherit = [
        'submitted_by',
        'lab',
        'award',
        'documents.lab',
    ]
    set_status_up = [
        'documents',
        'biosamples',
    ]
    set_status_down = []
    # Reverse links in addition to the ones declared on Dataset.
    rev = Dataset.rev.copy()
    rev.update({
        'related_series': ('Series', 'related_datasets'),
        'superseded_by': ('TransgenicEnhancerExperiment', 'supersedes')
    })
    @calculated_property(schema={
        "title": "Related series",
        "type": "array",
        "items": {
            "type": ['string', 'object'],
            "linkFrom": "Series.related_datasets",
        },
        "notSubmittable": True,
    })
    def related_series(self, request, related_series):
        """Series objects that list this experiment, filtered by status."""
        return paths_filtered_by_status(request, related_series)
    @calculated_property(schema={
        "title": "Superseded by",
        "type": "array",
        "items": {
            "type": ['string', 'object'],
            "linkFrom": "TransgenicEnhancerExperiment.supersedes",
        },
        "notSubmittable": True,
    })
    def superseded_by(self, request, superseded_by):
        """Experiments that declare this one in their `supersedes` list."""
        return paths_filtered_by_status(request, superseded_by)
    @calculated_property(schema={
        "title": "Biosample summary",
        "type": "string",
    })
    def biosample_summary(self, request, biosamples=None):
        """Build a human-readable summary across this experiment's biosamples.

        Returns None (no value) when there are no usable, non-deleted
        biosamples.
        """
        drop_age_sex_flag = False
        add_classification_flag = False
        # BUG FIX: this flag was previously assigned only inside the loop
        # and read afterwards at method scope, which raised
        # UnboundLocalError when `biosamples` was None/empty/all-deleted
        # and otherwise reflected only the *last* biosample. Initialize it
        # here and accumulate it like the other two flags.
        drop_originated_from_flag = False
        dictionaries_of_phrases = []
        biosample_accessions = set()
        if biosamples is not None:
            for bs in biosamples:
                biosampleObject = request.embed(bs, '@@object')
                if biosampleObject['status'] == 'deleted':
                    continue
                # De-duplicate biosamples that appear more than once.
                if biosampleObject['accession'] not in biosample_accessions:
                    biosample_accessions.add(biosampleObject['accession'])
                    biosample_info = biosample_summary_information(request, biosampleObject)
                    dictionaries_of_phrases.append(biosample_info[0])
                    if biosample_info[1] is True:
                        drop_age_sex_flag = True
                    if biosample_info[2] is True:
                        add_classification_flag = True
                    if biosample_info[3] is True:
                        drop_originated_from_flag = True
        # Ordered template of phrase keys used to assemble the summary.
        sentence_parts = [
            'genotype_strain',
            'experiment_term_phrase',
            'phase',
            'fractionated',
            'sex_stage_age',
            'post_nucleic_acid_delivery_time',
            'post_differentiation_time',
            'synchronization',
            'modifications_list',
            'originated_from',
            'treatments_phrase',
            'depleted_in',
            'disease_term_name',
            'pulse_chase_time'
        ]
        if drop_age_sex_flag:
            sentence_parts.remove('sex_stage_age')
        if add_classification_flag:
            sentence_parts.insert(2, 'sample_type')
        if drop_originated_from_flag:
            sentence_parts.remove('originated_from')
        if len(dictionaries_of_phrases) > 0:
            return construct_biosample_summary(dictionaries_of_phrases, sentence_parts)
    @calculated_property(schema={
        "title": "Datapoint",
        "description": "A flag to indicate whether the Transgenic Enhancer Experiment is a datapoint that should not be displayed on it's own.",
        "type": "boolean",
        "notSubmittable": True,
    })
    def datapoint(self, request):
        # Always False for this item type.
        return False
"content_hash": "8aaa4df6beba07fee32b03c107fb241a",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 144,
"avg_line_length": 33.8343949044586,
"alnum_prop": 0.6065512048192772,
"repo_name": "ENCODE-DCC/encoded",
"id": "31fd7c267bce81bf6ec225ca74c5cf8557f4c8ca",
"size": "5312",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/encoded/types/transgenic_enhancer_experiment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AngelScript",
"bytes": "741"
},
{
"name": "Dockerfile",
"bytes": "1988"
},
{
"name": "Gherkin",
"bytes": "48806"
},
{
"name": "HTML",
"bytes": "371973"
},
{
"name": "JavaScript",
"bytes": "3493156"
},
{
"name": "Jsonnet",
"bytes": "15159"
},
{
"name": "Makefile",
"bytes": "875"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "2845978"
},
{
"name": "SCSS",
"bytes": "403800"
},
{
"name": "Shell",
"bytes": "30525"
}
],
"symlink_target": ""
} |
import os
import unittest
import hashlib
import shutil
from config import basedir
from app import app, db
from StringIO import StringIO
from time import sleep
from PIL import Image
# Absolute path of the throwaway SQLite database used by the test suite.
testDbPath = os.path.join(basedir, 'test.db')
class TestCase(unittest.TestCase):
    """Functional tests for album creation, picture upload and thumbnails."""
    def setUp(self):
        # Point the app at throwaway storage so tests never touch real data.
        app.config['TESTING'] = True
        app.config['WTF_CSRF_ENABLED'] = False
        app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + testDbPath
        app.config['IMAGE_ROOT_DIR'] = os.path.join(basedir, "test_uploads")
        app.config['UPLOADED_IMAGES_DEST'] = os.path.join(app.config['IMAGE_ROOT_DIR'], "uploads")
        app.config['THUMBNAIL_DIR'] = os.path.join(app.config['IMAGE_ROOT_DIR'], "thumbs")
        # Clean out leftovers from a previous (crashed) run.
        if os.path.isdir(app.config['UPLOADED_IMAGES_DEST']):
            shutil.rmtree(app.config['UPLOADED_IMAGES_DEST'])
        if os.path.isdir(app.config['THUMBNAIL_DIR']):
            shutil.rmtree(app.config['THUMBNAIL_DIR'])
        # BUG FIX: the database is a *file*; the previous os.path.isdir()
        # check could never be true, so a stale DB was never removed.
        if os.path.isfile(testDbPath):
            os.unlink(testDbPath)
        self.app = app.test_client()
        db.create_all()
    def tearDown(self):
        shutil.rmtree(app.config['UPLOADED_IMAGES_DEST'])
        shutil.rmtree(app.config['THUMBNAIL_DIR'])
        shutil.rmtree(app.config['IMAGE_ROOT_DIR'])
        db.session.remove()
        db.drop_all()
        # testDbPath is already absolute; joining it with basedir was a no-op.
        os.unlink(testDbPath)
    def test_upload(self):
        # First create an album
        rv = self.app.post('/albums', data={
            "name": "Test album",
            "description": "Test description"
        }, follow_redirects=True)
        assert rv.status_code == 200
        sleep(.5)
        origPath = os.path.join("test_content", "picture1.jpg")
        imagePath = os.path.join(app.config['UPLOADED_IMAGES_DEST'], "picture1.jpg")
        thumbPath = os.path.join(app.config['THUMBNAIL_DIR'], "picture1.jpg")
        # Open in binary mode (JPEG data) and close the handle deterministically.
        with open(origPath, 'rb') as imageData:
            rv = self.app.post('/uploadPicture/1', data={
                "upload-photo": (StringIO(imageData.read()), "picture1.jpg"),
                "upload-description": "Test Description"
            }, follow_redirects=True)
        assert rv.status_code == 200
        sleep(2)  # Thumbnail generation is asynchronous
        # TODO: Don't assume this is picture 1, test order not guaranteed
        rv = self.app.get('/picture/1', follow_redirects=True)
        assert rv.status_code == 200
        rv = self.app.get('/thumbnail/1', follow_redirects=True)
        assert rv.status_code == 200
        assert os.path.isfile(imagePath)
        assert os.path.isfile(thumbPath)
        # The stored upload must be byte-identical to the source image.
        with open(origPath, 'rb') as orig, open(imagePath, 'rb') as stored:
            assert hashlib.md5(orig.read()).hexdigest() == \
                hashlib.md5(stored.read()).hexdigest()
        im = Image.open(thumbPath)
        thumbWidth, thumbHeight = im.size
        assert thumbWidth == 150
        assert thumbHeight == 200
        # Test a duplicate upload (test order is not guaranteed, so this has
        # to be part of the first test).
        with open(origPath, 'rb') as imageData:
            self.app.post('/uploadPicture/1', data={
                "upload-photo": (StringIO(imageData.read()), "picture1.jpg"),
                "upload-description": "Test Description"
            }, follow_redirects=True)
        sleep(2)  # Thumbnail generation is asynchronous
        rv = self.app.get('/picture/2', follow_redirects=True)
        assert rv.status_code == 200
        rv = self.app.get('/thumbnail/2', follow_redirects=True)
        assert rv.status_code == 200
        # Make sure we didn't clobber the original image
        assert len([name for name in os.listdir(app.config['UPLOADED_IMAGES_DEST']) if os.path.isfile(os.path.join(app.config['UPLOADED_IMAGES_DEST'], name))]) == 2
        # Make sure we didn't clobber the original thumbnail
        assert len([name for name in os.listdir(app.config['THUMBNAIL_DIR']) if os.path.isfile(os.path.join(app.config['THUMBNAIL_DIR'], name))]) == 2
        sleep(1)  # Sleep until background thread finishes scaling images
# Allow running this module directly: `python tests.py`.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "c1c154e4f49200714f6be39847b97b51",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 164,
"avg_line_length": 44.977777777777774,
"alnum_prop": 0.6282114624505929,
"repo_name": "jsaxton/riGallery",
"id": "4b889b903a6490fcf3de04ad2a7efc6e3f0c7b7f",
"size": "4067",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3899"
},
{
"name": "HTML",
"bytes": "62559"
},
{
"name": "JavaScript",
"bytes": "350954"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "PHP",
"bytes": "8656"
},
{
"name": "Python",
"bytes": "30795"
},
{
"name": "Shell",
"bytes": "243"
}
],
"symlink_target": ""
} |
"""Tests for call_trees module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
from tensorflow.python.autograph.converters import call_trees
from tensorflow.python.autograph.converters import functions
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.platform import test
class CallTreesTest(converter_testing.TestCase):
  """Tests for the `call_trees` autograph converter.

  Each test converts a small inner `test_fn` with the `functions` and
  `call_trees` converters, executes the converted result, and asserts on
  both the return value and the (args, kwargs) tuples recorded in
  `self.dynamic_calls` by the test harness.

  NOTE: the inner `test_fn` bodies are the conversion *inputs*; their
  exact source is what is under test, so do not restyle them.
  """
  def test_function_no_args(self):
    def test_fn(f):
      return f() + 20
    with self.converted(test_fn, (functions, call_trees), {}) as result:
      self.assertEqual(result.test_fn(lambda: 1), 21)
      self.assertListEqual(self.dynamic_calls, [((), None)])
  def test_function_with_expression_in_argument(self):
    def test_fn(f, g):
      return f(g() + 20) + 4000
    with self.converted(test_fn, (functions, call_trees), {}) as result:
      self.assertEqual(result.test_fn(lambda x: x + 300, lambda: 1), 4321)
      # The inner g() call is recorded before the enclosing f(...) call.
      self.assertListEqual(self.dynamic_calls, [
          ((), None),
          ((21,), None),
      ])
  def test_function_with_call_in_argument(self):
    def test_fn(f, g):
      return f(g()) + 300
    with self.converted(test_fn, (functions, call_trees), {}) as result:
      self.assertEqual(result.test_fn(lambda x: x + 20, lambda: 1), 321)
      self.assertListEqual(self.dynamic_calls, [
          ((), None),
          ((1,), None),
      ])
  def test_function_chaining(self):
    def get_one():
      return 1
    def test_fn():
      return get_one().__add__(20)
    with self.converted(test_fn, (functions, call_trees),
                        {'get_one': get_one}, ()) as result:
      self.assertEqual(result.test_fn(), 21)
      self.assertListEqual(self.dynamic_calls, [
          ((), None),
          ((20,), None),
      ])
  def test_function_with_single_arg(self):
    def test_fn(f, a):
      return f(a) + 20
    with self.converted(test_fn, (functions, call_trees), {}) as result:
      self.assertEqual(result.test_fn(lambda a: a, 1), 21)
      self.assertListEqual(self.dynamic_calls, [((1,), None)])
  def test_function_with_args_only(self):
    def test_fn(f, a, b):
      return f(a, b) + 300
    with self.converted(test_fn, (functions, call_trees), {}) as result:
      self.assertEqual(result.test_fn(lambda a, b: a + b, 1, 20), 321)
      self.assertListEqual(self.dynamic_calls, [((1, 20), None)])
  def test_function_with_kwarg(self):
    def test_fn(f, a, b):
      return f(a, c=b) + 300
    with self.converted(test_fn, (functions, call_trees), {}) as result:
      self.assertEqual(result.test_fn(lambda a, c: a + c, 1, 20), 321)
      self.assertListEqual(self.dynamic_calls, [((1,), {'c': 20})])
  def test_function_with_kwargs_starargs(self):
    def test_fn(f, a, *args, **kwargs):
      return f(a, *args, **kwargs) + 5
    with self.converted(test_fn, (functions, call_trees), {}) as result:
      self.assertEqual(
          result.test_fn(lambda *args, **kwargs: 7, 1, *[2, 3], **{
              'b': 4,
              'c': 5
          }), 12)
      self.assertListEqual(self.dynamic_calls, [((1, 2, 3), {'b': 4, 'c': 5})])
  def test_function_with_starargs_only(self):
    def f(*args):
      return sum(args)
    def test_fn():
      args = [1, 20, 300]
      return f(*args) + 4000
    with self.converted(test_fn, (functions, call_trees),
                        {'f': f}) as result:
      self.assertEqual(result.test_fn(), 4321)
      self.assertListEqual(self.dynamic_calls, [((1, 20, 300), None)])
  # TODO(b/142586827): Enable this test.
  # def test_function_with_starargs_mixed(self):
  #
  #   def f(a, b, c, d):
  #     return a * 1000 + b * 100 + c * 10 + d
  #
  #   def test_fn():
  #     args1 = (1,)
  #     args2 = [3]
  #     return f(*args1, 2, *args2, 4)
  #
  #   with self.converted(test_fn, (functions, call_trees),
  #                       {'f': f}) as result:
  #     self.assertEqual(result.test_fn(), 1234)
  #     self.assertListEqual(self.dynamic_calls, [((1, 2, 3, 4), None)])
  def test_function_with_kwargs_keywords(self):
    def test_fn(f, a, b, **kwargs):
      return f(a, b=b, **kwargs) + 5
    with self.converted(test_fn, (functions, call_trees), {}) as result:
      self.assertEqual(
          result.test_fn(lambda *args, **kwargs: 7, 1, 2, **{'c': 3}), 12)
      self.assertListEqual(self.dynamic_calls, [((1,), {'b': 2, 'c': 3})])
  # TODO(b/142586827): Enable this test.
  # def test_function_with_multiple_kwargs(self):
  #
  #   def test_fn(f, a, b, c, kwargs1, kwargs2):
  #     return f(a, b=b, **kwargs1, c=c, **kwargs2) + 5
  #
  #   with self.converted(test_fn, (functions, call_trees), {}) as result:
  #     self.assertEqual(
  #         result.test_fn(lambda *args, **kwargs: 7, 1, 2, 3, {'d': 4},
  #                        {'e': 5}), 12)
  #     self.assertListEqual(self.dynamic_calls, [((1,), {
  #         'b': 2,
  #         'c': 3,
  #         'd': 4,
  #         'e': 5
  #     })])
  def test_function_with_call_in_lambda_argument(self):
    def f(l, a):
      return l(a) + 4000
    def g(a, *args):
      return a + sum(args)
    def test_fn(f, g, a, *args):
      return f(lambda x: g(x, *args), a)
    with self.converted(test_fn, (functions, call_trees), {}) as result:
      self.assertEqual(result.test_fn(f, g, 1, *(20, 300)), 4321)
  def test_debugger_set_trace(self):
    # A fake pdb module records set_trace() calls instead of breaking.
    tracking_list = []
    pdb = imp.new_module('fake_pdb')
    pdb.set_trace = lambda: tracking_list.append(1)
    def test_fn():
      return pdb.set_trace()
    with self.converted(test_fn, (functions, call_trees),
                        {'pdb': pdb}) as result:
      result.test_fn()
      self.assertListEqual(tracking_list, [1])
  def test_class_method(self):
    class TestClass(object):
      def other_method(self, x):
        return x + 20
      def test_method(self, a):
        return self.other_method(a) + 300
    tc = TestClass()
    with self.converted(TestClass.test_method, (functions, call_trees),
                        {}) as result:
      self.assertEqual(321, result.test_method(tc, 1))
      self.assertListEqual(self.dynamic_calls, [((1,), None)])
  def test_object_method(self):
    class TestClass(object):
      def other_method(self, x):
        return x + 20
      def test_method(self, a):
        return self.other_method(a) + 300
    tc = TestClass()
    with self.converted(tc.test_method, (functions, call_trees),
                        {}) as result:
      self.assertEqual(321, result.test_method(tc, 1))
      self.assertListEqual(self.dynamic_calls, [((1,), None)])
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| {
"content_hash": "4560c43b78af42da3b4bdd476f693597",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 79,
"avg_line_length": 29.43859649122807,
"alnum_prop": 0.5709177592371871,
"repo_name": "gunan/tensorflow",
"id": "86ca2dc9c244f309176edab41b7e59895f2453a8",
"size": "7411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/autograph/converters/call_trees_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5003"
},
{
"name": "Batchfile",
"bytes": "45924"
},
{
"name": "C",
"bytes": "774953"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "77908225"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "104215"
},
{
"name": "Go",
"bytes": "1841471"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "962443"
},
{
"name": "Jupyter Notebook",
"bytes": "556650"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1479029"
},
{
"name": "Makefile",
"bytes": "58603"
},
{
"name": "Objective-C",
"bytes": "104667"
},
{
"name": "Objective-C++",
"bytes": "297830"
},
{
"name": "PHP",
"bytes": "23994"
},
{
"name": "Pascal",
"bytes": "3739"
},
{
"name": "Pawn",
"bytes": "17039"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "39476740"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7459"
},
{
"name": "Shell",
"bytes": "650007"
},
{
"name": "Smarty",
"bytes": "34649"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional SHA1 column to the RequestLogger model."""

    dependencies = [('logger', '0002_auto_20161231_1740')]

    operations = [
        migrations.AddField(
            model_name='requestlogger',
            name='sha1',
            field=models.CharField(
                verbose_name='SHA1',
                max_length=40,
                blank=True,
                null=True,
                default=None,
            ),
        ),
    ]
| {
"content_hash": "a17efe4519ddf49383612113d67db8f7",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 108,
"avg_line_length": 24.333333333333332,
"alnum_prop": 0.6118721461187214,
"repo_name": "AstroMatt/esa-subjective-time-perception",
"id": "93e1e5ecf499f7af4a775e513731850c152dd6cd",
"size": "511",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "backend/logger/migrations/0003_requestlogger_sha1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "678"
},
{
"name": "HTML",
"bytes": "11242"
},
{
"name": "JavaScript",
"bytes": "5425"
},
{
"name": "Python",
"bytes": "97121"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import url, patterns
from projects.constants import LANGUAGES_REGEX
from urls import urlpatterns as main_patterns
# NOTE: patterns are tried in order, so the specific /projects/... routes
# must stay ahead of the generic language/version catch-alls below.
urlpatterns = patterns(
    '', # base view, flake8 complains if it is on the previous line.
    # Subproject docs with an explicit language and version.
    url((r'^projects/(?P<project_slug>[\w.-]+)/(?P<lang_slug>\w{2})/'
        r'(?P<version_slug>[\w.-]+)/(?P<filename>.*)$'),
        'core.views.subproject_serve_docs',
        name='subproject_docs_detail'),
    # Subproject docs without language/version.
    url(r'^projects/(?P<project_slug>[\w.-]+)',
        'core.views.subproject_serve_docs',
        name='subproject_docs_detail'),
    url(r'^projects/$',
        'core.views.subproject_list',
        name='subproject_docs_list'),
    # Main docs: language + version + file path.
    url(r'^(?P<lang_slug>\w{2})/(?P<version_slug>[\w.-]+)/(?P<filename>.*)$',
        'core.views.serve_docs',
        name='docs_detail'),
    # Language + version with no filename defaults to index.html.
    url(r'^(?P<lang_slug>\w{2})/(?P<version_slug>.*)/$',
        'core.views.serve_docs',
        {'filename': 'index.html'},
        name='docs_detail'),
    url(r'^page/(?P<filename>.*)$',
        'core.views.redirect_page_with_filename',
        name='docs_detail'),
    # Bare language / bare version redirect handlers.
    url(r'^(?P<lang_slug>%s)/$' % LANGUAGES_REGEX,
        'core.views.redirect_lang_slug',
        name='lang_subdomain_handler'),
    url(r'^(?P<version_slug>.*)/$',
        'core.views.redirect_version_slug',
        name='version_subdomain_handler'),
    url(r'^$', 'core.views.redirect_project_slug'),
)
urlpatterns += main_patterns
| {
"content_hash": "f402e67fc791d21c07d251a5b311c74f",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 77,
"avg_line_length": 31.369565217391305,
"alnum_prop": 0.5855855855855856,
"repo_name": "nyergler/pythonslides",
"id": "787ec419f6a8844259e81139a2a3fd02f307facc",
"size": "1443",
"binary": false,
"copies": "1",
"ref": "refs/heads/ps",
"path": "readthedocs/core/subdomain_urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "53004"
},
{
"name": "JavaScript",
"bytes": "69469"
},
{
"name": "Python",
"bytes": "1031151"
},
{
"name": "Ruby",
"bytes": "9728"
},
{
"name": "Shell",
"bytes": "5540"
}
],
"symlink_target": ""
} |
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from collections import namedtuple
import pytest
import pytz
from pingparsing import PingParsing
@pytest.fixture
def ping_parser():
    # Fresh PingParsing instance per test, configured with the UTC timezone.
    return PingParsing(timezone=pytz.UTC)
# A single parser test case: raw ping output (`value`), the expected parse
# result (`expected`), and the per-packet replies (`replies`).
PingTestData = namedtuple("PingTestData", ["value", "expected", "replies"])
| {
"content_hash": "cd45efa340c63d09b4dc1f5c2923f725",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 67,
"avg_line_length": 17.833333333333332,
"alnum_prop": 0.7694704049844237,
"repo_name": "thombashi/pingparsing",
"id": "52b9a293db74048e8011aa7581ea9a7977d1b4fd",
"size": "321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "509"
},
{
"name": "Python",
"bytes": "100698"
},
{
"name": "Shell",
"bytes": "265"
}
],
"symlink_target": ""
} |
# Package version as a (major, minor, patch) tuple.
VERSION = (1, 4, 4)
# Django app-config path loaded when this app is installed.
default_app_config = 'image.apps.ImageConfig'
"content_hash": "475db206f9f556bbe574c12ea33eaaf5",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 45,
"avg_line_length": 22,
"alnum_prop": 0.696969696969697,
"repo_name": "akhilari7/pa-dude",
"id": "9e7527f415dc6c4a111d4c2e296c0e128c8ca5bf",
"size": "66",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/image/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45763"
},
{
"name": "HTML",
"bytes": "175708"
},
{
"name": "JavaScript",
"bytes": "118949"
},
{
"name": "Python",
"bytes": "14267983"
},
{
"name": "Shell",
"bytes": "3240"
}
],
"symlink_target": ""
} |
extensions = [
'ipypublish.sphinx.gls',
]
exclude_patterns = ['_build']
master_doc = 'contents'
| {
"content_hash": "29c31700f881d3ebe353bd988527b24f",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 29,
"avg_line_length": 20.8,
"alnum_prop": 0.6346153846153846,
"repo_name": "chrisjsewell/ipypublish",
"id": "2ad12e0f1a9d409c96e53c13f20c2fcbd1d23cdc",
"size": "104",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "ipypublish/sphinx/tests/sourcedirs/bibgloss_missingref/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4549"
},
{
"name": "CSS",
"bytes": "7275"
},
{
"name": "HTML",
"bytes": "7194717"
},
{
"name": "JavaScript",
"bytes": "2124646"
},
{
"name": "Jupyter Notebook",
"bytes": "1319557"
},
{
"name": "Makefile",
"bytes": "4666"
},
{
"name": "Python",
"bytes": "494303"
},
{
"name": "Shell",
"bytes": "552"
},
{
"name": "TeX",
"bytes": "267595"
}
],
"symlink_target": ""
} |
from django.test import TestCase, Client
from django.conf import settings
from django.contrib import admin
import random
from django.contrib.auth.models import User
from models import AccessAttempt
from decorators import FAILURE_LIMIT
# Only run tests if they have axes in middleware
# Basically a functional test
class AccessAttemptTest(TestCase):
    """Functional tests for lock-out after repeated failed admin logins.

    setUp creates a random number of staff users; the tests then post bad
    credentials to /admin/ up to FAILURE_LIMIT times and assert that the
    lock-out message appears on the next attempt.
    """
    # Base strings chosen so they cannot collide with real credentials.
    NOT_GONNA_BE_PASSWORD = "sfdlermmvnLsefrlg0c9gjjPxmvLlkdf2#"
    NOT_GONNA_BE_USERNAME = "whywouldyouohwhy"
    def setUp(self):
        # Create between 10 and 49 staff users to attempt logins against.
        for i in range(0, random.randrange(10, 50)):
            username = "person%s" % i
            email = "%s@example.org" % username
            u = User.objects.create_user(email=email, username=username)
            u.is_staff = True
            u.save()
    def _gen_bad_password(self):
        # Random suffix guarantees the password never matches anything.
        return AccessAttemptTest.NOT_GONNA_BE_PASSWORD + str(random.random())
    def _random_username(self, correct_username=False):
        # NOTE(review): with correct_username=True this returns a User
        # *object*, not a username string; presumably the test client
        # stringifies it into the username -- confirm before relying on it.
        if not correct_username:
            return (AccessAttemptTest.NOT_GONNA_BE_USERNAME +
                    str(random.random()))[:30]
        else:
            return random.choice(User.objects.filter(is_staff=True))
    def _attempt_login(self, correct_username=False, user=""):
        # NOTE(review): the `user` parameter is accepted but never used.
        response = self.client.post(
            '/admin/', {'username': self._random_username(correct_username),
                        'password': self._gen_bad_password()}
        )
        return response
    def test_login_max(self, correct_username=False):
        # Up to FAILURE_LIMIT failures the login form is still shown.
        for i in range(0, FAILURE_LIMIT):
            response = self._attempt_login(correct_username=correct_username)
            self.assertContains(response, "this_is_the_login_form")
        # So, we shouldn't have gotten a lock-out yet.
        # But we should get one now
        response = self._attempt_login()
        self.assertContains(response, "Account locked")
    def test_login_max_with_more(self, correct_username=False):
        for i in range(0, FAILURE_LIMIT):
            response = self._attempt_login(correct_username=correct_username)
            self.assertContains(response, "this_is_the_login_form")
        # So, we shouldn't have gotten a lock-out yet.
        # But we should get one now
        for i in range(0, random.randrange(1, 100)):
            # try to log in a bunch of times
            response = self._attempt_login()
            self.assertContains(response, "Account locked")
    def test_with_real_username_max(self):
        self.test_login_max(correct_username=True)
    def test_with_real_username_max_with_more(self):
        self.test_login_max_with_more(correct_username=True)
| {
"content_hash": "b80d4f5f995827a881981de9a2ebbcd3",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 77,
"avg_line_length": 39.36363636363637,
"alnum_prop": 0.6481909160892995,
"repo_name": "pombredanne/django-axes-1",
"id": "0eb892347ba8db92e06076d0c4b4221002def0a2",
"size": "2598",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "axes/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13313"
}
],
"symlink_target": ""
} |
import io
import os
import sys
import tempfile
import pytest
import hypothesis as h
import hypothesis.strategies as st
import numpy as np
import pyarrow as pa
import pyarrow.tests.strategies as past
from pyarrow.feather import (read_feather, write_feather, read_table,
FeatherDataset)
try:
from pandas.testing import assert_frame_equal
import pandas as pd
import pyarrow.pandas_compat
except ImportError:
pass
@pytest.fixture(scope='module')
def datadir(base_datadir):
    # Directory holding the feather test fixture files.
    return base_datadir / 'feather'
def random_path(prefix='feather_'):
    # Returns a temp-file path that does not exist yet; callers create it.
    # NOTE(review): tempfile.mktemp is deprecated (race-prone); the tests
    # depend on getting a *nonexistent* path, so any replacement must
    # preserve that -- TODO confirm before changing.
    return tempfile.mktemp(prefix=prefix)
@pytest.fixture(scope="module", params=[1, 2])
def version(request):
    """Parametrize tests over both feather format versions."""
    return request.param
@pytest.fixture(scope="module", params=[None, "uncompressed", "lz4", "zstd"])
def compression(request):
    """Parametrize tests over compression codecs, skipping unavailable ones."""
    codec = request.param
    if codec in ("lz4", "zstd") and not pa.Codec.is_available(codec):
        pytest.skip(f'{codec} is not available')
    return codec
# Paths created by the tests; initialized in setup_module and removed in
# teardown_module.
TEST_FILES = None
def setup_module(module):
    # Reset the module-level list of temp files before the tests run.
    global TEST_FILES
    TEST_FILES = []
def teardown_module(module):
    """Best-effort removal of every temp file the tests registered."""
    for test_path in TEST_FILES:
        try:
            os.remove(test_path)
        except OSError:
            # Some registered paths are never actually created.
            pass
@pytest.mark.pandas
def test_file_not_exist():
    # Reading a nonexistent path must surface an Arrow IO error.
    with pytest.raises(pa.ArrowIOError):
        read_feather('test_invalid_file')
def _check_pandas_roundtrip(df, expected=None, path=None,
                            columns=None, use_threads=False,
                            version=None, compression=None,
                            compression_level=None):
    """Write *df* to feather and assert reading it back matches *expected*.

    When *expected* is None the round-trip is compared against *df* itself.
    """
    if path is None:
        path = random_path()
    TEST_FILES.append(path)
    write_feather(df, path, compression=compression,
                  compression_level=compression_level, version=version)
    if not os.path.exists(path):
        raise Exception('file not written')
    result = read_feather(path, columns, use_threads=use_threads)
    assert_frame_equal(result, df if expected is None else expected)
def _check_arrow_roundtrip(table, path=None, compression=None):
    """Write *table* to feather and assert the file reads back equal."""
    if path is None:
        path = random_path()
    TEST_FILES.append(path)
    write_feather(table, path, compression=compression)
    if not os.path.exists(path):
        raise Exception('file not written')
    assert read_table(path).equals(table)
def _assert_error_on_write(df, exc, path=None, version=2):
    """Assert that writing *df* to feather raises *exc*."""
    if path is None:
        path = random_path()
    TEST_FILES.append(path)
    # check that we are raising the exception on writing
    with pytest.raises(exc):
        write_feather(df, path, version=version)
def test_dataset(version):
    """A FeatherDataset over several files reassembles the original table."""
    n_rows, n_cols = 100, 100
    num_files = 5
    paths = [random_path() for _ in range(num_files)]
    TEST_FILES.extend(paths)
    table = pa.table({
        f"col_{i}": np.random.randn(n_rows) for i in range(n_cols)
    })
    # Split the table row-wise across the files.
    rows_per_file = n_rows // num_files
    for index, path in enumerate(paths):
        start = index * rows_per_file
        write_feather(table[start:start + rows_per_file], path,
                      version=version)
    assert FeatherDataset(paths).read_table().equals(table)
@pytest.mark.pandas
def test_float_no_nulls(version):
    """float32/float64 columns without nulls round-trip through feather."""
    num_values = 100
    data = {
        dtype: np.random.randn(num_values).astype(dtype)
        for dtype in ('f4', 'f8')
    }
    _check_pandas_roundtrip(pd.DataFrame(data), version=version)
@pytest.mark.pandas
def test_read_table(version):
    """read_table returns the written table, with and without memory mapping."""
    path = random_path()
    TEST_FILES.append(path)
    values = np.random.randint(0, 100, size=(100, 100))
    columns = [f'col_{i}' for i in range(100)]
    table = pa.Table.from_arrays(values, columns)
    write_feather(table, path, version=version)
    assert read_table(path).equals(table)
    # Same checks without memory mapping.
    assert read_table(path, memory_map=False).equals(table)
    assert_frame_equal(table.to_pandas(), read_feather(path, memory_map=False))
@pytest.mark.pandas
def test_use_threads(version):
    """ARROW-14470: single-threaded reads must match threaded reads."""
    path = random_path()
    TEST_FILES.append(path)
    values = np.random.randint(0, 10, size=(10, 10))
    columns = [f'col_{i}' for i in range(10)]
    table = pa.Table.from_arrays(values, columns)
    write_feather(table, path, version=version)
    expected = table.to_pandas()
    assert_frame_equal(expected, read_feather(path))
    # Explicit use_threads=False on both read paths.
    assert_frame_equal(expected, read_feather(path, use_threads=False))
    assert read_table(path, use_threads=False).equals(table)
@pytest.mark.pandas
def test_float_nulls(version):
    """Float columns with a validity mask round-trip with nulls intact.

    Cleanup of the original: removed a `path`/`TEST_FILES` registration for
    a file that was never created, and the unused `expected_cols`
    bookkeeping (dead locals).
    """
    num_values = 100
    null_mask = np.random.randint(0, 10, size=num_values) < 3
    dtypes = ['f4', 'f8']
    arrays = [pa.array(np.random.randn(num_values).astype(name),
                       mask=null_mask)
              for name in dtypes]
    table = pa.table(arrays, names=dtypes)
    _check_arrow_roundtrip(table)
    df = table.to_pandas()
    _check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_integer_no_nulls(version):
    """All fixed-width integer dtypes round-trip via pandas and arrow paths."""
    numpy_dtypes = ['i1', 'i2', 'i4', 'i8',
                    'u1', 'u2', 'u4', 'u8']
    num_values = 100
    data = {}
    arr = []
    for dtype in numpy_dtypes:
        casted = np.random.randint(0, 100, size=num_values).astype(dtype)
        data[dtype] = casted
        arr.append(casted)
    _check_pandas_roundtrip(pd.DataFrame(data), version=version)
    _check_arrow_roundtrip(pa.table(arr, names=numpy_dtypes))
@pytest.mark.pandas
def test_platform_numpy_integers(version):
    """Platform-specific numpy integer aliases (longlong) round-trip."""
    num_values = 100
    values = np.random.randint(0, 100, size=num_values)
    df = pd.DataFrame({'longlong': values.astype('longlong')})
    _check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_integer_with_nulls(version):
    """Integer columns with nulls round-trip (pandas upcasts to float).

    Cleanup of the original: removed a `path`/`TEST_FILES` registration for
    a file that was never created, and the unused `expected_cols`/`expected`
    bookkeeping (dead locals).
    """
    int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
    num_values = 100
    null_mask = np.random.randint(0, 10, size=num_values) < 3
    arrays = []
    for name in int_dtypes:
        values = np.random.randint(0, 100, size=num_values)
        arrays.append(pa.array(values, mask=null_mask))
    table = pa.table(arrays, names=int_dtypes)
    _check_arrow_roundtrip(table)
    df = table.to_pandas()
    _check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_boolean_no_nulls(version):
    """Roundtrip a boolean column without nulls."""
    np.random.seed(0)
    bools = np.random.randn(100) > 0
    _check_pandas_roundtrip(pd.DataFrame({'bools': bools}), version=version)
@pytest.mark.pandas
def test_boolean_nulls(version):
    """Roundtrip a boolean column containing nulls.

    pandas requires upcast to object dtype for nullable booleans.
    (Removed dead code: an unused `random_path()` entry.)
    """
    num_values = 100
    np.random.seed(0)

    mask = np.random.randint(0, 10, size=num_values) < 3
    values = np.random.randint(0, 10, size=num_values) < 5

    table = pa.table([pa.array(values, mask=mask)], names=['bools'])
    _check_arrow_roundtrip(table)

    df = table.to_pandas()
    _check_pandas_roundtrip(df, version=version)
def test_buffer_bounds_error(version):
    """ARROW-1676: roundtrip nullable float columns of many lengths.

    Exercises buffer-bounds handling for a leading null followed by
    16..255 values.  (Removed dead code: an unused `random_path()`
    entry — no file is ever written to it.)
    """
    for i in range(16, 256):
        table = pa.Table.from_arrays(
            [pa.array([None] + list(range(i)), type=pa.float64())],
            names=["arr"]
        )
        _check_arrow_roundtrip(table)
def test_boolean_object_nulls(version):
    """Roundtrip an object-dtype boolean column containing None."""
    repeats = 100
    arr = np.array([False, None, True] * repeats, dtype=object)
    table = pa.Table.from_arrays([arr], names=["arr"])
    _check_arrow_roundtrip(table)
@pytest.mark.pandas
def test_delete_partial_file_on_error(version):
    """A failed write must not leave a partial file on disk."""
    if sys.platform == 'win32':
        pytest.skip('Windows hangs on to file handle for some reason')

    class CustomClass:
        pass

    # the 'strings' column cannot be serialized and will make the write fail
    bad_column = [b'foo', None, 'bar', CustomClass(), np.nan]
    df = pd.DataFrame(
        {
            'numbers': range(5),
            'strings': bad_column},
        columns=['numbers', 'strings'])

    path = random_path()
    try:
        write_feather(df, path, version=version)
    except Exception:
        pass

    assert not os.path.exists(path)
@pytest.mark.pandas
def test_strings(version):
    """String columns: mixed bytes/unicode coerce to binary; nulls survive."""
    repeats = 1000

    # Mixed bytes, unicode, strings coerced to binary
    mixed = [b'foo', None, 'bar', 'qux', np.nan]
    coerced = [b'foo', None, b'bar', b'qux', np.nan]
    _check_pandas_roundtrip(pd.DataFrame({'strings': mixed * repeats}),
                            pd.DataFrame({'strings': coerced * repeats}),
                            version=version)

    # embedded nulls are ok, whether spelled None or np.nan
    for unicode_values in (['foo', None, 'bar', 'qux', None],
                           ['foo', None, 'bar', 'qux', np.nan]):
        frame = pd.DataFrame({'strings': unicode_values * repeats})
        expected = pd.DataFrame({'strings': unicode_values * repeats})
        _check_pandas_roundtrip(frame, expected, version=version)
@pytest.mark.pandas
def test_empty_strings(version):
    """Roundtrip a column consisting only of empty strings."""
    frame = pd.DataFrame({'strings': [''] * 10})
    _check_pandas_roundtrip(frame, version=version)
@pytest.mark.pandas
def test_all_none(version):
    """Roundtrip a column that is entirely None."""
    frame = pd.DataFrame({'all_none': [None] * 10})
    _check_pandas_roundtrip(frame, version=version)
@pytest.mark.pandas
def test_all_null_category(version):
    # ARROW-1188: a categorical column whose values are all null
    frame = pd.DataFrame({"A": (1, 2, 3), "B": (None, None, None)})
    frame = frame.assign(B=frame.B.astype("category"))
    _check_pandas_roundtrip(frame, version=version)
@pytest.mark.pandas
def test_multithreaded_read(version):
    """Roundtrip a 100-column frame with multithreaded reads enabled."""
    frame = pd.DataFrame({'c{}'.format(i): [''] * 10 for i in range(100)})
    _check_pandas_roundtrip(frame, use_threads=True, version=version)
@pytest.mark.pandas
def test_nan_as_null(version):
    # Create a nan that is not numpy.nan (np.nan * 2 is a distinct object)
    strings = np.array(['foo', np.nan, np.nan * 2, 'bar'] * 10)
    frame = pd.DataFrame({'strings': strings})
    _check_pandas_roundtrip(frame, version=version)
@pytest.mark.pandas
def test_category(version):
    """Categorical string columns roundtrip; np.nan nulls become None."""
    repeats = 1000
    raw = ['foo', None, 'bar', 'qux', np.nan]
    frame = pd.DataFrame({'strings': raw * repeats})
    frame['strings'] = frame['strings'].astype('category')

    normalized = ['foo', None, 'bar', 'qux', None]
    expected = pd.DataFrame({'strings': pd.Categorical(normalized * repeats)})
    _check_pandas_roundtrip(frame, expected, version=version)
@pytest.mark.pandas
def test_timestamp(version):
    """Roundtrip naive and timezone-aware timestamp columns."""
    frame = pd.DataFrame({'naive': pd.date_range('2016-03-28', periods=10)})
    localized = frame.naive.dt.tz_localize('utc')
    frame['with_tz'] = localized.dt.tz_convert('America/Los_Angeles')
    _check_pandas_roundtrip(frame, version=version)
@pytest.mark.pandas
def test_timestamp_with_nulls(version):
    """Roundtrip naive and tz-aware timestamps that contain nulls."""
    stamps = [pd.Timestamp(2016, 1, 1), None, pd.Timestamp(2016, 1, 3)]
    frame = pd.DataFrame({'test': stamps})
    frame['with_tz'] = frame.test.dt.tz_localize('utc')
    _check_pandas_roundtrip(frame, version=version)
@pytest.mark.pandas
@pytest.mark.xfail(reason="not supported", raises=TypeError)
def test_timedelta_with_nulls_v1():
    """Feather V1 cannot store timedeltas; the write raises TypeError."""
    deltas = [pd.Timedelta('1 day'), None, pd.Timedelta('3 day')]
    _check_pandas_roundtrip(pd.DataFrame({'test': deltas}), version=1)
@pytest.mark.pandas
def test_timedelta_with_nulls():
    """Feather V2 roundtrips timedelta columns containing nulls."""
    deltas = [pd.Timedelta('1 day'), None, pd.Timedelta('3 day')]
    _check_pandas_roundtrip(pd.DataFrame({'test': deltas}), version=2)
@pytest.mark.pandas
def test_out_of_float64_timestamp_with_nulls(version):
    """Nanosecond timestamps beyond float64 precision roundtrip intact."""
    index = pd.DatetimeIndex([1451606400000000001, None, 14516064000030405])
    frame = pd.DataFrame({'test': index})
    frame['with_tz'] = frame.test.dt.tz_localize('utc')
    _check_pandas_roundtrip(frame, version=version)
@pytest.mark.pandas
def test_non_string_columns(version):
    """Integer column labels are coerced to strings on write."""
    frame = pd.DataFrame({0: [1, 2, 3, 4],
                          1: [True, False, True, False]})
    expected = frame.rename(columns=str)
    _check_pandas_roundtrip(frame, expected, version=version)
@pytest.mark.pandas
@pytest.mark.skipif(not os.path.supports_unicode_filenames,
                    reason='unicode filenames not supported')
def test_unicode_filename(version):
    # GH #209: non-ASCII file names must be handled
    unicode_name = (b'Besa_Kavaj\xc3\xab.feather').decode('utf-8')
    frame = pd.DataFrame({'foo': [1, 2, 3, 4]})
    _check_pandas_roundtrip(frame,
                            path=random_path(prefix=unicode_name),
                            version=version)
@pytest.mark.pandas
def test_read_columns(version):
    """Reading a subset of columns returns exactly those columns."""
    frame = pd.DataFrame({
        'foo': [1, 2, 3, 4],
        'boo': [5, 6, 7, 8],
        'woo': [1, 3, 5, 7],
    })
    subset = ['boo', 'woo']
    _check_pandas_roundtrip(frame, frame[subset], version=version,
                            columns=subset)
def test_overwritten_file(version):
    """Writing over an existing feather file fully replaces its contents."""
    path = random_path()
    TEST_FILES.append(path)

    np.random.seed(0)
    values = np.random.randint(0, 10, size=100)

    # first write: a 100-row table
    write_feather(pa.table({'ints': values}), path)

    # second write to the same path: a smaller, differently-named column
    smaller = pa.table({'more_ints': values[0:50]})
    _check_arrow_roundtrip(smaller, path=path)
@pytest.mark.pandas
def test_filelike_objects(version):
    """Write to and read from an in-memory buffer instead of a path."""
    # the copy makes the frame's data non-strided
    frame = pd.DataFrame(np.arange(12).reshape(4, 3),
                         columns=['a', 'b', 'c']).copy()

    buf = io.BytesIO()
    write_feather(frame, buf, version=version)
    buf.seek(0)

    assert_frame_equal(read_feather(buf), frame)
@pytest.mark.pandas
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
def test_sparse_dataframe(version):
    # GH #221: sparse frames are densified on roundtrip
    if not pa.pandas_compat._pandas_api.has_sparse:
        pytest.skip("version of pandas does not support SparseDataFrame")

    sparse = pd.DataFrame({'A': [0, 1, 2],
                           'B': [1, 0, 1]}).to_sparse(fill_value=1)
    _check_pandas_roundtrip(sparse, sparse.to_dense(), version=version)
@pytest.mark.pandas
def test_duplicate_columns_pandas():
    # https://github.com/wesm/feather/issues/53
    # not currently able to handle duplicate columns in a pandas frame
    frame = pd.DataFrame(np.arange(12).reshape(4, 3),
                         columns=list('aaa')).copy()
    _assert_error_on_write(frame, ValueError)
def test_duplicate_columns():
    """Duplicate column names roundtrip in V2 but are rejected by V1."""
    table = pa.table([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                     names=['a', 'a', 'b'])
    _check_arrow_roundtrip(table)
    _assert_error_on_write(table, ValueError, version=1)
@pytest.mark.pandas
def test_unsupported():
    """Serializing arbitrary python objects raises a clear error."""
    # https://github.com/wesm/feather/issues/240

    # custom python objects
    class A:
        pass

    _assert_error_on_write(pd.DataFrame({'a': [A(), A()]}), ValueError)

    # non-strings mixed into a string column
    _assert_error_on_write(pd.DataFrame({'a': ['a', 1, 2.0]}), TypeError)
@pytest.mark.pandas
def test_v2_set_chunksize():
    """chunksize splits a V2 file into record batches of that size."""
    table = pa.table(pd.DataFrame({'A': np.arange(1000)}))

    sink = io.BytesIO()
    write_feather(table, sink, chunksize=250, version=2)

    # 1000 rows / 250 per batch == 4 batches
    ipc_file = pa.ipc.open_file(pa.BufferReader(sink.getvalue()))
    assert ipc_file.num_record_batches == 4
    assert len(ipc_file.get_batch(0)) == 250
@pytest.mark.pandas
@pytest.mark.lz4
@pytest.mark.snappy
@pytest.mark.zstd
def test_v2_compression_options():
    """V2 accepts lz4/zstd (with levels); V1 rejects compression options."""
    frame = pd.DataFrame({'A': np.arange(1000)})

    # (compression, compression_level) combinations accepted by V2
    accepted = [
        ('uncompressed', None),
        ('lz4', None),
        ('lz4', 1),
        ('lz4', 12),
        ('zstd', 1),
        ('zstd', 10),
    ]
    for compression, level in accepted:
        _check_pandas_roundtrip(frame, compression=compression,
                                compression_level=level)

    sink = io.BytesIO()

    # Trying to compress with V1
    with pytest.raises(
            ValueError,
            match="Feather V1 files do not support compression option"):
        write_feather(frame, sink, compression='lz4', version=1)

    # Trying to set chunksize with V1
    with pytest.raises(
            ValueError,
            match="Feather V1 files do not support chunksize option"):
        write_feather(frame, sink, chunksize=4096, version=1)

    # Unsupported compressor
    with pytest.raises(ValueError,
                       match='compression="snappy" not supported'):
        write_feather(frame, sink, compression='snappy')
def test_v2_lz4_default_compression():
    # ARROW-8750: compression=None should select lz4 when it is available
    if not pa.Codec.is_available('lz4_frame'):
        pytest.skip("LZ4 compression support is not built in C++")

    # some highly compressible data
    table = pa.table([np.repeat(0, 100000)], names=['f0'])

    def _serialized_size(**kwargs):
        sink = io.BytesIO()
        write_feather(table, sink, **kwargs)
        return len(sink.getvalue())

    # default output must be smaller than an explicitly uncompressed one
    assert _serialized_size() < _serialized_size(compression='uncompressed')
def test_v1_unsupported_types():
    """Feather V1 rejects nested types with an actionable message."""
    table = pa.table([pa.array([[1, 2, 3], [], None])], names=['f0'])
    expected_message = ("Unsupported Feather V1 type: "
                        "list<item: int64>. "
                        "Use V2 format to serialize all Arrow types.")
    with pytest.raises(TypeError, match=expected_message):
        write_feather(table, io.BytesIO(), version=1)
@pytest.mark.slow
@pytest.mark.pandas
def test_large_dataframe(version):
    """Roundtrip a 400-million-row frame (slow, memory heavy)."""
    frame = pd.DataFrame({'A': np.arange(400000000)})
    _check_pandas_roundtrip(frame, version=version)
@pytest.mark.large_memory
@pytest.mark.pandas
def test_chunked_binary_error_message():
    # ARROW-3058: As Feather does not yet support chunked columns, we at least
    # make sure it's clear to the user what is going on

    # 2^31 + 1 bytes of binary data
    values = [b'x'] + [b'x' * (1 << 20)] * 2 * (1 << 10)
    frame = pd.DataFrame({'byte_col': values})

    # Works fine with version 2
    sink = io.BytesIO()
    write_feather(frame, sink, version=2)
    roundtripped = read_feather(pa.BufferReader(sink.getvalue()))
    assert_frame_equal(roundtripped, frame)

    # Version 1 errors out with an actionable message
    with pytest.raises(ValueError, match="'byte_col' exceeds 2GB maximum "
                       "capacity of a Feather binary column. This restriction "
                       "may be lifted in the future"):
        write_feather(frame, io.BytesIO(), version=1)
def test_feather_without_pandas(tempdir, version):
    # ARROW-8345: the roundtrip works without pandas in the picture
    table = pa.table([pa.array([1, 2, 3])], names=['f0'])
    destination = str(tempdir / "data.feather")
    _check_arrow_roundtrip(table, destination)
@pytest.mark.pandas
def test_read_column_selection(version):
    # ARROW-8641: columns can be selected by name or index, in any order
    frame = pd.DataFrame(np.arange(12).reshape(4, 3),
                         columns=['a', 'b', 'c'])

    cases = [
        (['a', 'c'], ['a', 'c']),   # by name
        ([0, 2], ['a', 'c']),       # by index
        (['b', 'a'], ['b', 'a']),   # different order is followed
        ([1, 0], ['b', 'a']),
    ]
    for selection, expected_names in cases:
        _check_pandas_roundtrip(
            frame, columns=selection, expected=frame[expected_names],
            version=version)
def test_read_column_duplicated_selection(tempdir, version):
    """Duplicated entries in the column selection are honored."""
    table = pa.table([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                     names=['a', 'b', 'c'])
    path = str(tempdir / "data.feather")
    write_feather(table, path, version=version)

    expected = pa.table([[1, 2, 3], [4, 5, 6], [1, 2, 3]],
                        names=['a', 'b', 'a'])
    for col_selection in (['a', 'b', 'a'], [0, 1, 0]):
        assert read_table(path, columns=col_selection).equals(expected)
def test_read_column_duplicated_in_file(tempdir):
    """Duplicated column names in the file itself (feather v2 only)."""
    table = pa.table([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                     names=['a', 'b', 'a'])
    path = str(tempdir / "data.feather")
    write_feather(table, path, version=2)

    # no selection works fine
    assert read_table(path).equals(table)

    # selection with indices works
    assert read_table(path, columns=[0, 2]).column_names == ['a', 'a']

    # selection by (ambiguous) column name errors
    with pytest.raises(ValueError):
        read_table(path, columns=['a', 'b'])
def test_nested_types(compression):
    # https://issues.apache.org/jira/browse/ARROW-8860
    # struct column
    struct_column = pa.StructArray.from_arrays(
        [[0, 1, 2], [1, 2, 3]], names=["f1", "f2"])
    _check_arrow_roundtrip(pa.table({'col': struct_column}),
                           compression=compression)

    # list column
    list_column = pa.array([[1, 2], [3, 4]])
    _check_arrow_roundtrip(pa.table({'col': list_column}),
                           compression=compression)

    # nested list column, including a null inner list
    nested_list = pa.array([[[1, 2], [3, 4]], [[5, 6], None]])
    _check_arrow_roundtrip(pa.table({'col': nested_list}),
                           compression=compression)
@h.given(past.all_tables, st.sampled_from(["uncompressed", "lz4", "zstd"]))
def test_roundtrip(table, compression):
    # Property-based check: any hypothesis-generated table must survive a
    # feather roundtrip under each listed compression codec.
    _check_arrow_roundtrip(table, compression=compression)
@pytest.mark.lz4
def test_feather_v017_experimental_compression_backward_compatibility(datadir):
    # ARROW-11163 - ensure newer pyarrow versions can read the old feather
    # files from version 0.17.0 with experimental compression support (before
    # it was officially added to IPC format in 1.0.0)
    #
    # file generated with:
    #     table = pa.table({'a': range(5)})
    #     from pyarrow import feather
    #     feather.write_feather(
    #         table, "v0.17.0.version=2-compression=lz4.feather",
    #         compression="lz4", version=2)
    expected = pa.table({'a': range(5)})
    stored = read_table(datadir / "v0.17.0.version=2-compression=lz4.feather")
    assert stored.equals(expected)
| {
"content_hash": "da39acc5a41c2580f0f69446bfe19dfa",
"timestamp": "",
"source": "github",
"line_count": 805,
"max_line_length": 79,
"avg_line_length": 28.504347826086956,
"alnum_prop": 0.6200644992591301,
"repo_name": "laurentgo/arrow",
"id": "498440c5f586188e185766ea50b3ea20a4bf3879",
"size": "23732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyarrow/tests/test_feather.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "73655"
},
{
"name": "Awk",
"bytes": "3683"
},
{
"name": "Batchfile",
"bytes": "32252"
},
{
"name": "C",
"bytes": "328114"
},
{
"name": "C#",
"bytes": "419434"
},
{
"name": "C++",
"bytes": "7254875"
},
{
"name": "CMake",
"bytes": "401649"
},
{
"name": "CSS",
"bytes": "3946"
},
{
"name": "Dockerfile",
"bytes": "42193"
},
{
"name": "FreeMarker",
"bytes": "2274"
},
{
"name": "Go",
"bytes": "364102"
},
{
"name": "HTML",
"bytes": "23047"
},
{
"name": "Java",
"bytes": "2296962"
},
{
"name": "JavaScript",
"bytes": "84850"
},
{
"name": "Lua",
"bytes": "8741"
},
{
"name": "M4",
"bytes": "8713"
},
{
"name": "MATLAB",
"bytes": "9068"
},
{
"name": "Makefile",
"bytes": "44853"
},
{
"name": "Meson",
"bytes": "36931"
},
{
"name": "Objective-C",
"bytes": "7559"
},
{
"name": "PLpgSQL",
"bytes": "56995"
},
{
"name": "Perl",
"bytes": "3799"
},
{
"name": "Python",
"bytes": "1548489"
},
{
"name": "R",
"bytes": "155922"
},
{
"name": "Ruby",
"bytes": "682150"
},
{
"name": "Rust",
"bytes": "1609482"
},
{
"name": "Shell",
"bytes": "251436"
},
{
"name": "Thrift",
"bytes": "137291"
},
{
"name": "TypeScript",
"bytes": "932690"
}
],
"symlink_target": ""
} |
import numbers
from . import Image, ImageColor
from ._util import isStringType
"""
A simple 2D drawing interface for PIL images.
<p>
Application code should use the <b>Draw</b> factory, instead of
directly.
"""
class ImageDraw(object):
    """A simple 2D drawing context bound to a single image.

    Wraps ``Image.core.draw`` and renders primitives (arcs, chords,
    ellipses, lines, polygons, rectangles, text, ...) directly into the
    image's pixel data.  Application code should normally obtain
    instances through the :py:func:`Draw` factory rather than
    constructing this class directly.
    """

    def __init__(self, im, mode=None):
        """
        Create a drawing instance.

        :param im: The image to draw in.
        :param mode: Optional mode to use for color values. For RGB
           images, this argument can be RGB or RGBA (to blend the
           drawing into the image). For all other modes, this argument
           must be the same as the image mode. If omitted, the mode
           defaults to the mode of the image.
        """
        im.load()
        if im.readonly:
            im._copy()  # make it writeable
        blend = 0
        if mode is None:
            mode = im.mode
        if mode != im.mode:
            if mode == "RGBA" and im.mode == "RGB":
                # drawing RGBA ink onto an RGB image: alpha-blend
                blend = 1
            else:
                raise ValueError("mode mismatch")
        if mode == "P":
            # keep the palette so color tuples can be mapped to indexes
            self.palette = im.palette
        else:
            self.palette = None
        self.im = im.im
        self.draw = Image.core.draw(self.im, blend)
        self.mode = mode
        if mode in ("I", "F"):
            self.ink = self.draw.draw_ink(1, mode)
        else:
            self.ink = self.draw.draw_ink(-1, mode)
        if mode in ("1", "P", "I", "F"):
            # FIXME: fix Fill2 to properly support matte for I+F images
            self.fontmode = "1"
        else:
            self.fontmode = "L"  # aliasing is okay for other modes
        self.fill = 0
        self.font = None

    def getfont(self):
        """Get the current default font."""
        if not self.font:
            # FIXME: should add a font repository
            from . import ImageFont
            self.font = ImageFont.load_default()
        return self.font

    def _getink(self, ink, fill=None):
        """Resolve *ink*/*fill* color specs into core ink objects.

        Returns an ``(ink, fill)`` pair; either element may be None,
        meaning the corresponding operation (outline / fill) is skipped.
        """
        if ink is None and fill is None:
            # neither given: use the context's default ink
            if self.fill:
                fill = self.ink
            else:
                ink = self.ink
        else:
            if ink is not None:
                if isStringType(ink):
                    ink = ImageColor.getcolor(ink, self.mode)
                if self.palette and not isinstance(ink, numbers.Number):
                    # map color tuple to a palette index
                    ink = self.palette.getcolor(ink)
                ink = self.draw.draw_ink(ink, self.mode)
            if fill is not None:
                if isStringType(fill):
                    fill = ImageColor.getcolor(fill, self.mode)
                if self.palette and not isinstance(fill, numbers.Number):
                    fill = self.palette.getcolor(fill)
                fill = self.draw.draw_ink(fill, self.mode)
        return ink, fill

    def arc(self, xy, start, end, fill=None):
        """Draw an arc."""
        ink, fill = self._getink(fill)
        if ink is not None:
            self.draw.draw_arc(xy, start, end, ink)

    def bitmap(self, xy, bitmap, fill=None):
        """Draw a bitmap."""
        bitmap.load()
        ink, fill = self._getink(fill)
        if ink is None:
            ink = fill
        if ink is not None:
            self.draw.draw_bitmap(xy, bitmap.im, ink)

    def chord(self, xy, start, end, fill=None, outline=None):
        """Draw a chord."""
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            self.draw.draw_chord(xy, start, end, fill, 1)
        if ink is not None:
            self.draw.draw_chord(xy, start, end, ink, 0)

    def ellipse(self, xy, fill=None, outline=None):
        """Draw an ellipse."""
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            self.draw.draw_ellipse(xy, fill, 1)
        if ink is not None:
            self.draw.draw_ellipse(xy, ink, 0)

    def line(self, xy, fill=None, width=0):
        """Draw a line, or a connected sequence of line segments."""
        ink, fill = self._getink(fill)
        if ink is not None:
            self.draw.draw_lines(xy, ink, width)

    def shape(self, shape, fill=None, outline=None):
        """(Experimental) Draw a shape."""
        shape.close()
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            self.draw.draw_outline(shape, fill, 1)
        if ink is not None:
            self.draw.draw_outline(shape, ink, 0)

    def pieslice(self, xy, start, end, fill=None, outline=None):
        """Draw a pieslice."""
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            self.draw.draw_pieslice(xy, start, end, fill, 1)
        if ink is not None:
            self.draw.draw_pieslice(xy, start, end, ink, 0)

    def point(self, xy, fill=None):
        """Draw one or more individual pixels."""
        ink, fill = self._getink(fill)
        if ink is not None:
            self.draw.draw_points(xy, ink)

    def polygon(self, xy, fill=None, outline=None):
        """Draw a polygon."""
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            self.draw.draw_polygon(xy, fill, 1)
        if ink is not None:
            self.draw.draw_polygon(xy, ink, 0)

    def rectangle(self, xy, fill=None, outline=None):
        """Draw a rectangle."""
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            self.draw.draw_rectangle(xy, fill, 1)
        if ink is not None:
            self.draw.draw_rectangle(xy, ink, 0)

    def _multiline_check(self, text):
        """Return True if *text* contains a newline (str or bytes)."""
        split_character = "\n" if isinstance(text, str) else b"\n"
        return split_character in text

    def _multiline_split(self, text):
        """Split *text* into a list of lines (str or bytes)."""
        split_character = "\n" if isinstance(text, str) else b"\n"
        return text.split(split_character)

    def text(self, xy, text, fill=None, font=None, anchor=None,
             *args, **kwargs):
        """Draw a string at the given position.

        Strings containing newlines are delegated to
        :py:meth:`multiline_text`.
        """
        if self._multiline_check(text):
            return self.multiline_text(xy, text, fill, font, anchor,
                                       *args, **kwargs)
        ink, fill = self._getink(fill)
        if font is None:
            font = self.getfont()
        if ink is None:
            ink = fill
        if ink is not None:
            try:
                # getmask2 also returns an offset to apply to xy
                mask, offset = font.getmask2(text, self.fontmode)
                xy = xy[0] + offset[0], xy[1] + offset[1]
            except AttributeError:
                try:
                    mask = font.getmask(text, self.fontmode)
                except TypeError:
                    # older font objects only take the text argument
                    mask = font.getmask(text)
            self.draw.draw_bitmap(xy, mask, ink)

    def multiline_text(self, xy, text, fill=None, font=None, anchor=None,
                       spacing=4, align="left"):
        """Draw a multiline string, one line at a time, honoring *align*."""
        widths = []
        max_width = 0
        lines = self._multiline_split(text)
        # line height is measured from a reference glyph plus spacing
        line_spacing = self.textsize('A', font=font)[1] + spacing
        for line in lines:
            line_width, line_height = self.textsize(line, font)
            widths.append(line_width)
            max_width = max(max_width, line_width)
        left, top = xy
        for idx, line in enumerate(lines):
            if align == "left":
                pass  # left = x
            elif align == "center":
                left += (max_width - widths[idx]) / 2.0
            elif align == "right":
                left += (max_width - widths[idx])
            else:
                assert False, 'align must be "left", "center" or "right"'
            self.text((left, top), line, fill, font, anchor)
            top += line_spacing
            # reset the horizontal position for the next line
            left = xy[0]

    def textsize(self, text, font=None, *args, **kwargs):
        """Get the size of a given string, in pixels."""
        if self._multiline_check(text):
            return self.multiline_textsize(text, font, *args, **kwargs)
        if font is None:
            font = self.getfont()
        return font.getsize(text)

    def multiline_textsize(self, text, font=None, spacing=4):
        """Get the (width, height) of a multiline string, in pixels."""
        max_width = 0
        lines = self._multiline_split(text)
        line_spacing = self.textsize('A', font=font)[1] + spacing
        for line in lines:
            line_width, line_height = self.textsize(line, font)
            max_width = max(max_width, line_width)
        return max_width, len(lines)*line_spacing
def Draw(im, mode=None):
    """
    A simple 2D drawing interface for PIL images.

    :param im: The image to draw in.
    :param mode: Optional mode to use for color values. For RGB
       images, this argument can be RGB or RGBA (to blend the
       drawing into the image). For all other modes, this argument
       must be the same as the image mode. If omitted, the mode
       defaults to the mode of the image.
    """
    try:
        # let the image supply its own specialised drawing context
        return im.getdraw(mode)
    except AttributeError:
        # no hook available: fall back to the generic ImageDraw class
        return ImageDraw(im, mode)
# experimental access to the outline API
try:
    Outline = Image.core.outline
except AttributeError:
    # the core module was built without outline support
    Outline = None
def getdraw(im=None, hints=None):
    """
    (Experimental) A more advanced 2D drawing interface for PIL images,
    based on the WCK interface.

    :param im: The image to draw in.
    :param hints: An optional list of hints.
    :returns: A (drawing context, drawing resource factory) tuple.
    """
    # FIXME: this needs more work!
    # FIXME: come up with a better 'hints' scheme.
    handler = None
    if not hints or "nicest" in hints:
        try:
            # prefer the antialiased agg backend when it is available
            from . import _imagingagg as handler
        except ImportError:
            pass
    if handler is None:
        # fall back to the ImageDraw2 wrapper
        from . import ImageDraw2 as handler
    if im:
        im = handler.Draw(im)
    return im, handler
def floodfill(image, xy, value, border=None):
    """
    (experimental) Fills a bounded region with a given color.

    :param image: Target image.
    :param xy: Seed position (a 2-item coordinate tuple).
    :param value: Fill color.
    :param border: Optional border value. If given, the region consists of
       pixels with a color different from the border color. If not given,
       the region consists of pixels having the same color as the seed
       pixel.
    """
    # based on an implementation by Eric S. Raymond
    pixel = image.load()
    x, y = xy
    try:
        background = pixel[x, y]
        if background == value:
            return  # seed point already has fill color
        pixel[x, y] = value
    except (ValueError, IndexError):
        return  # seed point outside image

    # The two traversal modes differ only in which pixels the fill may
    # spread to; select the predicate once instead of duplicating the
    # whole loop (the original had two verbatim copies of the BFS).
    if border is None:
        def _spreadable(p):
            # spread over pixels matching the seed's original color
            return p == background
    else:
        def _spreadable(p):
            # spread over anything that is neither filled nor border
            return p != value and p != border

    # breadth-first traversal from the seed; `edge` is the frontier
    edge = [(x, y)]
    while edge:
        newedge = []
        for (x, y) in edge:
            for (s, t) in ((x+1, y), (x-1, y), (x, y+1), (x, y-1)):
                try:
                    p = pixel[s, t]
                except IndexError:
                    pass  # neighbour outside the image
                else:
                    if _spreadable(p):
                        pixel[s, t] = value
                        newedge.append((s, t))
        edge = newedge
| {
"content_hash": "6fa1f01191bbe00474d0accdb6044ad6",
"timestamp": "",
"source": "github",
"line_count": 335,
"max_line_length": 75,
"avg_line_length": 34.31044776119403,
"alnum_prop": 0.5308856794849487,
"repo_name": "okwow123/djangol2",
"id": "ddf669f78a27814bcb1b265615171af6952b0e4a",
"size": "12705",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "example/env/lib/python2.7/site-packages/PIL/ImageDraw.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "44659"
},
{
"name": "JavaScript",
"bytes": "3260"
},
{
"name": "Makefile",
"bytes": "694"
},
{
"name": "Python",
"bytes": "636751"
}
],
"symlink_target": ""
} |
"""
Michaelis-Menten Kinetics
=========================
Implements support for *Michaelis-Menten* kinetics, a model of enzyme kinetics:
- :func:`colour.biochemistry.reaction_rate_MichaelisMenten_Michaelis1913`
- :func:`colour.biochemistry.reaction_rate_MichaelisMenten_Abebe2017`
- :func:`colour.biochemistry.REACTION_RATE_MICHAELISMENTEN_METHODS`
- :func:`colour.biochemistry.reaction_rate_MichaelisMenten`
- :func:`colour.biochemistry.\
substrate_concentration_MichaelisMenten_Michaelis1913`
- :func:`colour.biochemistry.\
substrate_concentration_MichaelisMenten_Abebe2017`
- :func:`colour.biochemistry.SUBSTRATE_CONCENTRATION_MICHAELISMENTEN_METHODS`
- :func:`colour.biochemistry.substrate_concentration_MichaelisMenten`
References
----------
- :cite:`Abebe2017a` : Abebe, M. A., Pouli, T., Larabi, M.-C., & Reinhard,
E. (2017). Perceptual Lightness Modeling for High-Dynamic-Range Imaging.
ACM Transactions on Applied Perception, 15(1), 1-19. doi:10.1145/3086577
- :cite:`Wikipedia2003d` : Wikipedia. (2003). Michaelis-Menten kinetics.
Retrieved April 29, 2017, from
https://en.wikipedia.org/wiki/Michaelis%E2%80%93Menten_kinetics
"""
from __future__ import annotations
from colour.hints import (
Any,
FloatingOrArrayLike,
FloatingOrNDArray,
Literal,
Union,
)
from colour.utilities import (
CanonicalMapping,
as_float,
as_float_array,
filter_kwargs,
validate_method,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"reaction_rate_MichaelisMenten_Michaelis1913",
"reaction_rate_MichaelisMenten_Abebe2017",
"REACTION_RATE_MICHAELISMENTEN_METHODS",
"reaction_rate_MichaelisMenten",
"substrate_concentration_MichaelisMenten_Michaelis1913",
"substrate_concentration_MichaelisMenten_Abebe2017",
"SUBSTRATE_CONCENTRATION_MICHAELISMENTEN_METHODS",
"substrate_concentration_MichaelisMenten",
]
def reaction_rate_MichaelisMenten_Michaelis1913(
    S: FloatingOrArrayLike,
    V_max: FloatingOrArrayLike,
    K_m: FloatingOrArrayLike,
) -> FloatingOrNDArray:
    """
    Describe the rate of enzymatic reactions, by relating reaction rate
    :math:`v` to concentration of a substrate :math:`S`.

    Parameters
    ----------
    S
        Concentration of a substrate :math:`S`.
    V_max
        Maximum rate :math:`V_{max}` achieved by the system, at saturating
        substrate concentration.
    K_m
        Substrate concentration :math:`K_m` at which the reaction rate is
        half of :math:`V_{max}`.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Reaction rate :math:`v`.

    References
    ----------
    :cite:`Wikipedia2003d`

    Examples
    --------
    >>> reaction_rate_MichaelisMenten_Michaelis1913(0.5, 2.5, 0.8)
    ... # doctest: +ELLIPSIS
    0.9615384...
    """

    S = as_float_array(S)
    V_max = as_float_array(V_max)
    K_m = as_float_array(K_m)

    # Hyperbolic saturation curve: v = V_max * S / (K_m + S).
    v = (V_max * S) / (K_m + S)

    return as_float(v)
def reaction_rate_MichaelisMenten_Abebe2017(
    S: FloatingOrArrayLike,
    V_max: FloatingOrArrayLike,
    K_m: FloatingOrArrayLike,
    b_m: FloatingOrArrayLike,
) -> FloatingOrNDArray:
    """
    Describe the rate of enzymatic reactions, by relating reaction rate
    :math:`v` to concentration of a substrate :math:`S` according to the
    modified *Michaelis-Menten* kinetics equation as given by
    *Abebe, Pouli, Larabi and Reinhard (2017)*.

    Parameters
    ----------
    S
        Concentration of a substrate :math:`S` (or
        :math:`(\\cfrac{Y}{Y_n})^{\\epsilon}`).
    V_max
        Maximum rate :math:`V_{max}` (or :math:`a_m`) achieved by the system,
        at saturating substrate concentration.
    K_m
        Substrate concentration :math:`K_m` (or :math:`c_m`) at which the
        reaction rate is half of :math:`V_{max}`.
    b_m
        Bias factor :math:`b_m`.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Reaction rate :math:`v`.

    References
    ----------
    :cite:`Abebe2017a`

    Examples
    --------
    >>> reaction_rate_MichaelisMenten_Abebe2017(0.5, 1.448, 0.635, 0.813)
    ... # doctest: +ELLIPSIS
    0.6951512...
    """

    S = as_float_array(S)
    V_max = as_float_array(V_max)
    K_m = as_float_array(K_m)
    b_m = as_float_array(b_m)

    # Modified saturation curve with a bias factor in the denominator.
    numerator = V_max * S
    denominator = b_m * S + K_m

    return as_float(numerator / denominator)
# Dispatch table: public method name -> reaction rate implementation,
# consumed by :func:`reaction_rate_MichaelisMenten`.
REACTION_RATE_MICHAELISMENTEN_METHODS: CanonicalMapping = CanonicalMapping(
    {
        "Michaelis 1913": reaction_rate_MichaelisMenten_Michaelis1913,
        "Abebe 2017": reaction_rate_MichaelisMenten_Abebe2017,
    }
)
REACTION_RATE_MICHAELISMENTEN_METHODS.__doc__ = """
Supported *Michaelis-Menten* kinetics reaction rate equation computation
methods.
References
----------
:cite:`Wikipedia2003d`, :cite:`Abebe2017a`
"""
def reaction_rate_MichaelisMenten(
    S: FloatingOrArrayLike,
    V_max: FloatingOrArrayLike,
    K_m: FloatingOrArrayLike,
    method: Union[
        Literal["Michaelis 1913", "Abebe 2017"], str
    ] = "Michaelis 1913",
    **kwargs: Any,
) -> FloatingOrNDArray:
    """
    Describe the rate of enzymatic reactions, by relating reaction rate
    :math:`v` to concentration of a substrate :math:`S` according to given
    method.

    Parameters
    ----------
    S
        Concentration of a substrate :math:`S`.
    V_max
        Maximum rate :math:`V_{max}` achieved by the system, at saturating
        substrate concentration.
    K_m
        Substrate concentration :math:`K_m` at which the reaction rate is
        half of :math:`V_{max}`.
    method
        Computation method.

    Other Parameters
    ----------------
    b_m
        {:func:`colour.biochemistry.reaction_rate_MichaelisMenten_Abebe2017`},
        Bias factor :math:`b_m`.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Reaction rate :math:`v`.

    References
    ----------
    :cite:`Wikipedia2003d`, :cite:`Abebe2017a`

    Examples
    --------
    >>> reaction_rate_MichaelisMenten(0.5, 2.5, 0.8)  # doctest: +ELLIPSIS
    0.9615384...
    >>> reaction_rate_MichaelisMenten(
    ...     0.5, 2.5, 0.8, method="Abebe 2017", b_m=0.813
    ... )  # doctest: +ELLIPSIS
    1.0360547...
    """

    # Normalise the method name, then dispatch to the registered
    # implementation, forwarding only the kwargs it accepts.
    method = validate_method(method, REACTION_RATE_MICHAELISMENTEN_METHODS)

    implementation = REACTION_RATE_MICHAELISMENTEN_METHODS[method]
    supported_kwargs = filter_kwargs(implementation, **kwargs)

    return implementation(S, V_max, K_m, **supported_kwargs)
def substrate_concentration_MichaelisMenten_Michaelis1913(
    v: FloatingOrArrayLike,
    V_max: FloatingOrArrayLike,
    K_m: FloatingOrArrayLike,
) -> FloatingOrNDArray:
    """
    Describe the rate of enzymatic reactions, by relating concentration of a
    substrate :math:`S` to reaction rate :math:`v`.

    Parameters
    ----------
    v
        Reaction rate :math:`v`.
    V_max
        Maximum rate :math:`V_{max}` achieved by the system, at saturating
        substrate concentration.
    K_m
        Substrate concentration :math:`K_m` at which the reaction rate is
        half of :math:`V_{max}`.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Concentration of a substrate :math:`S`.

    References
    ----------
    :cite:`Wikipedia2003d`

    Examples
    --------
    >>> substrate_concentration_MichaelisMenten_Michaelis1913(
    ...     0.961538461538461, 2.5, 0.8
    ... )  # doctest: +ELLIPSIS
    0.4999999...
    """

    v = as_float_array(v)
    V_max = as_float_array(V_max)
    K_m = as_float_array(K_m)

    # Inverse of the Michaelis-Menten equation: S = v * K_m / (V_max - v).
    S = (v * K_m) / (V_max - v)

    return as_float(S)
def substrate_concentration_MichaelisMenten_Abebe2017(
    v: FloatingOrArrayLike,
    V_max: FloatingOrArrayLike,
    K_m: FloatingOrArrayLike,
    b_m: FloatingOrArrayLike,
) -> FloatingOrNDArray:
    """
    Describe the rate of enzymatic reactions, by relating concentration of a
    substrate :math:`S` to reaction rate :math:`v` according to the modified
    *Michaelis-Menten* kinetics equation as given by
    *Abebe, Pouli, Larabi and Reinhard (2017)*.

    Parameters
    ----------
    v
        Reaction rate :math:`v`.
    V_max
        Maximum rate :math:`V_{max}` (or :math:`a_m`) achieved by the system,
        at saturating substrate concentration.
    K_m
        Substrate concentration :math:`K_m` (or :math:`c_m`) at which the
        reaction rate is half of :math:`V_{max}`.
    b_m
        Bias factor :math:`b_m`.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Concentration of a substrate :math:`S`.

    References
    ----------
    :cite:`Abebe2017a`

    Examples
    --------
    >>> substrate_concentration_MichaelisMenten_Abebe2017(
    ...     0.695151224195871, 1.448, 0.635, 0.813
    ... )  # doctest: +ELLIPSIS
    0.4999999...
    """

    v, V_max, K_m, b_m = [
        as_float_array(argument) for argument in (v, V_max, K_m, b_m)
    ]

    # Solve the biased *Michaelis-Menten* relation for the substrate
    # concentration.
    return as_float((v * K_m) / (V_max - b_m * v))
# Dispatch table mapping user-facing method names to the implementation
# functions used by :func:`substrate_concentration_MichaelisMenten`.
# NOTE(review): CanonicalMapping presumably normalises key lookups
# (case/spacing) — confirm against colour.utilities.
SUBSTRATE_CONCENTRATION_MICHAELISMENTEN_METHODS: (
    CanonicalMapping
) = CanonicalMapping(
    {
        "Michaelis 1913": substrate_concentration_MichaelisMenten_Michaelis1913,
        "Abebe 2017": substrate_concentration_MichaelisMenten_Abebe2017,
    }
)
SUBSTRATE_CONCENTRATION_MICHAELISMENTEN_METHODS.__doc__ = """
Supported *Michaelis-Menten* kinetics substrate concentration equation
computation methods.
References
----------
:cite:`Wikipedia2003d`, :cite:`Abebe2017a`
"""
def substrate_concentration_MichaelisMenten(
    v: FloatingOrArrayLike,
    V_max: FloatingOrArrayLike,
    K_m: FloatingOrArrayLike,
    method: Union[
        Literal["Michaelis 1913", "Abebe 2017"], str
    ] = "Michaelis 1913",
    **kwargs: Any,
) -> FloatingOrNDArray:
    """
    Describe the rate of enzymatic reactions, by relating concentration of a
    substrate :math:`S` to reaction rate :math:`v` according to given method.

    Parameters
    ----------
    v
        Reaction rate :math:`v`.
    V_max
        Maximum rate :math:`V_{max}` achieved by the system, at saturating
        substrate concentration.
    K_m
        Substrate concentration :math:`K_m` at which the reaction rate is
        half of :math:`V_{max}`.
    method
        Computation method.

    Other Parameters
    ----------------
    b_m
        {:func:`colour.biochemistry.\
substrate_concentration_MichaelisMenten_Abebe2017`},
        Bias factor :math:`b_m`.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Concentration of a substrate :math:`S`.

    References
    ----------
    :cite:`Wikipedia2003d`, :cite:`Abebe2017a`

    Examples
    --------
    >>> substrate_concentration_MichaelisMenten(
    ...     0.961538461538461, 2.5, 0.8
    ... )  # doctest: +ELLIPSIS
    0.4999999...
    >>> substrate_concentration_MichaelisMenten(
    ...     1.036054703688355, 2.5, 0.8, method="Abebe 2017", b_m=0.813
    ... )  # doctest: +ELLIPSIS
    0.5000000...
    """

    method = validate_method(
        method, SUBSTRATE_CONCENTRATION_MICHAELISMENTEN_METHODS
    )

    function = SUBSTRATE_CONCENTRATION_MICHAELISMENTEN_METHODS[method]

    # Forward only the keyword arguments accepted by the selected
    # implementation (e.g. ``b_m`` for "Abebe 2017").
    return function(v, V_max, K_m, **filter_kwargs(function, **kwargs))
| {
"content_hash": "b4718c76379da441521767f1bbb94753",
"timestamp": "",
"source": "github",
"line_count": 410,
"max_line_length": 80,
"avg_line_length": 27.839024390243903,
"alnum_prop": 0.6370247065007885,
"repo_name": "colour-science/colour",
"id": "9e318fca74ce3677cbbc0e87879033200af55ec1",
"size": "11414",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "colour/biochemistry/michaelis_menten.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7967270"
},
{
"name": "TeX",
"bytes": "163213"
},
{
"name": "Visual Basic 6.0",
"bytes": "1170"
}
],
"symlink_target": ""
} |
"""Discover every ``test_*.py`` file in the current directory and run them
all as one :mod:`unittest` suite."""
import glob
import unittest

test_file_strings = glob.glob('test_*.py')
# Strip the trailing ".py" to obtain importable module names
# (avoid shadowing the builtin ``str`` as the loop variable).
module_strings = [file_name[:-3] for file_name in test_file_strings]
suites = [unittest.defaultTestLoader.loadTestsFromName(module_name)
          for module_name in module_strings]
testSuite = unittest.TestSuite(suites)
text_runner = unittest.TextTestRunner().run(testSuite)
"content_hash": "15b2d9abad928e0ec1580975ef7a461b",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 67,
"avg_line_length": 36.44444444444444,
"alnum_prop": 0.7560975609756098,
"repo_name": "xlvector/dingpa",
"id": "80373921c61390fbc2860251a04ef4043be55916",
"size": "328",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "unittest/unittest_all.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18356"
}
],
"symlink_target": ""
} |
"""
decorators.py
Decorators for URL handlers
"""
from functools import wraps
import logging
from google.appengine.api import users
from birthday.models import Client
from flask import redirect, request, abort
def login_required(func):
    """Decorator: allow the view only for a signed-in user, otherwise
    redirect to the login page."""
    @wraps(func)
    def decorated_view(*args, **kwargs):
        if users.get_current_user():
            return func(*args, **kwargs)
        return redirect(users.create_login_url(request.url))
    return decorated_view
def admin_required(func):
    """Decorator: allow the view only for a signed-in domain administrator."""
    @wraps(func)
    def decorated_view(*args, **kwargs):
        current_user = users.get_current_user()
        if not current_user:
            return redirect(users.create_login_url(request.url))
        client = Client.get_instance()
        if not client:
            abort(500)  # App has not been installed yet
        if current_user.email() not in client.administrators:
            abort(403)  # Signed in but not an administrator
        return func(*args, **kwargs)
    return decorated_view
def super_admin_required(func):
    """Decorator: allow the view only for an App Engine application admin."""
    @wraps(func)
    def decorated_view(*args, **kwargs):
        if not users.get_current_user():
            return redirect(users.create_login_url(request.url))
        if not users.is_current_user_admin():
            abort(401)  # Signed in but lacking App Engine admin rights
        return func(*args, **kwargs)
    return decorated_view
| {
"content_hash": "0f596a6b65b73f9175d1e0b1ab2ac7ad",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 65,
"avg_line_length": 29.4,
"alnum_prop": 0.638095238095238,
"repo_name": "dcifuen/cloudbday",
"id": "9a5f060204eedd9412923c65b25481448d5b50f1",
"size": "1470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/birthday/decorators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "58101"
},
{
"name": "JavaScript",
"bytes": "584264"
},
{
"name": "Python",
"bytes": "3435495"
}
],
"symlink_target": ""
} |
from google.cloud.billing import budgets_v1beta1
async def sample_delete_budget():
    """Generated sample: delete a budget through the asynchronous
    BudgetService client and await completion."""
    # Create a client
    client = budgets_v1beta1.BudgetServiceAsyncClient()
    # Initialize request argument(s)
    # NOTE(review): "name_value" is a placeholder — presumably the budget
    # resource name; confirm the expected format in the API reference.
    request = budgets_v1beta1.DeleteBudgetRequest(
        name="name_value",
    )
    # Make the request
    await client.delete_budget(request=request)
# [END billingbudgets_v1beta1_generated_BudgetService_DeleteBudget_async]
| {
"content_hash": "e4c4d89bc9076139bf41263046a2716b",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 73,
"avg_line_length": 25.470588235294116,
"alnum_prop": 0.7344110854503464,
"repo_name": "googleapis/python-billingbudgets",
"id": "281c204708bc3e9d847ffc83e4b1f91389963cfa",
"size": "1837",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/billingbudgets_v1beta1_generated_budget_service_delete_budget_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "520005"
},
{
"name": "Shell",
"bytes": "30684"
}
],
"symlink_target": ""
} |
import requests
from pyquery import PyQuery as pq
import time
import json
import sys
import re
# NOTE(review): sample detail-page URL; get_detail() below rebuilds the URL
# from parts, so this module-level constant looks unused — confirm.
url = 'http://app1.sfda.gov.cn/datasearch/face3/content.jsp?tableId=26&tableName=TABLE26&tableView=%B9%FA%B2%FA%C6%F7%D0%B5&Id=116849'
def get_detail(item_id=116849, **kwargs):
    """Fetch the detail-page HTML for one item from the SFDA search site.

    Returns the decoded HTML string on success, ``False`` for a non-200
    response, and ``None`` when the request (or decoding) fails — callers
    treat ``None`` as "retry immediately" and ``False`` as "back off".
    """
    url = 'http://app1.sfda.gov.cn/datasearch/face3/content.jsp'
    params = {
        'tableId': 26,
        'tableName': 'TABLE26',
        'tableView': '',
        'Id': item_id,
    }
    # Allow callers to override/extend the query parameters.
    params.update(kwargs)
    try:
        r = requests.get(url, params=params, timeout=3)
        if r.status_code != 200:
            return False
        return r.content.decode('utf-8')
    except Exception:
        # Best-effort scraping: swallow network/decoding errors and signal
        # failure with an explicit None instead of an implicit fall-through.
        return None
def decode_detail_html(html):
    """Parse a detail-page HTML table into a ``{label: value}`` dict.

    Expects a two-column table; the value in column 1 is keyed by the text
    in column 0 of each row.
    """
    item_info = {}
    htmld = pq(html)
    table = htmld('table')
    trs = table('tr')
    # Start at 1, skipping row 0 (presumably a header row — confirm).
    for i in range(1, len(trs)):
        tr = trs.eq(i)
        key = tr('td').eq(0).text()
        value = tr('td').eq(1).text()
        item_info[key] = value
    return item_info
if __name__ == '__main__':
    if len(sys.argv) <= 2:
        # Single-item mode: fetch one id (default, or argv[1]) and print it.
        item_id = 116849
        if len(sys.argv) == 2:
            item_id = int(sys.argv[1])
        html = get_detail(item_id=item_id)
        # Bug fix: the old `html != None or item_id != False` test was almost
        # always true and crashed decode_detail_html() on a failed fetch.
        if html:
            item_info = decode_detail_html(html)
            print(json.dumps(item_info, ensure_ascii=False))
        sys.exit()
    # Batch mode: argv[1] is a file of ids, argv[2] the JSON-lines output.
    # Open with an explicit encoding and close the handles deterministically;
    # the old code wrote encoded bytes to a text-mode file (TypeError on
    # Python 3) and never closed either file.
    with open(sys.argv[1], 'r') as fin, \
            open(sys.argv[2], 'w', encoding='utf-8') as fout:
        for line in fin:
            item_id = int(line.strip())
            while True:
                html = get_detail(item_id=item_id)
                if html is None:
                    continue  # request error: retry immediately
                if html is False:
                    time.sleep(3)  # non-200 response: back off, then retry
                    continue
                item_info = decode_detail_html(html)
                print(item_id, len(item_info.keys()))
                fout.write(json.dumps(item_info, ensure_ascii=False))
                fout.write('\n')
                break
| {
"content_hash": "311e56853ffcad196cc0b0ae7ff6c225",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 134,
"avg_line_length": 24,
"alnum_prop": 0.5308823529411765,
"repo_name": "alingse/crawler-common",
"id": "26c1768f0ea0d712608f1aee7be32b5fabeddb34",
"size": "2083",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "projects/sfda.gov/item_detail.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "30937"
}
],
"symlink_target": ""
} |
import requests
def _rev_auth_str(client_api_key, user_api_key):
"""Returns a Rev Auth string."""
return 'Rev %s:%s' % (client_api_key, user_api_key)
class RevAuth(requests.auth.AuthBase):
    """Attach Rev API credentials to an outgoing request.

    Sets the header ``Authorization: Rev [ClientAPIKey]:[UserAPIKey]``
    as described at http://www.rev.com/api/security.
    """
    def __init__(self, client_api_key, user_api_key):
        self._client_api_key = client_api_key
        self._user_api_key = user_api_key

    def __call__(self, r):
        auth_value = _rev_auth_str(self._client_api_key, self._user_api_key)
        r.headers['Authorization'] = auth_value
        return r
"content_hash": "a0438b113da82af020495da91a3c9eb6",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 92,
"avg_line_length": 27.869565217391305,
"alnum_prop": 0.6224648985959438,
"repo_name": "koemei/rev-api",
"id": "09b38d50c86735aa42999038f4e12d8ae5ef15ea",
"size": "641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rev/auth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "30562"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Schema migration: rename ``ProjectRating.rate_date`` to ``modified``
    and add a ``created`` auto-now-add timestamp."""
    dependencies = [
        ('projects', '0016_auto_20160516_0951'),
    ]
    operations = [
        migrations.RenameField(
            model_name='projectrating',
            old_name='rate_date',
            new_name='modified',
        ),
        migrations.AddField(
            model_name='projectrating',
            name='created',
            # One-off default backfills existing rows; auto_now_add takes over
            # afterwards (hence preserve_default=False).
            field=models.DateTimeField(auto_now_add=True, default=datetime.datetime(2016, 5, 16, 9, 57, 14, 342674, tzinfo=utc)),
            preserve_default=False,
        ),
    ]
| {
"content_hash": "88180af27fde095c9e521340236e4048",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 129,
"avg_line_length": 27.076923076923077,
"alnum_prop": 0.6051136363636364,
"repo_name": "zurfyx/simple",
"id": "39fda84bba3e6780cd2d9f97acb1e81456feae43",
"size": "776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simple/projects/migrations/0017_auto_20160516_0957.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11344"
},
{
"name": "HTML",
"bytes": "95034"
},
{
"name": "JavaScript",
"bytes": "2521"
},
{
"name": "Python",
"bytes": "137848"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import swapper
from accelerator_abstract.models.base_startup_attribute import (
BaseStartupAttribute
)
class StartupAttribute(BaseStartupAttribute):
    """Concrete, swappable implementation of ``BaseStartupAttribute``."""
    class Meta(BaseStartupAttribute.Meta):
        # Lets downstream projects substitute their own model through
        # swapper's swappable-setting mechanism.
        swappable = swapper.swappable_setting(
            BaseStartupAttribute.Meta.app_label, "StartupAttribute")
| {
"content_hash": "0b895366625e2b33d38cc63e9eeac733",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 68,
"avg_line_length": 27.384615384615383,
"alnum_prop": 0.7668539325842697,
"repo_name": "masschallenge/django-accelerator",
"id": "48abbc127eb4afb2bd15a1bbfaaee027dc096b08",
"size": "356",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "accelerator/models/startup_attribute.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1848"
},
{
"name": "Makefile",
"bytes": "6817"
},
{
"name": "Python",
"bytes": "996767"
},
{
"name": "Shell",
"bytes": "2453"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.cache import cache
from django.urls import reverse
from django.utils import timezone
from freezegun import freeze_time
from rest_framework import status, test
from rest_framework.authtoken.models import Token
from . import helpers
class TokenAuthenticationTest(test.APITransactionTestCase):
    """Integration tests for token authentication: obtaining a token via
    username/password, expiry driven by ``user.token_lifetime``, token
    refresh/recreation semantics, and failed-login throttling."""
    def setUp(self):
        # Shared fixtures: one known user plus the auth and probe URLs.
        self.username = 'test'
        self.password = 'secret'
        self.auth_url = 'http://testserver' + reverse('auth-password')
        self.test_url = 'http://testserver/api/'
        get_user_model().objects.create_user(
            self.username, 'admin@example.com', self.password
        )
    def tearDown(self):
        # Failed-attempt throttling state lives in the cache; reset it so
        # tests do not lock each other out.
        cache.clear()
    # Happy path: password login yields a token usable for API requests.
    def test_user_can_authenticate_with_token(self):
        response = self.client.post(
            self.auth_url, data={'username': self.username, 'password': self.password}
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        token = response.data['token']
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
        response = self.client.get(self.test_url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    # A per-user lifetime shorter than the configured default must win.
    def test_token_expires_based_on_user_token_lifetime(self):
        user = get_user_model().objects.get(username=self.username)
        configured_token_lifetime = settings.WALDUR_CORE.get(
            'TOKEN_LIFETIME', timezone.timedelta(hours=1)
        )
        user_token_lifetime = configured_token_lifetime - timezone.timedelta(seconds=40)
        user.token_lifetime = user_token_lifetime.seconds
        user.save()
        response = self.client.post(
            self.auth_url, data={'username': self.username, 'password': self.password}
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        token = response.data['token']
        mocked_now = timezone.now() + user_token_lifetime
        with freeze_time(mocked_now):
            self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
            response = self.client.get(self.test_url)
            self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
            self.assertEqual(response.data['detail'], 'Token has expired.')
    # Each authenticated request slides the token's 'created' timestamp.
    def test_token_creation_time_is_updated_on_every_request(self):
        response = self.client.post(
            self.auth_url, data={'username': self.username, 'password': self.password}
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        token = response.data['token']
        created1 = Token.objects.values_list('created', flat=True).get(key=token)
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
        self.client.get(self.test_url)
        created2 = Token.objects.values_list('created', flat=True).get(key=token)
        self.assertTrue(created1 < created2)
    def test_account_is_blocked_after_five_failed_attempts(self):
        for _ in range(5):
            response = self.client.post(
                self.auth_url, data={'username': self.username, 'password': 'WRONG'}
            )
            self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        # this one should fail with a different error message
        # NOTE(review): the assertions below check `response` from the loop,
        # not the result of this sixth post (which is discarded) — confirm
        # whether the lockout message is expected already on the 5th attempt.
        self.client.post(
            self.auth_url, data={'username': self.username, 'password': 'WRONG'}
        )
        self.assertEqual(
            response.data['detail'], 'Username is locked out. Try in 10 minutes.'
        )
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
    # Logging in after expiry must mint a fresh token, not reuse the old one.
    def test_expired_token_is_recreated_on_successful_authentication(self):
        user = get_user_model().objects.get(username=self.username)
        self.assertIsNotNone(user.token_lifetime)
        response = self.client.post(
            self.auth_url, data={'username': self.username, 'password': self.password}
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        token1 = response.data['token']
        mocked_now = timezone.now() + timezone.timedelta(seconds=user.token_lifetime)
        with freeze_time(mocked_now):
            response = self.client.post(
                self.auth_url,
                data={'username': self.username, 'password': self.password},
            )
            token2 = response.data['token']
            self.assertNotEqual(token1, token2)
    # Re-authenticating before expiry keeps the token but bumps 'created'.
    def test_not_expired_token_creation_time_is_updated_on_authentication(self):
        response = self.client.post(
            self.auth_url, data={'username': self.username, 'password': self.password}
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        token1 = response.data['token']
        created1 = Token.objects.values_list('created', flat=True).get(key=token1)
        response = self.client.post(
            self.auth_url, data={'username': self.username, 'password': self.password}
        )
        token2 = response.data['token']
        created2 = Token.objects.values_list('created', flat=True).get(key=token2)
        self.assertEqual(token1, token2)
        self.assertTrue(created1 < created2)
    # token_lifetime=None disables expiry entirely.
    def test_token_never_expires_if_token_lifetime_is_none(self):
        user = get_user_model().objects.get(username=self.username)
        user.token_lifetime = None
        user.save()
        response = self.client.post(
            self.auth_url, data={'username': self.username, 'password': self.password}
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        original_token = response.data['token']
        year_ahead = timezone.now() + timezone.timedelta(days=365)
        with freeze_time(year_ahead):
            response = self.client.post(
                self.auth_url,
                data={'username': self.username, 'password': self.password},
            )
            token_in_a_year = response.data['token']
        self.assertEqual(original_token, token_in_a_year)
    # Even a never-expiring token must have its 'created' refreshed so that
    # re-enabling a lifetime later works from the latest refresh time.
    def test_token_created_date_is_refreshed_even_if_token_lifetime_is_none(self):
        response = self.client.post(
            self.auth_url, data={'username': self.username, 'password': self.password}
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        user = get_user_model().objects.get(username=self.username)
        original_token_lifetime = user.token_lifetime
        original_created_value = user.auth_token.created
        user.token_lifetime = None
        user.save()
        last_refresh_time = timezone.now() + timezone.timedelta(
            seconds=original_token_lifetime
        )
        with freeze_time(last_refresh_time):
            response = self.client.post(
                self.auth_url,
                data={'username': self.username, 'password': self.password},
            )
            self.assertEqual(response.status_code, status.HTTP_200_OK)
            token = response.data['token']
        user.auth_token.refresh_from_db()
        self.assertTrue(user.auth_token.created > original_created_value)
        user.token_lifetime = original_token_lifetime
        user.save()
        with freeze_time(last_refresh_time):
            self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
            response = self.client.get(self.test_url)
            self.assertEqual(response.status_code, status.HTTP_200_OK)
    @helpers.override_waldur_core_settings(AUTHENTICATION_METHODS=[])
    def test_authentication_fails_if_local_signin_is_disabled(self):
        response = self.client.post(
            self.auth_url, data={'username': self.username, 'password': self.password}
        )
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertTrue(b'Authentication method is disabled.' in response.content)
| {
"content_hash": "1c9cb0968c2a371d20a3f46b44396079",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 88,
"avg_line_length": 43.032967032967036,
"alnum_prop": 0.6407048008171604,
"repo_name": "opennode/waldur-mastermind",
"id": "041c35dabaa970553d0b8cd77750f179b2b60c8f",
"size": "7832",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/waldur_core/core/tests/test_authentication.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4429"
},
{
"name": "Dockerfile",
"bytes": "6258"
},
{
"name": "HTML",
"bytes": "42329"
},
{
"name": "JavaScript",
"bytes": "729"
},
{
"name": "Python",
"bytes": "5520019"
},
{
"name": "Shell",
"bytes": "15429"
}
],
"symlink_target": ""
} |
from django import template
# Library instance on which the template filters below are registered.
register = template.Library()
@register.filter
def float_add(field, value):
    """Template filter: coerce both operands to float and return their sum
    as a string."""
    total = float(field) + float(value)
    return str(total)
@register.filter
def float_substract(field, value):
    """Template filter: coerce both operands to float and return their
    difference as a string."""
    difference = float(field) - float(value)
    return str(difference)
"content_hash": "05fd466269a749d3b37e4ced2f3bb0f5",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 43,
"avg_line_length": 19.076923076923077,
"alnum_prop": 0.7298387096774194,
"repo_name": "bartscheers/banana",
"id": "f0088bb54cc94bee47a8795242db2bab83ad93ea",
"size": "248",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "banana/templatetags/floatmath.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "215"
},
{
"name": "HTML",
"bytes": "109342"
},
{
"name": "JavaScript",
"bytes": "4506"
},
{
"name": "Python",
"bytes": "81386"
}
],
"symlink_target": ""
} |
"""
crackfortran --- read fortran (77,90) code and extract declaration information.
Copyright 1999-2004 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/09/27 07:13:49 $
Pearu Peterson
Usage of crackfortran:
======================
Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h <pyffilename>
-m <module name for f77 routines>,--ignore-contains
Functions: crackfortran, crack2fortran
The following Fortran statements/constructions are supported
(or will be if needed):
block data,byte,call,character,common,complex,contains,data,
dimension,double complex,double precision,end,external,function,
implicit,integer,intent,interface,intrinsic,
logical,module,optional,parameter,private,public,
program,real,(sequence?),subroutine,type,use,virtual,
include,pythonmodule
Note: 'virtual' is mapped to 'dimension'.
Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is a minor bug).
Note: code after 'contains' will be ignored until its scope ends.
Note: 'common' statement is extended: dimensions are moved to variable definitions
Note: f2py directive: <commentchar>f2py<line> is read as <line>
Note: pythonmodule is introduced to represent Python module
Usage:
`postlist=crackfortran(files)`
`postlist` contains declaration information read from the list of files `files`.
`crack2fortran(postlist)` returns a fortran code to be saved to pyf-file
`postlist` has the following structure:
*** it is a list of dictionaries containing `blocks':
B = {'block','body','vars','parent_block'[,'name','prefix','args','result',
'implicit','externals','interfaced','common','sortvars',
'commonvars','note']}
B['block'] = 'interface' | 'function' | 'subroutine' | 'module' |
'program' | 'block data' | 'type' | 'pythonmodule'
B['body'] --- list containing `subblocks' with the same structure as `blocks'
B['parent_block'] --- dictionary of a parent block:
C['body'][<index>]['parent_block'] is C
B['vars'] --- dictionary of variable definitions
B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first)
B['name'] --- name of the block (not if B['block']=='interface')
B['prefix'] --- prefix string (only if B['block']=='function')
B['args'] --- list of argument names if B['block']== 'function' | 'subroutine'
B['result'] --- name of the return value (only if B['block']=='function')
B['implicit'] --- dictionary {'a':<variable definition>,'b':...} | None
B['externals'] --- list of variables being external
B['interfaced'] --- list of variables being external and defined
B['common'] --- dictionary of common blocks (list of objects)
B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions)
B['from'] --- string showing the 'parents' of the current block
B['use'] --- dictionary of modules used in current block:
{<modulename>:{['only':<0|1>],['map':{<local_name1>:<use_name1>,...}]}}
B['note'] --- list of LaTeX comments on the block
B['f2pyenhancements'] --- optional dictionary
{'threadsafe':'','fortranname':<name>,
'callstatement':<C-expr>|<multi-line block>,
'callprotoargument':<C-expr-list>,
'usercode':<multi-line block>|<list of multi-line blocks>,
'pymethoddef:<multi-line block>'
}
B['entry'] --- dictionary {entryname:argslist,..}
B['varnames'] --- list of variable names given in the order of reading the
Fortran code, useful for derived types.
B['saved_interface'] --- a string of scanned routine signature, defines explicit interface
*** Variable definition is a dictionary
D = B['vars'][<variable name>] =
{'typespec'[,'attrspec','kindselector','charselector','=','typename']}
D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' |
'double precision' | 'integer' | 'logical' | 'real' | 'type'
D['attrspec'] --- list of attributes (e.g. 'dimension(<arrayspec>)',
'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)',
'optional','required', etc)
K = D['kindselector'] = {['*','kind']} (only if D['typespec'] =
'complex' | 'integer' | 'logical' | 'real' )
C = D['charselector'] = {['*','len','kind']}
(only if D['typespec']=='character')
D['='] --- initialization expression string
D['typename'] --- name of the type if D['typespec']=='type'
D['dimension'] --- list of dimension bounds
D['intent'] --- list of intent specifications
D['depend'] --- list of variable names on which current variable depends on
D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised
D['note'] --- list of LaTeX comments on the variable
*** Meaning of kind/char selectors (few examples):
D['typespec>']*K['*']
D['typespec'](kind=K['kind'])
character*C['*']
character(len=C['len'],kind=C['kind'])
(see also fortran type declaration statement formats below)
Fortran 90 type declaration statement format (F77 is subset of F90)
====================================================================
(Main source: IBM XL Fortran 5.1 Language Reference Manual)
type declaration = <typespec> [[<attrspec>]::] <entitydecl>
<typespec> = byte |
character[<charselector>] |
complex[<kindselector>] |
double complex |
double precision |
integer[<kindselector>] |
logical[<kindselector>] |
real[<kindselector>] |
type(<typename>)
<charselector> = * <charlen> |
([len=]<len>[,[kind=]<kind>]) |
(kind=<kind>[,len=<len>])
<kindselector> = * <intlen> |
([kind=]<kind>)
<attrspec> = comma separated list of attributes.
Only the following attributes are used in
building up the interface:
external
(parameter --- affects '=' key)
optional
intent
Other attributes are ignored.
<intentspec> = in | out | inout
<arrayspec> = comma separated list of dimension bounds.
<entitydecl> = <name> [[*<charlen>][(<arrayspec>)] | [(<arrayspec>)]*<charlen>]
[/<init_expr>/ | =<init_expr>] [,<entitydecl>]
In addition, the following attributes are used: check,depend,note
TODO:
* Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)'
-> 'real x(2)')
The above may be solved by creating appropriate preprocessor program, for example.
"""
from __future__ import division, absolute_import, print_function
import sys
import string
import fileinput
import re
import os
import copy
import platform
from . import __version__
# The environment provided by auxfuncs.py is needed for some calls to eval.
# As the needed functions cannot be determined by static inspection of the
# code, it is safest to use import * pending a major refactoring of f2py.
from .auxfuncs import *
f2py_version = __version__.version
# Global flags (command-line configurable; see module docstring):
strictf77 = 1 # Ignore `!' comments unless line[0]=='!'
sourcecodeform = 'fix' # 'fix','free'
quiet = 0 # Be verbose if 0 (Obsolete: not used any more)
verbose = 1 # Be quiet if 0, extra verbose if > 1.
tabchar = 4 * ' '
pyffilename = '' # Signature file name (the -h command-line key).
f77modulename = '' # Module name to wrap F77 routines in (the -m key).
skipemptyends = 0 # for old F77 programs without 'program' statement
ignorecontains = 1
dolowercase = 1
debug = []
# Global parser state (reset by reset_global_f2py_vars()):
beginpattern = ''
currentfilename = ''
expectbegin = 1
f90modulevars = {}
filepositiontext = ''
gotnextfile = 1
groupcache = None
groupcounter = 0
grouplist = {groupcounter: []}
groupname = ''
include_paths = []
neededmodule = -1
onlyfuncs = []
previous_context = None
skipblocksuntil = -1
skipfuncs = []
skipfunctions = []
usermodules = []
def reset_global_f2py_vars():
    """Restore every module-level crackfortran flag and parser-state
    variable to its default value."""
    global groupcounter, grouplist, neededmodule, expectbegin
    global skipblocksuntil, usermodules, f90modulevars, gotnextfile
    global filepositiontext, currentfilename, skipfunctions, skipfuncs
    global onlyfuncs, include_paths, previous_context
    global strictf77, sourcecodeform, quiet, verbose, tabchar, pyffilename
    global f77modulename, skipemptyends, ignorecontains, dolowercase, debug

    # Flags back to their defaults.
    strictf77, sourcecodeform = 1, 'fix'
    quiet, verbose = 0, 1
    tabchar = 4 * ' '
    pyffilename = ''
    f77modulename = ''
    skipemptyends = 0
    ignorecontains = 1
    dolowercase = 1
    debug = []
    # Parser state back to its defaults (fresh containers each time so no
    # state leaks between runs).
    groupcounter = 0
    grouplist = {groupcounter: []}
    neededmodule = -1
    expectbegin = 1
    skipblocksuntil = -1
    usermodules = []
    f90modulevars = {}
    gotnextfile = 1
    filepositiontext = ''
    currentfilename = ''
    skipfunctions = []
    skipfuncs = []
    onlyfuncs = []
    include_paths = []
    previous_context = None
def outmess(line, flag=1):
    """Write *line* to stdout, prefixed with the current file-position text
    when *flag* is truthy; suppressed entirely when not verbose or quiet."""
    global filepositiontext
    if not verbose:
        return
    if quiet:
        return
    if flag:
        sys.stdout.write(filepositiontext)
    sys.stdout.write(line)
# Cap the size of the `re` module's private pattern cache.
re._MAXCACHE = 50
# Default Fortran implicit typing rules: i-n map to integer, the rest to real.
defaultimplicitrules = {}
for c in "abcdefghopqrstuvwxyz$_":
    defaultimplicitrules[c] = {'typespec': 'real'}
for c in "ijklmn":
    defaultimplicitrules[c] = {'typespec': 'integer'}
del c
# Identifiers that clash with C keywords or f2py helper names are renamed
# to "<name>_bn"; invbadnames keeps the inverse mapping for undoing it.
badnames = {}
invbadnames = {}
for n in ['int', 'double', 'float', 'char', 'short', 'long', 'void', 'case', 'while',
          'return', 'signed', 'unsigned', 'if', 'for', 'typedef', 'sizeof', 'union',
          'struct', 'static', 'register', 'new', 'break', 'do', 'goto', 'switch',
          'continue', 'else', 'inline', 'extern', 'delete', 'const', 'auto',
          'len', 'rank', 'shape', 'index', 'slen', 'size', '_i',
          'max', 'min',
          'flen', 'fshape',
          'string', 'complex_double', 'float_double', 'stdin', 'stderr', 'stdout',
          'type', 'default']:
    badnames[n] = n + '_bn'
    invbadnames[n + '_bn'] = n
def rmbadname1(name):
    """Return a C-safe alias for *name* if it clashes with a reserved
    identifier, logging the substitution; otherwise return it unchanged."""
    replacement = badnames.get(name)
    if replacement is None:
        return name
    errmess('rmbadname1: Replacing "%s" with "%s".\n' %
            (name, replacement))
    return replacement
def rmbadname(names):
    """Apply ``rmbadname1`` to every entry of *names*."""
    return list(map(rmbadname1, names))
def undo_rmbadname1(name):
    """Map a ``<name>_bn`` alias back to its original identifier, logging
    the substitution; names without an alias pass through unchanged."""
    original = invbadnames.get(name)
    if original is None:
        return name
    errmess('undo_rmbadname1: Replacing "%s" with "%s".\n'
            % (name, original))
    return original
def undo_rmbadname(names):
    """Apply ``undo_rmbadname1`` to every entry of *names*."""
    return list(map(undo_rmbadname1, names))
def getextension(name):
    """Return the extension of *name* (text after the final dot), or '' when
    there is no dot or a path separator follows the final dot."""
    stem, dot, ext = name.rpartition('.')
    if not dot:
        return ''
    if '\\' in ext or '/' in ext:
        # The "dot" belongs to a directory component, not a file extension.
        return ''
    return ext
is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match
_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search
_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search
_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search
_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match


def is_free_format(file):
    """Check if file is in free format Fortran."""
    # f90 allows both fixed and free format; assume fixed unless clues of
    # free format show up in the first few significant lines (or an
    # emacs-style mode header decides it outright).
    free = 0
    with open(file, 'r') as stream:
        line = stream.readline()
        remaining = 15  # number of non-comment lines to scan for hints
        if _has_f_header(line):
            remaining = 0
        elif _has_f90_header(line):
            remaining = 0
            free = 1
        while remaining > 0 and line:
            if line[0] != '!' and line.strip():
                remaining -= 1
                if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&':
                    free = 1
                    break
            line = stream.readline()
    return free
# Read fortran (77,90) code
def readfortrancode(ffile, dowithline=show, istop=1):
    """
    Read fortran codes from files and
     1) Get rid of comments, line continuations, and empty lines; lower cases.
     2) Call dowithline(line) on every line.
     3) Recursively call itself when statement "include '<filename>'" is met.

    Parameters
    ----------
    ffile : str or list
        File name(s), passed to fileinput.FileInput.
    dowithline : callable
        Called with each assembled statement; also called with ('', -1)
        before the top-level file and ('', 1) after it.
    istop : int
        Nonzero for the top-level call; zero for recursive include-file
        calls, in which case the module-level reader state is saved on
        entry and restored on exit.
    """
    global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77
    global beginpattern, quiet, verbose, dolowercase, include_paths
    if not istop:
        # Recursive (include-file) call: save reader state for restoration below.
        saveglobals = gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
            beginpattern, quiet, verbose, dolowercase
    if ffile == []:
        return
    localdolowercase = dolowercase
    cont = 0          # nonzero while a continued statement is being assembled
    finalline = ''    # completed (possibly lowercased) statement to emit
    ll = ''           # statement currently being assembled
    # Fortran include statement: include 'name' or include "name".
    includeline = re.compile(
        r'\s*include\s*(\'|")(?P<name>[^\'"]*)(\'|")', re.I)
    cont1 = re.compile(r'(?P<line>.*)&\s*\Z')   # trailing '&' (free-form continuation)
    cont2 = re.compile(r'(\s*&|)(?P<line>.*)')  # optional leading '&' on the next line
    mline_mark = re.compile(r".*?'''")          # f2py multiline block delimiter
    if istop:
        dowithline('', -1)  # signal begin-of-source to the consumer
    ll, l1 = '', ''
    spacedigits = [' '] + [str(_m) for _m in range(10)]  # legal column-1 chars in fixed form
    filepositiontext = ''
    fin = fileinput.FileInput(ffile)
    while True:
        l = fin.readline()
        if not l:
            break
        if fin.isfirstline():
            # First line of a (new) file: detect fixed/free/strict-F77 form.
            filepositiontext = ''
            currentfilename = fin.filename()
            gotnextfile = 1
            l1 = l
            strictf77 = 0
            sourcecodeform = 'fix'
            ext = os.path.splitext(currentfilename)[1]
            if is_f_file(currentfilename) and \
                    not (_has_f90_header(l) or _has_fix_header(l)):
                strictf77 = 1
            elif is_free_format(currentfilename) and not _has_fix_header(l):
                sourcecodeform = 'free'
            if strictf77:
                beginpattern = beginpattern77
            else:
                beginpattern = beginpattern90
            outmess('\tReading file %s (format:%s%s)\n'
                    % (repr(currentfilename), sourcecodeform,
                       strictf77 and ',strict' or ''))
        l = l.expandtabs().replace('\xa0', ' ')
        # Get rid of newline characters
        while not l == '':
            if l[-1] not in "\n\r\f":
                break
            l = l[:-1]
        if not strictf77:
            # Strip trailing '!' comments, but re-attach '!f2py' directive content.
            (l, rl) = split_by_unquoted(l, '!')
            l += ' '
            if rl[:5].lower() == '!f2py':  # f2py directive
                l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!')
        if l.strip() == '':  # Skip empty line
            cont = 0
            continue
        if sourcecodeform == 'fix':
            if l[0] in ['*', 'c', '!', 'C', '#']:
                if l[1:5].lower() == 'f2py':  # f2py directive
                    l = ' ' + l[5:]
                else:  # Skip comment line
                    cont = 0
                    continue
            elif strictf77:
                if len(l) > 72:
                    l = l[:72]  # text beyond column 72 is ignored in strict F77
            if not (l[0] in spacedigits):
                raise Exception('readfortrancode: Found non-(space,digit) char '
                                'in the first column.\n\tAre you sure that '
                                'this code is in fix form?\n\tline=%s' % repr(l))
            if (not cont or strictf77) and (len(l) > 5 and not l[5] == ' '):
                # Continuation of a previous line
                ll = ll + l[6:]
                finalline = ''
                origfinalline = ''
            else:
                if not strictf77:
                    # F90 continuation
                    r = cont1.match(l)
                    if r:
                        l = r.group('line')  # Continuation follows ..
                    if cont:
                        # Append to the pending statement, dropping a leading '&'.
                        ll = ll + cont2.match(l).group('line')
                        finalline = ''
                        origfinalline = ''
                    else:
                        # clean up line beginning from possible digits.
                        l = ' ' + l[5:]
                        if localdolowercase:
                            finalline = ll.lower()
                        else:
                            finalline = ll
                        origfinalline = ll
                        ll = l
                    cont = (r is not None)
                else:
                    # clean up line beginning from possible digits.
                    l = ' ' + l[5:]
                    if localdolowercase:
                        finalline = ll.lower()
                    else:
                        finalline = ll
                    origfinalline = ll
                    ll = l
        elif sourcecodeform == 'free':
            if not cont and ext == '.pyf' and mline_mark.match(l):
                # Collect a complete ''' ... ''' multiline block verbatim.
                l = l + '\n'
                while True:
                    lc = fin.readline()
                    if not lc:
                        errmess(
                            'Unexpected end of file when reading multiline\n')
                        break
                    l = l + lc
                    if mline_mark.match(lc):
                        break
                l = l.rstrip()
            r = cont1.match(l)
            if r:
                l = r.group('line')  # Continuation follows ..
            if cont:
                ll = ll + cont2.match(l).group('line')
                finalline = ''
                origfinalline = ''
            else:
                if localdolowercase:
                    finalline = ll.lower()
                else:
                    finalline = ll
                origfinalline = ll
                ll = l
            cont = (r is not None)
        else:
            raise ValueError(
                "Flag sourcecodeform must be either 'fix' or 'free': %s" % repr(sourcecodeform))
        filepositiontext = 'Line #%d in %s:"%s"\n\t' % (
            fin.filelineno() - 1, currentfilename, l1)
        m = includeline.match(origfinalline)
        if m:
            # Include statement: recurse, searching the current file's
            # directory and then include_paths.
            fn = m.group('name')
            if os.path.isfile(fn):
                readfortrancode(fn, dowithline=dowithline, istop=0)
            else:
                include_dirs = [
                    os.path.dirname(currentfilename)] + include_paths
                foundfile = 0
                for inc_dir in include_dirs:
                    fn1 = os.path.join(inc_dir, fn)
                    if os.path.isfile(fn1):
                        foundfile = 1
                        readfortrancode(fn1, dowithline=dowithline, istop=0)
                        break
                if not foundfile:
                    outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % (
                        repr(fn), os.pathsep.join(include_dirs)))
        else:
            dowithline(finalline)
        l1 = ll
    # End of input: flush the last assembled statement.
    if localdolowercase:
        finalline = ll.lower()
    else:
        finalline = ll
    origfinalline = ll
    filepositiontext = 'Line #%d in %s:"%s"\n\t' % (
        fin.filelineno() - 1, currentfilename, l1)
    m = includeline.match(origfinalline)
    if m:
        fn = m.group('name')
        if os.path.isfile(fn):
            readfortrancode(fn, dowithline=dowithline, istop=0)
        else:
            include_dirs = [os.path.dirname(currentfilename)] + include_paths
            foundfile = 0
            for inc_dir in include_dirs:
                fn1 = os.path.join(inc_dir, fn)
                if os.path.isfile(fn1):
                    foundfile = 1
                    readfortrancode(fn1, dowithline=dowithline, istop=0)
                    break
            if not foundfile:
                outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % (
                    repr(fn), os.pathsep.join(include_dirs)))
    else:
        dowithline(finalline)
    filepositiontext = ''
    fin.close()
    if istop:
        dowithline('', 1)  # signal end-of-source
    else:
        # Restore the reader state saved on entry of this include-file call.
        gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
            beginpattern, quiet, verbose, dolowercase = saveglobals
# Crack line
# Template for all statement-recognition regexps below.  The %s slots are
# (prefix, keyword-alternatives, keyword-alternatives, tail); the named
# groups 'before', 'this' and 'after' capture the text around the keyword.
beforethisafter = r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))' + \
    r'\s*(?P<this>(\b(%s)\b))' + \
    r'\s*(?P<after>%s)\s*\Z'
##
# Each *pattern name below is bound to a (compiled regexp, label) pair;
# crackline dispatches on the label ('begin', 'end', 'type', ...).
fortrantypes = r'character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte'
typespattern = re.compile(
    beforethisafter % ('', fortrantypes, fortrantypes, '.*'), re.I), 'type'
typespattern4implicit = re.compile(beforethisafter % (
    '', fortrantypes + '|static|automatic|undefined', fortrantypes + '|static|automatic|undefined', '.*'), re.I)
#
functionpattern = re.compile(beforethisafter % (
    r'([a-z]+[\w\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin'
subroutinepattern = re.compile(beforethisafter % (
    r'[a-z\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin'
# modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin'
#
groupbegins77 = r'program|block\s*data'
beginpattern77 = re.compile(
    beforethisafter % ('', groupbegins77, groupbegins77, '.*'), re.I), 'begin'
groupbegins90 = groupbegins77 + \
    r'|module(?!\s*procedure)|python\s*module|interface|type(?!\s*\()'
beginpattern90 = re.compile(
    beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin'
groupends = r'end|endprogram|endblockdata|endmodule|endpythonmodule|endinterface'
endpattern = re.compile(
    beforethisafter % ('', groupends, groupends, r'[\w\s]*'), re.I), 'end'
# endifs='end\s*(if|do|where|select|while|forall)'
endifs = r'(end\s*(if|do|where|select|while|forall))|(module\s*procedure)'
endifpattern = re.compile(
    beforethisafter % (r'[\w]*?', endifs, endifs, r'[\w\s]*'), re.I), 'endif'
#
implicitpattern = re.compile(
    beforethisafter % ('', 'implicit', 'implicit', '.*'), re.I), 'implicit'
dimensionpattern = re.compile(beforethisafter % (
    '', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension'
externalpattern = re.compile(
    beforethisafter % ('', 'external', 'external', '.*'), re.I), 'external'
optionalpattern = re.compile(
    beforethisafter % ('', 'optional', 'optional', '.*'), re.I), 'optional'
requiredpattern = re.compile(
    beforethisafter % ('', 'required', 'required', '.*'), re.I), 'required'
publicpattern = re.compile(
    beforethisafter % ('', 'public', 'public', '.*'), re.I), 'public'
privatepattern = re.compile(
    beforethisafter % ('', 'private', 'private', '.*'), re.I), 'private'
# NOTE: 'intrisic' (sic) is misspelled, but the same spelling is used
# consistently throughout this file (see crackline/analyzeline), so it
# must be kept as-is.
intrisicpattern = re.compile(
    beforethisafter % ('', 'intrisic', 'intrisic', '.*'), re.I), 'intrisic'
intentpattern = re.compile(beforethisafter % (
    '', 'intent|depend|note|check', 'intent|depend|note|check', r'\s*\(.*?\).*'), re.I), 'intent'
parameterpattern = re.compile(
    beforethisafter % ('', 'parameter', 'parameter', r'\s*\(.*'), re.I), 'parameter'
datapattern = re.compile(
    beforethisafter % ('', 'data', 'data', '.*'), re.I), 'data'
callpattern = re.compile(
    beforethisafter % ('', 'call', 'call', '.*'), re.I), 'call'
entrypattern = re.compile(
    beforethisafter % ('', 'entry', 'entry', '.*'), re.I), 'entry'
callfunpattern = re.compile(
    beforethisafter % ('', 'callfun', 'callfun', '.*'), re.I), 'callfun'
commonpattern = re.compile(
    beforethisafter % ('', 'common', 'common', '.*'), re.I), 'common'
usepattern = re.compile(
    beforethisafter % ('', 'use', 'use', '.*'), re.I), 'use'
containspattern = re.compile(
    beforethisafter % ('', 'contains', 'contains', ''), re.I), 'contains'
formatpattern = re.compile(
    beforethisafter % ('', 'format', 'format', '.*'), re.I), 'format'
# Non-fortran and f2py-specific statements
f2pyenhancementspattern = re.compile(beforethisafter % ('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef',
                                                        'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I | re.S), 'f2pyenhancements'
multilinepattern = re.compile(
    r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z", re.S), 'multiline'
##
def split_by_unquoted(line, characters):
    """
    Splits the line into (line[:i], line[i:]),
    where i is the index of first occurrence of one of the characters
    not within quotes, or len(line) if no such index exists
    """
    assert not (set('"\'') & set(characters)), "cannot split by unquoted quotes"
    escaped = re.escape(characters)
    # 'before' greedily consumes quoted strings and non-target characters;
    # 'after' starts at the first unquoted target character.
    splitter = re.compile(
        r"\A(?P<before>({sq}|{dq}|{plain})*)"
        r"(?P<after>{target}.*)\Z".format(
            plain="[^\"'{}]".format(escaped),
            target="[{}]".format(escaped),
            sq=r"('([^'\\]|(\\.))*')",
            dq=r'("([^"\\]|(\\.))*")'))
    found = splitter.match(line)
    if found is None:
        return (line, "")
    groups = found.groupdict()
    return (groups["before"], groups["after"])
def _simplifyargs(argsline):
    # Flatten an argument list: within each outer-comma-separated token,
    # replace '(', ')' and ',' by '_' so only plain identifiers remain.
    simplified = []
    for token in markoutercomma(argsline).split('@,@'):
        for special in '(),':
            token = token.replace(special, '_')
        simplified.append(token)
    return ','.join(simplified)
crackline_re_1 = re.compile(r'\s*(?P<result>\b[a-z]+[\w]*\b)\s*[=].*', re.I)
def crackline(line, reset=0):
    """
    reset=-1 --- initialize
    reset=0 --- crack the line
    reset=1 --- final check if mismatch of blocks occurred

    Cracked data is saved in grouplist[0].

    Matches `line` against the statement patterns defined above and
    dispatches to analyzeline; maintains the module-level group stack
    (groupcounter/groupname/groupcache/grouplist).
    """
    global beginpattern, groupcounter, groupname, groupcache, grouplist
    global filepositiontext, currentfilename, neededmodule, expectbegin
    global skipblocksuntil, skipemptyends, previous_context, gotnextfile
    _, has_semicolon = split_by_unquoted(line, ";")
    if has_semicolon and not (f2pyenhancementspattern[0].match(line) or
                              multilinepattern[0].match(line)):
        # XXX: non-zero reset values need testing
        assert reset == 0, repr(reset)
        # split line on unquoted semicolons
        line, semicolon_line = split_by_unquoted(line, ";")
        while semicolon_line:
            crackline(line, reset)
            line, semicolon_line = split_by_unquoted(semicolon_line[1:], ";")
        crackline(line, reset)
        return
    if reset < 0:
        # Initialize the module-level cracking state.
        groupcounter = 0
        groupname = {groupcounter: ''}
        groupcache = {groupcounter: {}}
        grouplist = {groupcounter: []}
        groupcache[groupcounter]['body'] = []
        groupcache[groupcounter]['vars'] = {}
        groupcache[groupcounter]['block'] = ''
        groupcache[groupcounter]['name'] = ''
        neededmodule = -1
        skipblocksuntil = -1
        return
    if reset > 0:
        # Final consistency check: close any blocks left open, assuming
        # missing "end" statements.
        fl = 0
        if f77modulename and neededmodule == groupcounter:
            fl = 2
        while groupcounter > fl:
            outmess('crackline: groupcounter=%s groupname=%s\n' %
                    (repr(groupcounter), repr(groupname)))
            outmess(
                'crackline: Mismatch of blocks encountered. Trying to fix it by assuming "end" statement.\n')
            grouplist[groupcounter - 1].append(groupcache[groupcounter])
            grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter = groupcounter - 1
        if f77modulename and neededmodule == groupcounter:
            # Close the implicit interface and module created for f77modulename.
            grouplist[groupcounter - 1].append(groupcache[groupcounter])
            grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter = groupcounter - 1  # end interface
            grouplist[groupcounter - 1].append(groupcache[groupcounter])
            grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter = groupcounter - 1  # end module
            neededmodule = -1
        return
    if line == '':
        return
    flag = 0
    # Find the first statement pattern that matches the line; `pat` is the
    # matched (regexp, label) pair.
    for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern,
                requiredpattern,
                parameterpattern, datapattern, publicpattern, privatepattern,
                intrisicpattern,
                endifpattern, endpattern,
                formatpattern,
                beginpattern, functionpattern, subroutinepattern,
                implicitpattern, typespattern, commonpattern,
                callpattern, usepattern, containspattern,
                entrypattern,
                f2pyenhancementspattern,
                multilinepattern
                ]:
        m = pat[0].match(line)
        if m:
            break
        flag = flag + 1
    if not m:
        re_1 = crackline_re_1
        if 0 <= skipblocksuntil <= groupcounter:
            return
        if 'externals' in groupcache[groupcounter]:
            # The line may be a statement function / call of an external
            # routine: rewrite it as a 'callfun' statement and analyze it.
            for name in groupcache[groupcounter]['externals']:
                if name in invbadnames:
                    name = invbadnames[name]
                if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']:
                    continue
                m1 = re.match(
                    r'(?P<before>[^"]*)\b%s\b\s*@\(@(?P<args>[^@]*)@\)@.*\Z' % name, markouterparen(line), re.I)
                if m1:
                    m2 = re_1.match(m1.group('before'))
                    a = _simplifyargs(m1.group('args'))
                    if m2:
                        line = 'callfun %s(%s) result (%s)' % (
                            name, a, m2.group('result'))
                    else:
                        line = 'callfun %s(%s)' % (name, a)
                    m = callfunpattern[0].match(line)
                    if not m:
                        outmess(
                            'crackline: could not resolve function call for line=%s.\n' % repr(line))
                        return
                    analyzeline(m, 'callfun', line)
                    return
        if verbose > 1 or (verbose == 1 and currentfilename.lower().endswith('.pyf')):
            previous_context = None
            outmess('crackline:%d: No pattern for line\n' % (groupcounter))
        return
    elif pat[1] == 'end':
        if 0 <= skipblocksuntil < groupcounter:
            groupcounter = groupcounter - 1
            if skipblocksuntil <= groupcounter:
                return
        if groupcounter <= 0:
            raise Exception('crackline: groupcounter(=%s) is nonpositive. '
                            'Check the blocks.'
                            % (groupcounter))
        m1 = beginpattern[0].match((line))
        if (m1) and (not m1.group('this') == groupname[groupcounter]):
            raise Exception('crackline: End group %s does not match with '
                            'previous Begin group %s\n\t%s' %
                            (repr(m1.group('this')), repr(groupname[groupcounter]),
                             filepositiontext)
                            )
        if skipblocksuntil == groupcounter:
            skipblocksuntil = -1
        # Pop the finished group into its parent's body.
        grouplist[groupcounter - 1].append(groupcache[groupcounter])
        grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
        del grouplist[groupcounter]
        groupcounter = groupcounter - 1
        if not skipemptyends:
            expectbegin = 1
    elif pat[1] == 'begin':
        if 0 <= skipblocksuntil <= groupcounter:
            groupcounter = groupcounter + 1
            return
        gotnextfile = 0
        analyzeline(m, pat[1], line)
        expectbegin = 0
    elif pat[1] == 'endif':
        pass
    elif pat[1] == 'contains':
        if ignorecontains:
            return
        if 0 <= skipblocksuntil <= groupcounter:
            return
        skipblocksuntil = groupcounter
    else:
        if 0 <= skipblocksuntil <= groupcounter:
            return
        analyzeline(m, pat[1], line)
def markouterparen(line):
    """Return `line` with the outermost parentheses (nesting depth 1)
    replaced by the markers '@(@' and '@)@'; inner parens are kept."""
    pieces = []
    depth = 0
    for ch in line:
        if ch == '(':
            depth += 1
            if depth == 1:
                pieces.append('@(@')
                continue
        elif ch == ')':
            depth -= 1
            if depth == 0:
                pieces.append('@)@')
                continue
        pieces.append(ch)
    return ''.join(pieces)
def markoutercomma(line, comma=','):
    """Return `line` with each `comma` at parenthesis-nesting depth 0
    replaced by '@<comma>@'; commas inside parens or quotes are kept."""
    out = ''
    depth = 0
    before, after = split_by_unquoted(line, comma + '()')
    out += before
    while after:
        ch = after[0]
        if ch == comma and depth == 0:
            out += '@' + comma + '@'
        else:
            out += ch
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
        before, after = split_by_unquoted(after[1:], comma + '()')
        out += before
    assert not depth, repr((depth, line, out))
    return out
def unmarkouterparen(line):
    """Inverse of markouterparen: restore '@(@'/'@)@' markers to parens."""
    for marker, paren in (('@(@', '('), ('@)@', ')')):
        line = line.replace(marker, paren)
    return line
def appenddecl(decl, decl2, force=1):
    """Merge declaration dict `decl2` into `decl` and return the result.

    Scalar keys ('typespec', '=', 'typename') are overwritten when `force`
    is set or absent from `decl`; attrspec/kind/char selectors are merged
    via the setattrspec/setkindselector/setcharselector helpers; unknown
    keys raise.
    """
    if not decl:
        decl = {}
    if not decl2 or decl is decl2:
        return decl
    for key in list(decl2.keys()):
        if key == 'typespec':
            if force or key not in decl:
                decl[key] = decl2[key]
        elif key == 'attrspec':
            for attr in decl2[key]:
                decl = setattrspec(decl, attr, force)
        elif key == 'kindselector':
            decl = setkindselector(decl, decl2[key], force)
        elif key == 'charselector':
            decl = setcharselector(decl, decl2[key], force)
        elif key in ['=', 'typename']:
            if force or key not in decl:
                decl[key] = decl2[key]
        elif key == 'note':
            pass
        elif key in ['intent', 'check', 'dimension', 'optional', 'required']:
            errmess('appenddecl: "%s" not implemented.\n' % key)
        else:
            raise Exception('appenddecl: Unknown variable definition key:' +
                            str(key))
    return decl
# Optional kind/char selector: "@(@...@)@", "*8", or "* @(@...@)@".
selectpattern = re.compile(
    r'\s*(?P<this>(@\(@.*?@\)@|[*][\d*]+|[*]\s*@\(@.*?@\)@|))(?P<after>.*)\Z', re.I)
# Routine header: name, optional argument list, optional result(...)/bind(...).
nameargspattern = re.compile(
    r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>.*)\s*@\)@))*\s*\Z', re.I)
# Call-style "name@(@args@)@" with marked outer parentheses.
callnameargspattern = re.compile(
    r'\s*(?P<name>\b[\w$]+\b)\s*@\(@\s*(?P<args>.*)\s*@\)@\s*\Z', re.I)
# Fortran double-precision literal using 'd'/'D' exponent, e.g. 1.5d0.
real16pattern = re.compile(
    r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)')
# Single-precision literal with 'e'/'E' exponent, or plain "<digits>.<digits>".
real8pattern = re.compile(
    r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))')
# Matches "intent( ... callback ... )" attribute strings.
_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I)
def _is_intent_callback(vdecl):
    # 1 if any attribute of the declaration reads "intent(...callback...)",
    # else 0.
    return int(any(_intentcallbackpattern.match(attrib)
                   for attrib in vdecl.get('attrspec', [])))
def _resolvenameargspattern(line):
    """Extract (name, args, result, bind) from a routine header `line`;
    falls back to a call-style match, else (None, [], None, None)."""
    marked = markouterparen(line)
    hit = nameargspattern.match(marked)
    if hit:
        return (hit.group('name'), hit.group('args'),
                hit.group('result'), hit.group('bind'))
    hit = callnameargspattern.match(marked)
    if hit:
        return hit.group('name'), hit.group('args'), None, None
    return None, [], None, None
def analyzeline(m, case, line):
    """
    Update the module-level group state with the information cracked
    from `line`.

    `m` is the regexp match produced by crackline, `case` the statement
    label it matched ('begin', 'call', 'type', 'data', ...); dispatch is
    on `case`.
    """
    global groupcounter, groupname, groupcache, grouplist, filepositiontext
    global currentfilename, f77modulename, neededinterface, neededmodule
    global expectbegin, gotnextfile, previous_context
    block = m.group('this')
    if case != 'multiline':
        previous_context = None
    if expectbegin and case not in ['begin', 'call', 'callfun', 'type'] \
       and not skipemptyends and groupcounter < 1:
        # Statements before any program unit: wrap them in an implicit
        # "program" group named after the source file.
        newname = os.path.basename(currentfilename).split('.')[0]
        outmess(
            'analyzeline: no group yet. Creating program group with name "%s".\n' % newname)
        gotnextfile = 0
        groupcounter = groupcounter + 1
        groupname[groupcounter] = 'program'
        groupcache[groupcounter] = {}
        grouplist[groupcounter] = []
        groupcache[groupcounter]['body'] = []
        groupcache[groupcounter]['vars'] = {}
        groupcache[groupcounter]['block'] = 'program'
        groupcache[groupcounter]['name'] = newname
        groupcache[groupcounter]['from'] = 'fromsky'
        expectbegin = 0
    if case in ['begin', 'call', 'callfun']:
        # Crack line => block,name,args,result
        block = block.lower()
        if re.match(r'block\s*data', block, re.I):
            block = 'block data'
        if re.match(r'python\s*module', block, re.I):
            block = 'python module'
        name, args, result, bind = _resolvenameargspattern(m.group('after'))
        if name is None:
            if block == 'block data':
                name = '_BLOCK_DATA_'
            else:
                name = ''
            if block not in ['interface', 'block data']:
                outmess('analyzeline: No name/args pattern found for line.\n')
        previous_context = (block, name, groupcounter)
        if args:
            args = rmbadname([x.strip()
                              for x in markoutercomma(args).split('@,@')])
        else:
            args = []
        if '' in args:
            while '' in args:
                args.remove('')
            outmess(
                'analyzeline: argument list is malformed (missing argument).\n')
        # end of crack line => block,name,args,result
        needmodule = 0
        needinterface = 0
        if case in ['call', 'callfun']:
            # A call only becomes a new block when the callee is an
            # argument of the current routine and not already interfaced.
            needinterface = 1
            if 'args' not in groupcache[groupcounter]:
                return
            if name not in groupcache[groupcounter]['args']:
                return
            for it in grouplist[groupcounter]:
                if it['name'] == name:
                    return
            if name in groupcache[groupcounter]['interfaced']:
                return
            block = {'call': 'subroutine', 'callfun': 'function'}[case]
        if f77modulename and neededmodule == -1 and groupcounter <= 1:
            neededmodule = groupcounter + 2
            needmodule = 1
            if block != 'interface':
                needinterface = 1
        # Create new block(s)
        groupcounter = groupcounter + 1
        groupcache[groupcounter] = {}
        grouplist[groupcounter] = []
        if needmodule:
            if verbose > 1:
                outmess('analyzeline: Creating module block %s\n' %
                        repr(f77modulename), 0)
            groupname[groupcounter] = 'module'
            groupcache[groupcounter]['block'] = 'python module'
            groupcache[groupcounter]['name'] = f77modulename
            groupcache[groupcounter]['from'] = ''
            groupcache[groupcounter]['body'] = []
            groupcache[groupcounter]['externals'] = []
            groupcache[groupcounter]['interfaced'] = []
            groupcache[groupcounter]['vars'] = {}
            groupcounter = groupcounter + 1
            groupcache[groupcounter] = {}
            grouplist[groupcounter] = []
        if needinterface:
            if verbose > 1:
                outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % (
                    groupcounter), 0)
            groupname[groupcounter] = 'interface'
            groupcache[groupcounter]['block'] = 'interface'
            groupcache[groupcounter]['name'] = 'unknown_interface'
            groupcache[groupcounter]['from'] = '%s:%s' % (
                groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name'])
            groupcache[groupcounter]['body'] = []
            groupcache[groupcounter]['externals'] = []
            groupcache[groupcounter]['interfaced'] = []
            groupcache[groupcounter]['vars'] = {}
            groupcounter = groupcounter + 1
            groupcache[groupcounter] = {}
            grouplist[groupcounter] = []
        groupname[groupcounter] = block
        groupcache[groupcounter]['block'] = block
        if not name:
            name = 'unknown_' + block
        groupcache[groupcounter]['prefix'] = m.group('before')
        groupcache[groupcounter]['name'] = rmbadname1(name)
        groupcache[groupcounter]['result'] = result
        if groupcounter == 1:
            groupcache[groupcounter]['from'] = currentfilename
        else:
            if f77modulename and groupcounter == 3:
                groupcache[groupcounter]['from'] = '%s:%s' % (
                    groupcache[groupcounter - 1]['from'], currentfilename)
            else:
                groupcache[groupcounter]['from'] = '%s:%s' % (
                    groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name'])
        for k in list(groupcache[groupcounter].keys()):
            if not groupcache[groupcounter][k]:
                del groupcache[groupcounter][k]
        groupcache[groupcounter]['args'] = args
        groupcache[groupcounter]['body'] = []
        groupcache[groupcounter]['externals'] = []
        groupcache[groupcounter]['interfaced'] = []
        groupcache[groupcounter]['vars'] = {}
        groupcache[groupcounter]['entry'] = {}
        # end of creation
        if block == 'type':
            groupcache[groupcounter]['varnames'] = []
        if case in ['call', 'callfun']:  # set parents variables
            if name not in groupcache[groupcounter - 2]['externals']:
                groupcache[groupcounter - 2]['externals'].append(name)
            groupcache[groupcounter]['vars'] = copy.deepcopy(
                groupcache[groupcounter - 2]['vars'])
            try:
                del groupcache[groupcounter]['vars'][name][
                    groupcache[groupcounter]['vars'][name]['attrspec'].index('external')]
            except Exception:
                pass
        if block in ['function', 'subroutine']:  # set global attributes
            try:
                groupcache[groupcounter]['vars'][name] = appenddecl(
                    groupcache[groupcounter]['vars'][name], groupcache[groupcounter - 2]['vars'][''])
            except Exception:
                pass
            if case == 'callfun':  # return type
                if result and result in groupcache[groupcounter]['vars']:
                    if not name == result:
                        groupcache[groupcounter]['vars'][name] = appenddecl(
                            groupcache[groupcounter]['vars'][name], groupcache[groupcounter]['vars'][result])
            # if groupcounter>1: # name is interfaced
            try:
                groupcache[groupcounter - 2]['interfaced'].append(name)
            except Exception:
                pass
        if block == 'function':
            # A type prefix on "function" declares the result variable's type.
            t = typespattern[0].match(m.group('before') + ' ' + name)
            if t:
                typespec, selector, attr, edecl = cracktypespec0(
                    t.group('this'), t.group('after'))
                updatevars(typespec, selector, attr, edecl)
        if case in ['call', 'callfun']:
            # A call produced a complete interface entry: close the routine
            # and its enclosing interface block right away.
            grouplist[groupcounter - 1].append(groupcache[groupcounter])
            grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter = groupcounter - 1  # end routine
            grouplist[groupcounter - 1].append(groupcache[groupcounter])
            grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter = groupcounter - 1  # end interface
    elif case == 'entry':
        # ENTRY statement: record the alternate entry point and its args.
        name, args, result, bind = _resolvenameargspattern(m.group('after'))
        if name is not None:
            if args:
                args = rmbadname([x.strip()
                                  for x in markoutercomma(args).split('@,@')])
            else:
                args = []
            assert result is None, repr(result)
            groupcache[groupcounter]['entry'][name] = args
            previous_context = ('entry', name, groupcounter)
    elif case == 'type':
        # Type declaration statement: crack it and update variable info.
        typespec, selector, attr, edecl = cracktypespec0(
            block, m.group('after'))
        last_name = updatevars(typespec, selector, attr, edecl)
        if last_name is not None:
            previous_context = ('variable', last_name, groupcounter)
    elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrisic']:
        # Attribute statement: attach the attribute to each listed name.
        edecl = groupcache[groupcounter]['vars']
        ll = m.group('after').strip()
        i = ll.find('::')
        if i < 0 and case == 'intent':
            # Old-style "intent(...) name-list": synthesize the '::'.
            i = markouterparen(ll).find('@)@') - 2
            ll = ll[:i + 1] + '::' + ll[i + 1:]
            i = ll.find('::')
            if ll[i:] == '::' and 'args' in groupcache[groupcounter]:
                outmess('All arguments will have attribute %s%s\n' %
                        (m.group('this'), ll[:i]))
                ll = ll + ','.join(groupcache[groupcounter]['args'])
        if i < 0:
            i = 0
            pl = ''
        else:
            pl = ll[:i].strip()
            ll = ll[i + 2:]
        ch = markoutercomma(pl).split('@,@')
        if len(ch) > 1:
            pl = ch[0]
            outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % (
                ','.join(ch[1:])))
        last_name = None
        for e in [x.strip() for x in markoutercomma(ll).split('@,@')]:
            m1 = namepattern.match(e)
            if not m1:
                if case in ['public', 'private']:
                    k = ''
                else:
                    print(m.groupdict())
                    outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n' % (
                        case, repr(e)))
                    continue
            else:
                k = rmbadname1(m1.group('name'))
            if k not in edecl:
                edecl[k] = {}
            if case == 'dimension':
                ap = case + m1.group('after')
            if case == 'intent':
                ap = m.group('this') + pl
                if _intentcallbackpattern.match(ap):
                    # intent(callback) names must appear in the argument list.
                    if k not in groupcache[groupcounter]['args']:
                        if groupcounter > 1:
                            if '__user__' not in groupcache[groupcounter - 2]['name']:
                                outmess(
                                    'analyzeline: missing __user__ module (could be nothing)\n')
                            # fixes ticket 1693
                            if k != groupcache[groupcounter]['name']:
                                outmess('analyzeline: appending intent(callback) %s'
                                        ' to %s arguments\n' % (k, groupcache[groupcounter]['name']))
                                groupcache[groupcounter]['args'].append(k)
                        else:
                            errmess(
                                'analyzeline: intent(callback) %s is ignored' % (k))
                    else:
                        errmess('analyzeline: intent(callback) %s is already'
                                ' in argument list' % (k))
            if case in ['optional', 'required', 'public', 'external', 'private', 'intrisic']:
                ap = case
            if 'attrspec' in edecl[k]:
                edecl[k]['attrspec'].append(ap)
            else:
                edecl[k]['attrspec'] = [ap]
            if case == 'external':
                if groupcache[groupcounter]['block'] == 'program':
                    outmess('analyzeline: ignoring program arguments\n')
                    continue
                if k not in groupcache[groupcounter]['args']:
                    continue
                if 'externals' not in groupcache[groupcounter]:
                    groupcache[groupcounter]['externals'] = []
                groupcache[groupcounter]['externals'].append(k)
            last_name = k
        groupcache[groupcounter]['vars'] = edecl
        if last_name is not None:
            previous_context = ('variable', last_name, groupcounter)
    elif case == 'parameter':
        # PARAMETER statement: evaluate each "name = expr" and store under '='.
        edecl = groupcache[groupcounter]['vars']
        ll = m.group('after').strip()[1:-1]
        last_name = None
        for e in markoutercomma(ll).split('@,@'):
            try:
                k, initexpr = [x.strip() for x in e.split('=')]
            except Exception:
                outmess(
                    'analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n' % (e, ll))
                continue
            params = get_parameters(edecl)
            k = rmbadname1(k)
            if k not in edecl:
                edecl[k] = {}
            if '=' in edecl[k] and (not edecl[k]['='] == initexpr):
                outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n' % (
                    k, edecl[k]['='], initexpr))
            t = determineexprtype(initexpr, params)
            if t:
                if t.get('typespec') == 'real':
                    # Convert Fortran d-exponent literals (1.5d0) to Python e-form.
                    tt = list(initexpr)
                    for m in real16pattern.finditer(initexpr):
                        tt[m.start():m.end()] = list(
                            initexpr[m.start():m.end()].lower().replace('d', 'e'))
                    initexpr = ''.join(tt)
                elif t.get('typespec') == 'complex':
                    initexpr = initexpr[1:].lower().replace('d', 'e').\
                        replace(',', '+1j*(')
            try:
                # NOTE: evaluates the Fortran expression with Python eval
                # using previously seen parameters as the namespace.
                v = eval(initexpr, {}, params)
            except (SyntaxError, NameError, TypeError) as msg:
                errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n'
                        % (initexpr, msg))
                continue
            edecl[k]['='] = repr(v)
            if 'attrspec' in edecl[k]:
                edecl[k]['attrspec'].append('parameter')
            else:
                edecl[k]['attrspec'] = ['parameter']
            last_name = k
        groupcache[groupcounter]['vars'] = edecl
        if last_name is not None:
            previous_context = ('variable', last_name, groupcounter)
    elif case == 'implicit':
        if m.group('after').strip().lower() == 'none':
            # "implicit none": mark implicit typing as disabled.
            groupcache[groupcounter]['implicit'] = None
        elif m.group('after'):
            if 'implicit' in groupcache[groupcounter]:
                impl = groupcache[groupcounter]['implicit']
            else:
                impl = {}
            if impl is None:
                outmess(
                    'analyzeline: Overwriting earlier "implicit none" statement.\n')
                impl = {}
            for e in markoutercomma(m.group('after')).split('@,@'):
                decl = {}
                m1 = re.match(
                    r'\s*(?P<this>.*?)\s*(\(\s*(?P<after>[a-z-, ]+)\s*\)\s*|)\Z', e, re.I)
                if not m1:
                    outmess(
                        'analyzeline: could not extract info of implicit statement part "%s"\n' % (e))
                    continue
                m2 = typespattern4implicit.match(m1.group('this'))
                if not m2:
                    outmess(
                        'analyzeline: could not extract types pattern of implicit statement part "%s"\n' % (e))
                    continue
                typespec, selector, attr, edecl = cracktypespec0(
                    m2.group('this'), m2.group('after'))
                kindselect, charselect, typename = cracktypespec(
                    typespec, selector)
                decl['typespec'] = typespec
                decl['kindselector'] = kindselect
                decl['charselector'] = charselect
                decl['typename'] = typename
                for k in list(decl.keys()):
                    if not decl[k]:
                        del decl[k]
                # Apply the declaration to every listed letter or a-z range.
                for r in markoutercomma(m1.group('after')).split('@,@'):
                    if '-' in r:
                        try:
                            begc, endc = [x.strip() for x in r.split('-')]
                        except Exception:
                            outmess(
                                'analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement\n' % r)
                            continue
                    else:
                        begc = endc = r.strip()
                    if not len(begc) == len(endc) == 1:
                        outmess(
                            'analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement (2)\n' % r)
                        continue
                    for o in range(ord(begc), ord(endc) + 1):
                        impl[chr(o)] = decl
            groupcache[groupcounter]['implicit'] = impl
    elif case == 'data':
        # DATA statement: split "names / values /" groups, honouring quotes
        # and parentheses, then record an init expression per variable.
        ll = []
        dl = ''   # accumulates the variable-name part of the current group
        il = ''   # accumulates the value part of the current group
        f = 0     # number of '/' separators seen for the current group
        fc = 1    # true while outside a character literal
        inp = 0   # parenthesis nesting depth
        for c in m.group('after'):
            if not inp:
                if c == "'":
                    fc = not fc
                if c == '/' and fc:
                    f = f + 1
                    continue
            if c == '(':
                inp = inp + 1
            elif c == ')':
                inp = inp - 1
            if f == 0:
                dl = dl + c
            elif f == 1:
                il = il + c
            elif f == 2:
                dl = dl.strip()
                if dl.startswith(','):
                    dl = dl[1:].strip()
                ll.append([dl, il])
                dl = c
                il = ''
                f = 0
        if f == 2:
            dl = dl.strip()
            if dl.startswith(','):
                dl = dl[1:].strip()
            ll.append([dl, il])
        vars = {}
        if 'vars' in groupcache[groupcounter]:
            vars = groupcache[groupcounter]['vars']
        last_name = None
        for l in ll:
            l = [x.strip() for x in l]
            if l[0][0] == ',':
                l[0] = l[0][1:]
            if l[0][0] == '(':
                outmess(
                    'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % l[0])
                continue
            i = 0
            j = 0
            llen = len(l[1])
            for v in rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')]):
                if v[0] == '(':
                    outmess(
                        'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % v)
                    # XXX: subsequent init expressions may get wrong values.
                    # Ignoring since data statements are irrelevant for
                    # wrapping.
                    continue
                fc = 0
                # Advance i past the next unquoted comma in the value list.
                while (i < llen) and (fc or not l[1][i] == ','):
                    if l[1][i] == "'":
                        fc = not fc
                    i = i + 1
                i = i + 1
                if v not in vars:
                    vars[v] = {}
                if '=' in vars[v] and not vars[v]['='] == l[1][j:i - 1]:
                    outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n' % (
                        v, vars[v]['='], l[1][j:i - 1]))
                vars[v]['='] = l[1][j:i - 1]
                j = i
                last_name = v
        groupcache[groupcounter]['vars'] = vars
        if last_name is not None:
            previous_context = ('variable', last_name, groupcounter)
    elif case == 'common':
        # COMMON statement: split "/block/ var-list" pairs; an unnamed
        # block is recorded as '_BLNK_'.
        line = m.group('after').strip()
        if not line[0] == '/':
            line = '//' + line
        cl = []
        f = 0
        bn = ''   # current common-block name
        ol = ''   # current variable list
        for c in line:
            if c == '/':
                f = f + 1
                continue
            if f >= 3:
                bn = bn.strip()
                if not bn:
                    bn = '_BLNK_'
                cl.append([bn, ol])
                f = f - 2
                bn = ''
                ol = ''
            if f % 2:
                bn = bn + c
            else:
                ol = ol + c
        bn = bn.strip()
        if not bn:
            bn = '_BLNK_'
        cl.append([bn, ol])
        commonkey = {}
        if 'common' in groupcache[groupcounter]:
            commonkey = groupcache[groupcounter]['common']
        for c in cl:
            if c[0] not in commonkey:
                commonkey[c[0]] = []
            for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]:
                if i:
                    commonkey[c[0]].append(i)
        groupcache[groupcounter]['common'] = commonkey
        previous_context = ('common', bn, groupcounter)
    elif case == 'use':
        # USE statement: record the module name, only-flag and rename map.
        m1 = re.match(
            r'\A\s*(?P<name>\b[\w]+\b)\s*((,(\s*\bonly\b\s*:|(?P<notonly>))\s*(?P<list>.*))|)\s*\Z', m.group('after'), re.I)
        if m1:
            mm = m1.groupdict()
            if 'use' not in groupcache[groupcounter]:
                groupcache[groupcounter]['use'] = {}
            name = m1.group('name')
            groupcache[groupcounter]['use'][name] = {}
            isonly = 0
            if 'list' in mm and mm['list'] is not None:
                if 'notonly' in mm and mm['notonly'] is None:
                    isonly = 1
                groupcache[groupcounter]['use'][name]['only'] = isonly
                ll = [x.strip() for x in mm['list'].split(',')]
                rl = {}
                for l in ll:
                    if '=' in l:
                        m2 = re.match(
                            r'\A\s*(?P<local>\b[\w]+\b)\s*=\s*>\s*(?P<use>\b[\w]+\b)\s*\Z', l, re.I)
                        if m2:
                            rl[m2.group('local').strip()] = m2.group(
                                'use').strip()
                        else:
                            outmess(
                                'analyzeline: Not local=>use pattern found in %s\n' % repr(l))
                    else:
                        rl[l] = l
                    groupcache[groupcounter]['use'][name]['map'] = rl
            else:
                pass
        else:
            print(m.groupdict())
            outmess('analyzeline: Could not crack the use statement.\n')
    elif case in ['f2pyenhancements']:
        if 'f2pyenhancements' not in groupcache[groupcounter]:
            groupcache[groupcounter]['f2pyenhancements'] = {}
        d = groupcache[groupcounter]['f2pyenhancements']
        if m.group('this') == 'usercode' and 'usercode' in d:
            # Multiple usercode sections accumulate into a list.
            if isinstance(d['usercode'], str):
                d['usercode'] = [d['usercode']]
            d['usercode'].append(m.group('after'))
        else:
            d[m.group('this')] = m.group('after')
    elif case == 'multiline':
        if previous_context is None:
            if verbose:
                outmess('analyzeline: No context for multiline block.\n')
            return
        gc = groupcounter
        appendmultiline(groupcache[gc],
                        previous_context[:2],
                        m.group('this'))
    else:
        if verbose > 1:
            print(m.groupdict())
            outmess('analyzeline: No code implemented for line.\n')
def appendmultiline(group, context_name, ml):
    """Append multiline text *ml* under *context_name* in *group*.

    Multiline f2py directive blocks are accumulated per context in the
    group's 'f2pymultilines' mapping.
    """
    store = group.setdefault('f2pymultilines', {})
    store.setdefault(context_name, []).append(ml)
    return
def cracktypespec0(typespec, ll):
    """Split a type declaration into (typespec, selector, attr, rest).

    ``typespec`` is the leading type keyword (possibly with embedded
    blanks, e.g. ``double  complex``); ``ll`` is the remainder of the
    statement.  Returns the normalized typespec, the raw kind/char
    selector text (only for types that take one), the attribute string
    found before a ``::`` separator (or None), and the remaining entity
    declaration text.  Returns None when the selector pattern fails.
    """
    selector = None
    attr = None
    # Normalize the two multi-word type names; everything else is just
    # stripped and lowercased.
    if re.match(r'double\s*complex', typespec, re.I):
        typespec = 'double complex'
    elif re.match(r'double\s*precision', typespec, re.I):
        typespec = 'double precision'
    else:
        typespec = typespec.strip().lower()
    m1 = selectpattern.match(markouterparen(ll))
    if not m1:
        outmess(
            'cracktypespec0: no kind/char_selector pattern found for line.\n')
        return
    d = m1.groupdict()
    for k in list(d.keys()):
        d[k] = unmarkouterparen(d[k])
    # Only these types carry a kind/char selector; for others the whole
    # line remains the entity declaration.
    if typespec in ['complex', 'integer', 'logical', 'real', 'character', 'type']:
        selector = d['this']
        ll = d['after']
    # Attributes, if present, appear before a '::' separator.
    i = ll.find('::')
    if i >= 0:
        attr = ll[:i].strip()
        ll = ll[i + 2:]
    return typespec, selector, attr, ll
#####
# Regular expressions used below (updatevars/cracktypespec) to take apart
# entity declarations.  The '@,@', '@(@', '@)@' tokens are the markers
# produced by markoutercomma()/markouterparen() for top-level separators.
# entity name followed by the rest of the declaration text
namepattern = re.compile(r'\s*(?P<name>\b[\w]+\b)\s*(?P<after>.*)\s*\Z', re.I)
# kind selector: '(kind=...)' / '(...)' form, or the '*<kind>' shorthand
kindselector = re.compile(
    r'\s*(\(\s*(kind\s*=)?\s*(?P<kind>.*)\s*\)|[*]\s*(?P<kind2>.*?))\s*\Z', re.I)
# character selector: '(<lenkind>)' form or the '*<charlen>' shorthand
charselector = re.compile(
    r'\s*(\((?P<lenkind>.*)\)|[*]\s*(?P<charlen>.*))\s*\Z', re.I)
# interior of a character selector: 'kind=..[,len=..]' or 'len=..[,kind=..]'
lenkindpattern = re.compile(
    r'\s*(kind\s*=\s*(?P<kind>.*?)\s*(@,@\s*len\s*=\s*(?P<len>.*)|)|(len\s*=\s*|)(?P<len2>.*?)\s*(@,@\s*(kind\s*=\s*|)(?P<kind2>.*)|))\s*\Z', re.I)
# entity tail: array spec, '*<len>' length, and '=init' or '/init/' data
lenarraypattern = re.compile(
    r'\s*(@\(@\s*(?!/)\s*(?P<array>.*?)\s*@\)@\s*[*]\s*(?P<len>.*?)|([*]\s*(?P<len2>.*?)|)\s*(@\(@\s*(?!/)\s*(?P<array2>.*?)\s*@\)@|))\s*(=\s*(?P<init>.*?)|(@\(@|)/\s*(?P<init2>.*?)\s*/(@\)@|)|)\s*\Z', re.I)
def removespaces(expr):
    """Strip *expr* and drop interior blanks that touch a delimiter.

    A space is removed when either of its neighbours is one of
    ``()[]{}=+-/*`` or another space; spaces between two identifier
    characters are kept.
    """
    expr = expr.strip()
    if len(expr) <= 1:
        return expr
    separators = "()[]{}=+-/* "
    kept = [expr[0]]
    for pos in range(1, len(expr) - 1):
        if expr[pos] == ' ' and (expr[pos + 1] in separators or
                                 expr[pos - 1] in separators):
            continue
        kept.append(expr[pos])
    kept.append(expr[-1])
    return ''.join(kept)
def markinnerspaces(line):
    """Replace spaces inside quoted string literals of *line* with '@_@'.

    Handles both single- and double-quoted strings and backslash-escaped
    quote characters.

    Bug fix: the previous implementation tracked quoting with a counter
    that was only ever incremented (its decrement branch was dead code,
    duplicating the increment's condition), so after the first string on
    a line was closed the "inside a string" test (counter == 1) could
    never be true again and spaces in any subsequent quoted string were
    left unmarked.  A boolean toggle fixes this — matching the upstream
    NumPy correction of the same routine.
    """
    fragment = ''
    inside = False          # currently inside a quoted literal?
    current_quote = None    # the quote character that opened the literal
    escaped = ''            # previous character, for backslash escapes
    for c in line:
        # A backslash-escaped quote/backslash never opens or closes a string.
        if escaped == '\\' and c in ['\\', '\'', '"']:
            fragment += c
            escaped = c
            continue
        if not inside and c in ['\'', '"']:
            current_quote = c
        if c == current_quote:
            inside = not inside
        elif c == ' ' and inside:
            fragment += '@_@'
            continue
        fragment += c
        escaped = c
    return fragment
def updatevars(typespec, selector, attrspec, entitydecl):
    """Merge one declaration statement into the current group's 'vars'.

    Splits ``entitydecl`` into individual entities, cracks each entity's
    name, array spec, length and initializer, and merges the result with
    any existing declaration of the same name in
    ``groupcache[groupcounter]['vars']``.  Conflicting redeclarations are
    reported via outmess/errmess and ignored.  Returns the name of the
    last entity processed (or None).
    """
    global groupcache, groupcounter
    last_name = None
    kindselect, charselect, typename = cracktypespec(typespec, selector)
    # Normalize the attribute list: split on top-level commas and
    # lowercase each attribute's leading keyword.
    if attrspec:
        attrspec = [x.strip() for x in markoutercomma(attrspec).split('@,@')]
        l = []
        c = re.compile(r'(?P<start>[a-zA-Z]+)')
        for a in attrspec:
            if not a:
                continue
            m = c.match(a)
            if m:
                s = m.group('start').lower()
                a = s + a[len(s):]
            l.append(a)
        attrspec = l
    # Split the entity list on top-level commas; markinnerspaces protects
    # spaces inside string literals ('@_@') while blanks act as a second
    # separator, restored afterwards.
    el = [x.strip() for x in markoutercomma(entitydecl).split('@,@')]
    el1 = []
    for e in el:
        for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' ').split('@ @')]:
            if e1:
                el1.append(e1.replace('@_@', ' '))
    for e in el1:
        m = namepattern.match(e)
        if not m:
            outmess(
                'updatevars: no name pattern found for entity=%s. Skipping.\n' % (repr(e)))
            continue
        ename = rmbadname1(m.group('name'))
        edecl = {}
        if ename in groupcache[groupcounter]['vars']:
            # Entity already known: merge, warning on contradictions.
            edecl = groupcache[groupcounter]['vars'][ename].copy()
            not_has_typespec = 'typespec' not in edecl
            if not_has_typespec:
                edecl['typespec'] = typespec
            elif typespec and (not typespec == edecl['typespec']):
                outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % (
                    ename, edecl['typespec'], typespec))
            if 'kindselector' not in edecl:
                edecl['kindselector'] = copy.copy(kindselect)
            elif kindselect:
                for k in list(kindselect.keys()):
                    if k in edecl['kindselector'] and (not kindselect[k] == edecl['kindselector'][k]):
                        outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (
                            k, ename, edecl['kindselector'][k], kindselect[k]))
                    else:
                        edecl['kindselector'][k] = copy.copy(kindselect[k])
            if 'charselector' not in edecl and charselect:
                if not_has_typespec:
                    edecl['charselector'] = charselect
                else:
                    errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n'
                            % (ename, charselect))
            elif charselect:
                for k in list(charselect.keys()):
                    if k in edecl['charselector'] and (not charselect[k] == edecl['charselector'][k]):
                        outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (
                            k, ename, edecl['charselector'][k], charselect[k]))
                    else:
                        edecl['charselector'][k] = copy.copy(charselect[k])
            if 'typename' not in edecl:
                edecl['typename'] = typename
            elif typename and (not edecl['typename'] == typename):
                outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". Ignoring.\n' % (
                    ename, edecl['typename'], typename))
            if 'attrspec' not in edecl:
                edecl['attrspec'] = copy.copy(attrspec)
            elif attrspec:
                for a in attrspec:
                    if a not in edecl['attrspec']:
                        edecl['attrspec'].append(a)
        else:
            # First time we see this entity: take the statement's specs.
            edecl['typespec'] = copy.copy(typespec)
            edecl['kindselector'] = copy.copy(kindselect)
            edecl['charselector'] = copy.copy(charselect)
            edecl['typename'] = typename
            edecl['attrspec'] = copy.copy(attrspec)
        if m.group('after'):
            # Crack the entity tail: array spec, '*len', initializer.
            m1 = lenarraypattern.match(markouterparen(m.group('after')))
            if m1:
                d1 = m1.groupdict()
                # Fold the alternative capture groups (len2/array2/init2)
                # into the primary keys.
                for lk in ['len', 'array', 'init']:
                    if d1[lk + '2'] is not None:
                        d1[lk] = d1[lk + '2']
                        del d1[lk + '2']
                for k in list(d1.keys()):
                    if d1[k] is not None:
                        d1[k] = unmarkouterparen(d1[k])
                    else:
                        del d1[k]
                if 'len' in d1 and 'array' in d1:
                    if d1['len'] == '':
                        d1['len'] = d1['array']
                        del d1['array']
                    else:
                        # 'name(dims)*len' is treated as an extra dimension.
                        d1['array'] = d1['array'] + ',' + d1['len']
                        del d1['len']
                        errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n' % (
                            typespec, e, typespec, ename, d1['array']))
                if 'array' in d1:
                    dm = 'dimension(%s)' % d1['array']
                    if 'attrspec' not in edecl or (not edecl['attrspec']):
                        edecl['attrspec'] = [dm]
                    else:
                        edecl['attrspec'].append(dm)
                        # Reject a second, different dimension attribute.
                        for dm1 in edecl['attrspec']:
                            if dm1[:9] == 'dimension' and dm1 != dm:
                                del edecl['attrspec'][-1]
                                errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n'
                                        % (ename, dm1, dm))
                                break
                if 'len' in d1:
                    # '*len' maps to kind for numeric types, to character
                    # length for character type.
                    if typespec in ['complex', 'integer', 'logical', 'real']:
                        if ('kindselector' not in edecl) or (not edecl['kindselector']):
                            edecl['kindselector'] = {}
                        edecl['kindselector']['*'] = d1['len']
                    elif typespec == 'character':
                        if ('charselector' not in edecl) or (not edecl['charselector']):
                            edecl['charselector'] = {}
                        if 'len' in edecl['charselector']:
                            del edecl['charselector']['len']
                        edecl['charselector']['*'] = d1['len']
                if 'init' in d1:
                    if '=' in edecl and (not edecl['='] == d1['init']):
                        outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % (
                            ename, edecl['='], d1['init']))
                    else:
                        edecl['='] = d1['init']
            else:
                outmess('updatevars: could not crack entity declaration "%s". Ignoring.\n' % (
                    ename + m.group('after')))
        # Drop empty entries before storing the declaration.
        for k in list(edecl.keys()):
            if not edecl[k]:
                del edecl[k]
        groupcache[groupcounter]['vars'][ename] = edecl
        if 'varnames' in groupcache[groupcounter]:
            groupcache[groupcounter]['varnames'].append(ename)
        last_name = ename
    return last_name
def cracktypespec(typespec, selector):
    """Crack a selector string into (kindselect, charselect, typename).

    For numeric/logical types the selector yields a kind-selector dict;
    for 'character' a char-selector dict (keys 'len', 'kind', '*');
    for 'type' the derived-type name.  Unused entries stay None.
    Returns None (bare) when the selector does not match the expected
    pattern.
    """
    kindselect = None
    charselect = None
    typename = None
    if selector:
        if typespec in ['complex', 'integer', 'logical', 'real']:
            kindselect = kindselector.match(selector)
            if not kindselect:
                outmess(
                    'cracktypespec: no kindselector pattern found for %s\n' % (repr(selector)))
                return
            kindselect = kindselect.groupdict()
            # '*<kind>' shorthand is stored under the '*' key.
            kindselect['*'] = kindselect['kind2']
            del kindselect['kind2']
            for k in list(kindselect.keys()):
                if not kindselect[k]:
                    del kindselect[k]
            for k, i in list(kindselect.items()):
                kindselect[k] = rmbadname1(i)
        elif typespec == 'character':
            charselect = charselector.match(selector)
            if not charselect:
                outmess(
                    'cracktypespec: no charselector pattern found for %s\n' % (repr(selector)))
                return
            charselect = charselect.groupdict()
            # '*<charlen>' shorthand is stored under the '*' key.
            charselect['*'] = charselect['charlen']
            del charselect['charlen']
            if charselect['lenkind']:
                # Parenthesized form: split into len= and kind= parts.
                lenkind = lenkindpattern.match(
                    markoutercomma(charselect['lenkind']))
                lenkind = lenkind.groupdict()
                for lk in ['len', 'kind']:
                    if lenkind[lk + '2']:
                        lenkind[lk] = lenkind[lk + '2']
                    charselect[lk] = lenkind[lk]
                    del lenkind[lk + '2']
                del charselect['lenkind']
            for k in list(charselect.keys()):
                if not charselect[k]:
                    del charselect[k]
            for k, i in list(charselect.items()):
                charselect[k] = rmbadname1(i)
        elif typespec == 'type':
            # 'type(<name>)' — extract the derived-type name.
            typename = re.match(r'\s*\(\s*(?P<name>\w+)\s*\)', selector, re.I)
            if typename:
                typename = typename.group('name')
            else:
                outmess('cracktypespec: no typename found in %s\n' %
                        (repr(typespec + selector)))
        else:
            outmess('cracktypespec: no selector used for %s\n' %
                    (repr(selector)))
    return kindselect, charselect, typename
######
def setattrspec(decl, attr, force=0):
    """Add attribute *attr* to declaration dict *decl* and return it.

    With ``force`` the attribute is appended unconditionally (duplicates
    allowed); otherwise it is appended only when not already present.
    """
    if not decl:
        decl = {}
    if not attr:
        return decl
    if 'attrspec' not in decl:
        decl['attrspec'] = [attr]
        return decl
    spec = decl['attrspec']
    # NOTE(review): the original special-cased static/automatic and
    # public/private pairs, but every branch (including the fallback)
    # appended the attribute, so the net effect is append-if-missing.
    if force or attr not in spec:
        spec.append(attr)
    return decl
def setkindselector(decl, sel, force=0):
    """Merge kind-selector dict *sel* into declaration *decl*.

    Existing keys win unless ``force`` is set.
    """
    if not decl:
        decl = {}
    if not sel:
        return decl
    if 'kindselector' not in decl:
        decl['kindselector'] = sel
        return decl
    existing = decl['kindselector']
    for key in list(sel.keys()):
        if force or key not in existing:
            existing[key] = sel[key]
    return decl
def setcharselector(decl, sel, force=0):
    """Merge char-selector dict *sel* into declaration *decl*.

    Existing keys win unless ``force`` is set.
    """
    if not decl:
        decl = {}
    if not sel:
        return decl
    if 'charselector' not in decl:
        decl['charselector'] = sel
        return decl
    existing = decl['charselector']
    for key in list(sel.keys()):
        if force or key not in existing:
            existing[key] = sel[key]
    return decl
def getblockname(block, unknown='unknown'):
    """Return the block's 'name' entry, or *unknown* when it has none."""
    return block.get('name', unknown)
# post processing
def setmesstext(block):
    """Record a file-position banner for diagnostic messages.

    Stores 'In: <from>:<name>' into the module-global
    ``filepositiontext``; blocks lacking those keys (or non-dict input)
    are silently ignored.
    """
    global filepositiontext
    try:
        filepositiontext = f"In: {block['from']}:{block['name']}\n"
    except Exception:
        pass
def get_usedict(block):
    """Collect the 'use' mapping of *block*, merged over its ancestors.

    Entries from the block itself override same-named entries inherited
    from parent blocks.
    """
    usedict = get_usedict(block['parent_block']) if 'parent_block' in block else {}
    if 'use' in block:
        usedict.update(block['use'])
    return usedict
def get_useparameters(block, param_map=None):
    """Collect parameter values from the F90 modules *block* USEs.

    Walks the block's (inherited) use-dict, looks each used module up in
    the global ``f90modulevars`` cache and merges that module's
    parameters into ``param_map`` (later modules override earlier
    entries, with a warning).  Renaming maps (``use m, a => b``) are not
    implemented and only reported.
    """
    global f90modulevars
    if param_map is None:
        param_map = {}
    usedict = get_usedict(block)
    if not usedict:
        return param_map
    for usename, mapping in list(usedict.items()):
        usename = usename.lower()
        if usename not in f90modulevars:
            # Module was not analyzed (e.g. defined outside the input files).
            outmess('get_useparameters: no module %s info used by %s\n' %
                    (usename, block.get('name')))
            continue
        mvars = f90modulevars[usename]
        params = get_parameters(mvars)
        if not params:
            continue
        # XXX: apply mapping
        if mapping:
            errmess('get_useparameters: mapping for %s not impl.' % (mapping))
        for k, v in list(params.items()):
            if k in param_map:
                outmess('get_useparameters: overriding parameter %s with'
                        ' value from module %s' % (repr(k), repr(usename)))
            param_map[k] = v
    return param_map
def postcrack2(block, tab='', param_map=None):
    """Second post-processing pass: substitute module parameters as kinds.

    Recursively walks *block* (or a list of blocks) and replaces each
    variable's symbolic kind value with its numeric value when the name
    is found in the use-parameter map.  A no-op when no F90 module
    variables were collected.
    """
    global f90modulevars
    if not f90modulevars:
        return block
    if isinstance(block, list):
        ret = [postcrack2(g, tab=tab + '\t', param_map=param_map)
               for g in block]
        return ret
    setmesstext(block)
    outmess('%sBlock: %s\n' % (tab, block['name']), 0)
    if param_map is None:
        param_map = get_useparameters(block)
    if param_map is not None and 'vars' in block:
        vars = block['vars']
        for n in list(vars.keys()):
            var = vars[n]
            if 'kindselector' in var:
                kind = var['kindselector']
                if 'kind' in kind:
                    val = kind['kind']
                    # Replace a symbolic kind by its parameter value.
                    if val in param_map:
                        kind['kind'] = param_map[val]
    new_body = [postcrack2(b, tab=tab + '\t', param_map=param_map)
                for b in block['body']]
    block['body'] = new_body
    return block
def postcrack(block, args=None, tab=''):
    """First post-processing pass over a cracked block (or block list).

    Analyzes arguments, common blocks, variables and the block body, and
    synthesizes a ``<name>__user__routines`` python module wrapping the
    interfaces of external (callback) arguments.  For a list, user
    routines are sorted to appear first.

    TODO:
          function return values
          determine expression types if in argument list
    """
    global usermodules, onlyfunctions
    if isinstance(block, list):
        gret = []
        uret = []
        for g in block:
            setmesstext(g)
            g = postcrack(g, tab=tab + '\t')
            # sort user routines to appear first
            if 'name' in g and '__user__' in g['name']:
                uret.append(g)
            else:
                gret.append(g)
        return uret + gret
    setmesstext(block)
    # NOTE(review): 'and' here looks like it was meant to be 'or' (a dict
    # lacking 'block' passes this guard) — confirm before changing, since
    # callers appear to always supply a dict with a 'block' key.
    if not isinstance(block, dict) and 'block' not in block:
        raise Exception('postcrack: Expected block dictionary instead of ' +
                        str(block))
    if 'name' in block and not block['name'] == 'unknown_interface':
        outmess('%sBlock: %s\n' % (tab, block['name']), 0)
    block = analyzeargs(block)
    block = analyzecommon(block)
    block['vars'] = analyzevars(block)
    block['sortvars'] = sortvarnames(block['vars'])
    if 'args' in block and block['args']:
        args = block['args']
    block['body'] = analyzebody(block, args, tab=tab)
    # Collect already-defined __user__ modules from the use-statements.
    userisdefined = []
    if 'use' in block:
        useblock = block['use']
        for k in list(useblock.keys()):
            if '__user__' in k:
                userisdefined.append(k)
    else:
        useblock = {}
    name = ''
    if 'name' in block:
        name = block['name']
    # and not userisdefined: # Build a __user__ module
    if 'externals' in block and block['externals']:
        interfaced = []
        if 'interfaced' in block:
            interfaced = block['interfaced']
        mvars = copy.copy(block['vars'])
        if name:
            mname = name + '__user__routines'
        else:
            mname = 'unknown__user__routines'
        # Make the synthesized module name unique.
        if mname in userisdefined:
            i = 1
            while '%s_%i' % (mname, i) in userisdefined:
                i = i + 1
            mname = '%s_%i' % (mname, i)
        interface = {'block': 'interface', 'body': [],
                     'vars': {}, 'name': name + '_user_interface'}
        for e in block['externals']:
            if e in interfaced:
                # Move the external's interface body out of the block
                # and into the synthesized user interface.
                edef = []
                j = -1
                for b in block['body']:
                    j = j + 1
                    if b['block'] == 'interface':
                        i = -1
                        for bb in b['body']:
                            i = i + 1
                            if 'name' in bb and bb['name'] == e:
                                edef = copy.copy(bb)
                                del b['body'][i]
                                break
                        if edef:
                            if not b['body']:
                                del block['body'][j]
                            del interfaced[interfaced.index(e)]
                            break
                interface['body'].append(edef)
            else:
                if e in mvars and not isexternal(mvars[e]):
                    interface['vars'][e] = mvars[e]
        if interface['vars'] or interface['body']:
            block['interfaced'] = interfaced
            mblock = {'block': 'python module', 'body': [
                interface], 'vars': {}, 'name': mname, 'interfaced': block['externals']}
            useblock[mname] = {}
            usermodules.append(mblock)
    if useblock:
        block['use'] = useblock
    return block
def sortvarnames(vars):
    """Order variable names so each name follows the names it depends on.

    Names without dependencies keep their original order; dependent
    names are rotated to the back until their prerequisites have been
    emitted.  On a dependency cycle an error is reported and the
    remaining names are appended as-is.
    """
    independent = []
    dependent = []
    for name in list(vars.keys()):
        if vars[name].get('depend'):
            dependent.append(name)
        else:
            independent.append(name)
    remaining = len(dependent)
    rotations = 0
    while dependent:  # XXX: How to catch dependence cycles correctly?
        candidate = dependent[0]
        blocked = any(w in vars[candidate]['depend'] for w in dependent[1:])
        if blocked:
            # Some prerequisite is still pending: rotate to the back.
            dependent = dependent[1:] + [candidate]
            rotations = rotations + 1
            if rotations > remaining:
                errmess('sortvarnames: failed to compute dependencies because'
                        ' of cyclic dependencies between '
                        + ', '.join(dependent) + '\n')
                independent = independent + dependent
                break
        else:
            independent.append(candidate)
            dependent = dependent[1:]
            remaining = len(dependent)
            rotations = 0
    return independent
def analyzecommon(block):
    """Resolve COMMON-block entries of *block* into its 'vars' table.

    Each common entry '<name>[(<dims>)]' is cracked; a dimension spec is
    attached to the variable's attrspec, and variables unknown so far
    are created.  The common lists are rewritten to bare names and the
    newly created names collected into block['commonvars'].
    """
    if not hascommon(block):
        return block
    commonvars = []
    for k in list(block['common'].keys()):
        comvars = []
        for e in block['common'][k]:
            m = re.match(
                r'\A\s*\b(?P<name>.*?)\b\s*(\((?P<dims>.*?)\)|)\s*\Z', e, re.I)
            if m:
                dims = []
                if m.group('dims'):
                    dims = [x.strip()
                            for x in markoutercomma(m.group('dims')).split('@,@')]
                n = rmbadname1(m.group('name').strip())
                if n in block['vars']:
                    # Known variable: extend its attributes with the
                    # dimension spec from the common statement.
                    if 'attrspec' in block['vars'][n]:
                        block['vars'][n]['attrspec'].append(
                            'dimension(%s)' % (','.join(dims)))
                    else:
                        block['vars'][n]['attrspec'] = [
                            'dimension(%s)' % (','.join(dims))]
                else:
                    # New variable introduced by the common statement.
                    if dims:
                        block['vars'][n] = {
                            'attrspec': ['dimension(%s)' % (','.join(dims))]}
                    else:
                        block['vars'][n] = {}
                    if n not in commonvars:
                        commonvars.append(n)
            else:
                n = e
                errmess(
                    'analyzecommon: failed to extract "<name>[(<dims>)]" from "%s" in common /%s/.\n' % (e, k))
            comvars.append(n)
        block['common'][k] = comvars
    if 'commonvars' not in block:
        block['commonvars'] = commonvars
    else:
        block['commonvars'] = block['commonvars'] + commonvars
    return block
def analyzebody(block, args, tab=''):
    """Post-process the sub-blocks of *block*, returning the new body.

    Applies skipfuncs/onlyfuncs filtering to functions and subroutines,
    saves their Fortran interface text, recurses via postcrack, routes
    python modules into the global ``usermodules`` list, and caches
    module variables in ``f90modulevars``.
    """
    global usermodules, skipfuncs, onlyfuncs, f90modulevars
    setmesstext(block)
    body = []
    for b in block['body']:
        b['parent_block'] = block
        if b['block'] in ['function', 'subroutine']:
            # Only keep routines requested via args (when given).
            if args is not None and b['name'] not in args:
                continue
            else:
                as_ = b['args']
            if b['name'] in skipfuncs:
                continue
            if onlyfuncs and b['name'] not in onlyfuncs:
                continue
            # Keep the original interface text for later code generation.
            b['saved_interface'] = crack2fortrangen(
                b, '\n' + ' ' * 6, as_interface=True)
        else:
            as_ = args
        b = postcrack(b, as_, tab=tab + '\t')
        # Drop empty interfaces unless they carry f2py enhancements.
        if b['block'] == 'interface' and not b['body']:
            if 'f2pyenhancements' not in b:
                continue
        if b['block'].replace(' ', '') == 'pythonmodule':
            usermodules.append(b)
        else:
            if b['block'] == 'module':
                f90modulevars[b['name']] = b['vars']
            body.append(b)
    return body
def buildimplicitrules(block):
    """Return (implicitrules, attrrules) for *block*.

    ``implicitrules`` maps a variable's initial letter to its implied
    type spec, or is None when the block declared IMPLICIT NONE.
    ``attrrules`` maps initial letters to the 'static'/'automatic'
    attribute when the implicit statement specified one of those.

    Bug fix: the original bound ``implicitrules`` directly to the shared
    module-level ``defaultimplicitrules`` dict and then assigned into
    it, so one routine's IMPLICIT statement leaked into every routine
    analyzed afterwards.  A shallow copy keeps the defaults pristine
    (entries are replaced wholesale, so a shallow copy suffices).
    """
    setmesstext(block)
    implicitrules = dict(defaultimplicitrules)
    attrrules = {}
    if 'implicit' in block:
        if block['implicit'] is None:
            # IMPLICIT NONE: no implicit typing at all.
            implicitrules = None
            if verbose > 1:
                outmess(
                    'buildimplicitrules: no implicit rules for routine %s.\n' % repr(block['name']))
        else:
            for k in list(block['implicit'].keys()):
                if block['implicit'][k].get('typespec') not in ['static', 'automatic']:
                    implicitrules[k] = block['implicit'][k]
                else:
                    attrrules[k] = block['implicit'][k]['typespec']
    return implicitrules, attrrules
def myeval(e, g=None, l=None):
    """eval *e* and return the result only if it is a plain int or float.

    The exact-type test deliberately rejects bool and any other type
    with a ValueError.
    """
    r = eval(e, g, l)
    if type(r) in [type(0), type(0.0)]:
        return r
    raise ValueError('r=%r' % (r))
# matches a bare identifier (expression consisting of one word only)
getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I)
def getlincoef(e, xset):  # e = a*x+b ; x in xset
    """Try to decompose expression *e* as a*x + b for some x in *xset*.

    Returns (a, b, x); (0, c, None) when *e* is a constant; (1, 0, e)
    when *e* is itself a bare name; (None, None, None) when no linear
    form is detected.  Linearity is established numerically by
    substituting x = 0, 1, 0.5 and 1.5 and checking a*x + b against the
    evaluated results.
    """
    try:
        c = int(myeval(e, {}, {}))
        return 0, c, None
    except Exception:
        pass
    if getlincoef_re_1.match(e):
        return 1, 0, e
    len_e = len(e)
    for x in xset:
        if len(x) > len_e:
            continue
        if re.search(r'\w\s*\([^)]*\b' + x + r'\b', e):
            # skip function calls having x as an argument, e.g max(1, x)
            continue
        re_1 = re.compile(r'(?P<before>.*?)\b' + x + r'\b(?P<after>.*)', re.I)
        m = re_1.match(e)
        if m:
            try:
                # Substitute every occurrence of x with a literal value,
                # then evaluate; repeated for four probe points.
                m1 = re_1.match(e)
                while m1:
                    ee = '%s(%s)%s' % (
                        m1.group('before'), 0, m1.group('after'))
                    m1 = re_1.match(ee)
                b = myeval(ee, {}, {})
                m1 = re_1.match(e)
                while m1:
                    ee = '%s(%s)%s' % (
                        m1.group('before'), 1, m1.group('after'))
                    m1 = re_1.match(ee)
                a = myeval(ee, {}, {}) - b
                m1 = re_1.match(e)
                while m1:
                    ee = '%s(%s)%s' % (
                        m1.group('before'), 0.5, m1.group('after'))
                    m1 = re_1.match(ee)
                c = myeval(ee, {}, {})
                # computing another point to be sure that expression is linear
                m1 = re_1.match(e)
                while m1:
                    ee = '%s(%s)%s' % (
                        m1.group('before'), 1.5, m1.group('after'))
                    m1 = re_1.match(ee)
                c2 = myeval(ee, {}, {})
                if (a * 0.5 + b == c and a * 1.5 + b == c2):
                    return a, b, x
            except Exception:
                pass
            break
    return None, None, None
# matches a plain (lowercase-led) variable name
_varname_match = re.compile(r'\A[a-z]\w*\Z').match
def getarrlen(dl, args, star='*'):
    """Build the length expression for a dimension range dl = [lower, upper].

    Returns a triple (length_expr, dependent_name, inverse_suffix) where
    ``length_expr`` is a string (or repr of an int when it folds to a
    constant), ``dependent_name`` is the argument name the length
    depends on linearly (or None), and ``inverse_suffix`` is the tail of
    the inverted expression used to recover that argument (or None).
    Linear coefficients are obtained via getlincoef().
    """
    edl = []
    # Evaluate each bound to a constant where possible.
    try:
        edl.append(myeval(dl[0], {}, {}))
    except Exception:
        edl.append(dl[0])
    try:
        edl.append(myeval(dl[1], {}, {}))
    except Exception:
        edl.append(dl[1])
    # Form 'upper - lower + 1', folding constant parts in.
    if isinstance(edl[0], int):
        p1 = 1 - edl[0]
        if p1 == 0:
            d = str(dl[1])
        elif p1 < 0:
            d = '%s-%s' % (dl[1], -p1)
        else:
            d = '%s+%s' % (dl[1], p1)
    elif isinstance(edl[1], int):
        p1 = 1 + edl[1]
        if p1 == 0:
            d = '-(%s)' % (dl[0])
        else:
            d = '%s-(%s)' % (p1, dl[0])
    else:
        d = '%s-(%s)+1' % (dl[1], dl[0])
    try:
        return repr(myeval(d, {}, {})), None, None
    except Exception:
        pass
    # Non-constant length: try a linear decomposition of both bounds.
    d1, d2 = getlincoef(dl[0], args), getlincoef(dl[1], args)
    if None not in [d1[0], d2[0]]:
        if (d1[0], d2[0]) == (0, 0):
            return repr(d2[1] - d1[1] + 1), None, None
        b = d2[1] - d1[1] + 1
        d1 = (d1[0], 0, d1[2])
        d2 = (d2[0], b, d2[2])
        # Single-argument cases: emit expr plus its inverse suffix.
        if d1[0] == 0 and d2[2] in args:
            if b < 0:
                return '%s * %s - %s' % (d2[0], d2[2], -b), d2[2], '+%s)/(%s)' % (-b, d2[0])
            elif b:
                return '%s * %s + %s' % (d2[0], d2[2], b), d2[2], '-%s)/(%s)' % (b, d2[0])
            else:
                return '%s * %s' % (d2[0], d2[2]), d2[2], ')/(%s)' % (d2[0])
        if d2[0] == 0 and d1[2] in args:
            if b < 0:
                return '%s * %s - %s' % (-d1[0], d1[2], -b), d1[2], '+%s)/(%s)' % (-b, -d1[0])
            elif b:
                return '%s * %s + %s' % (-d1[0], d1[2], b), d1[2], '-%s)/(%s)' % (b, -d1[0])
            else:
                return '%s * %s' % (-d1[0], d1[2]), d1[2], ')/(%s)' % (-d1[0])
        if d1[2] == d2[2] and d1[2] in args:
            a = d2[0] - d1[0]
            if not a:
                return repr(b), None, None
            if b < 0:
                return '%s * %s - %s' % (a, d1[2], -b), d2[2], '+%s)/(%s)' % (-b, a)
            elif b:
                return '%s * %s + %s' % (a, d1[2], b), d2[2], '-%s)/(%s)' % (b, a)
            else:
                return '%s * %s' % (a, d1[2]), d2[2], ')/(%s)' % (a)
        # Two distinct symbols, or symbols not in args: build the string
        # formula piecewise; names outside args are parenthesized and
        # reported when they look like undefined variables.
        if d1[0] == d2[0] == 1:
            c = str(d1[2])
            if c not in args:
                if _varname_match(c):
                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c))
                c = '(%s)' % c
            if b == 0:
                d = '%s-%s' % (d2[2], c)
            elif b < 0:
                d = '%s-%s-%s' % (d2[2], c, -b)
            else:
                d = '%s-%s+%s' % (d2[2], c, b)
        elif d1[0] == 0:
            c2 = str(d2[2])
            if c2 not in args:
                if _varname_match(c2):
                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c2))
                c2 = '(%s)' % c2
            if d2[0] == 1:
                pass
            elif d2[0] == -1:
                c2 = '-%s' % c2
            else:
                c2 = '%s*%s' % (d2[0], c2)
            if b == 0:
                d = c2
            elif b < 0:
                d = '%s-%s' % (c2, -b)
            else:
                d = '%s+%s' % (c2, b)
        elif d2[0] == 0:
            c1 = str(d1[2])
            if c1 not in args:
                if _varname_match(c1):
                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c1))
                c1 = '(%s)' % c1
            if d1[0] == 1:
                c1 = '-%s' % c1
            elif d1[0] == -1:
                c1 = '+%s' % c1
            elif d1[0] < 0:
                c1 = '+%s*%s' % (-d1[0], c1)
            else:
                c1 = '-%s*%s' % (d1[0], c1)
            if b == 0:
                d = c1
            elif b < 0:
                d = '%s-%s' % (c1, -b)
            else:
                d = '%s+%s' % (c1, b)
        else:
            c1 = str(d1[2])
            if c1 not in args:
                if _varname_match(c1):
                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c1))
                c1 = '(%s)' % c1
            if d1[0] == 1:
                c1 = '-%s' % c1
            elif d1[0] == -1:
                c1 = '+%s' % c1
            elif d1[0] < 0:
                c1 = '+%s*%s' % (-d1[0], c1)
            else:
                c1 = '-%s*%s' % (d1[0], c1)
            c2 = str(d2[2])
            if c2 not in args:
                if _varname_match(c2):
                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c2))
                c2 = '(%s)' % c2
            if d2[0] == 1:
                pass
            elif d2[0] == -1:
                c2 = '-%s' % c2
            else:
                c2 = '%s*%s' % (d2[0], c2)
            if b == 0:
                d = '%s%s' % (c2, c1)
            elif b < 0:
                d = '%s%s-%s' % (c2, c1, -b)
            else:
                d = '%s%s+%s' % (c2, c1, b)
    return d, None, None
word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I)
def _get_depend_dict(name, vars, deps):
if name in vars:
words = vars[name].get('depend', [])
if '=' in vars[name] and not isstring(vars[name]):
for word in word_pattern.findall(vars[name]['=']):
if word not in words and word in vars:
words.append(word)
for word in words[:]:
for w in deps.get(word, []) \
or _get_depend_dict(word, vars, deps):
if w not in words:
words.append(w)
else:
outmess('_get_depend_dict: no dependence info for %s\n' % (repr(name)))
words = []
deps[name] = words
return words
def _calc_depend_dict(vars):
names = list(vars.keys())
depend_dict = {}
for n in names:
_get_depend_dict(n, vars, depend_dict)
return depend_dict
def get_sorted_names(vars):
"""
"""
depend_dict = _calc_depend_dict(vars)
names = []
for name in list(depend_dict.keys()):
if not depend_dict[name]:
names.append(name)
del depend_dict[name]
while depend_dict:
for name, lst in list(depend_dict.items()):
new_lst = [n for n in lst if n in depend_dict]
if not new_lst:
names.append(name)
del depend_dict[name]
else:
depend_dict[name] = new_lst
return [name for name in names if name in vars]
def _kind_func(string):
    """Crude stand-in for Fortran KIND(literal) during parameter eval."""
    # XXX: return something sensible.
    text = string
    # Strip surrounding quotes from a character-literal argument.
    if text[0] in ("'", '"'):
        text = text[1:-1]
    if real16pattern.match(text):
        return 8
    if real8pattern.match(text):
        return 4
    return 'kind(%s)' % text
def _selected_int_kind_func(r):
# XXX: This should be processor dependent
m = 10 ** r
if m <= 2 ** 8:
return 1
if m <= 2 ** 16:
return 2
if m <= 2 ** 32:
return 4
if m <= 2 ** 63:
return 8
if m <= 2 ** 128:
return 16
return -1
def _selected_real_kind_func(p, r=0, radix=0):
# XXX: This should be processor dependent
# This is only good for 0 <= p <= 20
if p < 7:
return 4
if p < 16:
return 8
machine = platform.machine().lower()
if machine.startswith(('aarch64', 'power', 'ppc', 'riscv', 's390x', 'sparc')):
if p <= 20:
return 16
else:
if p < 19:
return 10
elif p <= 20:
return 16
return -1
def get_parameters(vars, global_params={}):
    """Evaluate PARAMETER variables of *vars* into a name -> value dict.

    Fortran literal syntax (logical constants, kind suffixes, 'd'
    exponents) is rewritten into Python before evaluation; KIND() and
    SELECTED_*_KIND() calls are emulated.  Values that cannot be
    evaluated are kept as their source strings.  ``global_params``
    seeds the result (e.g. parameters from used modules).

    NOTE: the mutable default for ``global_params`` is safe here — it is
    only copied (copy.copy below), never mutated.
    """
    params = copy.copy(global_params)
    g_params = copy.copy(global_params)
    # Provide the intrinsic kind functions to eval(), unless shadowed.
    for name, func in [('kind', _kind_func),
                       ('selected_int_kind', _selected_int_kind_func),
                       ('selected_real_kind', _selected_real_kind_func), ]:
        if name not in g_params:
            g_params[name] = func
    param_names = []
    # Dependency order matters: a parameter may use earlier parameters.
    for n in get_sorted_names(vars):
        if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']:
            param_names.append(n)
    kind_re = re.compile(r'\bkind\s*\(\s*(?P<value>.*)\s*\)', re.I)
    selected_int_kind_re = re.compile(
        r'\bselected_int_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
    selected_kind_re = re.compile(
        r'\bselected_(int|real)_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
    for n in param_names:
        if '=' in vars[n]:
            v = vars[n]['=']
            if islogical(vars[n]):
                v = v.lower()
                for repl in [
                    ('.false.', 'False'),
                    ('.true.', 'True'),
                    # TODO: test .eq., .neq., etc replacements.
                ]:
                    v = v.replace(*repl)
            v = kind_re.sub(r'kind("\1")', v)
            v = selected_int_kind_re.sub(r'selected_int_kind(\1)', v)

            # We need to act according to the data.
            # The easy case is if the data has a kind-specifier,
            # then we may easily remove those specifiers.
            # However, it may be that the user uses other specifiers...(!)
            is_replaced = False
            if 'kindselector' in vars[n]:
                if 'kind' in vars[n]['kindselector']:
                    orig_v_len = len(v)
                    v = v.replace('_' + vars[n]['kindselector']['kind'], '')
                    # Again, this will be true if even a single specifier
                    # has been replaced, see comment above.
                    is_replaced = len(v) < orig_v_len

            if not is_replaced:
                if not selected_kind_re.match(v):
                    v_ = v.split('_')
                    # In case there are additive parameters
                    if len(v_) > 1:
                        v = ''.join(v_[:-1]).lower().replace(v_[-1].lower(), '')

            # Currently this will not work for complex numbers.
            # There is missing code for extracting a complex number,
            # which may be defined in either of these:
            #  a) (Re, Im)
            #  b) cmplx(Re, Im)
            #  c) dcmplx(Re, Im)
            #  d) cmplx(Re, Im, <prec>)

            if isdouble(vars[n]):
                tt = list(v)
                # Fortran 'd' exponents become Python 'e' exponents.
                for m in real16pattern.finditer(v):
                    tt[m.start():m.end()] = list(
                        v[m.start():m.end()].lower().replace('d', 'e'))
                v = ''.join(tt)
            elif iscomplex(vars[n]):
                # FIXME complex numbers may also have exponents
                if v[0] == '(' and v[-1] == ')':
                    # FIXME, unused l looks like potential bug
                    l = markoutercomma(v[1:-1]).split('@,@')
            try:
                params[n] = eval(v, g_params, params)
            except Exception as msg:
                params[n] = v
                outmess('get_parameters: got "%s" on %s\n' % (msg, repr(v)))
            if isstring(vars[n]) and isinstance(params[n], int):
                params[n] = chr(params[n])
            # Also expose the lowercase spelling of the parameter name.
            nl = n.lower()
            if nl != n:
                params[nl] = params[n]
        else:
            print(vars[n])
            outmess(
                'get_parameters:parameter %s does not have value?!\n' % (repr(n)))
    return params
def _eval_length(length, params):
if length in ['(:)', '(*)', '*']:
return '(*)'
return _eval_scalar(length, params)
_is_kind_number = re.compile(r'\d+_').match
def _eval_scalar(value, params):
if _is_kind_number(value):
value = value.split('_')[0]
try:
value = str(eval(value, {}, params))
except (NameError, SyntaxError, TypeError):
return value
except Exception as msg:
errmess('"%s" in evaluating %r '
'(available names: %s)\n'
% (msg, value, list(params.keys())))
return value
def analyzevars(block):
global f90modulevars
setmesstext(block)
implicitrules, attrrules = buildimplicitrules(block)
vars = copy.copy(block['vars'])
if block['block'] == 'function' and block['name'] not in vars:
vars[block['name']] = {}
if '' in block['vars']:
del vars['']
if 'attrspec' in block['vars']['']:
gen = block['vars']['']['attrspec']
for n in list(vars.keys()):
for k in ['public', 'private']:
if k in gen:
vars[n] = setattrspec(vars[n], k)
svars = []
args = block['args']
for a in args:
try:
vars[a]
svars.append(a)
except KeyError:
pass
for n in list(vars.keys()):
if n not in args:
svars.append(n)
params = get_parameters(vars, get_useparameters(block))
dep_matches = {}
name_match = re.compile(r'\w[\w\d_$]*').match
for v in list(vars.keys()):
m = name_match(v)
if m:
n = v[m.start():m.end()]
try:
dep_matches[n]
except KeyError:
dep_matches[n] = re.compile(r'.*\b%s\b' % (v), re.I).match
for n in svars:
if n[0] in list(attrrules.keys()):
vars[n] = setattrspec(vars[n], attrrules[n[0]])
if 'typespec' not in vars[n]:
if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']):
if implicitrules:
ln0 = n[0].lower()
for k in list(implicitrules[ln0].keys()):
if k == 'typespec' and implicitrules[ln0][k] == 'undefined':
continue
if k not in vars[n]:
vars[n][k] = implicitrules[ln0][k]
elif k == 'attrspec':
for l in implicitrules[ln0][k]:
vars[n] = setattrspec(vars[n], l)
elif n in block['args']:
outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n' % (
repr(n), block['name']))
if 'charselector' in vars[n]:
if 'len' in vars[n]['charselector']:
l = vars[n]['charselector']['len']
try:
l = str(eval(l, {}, params))
except Exception:
pass
vars[n]['charselector']['len'] = l
if 'kindselector' in vars[n]:
if 'kind' in vars[n]['kindselector']:
l = vars[n]['kindselector']['kind']
try:
l = str(eval(l, {}, params))
except Exception:
pass
vars[n]['kindselector']['kind'] = l
savelindims = {}
if 'attrspec' in vars[n]:
attr = vars[n]['attrspec']
attr.reverse()
vars[n]['attrspec'] = []
dim, intent, depend, check, note = None, None, None, None, None
for a in attr:
if a[:9] == 'dimension':
dim = (a[9:].strip())[1:-1]
elif a[:6] == 'intent':
intent = (a[6:].strip())[1:-1]
elif a[:6] == 'depend':
depend = (a[6:].strip())[1:-1]
elif a[:5] == 'check':
check = (a[5:].strip())[1:-1]
elif a[:4] == 'note':
note = (a[4:].strip())[1:-1]
else:
vars[n] = setattrspec(vars[n], a)
if intent:
if 'intent' not in vars[n]:
vars[n]['intent'] = []
for c in [x.strip() for x in markoutercomma(intent).split('@,@')]:
# Remove spaces so that 'in out' becomes 'inout'
tmp = c.replace(' ', '')
if tmp not in vars[n]['intent']:
vars[n]['intent'].append(tmp)
intent = None
if note:
note = note.replace('\\n\\n', '\n\n')
note = note.replace('\\n ', '\n')
if 'note' not in vars[n]:
vars[n]['note'] = [note]
else:
vars[n]['note'].append(note)
note = None
if depend is not None:
if 'depend' not in vars[n]:
vars[n]['depend'] = []
for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]):
if c not in vars[n]['depend']:
vars[n]['depend'].append(c)
depend = None
if check is not None:
if 'check' not in vars[n]:
vars[n]['check'] = []
for c in [x.strip() for x in markoutercomma(check).split('@,@')]:
if c not in vars[n]['check']:
vars[n]['check'].append(c)
check = None
if dim and 'dimension' not in vars[n]:
vars[n]['dimension'] = []
for d in rmbadname([x.strip() for x in markoutercomma(dim).split('@,@')]):
star = '*'
if d == ':':
star = ':'
if d in params:
d = str(params[d])
for p in list(params.keys()):
re_1 = re.compile(r'(?P<before>.*?)\b' + p + r'\b(?P<after>.*)', re.I)
m = re_1.match(d)
while m:
d = m.group('before') + \
str(params[p]) + m.group('after')
m = re_1.match(d)
if d == star:
dl = [star]
else:
dl = markoutercomma(d, ':').split('@:@')
if len(dl) == 2 and '*' in dl: # e.g. dimension(5:*)
dl = ['*']
d = '*'
if len(dl) == 1 and not dl[0] == star:
dl = ['1', dl[0]]
if len(dl) == 2:
d, v, di = getarrlen(dl, list(block['vars'].keys()))
if d[:4] == '1 * ':
d = d[4:]
if di and di[-4:] == '/(1)':
di = di[:-4]
if v:
savelindims[d] = v, di
vars[n]['dimension'].append(d)
if 'dimension' in vars[n]:
if isintent_c(vars[n]):
shape_macro = 'shape'
else:
shape_macro = 'shape' # 'fshape'
if isstringarray(vars[n]):
if 'charselector' in vars[n]:
d = vars[n]['charselector']
if '*' in d:
d = d['*']
errmess('analyzevars: character array "character*%s %s(%s)" is considered as "character %s(%s)"; "intent(c)" is forced.\n'
% (d, n,
','.join(vars[n]['dimension']),
n, ','.join(vars[n]['dimension'] + [d])))
vars[n]['dimension'].append(d)
del vars[n]['charselector']
if 'intent' not in vars[n]:
vars[n]['intent'] = []
if 'c' not in vars[n]['intent']:
vars[n]['intent'].append('c')
else:
errmess(
"analyzevars: charselector=%r unhandled." % (d))
if 'check' not in vars[n] and 'args' in block and n in block['args']:
flag = 'depend' not in vars[n]
if flag:
vars[n]['depend'] = []
vars[n]['check'] = []
if 'dimension' in vars[n]:
#/----< no check
i = -1
ni = len(vars[n]['dimension'])
for d in vars[n]['dimension']:
ddeps = [] # dependencies of 'd'
ad = ''
pd = ''
if d not in vars:
if d in savelindims:
pd, ad = '(', savelindims[d][1]
d = savelindims[d][0]
else:
for r in block['args']:
if r not in vars:
continue
if re.match(r'.*?\b' + r + r'\b', d, re.I):
ddeps.append(r)
if d in vars:
if 'attrspec' in vars[d]:
for aa in vars[d]['attrspec']:
if aa[:6] == 'depend':
ddeps += aa[6:].strip()[1:-1].split(',')
if 'depend' in vars[d]:
ddeps = ddeps + vars[d]['depend']
i = i + 1
if d in vars and ('depend' not in vars[d]) \
and ('=' not in vars[d]) and (d not in vars[n]['depend']) \
and l_or(isintent_in, isintent_inout, isintent_inplace)(vars[n]):
vars[d]['depend'] = [n]
if ni > 1:
vars[d]['='] = '%s%s(%s,%s)%s' % (
pd, shape_macro, n, i, ad)
else:
vars[d]['='] = '%slen(%s)%s' % (pd, n, ad)
# /---< no check
if 1 and 'check' not in vars[d]:
if ni > 1:
vars[d]['check'] = ['%s%s(%s,%i)%s==%s'
% (pd, shape_macro, n, i, ad, d)]
else:
vars[d]['check'] = [
'%slen(%s)%s>=%s' % (pd, n, ad, d)]
if 'attrspec' not in vars[d]:
vars[d]['attrspec'] = ['optional']
if ('optional' not in vars[d]['attrspec']) and\
('required' not in vars[d]['attrspec']):
vars[d]['attrspec'].append('optional')
elif d not in ['*', ':']:
#/----< no check
if flag:
if d in vars:
if n not in ddeps:
vars[n]['depend'].append(d)
else:
vars[n]['depend'] = vars[n]['depend'] + ddeps
elif isstring(vars[n]):
length = '1'
if 'charselector' in vars[n]:
if '*' in vars[n]['charselector']:
length = _eval_length(vars[n]['charselector']['*'],
params)
vars[n]['charselector']['*'] = length
elif 'len' in vars[n]['charselector']:
length = _eval_length(vars[n]['charselector']['len'],
params)
del vars[n]['charselector']['len']
vars[n]['charselector']['*'] = length
if not vars[n]['check']:
del vars[n]['check']
if flag and not vars[n]['depend']:
del vars[n]['depend']
if '=' in vars[n]:
if 'attrspec' not in vars[n]:
vars[n]['attrspec'] = []
if ('optional' not in vars[n]['attrspec']) and \
('required' not in vars[n]['attrspec']):
vars[n]['attrspec'].append('optional')
if 'depend' not in vars[n]:
vars[n]['depend'] = []
for v, m in list(dep_matches.items()):
if m(vars[n]['=']):
vars[n]['depend'].append(v)
if not vars[n]['depend']:
del vars[n]['depend']
if isscalar(vars[n]):
vars[n]['='] = _eval_scalar(vars[n]['='], params)
for n in list(vars.keys()):
if n == block['name']: # n is block name
if 'note' in vars[n]:
block['note'] = vars[n]['note']
if block['block'] == 'function':
if 'result' in block and block['result'] in vars:
vars[n] = appenddecl(vars[n], vars[block['result']])
if 'prefix' in block:
pr = block['prefix']
ispure = 0
isrec = 1
pr1 = pr.replace('pure', '')
ispure = (not pr == pr1)
pr = pr1.replace('recursive', '')
isrec = (not pr == pr1)
m = typespattern[0].match(pr)
if m:
typespec, selector, attr, edecl = cracktypespec0(
m.group('this'), m.group('after'))
kindselect, charselect, typename = cracktypespec(
typespec, selector)
vars[n]['typespec'] = typespec
if kindselect:
if 'kind' in kindselect:
try:
kindselect['kind'] = eval(
kindselect['kind'], {}, params)
except Exception:
pass
vars[n]['kindselector'] = kindselect
if charselect:
vars[n]['charselector'] = charselect
if typename:
vars[n]['typename'] = typename
if ispure:
vars[n] = setattrspec(vars[n], 'pure')
if isrec:
vars[n] = setattrspec(vars[n], 'recursive')
else:
outmess(
'analyzevars: prefix (%s) were not used\n' % repr(block['prefix']))
if not block['block'] in ['module', 'pythonmodule', 'python module', 'block data']:
if 'commonvars' in block:
neededvars = copy.copy(block['args'] + block['commonvars'])
else:
neededvars = copy.copy(block['args'])
for n in list(vars.keys()):
if l_or(isintent_callback, isintent_aux)(vars[n]):
neededvars.append(n)
if 'entry' in block:
neededvars.extend(list(block['entry'].keys()))
for k in list(block['entry'].keys()):
for n in block['entry'][k]:
if n not in neededvars:
neededvars.append(n)
if block['block'] == 'function':
if 'result' in block:
neededvars.append(block['result'])
else:
neededvars.append(block['name'])
if block['block'] in ['subroutine', 'function']:
name = block['name']
if name in vars and 'intent' in vars[name]:
block['intent'] = vars[name]['intent']
if block['block'] == 'type':
neededvars.extend(list(vars.keys()))
for n in list(vars.keys()):
if n not in neededvars:
del vars[n]
return vars
# A plain Fortran identifier: letters followed by word characters or '$'.
analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z', re.I)


def expr2name(a, block, args=None):
    """Return a valid variable name for argument expression `a`.

    If `a` is already a plain identifier it is kept, renamed with an ``r``
    suffix on collision with existing vars/args.  Otherwise a synthetic
    ``e_..._e`` name is generated and the expression's inferred type is
    registered in ``block['vars']`` under that name.

    Parameters
    ----------
    a : str
        Argument name or expression.
    block : dict
        Current cracked block; ``block['vars']`` / ``block['args']`` are
        consulted and ``block['vars']`` may be updated.
    args : list of str, optional
        Names already assigned in the current argument list, used to avoid
        duplicates.  Defaults to an empty list.

    Returns
    -------
    str
        The (possibly renamed) variable name.
    """
    if args is None:
        # Fix: avoid a shared mutable default argument; the old default
        # ``args=[]`` was never mutated, so this is behavior-identical.
        args = []
    orig_a = a
    a_is_expr = not analyzeargs_re_1.match(a)
    if a_is_expr:  # `a` is an expression
        implicitrules, attrrules = buildimplicitrules(block)
        at = determineexprtype(a, block['vars'], implicitrules)
        # Build a synthetic name from the expression's characters.
        na = 'e_'
        for c in a:
            c = c.lower()
            if c not in string.ascii_lowercase + string.digits:
                c = '_'
            na = na + c
        if na[-1] == '_':
            na = na + 'e'
        else:
            na = na + '_e'
        a = na
    # Avoid clashes with already-known variables/arguments.
    while a in block['vars'] or a in block['args']:
        a = a + 'r'
    if a in args:
        k = 1
        while a + str(k) in args:
            k = k + 1
        a = a + str(k)
    if a_is_expr:
        block['vars'][a] = at
    else:
        if a not in block['vars']:
            if orig_a in block['vars']:
                block['vars'][a] = block['vars'][orig_a]
            else:
                block['vars'][a] = {}
        if 'externals' in block and orig_a in block['externals'] + block['interfaced']:
            block['vars'][a] = setattrspec(block['vars'][a], 'external')
    return a
def analyzeargs(block):
    """Normalise the argument list of `block` in place.

    Each argument expression is converted to a plain variable name via
    ``expr2name`` (which registers it in ``block['vars']``), entry-point
    arguments and the result variable get empty var entries, and body
    routines appearing in the argument list are recorded as externals.
    Returns the (mutated) block.
    """
    setmesstext(block)
    implicitrules, attrrules = buildimplicitrules(block)
    if 'args' not in block:
        block['args'] = []
    normalized = []
    for raw in block['args']:
        # `normalized` is passed so expr2name can avoid duplicate names
        # among the arguments processed so far.
        normalized.append(expr2name(raw, block, normalized))
    block['args'] = normalized

    if 'entry' in block:
        for entry_args in block['entry'].values():
            for name in entry_args:
                block['vars'].setdefault(name, {})

    for sub in block['body']:
        if sub['name'] in normalized:
            if 'externals' not in block:
                block['externals'] = []
            if sub['name'] not in block['externals']:
                block['externals'].append(sub['name'])

    if 'result' in block and block['result'] not in block['vars']:
        block['vars'][block['result']] = {}
    return block
# Heuristic patterns used by determineexprtype() to classify Fortran
# constant expressions:
determineexprtype_re_1 = re.compile(r'\A\(.+?[,].+?\)\Z', re.I)  # "(re,im)" complex constant
determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P<name>[\w]+)|)\Z', re.I)  # integer, optional kind suffix
determineexprtype_re_3 = re.compile(
    r'\A[+-]?[\d.]+[\d+\-de.]*(_(?P<name>[\w]+)|)\Z', re.I)  # real literal, optional kind suffix
determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I)  # fully parenthesised expression
determineexprtype_re_5 = re.compile(r'\A(?P<name>\w+)\s*\(.*?\)\s*\Z', re.I)  # name(...) call or array ref
def _ensure_exprdict(r):
if isinstance(r, int):
return {'typespec': 'integer'}
if isinstance(r, float):
return {'typespec': 'real'}
if isinstance(r, complex):
return {'typespec': 'complex'}
if isinstance(r, dict):
return r
raise AssertionError(repr(r))
def determineexprtype(expr, vars, rules={}):
    """Infer the Fortran type of constant expression `expr`.

    Returns a typespec dict (e.g. ``{'typespec': 'integer'}``) or an empty
    dict when the type cannot be determined.  `vars` maps known names to
    typespec dicts; `rules` maps first letters to implicit types.
    """
    def _warn_kind():
        outmess(
            'determineexprtype: selected kind types not supported (%s)\n' % repr(expr))

    # A name with a known declaration wins immediately.
    if expr in vars:
        return _ensure_exprdict(vars[expr])
    expr = expr.strip()
    # "(re, im)" looks like a complex constant.
    if determineexprtype_re_1.match(expr):
        return {'typespec': 'complex'}
    match = determineexprtype_re_2.match(expr)
    if match:
        if 'name' in match.groupdict() and match.group('name'):
            _warn_kind()
        return {'typespec': 'integer'}
    match = determineexprtype_re_3.match(expr)
    if match:
        if 'name' in match.groupdict() and match.group('name'):
            _warn_kind()
        return {'typespec': 'real'}
    # For arithmetic expressions, the type of any known operand decides.
    for op in ['+', '-', '*', '/']:
        operands = [x.strip()
                    for x in markoutercomma(expr, comma=op).split('@' + op + '@')]
        for operand in operands:
            if operand in vars:
                return _ensure_exprdict(vars[operand])
    t = {}
    if determineexprtype_re_4.match(expr):  # in parenthesis
        t = determineexprtype(expr[1:-1], vars, rules)
    else:
        match = determineexprtype_re_5.match(expr)
        if match:
            rn = match.group('name')
            t = determineexprtype(match.group('name'), vars, rules)
            if t and 'attrspec' in t:
                del t['attrspec']
            if not t:
                # Fall back on the implicit-typing rule for the first letter.
                if rn[0] in rules:
                    return _ensure_exprdict(rules[rn[0]])
        if expr[0] in '\'"':
            return {'typespec': 'character', 'charselector': {'*': '*'}}
    if not t:
        outmess(
            'determineexprtype: could not determine expressions (%s) type.\n' % (repr(expr)))
    return t
######
def crack2fortrangen(block, tab='\n', as_interface=False):
    """Regenerate Fortran (.pyf) source text for a cracked `block`.

    `block` may be a single block dict or a list of blocks; `tab` carries the
    accumulated newline-plus-indentation prefix for the current nesting depth.
    Relies on the module globals `skipfuncs`/`onlyfuncs` (routine filters) and
    `tabchar` (one indentation step).
    """
    global skipfuncs, onlyfuncs
    setmesstext(block)
    ret = ''
    if isinstance(block, list):
        # Render each routine of the list, honouring the skip/only filters.
        for g in block:
            if g and g['block'] in ['function', 'subroutine']:
                if g['name'] in skipfuncs:
                    continue
                if onlyfuncs and g['name'] not in onlyfuncs:
                    continue
            ret = ret + crack2fortrangen(g, tab, as_interface=as_interface)
        return ret
    prefix = ''
    name = ''
    args = ''
    blocktype = block['block']
    if blocktype == 'program':
        # Program blocks are not reproduced in signature files.
        return ''
    argsl = []
    if 'name' in block:
        name = block['name']
    if 'args' in block:
        vars = block['vars']
        # Normalize argument expressions to plain names; callback arguments
        # are kept out of the printed argument list.
        for a in block['args']:
            a = expr2name(a, block, argsl)
            if not isintent_callback(vars[a]):
                argsl.append(a)
        if block['block'] == 'function' or argsl:
            args = '(%s)' % ','.join(argsl)
    f2pyenhancements = ''
    if 'f2pyenhancements' in block:
        for k in list(block['f2pyenhancements'].keys()):
            f2pyenhancements = '%s%s%s %s' % (
                f2pyenhancements, tab + tabchar, k, block['f2pyenhancements'][k])
    intent_lst = block.get('intent', [])[:]
    if blocktype == 'function' and 'callback' in intent_lst:
        # 'callback' on a function describes its usage, not a printable
        # intent statement.
        intent_lst.remove('callback')
    if intent_lst:
        f2pyenhancements = '%s%sintent(%s) %s' %\
            (f2pyenhancements, tab + tabchar,
             ','.join(intent_lst), name)
    use = ''
    if 'use' in block:
        use = use2fortran(block['use'], tab + tabchar)
    common = ''
    if 'common' in block:
        common = common2fortran(block['common'], tab + tabchar)
    if name == 'unknown_interface':
        name = ''
    result = ''
    if 'result' in block:
        result = ' result (%s)' % block['result']
        if block['result'] not in argsl:
            argsl.append(block['result'])
    body = crack2fortrangen(block['body'], tab + tabchar)
    vars = vars2fortran(
        block, block['vars'], argsl, tab + tabchar, as_interface=as_interface)
    mess = ''
    if 'from' in block and not as_interface:
        mess = '! in %s' % block['from']
    if 'entry' in block:
        entry_stmts = ''
        for k, i in list(block['entry'].items()):
            entry_stmts = '%s%sentry %s(%s)' \
                % (entry_stmts, tab + tabchar, k, ','.join(i))
        body = body + entry_stmts
    if blocktype == 'block data' and name == '_BLOCK_DATA_':
        # Anonymous block data: drop the placeholder name.
        name = ''
    ret = '%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s' % (
        tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name)
    return ret
def common2fortran(common, tab=''):
    """Render a dict of COMMON blocks as Fortran ``common`` statements.

    The special key ``'_BLNK_'`` denotes the blank (unnamed) common block.
    `tab` is prepended to every statement.
    """
    pieces = []
    for block_name in list(common.keys()):
        members = ','.join(common[block_name])
        if block_name == '_BLNK_':
            pieces.append('%scommon %s' % (tab, members))
        else:
            pieces.append('%scommon /%s/ %s' % (tab, block_name, members))
    return ''.join(pieces)
def use2fortran(use, tab=''):
    """Render a dict of USE specifications as Fortran ``use`` statements.

    Each entry maps a module name to a spec dict that may carry an ``only``
    flag and a ``map`` of local-name -> module-name renamings.  A trailing
    comma left over from an empty clause list is trimmed.
    """
    out = ''
    for mod in list(use.keys()):
        out += '%suse %s,' % (tab, mod)
        spec = use[mod]
        if spec == {}:
            if out and out.endswith(','):
                out = out[:-1]
            continue
        if spec.get('only'):
            out += ' only:'
        if spec.get('map'):
            sep = ' '
            for local, remote in list(spec['map'].items()):
                if local == remote:
                    out += '%s%s' % (sep, local)
                else:
                    out += '%s%s=>%s' % (sep, local, remote)
                sep = ','
        if out and out.endswith(','):
            out = out[:-1]
    return out
def true_intent_list(var):
    """Return the subset of ``var['intent']`` entries confirmed by their
    ``isintent_*`` predicate.

    Intent names with no matching ``isintent_<name>`` function in this
    module are silently dropped, matching the previous behaviour where a
    NameError from ``eval`` was swallowed.
    """
    ret = []
    for intent in var['intent']:
        # Look the predicate up by name instead of eval'ing a constructed
        # expression; a missing predicate simply means "not matched".
        checker = globals().get('isintent_%s' % intent)
        if checker is not None and checker(var):
            ret.append(intent)
    return ret
def vars2fortran(block, vars, args, tab='', as_interface=False):
    """Render the variable declarations of `block` as Fortran statements.

    `vars` maps names to their cracked attribute dicts; `args` lists the
    argument names (declared first, in order).  `tab` is the newline +
    indentation prefix for each statement.  Returns one string holding all
    declarations.

    TODO:
    public sub
    ...
    """
    setmesstext(block)
    ret = ''
    # Build `nout`: the ordered list of names to declare -- arguments first,
    # then common-block members, explicitly recorded names, and (unless
    # emitting an interface) every remaining variable.
    nout = []
    for a in args:
        if a in block['vars']:
            nout.append(a)
    if 'commonvars' in block:
        for a in block['commonvars']:
            if a in vars:
                if a not in nout:
                    nout.append(a)
            else:
                errmess(
                    'vars2fortran: Confused?!: "%s" is not defined in vars.\n' % a)
    if 'varnames' in block:
        nout.extend(block['varnames'])
    if not as_interface:
        for a in list(vars.keys()):
            if a not in nout:
                nout.append(a)
    for a in nout:
        # Warn about mutual `depend` relations; f2py cannot resolve them.
        if 'depend' in vars[a]:
            for d in vars[a]['depend']:
                if d in vars and 'depend' in vars[d] and a in vars[d]['depend']:
                    errmess(
                        'vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n' % (a, d))
        if 'externals' in block and a in block['externals']:
            if isintent_callback(vars[a]):
                ret = '%s%sintent(callback) %s' % (ret, tab, a)
            ret = '%s%sexternal %s' % (ret, tab, a)
            if isoptional(vars[a]):
                ret = '%s%soptional %s' % (ret, tab, a)
            if a in vars and 'typespec' not in vars[a]:
                continue
            # Only emit a typed declaration for an external when it is a
            # function defined in this block's body.
            cont = 1
            for b in block['body']:
                if a == b['name'] and b['block'] == 'function':
                    cont = 0
                    break
            if cont:
                continue
        if a not in vars:
            show(vars)
            outmess('vars2fortran: No definition for argument "%s".\n' % a)
            continue
        if a == block['name'] and not block['block'] == 'function':
            # The block's own name is declared only for functions (as the
            # return value).
            continue
        if 'typespec' not in vars[a]:
            if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']:
                if a in args:
                    ret = '%s%sexternal %s' % (ret, tab, a)
                continue
            show(vars[a])
            outmess('vars2fortran: No typespec for argument "%s".\n' % a)
            continue
        vardef = vars[a]['typespec']
        if vardef == 'type' and 'typename' in vars[a]:
            vardef = '%s(%s)' % (vardef, vars[a]['typename'])
        selector = {}
        if 'kindselector' in vars[a]:
            selector = vars[a]['kindselector']
        elif 'charselector' in vars[a]:
            selector = vars[a]['charselector']
        if '*' in selector:
            # Star-form length/kind, e.g. character*8 or character*(*).
            if selector['*'] in ['*', ':']:
                vardef = '%s*(%s)' % (vardef, selector['*'])
            else:
                vardef = '%s*%s' % (vardef, selector['*'])
        else:
            if 'len' in selector:
                vardef = '%s(len=%s' % (vardef, selector['len'])
                if 'kind' in selector:
                    vardef = '%s,kind=%s)' % (vardef, selector['kind'])
                else:
                    vardef = '%s)' % (vardef)
            elif 'kind' in selector:
                vardef = '%s(kind=%s)' % (vardef, selector['kind'])
        # `c` separates the type part from attribute clauses: a space before
        # the first clause, commas between subsequent ones.
        c = ' '
        if 'attrspec' in vars[a]:
            attr = [l for l in vars[a]['attrspec']
                    if l not in ['external']]
            if attr:
                vardef = '%s, %s' % (vardef, ','.join(attr))
                c = ','
        if 'dimension' in vars[a]:
            vardef = '%s%sdimension(%s)' % (
                vardef, c, ','.join(vars[a]['dimension']))
            c = ','
        if 'intent' in vars[a]:
            lst = true_intent_list(vars[a])
            if lst:
                vardef = '%s%sintent(%s)' % (vardef, c, ','.join(lst))
            c = ','
        if 'check' in vars[a]:
            vardef = '%s%scheck(%s)' % (vardef, c, ','.join(vars[a]['check']))
            c = ','
        if 'depend' in vars[a]:
            vardef = '%s%sdepend(%s)' % (
                vardef, c, ','.join(vars[a]['depend']))
            c = ','
        if '=' in vars[a]:
            v = vars[a]['=']
            if vars[a]['typespec'] in ['complex', 'double complex']:
                try:
                    # Normalize complex initializers to Fortran (re,im) form.
                    v = eval(v)
                    v = '(%s,%s)' % (v.real, v.imag)
                except Exception:
                    pass
            vardef = '%s :: %s=%s' % (vardef, a, v)
        else:
            vardef = '%s :: %s' % (vardef, a)
        ret = '%s%s%s' % (ret, tab, vardef)
    return ret
######
def crackfortran(files):
    """Parse the Fortran source `files` and return the post-processed
    block list.

    Side effect: resets the module-global `usermodules`, which is filled
    during post-processing and prepended to the result.
    """
    global usermodules
    outmess('Reading fortran codes...\n', 0)
    readfortrancode(files, crackline)
    outmess('Post-processing...\n', 0)
    usermodules = []
    postlist = postcrack(grouplist[0])
    outmess('Post-processing (stage 2)...\n', 0)
    postlist = postcrack2(postlist)
    return usermodules + postlist
def crack2fortran(block):
    """Render a cracked block (or block list) as .pyf signature-file text,
    wrapped in the standard f2py header and footer banners."""
    global f2py_version
    pyf = crack2fortrangen(block) + '\n'
    # Fixed banner marking the generated file as free-form Fortran.
    header = """! -*- f90 -*-
! Note: the context of this file is case sensitive.
"""
    footer = """
! This file was auto-generated with f2py (version:%s).
! See http://cens.ioc.ee/projects/f2py2e/
""" % (f2py_version)
    return header + pyf + footer
if __name__ == "__main__":
    # Minimal command-line driver: crack the given Fortran files and
    # optionally write a .pyf signature file or dump the parsed block list.
    # Note: quiet/verbose/strictf77/... below rebind module-level globals.
    files = []
    funcs = []
    f = 1   # 1 while collecting file names (until ':' is seen)
    f2 = 0  # 1 when the next argument is the .pyf output file (-h)
    f3 = 0  # 1 when the next argument is the F77 module name (-m)
    showblocklist = 0
    for l in sys.argv[1:]:
        if l == '':
            pass
        elif l[0] == ':':
            # ':' switches from collecting files to collecting function names.
            f = 0
        elif l == '-quiet':
            quiet = 1
            verbose = 0
        elif l == '-verbose':
            verbose = 2
            quiet = 0
        elif l == '-fix':
            if strictf77:
                outmess(
                    'Use option -f90 before -fix if Fortran 90 code is in fix form.\n', 0)
            skipemptyends = 1
            sourcecodeform = 'fix'
        elif l == '-skipemptyends':
            skipemptyends = 1
        elif l == '--ignore-contains':
            ignorecontains = 1
        elif l == '-f77':
            strictf77 = 1
            sourcecodeform = 'fix'
        elif l == '-f90':
            strictf77 = 0
            sourcecodeform = 'free'
            skipemptyends = 1
        elif l == '-h':
            f2 = 1
        elif l == '-show':
            showblocklist = 1
        elif l == '-m':
            f3 = 1
        elif l[0] == '-':
            errmess('Unknown option %s\n' % repr(l))
        elif f2:
            f2 = 0
            pyffilename = l
        elif f3:
            f3 = 0
            f77modulename = l
        elif f:
            # Positional argument while in file-collection mode: only keep
            # files that can actually be opened.
            try:
                open(l).close()
                files.append(l)
            except IOError as detail:
                errmess('IOError: %s\n' % str(detail))
        else:
            funcs.append(l)
    if not strictf77 and f77modulename and not skipemptyends:
        outmess("""\
Warning: You have specified module name for non Fortran 77 code
that should not need one (expect if you are scanning F90 code
for non module blocks but then you should use flag -skipemptyends
and also be sure that the files do not contain programs without program statement).
""", 0)
    postlist = crackfortran(files)
    if pyffilename:
        outmess('Writing fortran code to file %s\n' % repr(pyffilename), 0)
        pyf = crack2fortran(postlist)
        with open(pyffilename, 'w') as f:
            f.write(pyf)
    if showblocklist:
        show(postlist)
| {
"content_hash": "9fb48ca772dc0ee17953bc999f0103e4",
"timestamp": "",
"source": "github",
"line_count": 3344,
"max_line_length": 207,
"avg_line_length": 38.553528708133975,
"alnum_prop": 0.4706530254492992,
"repo_name": "jorisvandenbossche/numpy",
"id": "2aaf5d7c6c12e962c7e89e99692724f4d4bba178",
"size": "128945",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "numpy/f2py/crackfortran.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9068647"
},
{
"name": "C++",
"bytes": "189527"
},
{
"name": "Fortran",
"bytes": "10884"
},
{
"name": "JavaScript",
"bytes": "16928"
},
{
"name": "Makefile",
"bytes": "4290"
},
{
"name": "Python",
"bytes": "8307898"
},
{
"name": "Shell",
"bytes": "8482"
},
{
"name": "sed",
"bytes": "5741"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the Tangible template for this quest item.

    Auto-generated SWGANh object template; hand edits belong between the
    modification markers below.

    Parameters:
        kernel: service kernel supplied by the template loader (unused here
            -- presumably kept for a uniform template signature; confirm
            against the loader).
    """
    result = Tangible()

    result.template = "object/tangible/mission/quest_item/shared_daclif_gallamby_q2_needed.iff"
    result.attribute_template_id = -1
    result.stfName("loot_corl_n","daclif_gallamby_q2_needed")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
"content_hash": "54e859f127ee6d9ad4242e340105f23e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 92,
"avg_line_length": 26.153846153846153,
"alnum_prop": 0.7088235294117647,
"repo_name": "anhstudios/swganh",
"id": "b34a23c453f67a21ae176c1c81c5d29f5f46175b",
"size": "485",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/mission/quest_item/shared_daclif_gallamby_q2_needed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
import os

# Absolute path of the directory containing this config module; exposed to
# the app as Config.BASE_DIR.
basedir = os.path.abspath(os.path.dirname(__file__))


class Config(object):
    """Base Flask configuration shared by all environments."""
    DEBUG = False
    TESTING = False
    CSRF_ENABLED = True
    # Prefer an environment-supplied secret; the hard-coded fallback keeps
    # local development working but must never be relied on in production.
    SECRET_KEY = os.environ.get("SECRET_KEY", "this-really-needs-to-be-changed")
    OUTPUT_IMAGES_FOLDER = "images"
    BASE_DIR = basedir
class Production(Config):
    """Production environment: debugging explicitly disabled."""
    DEBUG = False
class Staging(Config):
    """Staging environment: development behaviour on production-like infrastructure."""
    DEVELOPMENT = True
    DEBUG = True
class Development(Config):
    """Local development environment: debug mode enabled."""
    DEVELOPMENT = True
    DEBUG = True
class Testing(Config):
    """Test-suite environment: enables Flask testing mode."""
    TESTING = True
| {
"content_hash": "41e491e5b58ddeef7d14876e9d71ac8d",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 52,
"avg_line_length": 16.133333333333333,
"alnum_prop": 0.6611570247933884,
"repo_name": "KodeKracker/Image-Merging-API",
"id": "00c425de36600f7e294adcdd272057ef429ca24a",
"size": "484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17888"
},
{
"name": "JavaScript",
"bytes": "734"
},
{
"name": "Python",
"bytes": "9933"
}
],
"symlink_target": ""
} |
from vkapp.bot.logic.core.vk_trigger import VKTrigger
from .logic.core.StateMachine import StateMachine
from multiprocessing import Process, Array
def handle_update(update):
    """Feed one incoming VK update into a fresh StateMachine.

    `update` is a sequence as delivered by the VK long-poll API; element 3
    is used as the user id (presumably the sender's id -- verify against
    the long-poll message format).
    """
    uid = update[3]
    state_machine = StateMachine()
    # Multiprocessing dispatch is left commented out; updates are currently
    # handled synchronously in the caller's process.
    # p = Process(target=state_machine.fire, args=(VKTrigger, uid, update))
    # p.start()
    state_machine.fire(VKTrigger, uid, update)
| {
"content_hash": "dd38e4eeaa707ed652c95c96d7522418",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 75,
"avg_line_length": 30.916666666666668,
"alnum_prop": 0.7331536388140162,
"repo_name": "ParuninPavel/lenta4_hack",
"id": "601ea10476eb4aeaf84a47e25656190af94ad04c",
"size": "371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vkapp/bot/handlers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4758"
},
{
"name": "HTML",
"bytes": "29228"
},
{
"name": "JavaScript",
"bytes": "817"
},
{
"name": "Jupyter Notebook",
"bytes": "2080923"
},
{
"name": "Python",
"bytes": "81323"
},
{
"name": "Shell",
"bytes": "8906"
}
],
"symlink_target": ""
} |
from django import forms
from threads.models import Answer
class reply_form(forms.ModelForm):
    """ModelForm for posting an Answer; exposes only the description field.

    NOTE(review): the class name violates PEP 8 PascalCase (ReplyForm);
    left unchanged because callers reference this name.
    """
    class Meta:
        model = Answer
        fields = ('description',)
| {
"content_hash": "5ae11f71436982744ea9053472cd49b9",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 34,
"avg_line_length": 17.1,
"alnum_prop": 0.672514619883041,
"repo_name": "uniqna/uniqna",
"id": "c967b52d3ac7377883644fce5d1bd43d037fd4a1",
"size": "171",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "threads/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "80596"
},
{
"name": "HTML",
"bytes": "198963"
},
{
"name": "JavaScript",
"bytes": "20477"
},
{
"name": "Python",
"bytes": "101788"
}
],
"symlink_target": ""
} |
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
# TODO(Eric Ayers): There is no task or goal named 'jvm' as used in the config section where these parameters are located.
# We might need to rename these when merging together the config and the new options system.
class JvmDebugConfig(object):
  """Consolidates fetching of JVM debugging flags from the configuration."""

  @staticmethod
  def debug_port(config):
    """Return the TCP port the remote debugger should listen on (default 5005)."""
    return config.getint('jvm', 'debug_port', default=5005)

  @staticmethod
  def debug_args(config):
    """Return the JVM flags that enable remote debugging (JDWP over a socket)."""
    port = JvmDebugConfig.debug_port(config)
    return config.getlist('jvm', 'debug_args', default=[
        '-Xdebug',
        '-Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=%d' % port,
    ])
| {
"content_hash": "ae331af17d9fe5d4663d689a419b1ca4",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 122,
"avg_line_length": 45.68421052631579,
"alnum_prop": 0.7131336405529954,
"repo_name": "square/pants",
"id": "d0ed0020f875e399ca98486722f2c175ce3398f2",
"size": "1015",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/jvm/jvm_debug_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "273"
},
{
"name": "CSS",
"bytes": "9347"
},
{
"name": "GAP",
"bytes": "4684"
},
{
"name": "Java",
"bytes": "46389"
},
{
"name": "JavaScript",
"bytes": "9523"
},
{
"name": "Python",
"bytes": "2250380"
},
{
"name": "Scala",
"bytes": "5517"
},
{
"name": "Shell",
"bytes": "29381"
},
{
"name": "Thrift",
"bytes": "1674"
}
],
"symlink_target": ""
} |
"""Utilities to evaluate the clustering performance of models.
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# Arnaud Fouchet <foucheta@gmail.com>
# Thierry Guillemot <thierry.guillemot.work@gmail.com>
# Gregory Stupp <stuppie@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# Arya McCarthy <arya@jhu.edu>
# Uwe F Mayer <uwe_f_mayer@yahoo.com>
# License: BSD 3 clause
import warnings
from math import log
import numpy as np
from scipy import sparse as sp
from ._expected_mutual_info_fast import expected_mutual_information
from ...utils.multiclass import type_of_target
from ...utils.validation import check_array, check_consistent_length
def check_clusterings(labels_true, labels_pred):
    """Check that the labels arrays are 1D and of same dimension.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,)
        The true labels.

    labels_pred : array-like of shape (n_samples,)
        The predicted labels.
    """

    def _validate(labels):
        # Accept 1D (possibly empty) arrays of any dtype.
        return check_array(
            labels,
            ensure_2d=False,
            ensure_min_samples=0,
            dtype=None,
        )

    labels_true = _validate(labels_true)
    labels_pred = _validate(labels_pred)

    type_label = type_of_target(labels_true)
    type_pred = type_of_target(labels_pred)

    if "continuous" in (type_pred, type_label):
        warnings.warn(
            "Clustering metrics expects discrete values but received"
            f" {type_label} values for label, and {type_pred} values "
            "for target",
            UserWarning,
        )

    # input checks
    if labels_true.ndim != 1:
        raise ValueError("labels_true must be 1D: shape is %r" % (labels_true.shape,))
    if labels_pred.ndim != 1:
        raise ValueError("labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
    check_consistent_length(labels_true, labels_pred)

    return labels_true, labels_pred
def _generalized_average(U, V, average_method):
"""Return a particular mean of two numbers."""
if average_method == "min":
return min(U, V)
elif average_method == "geometric":
return np.sqrt(U * V)
elif average_method == "arithmetic":
return np.mean([U, V])
elif average_method == "max":
return max(U, V)
else:
raise ValueError(
"'average_method' must be 'min', 'geometric', 'arithmetic', or 'max'"
)
def contingency_matrix(
    labels_true, labels_pred, *, eps=None, sparse=False, dtype=np.int64
):
    """Build a contingency matrix describing the relationship between labels.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,)
        Cluster labels to evaluate.

    eps : float, default=None
        If a float, that value is added to all values in the contingency
        matrix. This helps to stop NaN propagation.
        If ``None``, nothing is adjusted.

    sparse : bool, default=False
        If `True`, return a sparse CSR continency matrix. If `eps` is not
        `None` and `sparse` is `True` will raise ValueError.

        .. versionadded:: 0.18

    dtype : numeric type, default=np.int64
        Output dtype. Ignored if `eps` is not `None`.

        .. versionadded:: 0.24

    Returns
    -------
    contingency : {array-like, sparse}, shape=[n_classes_true, n_classes_pred]
        Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
        true class :math:`i` and in predicted class :math:`j`. If
        ``eps is None``, the dtype of this array will be integer unless set
        otherwise with the ``dtype`` argument. If ``eps`` is given, the dtype
        will be float.
        Will be a ``sklearn.sparse.csr_matrix`` if ``sparse=True``.
    """
    if eps is not None and sparse:
        raise ValueError("Cannot set 'eps' when sparse=True")

    # Map each label set onto consecutive integer indices.
    classes, class_idx = np.unique(labels_true, return_inverse=True)
    clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)

    # A COO matrix with one entry per sample gives the joint histogram after
    # duplicate coordinates are summed; this beats histogram2d here.
    contingency = sp.coo_matrix(
        (np.ones(class_idx.shape[0]), (class_idx, cluster_idx)),
        shape=(classes.shape[0], clusters.shape[0]),
        dtype=dtype,
    )
    if sparse:
        contingency = contingency.tocsr()
        contingency.sum_duplicates()
        return contingency

    contingency = contingency.toarray()
    if eps is not None:
        # don't use += as contingency is integer
        contingency = contingency + eps
    return contingency
# clustering measures
def pair_confusion_matrix(labels_true, labels_pred):
    """Pair confusion matrix arising from two clusterings [1]_.

    The pair confusion matrix :math:`C` computes a 2 by 2 similarity matrix
    between two clusterings by considering all pairs of samples and counting
    pairs that are assigned into the same or into different clusters under
    the true and predicted clusterings.

    Considering a pair of samples that is clustered together a positive pair,
    then as in binary classification the count of true negatives is
    :math:`C_{00}`, false negatives is :math:`C_{10}`, true positives is
    :math:`C_{11}` and false positives is :math:`C_{01}`.

    Read more in the :ref:`User Guide <pair_confusion_matrix>`.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,), dtype=integral
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,), dtype=integral
        Cluster labels to evaluate.

    Returns
    -------
    C : ndarray of shape (2, 2), dtype=np.int64
        The contingency matrix.

    See Also
    --------
    rand_score: Rand Score.
    adjusted_rand_score: Adjusted Rand Score.
    adjusted_mutual_info_score: Adjusted Mutual Information.

    References
    ----------
    .. [1] :doi:`Hubert, L., Arabie, P. "Comparing partitions."
       Journal of Classification 2, 193–218 (1985).
       <10.1007/BF01908075>`

    Examples
    --------
    Perfectly matching labelings have all non-zero entries on the
    diagonal regardless of actual label values:

      >>> from sklearn.metrics.cluster import pair_confusion_matrix
      >>> pair_confusion_matrix([0, 0, 1, 1], [1, 1, 0, 0])
      array([[8, 0],
             [0, 4]]...

    Labelings that assign all classes members to the same clusters
    are complete but may be not always pure, hence penalized, and
    have some off-diagonal non-zero entries:

      >>> pair_confusion_matrix([0, 0, 1, 2], [0, 0, 1, 1])
      array([[8, 2],
             [0, 2]]...

    Note that the matrix is not symmetric.
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = np.int64(labels_true.shape[0])

    # Computation using the contingency data
    contingency = contingency_matrix(
        labels_true, labels_pred, sparse=True, dtype=np.int64
    )
    # Marginals: per-true-class sizes (n_c) and per-cluster sizes (n_k).
    n_c = np.ravel(contingency.sum(axis=1))
    n_k = np.ravel(contingency.sum(axis=0))
    # Sum of squared cell counts; counts ordered pairs that agree in both
    # clusterings (including each sample paired with itself).
    sum_squares = (contingency.data**2).sum()
    C = np.empty((2, 2), dtype=np.int64)
    # Subtract n_samples to discard the self-pairs.
    C[1, 1] = sum_squares - n_samples
    C[0, 1] = contingency.dot(n_k).sum() - sum_squares
    C[1, 0] = contingency.transpose().dot(n_c).sum() - sum_squares
    # All remaining ordered pairs are separated in both clusterings.
    C[0, 0] = n_samples**2 - C[0, 1] - C[1, 0] - sum_squares
    return C
def rand_score(labels_true, labels_pred):
    """Rand index.

    The Rand Index computes a similarity measure between two clusterings
    by considering all pairs of samples and counting pairs that are
    assigned in the same or different clusters in the predicted and
    true clusterings [1]_ [2]_.

    The raw RI score [3]_ is:

        RI = (number of agreeing pairs) / (number of pairs)

    Read more in the :ref:`User Guide <rand_score>`.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,), dtype=integral
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,), dtype=integral
        Cluster labels to evaluate.

    Returns
    -------
    RI : float
       Similarity score between 0.0 and 1.0, inclusive, 1.0 stands for
       perfect match.

    See Also
    --------
    adjusted_rand_score: Adjusted Rand Score.
    adjusted_mutual_info_score: Adjusted Mutual Information.

    References
    ----------
    .. [1] :doi:`Hubert, L., Arabie, P. "Comparing partitions."
       Journal of Classification 2, 193–218 (1985).
       <10.1007/BF01908075>`.

    .. [2] `Wikipedia: Simple Matching Coefficient
        <https://en.wikipedia.org/wiki/Simple_matching_coefficient>`_

    .. [3] `Wikipedia: Rand Index <https://en.wikipedia.org/wiki/Rand_index>`_

    Examples
    --------
    Perfectly matching labelings have a score of 1 even

      >>> from sklearn.metrics.cluster import rand_score
      >>> rand_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Labelings that assign all classes members to the same clusters
    are complete but may not always be pure, hence penalized:

      >>> rand_score([0, 0, 1, 2], [0, 0, 1, 1])
      0.83...
    """
    pair_counts = pair_confusion_matrix(labels_true, labels_pred)
    agreements = pair_counts.diagonal().sum()
    total_pairs = pair_counts.sum()
    # Degenerate clusterings (no pairs, or every pair agreeing) count as a
    # perfect match: no clustering since the data is not split, or trivial
    # clustering where each document gets a unique cluster.
    if total_pairs == 0 or agreements == total_pairs:
        return 1.0
    return agreements / total_pairs
def adjusted_rand_score(labels_true, labels_pred):
    """Rand index adjusted for chance.

    The Rand Index considers every pair of samples and counts the pairs that
    are assigned consistently (same cluster / different clusters) in both the
    predicted and the true clusterings.  The raw RI score is then corrected
    for chance::

        ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)

    The adjusted index is close to 0.0 for random labelings independently of
    the number of clusters and samples, exactly 1.0 when the clusterings are
    identical (up to a permutation of the labels), and bounded below by -0.5
    for especially discordant clusterings.

    ARI is a symmetric measure::

        adjusted_rand_score(a, b) == adjusted_rand_score(b, a)

    Read more in the :ref:`User Guide <adjusted_rand_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,)
        Cluster labels to evaluate.

    Returns
    -------
    ARI : float
        Similarity score between -0.5 and 1.0. Random labelings have an ARI
        close to 0.0. 1.0 stands for perfect match.

    See Also
    --------
    adjusted_mutual_info_score : Adjusted Mutual Information.

    References
    ----------
    .. [Hubert1985] L. Hubert and P. Arabie, Comparing Partitions,
      Journal of Classification 1985
      https://link.springer.com/article/10.1007%2FBF01908075

    .. [Steinley2004] D. Steinley, Properties of the Hubert-Arabie
      adjusted Rand index, Psychological Methods 2004

    .. [wk] https://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index

    .. [Chacon] :doi:`Minimum adjusted Rand index for two clusterings of a given size,
      2022, J. E. Chacón and A. I. Rastrojo <10.1007/s11634-022-00491-w>`

    Examples
    --------
    Perfectly matching labelings have a score of 1 even with permuted labels::

      >>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Discordant labelings can score below zero::

      >>> adjusted_rand_score([0, 0, 1, 1], [0, 1, 0, 1])
      -0.5
    """
    pair_counts = pair_confusion_matrix(labels_true, labels_pred)
    # Work with plain Python ints so the products below can never overflow
    # or underflow a fixed-width integer type.
    tn = int(pair_counts[0, 0])
    fp = int(pair_counts[0, 1])
    fn = int(pair_counts[1, 0])
    tp = int(pair_counts[1, 1])

    # No disagreeing pairs at all (this also covers empty data): the two
    # clusterings agree perfectly.
    if fp == 0 and fn == 0:
        return 1.0

    return 2.0 * (tp * tn - fn * fp) / ((tp + fn) * (fn + tn) + (tp + fp) * (fp + tn))
def homogeneity_completeness_v_measure(labels_true, labels_pred, *, beta=1.0):
    """Compute the homogeneity and completeness and V-Measure scores at once.

    These metrics are based on normalized conditional-entropy measures of the
    clustering to evaluate, given ground-truth class labels of the same
    samples:

    * homogeneity: every cluster contains only members of a single class;
    * completeness: all members of a given class are assigned to the same
      cluster;
    * V-measure: the (beta-weighted) harmonic mean of the two, identical to
      :func:`normalized_mutual_info_score` with the arithmetic averaging
      method.

    All three scores lie between 0.0 and 1.0 (larger is better) and are
    invariant under any permutation of the label values.  V-measure is
    symmetric in ``labels_true``/``labels_pred``; homogeneity and
    completeness are not (swapping the arguments exchanges their roles).

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,)
        Cluster labels to evaluate.

    beta : float, default=1.0
        Ratio of weight attributed to ``homogeneity`` vs ``completeness``.
        If ``beta`` is greater than 1, ``completeness`` is weighted more
        strongly in the calculation. If ``beta`` is less than 1,
        ``homogeneity`` is weighted more strongly.

    Returns
    -------
    homogeneity : float
        Score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling.

    completeness : float
        Score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling.

    v_measure : float
        Harmonic mean of the first two.

    See Also
    --------
    homogeneity_score : Homogeneity metric of cluster labeling.
    completeness_score : Completeness metric of cluster labeling.
    v_measure_score : V-Measure (NMI with arithmetic mean option).
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)

    # Empty input: by convention a vacuous perfect score on all three axes.
    if len(labels_true) == 0:
        return 1.0, 1.0, 1.0

    class_entropy = entropy(labels_true)
    cluster_entropy = entropy(labels_pred)

    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    mutual_information = mutual_info_score(None, None, contingency=contingency)

    # Zero entropy means a single class (resp. cluster); the corresponding
    # conditional-entropy ratio is then defined to be perfect (1.0).
    homogeneity = mutual_information / class_entropy if class_entropy else 1.0
    completeness = mutual_information / cluster_entropy if cluster_entropy else 1.0

    if homogeneity + completeness == 0.0:
        v_measure = 0.0
    else:
        weighted_product = (1 + beta) * homogeneity * completeness
        v_measure = weighted_product / (beta * homogeneity + completeness)

    return homogeneity, completeness, v_measure
def homogeneity_score(labels_true, labels_pred):
    """Homogeneity metric of a cluster labeling given a ground truth.

    A clustering is homogeneous when every cluster contains only members of a
    single class.  The score lies between 0.0 and 1.0, with 1.0 standing for
    a perfectly homogeneous labeling, and is invariant under permutations of
    the label values.

    This metric is *not* symmetric: swapping ``labels_true`` with
    ``labels_pred`` returns the :func:`completeness_score`, which will differ
    in general.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,)
        Cluster labels to evaluate.

    Returns
    -------
    homogeneity : float
        Score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling.

    See Also
    --------
    completeness_score : Completeness metric of cluster labeling.
    v_measure_score : V-Measure (NMI with arithmetic mean option).

    References
    ----------
    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <https://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    Examples
    --------
    Perfect labelings are homogeneous::

      >>> from sklearn.metrics.cluster import homogeneity_score
      >>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0
    """
    # Homogeneity is the first component of the combined computation.
    homogeneity, _, _ = homogeneity_completeness_v_measure(labels_true, labels_pred)
    return homogeneity
def completeness_score(labels_true, labels_pred):
    """Compute completeness metric of a cluster labeling given a ground truth.

    A clustering is complete when all members of a given class are assigned
    to the same cluster.  The score lies between 0.0 and 1.0, with 1.0
    standing for a perfectly complete labeling, and is invariant under
    permutations of the label values.

    This metric is *not* symmetric: swapping ``labels_true`` with
    ``labels_pred`` returns the :func:`homogeneity_score`, which will differ
    in general.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,)
        Cluster labels to evaluate.

    Returns
    -------
    completeness : float
        Score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling.

    See Also
    --------
    homogeneity_score : Homogeneity metric of cluster labeling.
    v_measure_score : V-Measure (NMI with arithmetic mean option).

    References
    ----------
    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <https://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    Examples
    --------
    Perfect labelings are complete::

      >>> from sklearn.metrics.cluster import completeness_score
      >>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0
    """
    # Completeness is the second component of the combined computation.
    _, completeness, _ = homogeneity_completeness_v_measure(labels_true, labels_pred)
    return completeness
def v_measure_score(labels_true, labels_pred, *, beta=1.0):
    """V-measure cluster labeling given a ground truth.

    This score is identical to :func:`normalized_mutual_info_score` with
    the ``'arithmetic'`` option for averaging.

    The V-measure is the (beta-weighted) harmonic mean of homogeneity and
    completeness::

        v = (1 + beta) * homogeneity * completeness
             / (beta * homogeneity + completeness)

    The score is invariant under permutations of the label values and is
    symmetric: swapping ``labels_true`` with ``labels_pred`` returns the same
    value, which makes it useful for comparing two independent label
    assignments on the same data when no ground truth is known.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,)
        Cluster labels to evaluate.

    beta : float, default=1.0
        Ratio of weight attributed to ``homogeneity`` vs ``completeness``.
        If ``beta`` is greater than 1, ``completeness`` is weighted more
        strongly in the calculation. If ``beta`` is less than 1,
        ``homogeneity`` is weighted more strongly.

    Returns
    -------
    v_measure : float
        Score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling.

    See Also
    --------
    homogeneity_score : Homogeneity metric of cluster labeling.
    completeness_score : Completeness metric of cluster labeling.
    normalized_mutual_info_score : Normalized Mutual Information.

    References
    ----------
    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <https://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    Examples
    --------
    Perfect labelings are both homogeneous and complete, hence have
    score 1.0::

      >>> from sklearn.metrics.cluster import v_measure_score
      >>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0
    """
    # The V-measure is the third component of the combined computation.
    _, _, v_measure = homogeneity_completeness_v_measure(
        labels_true, labels_pred, beta=beta
    )
    return v_measure
def mutual_info_score(labels_true, labels_pred, *, contingency=None):
    """Mutual Information between two clusterings.

    The Mutual Information is a measure of the similarity between two labels
    of the same data. Where :math:`|U_i|` is the number of the samples
    in cluster :math:`U_i` and :math:`|V_j|` is the number of the
    samples in cluster :math:`V_j`, the Mutual Information
    between clusterings :math:`U` and :math:`V` is given as:

    .. math::

        MI(U,V)=\\sum_{i=1}^{|U|} \\sum_{j=1}^{|V|} \\frac{|U_i\\cap V_j|}{N}
        \\log\\frac{N|U_i \\cap V_j|}{|U_i||V_j|}

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching :math:`U` (i.e
    ``label_true``) with :math:`V` (i.e. ``label_pred``) will return the
    same score value. This can be useful to measure the agreement of two
    independent label assignments strategies on the same dataset when the
    real ground truth is not known.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets, called :math:`U` in
        the above formula.

    labels_pred : int array-like of shape (n_samples,)
        A clustering of the data into disjoint subsets, called :math:`V` in
        the above formula.

    contingency : {ndarray, sparse matrix} of shape \
            (n_classes_true, n_classes_pred), default=None
        A contingency matrix given by the :func:`contingency_matrix` function.
        If value is ``None``, it will be computed, otherwise the given value is
        used, with ``labels_true`` and ``labels_pred`` ignored.

    Returns
    -------
    mi : float
        Mutual information, a non-negative value, measured in nats using the
        natural logarithm.

    See Also
    --------
    adjusted_mutual_info_score : Adjusted against chance Mutual Information.
    normalized_mutual_info_score : Normalized Mutual Information.

    Notes
    -----
    The logarithm used is the natural logarithm (base-e).
    """
    if contingency is None:
        labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
        contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    else:
        # A caller-supplied contingency matrix is validated but otherwise
        # trusted; labels_true/labels_pred are ignored in that case.
        contingency = check_array(
            contingency,
            accept_sparse=["csr", "csc", "coo"],
            dtype=[int, np.int32, np.int64],
        )

    # Extract the non-zero cells: only they contribute to the MI sum below.
    if isinstance(contingency, np.ndarray):
        # For an array
        nzx, nzy = np.nonzero(contingency)
        nz_val = contingency[nzx, nzy]
    elif sp.issparse(contingency):
        # For a sparse matrix
        nzx, nzy, nz_val = sp.find(contingency)
    else:
        raise ValueError("Unsupported type for 'contingency': %s" % type(contingency))

    contingency_sum = contingency.sum()
    # Marginal totals: pi over rows (true classes), pj over columns
    # (predicted clusters).
    pi = np.ravel(contingency.sum(axis=1))
    pj = np.ravel(contingency.sum(axis=0))

    # Since MI <= min(H(X), H(Y)), any labelling with zero entropy, i.e. containing a
    # single cluster, implies MI = 0
    if pi.size == 1 or pj.size == 1:
        return 0.0

    log_contingency_nm = np.log(nz_val)
    contingency_nm = nz_val / contingency_sum
    # Don't need to calculate the full outer product, just for non-zeroes
    outer = pi.take(nzx).astype(np.int64, copy=False) * pj.take(nzy).astype(
        np.int64, copy=False
    )
    # log(N * n_ij / (a_i * b_j)) split into sums and differences of logs to
    # limit the size of intermediate products.
    log_outer = -np.log(outer) + log(pi.sum()) + log(pj.sum())
    mi = (
        contingency_nm * (log_contingency_nm - log(contingency_sum))
        + contingency_nm * log_outer
    )
    # Flush tiny values caused by floating-point cancellation to exactly 0.
    mi = np.where(np.abs(mi) < np.finfo(mi.dtype).eps, 0.0, mi)
    # MI is mathematically non-negative; the clip guards against residual
    # negative rounding error in the sum.
    return np.clip(mi.sum(), 0.0, None)
def adjusted_mutual_info_score(
    labels_true, labels_pred, *, average_method="arithmetic"
):
    """Adjusted Mutual Information between two clusterings.

    Adjusted Mutual Information (AMI) is an adjustment of the Mutual
    Information (MI) score to account for chance. It accounts for the fact that
    the MI is generally higher for two clusterings with a larger number of
    clusters, regardless of whether there is actually more information shared.
    For two clusterings :math:`U` and :math:`V`, the AMI is given as::

        AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [avg(H(U), H(V)) - E(MI(U, V))]

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching :math:`U` (``label_true``)
    with :math:`V` (``labels_pred``) will return the same score value. This can
    be useful to measure the agreement of two independent label assignments
    strategies on the same dataset when the real ground truth is not known.

    Be mindful that this function is an order of magnitude slower than other
    metrics, such as the Adjusted Rand Index.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets, called :math:`U` in
        the above formula.

    labels_pred : int array-like of shape (n_samples,)
        A clustering of the data into disjoint subsets, called :math:`V` in
        the above formula.

    average_method : str, default='arithmetic'
        How to compute the normalizer in the denominator. Possible options
        are 'min', 'geometric', 'arithmetic', and 'max'.

        .. versionadded:: 0.20

        .. versionchanged:: 0.22
           The default value of ``average_method`` changed from 'max' to
           'arithmetic'.

    Returns
    -------
    ami: float (upperlimited by 1.0)
        The AMI returns a value of 1 when the two partitions are identical
        (ie perfectly matched). Random partitions (independent labellings) have
        an expected AMI around 0 on average hence can be negative. The value is
        in adjusted nats (based on the natural logarithm).

    See Also
    --------
    adjusted_rand_score : Adjusted Rand Index.
    mutual_info_score : Mutual Information (not adjusted for chance).

    References
    ----------
    .. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
       Clusterings Comparison: Variants, Properties, Normalization and
       Correction for Chance, JMLR
       <http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_

    .. [2] `Wikipedia entry for the Adjusted Mutual Information
       <https://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_

    Examples
    --------
    Perfect labelings are both homogeneous and complete, hence have
    score 1.0::

      >>> from sklearn.metrics.cluster import adjusted_mutual_info_score
      >>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
      ... # doctest: +SKIP
      1.0
      >>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
      ... # doctest: +SKIP
      1.0

    If classes members are completely split across different clusters,
    the assignment is totally in-complete, hence the AMI is null::

      >>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
      ... # doctest: +SKIP
      0.0
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    classes = np.unique(labels_true)
    clusters = np.unique(labels_pred)
    # Special limit cases: no clustering since the data is not split.
    # It corresponds to both labellings having zero entropy.
    # This is a perfect match hence return 1.0.
    if (
        classes.shape[0] == clusters.shape[0] == 1
        or classes.shape[0] == clusters.shape[0] == 0
    ):
        return 1.0

    # Float contingency so downstream computations don't overflow ints.
    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    contingency = contingency.astype(np.float64, copy=False)
    # Calculate the MI for the two clusterings
    mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)
    # Calculate the expected value for the mutual information
    emi = expected_mutual_information(contingency, n_samples)
    # Calculate entropy for each labeling
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    normalizer = _generalized_average(h_true, h_pred, average_method)
    denominator = normalizer - emi
    # Avoid 0.0 / 0.0 when expectation equals maximum, i.e a perfect match.
    # normalizer should always be >= emi, but because of floating-point
    # representation, sometimes emi is slightly larger. Correct this
    # by preserving the sign: clamp the magnitude to at least one epsilon
    # on whichever side of zero the denominator already sits.
    if denominator < 0:
        denominator = min(denominator, -np.finfo("float64").eps)
    else:
        denominator = max(denominator, np.finfo("float64").eps)
    ami = (mi - emi) / denominator
    return ami
def normalized_mutual_info_score(
    labels_true, labels_pred, *, average_method="arithmetic"
):
    """Normalized Mutual Information between two clusterings.

    Normalized Mutual Information (NMI) scales the Mutual Information (MI)
    between 0 (no mutual information) and 1 (perfect correlation) by dividing
    it by a generalized mean of ``H(labels_true)`` and ``H(labels_pred)``,
    selected by ``average_method``.

    This measure is not adjusted for chance, so
    :func:`adjusted_mutual_info_score` might be preferred.

    The score is invariant under permutations of the label values and is
    symmetric in its two arguments, which makes it useful for comparing two
    independent label assignments on the same data when no ground truth is
    known.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    labels_pred : int array-like of shape (n_samples,)
        A clustering of the data into disjoint subsets.

    average_method : str, default='arithmetic'
        How to compute the normalizer in the denominator. Possible options
        are 'min', 'geometric', 'arithmetic', and 'max'.

        .. versionadded:: 0.20

        .. versionchanged:: 0.22
           The default value of ``average_method`` changed from 'geometric' to
           'arithmetic'.

    Returns
    -------
    nmi : float
        Score between 0.0 and 1.0 in normalized nats (based on the natural
        logarithm). 1.0 stands for perfectly complete labeling.

    See Also
    --------
    v_measure_score : V-Measure (NMI with arithmetic mean option).
    adjusted_rand_score : Adjusted Rand Index.
    adjusted_mutual_info_score : Adjusted Mutual Information (adjusted
        against chance).

    Examples
    --------
    Perfect labelings are both homogeneous and complete, hence have
    score 1.0::

      >>> from sklearn.metrics.cluster import normalized_mutual_info_score
      >>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
      ... # doctest: +SKIP
      1.0
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]

    # Both labellings trivial (a single cluster, or empty data): zero entropy
    # on each side, treated as a perfect match.
    if n_classes == n_clusters and n_classes in (0, 1):
        return 1.0

    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    contingency = contingency.astype(np.float64, copy=False)
    mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)

    # The single-cluster case was handled above, so mi == 0 cannot be a
    # perfect match here: the NMI is 0 regardless of the normalization.
    if mi == 0:
        return 0.0

    normalizer = _generalized_average(
        entropy(labels_true), entropy(labels_pred), average_method
    )
    return mi / normalizer
def fowlkes_mallows_score(labels_true, labels_pred, *, sparse=False):
    """Measure the similarity of two clusterings of a set of points.

    .. versionadded:: 0.18

    The Fowlkes-Mallows index (FMI) is the geometric mean of the pairwise
    precision and recall::

        FMI = TP / sqrt((TP + FP) * (TP + FN))

    where ``TP`` (true positives) counts pairs of points placed in the same
    cluster by both ``labels_true`` and ``labels_pred``, ``FP`` counts pairs
    grouped together in ``labels_true`` but not in ``labels_pred``, and
    ``FN`` counts pairs grouped together in ``labels_pred`` but not in
    ``labels_true``.

    The score ranges from 0 to 1; a high value indicates a good similarity
    between the two clusterings.

    Read more in the :ref:`User Guide <fowlkes_mallows_scores>`.

    Parameters
    ----------
    labels_true : int array, shape = (``n_samples``,)
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = (``n_samples``, )
        A clustering of the data into disjoint subsets.

    sparse : bool, default=False
        Compute contingency matrix internally with sparse matrix.

    Returns
    -------
    score : float
        The resulting Fowlkes-Mallows score.

    References
    ----------
    .. [1] `E. B. Fowkles and C. L. Mallows, 1983. "A method for comparing two
       hierarchical clusterings". Journal of the American Statistical
       Association
       <https://www.tandfonline.com/doi/abs/10.1080/01621459.1983.10478008>`_

    .. [2] `Wikipedia entry for the Fowlkes-Mallows Index
       <https://en.wikipedia.org/wiki/Fowlkes-Mallows_index>`_

    Examples
    --------
    Perfect labelings are both homogeneous and complete, hence have
    score 1.0::

      >>> from sklearn.metrics.cluster import fowlkes_mallows_score
      >>> fowlkes_mallows_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    (n_samples,) = labels_true.shape

    # int64 so the squared marginal sums below cannot overflow.
    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    contingency = contingency.astype(np.int64, copy=False)

    # tk ~ TP: sum of squared cell counts, minus the n_samples self-pairs.
    tk = np.dot(contingency.data, contingency.data) - n_samples
    if tk == 0.0:
        return 0.0

    # pk and qk are the analogous sums over the column and row marginals.
    pk = np.sum(np.asarray(contingency.sum(axis=0)).ravel() ** 2) - n_samples
    qk = np.sum(np.asarray(contingency.sum(axis=1)).ravel() ** 2) - n_samples
    # sqrt(tk/pk) * sqrt(tk/qk) == tk / sqrt(pk * qk), kept factored to
    # avoid overflowing the product.
    return np.sqrt(tk / pk) * np.sqrt(tk / qk)
def entropy(labels):
    """Compute the empirical entropy (in nats) of a labeling.

    Parameters
    ----------
    labels : array-like of shape (n_samples,), dtype=int
        The labels.

    Returns
    -------
    float
        The entropy of the label distribution; 1.0 for empty input (by
        convention) and 0.0 for a single distinct label.

    Notes
    -----
    The logarithm used is the natural logarithm (base-e).
    """
    # Empty labeling: the callers in this module treat this as a neutral
    # (perfect-match) case, hence the conventional 1.0.
    if len(labels) == 0:
        return 1.0

    _, inverse = np.unique(labels, return_inverse=True)
    counts = np.bincount(inverse).astype(np.float64)
    counts = counts[counts > 0]

    # A single distinct label carries no information.
    if counts.size == 1:
        return 0.0

    total = counts.sum()
    # Compute log(count / total) as log(count) - log(total) to limit
    # floating-point precision loss.
    return -np.sum((counts / total) * (np.log(counts) - log(total)))
| {
"content_hash": "bd9905143052194e8e4b62f041010a38",
"timestamp": "",
"source": "github",
"line_count": 1171,
"max_line_length": 87,
"avg_line_length": 35.12211784799317,
"alnum_prop": 0.6445973546002723,
"repo_name": "ivannz/scikit-learn",
"id": "2d99f848369cec0173c9743ecba95a6d551ba4a2",
"size": "41133",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sklearn/metrics/cluster/_supervised.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "670108"
},
{
"name": "Makefile",
"bytes": "1644"
},
{
"name": "Python",
"bytes": "10468527"
},
{
"name": "Shell",
"bytes": "42860"
}
],
"symlink_target": ""
} |
"""Builds the documentation from the source code.
This deletes the old documentation first.
"""
import logging
import os
import shutil
import sys
import shakaBuildHelpers
def build_docs(_):
  """Generates the API documentation with jsdoc, deleting any old output."""
  logging.info('Building the docs...')
  source_base = shakaBuildHelpers.get_source_base()
  # Remove stale output first; jsdoc recreates the directory on its own.
  shutil.rmtree(os.path.join(source_base, 'docs', 'api'), ignore_errors=True)
  # jsdoc resolves the config and mainpage paths relative to the source root.
  os.chdir(source_base)

  # Windows has a different command name.  The Unix launcher does not seem to
  # work on Cygwin, but the Windows one does.
  if shakaBuildHelpers.is_windows() or shakaBuildHelpers.is_cygwin():
    jsdoc_path = os.path.join('third_party', 'jsdoc', 'jsdoc.cmd')
  else:
    jsdoc_path = os.path.join('third_party', 'jsdoc', 'jsdoc')

  command = [jsdoc_path, '-c', 'docs/jsdoc.conf.json',
             '-R', 'docs/api-mainpage.md']
  return shakaBuildHelpers.execute_get_code(command)
# Script entry point: delegate to the shared helper runner so build_docs
# gets the standard logging/exit-code handling.
if __name__ == '__main__':
  shakaBuildHelpers.run_main(build_docs)
| {
"content_hash": "89c1a1fc91c10819ee51d4dff2f536a7",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 80,
"avg_line_length": 27.823529411764707,
"alnum_prop": 0.6976744186046512,
"repo_name": "brightcove/shaka-player",
"id": "7bbffd4aca9a06a94a3c6caa3a9447361cb7c556",
"size": "1568",
"binary": false,
"copies": "3",
"ref": "refs/heads/bc/dev",
"path": "build/docs.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3099"
},
{
"name": "JavaScript",
"bytes": "2158204"
},
{
"name": "Python",
"bytes": "62786"
}
],
"symlink_target": ""
} |
from nose.tools import assert_equal, assert_true, assert_almost_equal
import json
import requests
import re
import os
import yaml
file_dir_path = os.path.dirname(__file__)
TEST_ITEMS = os.path.join(file_dir_path,"test_items.yaml")
class TestGet(object):
def setUp(self):
"""Server Settings"""
self.url = 'http://localhost:8889'
self.api = requests.get(self.url + '/api/v1')
assert_true(self.api.ok)
with open(TEST_ITEMS) as test_items:
self.test_items = yaml.load(test_items)
def test_all_pages(self):
pages = json.loads(self.api.content)['pages']
# Check every url, that it gives a 200 OK response
error_pages = filter(lambda u: not requests.get(self.url + u).ok, pages)
assert_true(len(error_pages) == 0,
msg=('Pages resulted in error: {0} '.format(error_pages)))
def test_api_without_regexp(self):
pages = json.loads(self.api.content)['api']
have_regexp = re.compile('.*\(.+\).*')
# Filter out all url:s with regular expressions
# (don't know how to handle them just yet)
no_regexp_pages = filter(lambda x: have_regexp.match(x) is None,
pages)
# Check every url, that it gives a 200 OK response
error_pages = filter(lambda u: not requests.get(self.url + u).ok,
no_regexp_pages)
assert_true(len(error_pages) == 0,
msg=('Requests resulted in error: {0} '.format(error_pages)))
def test_api_test(self):
id = str(self.test_items['test'])
r = requests.get(self.url + '/api/v1' + '/test/' + id)
assert_true(r.ok)
def test_api_samples(self):
""" Testing:
'/api/v1/samples/start/([^/]*)$'
'/api/v1/samples/([^/]*)$',
'/api/v1/sample_summary/([^/]*)$',
'/api/v1/sample_run_counts/(\\w+)?',
'/api/v1/sample_readcount/(\\w+)?',
'/api/v1/sample_insert_sizes/([^/]*)$',
'/api/v1/sample_info/([^/]*)$',
'/api/v1/sample_coverage/([^/]*)$',
'/api/v1/sample_alignment/([^/]*)$',
'/api/v1/qc/([^/]*)$'
"""
sample_id1 = self.test_items['samples']['sample_id1']
sample_id2 = self.test_items['samples']['sample_id2']
sample_run_id = self.test_items['samples']['sample_run_id']
url = self.url + '/api/v1/'
urls = [url + 'samples/start/' + sample_id1,
url + 'samples/start/' + sample_id2,
url + 'samples/' + sample_id1,
url + 'samples/' + sample_id2,
url + 'sample_summary/' + sample_run_id,
url + 'sample_run_counts/' + sample_id1,
url + 'sample_run_counts/' + sample_id2,
url + 'sample_readcount/' + sample_id1,
url + 'sample_readcount/' + sample_id2,
url + 'sample_insert_sizes/' + sample_run_id,
url + 'sample_info/' + sample_id1,
url + 'sample_info/' + sample_id2,
url + 'sample_coverage/' + sample_run_id,
url + 'sample_alignment/' + sample_run_id,
url + 'qc/' + sample_run_id]
error_pages = filter(lambda u: not requests.get(u).ok, urls)
assert_true(len(error_pages) == 0,
msg=('Sample requests resulted in error {0} '.format(error_pages)))
def test_api_quotas(self):
    """ Testing:
    '/api/v1/quotas/(\\w+)?'
    """
    quota_id = self.test_items['quota']['quota_id']
    url = self.url + '/api/v1/'
    urls = [url + 'quotas/' + quota_id]
    # List comprehension instead of filter() so len() works on Python 3 too.
    error_pages = [u for u in urls if not requests.get(u).ok]
    assert_true(len(error_pages) == 0,
                msg=('Quota requests resulted in error {0} '.format(error_pages)))
def test_api_flowcells(self):
    """ Testing:
    '/api/v1/flowcells/([^/]*)$'
    '/api/v1/flowcell_qc/([^/]*)$',
    '/api/v1/flowcell_q30/([^/]*)$',
    '/api/v1/flowcell_info/([^/]*)$',
    '/api/v1/flowcell_demultiplex/([^/]*)$',
    """
    # NOTE: the docstring above originally opened with four quotes (""""),
    # which made its first character a stray '"'.
    flowcell_id = self.test_items['flowcells']['flowcell_id']
    url = self.url + '/api/v1/'
    urls = [url + 'flowcells/' + flowcell_id,
            url + 'flowcell_qc/' + flowcell_id,
            url + 'flowcell_q30/' + flowcell_id,
            url + 'flowcell_info/' + flowcell_id,
            url + 'flowcell_demultiplex/' + flowcell_id]
    # List comprehensions instead of filter() so len() works on Python 3 too.
    error_pages = [u for u in urls if not requests.get(u).ok]
    assert_true(len(error_pages) == 0,
                msg=('Flowcell requests resulted in error {0} '.format(error_pages)))
    non_error_url = [u for u in urls if u not in error_pages]
    empty_json = [u for u in non_error_url
                  if len(json.loads(requests.get(u).content)) == 0]
    assert_true(len(empty_json) == 0,
                msg=('Flowcell requests are empty: {0} '.format(empty_json)))
def test_api_misc(self):
    """ Testing:
    '/api/v1/project_summary/([^/]*)$'
    '/api/v1/application/([^/]*)$',
    """
    project_id = self.test_items['projects']['project_id']
    application = self.test_items['application']['application']
    url = self.url + '/api/v1/'
    urls = [url + 'project_summary/' + project_id,
            url + 'application/' + application]
    # List comprehensions instead of filter() so len() works on Python 3 too.
    error_pages = [u for u in urls if not requests.get(u).ok]
    assert_true(len(error_pages) == 0,
                msg=('Misc requests resulted in error {0} '.format(error_pages)))
    non_error_url = [u for u in urls if u not in error_pages]
    empty_json = [u for u in non_error_url
                  if len(json.loads(requests.get(u).content)) == 0]
    assert_true(len(empty_json) == 0,
                msg=('Misc requests are empty: {0} '.format(empty_json)))
| {
"content_hash": "eef99ef84b4a39a9227947a1594b579d",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 99,
"avg_line_length": 39.17763157894737,
"alnum_prop": 0.5286314021830395,
"repo_name": "kate-v-stepanova/genomics-status",
"id": "3fb38698709b917107ddbf7b98610256e6e35772",
"size": "5977",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_integration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21846"
},
{
"name": "HTML",
"bytes": "215390"
},
{
"name": "JavaScript",
"bytes": "241310"
},
{
"name": "Python",
"bytes": "221859"
}
],
"symlink_target": ""
} |
from typing import Any, Optional, Union
import pandas as pd
import ibis.expr.analysis as an
import ibis.expr.operations as ops
import ibis.util
from ibis.backends.pandas.core import execute
from ibis.backends.pandas.execution import constants
from ibis.expr.scope import Scope
def get_grouping(grouper):
    """Collapse a single-element grouping list to its sole element.

    This is such an annoying hack: callers always hand us a list, but a
    lone grouping key must be passed along as a scalar.
    """
    assert isinstance(grouper, list)
    return grouper[0] if len(grouper) == 1 else grouper
def get_join_suffix_for_op(op: ops.TableColumn, join_op: ops.Join):
    """Return the column suffix (left or right) for the side of the join
    that `op`'s parent table belongs to.

    Raises KeyError if the parent table is on neither side.
    """
    (root_table,) = an.find_immediate_parent_tables(op)
    left_root, right_root = an.find_immediate_parent_tables(
        [join_op.left, join_op.right]
    )
    suffix_by_root = {
        left_root: constants.LEFT_JOIN_SUFFIX,
        right_root: constants.RIGHT_JOIN_SUFFIX,
    }
    return suffix_by_root[root_table]
def compute_sort_key(key, data, timecontext, scope=None, **kwargs):
    """Resolve a sort key to a column name plus an optional new column.

    Returns a ``(name, column)`` pair.  ``column`` is ``None`` when the key
    already names a column of ``data``; otherwise it is a freshly executed
    series registered under a generated unique name.
    """
    # A plain string already names a column.
    if isinstance(key, str):
        return key, None
    # An expression whose name matches an existing column of `data`.
    if key.name in data:
        return key.name, None
    # Otherwise execute the expression against `data` in a (possibly merged)
    # scope and expose the result under a fresh GUID name.
    if scope is None:
        scope = Scope()
    scope = scope.merge_scopes(
        Scope({t: data}, timecontext)
        for t in an.find_immediate_parent_tables(key)
    )
    computed = execute(key, scope=scope, **kwargs)
    computed.name = ibis.util.guid()
    return computed.name, computed
def compute_sorted_frame(df, order_by, group_by=(), timecontext=None, **kwargs):
    """Stably sort ``df`` by its grouping keys, then its ordering keys.

    Returns a 3-tuple of the sorted frame, the computed grouping key names,
    and the computed ordering key names.
    """
    # Grouping keys always sort ascending and precede the explicit keys.
    keyed = [(value, True) for value in group_by]
    keyed.extend((key, key.ascending) for key in order_by)

    temporary_columns = {}
    computed_sort_keys = []
    ascending = []
    for key, is_ascending in keyed:
        name, extra_column = compute_sort_key(key, df, timecontext, **kwargs)
        computed_sort_keys.append(name)
        ascending.append(is_ascending)
        if extra_column is not None:
            temporary_columns[name] = extra_column

    # mergesort keeps the sort stable across the chained keys.
    result = df.assign(**temporary_columns).sort_values(
        computed_sort_keys, ascending=ascending, kind='mergesort'
    )
    # TODO: we'll eventually need to return this frame with the temporary
    # columns and drop them in the caller (maybe using post_execute?)
    ngrouping_keys = len(group_by)
    return (
        result,
        computed_sort_keys[:ngrouping_keys],
        computed_sort_keys[ngrouping_keys:],
    )
def coerce_to_output(
    result: Any, node: ops.Node, index: Optional[pd.Index] = None
) -> Union[pd.Series, pd.DataFrame]:
    """Cast the result to either a Series or DataFrame.

    This method casts result of an execution to a Series or DataFrame,
    depending on the type of the expression and shape of the result.

    Parameters
    ----------
    result: Any
        The result to cast
    node: ibis.expr.operations.Node
        The operation node associated with the result
    index: pd.Index
        Optional. If passed, scalar results will be broadcasted according
        to the index.

    Returns
    -------
    result: A Series or DataFrame

    Examples
    --------
    For dataframe outputs, see ``ibis.util.coerce_to_dataframe``.

    >>> coerce_to_output(pd.Series(1), node)
    0    1
    Name: result, dtype: int64
    >>> coerce_to_output(1, node)
    0    1
    Name: result, dtype: int64
    >>> coerce_to_output(1, node, [1,2,3])
    1    1
    2    1
    3    1
    Name: result, dtype: int64
    >>> coerce_to_output([1,2,3], node)
    0    [1, 2, 3]
    Name: result, dtype: object
    """
    if isinstance(result, pd.DataFrame):
        # DataFrame output: one row-dict per element of the resulting Series.
        rows = result.to_dict(orient="records")
        return pd.Series(rows, name=node.name)
    # columnar result
    if isinstance(result, pd.Series):
        return result.rename(node.name)
    # Scalar result.  BUGFIX: `index` was previously accepted and documented
    # (see the third example above) but silently ignored; broadcast over it
    # when it is provided.
    if index is None:
        return pd.Series([result], name=node.name)
    return pd.Series([result] * len(index), index=index, name=node.name)
| {
"content_hash": "693ee85fd1e607fdce642c3c7860712d",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 87,
"avg_line_length": 28.94074074074074,
"alnum_prop": 0.6316867161504991,
"repo_name": "cpcloud/ibis",
"id": "0c8c0fb63c1bae7855f61f53677cf41a2a367730",
"size": "3907",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ibis/backends/pandas/execution/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "44931"
},
{
"name": "CMake",
"bytes": "1862"
},
{
"name": "Dockerfile",
"bytes": "70"
},
{
"name": "JavaScript",
"bytes": "2713"
},
{
"name": "Nix",
"bytes": "12592"
},
{
"name": "Python",
"bytes": "2958224"
},
{
"name": "Shell",
"bytes": "3167"
}
],
"symlink_target": ""
} |
"""
Time period and interval parameter parsers for Graphite backend.
"""
from datetime import datetime, timedelta
import re
class TimeParserValueError(ValueError):
    """
    Raised when a time or interval string cannot be parsed.
    """


# "<count><unit>" — digits followed by a unit spelling.
INTERVAL_RE = re.compile(r'(?P<count>\d+)(?P<unit>.+)')

# Maps every accepted unit spelling to its length in seconds.
UNIT_VALUES = {}


def _set_unit_value(value, *names):
    """Register ``value`` (in seconds) under each of the given spellings."""
    for name in names:
        UNIT_VALUES[name] = value


_set_unit_value(1, "s", "second", "seconds")
_set_unit_value(60, "min", "mins", "minute", "minutes")
_set_unit_value(3600, "h", "hour", "hours")
_set_unit_value(86400, "d", "day", "days")
_set_unit_value(7 * 86400, "w", "week", "weeks")
_set_unit_value(30 * 86400, "mon", "month", "months")
_set_unit_value(365 * 86400, "y", "year", "years")


def interval_to_seconds(interval_str):
    """
    Parse a time interval specifier of the form "<count><unit>" into the
    number of seconds contained in the interval.

    NOTE: This is stricter than Graphite's parser, which accepts any string
    starting with the shortest prefix for a unit.
    """
    match = INTERVAL_RE.match(interval_str)
    if match is None:
        raise TimeParserValueError(
            "Invalid interval string: %r" % (interval_str,))
    groups = match.groupdict()
    multiplier = UNIT_VALUES.get(groups["unit"])
    if multiplier is None:
        raise TimeParserValueError(
            "Invalid interval string: %r" % (interval_str,))
    return int(groups["count"]) * multiplier
def _call_or_raise(exc, func, *args, **kw):
"""
Call func(*args, **kw) and catch any exceptions, raising exc instead.
This exists to avoid a bunch of boilerplate try/except blocks in
parse_absolute_time()
"""
try:
return func(*args, **kw)
except:
raise exc
def parse_absolute_time(time_str):
    """
    Parse a Graphite-compatible absolute time specifier into a datetime object.

    This accepts `HH:MM_YYYYMMDD` and `YYYYMMDD` formats as well as unix
    timestamps.
    """
    # Every failure mode below is reported uniformly via this exception,
    # passed to _call_or_raise().
    exc = TimeParserValueError("Invalid time string: %r" % (time_str,))
    if ":" in time_str:
        return _call_or_raise(exc, datetime.strptime, time_str, "%H:%M_%Y%m%d")
    if not time_str.isdigit():
        raise exc
    # This is the same test graphite uses to determine whether a string is
    # a unix timestamp or a `YYYYMMDD` string. It's important that we make
    # the same decisions as graphite because differences could let very
    # expensive requests slip through and potentially break either the API
    # or graphite.
    looks_like_date = (
        len(time_str) == 8
        and int(time_str[:4]) > 1900
        and int(time_str[4:6]) < 13
        and int(time_str[6:]) < 32
    )
    if looks_like_date:
        return _call_or_raise(exc, datetime.strptime, time_str, "%Y%m%d")
    return _call_or_raise(exc, datetime.utcfromtimestamp, int(time_str))
def parse_time(time_str, now):
    """
    Parse a Graphite-compatible absolute or relative time specifier into a
    datetime object.

    NOTE: This is stricter than Graphite's parser and accepts a narrower
    variety of formats. Currently, only relative time specifiers are
    supported.
    """
    # Symbolic names: "now"/"today" return `now` itself.
    if time_str in ("now", "today"):
        return now
    day_offsets = {"yesterday": -1, "tomorrow": 1}
    if time_str in day_offsets:
        return now + timedelta(days=day_offsets[time_str])
    # "-<count><unit>" means that far in the past relative to `now`.
    if time_str.startswith("-"):
        return now - timedelta(seconds=interval_to_seconds(time_str[1:]))
    return parse_absolute_time(time_str)
| {
"content_hash": "cfb287449b15dae294cf5818abc314b1",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 79,
"avg_line_length": 32.321739130434786,
"alnum_prop": 0.6203927898843153,
"repo_name": "praekelt/go-metrics-api",
"id": "74370a9cbe57484465828489be45be13ff2919a4",
"size": "3717",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "go_metrics/metrics/graphite_time_parser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "62867"
},
{
"name": "Shell",
"bytes": "1051"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import pytest
from stack import Stack
def test_initialization():
    """A freshly constructed Stack wraps an empty linked list."""
    fresh = Stack()
    assert isinstance(fresh, Stack)
    assert fresh.list.head is None
def test_push():
    """push() places the value at the head of the backing list."""
    fresh = Stack()
    fresh.push(5)
    assert fresh.list.head.value == 5
def test_pop():
    """pop() returns the most recently pushed value."""
    fresh = Stack()
    fresh.push(5)
    assert fresh.pop() == 5
def test_initialization_list():
    """Constructing from an iterable pushes items in order, so pops are LIFO."""
    loaded = Stack([5, 10, 'string'])
    for expected in ('string', 10, 5):
        assert loaded.pop() == expected
def test_pop_empty():
    """pop() on an empty stack raises IndexError."""
    empty = Stack()
    with pytest.raises(IndexError):
        empty.pop()
| {
"content_hash": "994640418c8236018ba0bf95c235713c",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 39,
"avg_line_length": 18.314285714285713,
"alnum_prop": 0.62402496099844,
"repo_name": "ndraper2/data-structures",
"id": "fb4955ae2ab2dc12433a7d32a98d94052644dfce",
"size": "665",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test_stack.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29350"
}
],
"symlink_target": ""
} |
from genestack import utils
from genestack.genestack_exceptions import GenestackException
from genestack.java import java_object
class QueryRange(object):
    """
    Class that represents the offset-limit pair used in queries as query bounds.
    """
    CLASS_NAME = 'com.genestack.api.queries.QueryRange'

    def __init__(self, offset, limit, max_page_size):
        # Each bound must be an integer (int or long on Python 2); validated
        # in the same order as before: offset, limit, max_page_size.
        for bound in (offset, limit, max_page_size):
            utils.validate_type(bound, (int, long))
        if offset < 0 or limit <= 0:
            raise GenestackException('Incorrect query bounds')
        if limit > max_page_size:
            raise GenestackException('Maximum page size exceeded')
        self.offset = offset
        self.limit = limit

    def as_java_object(self):
        """Serialize this range into its Java-side object representation."""
        return java_object(self.CLASS_NAME, {'offset': self.offset, 'limit': self.limit})
| {
"content_hash": "7dd3bc2e9d7ddb2500237e5be2219751",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 89,
"avg_line_length": 34.53846153846154,
"alnum_prop": 0.6648106904231625,
"repo_name": "genestack/task-library",
"id": "23406cf0243ee47099a2ec97fbf3c054c0795f89",
"size": "923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "genestack/query_range.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "252859"
}
],
"symlink_target": ""
} |
"""This module contains an object that represents a change in the Telegram message auto
deletion.
"""
from telegram._telegramobject import TelegramObject
from telegram._utils.types import JSONDict
class MessageAutoDeleteTimerChanged(TelegramObject):
    """This object represents a service message about a change in auto-delete timer settings.

    Objects of this class are comparable in terms of equality. Two objects of this class are
    considered equal, if their :attr:`message_auto_delete_time` is equal.

    .. versionadded:: 13.4

    Args:
        message_auto_delete_time (:obj:`int`): New auto-delete time for messages in the
            chat.

    Attributes:
        message_auto_delete_time (:obj:`int`): New auto-delete time for messages in the
            chat.
    """

    # Only this one attribute per instance; no per-instance __dict__.
    __slots__ = ("message_auto_delete_time",)

    def __init__(
        self,
        message_auto_delete_time: int,
        *,
        api_kwargs: JSONDict = None,
    ):
        super().__init__(api_kwargs=api_kwargs)
        self.message_auto_delete_time = message_auto_delete_time

        # Equality and hashing are based solely on the timer value
        # (see the class docstring).
        self._id_attrs = (self.message_auto_delete_time,)
| {
"content_hash": "f7a248359ce8a513a14988cc6cf4761a",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 93,
"avg_line_length": 29.94736842105263,
"alnum_prop": 0.663444639718805,
"repo_name": "tzpBingo/github-trending",
"id": "a6e6e67e6cfedcbab97549cd42dfb5ee99d2a57a",
"size": "1947",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "codespace/python/telegram/_messageautodeletetimerchanged.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "11470"
},
{
"name": "HTML",
"bytes": "1543"
},
{
"name": "Python",
"bytes": "49985109"
},
{
"name": "Shell",
"bytes": "18039"
}
],
"symlink_target": ""
} |
@app.task(bind=True)
def random_site(self):
    """Fetch a random voat.co page; retry after 5 seconds on network errors."""
    try:
        response = requests.get('https://voat.co/random')
    except requests.RequestException as exc:
        # Network hiccup: ask celery to re-run this task shortly.
        raise self.retry(exc=exc, countdown=5)
    return response.content.decode('utf-8')
| {
"content_hash": "0d3f125b83a5a16d1d4668c25ba6dfee",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 46,
"avg_line_length": 30.285714285714285,
"alnum_prop": 0.7216981132075472,
"repo_name": "blakev/hackustate-distributed-python",
"id": "604e12536702cd1d8aa9f70b37c885294b05ecb1",
"size": "212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/tasks_1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3760"
},
{
"name": "HTML",
"bytes": "2298"
},
{
"name": "Python",
"bytes": "2166"
}
],
"symlink_target": ""
} |
import sys

if sys.version_info < (3, 7):
    # This is needed because the Python Kubernetes client >= 12.0 contains a logging object, meaning that
    # v1.Pod et al. are not pickleable on Python 3.6.
    # Python 3.7 added this via https://bugs.python.org/issue30520 in 2017 -- but Python 3.6 doesn't have this
    # method.
    # This is duplicated/backported from airflow.logging_config in 2.2, but by having it here as well it means
    # that we can update the version used in this provider and have it work for older versions
    import copyreg
    import logging

    def _reduce_Logger(logger):
        # Only the canonical logger registered for a name can be pickled;
        # unpickling reconstructs it by name via logging.getLogger.
        if logging.getLogger(logger.name) is not logger:
            import pickle

            raise pickle.PicklingError('logger cannot be pickled')
        return logging.getLogger, (logger.name,)

    def _reduce_RootLogger(logger):
        # The root logger is reconstructed by calling getLogger() with no name.
        return logging.getLogger, ()

    # Register the reducers only once, in case something else already did.
    if logging.Logger not in copyreg.dispatch_table:
        copyreg.pickle(logging.Logger, _reduce_Logger)
        copyreg.pickle(logging.RootLogger, _reduce_RootLogger)
| {
"content_hash": "05386e95a2fc618c165df67276654b5b",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 110,
"avg_line_length": 38.96296296296296,
"alnum_prop": 0.6929657794676806,
"repo_name": "mistercrunch/airflow",
"id": "0998e31143fc8efe33568df27509dadd7f100f61",
"size": "1839",
"binary": false,
"copies": "5",
"ref": "refs/heads/main",
"path": "airflow/providers/cncf/kubernetes/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36341"
},
{
"name": "HTML",
"bytes": "99243"
},
{
"name": "JavaScript",
"bytes": "891460"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "773270"
},
{
"name": "Shell",
"bytes": "5659"
}
],
"symlink_target": ""
} |
"""
Tests for L{twisted.web.twcgi}.
"""
import sys
import os
import json
from io import BytesIO
from twisted.trial import unittest
from twisted.internet import reactor, interfaces, error
from twisted.python import util, failure, log
from twisted.web.http import NOT_FOUND, INTERNAL_SERVER_ERROR
from twisted.web import client, twcgi, server, resource, http_headers
from twisted.web.test._util import _render
from twisted.web.test.test_web import DummyRequest
DUMMY_CGI = '''\
print "Header: OK"
print
print "cgi output"
'''
DUAL_HEADER_CGI = '''\
print "Header: spam"
print "Header: eggs"
print
print "cgi output"
'''
BROKEN_HEADER_CGI = '''\
print "XYZ"
print
print "cgi output"
'''
SPECIAL_HEADER_CGI = '''\
print "Server: monkeys"
print "Date: last year"
print
print "cgi output"
'''
READINPUT_CGI = '''\
# this is an example of a correctly-written CGI script which reads a body
# from stdin, which only reads env['CONTENT_LENGTH'] bytes.
import os, sys
body_length = int(os.environ.get('CONTENT_LENGTH',0))
indata = sys.stdin.read(body_length)
print "Header: OK"
print
print "readinput ok"
'''
READALLINPUT_CGI = '''\
# this is an example of the typical (incorrect) CGI script which expects
# the server to close stdin when the body of the request is complete.
# A correct CGI should only read env['CONTENT_LENGTH'] bytes.
import sys
indata = sys.stdin.read()
print "Header: OK"
print
print "readallinput ok"
'''
NO_DUPLICATE_CONTENT_TYPE_HEADER_CGI = '''\
print "content-type: text/cgi-duplicate-test"
print
print "cgi output"
'''
HEADER_OUTPUT_CGI = '''\
import json
import os
print("")
print("")
vals = {x:y for x,y in os.environ.items() if x.startswith("HTTP_")}
print(json.dumps(vals))
'''
class PythonScript(twcgi.FilteredScript):
    """
    A CGI script run through the Python interpreter executing these tests.
    """
    filter = sys.executable
class CGITests(unittest.TestCase):
    """
    Tests for L{twcgi.FilteredScript}.
    """

    if not interfaces.IReactorProcess.providedBy(reactor):
        skip = "CGI tests require a functional reactor.spawnProcess()"

    def startServer(self, cgi):
        """
        Serve the CGI script at C{cgi} under /cgi on an ephemeral TCP port
        and return the port number.
        """
        root = resource.Resource()
        cgipath = util.sibpath(__file__, cgi)
        root.putChild("cgi", PythonScript(cgipath))
        site = server.Site(root)
        self.p = reactor.listenTCP(0, site)
        return self.p.getHost().port

    def tearDown(self):
        # Stop the listening port if startServer() was called by the test.
        if getattr(self, 'p', None):
            return self.p.stopListening()

    def writeCGI(self, source):
        """
        Write C{source} to a temporary file and return its absolute path.
        """
        cgiFilename = os.path.abspath(self.mktemp())
        with open(cgiFilename, 'wt') as cgiFile:
            cgiFile.write(source)
        return cgiFilename

    def testCGI(self):
        """
        A simple CGI script's output is served as the response body.
        """
        cgiFilename = self.writeCGI(DUMMY_CGI)
        portnum = self.startServer(cgiFilename)
        d = client.Agent(reactor).request(
            "GET", 'http://localhost:%d/cgi' % (portnum,))
        d.addCallback(client.readBody)
        d.addCallback(self._testCGI_1)
        return d

    def _testCGI_1(self, res):
        # The body is the script's output followed by the platform newline.
        self.assertEqual(res, "cgi output" + os.linesep)

    def test_protectedServerAndDate(self):
        """
        If the CGI script emits a I{Server} or I{Date} header, these are
        ignored.
        """
        cgiFilename = self.writeCGI(SPECIAL_HEADER_CGI)
        portnum = self.startServer(cgiFilename)
        url = "http://localhost:%d/cgi" % (portnum,)
        agent = client.Agent(reactor)
        d = agent.request(b"GET", url)
        d.addCallback(discardBody)
        def checkResponse(response):
            self.assertNotIn('monkeys',
                             response.headers.getRawHeaders('server'))
            self.assertNotIn('last year',
                             response.headers.getRawHeaders('date'))
        d.addCallback(checkResponse)
        return d

    def test_noDuplicateContentTypeHeaders(self):
        """
        If the CGI script emits a I{content-type} header, make sure that the
        server doesn't add an additional (duplicate) one, as per ticket 4786.
        """
        cgiFilename = self.writeCGI(NO_DUPLICATE_CONTENT_TYPE_HEADER_CGI)
        portnum = self.startServer(cgiFilename)
        url = "http://localhost:%d/cgi" % (portnum,)
        agent = client.Agent(reactor)
        d = agent.request(b"GET", url)
        d.addCallback(discardBody)
        def checkResponse(response):
            self.assertEqual(
                response.headers.getRawHeaders('content-type'),
                ['text/cgi-duplicate-test'])
            return response
        d.addCallback(checkResponse)
        return d

    def test_noProxyPassthrough(self):
        """
        The CGI script is never called with the Proxy header passed through.
        """
        cgiFilename = self.writeCGI(HEADER_OUTPUT_CGI)
        portnum = self.startServer(cgiFilename)
        url = "http://localhost:%d/cgi" % (portnum,)
        agent = client.Agent(reactor)
        headers = http_headers.Headers({"Proxy": ["foo"],
                                        "X-Innocent-Header": ["bar"]})
        d = agent.request(b"GET", url, headers=headers)
        def checkResponse(response):
            # The CGI dumped its HTTP_* environment as JSON; Proxy must not
            # appear, while the innocent header must.
            headers = json.loads(response)
            self.assertEqual(
                set(headers.keys()),
                {"HTTP_HOST", "HTTP_CONNECTION", "HTTP_X_INNOCENT_HEADER"})
        d.addCallback(client.readBody)
        d.addCallback(checkResponse)
        return d

    def test_duplicateHeaderCGI(self):
        """
        If a CGI script emits two instances of the same header, both are sent in
        the response.
        """
        cgiFilename = self.writeCGI(DUAL_HEADER_CGI)
        portnum = self.startServer(cgiFilename)
        url = "http://localhost:%d/cgi" % (portnum,)
        agent = client.Agent(reactor)
        d = agent.request(b"GET", url)
        d.addCallback(discardBody)
        def checkResponse(response):
            self.assertEqual(
                response.headers.getRawHeaders('header'), ['spam', 'eggs'])
        d.addCallback(checkResponse)
        return d

    def test_malformedHeaderCGI(self):
        """
        Check for the error message in the duplicated header
        """
        cgiFilename = self.writeCGI(BROKEN_HEADER_CGI)
        portnum = self.startServer(cgiFilename)
        url = "http://localhost:%d/cgi" % (portnum,)
        agent = client.Agent(reactor)
        d = agent.request(b"GET", url)
        d.addCallback(discardBody)
        # Collect log messages so the warning about the bad header can be
        # asserted on after the request completes.
        loggedMessages = []
        def addMessage(eventDict):
            loggedMessages.append(log.textFromEventDict(eventDict))
        log.addObserver(addMessage)
        self.addCleanup(log.removeObserver, addMessage)
        def checkResponse(ignored):
            self.assertIn("ignoring malformed CGI header: 'XYZ'",
                          loggedMessages)
        d.addCallback(checkResponse)
        return d

    def testReadEmptyInput(self):
        """
        A correctly-written CGI reading CONTENT_LENGTH bytes works for a
        GET request with no body.
        """
        cgiFilename = os.path.abspath(self.mktemp())
        with open(cgiFilename, 'wt') as cgiFile:
            cgiFile.write(READINPUT_CGI)
        portnum = self.startServer(cgiFilename)
        agent = client.Agent(reactor)
        d = agent.request(b"GET", "http://localhost:%d/cgi" % (portnum,))
        d.addCallback(client.readBody)
        d.addCallback(self._testReadEmptyInput_1)
        return d
    testReadEmptyInput.timeout = 5

    def _testReadEmptyInput_1(self, res):
        self.assertEqual(res, "readinput ok%s" % os.linesep)

    def testReadInput(self):
        """
        A correctly-written CGI reading CONTENT_LENGTH bytes works for a
        POST request with a body.
        """
        cgiFilename = os.path.abspath(self.mktemp())
        with open(cgiFilename, 'wt') as cgiFile:
            cgiFile.write(READINPUT_CGI)
        portnum = self.startServer(cgiFilename)
        agent = client.Agent(reactor)
        d = agent.request(
            uri="http://localhost:%d/cgi" % (portnum,),
            method=b"POST",
            bodyProducer=client.FileBodyProducer(
                BytesIO(b"Here is your stdin")),
        )
        d.addCallback(client.readBody)
        d.addCallback(self._testReadInput_1)
        return d
    testReadInput.timeout = 5

    def _testReadInput_1(self, res):
        self.assertEqual(res, "readinput ok%s" % os.linesep)

    def testReadAllInput(self):
        """
        A CGI that reads stdin until EOF (rather than CONTENT_LENGTH bytes)
        still completes for a POST request.
        """
        cgiFilename = os.path.abspath(self.mktemp())
        with open(cgiFilename, 'wt') as cgiFile:
            cgiFile.write(READALLINPUT_CGI)
        portnum = self.startServer(cgiFilename)
        d = client.Agent(reactor).request(
            uri="http://localhost:%d/cgi" % (portnum,),
            method=b"POST",
            bodyProducer=client.FileBodyProducer(
                BytesIO(b"Here is your stdin")),
        )
        d.addCallback(client.readBody)
        d.addCallback(self._testReadAllInput_1)
        return d
    testReadAllInput.timeout = 5

    def _testReadAllInput_1(self, res):
        self.assertEqual(res, "readallinput ok%s" % os.linesep)

    def test_useReactorArgument(self):
        """
        L{twcgi.FilteredScript.runProcess} uses the reactor passed as an
        argument to the constructor.
        """
        class FakeReactor:
            """
            A fake reactor recording whether spawnProcess is called.
            """
            called = False
            def spawnProcess(self, *args, **kwargs):
                """
                Set the C{called} flag to C{True} if C{spawnProcess} is called.

                @param args: Positional arguments.
                @param kwargs: Keyword arguments.
                """
                self.called = True

        fakeReactor = FakeReactor()
        request = DummyRequest(['a', 'b'])
        resource = twcgi.FilteredScript("dummy-file", reactor=fakeReactor)
        _render(resource, request)
        self.assertTrue(fakeReactor.called)
class CGIScriptTests(unittest.TestCase):
    """
    Tests for L{twcgi.CGIScript}.
    """

    def test_pathInfo(self):
        """
        L{twcgi.CGIScript.render} sets the process environment I{PATH_INFO} from
        the request path.
        """
        class FakeReactor:
            """
            A fake reactor recording the environment passed to spawnProcess.
            """
            def spawnProcess(self, process, filename, args, env, wdir):
                """
                Store the C{env} L{dict} to an instance attribute.

                @param process: Ignored
                @param filename: Ignored
                @param args: Ignored
                @param env: The environment L{dict} which will be stored
                @param wdir: Ignored
                """
                self.process_env = env

        _reactor = FakeReactor()
        resource = twcgi.CGIScript(self.mktemp(), reactor=_reactor)
        request = DummyRequest(['a', 'b'])
        _render(resource, request)
        # PATH_INFO is the '/'-joined request path.
        self.assertEqual(_reactor.process_env["PATH_INFO"],
                         "/a/b")
class CGIDirectoryTests(unittest.TestCase):
    """
    Tests for L{twcgi.CGIDirectory}.
    """

    def test_render(self):
        """
        L{twcgi.CGIDirectory.render} sets the HTTP response code to I{NOT
        FOUND}.
        """
        resource = twcgi.CGIDirectory(self.mktemp())
        request = DummyRequest([''])
        d = _render(resource, request)
        def cbRendered(ignored):
            self.assertEqual(request.responseCode, NOT_FOUND)
        d.addCallback(cbRendered)
        return d

    def test_notFoundChild(self):
        """
        L{twcgi.CGIDirectory.getChild} returns a resource which renders an
        response with the HTTP I{NOT FOUND} status code if the indicated child
        does not exist as an entry in the directory used to initialized the
        L{twcgi.CGIDirectory}.
        """
        path = self.mktemp()
        os.makedirs(path)
        resource = twcgi.CGIDirectory(path)
        request = DummyRequest(['foo'])
        child = resource.getChild("foo", request)
        d = _render(child, request)
        def cbRendered(ignored):
            self.assertEqual(request.responseCode, NOT_FOUND)
        d.addCallback(cbRendered)
        return d
class CGIProcessProtocolTests(unittest.TestCase):
    """
    Tests for L{twcgi.CGIProcessProtocol}.
    """

    def test_prematureEndOfHeaders(self):
        """
        If the process communicating with L{CGIProcessProtocol} ends before
        finishing writing out headers, the response has I{INTERNAL SERVER
        ERROR} as its status code.
        """
        request = DummyRequest([''])
        protocol = twcgi.CGIProcessProtocol(request)
        # Simulate the child process dying before the header section ended.
        protocol.processEnded(failure.Failure(error.ProcessTerminated()))
        self.assertEqual(request.responseCode, INTERNAL_SERVER_ERROR)
def discardBody(response):
    """
    Discard the body of a HTTP response.

    @param response: The response.

    @return: The response.
    """
    def passThrough(ignored):
        # The body has been consumed; hand the original response onwards.
        return response
    return client.readBody(response).addCallback(passThrough)
| {
"content_hash": "619aa753a3eb313c8641660ccca21304",
"timestamp": "",
"source": "github",
"line_count": 428,
"max_line_length": 80,
"avg_line_length": 29.950934579439252,
"alnum_prop": 0.6101099929791716,
"repo_name": "EricMuller/mynotes-backend",
"id": "b294ec143df7f4205ac4339749d0f1690c904da1",
"size": "12892",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "requirements/twisted/Twisted-17.1.0/src/twisted/web/test/test_cgi.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "11880"
},
{
"name": "Batchfile",
"bytes": "3516"
},
{
"name": "C",
"bytes": "37168"
},
{
"name": "CSS",
"bytes": "6613"
},
{
"name": "DIGITAL Command Language",
"bytes": "1032"
},
{
"name": "GAP",
"bytes": "36244"
},
{
"name": "HTML",
"bytes": "233863"
},
{
"name": "Makefile",
"bytes": "6766"
},
{
"name": "Nginx",
"bytes": "998"
},
{
"name": "Objective-C",
"bytes": "2584"
},
{
"name": "Python",
"bytes": "22991176"
},
{
"name": "Roff",
"bytes": "160293"
},
{
"name": "Shell",
"bytes": "13496"
},
{
"name": "Smarty",
"bytes": "1366"
}
],
"symlink_target": ""
} |
"""
Unit Tests for nova.manager
"""
from nova import manager
from nova import test
class ManagerTestCase(test.TestCase):
    def test_additional_apis_for_dispatcher(self):
        """APIs passed via additional_apis end up in the dispatcher callbacks."""
        class MyAPI(object):
            pass

        extra_api = MyAPI()
        mgr = manager.Manager()
        dispatcher = mgr.create_rpc_dispatcher(additional_apis=[extra_api])
        # Three callbacks total: the manager's own plus our extra API.
        self.assertEqual(len(dispatcher.callbacks), 3)
        self.assertTrue(extra_api in dispatcher.callbacks)
| {
"content_hash": "1158d0b19716cbb025c695e04442ade3",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 65,
"avg_line_length": 23.42105263157895,
"alnum_prop": 0.6561797752808989,
"repo_name": "qwefi/nova",
"id": "7faac8608da60bedd9250defd1f3f84202c42afe",
"size": "1101",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "nova/tests/test_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11596912"
},
{
"name": "Shell",
"bytes": "17148"
}
],
"symlink_target": ""
} |
import sqlalchemy
from pypi_org.data.modelbase import SqlAlchemyBase
class Maintainer(SqlAlchemyBase):
    """Row of the 'maintainers' table, keyed by (user_id, package_id) —
    presumably linking a user to a package they maintain (TODO confirm
    against the callers)."""
    __tablename__ = 'maintainers'

    # Composite primary key: both columns are primary_key=True.
    user_id: int = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    package_id: str = sqlalchemy.Column(sqlalchemy.String, primary_key=True)
| {
"content_hash": "e4446af30fc7ebce0d483afde9ca86d4",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 76,
"avg_line_length": 32.44444444444444,
"alnum_prop": 0.7636986301369864,
"repo_name": "Wintellect/WintellectWebinars",
"id": "6b17acd3c634386bd8578e445615c213d23bdf93",
"size": "292",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "2019-06-06-ten-tips-python-web-devs-kennedy/code/top_10_web_explore/ex02_ngrok/pypi_org/data/maintainers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "47583"
},
{
"name": "CSS",
"bytes": "39803"
},
{
"name": "HTML",
"bytes": "87870"
},
{
"name": "JavaScript",
"bytes": "4383753"
},
{
"name": "Jupyter Notebook",
"bytes": "234737"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "208421"
},
{
"name": "SCSS",
"bytes": "152"
},
{
"name": "Shell",
"bytes": "4251"
},
{
"name": "TypeScript",
"bytes": "142946"
}
],
"symlink_target": ""
} |
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.location_and_climate import SiteGroundTemperatureDeep
log = logging.getLogger(__name__)
class TestSiteGroundTemperatureDeep(unittest.TestCase):
    """Round-trip (save, reload, compare) test for SiteGroundTemperatureDeep.

    Sets a distinct real value for every month, writes the object to an IDF
    file, reads it back, and asserts each field survives unchanged.
    """

    # One field suffix base per month, January..December, with distinct
    # values so a month mix-up cannot pass unnoticed.
    _MONTHS = ("january", "february", "march", "april", "may", "june",
               "july", "august", "september", "october", "november",
               "december")
    _VALUES = (1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.1,
               11.11, 12.12)

    def setUp(self):
        self.fd, self.path = tempfile.mkstemp()

    def tearDown(self):
        # Close the descriptor returned by mkstemp() before unlinking;
        # the original version leaked one file descriptor per test.
        os.close(self.fd)
        os.remove(self.path)

    def test_create_sitegroundtemperaturedeep(self):
        pyidf.validation_level = ValidationLevel.error
        obj = SiteGroundTemperatureDeep()
        fields = ["%s_deep_ground_temperature" % month for month in self._MONTHS]
        # real: assign every monthly temperature field
        for field, value in zip(fields, self._VALUES):
            setattr(obj, field, value)
        idf = IDF()
        idf.add(obj)
        idf.save(self.path, check=False)
        with open(self.path, mode='r') as f:
            for line in f:
                log.debug(line.strip())
        idf2 = IDF(self.path)
        reloaded = idf2.sitegroundtemperaturedeeps[0]
        for field, value in zip(fields, self._VALUES):
            self.assertAlmostEqual(getattr(reloaded, field), value)
"content_hash": "e7c138255813e2001546a2579f25dd50",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 139,
"avg_line_length": 50.5,
"alnum_prop": 0.7256701279884086,
"repo_name": "rbuffat/pyidf",
"id": "ece1c6e4e85b37cbb33821ac00c591747d2c3d6e",
"size": "4141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_sitegroundtemperaturedeep.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "22271673"
}
],
"symlink_target": ""
} |
import os
from twisted.trial import unittest
from zope.interface.verify import verifyObject
from scrapyd.config import Config
from scrapyd.interfaces import ISpiderScheduler
from scrapyd.scheduler import SpiderScheduler
from scrapyd.utils import get_spider_queues
class SpiderSchedulerTest(unittest.TestCase):
    """Tests for SpiderScheduler backed by on-disk egg dirs and queue dbs."""
    def setUp(self):
        # Build a scratch layout with two existing projects (egg dirs).
        d = self.mktemp()
        eggs_dir = self.eggs_dir = os.path.join(d, 'eggs')
        dbs_dir = os.path.join(d, 'dbs')
        os.mkdir(d)
        os.makedirs(eggs_dir)
        os.makedirs(dbs_dir)
        os.makedirs(os.path.join(eggs_dir, 'mybot1'))
        os.makedirs(os.path.join(eggs_dir, 'mybot2'))
        config = Config(values={'eggs_dir': eggs_dir, 'dbs_dir': dbs_dir})
        self.queues = get_spider_queues(config)
        self.sched = SpiderScheduler(config)
    def test_interface(self):
        # The scheduler must formally provide ISpiderScheduler.
        verifyObject(ISpiderScheduler, self.sched)
    def test_list_update_projects(self):
        self.assertEqual(sorted(self.sched.list_projects()), sorted(['mybot1', 'mybot2']))
        # A project egg dir created after construction appears only once
        # update_projects() is called.
        os.makedirs(os.path.join(self.eggs_dir, 'mybot3'))
        self.sched.update_projects()
        self.assertEqual(sorted(self.sched.list_projects()), sorted(['mybot1', 'mybot2', 'mybot3']))
    def test_schedule(self):
        q1, q2 = self.queues['mybot1'], self.queues['mybot2']
        self.assertFalse(q1.count())
        self.sched.schedule('mybot1', 'myspider1', 2, a='b')
        self.sched.schedule('mybot2', 'myspider2', 1, c='d')
        self.sched.schedule('mybot2', 'myspider3', 10, e='f')
        self.assertEqual(q1.pop(), {'name': 'myspider1', 'a': 'b'})
        # Within one queue, the higher-priority job (10) pops before the
        # lower-priority one (1).
        self.assertEqual(q2.pop(), {'name': 'myspider3', 'e': 'f'})
        self.assertEqual(q2.pop(), {'name': 'myspider2', 'c': 'd'})
| {
"content_hash": "19d99f92becadbb73d6fc018afde8dab",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 100,
"avg_line_length": 39.68181818181818,
"alnum_prop": 0.6397479954180985,
"repo_name": "scrapy/scrapyd",
"id": "82ccfc21a692489c393e7d79e48e53d9b2ba867a",
"size": "1746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrapyd/tests/test_scheduler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "91198"
}
],
"symlink_target": ""
} |
"""
A generalized application configuration utility.
Features include:
- lazy eval
- merges configuration files
- parameter type validation, with custom validation
- parameter aliases
Easily extensible to other source formats, e.g. json and ini
Limitations:
- at the moment only supports a "flat" config structure; no nested data structures
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from abc import ABCMeta, abstractmethod
from collections import Mapping, defaultdict
from glob import glob
from itertools import chain
from logging import getLogger
from os import environ, stat
from os.path import basename, join
from stat import S_IFDIR, S_IFMT, S_IFREG
from enum import Enum, EnumMeta
from .compat import (isiterable, iteritems, itervalues, odict, primitive_types, string_types,
text_type, with_metaclass)
from .constants import NULL
from .path import expand
from .serialize import yaml_load
from .. import CondaError, CondaMultiError
from .._vendor.auxlib.collection import AttrDict, first, frozendict, last, make_immutable
from .._vendor.auxlib.exceptions import ThisShouldNeverHappenError
from .._vendor.auxlib.type_coercion import TypeCoercionError, typify_data_structure
from .._vendor.boltons.setutils import IndexedSet
try: # pragma: no cover
from cytoolz.dicttoolz import merge
from cytoolz.functoolz import excepts
from cytoolz.itertoolz import concat, concatv, unique
except ImportError: # pragma: no cover
from .._vendor.toolz.dicttoolz import merge
from .._vendor.toolz.functoolz import excepts
from .._vendor.toolz.itertoolz import concat, concatv, unique
try: # pragma: no cover
from ruamel_yaml.comments import CommentedSeq, CommentedMap
from ruamel_yaml.scanner import ScannerError
except ImportError: # pragma: no cover
from ruamel.yaml.comments import CommentedSeq, CommentedMap # pragma: no cover
from ruamel.yaml.scanner import ScannerError
log = getLogger(__name__)
EMPTY_MAP = frozendict()
def pretty_list(iterable, padding='  '):  # TODO: move elsewhere in conda.common
    """Render *iterable* as an indented '- item' bullet list string."""
    items = iterable if isiterable(iterable) else [iterable]
    return '\n'.join("%s- %s" % (padding, element) for element in items)
def pretty_map(dictionary, padding='  '):
    """Render *dictionary* as indented 'key: value' lines."""
    rendered = ("%s%s: %s" % (padding, k, v) for k, v in iteritems(dictionary))
    return '\n'.join(rendered)
class LoadError(CondaError):
    """Raised when a configuration file cannot be parsed (e.g. invalid YAML)."""

    def __init__(self, message, filepath, line, column):
        self.filepath = filepath
        self.line = line
        self.column = column
        super(LoadError, self).__init__(
            "Load Error: in %s on line %s, column %s. %s"
            % (filepath, line, column, message))
class ConfigurationError(CondaError):
    """Base class for all configuration-system errors in this module."""
    pass
class ValidationError(ConfigurationError):
    """A parameter value failed validation.

    Records which parameter, what value, and which source file/origin the
    value came from; subclasses build the human-readable message.
    """
    def __init__(self, parameter_name, parameter_value, source, msg=None, **kwargs):
        self.parameter_name = parameter_name
        self.parameter_value = parameter_value
        self.source = source
        super(ValidationError, self).__init__(msg, **kwargs)
class MultipleKeysError(ValidationError):
    """Raised when one source defines a parameter under several aliased keys."""

    def __init__(self, source, keys, preferred_key):
        self.source = source
        self.keys = keys
        # pretty_list() does not end with a newline; without the explicit
        # "\n" after "%s" the last key and "Must declare" fused on one line.
        msg = ("Multiple aliased keys in file %s:\n"
               "%s\n"
               "Must declare only one. Prefer '%s'" % (source, pretty_list(keys),
                                                       preferred_key))
        super(MultipleKeysError, self).__init__(preferred_key, None, source, msg=msg)
class InvalidTypeError(ValidationError):
    """A parameter value has the wrong python type for its declared parameter."""
    def __init__(self, parameter_name, parameter_value, source, wrong_type, valid_types, msg=None):
        self.wrong_type = wrong_type
        self.valid_types = valid_types
        if msg is None:
            msg = ("Parameter %s = %r declared in %s has type %s.\n"
                   "Valid types:\n%s" % (parameter_name, parameter_value,
                                         source, wrong_type, pretty_list(valid_types)))
        super(InvalidTypeError, self).__init__(parameter_name, parameter_value, source, msg=msg)
class InvalidElementTypeError(InvalidTypeError):
    """An element inside a sequence/map parameter has the wrong type.

    ``parameter_value`` here is the offending element (not the whole
    collection); ``index_or_key`` locates it within the collection.
    """
    def __init__(self, parameter_name, parameter_value, source, wrong_type,
                 valid_types, index_or_key):
        # int index => sequence element; anything else => map key
        qualifier = "at index" if isinstance(index_or_key, int) else "for key"
        msg = ("Parameter %s declared in %s has invalid element %r %s %s.\n"
               "Valid element types:\n"
               "%s." % (parameter_name, source, parameter_value, qualifier,
                        index_or_key, pretty_list(valid_types)))
        super(InvalidElementTypeError, self).__init__(parameter_name, parameter_value, source,
                                                      wrong_type, valid_types, msg=msg)
class CustomValidationError(ValidationError):
    """A user-supplied validation callable rejected the value; wraps its message."""
    def __init__(self, parameter_name, parameter_value, source, custom_message):
        msg = ("Parameter %s = %r declared in %s is invalid.\n"
               "%s" % (parameter_name, parameter_value, source, custom_message))
        super(CustomValidationError, self).__init__(parameter_name, parameter_value, source,
                                                    msg=msg)
class MultiValidationError(CondaMultiError, ConfigurationError):
    """Aggregates several validation errors so all can be reported at once."""
    def __init__(self, errors, *args, **kwargs):
        super(MultiValidationError, self).__init__(errors, *args, **kwargs)
def raise_errors(errors):
    """Raise the collected validation errors, or return True if there are none.

    A single error is raised as-is; several are bundled into a
    MultiValidationError.
    """
    if not errors:
        return True
    if len(errors) == 1:
        raise errors[0]
    raise MultiValidationError(errors)
class ParameterFlag(Enum):
    """Markers attached to configuration keys/values via YAML comments.

    ``final`` freezes a key against lower-precedence sources; ``top`` and
    ``bottom`` pin sequence elements to the front/back of the merged result.
    """
    final = "final"
    top = "top"
    bottom = "bottom"

    def __str__(self):
        return str(self.value)

    @classmethod
    def from_name(cls, name):
        """Look up a flag by member name, e.g. 'final'."""
        return cls[name]

    @classmethod
    def from_value(cls, value):
        """Look up a flag by member value."""
        return cls(value)

    @classmethod
    def from_string(cls, string):
        """Parse a comment token like '!final' or '#top'; None if unrecognized."""
        try:
            return cls.from_value(string.strip('!#'))
        except (ValueError, AttributeError):
            # not a string, or not a known flag value
            return None
@with_metaclass(ABCMeta)
class RawParameter(object):
    """Abstract wrapper for one parameter read from a single source.

    A "raw" parameter is untyped and unmerged; subclasses know how to pull
    the value and its flags out of a particular source kind (env var,
    argparse namespace, YAML file).
    """
    def __init__(self, source, key, raw_value):
        self.source = source
        self.key = key
        self._raw_value = raw_value
    def __repr__(self):
        return text_type(vars(self))
    @abstractmethod
    def value(self, parameter_obj):
        # Return the source's value, interpreted for the given Parameter.
        raise NotImplementedError()
    @abstractmethod
    def keyflag(self):
        # Return the ParameterFlag attached to the key itself, or None.
        raise NotImplementedError()
    @abstractmethod
    def valueflags(self, parameter_obj):
        # Return per-element flags for sequence/map values.
        raise NotImplementedError()
    @classmethod
    def make_raw_parameters(cls, source, from_map):
        """Wrap each entry of *from_map* in an instance of this class."""
        if from_map:
            return dict((key, cls(source, key, from_map[key])) for key in from_map)
        return EMPTY_MAP
class EnvRawParameter(RawParameter):
    """Raw parameter sourced from APPNAME_* environment variables."""
    source = 'envvars'
    def value(self, parameter_obj):
        # Sequence parameters expose string_delimiter; split the env string
        # on it, stripping whitespace and dropping empty pieces.
        if hasattr(parameter_obj, 'string_delimiter'):
            assert isinstance(self._raw_value, string_types)
            string_delimiter = getattr(parameter_obj, 'string_delimiter')
            # TODO: add stripping of !important, !top, and !bottom
            return tuple(v for v in (
                vv.strip() for vv in self._raw_value.split(string_delimiter)
            ) if v)
        else:
            # Scalar: take the text before any "!important" suffix.
            return self.__important_split_value[0].strip()
    def keyflag(self):
        # "VAL !important" in the env var marks the key as final.
        return ParameterFlag.final if len(self.__important_split_value) >= 2 else None
    def valueflags(self, parameter_obj):
        if hasattr(parameter_obj, 'string_delimiter'):
            string_delimiter = getattr(parameter_obj, 'string_delimiter')
            # TODO: add stripping of !important, !top, and !bottom
            return tuple('' for _ in self._raw_value.split(string_delimiter))
        else:
            # NOTE(review): this returns the stripped scalar value (same as
            # value()) rather than a flag -- verify this is intentional.
            return self.__important_split_value[0].strip()
    @property
    def __important_split_value(self):
        # Name-mangled helper: parts around the literal "!important" marker.
        return self._raw_value.split("!important")
    @classmethod
    def make_raw_parameters(cls, appname):
        """Collect env vars prefixed APPNAME_ into raw parameters, keyed
        by the lower-cased remainder of the variable name."""
        keystart = "{0}_".format(appname.upper())
        raw_env = dict((k.replace(keystart, '', 1).lower(), v)
                       for k, v in iteritems(environ) if k.startswith(keystart))
        return super(EnvRawParameter, cls).make_raw_parameters(EnvRawParameter.source, raw_env)
class ArgParseRawParameter(RawParameter):
    """Raw parameter sourced from parsed command-line arguments."""
    source = 'cmd_line'

    def value(self, parameter_obj):
        """Return an immutable copy of the argparse-supplied value."""
        return make_immutable(self._raw_value)

    def keyflag(self):
        """Command-line keys never carry !final/!top/!bottom markers."""
        return None

    def valueflags(self, parameter_obj):
        """No per-value flags: None for primitives, an empty tuple otherwise."""
        if isinstance(parameter_obj, PrimitiveParameter):
            return None
        return ()

    @classmethod
    def make_raw_parameters(cls, args_from_argparse):
        return super(ArgParseRawParameter, cls).make_raw_parameters(
            ArgParseRawParameter.source, args_from_argparse)
class YamlRawParameter(RawParameter):
    """Raw parameter sourced from a ruamel.yaml document.

    Flags (!final/!top/!bottom) are recovered from YAML comments via the
    ruamel round-trip comment attributes (``.ca.items``).
    """
    # this class should encapsulate all direct use of ruamel.yaml in this module
    def __init__(self, source, key, raw_value, keycomment):
        self._keycomment = keycomment
        super(YamlRawParameter, self).__init__(source, key, raw_value)
    def value(self, parameter_obj):
        self.__process(parameter_obj)
        return self._value
    def keyflag(self):
        return ParameterFlag.from_string(self._keycomment)
    def valueflags(self, parameter_obj):
        self.__process(parameter_obj)
        return self._valueflags
    def __process(self, parameter_obj):
        # Lazily compute _value/_valueflags once; presence of _value is
        # the "already processed" sentinel.
        if hasattr(self, '_value'):
            return
        elif isinstance(self._raw_value, CommentedSeq):
            valuecomments = self._get_yaml_list_comments(self._raw_value)
            self._valueflags = tuple(ParameterFlag.from_string(s) for s in valuecomments)
            self._value = tuple(self._raw_value)
        elif isinstance(self._raw_value, CommentedMap):
            valuecomments = self._get_yaml_map_comments(self._raw_value)
            self._valueflags = dict((k, ParameterFlag.from_string(v))
                                    for k, v in iteritems(valuecomments) if v is not None)
            self._value = frozendict(self._raw_value)
        elif isinstance(self._raw_value, primitive_types):
            self._valueflags = None
            self._value = self._raw_value
        else:
            raise ThisShouldNeverHappenError()  # pragma: no cover
    @staticmethod
    def _get_yaml_key_comment(commented_dict, key):
        # Comment text attached to a mapping key, or None if absent.
        try:
            return commented_dict.ca.items[key][2].value.strip()
        except (AttributeError, KeyError):
            return None
    @staticmethod
    def _get_yaml_list_comments(value):
        # One entry per list element: the element's comment text, or None.
        items = value.ca.items
        raw_comment_lines = tuple(excepts((AttributeError, KeyError, TypeError),
                                          lambda q: items.get(q)[0].value.strip() or None,
                                          lambda _: None  # default value on exception
                                          )(q)
                                  for q in range(len(value)))
        return raw_comment_lines
    @staticmethod
    def _get_yaml_map_comments(rawvalue):
        # Map of key -> comment text (or None) for a CommentedMap.
        return dict((key, excepts((AttributeError, KeyError),
                                  lambda k: rawvalue.ca.items[k][2].value.strip() or None,
                                  lambda _: None  # default value on exception
                                  )(key))
                    for key in rawvalue)
    @classmethod
    def make_raw_parameters(cls, source, from_map):
        if from_map:
            return dict((key, cls(source, key, from_map[key],
                                  cls._get_yaml_key_comment(from_map, key)))
                        for key in from_map)
        return EMPTY_MAP
    @classmethod
    def make_raw_parameters_from_file(cls, filepath):
        """Parse *filepath* as YAML and wrap its top-level entries; raises
        LoadError (with line/column) on invalid YAML."""
        with open(filepath, 'r') as fh:
            try:
                ruamel_yaml = yaml_load(fh)
            except ScannerError as err:
                mark = err.problem_mark
                raise LoadError("Invalid YAML", filepath, mark.line, mark.column)
        return cls.make_raw_parameters(filepath, ruamel_yaml) or EMPTY_MAP
def load_file_configs(search_path):
    """Load YAML config files found along *search_path*.

    Each search-path entry may be a file (loaded directly) or a directory
    (all *.yml / *.yaml inside, sorted). Nonexistent paths are skipped.
    Returns an ordered map of filepath -> dict of raw parameter objects.
    """
    # returns an ordered map of filepath and dict of raw parameter objects
    def _file_yaml_loader(fullpath):
        assert fullpath.endswith((".yml", ".yaml")) or "condarc" in basename(fullpath), fullpath
        yield fullpath, YamlRawParameter.make_raw_parameters_from_file(fullpath)
    def _dir_yaml_loader(fullpath):
        for filepath in sorted(concatv(glob(join(fullpath, "*.yml")),
                                       glob(join(fullpath, "*.yaml")))):
            yield filepath, YamlRawParameter.make_raw_parameters_from_file(filepath)
    # map a stat result to a file loader or a directory loader
    _loader = {
        S_IFREG: _file_yaml_loader,
        S_IFDIR: _dir_yaml_loader,
    }
    def _get_st_mode(path):
        # stat the path for file type, or None if path doesn't exist
        try:
            return S_IFMT(stat(path).st_mode)
        except OSError:
            return None
    expanded_paths = tuple(expand(path) for path in search_path)
    # generators keep the stat+load pipeline lazy until odict() consumes it
    stat_paths = (_get_st_mode(path) for path in expanded_paths)
    load_paths = (_loader[st_mode](path)
                  for path, st_mode in zip(expanded_paths, stat_paths)
                  if st_mode is not None)
    raw_data = odict(kv for kv in chain.from_iterable(load_paths))
    return raw_data
@with_metaclass(ABCMeta)
class Parameter(object):
    """Abstract data descriptor for one configuration parameter.

    Instances live on Configuration subclasses; attribute access triggers
    an "extract and merge" across all raw-data sources, with the result
    validated and cached per Configuration instance.
    """
    _type = None           # python type the merged value must have
    _element_type = None   # type(s) for elements of sequence/map values
    def __init__(self, default, aliases=(), validation=None):
        self._name = None
        self._names = None
        self.default = default
        self.aliases = aliases
        self._validation = validation
    def _set_name(self, name):
        # this is an explicit method, and not a descriptor/setter
        # it's meant to be called by the Configuration metaclass
        self._name = name  # lgtm [py/mutable-descriptor]
        _names = frozenset(x for x in chain(self.aliases, (name, )))
        self._names = _names  # lgtm [py/mutable-descriptor]
        return name
    @property
    def name(self):
        if self._name is None:
            # The Configuration metaclass should call the `_set_name` method.
            raise ThisShouldNeverHappenError()  # pragma: no cover
        return self._name
    @property
    def names(self):
        # Primary name plus all aliases, as a frozenset.
        if self._names is None:
            # The Configuration metaclass should call the `_set_name` method.
            raise ThisShouldNeverHappenError()  # pragma: no cover
        return self._names
    def _raw_parameters_from_single_source(self, raw_parameters):
        # while supporting parameter name aliases, we enforce that only one definition is given
        # per data source
        keys = self.names & frozenset(raw_parameters.keys())
        matches = {key: raw_parameters[key] for key in keys}
        numkeys = len(keys)
        if numkeys == 0:
            return None, None
        elif numkeys == 1:
            return next(itervalues(matches)), None
        elif self.name in keys:
            # several aliases given: prefer the primary name, but still report
            return matches[self.name], MultipleKeysError(raw_parameters[next(iter(keys))].source,
                                                         keys, self.name)
        else:
            return None, MultipleKeysError(raw_parameters[next(iter(keys))].source,
                                           keys, self.name)
    def _get_all_matches(self, instance):
        # a match is a raw parameter instance
        matches = []
        multikey_exceptions = []
        for filepath, raw_parameters in iteritems(instance.raw_data):
            match, error = self._raw_parameters_from_single_source(raw_parameters)
            if match is not None:
                matches.append(match)
            if error:
                multikey_exceptions.append(error)
        return matches, multikey_exceptions
    @abstractmethod
    def _merge(self, matches):
        # Combine matches from all sources into one value; subclass-specific.
        raise NotImplementedError()
    def __get__(self, instance, instance_type):
        # strategy is "extract and merge," which is actually just map and reduce
        # extract matches from each source in SEARCH_PATH
        # then merge matches together
        if self.name in instance._cache_:
            return instance._cache_[self.name]
        matches, errors = self._get_all_matches(instance)
        try:
            result = typify_data_structure(self._merge(matches) if matches else self.default,
                                           self._element_type)
        except TypeCoercionError as e:
            errors.append(CustomValidationError(self.name, e.value, "<<merged>>", text_type(e)))
        else:
            errors.extend(self.collect_errors(instance, result))
        # raises when errors is non-empty; on the except path above errors
        # always has an entry, so `result` is defined whenever we get here
        raise_errors(errors)
        instance._cache_[self.name] = result  # lgtm [py/uninitialized-local-variable]
        return result  # lgtm [py/uninitialized-local-variable]
    def collect_errors(self, instance, value, source="<<merged>>"):
        """Validate a Parameter value.
        Args:
            instance (Configuration): The instance object to which the Parameter descriptor is
                attached.
            value: The value to be validated.
        Returns:
            list: ValidationError instances; empty when the value is valid.
        """
        errors = []
        if not isinstance(value, self._type):
            errors.append(InvalidTypeError(self.name, value, source, type(value),
                                           self._type))
        elif self._validation is not None:
            # custom validator: False => generic error, str => custom message
            result = self._validation(value)
            if result is False:
                errors.append(ValidationError(self.name, value, source))
            elif isinstance(result, string_types):
                errors.append(CustomValidationError(self.name, value, source, result))
        return errors
    def _match_key_is_important(self, raw_parameter):
        return raw_parameter.keyflag() is ParameterFlag.final
    def _first_important_matches(self, matches):
        # All matches up to and including the first !final one; everything
        # when no match is marked final.
        idx = first(enumerate(matches), lambda x: self._match_key_is_important(x[1]),
                    apply=lambda x: x[0])
        return matches if idx is None else matches[:idx+1]
    @staticmethod
    def _str_format_flag(flag):
        return " #!%s" % flag if flag is not None else ''
    @staticmethod
    def _str_format_value(value):
        if value is None:
            return 'None'
        return value
    @classmethod
    def repr_raw(cls, raw_parameter):
        # Subclasses render a raw parameter back into YAML-ish text.
        raise NotImplementedError()
class PrimitiveParameter(Parameter):
    """Configuration parameter holding exactly one primitive python value.

    Primitives are str, int, float, complex, bool and NoneType (plus long
    and unicode on python 2).
    """

    def __init__(self, default, aliases=(), validation=None, element_type=None):
        """
        Args:
            default (Any): Fallback value when no source defines the parameter.
            aliases (Iterable[str]): Alternate names accepted for the parameter.
            validation (callable): Receives the value; return False or an error
                string for invalid input.  None/True mean valid.
            element_type (type or Tuple[type]): Accepted type(s) for the value;
                defaults to type(default).
        """
        self._type = element_type if element_type is not None else type(default)
        self._element_type = self._type
        super(PrimitiveParameter, self).__init__(default, aliases, validation)

    def _merge(self, matches):
        # An explicit "!final" match wins outright; otherwise the last
        # (highest-precedence) source wins.
        important = first(matches, self._match_key_is_important, default=None)
        if important is not None:
            return important.value(self)
        fallback = last(matches, lambda m: m is not None, default=None)
        if fallback is not None:
            return fallback.value(self)
        raise ThisShouldNeverHappenError()  # pragma: no cover

    def repr_raw(self, raw_parameter):
        rendered_value = self._str_format_value(raw_parameter.value(self))
        rendered_flag = self._str_format_flag(raw_parameter.keyflag())
        return "%s: %s%s" % (raw_parameter.key, rendered_value, rendered_flag)
class SequenceParameter(Parameter):
    """Parameter type for a Configuration class that holds a sequence (i.e. list) of python
    primitive values.
    """
    _type = tuple
    def __init__(self, element_type, default=(), aliases=(), validation=None,
                 string_delimiter=','):
        """
        Args:
            element_type (type or Iterable[type]): The generic type of each element in
                the sequence.
            default (Iterable[str]): The parameter's default value.
            aliases (Iterable[str]): Alternate names for the parameter.
            validation (callable): Given a parameter value as input, return a boolean indicating
                validity, or alternately return a string describing an invalid value.
            string_delimiter (str): Separator used when the value arrives as a
                single delimited string (e.g. from an environment variable).
        """
        self._element_type = element_type
        self.string_delimiter = string_delimiter
        super(SequenceParameter, self).__init__(default, aliases, validation)
    def collect_errors(self, instance, value, source="<<merged>>"):
        # Validate the container type first (super), then each element.
        errors = super(SequenceParameter, self).collect_errors(instance, value)
        element_type = self._element_type
        for idx, element in enumerate(value):
            if not isinstance(element, element_type):
                errors.append(InvalidElementTypeError(self.name, element, source,
                                                      type(element), element_type, idx))
        return errors
    def _merge(self, matches):
        # get matches up to and including first important_match
        # but if no important_match, then all matches are important_matches
        relevant_matches_and_values = tuple((match, match.value(self)) for match in
                                            self._first_important_matches(matches))
        for match, value in relevant_matches_and_values:
            if not isinstance(value, tuple):
                raise InvalidTypeError(self.name, value, match.source, value.__class__.__name__,
                                       self._type.__name__)
        # get individual lines from important_matches that were marked important
        # these will be prepended to the final result
        def get_marked_lines(match, marker, parameter_obj):
            return tuple(line
                         for line, flag in zip(match.value(parameter_obj),
                                               match.valueflags(parameter_obj))
                         if flag is marker) if match else ()
        top_lines = concat(get_marked_lines(m, ParameterFlag.top, self) for m, _ in
                           relevant_matches_and_values)
        # also get lines that were marked as bottom, but reverse the match order so that lines
        # coming earlier will ultimately be last
        bottom_lines = concat(get_marked_lines(m, ParameterFlag.bottom, self) for m, _ in
                              reversed(relevant_matches_and_values))
        # now, concat all lines, while reversing the matches
        # reverse because elements closer to the end of search path take precedence
        all_lines = concat(v for _, v in reversed(relevant_matches_and_values))
        # stack top_lines + all_lines, then de-dupe
        top_deduped = tuple(unique(concatv(top_lines, all_lines)))
        # take the top-deduped lines, reverse them, and concat with reversed bottom_lines
        # this gives us the reverse of the order we want, but almost there
        # NOTE: for a line value marked both top and bottom, the bottom marker will win out
        #       for the top marker to win out, we'd need one additional de-dupe step
        bottom_deduped = unique(concatv(reversed(tuple(bottom_lines)), reversed(top_deduped)))
        # just reverse, and we're good to go
        return tuple(reversed(tuple(bottom_deduped)))
    def repr_raw(self, raw_parameter):
        # Render as a YAML-ish block list, echoing any per-line flags.
        lines = list()
        lines.append("%s:%s" % (raw_parameter.key,
                                self._str_format_flag(raw_parameter.keyflag())))
        for q, value in enumerate(raw_parameter.value(self)):
            valueflag = raw_parameter.valueflags(self)[q]
            lines.append("  - %s%s" % (self._str_format_value(value),
                                       self._str_format_flag(valueflag)))
        return '\n'.join(lines)
    def _get_all_matches(self, instance):
        # this is necessary to handle argparse `action="append"`, which can't be set to a
        # default value of NULL
        # it also config settings like `channels: ~`
        matches, exceptions = super(SequenceParameter, self)._get_all_matches(instance)
        matches = tuple(m for m in matches if m._raw_value is not None)
        return matches, exceptions
class MapParameter(Parameter):
    """Parameter type for a Configuration class that holds a map (i.e. dict) of python
    primitive values.
    """
    _type = dict
    def __init__(self, element_type, default=None, aliases=(), validation=None):
        """
        Args:
            element_type (type or Iterable[type]): The generic type of each element.
            default (Mapping): The parameter's default value. If None, will be an empty dict.
            aliases (Iterable[str]): Alternate names for the parameter.
            validation (callable): Given a parameter value as input, return a boolean indicating
                validity, or alternately return a string describing an invalid value.
        """
        self._element_type = element_type
        super(MapParameter, self).__init__(default or dict(), aliases, validation)
    def collect_errors(self, instance, value, source="<<merged>>"):
        # Validate the container type first (super), then each map value.
        errors = super(MapParameter, self).collect_errors(instance, value)
        if isinstance(value, Mapping):
            element_type = self._element_type
            errors.extend(InvalidElementTypeError(self.name, val, source, type(val),
                                                  element_type, key)
                          for key, val in iteritems(value) if not isinstance(val, element_type))
        return errors
    def _merge(self, matches):
        # get matches up to and including first important_match
        # but if no important_match, then all matches are important_matches
        relevant_matches_and_values = tuple((match, match.value(self)) for match in
                                            self._first_important_matches(matches))
        for match, value in relevant_matches_and_values:
            if not isinstance(value, Mapping):
                raise InvalidTypeError(self.name, value, match.source, value.__class__.__name__,
                                       self._type.__name__)
        # mapkeys with important matches
        def key_is_important(match, key):
            return match.valueflags(self).get(key) == ParameterFlag.final
        important_maps = tuple(dict((k, v)
                                    for k, v in iteritems(match_value)
                                    if key_is_important(match, k))
                               for match, match_value in relevant_matches_and_values)
        # dump all matches in a dict
        # then overwrite with important matches
        return merge(concatv((v for _, v in relevant_matches_and_values),
                             reversed(important_maps)))
    def repr_raw(self, raw_parameter):
        # Render as a YAML-ish nested mapping, echoing any per-key flags.
        lines = list()
        lines.append("%s:%s" % (raw_parameter.key,
                                self._str_format_flag(raw_parameter.keyflag())))
        for valuekey, value in iteritems(raw_parameter.value(self)):
            valueflag = raw_parameter.valueflags(self).get(valuekey)
            lines.append("  %s: %s%s" % (valuekey, self._str_format_value(value),
                                         self._str_format_flag(valueflag)))
        return '\n'.join(lines)
    def _get_all_matches(self, instance):
        # it also config settings like `proxy_servers: ~`
        matches, exceptions = super(MapParameter, self)._get_all_matches(instance)
        matches = tuple(m for m in matches if m._raw_value is not None)
        return matches, exceptions
class ConfigurationType(type):
    """Metaclass for Configuration: wires Parameter descriptors to their names."""

    def __init__(cls, name, bases, attr):
        super(ConfigurationType, cls).__init__(name, bases, attr)
        # Tell each Parameter descriptor its attribute name, and record the
        # full set of parameter names on the class.
        parameters = ((attr_name, attr_value)
                      for attr_name, attr_value in iteritems(cls.__dict__)
                      if isinstance(attr_value, Parameter))
        cls.parameter_names = tuple(p._set_name(attr_name) for attr_name, p in parameters)
@with_metaclass(ConfigurationType)
class Configuration(object):
    def __init__(self, search_path=(), app_name=None, argparse_args=None):
        """Build a Configuration from files, env vars and argparse args.

        Args:
            search_path (Iterable[str]): Candidate config file/dir paths.
            app_name (str): Prefix for APP_NAME_* environment variables.
            argparse_args: Parsed argparse namespace (or mapping) to layer on.
        """
        # Currently, __init__ does a **full** disk reload of all files.
        # A future improvement would be to cache files that are already loaded.
        self.raw_data = odict()
        self._cache_ = dict()
        self._reset_callbacks = IndexedSet()
        self._validation_errors = defaultdict(list)
        # Order matters: files first, then env vars, then argparse args,
        # so later sources take precedence during merging.
        self._set_search_path(search_path)
        self._set_env_vars(app_name)
        self._set_argparse_args(argparse_args)
    def _set_search_path(self, search_path):
        # Load config files found along search_path and invalidate the cache.
        # Returns self to allow chaining.
        self._search_path = IndexedSet(search_path)
        self._set_raw_data(load_file_configs(search_path))
        self._reset_cache()
        return self
def _set_env_vars(self, app_name=None):
self._app_name = app_name
if not app_name:
return self
self.raw_data[EnvRawParameter.source] = EnvRawParameter.make_raw_parameters(app_name)
self._reset_cache()
return self
    def _set_argparse_args(self, argparse_args):
        # Normalize argparse input into a mapping, then register it as the
        # highest-precedence raw-data source.  Returns self for chaining.
        # the argparse_args we store internally in this class as self._argparse_args
        # will be a mapping type, not a non-`dict` object like argparse_args is natively
        if hasattr(argparse_args, '__dict__'):
            # the argparse_args from argparse will be an object with a __dict__ attribute
            # and not a mapping type like this method will turn it into
            self._argparse_args = AttrDict((k, v) for k, v, in iteritems(vars(argparse_args))
                                           if v is not NULL)
        elif not argparse_args:
            # argparse_args can be initialized as `None`
            self._argparse_args = AttrDict()
        else:
            # we're calling this method with argparse_args that are a mapping type, likely
            # already having been processed by this method before
            self._argparse_args = AttrDict((k, v) for k, v, in iteritems(argparse_args)
                                           if v is not NULL)
        source = ArgParseRawParameter.source
        self.raw_data[source] = ArgParseRawParameter.make_raw_parameters(self._argparse_args)
        self._reset_cache()
        return self
    def _set_raw_data(self, raw_data):
        # Merge additional source -> raw-parameters mappings into raw_data
        # and invalidate the cache.  Returns self for chaining.
        self.raw_data.update(raw_data)
        self._reset_cache()
        return self
    def _reset_cache(self):
        # Drop all memoized parameter values and notify registered listeners.
        self._cache_ = dict()
        for callback in self._reset_callbacks:
            callback()
        return self
def register_reset_callaback(self, callback):
self._reset_callbacks.add(callback)
    def check_source(self, source):
        """Type and validate every parameter defined by a single source.

        Args:
            source: Key into self.raw_data identifying the source to check.
        Returns:
            tuple: (dict of key -> typed value, list of validation errors).
        """
        # this method ends up duplicating much of the logic of Parameter.__get__
        # I haven't yet found a way to make it more DRY though
        typed_values = {}
        validation_errors = []
        raw_parameters = self.raw_data[source]
        for key in self.parameter_names:
            parameter = self.__class__.__dict__[key]
            match, multikey_error = parameter._raw_parameters_from_single_source(raw_parameters)
            if multikey_error:
                validation_errors.append(multikey_error)
            if match is not None:
                try:
                    untyped_value = match.value(parameter)
                    # a null value (e.g. `channels: ~`) means "empty collection"
                    if untyped_value is None:
                        if isinstance(parameter, SequenceParameter):
                            untyped_value = ()
                        elif isinstance(parameter, MapParameter):
                            untyped_value = {}
                    typed_value = typify_data_structure(untyped_value, parameter._element_type)
                except TypeCoercionError as e:
                    validation_errors.append(CustomValidationError(match.key, e.value,
                                                                   match.source, text_type(e)))
                else:
                    collected_errors = parameter.collect_errors(self, typed_value, match.source)
                    if collected_errors:
                        validation_errors.extend(collected_errors)
                    else:
                        typed_values[match.key] = typed_value  # parameter.repr_raw(match)
            else:
                # this situation will happen if there is a multikey_error and none of the
                # matched keys is the primary key
                pass
        return typed_values, validation_errors
def validate_all(self):
validation_errors = list(chain.from_iterable(self.check_source(source)[1]
for source in self.raw_data))
raise_errors(validation_errors)
self.validate_configuration()
@staticmethod
def _collect_validation_error(func, *args, **kwargs):
try:
func(*args, **kwargs)
except ConfigurationError as e:
return e.errors if hasattr(e, 'errors') else e,
return ()
    def validate_configuration(self):
        """Force every parameter descriptor and collect validation errors,
        then raise them together with any post-build validation errors.
        """
        # `errors` is a lazy generator: the getattr() calls (which trigger
        # each Parameter descriptor's __get__) only execute when the tuple()
        # below consumes it, i.e. after post_build_validation() has run.
        errors = chain.from_iterable(Configuration._collect_validation_error(getattr, self, name)
                                     for name in self.parameter_names)
        post_errors = self.post_build_validation()
        raise_errors(tuple(chain.from_iterable((errors, post_errors))))
def post_build_validation(self):
return ()
    def collect_all(self):
        """Typify and validate every raw-data source.

        Raises (via ``raise_errors``) if any source produced validation
        errors; otherwise returns an ordered mapping of source -> typed
        parameter values, omitting sources that contributed no values.
        """
        typed_values = odict()
        validation_errors = odict()
        for source in self.raw_data:
            typed_values[source], validation_errors[source] = self.check_source(source)
        raise_errors(tuple(chain.from_iterable(itervalues(validation_errors))))
        # drop sources with empty value maps
        return odict((k, v) for k, v in iteritems(typed_values) if v)
    def describe_parameter(self, parameter_name):
        """Return a dict describing the named parameter: its kind, public
        name, aliases, element type names, default value, and description
        text (plus the string delimiter for sequence parameters).
        """
        # TODO, in Parameter base class, rename element_type to value_type
        if parameter_name not in self.parameter_names:
            # fall back to the private, underscore-prefixed attribute name
            parameter_name = '_' + parameter_name
        parameter = self.__class__.__dict__[parameter_name]
        assert isinstance(parameter, Parameter)
        # dedupe leading underscore from name
        name = parameter.name.lstrip('_')
        aliases = tuple(alias for alias in parameter.aliases if alias != name)
        description = self.get_descriptions()[name]
        et = parameter._element_type
        # normalize the element type(s) to an iterable of types
        if type(et) == EnumMeta:
            et = [et]
        if not isiterable(et):
            et = [et]
        element_types = tuple(_et.__name__ for _et in et)
        details = {
            'parameter_type': parameter.__class__.__name__.lower().replace("parameter", ""),
            'name': name,
            'aliases': aliases,
            'element_types': element_types,
            'default_value': parameter.default,
            'description': description.replace('\n', ' ').strip(),
        }
        if isinstance(parameter, SequenceParameter):
            # sequence parameters also expose how string values are split
            details['string_delimiter'] = parameter.string_delimiter
        return details
def list_parameters(self):
return tuple(sorted(name.lstrip('_') for name in self.parameter_names))
def typify_parameter(self, parameter_name, value):
# return a tuple with correct parameter name and typed-value
if parameter_name not in self.parameter_names:
parameter_name = '_' + parameter_name
parameter = self.__class__.__dict__[parameter_name]
assert isinstance(parameter, Parameter)
return typify_data_structure(value, parameter._element_type)
def get_descriptions(self):
raise NotImplementedError()
| {
"content_hash": "f1669d9344c298c6ab2823b4fd3b388b",
"timestamp": "",
"source": "github",
"line_count": 886,
"max_line_length": 99,
"avg_line_length": 41.12528216704289,
"alnum_prop": 0.6057304388396411,
"repo_name": "Microsoft/PTVS",
"id": "297592c37ccf53c8abadb406c7f72dc726975379",
"size": "36461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/conda/common/configuration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "10898"
},
{
"name": "C",
"bytes": "23236"
},
{
"name": "C#",
"bytes": "12235396"
},
{
"name": "C++",
"bytes": "212001"
},
{
"name": "CSS",
"bytes": "7025"
},
{
"name": "HTML",
"bytes": "34251"
},
{
"name": "JavaScript",
"bytes": "87257"
},
{
"name": "PowerShell",
"bytes": "44322"
},
{
"name": "Python",
"bytes": "847130"
},
{
"name": "Rich Text Format",
"bytes": "260880"
},
{
"name": "Smarty",
"bytes": "8156"
},
{
"name": "Tcl",
"bytes": "24968"
}
],
"symlink_target": ""
} |
from .service_principal_base_py3 import ServicePrincipalBase
class ServicePrincipalCreateParameters(ServicePrincipalBase):
    """Request parameters for creating a new service principal.

    All required parameters must be populated in order to send to Azure.

    :param account_enabled: Whether or not the service principal account is
     enabled.
    :type account_enabled: bool
    :param app_role_assignment_required: Specifies whether an
     AppRoleAssignment to a user or group is required before Azure AD will
     issue a user or access token to the application.
    :type app_role_assignment_required: bool
    :param key_credentials: The collection of key credentials associated
     with the service principal.
    :type key_credentials: list[~azure.graphrbac.models.KeyCredential]
    :param password_credentials: The collection of password credentials
     associated with the service principal.
    :type password_credentials:
     list[~azure.graphrbac.models.PasswordCredential]
    :param service_principal_type: The type of the service principal.
    :type service_principal_type: str
    :param tags: Optional list of tags that you can apply to your service
     principals. Not nullable.
    :type tags: list[str]
    :param app_id: Required. The application ID.
    :type app_id: str
    """

    # server-side validation constraints, keyed by attribute name
    _validation = {
        'app_id': {'required': True},
    }

    # maps python attribute names to wire-format keys and msrest type strings
    _attribute_map = {
        'account_enabled': {'key': 'accountEnabled', 'type': 'bool'},
        'app_role_assignment_required': {'key': 'appRoleAssignmentRequired', 'type': 'bool'},
        'key_credentials': {'key': 'keyCredentials', 'type': '[KeyCredential]'},
        'password_credentials': {'key': 'passwordCredentials', 'type': '[PasswordCredential]'},
        'service_principal_type': {'key': 'servicePrincipalType', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '[str]'},
        'app_id': {'key': 'appId', 'type': 'str'},
    }

    def __init__(self, *, app_id: str, account_enabled: bool=None, app_role_assignment_required: bool=None, key_credentials=None, password_credentials=None, service_principal_type: str=None, tags=None, **kwargs) -> None:
        super().__init__(
            account_enabled=account_enabled,
            app_role_assignment_required=app_role_assignment_required,
            key_credentials=key_credentials,
            password_credentials=password_credentials,
            service_principal_type=service_principal_type,
            tags=tags,
            **kwargs)
        self.app_id = app_id
| {
"content_hash": "ff73ca9be1e3368059ab57e9fff97fe1",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 298,
"avg_line_length": 51.791666666666664,
"alnum_prop": 0.7015285599356396,
"repo_name": "Azure/azure-sdk-for-python",
"id": "b9b2a3541f0c329d1615ea80d4bd5ca6047b42b5",
"size": "2960",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/graphrbac/azure-graphrbac/azure/graphrbac/models/service_principal_create_parameters_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import os
import logging
import tempfile
import thread, threading
import Queue
import requests
from contextlib import contextmanager
logger = logging.getLogger(__name__)
def download_file(url, fpath):
    """Stream the resource at *url* into the local file *fpath*.

    Downloads in 64 KiB chunks so large files never need to fit in memory.
    Returns *fpath*.
    """
    logger.debug('starting to fetch %s', url)
    r = requests.get(url, stream=True)
    try:
        with open(fpath, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024*64):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()
    finally:
        # FIX: release the streamed HTTP connection back to the pool;
        # previously the response was never closed, so the socket stayed
        # open until garbage collection.
        r.close()
    logger.debug('fetch %s', fpath)
    return fpath
def to_utf8(s):
"""Convert a string to utf8. If the argument is an iterable
(list/tuple/set), then each element of it would be converted instead.
>>> to_utf8('a')
'a'
>>> to_utf8(u'a')
'a'
>>> to_utf8([u'a', u'b', u'\u4f60'])
['a', 'b', '\\xe4\\xbd\\xa0']
"""
if isinstance(s, str):
return s
elif isinstance(s, unicode):
return s.encode('utf-8')
elif isinstance(s, (list, tuple, set)):
return [to_utf8(v) for v in s]
else:
return s
@contextmanager
def create_tmp_file(content=''):
    """Context manager yielding the path of a fresh temporary file.

    The file is pre-populated with *content* (when non-empty) and is closed
    and deleted when the context exits, even on error.
    """
    fd, path = tempfile.mkstemp()
    try:
        if content:
            os.write(fd, content)
        yield path
    finally:
        os.close(fd)
        os.remove(path)
class WorkerPool(object):
    """Fixed-size pool of worker threads consuming tasks from one queue.

    Each worker repeatedly pulls a message off the queue and applies
    ``func`` to it until ``stop()`` is called.
    """

    def __init__(self, func, nworker=10):
        self.nworker = nworker
        self.func = func
        self.queue = Queue.Queue()
        self._stop = threading.Event()

    def start(self):
        """Spawn ``nworker`` worker threads."""
        for _ in xrange(self.nworker):
            thread.start_new_thread(self.do_work, ())

    def stop(self):
        """Signal every worker to exit after its current poll cycle."""
        self._stop.set()

    def add_task(self, msg):
        """Enqueue *msg* for processing by some worker."""
        self.queue.put(msg)

    def do_work(self):
        """Worker loop: poll the queue (1s timeout) until stopped."""
        while True:
            if self._stop.isSet():
                return
            try:
                msg = self.queue.get(timeout=1)
            except Queue.Empty:
                continue
            self.func(msg)
| {
"content_hash": "698c6827b4561808aa6ff67a53979185",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 73,
"avg_line_length": 24.025,
"alnum_prop": 0.5660770031217481,
"repo_name": "zatonovo/slackbot",
"id": "f89ebe1183f79db05cacdcab0e9569246aab7ecf",
"size": "1946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slackbot/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38437"
},
{
"name": "Shell",
"bytes": "1134"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django.contrib.auth.models import AnonymousUser
registry = {}


def serialize(objects, user=None, serializer=None):
    """Serialize *objects* with the serializer registered for their type.

    Accepts a single object or a list/tuple/set of objects; a single object
    yields a single result, a collection yields a list.  When no serializer
    is registered for any object's type, the input is returned unchanged.
    """
    if user is None:
        user = AnonymousUser()
    if not objects:
        return objects
    # sets aren't predictable, so generally you should use a list, but it's
    # supported out of convenience
    if not isinstance(objects, (list, tuple, set, frozenset)):
        return serialize([objects], user=user, serializer=serializer)[0]
    if serializer is None:
        # pick the serializer registered for the first recognized type
        for candidate in objects:
            if type(candidate) in registry:
                serializer = registry[type(candidate)]
                break
        else:
            return objects
    # avoid passing NoneType's to the serializer as they're allowed and
    # filtered out of serialize()
    non_null = [obj for obj in objects if obj is not None]
    attrs = serializer.get_attrs(item_list=non_null, user=user)
    return [serializer(obj, attrs=attrs.get(obj, {}), user=user)
            for obj in objects]
def register(type):
    """Class-decorator factory: map *type* to an instance of the decorated
    serializer class in the global registry.

    Note: the parameter shadows the builtin ``type``; kept for API compat.
    """
    def _install(cls):
        registry[type] = cls()
        return cls
    return _install
class Serializer(object):
    """Base serializer; subclasses override get_attrs() and serialize()."""

    def __call__(self, obj, attrs, user):
        """Serialize *obj*; a None object short-circuits to None."""
        if obj is None:
            return None
        return self.serialize(obj, attrs, user)

    def get_attrs(self, item_list, user):
        """Hook: bulk-compute per-object attributes.  Default: none."""
        return {}

    def serialize(self, obj, attrs, user):
        """Hook: produce the serialized form.  Default: empty dict."""
        return {}
| {
"content_hash": "8ea60047d552d1f3099bc84a82594670",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 85,
"avg_line_length": 26.62295081967213,
"alnum_prop": 0.603448275862069,
"repo_name": "mitsuhiko/sentry",
"id": "e71cbbd646110d6a3889e15dc34dcb8c20bd3506",
"size": "1624",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/api/serializers/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "171113"
},
{
"name": "Python",
"bytes": "877258"
}
],
"symlink_target": ""
} |
from urllib.parse import parse_qs, urlparse
import requests
from ._abstract import AbstractScraper
# Browser-like User-Agent sent with every API request — presumably to avoid
# being rejected as a non-browser client; TODO confirm it is required.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:86.0) Gecko/20100101 Firefox/86.0"
}
# These languages use measurements like cups and spoons rather than grams and liters
IMPERIAL_LANGUAGES = ["en"]
# Fallback when the recipe URL carries no ?lang= query parameter
KPTN_DEFAULT_LANGUAGE = "en"
class KptnCook(AbstractScraper):
    """Scraper for KptnCook recipes.

    A share link only carries a recipe id (the last path segment) and an
    optional ``lang`` query parameter; the actual recipe data is fetched
    from the KptnCook mobile API as JSON during construction.
    """

    def __init__(self, url, proxies=None, timeout=None, *args, **kwargs):
        super().__init__(url, proxies=proxies, timeout=timeout, *args, **kwargs)
        parsed_url = urlparse(self.url)

        # Extract language from URL
        query = parse_qs(parsed_url.query)
        self.lang = query["lang"][0] if "lang" in query else KPTN_DEFAULT_LANGUAGE

        # Build final recipe url (of type mobile.kptncook.com/*)
        self.final_url = "".join(
            ["https://", parsed_url.hostname, parsed_url.path, f"?lang={self.lang}"]
        )

        # Extract recipe id from the url path
        recipe_uid = parsed_url.path.split("/")[-1]

        # Request the final recipe json from the kptncook api
        api_url = f"https://mobile.kptncook.com/recipes/search?kptnkey=6q7QNKy-oIgk-IMuWisJ-jfN7s6&lang={self.lang}"
        json_request_body = [{"uid": recipe_uid}]
        self.recipe_json = requests.post(
            api_url,
            headers=HEADERS,
            proxies=proxies,
            timeout=timeout,
            json=json_request_body,
        ).json()[0]

    @classmethod
    def host(cls, subdomain="mobile"):
        # FIX: the first parameter of a @classmethod receives the class, so it
        # is named `cls` (it was misleadingly named `self`); callers are
        # unaffected because the argument is bound implicitly.
        return f"{subdomain}.kptncook.com"

    def author(self):
        """Author rendered as ``name (link)``."""
        author = self.recipe_json["authors"][0]
        return f"{author['name']} ({author['link']})"

    def title(self):
        return self.recipe_json["title"]

    def category(self):
        return self.recipe_json["rtype"]

    def total_time(self):
        """Cooking time plus preparation time."""
        return self.recipe_json["cookingTime"] + self.recipe_json["preparationTime"]

    def cook_time(self):
        return self.recipe_json["cookingTime"]

    def prep_time(self):
        return self.recipe_json["preparationTime"]

    def yields(self):
        # hard-coded: the API appears to report quantities per single
        # portion — TODO confirm against the API response
        return 1

    def nutrients(self):
        return self.recipe_json["recipeNutrition"]

    def canonical_url(self):
        return self.final_url

    def image(self):
        # the image URL requires the same API key as the recipe search
        return f"{self.recipe_json['imageList'][0]['url']}?kptnkey=6q7QNKy-oIgk-IMuWisJ-jfN7s6"

    def ingredients(self):
        """One 'quantity measure title' string per ingredient, using US
        measures for imperial-unit languages."""
        return [
            " ".join(
                str(x)
                for x in
                # The filter is needed because "measure" and "quantity" fields are not always provided
                filter(
                    None,
                    [
                        ingredient.get("quantity"),
                        ingredient.get("measure"),
                        ingredient["ingredient"]["title"],
                    ]
                    if self.lang not in IMPERIAL_LANGUAGES
                    else [
                        ingredient.get("quantityUS"),
                        ingredient.get("measureUS"),
                        ingredient["ingredient"]["title"],
                    ],
                )
            )
            for ingredient in self.recipe_json["ingredients"]
        ]

    def instructions(self):
        return "\n".join(step["title"] for step in self.recipe_json["steps"])

    def ratings(self):
        # not provided by the KptnCook API
        return None

    def cuisine(self):
        # not provided by the KptnCook API
        return None

    def description(self):
        return self.recipe_json["authorComment"]

    def language(self):
        return self.lang
| {
"content_hash": "ca33613d45f7c829c464262301b913b7",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 116,
"avg_line_length": 31.548672566371682,
"alnum_prop": 0.5669004207573632,
"repo_name": "hhursev/recipe-scraper",
"id": "7d2b26c161352773c6d4cd921934d5c40bffdb84",
"size": "3565",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "recipe_scrapers/kptncook.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88554"
}
],
"symlink_target": ""
} |
import calendar
import datetime
import re
import sys
import urllib
import urlparse
from email.Utils import formatdate
from django.utils.encoding import smart_str, force_unicode
from django.utils.functional import allow_lazy
# Matches an HTTP entity tag: optional weak prefix "W/", then a quoted
# string whose body may contain backslash-escaped characters (group 1).
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
# Lower-case three-letter month abbreviations; index 0 == January.
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
# Regex fragments shared by the three HTTP date formats (RFC 2616 3.3.1).
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
# RFC 1123 (preferred), RFC 850 (two-digit year), and asctime() formats.
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
def urlquote(url, safe='/'):
    """
    A version of Python's urllib.quote() function that can operate on unicode
    strings. The url is first UTF-8 encoded before quoting. The returned string
    can safely be used as part of an argument to a subsequent iri_to_uri() call
    without double-quoting occurring.
    """
    return force_unicode(urllib.quote(smart_str(url), safe))
# wrap so the quoting can also be applied to lazy translation strings
urlquote = allow_lazy(urlquote, unicode)
def urlquote_plus(url, safe=''):
    """
    A version of Python's urllib.quote_plus() function that can operate on
    unicode strings. The url is first UTF-8 encoded before quoting. The
    returned string can safely be used as part of an argument to a subsequent
    iri_to_uri() call without double-quoting occurring.
    """
    return force_unicode(urllib.quote_plus(smart_str(url), safe))
# wrap so the quoting can also be applied to lazy translation strings
urlquote_plus = allow_lazy(urlquote_plus, unicode)
def urlencode(query, doseq=0):
    """
    A version of Python's urllib.urlencode() function that can operate on
    unicode strings. The parameters are first cast to UTF-8 encoded strings
    and then encoded as per normal.
    """
    # accept either a mapping or a sequence of key/value pairs
    if hasattr(query, 'items'):
        query = query.items()
    return urllib.urlencode(
        [(smart_str(k),
          isinstance(v, (list,tuple)) and [smart_str(i) for i in v] or smart_str(v))
            for k, v in query],
        doseq)
def cookie_date(epoch_seconds=None):
    """
    Format a timestamp for compatibility with Netscape's cookie standard:
    'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.

    *epoch_seconds* is a floating point number of seconds since the epoch in
    UTC (such as time.time() returns); None means "now".
    """
    stamp = formatdate(epoch_seconds)
    # formatdate() yields 'Wdy, DD Mon YYYY HH:MM:SS -0000'; splice dashes
    # into the date part and replace the numeric zone with literal 'GMT'
    return '%s-%s-%s GMT' % (stamp[:7], stamp[8:11], stamp[12:25])
def http_date(epoch_seconds=None):
    """
    Format a timestamp per the RFC 1123 date format required by HTTP
    (RFC 2616 section 3.3.1): 'Wdy, DD Mon YYYY HH:MM:SS GMT'.

    *epoch_seconds* is a floating point number of seconds since the epoch in
    UTC (such as time.time() returns); None means "now".
    """
    # formatdate() ends with a numeric '-0000' zone; keep the first 25
    # characters and append the literal 'GMT' instead
    return '%s GMT' % formatdate(epoch_seconds)[:25]
def parse_http_date(date):
    """
    Parses a date format as specified by HTTP RFC2616 section 3.3.1.

    The three formats allowed by the RFC are accepted, even if only the first
    one is still in widespread use.

    Returns a floating point number expressed in seconds since the epoch, in
    UTC.  Raises ValueError for unrecognized or invalid dates.
    """
    # emails.Util.parsedate does the job for RFC1123 dates; unfortunately
    # RFC2616 makes it mandatory to support RFC850 dates too. So we roll
    # our own RFC-compliant parsing.
    for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
        m = regex.match(date)
        if m is not None:
            break
    else:
        # no format matched at all
        raise ValueError("%r is not in a valid HTTP date format" % date)
    try:
        year = int(m.group('year'))
        if year < 100:
            # RFC850 carries two-digit years; window them into 1970-2069
            if year < 70:
                year += 2000
            else:
                year += 1900
        month = MONTHS.index(m.group('mon').lower()) + 1
        day = int(m.group('day'))
        hour = int(m.group('hour'))
        min = int(m.group('min'))
        sec = int(m.group('sec'))
        result = datetime.datetime(year, month, day, hour, min, sec)
        return calendar.timegm(result.utctimetuple())
    except Exception:
        # e.g. out-of-range day/month values
        raise ValueError("%r is not a valid date" % date)
def parse_http_date_safe(date):
    """
    Same as parse_http_date, but returns None (instead of raising) when the
    input is invalid.
    """
    try:
        return parse_http_date(date)
    except Exception:
        return None
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
    """
    Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
    input won't fit into an int.
    """
    # To prevent overconsumption of server resources, reject any base36
    # string longer than 13 digits (13 digits is sufficient to
    # base36-encode any 64-bit integer).
    if len(s) > 13:
        raise ValueError("Base36 input too large")
    converted = int(s, 36)
    # ... then make a final check that the value fits into an int
    if converted > sys.maxint:
        raise ValueError("Base36 input too large")
    return converted
def int_to_base36(i):
    """
    Converts a non-negative integer to a base36 string.
    """
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    factor = 0
    # Find the largest power of 36 not exceeding i
    while True:
        factor += 1
        if i < 36 ** factor:
            factor -= 1
            break
    base36 = []
    # Peel off digits from the most significant position downward
    while factor >= 0:
        j = 36 ** factor
        # FIX: floor division — identical to `/` on ints under Python 2,
        # but also correct under Python 3, where `/` would produce a float
        # and digits[float] would raise TypeError.
        base36.append(digits[i // j])
        i = i % j
        factor -= 1
    return ''.join(base36)
def parse_etags(etag_str):
    """
    Parse a string of one or more etags as passed in If-None-Match and
    If-Match headers (RFC 2616). Returns a list of etag values without
    their surrounding double quotes (") and with \\<CHAR> escapes undone.
    """
    matches = ETAG_MATCH.findall(etag_str)
    if not matches:
        # etag_str has the wrong format; treat it as one opaque etag
        return [etag_str]
    return [m.decode('string_escape') for m in matches]
def quote_etag(etag):
    """
    Wrap a string in double quotes, escaping backslashes and embedded
    quotes as necessary.
    """
    escaped = etag.replace('\\', '\\\\').replace('"', '\\"')
    return '"%s"' % escaped
if sys.version_info >= (2, 6):
    def same_origin(url1, url2):
        """
        Return True if url1 and url2 share scheme, hostname and port.
        """
        first = urlparse.urlparse(url1)
        second = urlparse.urlparse(url2)
        return ((first.scheme, first.hostname, first.port) ==
                (second.scheme, second.hostname, second.port))
else:
    # Python 2.4/2.5 compatibility. This actually works for Python 2.6 and
    # above too, but the definition above is much more obviously correct
    # and so is preferred going forward.
    def same_origin(url1, url2):
        """
        Return True if url1 and url2 share scheme and netloc.
        """
        first = urlparse.urlparse(url1)
        second = urlparse.urlparse(url2)
        return first[0:2] == second[0:2]
| {
"content_hash": "0fd55641081233e618bb5ad759b2082a",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 85,
"avg_line_length": 34.16504854368932,
"alnum_prop": 0.6234725774367718,
"repo_name": "mcr/ietfdb",
"id": "50ad00e11b755850ba2775d726d51a79830ad3a5",
"size": "7038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/utils/http.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "239198"
},
{
"name": "JavaScript",
"bytes": "450755"
},
{
"name": "Perl",
"bytes": "3223"
},
{
"name": "Python",
"bytes": "10286676"
},
{
"name": "Ruby",
"bytes": "3468"
},
{
"name": "Shell",
"bytes": "39950"
},
{
"name": "TeX",
"bytes": "23944"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.