text stringlengths 4 1.02M | meta dict |
|---|---|
"""Sphinx build configuration file."""
import os
import sys
from sphinx.ext import apidoc
from docutils import nodes
from docutils import transforms
# Change PYTHONPATH to include artifacts module and dependencies.
sys.path.insert(0, os.path.abspath('..'))
import artifacts # pylint: disable=wrong-import-position
import utils.dependencies # pylint: disable=wrong-import-position
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '2.0.1'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'recommonmark',
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.doctest',
    'sphinx.ext.napoleon',
    'sphinx.ext.viewcode',
    'sphinx_markdown_tables',
    'sphinx_rtd_theme',
]

# We cannot install architecture dependent Python modules on readthedocs,
# therefore we mock most imports.
pip_installed_modules = set(['six'])

# Read the project's dependency definitions from the configuration files in
# the repository root.
dependency_helper = utils.dependencies.DependencyHelper(
    dependencies_file=os.path.join('..', 'dependencies.ini'),
    test_dependencies_file=os.path.join('..', 'test_dependencies.ini'))

# Every dependency that is not pip-installed on readthedocs is mocked so
# autodoc can still import the project's modules.
modules_to_mock = set(dependency_helper.dependencies.keys())
modules_to_mock = modules_to_mock.difference(pip_installed_modules)
autodoc_mock_imports = sorted(modules_to_mock)

# Options for the Sphinx Napoleon extension, which reads Google-style
# docstrings.
napoleon_google_docstring = True
napoleon_numpy_docstring = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True

# General information about the project.
# pylint: disable=redefined-builtin
project = 'Digital Forensics Artifacts Repository'
copyright = 'The Digital Forensics Artifacts Repository authors'

# Version and release are both taken from the artifacts package.
version = artifacts.__version__
release = artifacts.__version__

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The master toctree document.
master_doc = 'index'

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'

# Output file base name for HTML help builder.
htmlhelp_basename = 'artifactsdoc'

# -- Options linkcheck ----------------------------------------------------
# Regular expressions of URIs the linkcheck builder should skip; currently
# none.
linkcheck_ignore = [
]
# -- Code to rewrite links for readthedocs --------------------------------
# This function is a Sphinx core event callback, the format of which is detailed
# here: https://www.sphinx-doc.org/en/master/extdev/appapi.html#events
# pylint: disable=unused-argument
def RunSphinxAPIDoc(app):
  """Runs sphinx-apidoc to auto-generate documentation.

  Connected to the 'builder-inited' Sphinx event in setup().

  Args:
    app (sphinx.application.Sphinx): Sphinx application. Required by the
        Sphinx event callback API.
  """
  docs_directory = os.path.abspath(os.path.dirname(__file__))
  package_directory = os.path.join(docs_directory, '..', 'artifacts')
  output_directory = os.path.join(docs_directory, 'sources', 'api')
  apidoc.main(['-o', output_directory, package_directory, '--force'])
class MarkdownLinkFixer(transforms.Transform):
  """Transform definition to parse .md references to internal pages."""

  default_priority = 1000

  # URI prefixes of Markdown pages outside the documentation; currently none.
  _URI_PREFIXES = []

  def _FixLinks(self, node):
    """Corrects links to .md files not part of the documentation.

    Args:
      node (docutils.nodes.Node): docutils node.

    Returns:
      docutils.nodes.Node: docutils node, with correct URIs outside
          of Markdown pages outside the documentation.
    """
    if not isinstance(node, nodes.reference) or 'refuri' not in node:
      return node

    uri = node['refuri']
    already_suffixed = uri.endswith('.asciidoc') or uri.endswith('.md')
    if not already_suffixed:
      if any(uri.startswith(prefix) for prefix in self._URI_PREFIXES):
        node['refuri'] = uri + '.md'
    return node

  def _Traverse(self, node):
    """Traverses the document tree rooted at node.

    Args:
      node (docutils.nodes.Node): docutils node.
    """
    # Iterative preorder traversal, equivalent to recursing into children.
    pending = [node]
    while pending:
      current = pending.pop()
      self._FixLinks(current)
      pending.extend(reversed(current.children))

  # pylint: disable=arguments-differ
  def apply(self):
    """Applies this transform on document tree."""
    self._Traverse(self.document)
# pylint: disable=invalid-name
def setup(app):
  """Called at Sphinx initialization.

  Args:
    app (sphinx.application.Sphinx): Sphinx application.
  """
  # Triggers sphinx-apidoc to generate API documentation.
  app.connect('builder-inited', RunSphinxAPIDoc)

  recommonmark_settings = {'enable_auto_toc_tree': True}
  app.add_config_value('recommonmark_config', recommonmark_settings, True)

  app.add_transform(MarkdownLinkFixer)
| {
"content_hash": "962f4715f069faca947c5b3aed6bc669",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 80,
"avg_line_length": 30.236686390532544,
"alnum_prop": 0.6913894324853229,
"repo_name": "ForensicArtifacts/artifacts",
"id": "8706d25835807c87eb475d25f3df67ac4db795f2",
"size": "5134",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "771"
},
{
"name": "Python",
"bytes": "112796"
},
{
"name": "Shell",
"bytes": "1244"
}
],
"symlink_target": ""
} |
'''
'''
from __future__ import absolute_import
import __builtin__
import tempfile
from contextlib import contextmanager, closing
from random import randrange
from .filepath import FilePath, DirPath
@contextmanager
def temporary_file(*args, **kwds):
    """Yields a FilePath naming a temporary file; unlinks it on exit.

    All arguments are forwarded to tempfile.mktemp().
    """
    temp = FilePath(tempfile.mktemp(*args, **kwds))
    try:
        yield temp
    finally:
        temp.unlink_carefully()
@contextmanager
def temporary_directory(*args, **kwds):
    """Yields a DirPath to a newly created temporary directory.

    All arguments are forwarded to tempfile.mktemp(); the directory tree is
    removed recursively on exit.
    """
    temp = DirPath(tempfile.mktemp(*args, **kwds))
    temp.reqdir()
    try:
        yield temp
    finally:
        temp.rmdir_carefully(recursive=True)
rnd_holder = '<RND>'
@contextmanager
def temp_file_proxy(path, mode='w', tmp_suffix='~tmp~%s~' % rnd_holder, open=__builtin__.open, **kwds):
    """Writes to a temporary sibling of path, renaming over path on success.

    Yields the file object opened on the temporary path. If the managed
    block completes without raising, the temporary file is renamed to
    path; the temporary file is unlinked (carefully) in every case.

    Args:
        path: destination path (converted to FilePath).
        mode: mode passed to open(); defaults to 'w'.
        tmp_suffix: suffix appended to path for the temporary file. If it
            contains rnd_holder, that token is replaced with random hex
            until an unused name is found.
        open: file opener; defaults to the builtin open.
        **kwds: extra keyword arguments forwarded to the opener.

    Raises:
        RuntimeError: if the temporary file already exists.

    NOTE(review): name generation relies on tempfile.mktemp-style
    exists() probing, which is inherently racy between check and open.
    """
    path = FilePath(path)
    if rnd_holder not in tmp_suffix:
        tmp_path = FilePath(path + tmp_suffix)
    else:
        # Invariant of the template, not of the loop; checked once instead
        # of being re-asserted on every iteration as before.
        assert tmp_suffix.count(rnd_holder) == 1
        while True:
            rnd_tmp_suffix = tmp_suffix.replace(rnd_holder, '%X' % randrange(0xffffffff))
            tmp_path = FilePath(path + rnd_tmp_suffix)
            if not tmp_path.exists():
                break
    if tmp_path.exists():
        raise RuntimeError("temporary file %s already exists" % (tmp_path,))
    try:
        with closing(open(tmp_path, mode, **kwds)) as fp:
            yield fp
        # Only reached when the managed block did not raise.
        tmp_path.rename(path)
    finally:
        tmp_path.unlink_carefully()
| {
"content_hash": "29996454512a66ad33daa743c2eb2dc2",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 105,
"avg_line_length": 25.642857142857142,
"alnum_prop": 0.6225626740947076,
"repo_name": "matthagy/Jamenson",
"id": "12032dcab10ba88ae0659ffd70fb8019d040dc31",
"size": "1436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jamenson/runtime/tempfile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "444789"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for the powerdns_manager app.

    Creates the PowerDNS backend tables (domains, records, cryptokeys,
    domainmetadata, supermasters, tsigkeys) plus the app-specific tables
    (dynamiczones, zonetemplates) and their uniqueness/index constraints.

    NOTE(review): several PositiveIntegerField declarations pass
    max_length, which is not a valid IntegerField option — presumably
    carried over from the legacy schema definition; verify against the
    model definitions before changing.
    """

    dependencies = [
        # The user model is swappable, so depend on settings.AUTH_USER_MODEL.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # DNSSEC key material (PowerDNS 'cryptokeys' table). The 'domain'
        # foreign key is added by the AddField operation at the end.
        migrations.CreateModel(
            name='CryptoKey',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('flags', models.PositiveIntegerField(help_text=b'Key flags.', verbose_name='flags')),
                ('active', models.BooleanField(help_text=b'Check to activate key.', verbose_name='active')),
                ('content', models.TextField(help_text=b'Enter the key data.', null=True, verbose_name='content', blank=True)),
                ('date_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),
            ],
            options={
                'ordering': ['domain'],
                'db_table': 'cryptokeys',
                'verbose_name': 'crypto key',
                'verbose_name_plural': 'crypto keys',
                'get_latest_by': 'date_modified',
            },
            bases=(models.Model,),
        ),
        # DNS zones (PowerDNS 'domains' table).
        migrations.CreateModel(
            name='Domain',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(help_text=b'This field is the actual domainname. This is the field that powerDNS matches to when it gets a request. The domainname should be in the format of: domainname.TLD (no trailing dot)', unique=True, max_length=255, verbose_name='name', db_index=True)),
                ('master', models.CharField(help_text=b'Enter a comma delimited list of nameservers that are master for this domain. This setting applies only to slave zones.', max_length=128, null=True, verbose_name='master', blank=True)),
                ('last_check', models.PositiveIntegerField(help_text=b'Last time this domain was checked for freshness.', max_length=11, null=True, verbose_name='last check')),
                ('type', models.CharField(default=b'NATIVE', help_text=b'Select the zone type. Native refers to native SQL replication. Master/Slave refers to DNS server based zone transfers.', max_length=6, verbose_name='type', choices=[(b'NATIVE', b'Native'), (b'MASTER', b'Master'), (b'SLAVE', b'Slave')])),
                ('notified_serial', models.PositiveIntegerField(help_text=b'The last notified serial of a master domain. This is updated from the SOA record of the domain.', max_length=11, null=True, verbose_name='notified serial')),
                ('account', models.CharField(help_text=b'Determine if a certain host is a supermaster for a certain domain name. (???)', max_length=40, null=True, verbose_name='account', blank=True)),
                ('date_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),
                ('created_by', models.ForeignKey(related_name='powerdns_manager_domain_created_by', verbose_name='owner username', to=settings.AUTH_USER_MODEL, help_text=b'The Django user this zone belongs to.', null=True)),
            ],
            options={
                'ordering': ['name'],
                'db_table': 'domains',
                'verbose_name': 'zone',
                'verbose_name_plural': 'zones',
                'get_latest_by': 'date_modified',
            },
            bases=(models.Model,),
        ),
        # Per-domain settings (PowerDNS 'domainmetadata' table).
        migrations.CreateModel(
            name='DomainMetadata',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('kind', models.CharField(help_text=b'Select a setting.', max_length=16, verbose_name='setting', choices=[(b'ALLOW-AXFR-FROM', b'ALLOW-AXFR-FROM'), (b'AXFR-MASTER-TSIG', b'AXFR-MASTER-TSIG'), (b'LUA-AXFR-SCRIPT', b'LUA-AXFR-SCRIPT'), (b'NSEC3NARROW', b'NSEC3NARROW'), (b'NSEC3PARAM', b'NSEC3PARAM'), (b'PRESIGNED', b'PRESIGNED'), (b'SOA-EDIT', b'SOA-EDIT'), (b'TSIG-ALLOW-AXFR', b'TSIG-ALLOW-AXFR')])),
                ('content', models.TextField(help_text=b'Enter the metadata.', null=True, verbose_name='content', blank=True)),
                ('date_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),
                ('domain', models.ForeignKey(related_name='powerdns_manager_domainmetadata_domain', verbose_name='domain', to='powerdns_manager.Domain', help_text='Select the domain this record belongs to.')),
            ],
            options={
                'ordering': ['kind'],
                'db_table': 'domainmetadata',
                'verbose_name': 'domain metadata',
                'verbose_name_plural': 'domain metadata',
                'get_latest_by': 'date_modified',
            },
            bases=(models.Model,),
        ),
        # App-specific table for zones whose A/AAAA records may be updated
        # dynamically over HTTP with an API key.
        migrations.CreateModel(
            name='DynamicZone',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('is_dynamic', models.BooleanField(help_text=b'Check to mark this zone as dynamic. An API key will be generated for you so as to be able to update the A nd AAAA records IP addresses over HTTP.', verbose_name='Dynamic zone')),
                ('api_key', models.CharField(help_text=b'The API key is generated automatically. To reset it, use the relevant action in the changelist view.', max_length=64, null=True, verbose_name='API Key')),
                ('date_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),
                ('domain', models.ForeignKey(related_name='powerdns_manager_dynamiczone_domain', verbose_name='domain', to='powerdns_manager.Domain', help_text='Select the domain, the A and AAAA records of which might be updated dynamically over HTTP.', unique=True)),
            ],
            options={
                'ordering': ['-domain'],
                'db_table': 'dynamiczones',
                'verbose_name': 'dynamic zone',
                'verbose_name_plural': 'dynamic zones',
                'get_latest_by': 'date_modified',
            },
            bases=(models.Model,),
        ),
        # Resource records (PowerDNS 'records' table).
        migrations.CreateModel(
            name='Record',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(help_text=b"Actual name of a record. Must not end in a '.' and be fully qualified - it is not relative to the name of the domain! For example: www.test.com (no trailing dot)", max_length=255, null=True, verbose_name='name', db_index=True)),
                ('type', models.CharField(choices=[(b'SOA', b'SOA'), (b'NS', b'NS'), (b'MX', b'MX'), (b'A', b'A'), (b'AAAA', b'AAAA'), (b'CNAME', b'CNAME'), (b'PTR', b'PTR'), (b'TXT', b'TXT'), (b'SPF', b'SPF'), (b'SRV', b'SRV')], max_length=10, help_text=b'Select the type of the resource record.', null=True, verbose_name='type', db_index=True)),
                ('content', models.CharField(help_text=b"This is the 'right hand side' of a DNS record. For an A record, this is the IP address for example.", max_length=64000, null=True, verbose_name='content')),
                ('ttl', models.PositiveIntegerField(help_text=b'How long the DNS-client are allowed to remember this record. Also known as Time To Live(TTL) This value is in seconds.', max_length=11, null=True, verbose_name='TTL', blank=True)),
                ('prio', models.PositiveIntegerField(help_text=b'For MX records, this should be the priority of the mail exchanger specified.', max_length=11, null=True, verbose_name='priority')),
                ('auth', models.NullBooleanField(help_text=b"The 'auth' field should be set to '1' for data for which the zone itself is authoritative, which includes the SOA record and its own NS records. The 'auth' field should be 0 however for NS records which are used for delegation, and also for any glue (A, AAAA) records present for this purpose. Do note that the DS record for a secure delegation should be authoritative!", verbose_name='authoritative')),
                ('ordername', models.CharField(help_text=b'http://doc.powerdns.com/dnssec-modes.html#dnssec-direct-database', max_length=255, null=True, verbose_name='ordername', db_index=True)),
                ('change_date', models.PositiveIntegerField(help_text=b'Timestamp for the last update. This is used by PowerDNS internally.', max_length=11, null=True, verbose_name='change date')),
                ('date_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),
                ('domain', models.ForeignKey(related_name='powerdns_manager_record_domain', verbose_name='domain', to='powerdns_manager.Domain', help_text='Select the domain this record belongs to.')),
            ],
            options={
                'get_latest_by': 'date_modified',
                'ordering': ['name', 'type'],
                'verbose_name_plural': 'records',
                'db_table': 'records',
                'verbose_name': 'record',
            },
            bases=(models.Model,),
        ),
        # Trusted supermaster servers (PowerDNS 'supermasters' table).
        migrations.CreateModel(
            name='SuperMaster',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ip', models.GenericIPAddressField(help_text=b'IP address for supermaster (IPv4 or IPv6).', unique=True, verbose_name='IP address')),
                ('nameserver', models.CharField(help_text=b'Hostname of the supermaster.', unique=True, max_length=255, verbose_name='nameserver')),
                ('account', models.CharField(help_text=b'Account name (???)', max_length=40, null=True, verbose_name='account', blank=True)),
                ('date_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),
            ],
            options={
                'ordering': ['nameserver'],
                'db_table': 'supermasters',
                'verbose_name': 'supermaster',
                'verbose_name_plural': 'supermasters',
                'get_latest_by': 'date_modified',
            },
            bases=(models.Model,),
        ),
        # TSIG shared secrets (PowerDNS 'tsigkeys' table).
        migrations.CreateModel(
            name='TsigKey',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(help_text=b'Enter a name for the key.', max_length=255, verbose_name='name')),
                ('algorithm', models.CharField(help_text=b'Select hashing algorithm.', max_length=50, verbose_name='algorithm', choices=[(b'hmac-md5', b'hmac-md5')])),
                ('secret', models.CharField(help_text=b'Enter the shared secret.', max_length=255, verbose_name='secret')),
                ('date_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),
                ('created_by', models.ForeignKey(related_name='powerdns_manager_tsigkey_created_by', verbose_name='created by', to=settings.AUTH_USER_MODEL, help_text=b'The Django user this TSIG key belongs to.', null=True)),
            ],
            options={
                'get_latest_by': 'date_modified',
                'ordering': ['name'],
                'verbose_name_plural': 'TSIG Keys',
                'db_table': 'tsigkeys',
                'verbose_name': 'TSIG Key',
            },
            bases=(models.Model,),
        ),
        # App-specific zone templates with an #origin# placeholder.
        migrations.CreateModel(
            name='ZoneTemplate',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(help_text=b'Enter a name for the template.', max_length=100, verbose_name='name')),
                ('content', models.TextField(help_text=b'Enter the template content. The placeholder #origin# is expanded to the origin of the zone to which the template is applied.', null=True, verbose_name='content', blank=True)),
                ('notes', models.TextField(help_text=b'Space for notes about the template.', null=True, verbose_name='notes', blank=True)),
                ('date_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),
                ('created_by', models.ForeignKey(related_name='powerdns_manager_zonetemplate_created_by', verbose_name='template creator', to=settings.AUTH_USER_MODEL, help_text=b'The Django user this template belongs to.')),
            ],
            options={
                'get_latest_by': 'date_modified',
                'ordering': ['name', 'date_modified'],
                'verbose_name_plural': 'templates',
                'db_table': 'zonetemplates',
                'verbose_name': 'template',
            },
            bases=(models.Model,),
        ),
        # Uniqueness and index constraints.
        migrations.AlterUniqueTogether(
            name='zonetemplate',
            unique_together=set([('name', 'created_by')]),
        ),
        migrations.AlterUniqueTogether(
            name='tsigkey',
            unique_together=set([('name', 'algorithm')]),
        ),
        migrations.AlterIndexTogether(
            name='record',
            index_together=set([('domain', 'ordername'), ('name', 'type')]),
        ),
        # CryptoKey.domain is added here, after Domain has been created.
        migrations.AddField(
            model_name='cryptokey',
            name='domain',
            field=models.ForeignKey(related_name='powerdns_manager_cryptokey_domain', verbose_name='domain', to='powerdns_manager.Domain', help_text='Select the domain this record belongs to.'),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "6ec835d82f36496d9a62f02cedd313b3",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 464,
"avg_line_length": 73.21808510638297,
"alnum_prop": 0.6029059208136578,
"repo_name": "gnotaras/django-powerdns-manager",
"id": "873047eac9c10326abfc23de6366b228c0bcf40b",
"size": "13789",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/powerdns_manager/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "19616"
},
{
"name": "Python",
"bytes": "196479"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import absolute_import
from .securityhandlerhelper import securityhandlerhelper
import re as re
dateTimeFormat = '%Y-%m-%d %H:%M'
import arcrest
from . import featureservicetools as featureservicetools
from arcrest.hostedservice import AdminFeatureService
import datetime, time
import json
import os
import arcresthelper.common as common
import gc
import sys
from .packages.six.moves import urllib_parse as urlparse
# pyparsing (and select_parser, which depends on it) is an optional
# dependency; record whether it is available instead of failing the import.
try:
    import pyparsing
    pyparsingInstall = True
    from arcresthelper import select_parser
# Was a bare 'except:', which also swallowed SystemExit/KeyboardInterrupt;
# Exception still covers ImportError and any transitive import failure.
except Exception:
    pyparsingInstall = False
import inspect
def lineno():
    """Returns the current line number in our program."""
    # Frame 1 is the caller's frame; equivalent to
    # inspect.currentframe().f_back.f_lineno.
    return sys._getframe(1).f_lineno
#----------------------------------------------------------------------
def trace():
    """Determines information about where an error was thrown.

    Must be called from inside an ``except`` block.

    Returns:
        tuple: line number (as the string ``'line N'``), filename of this
        module, and the last line of the formatted traceback (the error
        message, e.g. ``"ZeroDivisionError: division by zero"``).
    """
    import traceback, inspect, sys
    _, _, exc_tb = sys.exc_info()
    frame_text = traceback.format_tb(exc_tb)[0]
    # frame_text looks like ' File "...", line N, in name'; keep 'line N'.
    error_line = frame_text.split(", ")[1]
    script_name = inspect.getfile(inspect.currentframe())
    # Last line of the formatted exception is 'ExcType: message'.
    error_message = traceback.format_exc().splitlines()[-1]
    return error_line, script_name, error_message
class publishingtools(securityhandlerhelper):
#----------------------------------------------------------------------
def getItemID(self, userContent, title=None, name=None, itemType=None):
    """Gets the ID of an item by a combination of title, name, and type.

    Args:
        userContent (list): A list of user content items.
        title (str): The title of the item. Defaults to ``None``.
        name (str): The name of the item. Defaults to ``None``.
        itemType (str): The type of the item. Defaults to ``None``.

    Returns:
        str: The first matching item's ID, or ``None`` if no item matches.

    Raises:
        AttributeError: If both ``title`` and ``name`` are ``None``.

    See Also:
        :py:func:`getItem`
    """
    if name is None and title is None:
        raise AttributeError('Name or Title needs to be specified')
    for item in userContent:
        # A criterion left as None matches any item; this collapses the
        # original name-only/title-only/both branches into one condition.
        if ((name is None or item.name == name) and
                (title is None or item.title == title) and
                (itemType is None or item.type == itemType)):
            return item.id
    return None
#----------------------------------------------------------------------
def getItem(self, userContent, title=None, name=None, itemType=None):
    """Gets an item by a combination of title, name, and type.

    Args:
        userContent (list): A list of user content items.
        title (str): The title of the item. Defaults to ``None``.
        name (str): The name of the item. Defaults to ``None``.
        itemType (str): The type of the item. Defaults to ``None``.

    Returns:
        The first matching item object (not its ID, unlike
        :py:func:`getItemID`), or ``None`` if no item matches.

    Raises:
        AttributeError: If both ``title`` and ``name`` are ``None``.

    See Also:
        :py:func:`getItemID`
    """
    if name is None and title is None:
        raise AttributeError('Name or Title needs to be specified')
    for item in userContent:
        # A criterion left as None matches any item; this collapses the
        # original name-only/title-only/both branches into one condition.
        if ((name is None or item.name == name) and
                (title is None or item.title == title) and
                (itemType is None or item.type == itemType)):
            return item
    return None
#----------------------------------------------------------------------
def folderExist(self, name, folders):
    """Determines if a folder exists, case insensitively.

    Args:
        name (str): The name of the folder to check.
        folders (list): A list of folder dicts to check against. The dicts
            must contain the key:value pair ``title``.

    Returns:
        ``True`` if the folder is found. NOTE(review): despite the original
        docstring claiming ``False`` otherwise, a non-empty ``name`` that is
        NOT found returns ``None`` (the never-reassigned ``folderID``), and
        an empty/``None`` name returns ``False``. ``_publishItems`` relies on
        the ``is None`` result to decide when to create the folder, so do not
        change the not-found value to ``False`` without updating callers.
    """
    if name is not None and name != '':
        folderID = None
        for folder in folders:
            if folder['title'].lower() == name.lower():
                return True
        del folders
        return folderID
    else:
        return False
#----------------------------------------------------------------------
def publishItems(self, items_info):
    """Publishes a list of items.

    Args:
        items_info (list): A list of JSON configuration items to publish.

    Returns:
        list: A list of dicts, one per successfully published item, each with
        a ``ReplaceTag`` key and the ``ItemInfo`` result from
        :py:meth:`_publishItems`; ``None`` when no security handler is set.

    Raises:
        common.ArcRestHelperError: If publishing fails unexpectedly.
    """
    if self.securityhandler is None:
        print ("Security handler required")
        return
    # Pre-declare locals so the finally block can reset them unconditionally.
    itemInfo = None
    item_results = None
    item_info = None
    admin = None
    try:
        admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
        item_results = []
        for item_info in items_info:
            # Each result is keyed by a replace tag; '{FeatureService}' is
            # the default when the config does not provide one.
            if 'ReplaceTag' in item_info:
                itemInfo = {"ReplaceTag":item_info['ReplaceTag'] }
            else:
                itemInfo = {"ReplaceTag":"{FeatureService}" }
            itemInfo['ItemInfo'] = self._publishItems(config=item_info)
            if itemInfo['ItemInfo'] is not None and 'name' in itemInfo['ItemInfo']:
                print ("%s created" % itemInfo['ItemInfo']['name'])
                item_results.append(itemInfo)
            else:
                # _publishItems returned an error dict or message string;
                # report it but leave it out of the results.
                print (str(itemInfo['ItemInfo']))
        return item_results
    except common.ArcRestHelperError as e:
        raise e
    except Exception as e:
        line, filename, synerror = trace()
        raise common.ArcRestHelperError({
            "function": "publishItems",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        # Release references and force a collection pass (pattern used
        # throughout this module).
        itemInfo = None
        item_results = None
        item_info = None
        admin = None
        del itemInfo
        del item_results
        del item_info
        del admin
        gc.collect()
#----------------------------------------------------------------------
def _publishItems(self, config):
    """Publishes or updates a single portal item described by config.

    Args:
        config (dict): Item configuration. Required keys: ``Title``,
            ``Description``, ``Tags``, ``Summary``, ``ShareEveryone``,
            ``ShareOrg``, ``Groups``, ``Folder``, ``Thumbnail``, ``Type``,
            ``typeKeywords``. Optional keys: ``Data``, ``Url``,
            ``DateTimeFormat``, ``SkipIfExist``.

    Returns:
        dict: ``itemId``, ``url``, ``folderId`` and ``name`` of the
        published item; the error dict from ``updateItem`` on update
        failure; or the string ``"Item could not be added"``.

    Raises:
        common.ArcRestHelperError: If any unexpected exception occurs.
    """
    # Pre-declare every local so the finally block below can reset them
    # unconditionally (folderName is declared twice in the original).
    name = None
    tags = None
    description = None
    extent = None
    admin = None
    adminusercontent = None
    itemData = None
    itemId = None
    datestring = None
    snippet = None
    everyone = None
    org = None
    groupNames = None
    folderName = None
    thumbnail = None
    itemType = None
    itemParams = None
    content = None
    userInfo = None
    userCommunity = None
    results = None
    folderName = None
    folderId = None
    res = None
    sea = None
    group_ids = None
    shareResults = None
    updateParams = None
    url = None
    resultItem = {}
    try:
        name = ''
        tags = ''
        description = ''
        extent = ''
        # NOTE(review): webmap_data appears unused in this method.
        webmap_data = ''
        if 'Data' in config:
            itemData = config['Data']
        if 'Url' in config:
            url = config['Url']
        name = config['Title']
        if 'DateTimeFormat' in config:
            loc_df = config['DateTimeFormat']
        else:
            loc_df = dateTimeFormat
        # Expand {DATE}/{Date} placeholders in the title with a timestamp.
        datestring = datetime.datetime.now().strftime(loc_df)
        name = name.replace('{DATE}',datestring)
        name = name.replace('{Date}',datestring)
        description = config['Description']
        tags = config['Tags']
        snippet = config['Summary']
        everyone = config['ShareEveryone']
        org = config['ShareOrg']
        groupNames = config['Groups'] #Groups are by ID. Multiple groups comma separated
        folderName = config['Folder']
        thumbnail = config['Thumbnail']
        itemType = config['Type']
        typeKeywords = config['typeKeywords']
        # SkipIfExist may be a bool or the string 'true' (case-insensitive).
        skipIfExist = False
        if 'SkipIfExist' in config:
            skipIfExist = config['SkipIfExist']
            if str(skipIfExist).lower() == 'true':
                skipIfExist = True
        itemParams = arcrest.manageorg.ItemParameter()
        itemParams.title = name
        itemParams.thumbnail = thumbnail
        itemParams.type = itemType
        itemParams.overwrite = True
        itemParams.snippet = snippet
        itemParams.description = description
        itemParams.extent = extent
        itemParams.tags = tags
        itemParams.typeKeywords = ",".join(typeKeywords)
        admin = arcrest.manageorg.Administration(securityHandler=self.securityhandler)
        content = admin.content
        userInfo = content.users.user()
        userCommunity = admin.community
        # Create the target folder if needed; folderExist returns None (not
        # False) when a non-empty folder name is not found.
        if folderName is not None and folderName != "":
            if self.folderExist(name=folderName,folders=userInfo.folders) is None:
                res = userInfo.createFolder(name=folderName)
            userInfo.currentFolder = folderName
        if 'id' in userInfo.currentFolder:
            folderId = userInfo.currentFolder['id']
        # Look for an existing item of the same name/title and type.
        sea = arcrest.find.search(securityHandler=self._securityHandler)
        items = sea.findItem(title=name, itemType=itemType,searchorg=False)
        if items['total'] >= 1:
            for res in items['results']:
                if 'type' in res and res['type'] == itemType:
                    if 'name' in res and res['name'] == name:
                        itemId = res['id']
                        break
                    if 'title' in res and res['title'] == name:
                        itemId = res['id']
                        break
        if not itemId is None:
            # Existing item: optionally skip, otherwise update in place and
            # move it into the target folder when needed.
            item = content.getItem(itemId).userItem
            if skipIfExist == True:
                resultItem['itemId'] = item.id
                resultItem['url'] = item.item._curl + "/data"
                resultItem['folderId'] = folderId
                resultItem['name'] = name
                return resultItem
            results = item.updateItem(itemParameters=itemParams,
                                      data=itemData,serviceUrl=url)
            if 'error' in results:
                return results
            if item.ownerFolder != folderId:
                if folderId is None:
                    folderId = "/"
                moveRes = userInfo.moveItems(items=item.id,folder=folderId)
        else:
            # New item: add it to the user's current folder.
            try:
                item = userInfo.addItem(itemParameters=itemParams,
                                        overwrite=True,
                                        url=url,
                                        relationshipType=None,
                                        originItemId=None,
                                        destinationItemId=None,
                                        serviceProxyParams=None,
                                        metadata=None,
                                        filePath=itemData)
                #updateParams = arcrest.manageorg.ItemParameter()
                #updateParams.title = name
                #updateResults = item.updateItem(itemParameters=updateParams)
            except Exception as e:
                print (e)
        if item is None:
            return "Item could not be added"
        # Share the item with the configured groups/org/everyone.
        group_ids = userCommunity.getGroupIDs(groupNames=groupNames)
        shareResults = userInfo.shareItems(items=item.id,
                                           groups=','.join(group_ids),
                                           everyone=everyone,
                                           org=org)
        resultItem['itemId'] = item.id
        resultItem['url'] = item.item._curl + "/data"
        resultItem['folderId'] = folderId
        resultItem['name'] = name
        return resultItem
    except Exception as e:
        line, filename, synerror = trace()
        raise common.ArcRestHelperError({
            "function": "_publishItems",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        # Release references and force a collection pass (pattern used
        # throughout this module).
        name = None
        tags = None
        description = None
        extent = None
        admin = None
        adminusercontent = None
        itemData = None
        datestring = None
        snippet = None
        everyone = None
        org = None
        groupNames = None
        itemId = None
        thumbnail = None
        itemType = None
        itemParams = None
        content = None
        userInfo = None
        userCommunity = None
        results = None
        folderName = None
        folderId = None
        res = None
        sea = None
        group_ids = None
        shareResults = None
        updateParams = None
        del name
        del tags
        del description
        del extent
        del admin
        del adminusercontent
        del itemData
        del datestring
        del snippet
        del everyone
        del org
        del groupNames
        del itemId
        del thumbnail
        del itemType
        del itemParams
        del content
        del userInfo
        del userCommunity
        del results
        del folderName
        del folderId
        del res
        del sea
        del group_ids
        del shareResults
        del updateParams
        gc.collect()
#----------------------------------------------------------------------
def publishMap(self, maps_info, fsInfo=None, itInfo=None):
    """Publishes a list of maps.

    Args:
        maps_info (list): A list of JSON configuration maps to publish.
        fsInfo (list): Optional results of previously published feature
            services, used to resolve ``Layer`` replace tags. Defaults to
            ``None``.
        itInfo (list): Optional results of previously published items, used
            to resolve ``Global`` replace tags. Defaults to ``None``.

    Returns:
        list: One dict per map with its ``ReplaceTag`` and the ``MapInfo``
        result of :py:meth:`_publishMap`; ``None`` when no security handler
        is configured.

    Raises:
        common.ArcRestHelperError: If publishing fails unexpectedly.
    """
    if self.securityhandler is None:
        print ("Security handler required")
        return
    # Pre-declare locals so the finally block can reset them unconditionally.
    itemInfo = None
    itemId = None
    map_results = None
    replaceInfo = None
    replaceItem = None
    map_info = None
    admin = None
    try:
        admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
        map_results = []
        for map_info in maps_info:
            itemInfo = {}
            if 'ReplaceInfo' in map_info:
                replaceInfo = map_info['ReplaceInfo']
            else:
                replaceInfo = None
            if replaceInfo != None:
                for replaceItem in replaceInfo:
                    if replaceItem['ReplaceType'] == 'Layer':
                        if fsInfo is not None:
                            for fs in fsInfo:
                                if fs is not None and replaceItem['ReplaceString'] == fs['ReplaceTag']:
                                    # Point the replace tag at the published
                                    # feature service.
                                    replaceItem['ReplaceString'] = fs['FSInfo']['url']
                                    replaceItem['ItemID'] = fs['FSInfo']['itemId']
                                    replaceItem['ItemFolder'] = fs['FSInfo']['folderId']
                                    if 'convertCase' in fs['FSInfo']:
                                        replaceItem['convertCase'] = fs['FSInfo']['convertCase']
                                elif 'ItemID' in replaceItem:
                                    # BUG FIX: this condition was written as
                                    #   if 'ItemFolder' in replaceItem == False:
                                    # which Python parses as the chained comparison
                                    #   ('ItemFolder' in replaceItem) and (replaceItem == False)
                                    # and is therefore always False, so the owner
                                    # folder was never looked up.
                                    if 'ItemFolder' not in replaceItem:
                                        itemId = replaceItem['ItemID']
                                        itemInfo = admin.content.getItem(itemId=itemId)
                                        if itemInfo.owner:
                                            if itemInfo.owner == self._securityHandler.username and itemInfo.ownerFolder:
                                                replaceItem['ItemFolder'] = itemInfo.ownerFolder
                                            else:
                                                replaceItem['ItemFolder'] = None
                    elif replaceItem['ReplaceType'] == 'Global':
                        if itInfo is not None:
                            for itm in itInfo:
                                if itm is not None:
                                    if replaceItem['ReplaceString'] == itm['ReplaceTag']:
                                        if 'ItemInfo' in itm:
                                            if 'url' in itm['ItemInfo']:
                                                replaceItem['ReplaceString'] = itm['ItemInfo']['url']
            # Each result is keyed by a replace tag; '{WebMap}' by default.
            if 'ReplaceTag' in map_info:
                itemInfo = {"ReplaceTag":map_info['ReplaceTag'] }
            else:
                itemInfo = {"ReplaceTag":"{WebMap}" }
            itemInfo['MapInfo'] = self._publishMap(config=map_info,
                                                   replaceInfo=replaceInfo)
            map_results.append(itemInfo)
            print ("%s webmap created" % itemInfo['MapInfo']['Name'])
        return map_results
    except common.ArcRestHelperError as e:
        raise e
    except Exception as e:
        line, filename, synerror = trace()
        raise common.ArcRestHelperError({
            "function": "publishMap",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        # Release references and force a collection pass (pattern used
        # throughout this module).
        itemInfo = None
        itemId = None
        replaceInfo = None
        replaceItem = None
        map_info = None
        admin = None
        del itemInfo
        del itemId
        del replaceInfo
        del replaceItem
        del map_info
        del admin
        gc.collect()
#----------------------------------------------------------------------
def _publishMap(self, config, replaceInfo=None, operationalLayers=None, tableLayers=None):
    """Publishes (adds or overwrites) a web map item from a JSON definition.

    Args:
        config (dict): Web map configuration. Reads ``ItemJSON`` (path, or
            list of paths, to the web map JSON), ``Title``, ``Description``,
            ``Tags``, ``Summary``, ``Extent``, ``ShareEveryone``,
            ``ShareOrg``, ``Groups``, ``Folder``, ``Thumbnail``, ``Type``,
            ``typeKeywords`` and optionally ``DateTimeFormat``.
        replaceInfo (list): Optional ``Global``/``Layer`` replacement
            directives applied to the loaded web map JSON.
        replaceInfo items use ``SearchString``/``ReplaceString`` and, for
            ``Layer`` directives, optionally ``ItemID`` and ``convertCase``.
        operationalLayers (list): Optional operational layers that replace
            the ones read from the JSON file.
        tableLayers (list): Optional tables that replace the ones read from
            the JSON file.

    Returns:
        dict: ``{'Layers':..., 'Tables':..., 'Results':...}`` on success;
        an error dict when the source JSON is missing or the item update
        fails; the string ``"Item could not be added"`` when adding fails;
        ``None`` when no web map data was loaded.

    Raises:
        common.ArcRestHelperError: Any unexpected failure, wrapped with
            trace information.
    """
    try:
        webmap_data = None
        mapJson = config['ItemJSON']
        if isinstance(mapJson, list):
            # Multiple JSON files are merged into one web map definition.
            webmap_data = []
            for jsonItem in mapJson:
                try:
                    with open(jsonItem) as webMapInfo:
                        webmap_data.append(json.load(webMapInfo))
                except Exception:
                    raise ValueError("%s is not a valid JSON File" % jsonItem)
            webmap_data = common.merge_dicts(webmap_data)
        else:
            if os.path.exists(mapJson) == False:
                return {"Results":{"error": "%s does not exist" % mapJson}}
            try:
                with open(mapJson) as webMapInfo:
                    webmap_data = json.load(webMapInfo)
            except Exception:
                raise ValueError("%s is not a valid JSON File" % mapJson)
        resultMap = {'Layers':[],'Tables':[],'Results':{}}
        if webmap_data is not None:
            layersInfo = {}
            # Explicit layer/table lists override whatever the JSON carried.
            if operationalLayers:
                webmap_data['operationalLayers'] = operationalLayers
            if tableLayers:
                webmap_data['tables'] = tableLayers
            if replaceInfo:
                for replaceItem in replaceInfo:
                    if replaceItem['ReplaceType'] == 'Global':
                        webmap_data = common.find_replace(webmap_data,
                                                          replaceItem['SearchString'],
                                                          replaceItem['ReplaceString'])
                    elif replaceItem['ReplaceType'] == 'Layer':
                        # NOTE: each directive overwrites layersInfo[id], so a
                        # later non-matching directive resets it to {} — this
                        # mirrors the long-standing behavior of this module.
                        if 'tables' in webmap_data:
                            for opLayer in webmap_data['tables']:
                                layersInfo[opLayer['id']] = self._applyLayerReplaceInfo(opLayer, replaceItem)
                        for opLayer in webmap_data['operationalLayers']:
                            layersInfo[opLayer['id']] = self._applyLayerReplaceInfo(opLayer, replaceItem)
            # Record the published layer/table ids; layer id regeneration is
            # disabled, so currentID always equals the (unchanged) layer id.
            resultMap['Layers'] = {}
            for opLayer in webmap_data['operationalLayers']:
                currentID = opLayer['id']
                self._remapAppPropertyLayerIds(webmap_data, currentID, opLayer['id'])
                resultLayer = {"Name":opLayer['title'],
                               "ID":opLayer['id']
                               }
                if currentID in layersInfo:
                    resultLayer['FieldInfo'] = layersInfo[currentID]
                resultMap['Layers'][currentID] = resultLayer
            if 'tables' in webmap_data:
                for opLayer in webmap_data['tables']:
                    currentID = opLayer['id']
                    self._remapAppPropertyLayerIds(webmap_data, currentID, opLayer['id'])
                    resultMap['Tables'].append({"Name":opLayer['title'],"ID":opLayer['id']})
        # Item metadata from the config; {DATE}/{Date} tokens expand to now().
        name = config['Title']
        if 'DateTimeFormat' in config:
            loc_df = config['DateTimeFormat']
        else:
            loc_df = dateTimeFormat
        datestring = datetime.datetime.now().strftime(loc_df)
        name = name.replace('{DATE}', datestring)
        name = name.replace('{Date}', datestring)
        description = config['Description']
        tags = config['Tags']
        snippet = config['Summary']
        extent = config['Extent']
        everyone = config['ShareEveryone']
        org = config['ShareOrg']
        groupNames = config['Groups']  # Groups are by ID. Multiple groups comma separated
        folderName = config['Folder']
        thumbnail = config['Thumbnail']
        itemType = config['Type']
        typeKeywords = config['typeKeywords']
        if webmap_data is None:
            return None
        itemParams = arcrest.manageorg.ItemParameter()
        itemParams.title = name
        itemParams.thumbnail = thumbnail
        itemParams.type = "Web Map"
        itemParams.overwrite = True
        itemParams.snippet = snippet
        itemParams.description = description
        itemParams.extent = extent
        itemParams.tags = tags
        itemParams.typeKeywords = ",".join(typeKeywords)
        admin = arcrest.manageorg.Administration(securityHandler=self.securityhandler)
        content = admin.content
        userInfo = content.users.user()
        userCommunity = admin.community
        folderId = None
        if folderName is not None and folderName != "":
            # Create the target folder on first use, then switch into it.
            if self.folderExist(name=folderName, folders=userInfo.folders) is None:
                userInfo.createFolder(name=folderName)
            userInfo.currentFolder = folderName
            if 'id' in userInfo.currentFolder:
                folderId = userInfo.currentFolder['id']
        # Look for an existing item of the same name/title to overwrite.
        sea = arcrest.find.search(securityHandler=self._securityHandler)
        items = sea.findItem(title=name, itemType=itemType, searchorg=False)
        itemId = None
        if items['total'] >= 1:
            for res in items['results']:
                if 'type' in res and res['type'] == itemType:
                    if 'name' in res and res['name'] == name:
                        itemId = res['id']
                        break
                    if 'title' in res and res['title'] == name:
                        itemId = res['id']
                        break
        item = None
        if itemId is not None:
            # Update the existing item in place, moving it to the target
            # folder ("/" is the root) when it lives elsewhere.
            item = content.getItem(itemId).userItem
            results = item.updateItem(itemParameters=itemParams,
                                      text=json.dumps(webmap_data))
            if 'error' in results:
                return results
            if item.ownerFolder != folderId:
                if folderId is None:
                    folderId = "/"
                userInfo.moveItems(items=item.id, folder=folderId)
        else:
            try:
                item = userInfo.addItem(itemParameters=itemParams,
                                        overwrite=True,
                                        url=None,
                                        relationshipType=None,
                                        originItemId=None,
                                        destinationItemId=None,
                                        serviceProxyParams=None,
                                        metadata=None,
                                        text=json.dumps(webmap_data))
            except Exception as e:
                # Best-effort add: fall through to the None check below.
                print (e)
        if item is None:
            return "Item could not be added"
        group_ids = userCommunity.getGroupIDs(groupNames=groupNames)
        userInfo.shareItems(items=item.id,
                            groups=','.join(group_ids),
                            everyone=everyone,
                            org=org)
        # Re-apply the title after sharing (overwrite can clobber it).
        updateParams = arcrest.manageorg.ItemParameter()
        updateParams.title = name
        item.updateItem(itemParameters=updateParams)
        resultMap['Results']['itemId'] = item.id
        resultMap['folderId'] = folderId
        resultMap['Name'] = name
        return resultMap
    except Exception as e:
        line, filename, synerror = trace()
        raise common.ArcRestHelperError({
            "function": "_publishMap",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        # Kept for parity with the module's legacy cleanup convention.
        gc.collect()
#----------------------------------------------------------------------
def _applyLayerReplaceInfo(self, opLayer, replaceItem):
    """Applies one 'Layer' replace directive to a single layer/table entry.

    Rewrites the layer URL and backing item id when ``SearchString`` matches
    the layer URL, and lower-cases field references when the directive asks
    for ``convertCase == 'lower'``.

    Args:
        opLayer (dict): Operational layer or table entry (mutated in place).
        replaceItem (dict): Directive with ``SearchString``,
            ``ReplaceString`` and optionally ``ItemID`` and ``convertCase``.

    Returns:
        dict: Field conversion info (``convertCase``/``fields``) or an empty
        dict when the URL did not match or no conversion was requested.
    """
    layerInfo = {}
    if replaceItem['SearchString'] in opLayer['url']:
        opLayer['url'] = opLayer['url'].replace(replaceItem['SearchString'],
                                                replaceItem['ReplaceString'])
        # Track the backing item; None when the directive carries no ItemID.
        if 'ItemID' in replaceItem:
            opLayer['itemId'] = replaceItem['ItemID']
        else:
            opLayer['itemId'] = None
        if 'convertCase' in replaceItem and replaceItem['convertCase'] == 'lower':
            layerInfo = {'convertCase': replaceItem['convertCase'],
                         'fields': []}
            self._lowerCaseLayerFields(opLayer, layerInfo)
    return layerInfo
#----------------------------------------------------------------------
def _lowerCaseLayerFields(self, opLayer, layerInfo):
    """Lower-cases every field reference inside one layer/table entry.

    Touches the renderer's ``field1``, label expressions (both the
    ``[field]`` and ``{field}`` syntaxes), popup media charts, the popup
    description, and popup ``fieldInfos``. Original-to-converted field name
    pairs are appended to ``layerInfo['fields']``.

    Args:
        opLayer (dict): Operational layer or table entry (mutated in place).
        layerInfo (dict): Conversion record with a ``fields`` list (mutated
            in place).
    """
    if 'layerDefinition' in opLayer:
        layerDef = opLayer['layerDefinition']
        if 'drawingInfo' in layerDef:
            drawInfo = layerDef['drawingInfo']
            if 'renderer' in drawInfo and 'field1' in drawInfo['renderer']:
                drawInfo['renderer']['field1'] = drawInfo['renderer']['field1'].lower()
            if 'labelingInfo' in drawInfo:
                for lblInfo in drawInfo['labelingInfo']:
                    if 'labelExpression' in lblInfo:
                        # Classic syntax: [FieldName]
                        for match in re.findall(r"\[.*\]", lblInfo['labelExpression']):
                            lblInfo['labelExpression'] = str(lblInfo['labelExpression']).replace(match, str(match).lower())
                    if 'labelExpressionInfo' in lblInfo and 'value' in lblInfo['labelExpressionInfo']:
                        # Arcade/template syntax: {FieldName}
                        for match in re.findall(r"{.*}", lblInfo['labelExpressionInfo']['value']):
                            lblInfo['labelExpressionInfo']['value'] = str(lblInfo['labelExpressionInfo']['value']).replace(match, str(match).lower())
    if 'popupInfo' in opLayer:
        popup = opLayer['popupInfo']
        if 'mediaInfos' in popup and popup['mediaInfos'] is not None:
            for chart in popup['mediaInfos']:
                if 'value' in chart:
                    if 'normalizeField' in chart and chart['normalizeField'] is not None:
                        chart['normalizeField'] = chart['normalizeField'].lower()
                    if 'fields' in chart['value']:
                        chart['value']['fields'] = [str(fld).lower()
                                                    for fld in chart['value']['fields']]
        if 'fieldInfos' in popup:
            for field in popup['fieldInfos']:
                newFld = str(field['fieldName']).lower()
                if 'description' in popup:
                    popup['description'] = common.find_replace(obj=popup['description'],
                                                               find="{" + field['fieldName'] + "}",
                                                               replace="{" + newFld + "}")
                layerInfo['fields'].append({"PublishName": field['fieldName'],
                                            'ConvertName': newFld})
                field['fieldName'] = newFld
#----------------------------------------------------------------------
def _remapAppPropertyLayerIds(self, webmap_data, currentID, newID):
    """Rewrites references to a layer id inside ``applicationProperties``.

    Updates the location-tracking layer id and the search layer entries
    (search layer field names are also lower-cased, matching the legacy
    behavior), then runs a find/replace over the whole
    ``applicationProperties`` subtree. No-op when the web map has no
    ``applicationProperties``.

    Args:
        webmap_data (dict): Full web map JSON (mutated in place).
        currentID (str): Layer id to search for.
        newID (str): Replacement layer id.
    """
    if 'applicationProperties' not in webmap_data:
        return
    appProps = webmap_data['applicationProperties']
    if 'editing' in appProps and appProps['editing'] is not None:
        editing = appProps['editing']
        if 'locationTracking' in editing and editing['locationTracking'] is not None:
            tracking = editing['locationTracking']
            if 'info' in tracking and tracking['info'] is not None:
                info = tracking['info']
                if 'layerId' in info and info['layerId'] == currentID:
                    info['layerId'] = newID
    if 'viewing' in appProps and appProps['viewing'] is not None:
        viewing = appProps['viewing']
        if 'search' in viewing and viewing['search'] is not None:
            search = viewing['search']
            if 'layers' in search and search['layers'] is not None:
                for searchlayer in search['layers']:
                    if searchlayer['id'] == currentID:
                        searchlayer['id'] = newID
                        if 'fields' in searchlayer and searchlayer['fields'] is not None:
                            for fld in searchlayer['fields']:
                                fld['Name'] = str(fld['Name']).lower()
                        if 'field' in searchlayer and searchlayer['field'] is not None:
                            searchlayer['field']['name'] = searchlayer['field']['name'].lower()
    webmap_data['applicationProperties'] = common.find_replace(appProps, currentID, newID)
#----------------------------------------------------------------------
def publishCombinedWebMap(self, maps_info, webmaps):
    """Publishes web maps whose layers combine several source web maps.

    Args:
        maps_info (list): A list of JSON configuration combined web maps to publish.
        webmaps (list): Item IDs of the source web maps whose operational
            layers and tables are merged into each published map.
    Returns:
        list: A list of results from :py:meth:`arcrest.manageorg._content.UserItem.updateItem`.
    """
    if self.securityhandler is None:
        print ("Security handler required")
        return
    try:
        admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
        map_results = []
        for map_info in maps_info:
            operationalLayers = []
            tableLayers = []
            # Accumulate layers/tables from every source web map. Each
            # webmap's entries are prepended, so later ids in `webmaps`
            # end up first (preserves the original ordering behavior).
            for webmap in webmaps:
                item = admin.content.getItem(itemId=webmap)
                response = item.itemData()
                if 'operationalLayers' in response:
                    operationalLayers = list(response['operationalLayers']) + operationalLayers
                if 'tables' in response:
                    tableLayers = list(response['tables']) + tableLayers
            if 'ReplaceTag' in map_info:
                itemInfo = {"ReplaceTag":map_info['ReplaceTag'] }
            else:
                itemInfo = {"ReplaceTag":"{WebMap}" }
            itemInfo['MapInfo'] = self._publishMap(config=map_info,
                                                   replaceInfo=None,
                                                   operationalLayers=operationalLayers,
                                                   tableLayers=tableLayers)
            map_results.append(itemInfo)
            if itemInfo is not None:
                if 'error' not in itemInfo['MapInfo']['Results']:
                    print ("%s webmap created" % itemInfo['MapInfo']['Name'])
                else:
                    print (str(itemInfo['MapInfo']['Results']))
            else:
                print ("Map not created")
        return map_results
    except Exception as e:
        line, filename, synerror = trace()
        raise common.ArcRestHelperError({
            # Fixed: previously reported the misspelled
            # "publishedCombinedWebMap".
            "function": "publishCombinedWebMap",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        # Kept for parity with the module's legacy cleanup convention.
        gc.collect()
#----------------------------------------------------------------------
def publishFsFromMXD(self, fs_config):
    """Publishes the layers in a MXD to a feature service.

    Args:
        fs_config (list or dict): A list of JSON configuration feature
            service details to publish, or a single such configuration.
    Returns:
        list: A list of result objects, one per created feature service.
    """
    if self.securityhandler is None:
        print ("Security handler required")
        return
    # Portal installs publish against the org URL; AGOL uses arcgis.com.
    if self.securityhandler.is_portal:
        url = self.securityhandler.org_url
    else:
        url = 'http://www.arcgis.com'
    try:
        res = []
        # Accept either a list of configs or a single config dict.
        configs = fs_config if isinstance(fs_config, list) else [fs_config]
        for fs in configs:
            if 'ReplaceTag' in fs:
                resItm = {"ReplaceTag":fs['ReplaceTag'] }
            else:
                resItm = {"ReplaceTag":"{FeatureService}" }
            resItm['FSInfo'] = self._publishFSFromMXD(config=fs, url=url)
            # Guard against a None result (fixes a TypeError the old
            # single-config branch hit on 'url' in None).
            if resItm['FSInfo'] is not None and 'url' in resItm['FSInfo']:
                print ("%s created" % resItm['FSInfo']['url'])
                res.append(resItm)
            else:
                print (str(resItm['FSInfo']))
        return res
    except common.ArcRestHelperError as e:
        raise e
    except Exception as e:
        line, filename, synerror = trace()
        raise common.ArcRestHelperError({
            "function": "publishFsFromMXD",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        # Kept for parity with the module's legacy cleanup convention.
        gc.collect()
#----------------------------------------------------------------------
def publishFeatureCollections(self, configs):
    """Publishes feature collections to a feature service.

    Args:
        configs (list or dict): A list of JSON configuration feature
            service details to publish, or a single such configuration.
    Returns:
        list: A list of result objects, one per created feature collection.
    """
    if self.securityhandler is None:
        print ("Security handler required")
        return
    try:
        res = []
        # Accept either a list of configs or a single config dict
        # (consistent with publishFsFromMXD).
        if not isinstance(configs, list):
            configs = [configs]
        for config in configs:
            if 'ReplaceTag' in config:
                resItm = {"ReplaceTag":config['ReplaceTag'] }
            else:
                resItm = {"ReplaceTag":"{FeatureService}" }
            # Only configs carrying a 'Zip' can be published; default to
            # None so the check below cannot raise KeyError (old bug).
            resItm['FCInfo'] = None
            if 'Zip' in config:
                resItm['FCInfo'] = self._publishFeatureCollection(config=config)
            if resItm['FCInfo'] is not None and 'id' in resItm['FCInfo']:
                print ("%s feature collection created" % resItm['FCInfo']['id'])
                res.append(resItm)
            else:
                print (str(resItm['FCInfo']))
        return res
    except common.ArcRestHelperError as e:
        raise e
    except Exception as e:
        line, filename, synerror = trace()
        raise common.ArcRestHelperError({
            "function": "publishFeatureCollections",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        # Kept for parity with the module's legacy cleanup convention.
        gc.collect()
#----------------------------------------------------------------------
def _publishFSFromMXD(self, config, url='http://www.arcgis.com'):
mxd = None
q = None
everyone = None
org = None
groupNames = None
folderName = None
thumbnail = None
capabilities = None
maxRecordCount = None
loc_df = None
datestring = None
service_name = None
service_name_safe = None
sd_Info = None
admin = None
itemParams = None
adminusercontent = None
userCommunity = None
userContent = None
folderId = None
res = None
folderContent = None
itemId = None
resultSD = None
publishParameters = None
resultFS = None
delres = None
status = None
group_ids = None
shareResults = None
updateParams = None
enableEditTracking = None
adminFS = None
json_dict = None
enableResults = None
layer = None
layers = None
layUpdateResult = None
definition = None
try:
# Report settings
dataFile = None
if 'Mxd' in config:
dataFile = config['Mxd']
elif 'Zip' in config:
dataFile = config['Zip']
# Service settings
service_name = config['Title']
everyone = config['ShareEveryone']
org = config['ShareOrg']
groupNames = config['Groups'] #Groups are by ID. Multiple groups comma separated
if 'EnableEditTracking' in config:
print ("enableEditTracking parameter has been deprecated, please add a definition section to the config")
enableEditTracking = config['EnableEditTracking']
else:
#print ("Please add an EnableEditTracking parameter to your feature service section")
enableEditTracking = False
folderName = config['Folder']
thumbnail = config['Thumbnail']
if 'Capabilities' in config:
print ("Capabilities parameter has been deprecated, please add a definition section to the config")
capabilities = config['Capabilities']
if 'Definition' in config:
definition = config['Definition']
if 'capabilities' in definition:
capabilities = definition['capabilities']
if 'maxRecordCount' in config:
maxRecordCount = config["maxRecordCount"]
else:
maxRecordCount = '1000' # If not cast as a string, the MXDtoFeatureServiceDef method called below returns an error stating 'cannot serialize 1000 (type int)'
if 'DateTimeFormat' in config:
loc_df = config['DateTimeFormat']
else:
loc_df = dateTimeFormat
skipIfExist = False
if 'SkipIfExist' in config:
skipIfExist = config['SkipIfExist']
if str(skipIfExist).lower() == 'true':
skipIfExist = True
datestring = datetime.datetime.now().strftime(loc_df)
service_name = service_name.replace('{DATE}',datestring)
service_name = service_name.replace('{Date}',datestring)
service_name_safe = service_name.replace(' ','_')
service_name_safe = service_name_safe.replace(':','_')
service_name_safe = service_name_safe.replace('-','_')
if os.path.exists(path=dataFile) == False:
raise ValueError("data file does not exit")
extension = os.path.splitext(dataFile)[1]
admin = arcrest.manageorg.Administration(securityHandler=self.securityhandler)
hostingServers = admin.hostingServers()
if len(hostingServers) == 0:
return "No hosting servers can be found, if this is portal, update the settings to include a hosting server."
content = admin.content
userInfo = content.users.user()
userCommunity = admin.community
if folderName is not None and folderName != "":
if self.folderExist(name=folderName,folders=userInfo.folders) is None:
res = userInfo.createFolder(name=folderName)
userInfo.currentFolder = folderName
if 'id' in userInfo.currentFolder:
folderId = userInfo.currentFolder['id']
if skipIfExist == True:
sea = arcrest.find.search(securityHandler=self._securityHandler)
items = sea.findItem(title=service_name, itemType='Feature Service',searchorg=False)
if 'total' in items:
if items['total'] >= 1:
for res in items['results']:
if 'type' in res and res['type'] == 'Feature Service':
if 'name' in res and res['name'] == service_name:
itemId = res['id']
break
if 'title' in res and res['title'] == service_name:
itemId = res['id']
break
if itemId is not None:
defItem = content.getItem(itemId)
results = {
"url": defItem.url,
"folderId": folderId,
"itemId": defItem.id,
"convertCase": self._featureServiceFieldCase,
"messages":"Exist"
}
return results
else:
print ("Error searching organzation, {0}".format(items))
if (extension == ".mxd"):
dataFileType = "serviceDefinition"
searchType = "Service Definition"
sd_Info = arcrest.common.servicedef.MXDtoFeatureServiceDef(mxd_path=dataFile,
service_name=service_name_safe,
tags=None,
description=None,
folder_name=None,
capabilities=capabilities,
maxRecordCount=maxRecordCount,
server_type='MY_HOSTED_SERVICES',
url=url)
if sd_Info is not None:
publishParameters = arcrest.manageorg.PublishSDParameters(tags=sd_Info['tags'],
overwrite='true')
elif (extension == ".zip"):
dataFileType = "Shapefile"
searchType = "Shapefile"
sd_Info = {'servicedef':dataFile,'tags':config['Tags']}
description = ""
if 'Description' in config:
description = config['Description']
publishParameters = arcrest.manageorg.PublishShapefileParameter(name=service_name,
layerInfo={'capabilities':capabilities},
description=description)
if 'hasStaticData' in definition:
publishParameters.hasStaticData = definition['hasStaticData']
if sd_Info is None:
print ("Publishing SD or Zip not valid")
raise common.ArcRestHelperError({
"function": "_publishFsFromMXD",
"line": lineno(),
"filename": 'publishingtools.py',
"synerror": "Publishing SD or Zip not valid"
})
itemParams = arcrest.manageorg.ItemParameter()
#if isinstance(hostingServers[0],arcrest.manageags.administration.AGSAdministration):
#itemParams.title = service_name_safe
#else:
#itemParams.title = service_name
itemParams.title = service_name
itemParams.thumbnail = thumbnail
itemParams.type = searchType
itemParams.overwrite = True
sea = arcrest.find.search(securityHandler=self._securityHandler)
items = sea.findItem(title=service_name, itemType=searchType,searchorg=False)
defItem = None
defItemID = None
if items['total'] >= 1:
for res in items['results']:
if 'type' in res and res['type'] == searchType:
if 'name' in res and res['name'] == service_name:
defItemID = res['id']
break
if 'title' in res and res['title'] == service_name:
defItemID = res['id']
break
#itemId = items['results'][0]['id']
if not defItemID is None:
defItem = content.getItem(defItemID).userItem
resultSD = defItem.updateItem(itemParameters=itemParams,
data=sd_Info['servicedef'])
if 'error' in resultSD:
return resultSD
if defItem.ownerFolder != folderId:
if folderId is None:
folderId = "/"
moveRes = userInfo.moveItems(items=defItem.id,folder=folderId)
else:
try:
defItem = userInfo.addItem(itemParameters=itemParams,
filePath=sd_Info['servicedef'],
overwrite=True,
url=None,
text=None,
relationshipType=None,
originItemId=None,
destinationItemId=None,
serviceProxyParams=None,
metadata=None)
except Exception as e:
print (e)
if defItem is None:
return "Item could not be added "
try:
serviceItem = userInfo.publishItem(
fileType=dataFileType,
itemId=defItem.id,
publishParameters=publishParameters,
overwrite = True,
wait=True)
except Exception as e:
print ("Error publishing item: Error Details: {0}".format(str(e)))
sea = arcrest.find.search(securityHandler=self._securityHandler)
items = sea.findItem(title =service_name, itemType='Feature Service',searchorg=False)
if items['total'] >= 1:
for res in items['results']:
if 'type' in res and res['type'] == 'Feature Service':
if 'name' in res and res['name'] == service_name:
itemId = res['id']
break
if 'title' in res and res['title'] == service_name:
itemId = res['id']
break
if not itemId is None:
sea = arcrest.find.search(securityHandler=self._securityHandler)
items = sea.findItem(title =service_name_safe, itemType='Feature Service',searchorg=False)
if items['total'] >= 1:
for res in items['results']:
if 'type' in res and res['type'] == 'Feature Service':
if 'name' in res and res['name'] == service_name_safe:
itemId = res['id']
break
if 'title' in res and res['title'] == service_name_safe:
itemId = res['id']
break
if not itemId is None:
existingItem = admin.content.getItem(itemId = itemId).userItem
if existingItem.url is not None:
adminFS = AdminFeatureService(url=existingItem.url, securityHandler=self._securityHandler)
cap = str(adminFS.capabilities)
existingDef = {}
if 'Sync' in cap:
print ("Disabling Sync")
capItems = cap.split(',')
if 'Sync' in capItems:
capItems.remove('Sync')
existingDef['capabilities'] = ','.join(capItems)
enableResults = adminFS.updateDefinition(json_dict=existingDef)
if 'error' in enableResults:
delres = userInfo.deleteItems(items=existingItem.id)
if 'error' in delres:
print (delres)
return delres
print ("Delete successful")
else:
print ("Sync Disabled")
else:
print ("Attempting to delete")
delres = userInfo.deleteItems(items=existingItem.id)
if 'error' in delres:
print (delres)
return delres
print ("Delete successful")
adminFS = None
del adminFS
else:
print ("Attempting to delete")
delres = userInfo.deleteItems(items=existingItem.id)
if 'error' in delres:
print (delres)
return delres
print ("Delete successful")
else:
print ("Item exist and cannot be found, probably owned by another user.")
raise common.ArcRestHelperError({
"function": "_publishFsFromMXD",
"line": lineno(),
"filename": 'publishingtools.py',
"synerror": "Item exist and cannot be found, probably owned by another user."
}
)
try:
serviceItem = userInfo.publishItem(
fileType=dataFileType,
itemId=defItem.id,
overwrite = True,
publishParameters=publishParameters,
wait=True)
except Exception as e:
print ("Overwrite failed, deleting")
delres = userInfo.deleteItems(items=existingItem.id)
if 'error' in delres:
print (delres)
return delres
print ("Delete successful")
try:
serviceItem = userInfo.publishItem(
fileType=dataFileType,
itemId=defItem.id,
overwrite = True,
publishParameters=publishParameters,
wait=True)
except Exception as e:
return e
results = {
"url": serviceItem.url,
"folderId": folderId,
"itemId": serviceItem.id,
"convertCase": self._featureServiceFieldCase,
"messages":""
}
group_ids = userCommunity.getGroupIDs(groupNames=groupNames)
shareResults = userInfo.shareItems(items=serviceItem.id,
groups=','.join(group_ids),
everyone=everyone,
org=org)
updateParams = arcrest.manageorg.ItemParameter()
updateParams.title = service_name
updateResults = serviceItem.updateItem(itemParameters=updateParams)
adminFS = AdminFeatureService(url=serviceItem.url, securityHandler=self._securityHandler)
if enableEditTracking == True or str(enableEditTracking).upper() == 'TRUE':
json_dict = {'editorTrackingInfo':{}}
json_dict['editorTrackingInfo']['allowOthersToDelete'] = True
json_dict['editorTrackingInfo']['allowOthersToUpdate'] = True
json_dict['editorTrackingInfo']['enableEditorTracking'] = True
json_dict['editorTrackingInfo']['enableOwnershipAccessControl'] = False
enableResults = adminFS.updateDefinition(json_dict=json_dict)
if 'error' in enableResults:
results['messages'] += enableResults
json_dict = {'editFieldsInfo':{}}
json_dict['editFieldsInfo']['creationDateField'] = ""
json_dict['editFieldsInfo']['creatorField'] = ""
json_dict['editFieldsInfo']['editDateField'] = ""
json_dict['editFieldsInfo']['editorField'] = ""
layers = adminFS.layers
tables = adminFS.tables
for layer in layers:
if layer.canModifyLayer is None or layer.canModifyLayer == True:
if layer.editFieldsInfo is None:
layUpdateResult = layer.addToDefinition(json_dict=json_dict)
if 'error' in layUpdateResult:
layUpdateResult['error']['layerid'] = layer.id
results['messages'] += layUpdateResult['error']
if not tables is None:
for layer in tables:
if layer.canModifyLayer is None or layer.canModifyLayer == True:
if layer.editFieldsInfo is None:
layUpdateResult = layer.addToDefinition(json_dict=json_dict)
if 'error' in layUpdateResult:
layUpdateResult['error']['layerid'] = layer.id
results['messages'] += layUpdateResult['error']
if definition is not None:
enableResults = adminFS.updateDefinition(json_dict=definition)
if enableResults is not None and 'error' in enableResults:
results['messages'] = enableResults
else:
if 'editorTrackingInfo' in definition:
if 'enableEditorTracking' in definition['editorTrackingInfo']:
if definition['editorTrackingInfo']['enableEditorTracking'] == True:
json_dict = {'editFieldsInfo':{}}
json_dict['editFieldsInfo']['creationDateField'] = ""
json_dict['editFieldsInfo']['creatorField'] = ""
json_dict['editFieldsInfo']['editDateField'] = ""
json_dict['editFieldsInfo']['editorField'] = ""
layers = adminFS.layers
tables = adminFS.tables
for layer in layers:
if layer.canModifyLayer is None or layer.canModifyLayer == True:
if layer.editFieldsInfo is None:
layUpdateResult = layer.addToDefinition(json_dict=json_dict)
if 'error' in layUpdateResult:
layUpdateResult['error']['layerid'] = layer.id
results['messages'] = layUpdateResult['error']
if not tables is None:
for layer in tables:
if layer.canModifyLayer is None or layer.canModifyLayer == True:
if layer.editFieldsInfo is None:
layUpdateResult = layer.addToDefinition(json_dict=json_dict)
if 'error' in layUpdateResult:
layUpdateResult['error']['layerid'] = layer.id
results['messages'] = layUpdateResult['error']
return results
except common.ArcRestHelperError as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "_publishFsFromMXD",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
definition = None
mxd = None
q = None
everyone = None
org = None
groupNames = None
folderName = None
thumbnail = None
capabilities = None
maxRecordCount = None
loc_df = None
datestring = None
service_name = None
service_name_safe = None
sd_Info = None
admin = None
itemParams = None
userCommunity = None
userContent = None
folderId = None
res = None
folderContent = None
itemId = None
resultSD = None
publishParameters = None
resultFS = None
delres = None
status = None
group_ids = None
shareResults = None
updateParams = None
enableEditTracking = None
adminFS = None
json_dict = None
enableResults = None
layer = None
layers = None
layUpdateResult = None
del definition
del layer
del layers
del layUpdateResult
del mxd
del q
del everyone
del org
del groupNames
del folderName
del thumbnail
del capabilities
del maxRecordCount
del loc_df
del datestring
del service_name
del service_name_safe
del sd_Info
del admin
del itemParams
del userCommunity
del userContent
del folderId
del res
del folderContent
del itemId
del resultSD
del publishParameters
del resultFS
del delres
del status
del group_ids
del shareResults
del updateParams
del enableEditTracking
del adminFS
del json_dict
del enableResults
gc.collect()
#----------------------------------------------------------------------
def _publishAppLogic(self, appDet, map_info=None, fsInfo=None):
    """Resolve replacement tags for one app config, then publish the app.

    Args:
        appDet (dict): A single app configuration (JSON) to publish.
        map_info (list): Published-map results used to resolve ``Map`` and
            ``Layer`` replacement tags. Defaults to ``None``.
        fsInfo (list): Published-feature-service results used to resolve
            ``Service``/``Layer`` replacement tags. Defaults to ``None``.

    Returns:
        dict: ``{'ReplaceTag': ..., 'AppInfo': ...}`` describing the
        published app (``AppInfo`` comes from ``_publishApp`` or
        ``_publishDashboard``).

    Raises:
        common.ArcRestHelperError: On any failure, wrapping the traceback.
    """
    itemInfo = None
    replaceInfo = None
    replaceItem = None
    mapDet = None
    lay = None
    itemId = None
    admin = None
    try:
        admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
        itemInfo = {}
        if 'ReplaceInfo' in appDet:
            replaceInfo = appDet['ReplaceInfo']
        else:
            replaceInfo = None
        if replaceInfo != None:
            for replaceItem in replaceInfo:
                # Feature-service tags: a match rewrites the ReplaceString to
                # the service URL and downgrades the directive to 'Global'.
                if fsInfo is not None:
                    for fsDet in fsInfo:
                        if 'ReplaceTag' in fsDet:
                            if 'ReplaceString' in replaceItem:
                                if fsDet is not None and replaceItem['ReplaceString'] == fsDet['ReplaceTag'] and \
                                        (replaceItem['ReplaceType'] == 'Service' or replaceItem['ReplaceType'] == 'Layer'):
                                    replaceItem['ReplaceString'] = fsDet['FSInfo']['url']
                                    replaceItem['ItemID'] = fsDet['FSInfo']['itemId']
                                    replaceItem['ItemFolder'] = fsDet['FSInfo']['folderId']
                                    if 'convertCase' in fsDet['FSInfo']:
                                        replaceItem['convertCase'] = fsDet['FSInfo']['convertCase']
                                    replaceItem['ReplaceType'] = "Global"
                if map_info is not None:
                    for mapDet in map_info:
                        if 'ReplaceTag' in mapDet:
                            if 'ReplaceString' in replaceItem:
                                if mapDet is not None and replaceItem['ReplaceString'] == mapDet['ReplaceTag'] and \
                                        replaceItem['ReplaceType'] == 'Map':
                                    replaceItem['ItemID'] = mapDet['MapInfo']['Results']['itemId']
                                    replaceItem['ItemFolder'] = mapDet['MapInfo']['folderId']
                                    replaceItem['LayerInfo'] = mapDet['MapInfo']['Layers']
                                elif mapDet is not None and replaceItem['ReplaceType'] == 'Layer':
                                    # 'Layer' replace strings look like "<MapTag>|<LayerName>".
                                    repInfo = replaceItem['ReplaceString'].split("|")
                                    if len(repInfo) == 2:
                                        if repInfo[0] == mapDet['ReplaceTag']:
                                            for key, value in mapDet['MapInfo']['Layers'].items():
                                                if value["Name"] == repInfo[1]:
                                                    replaceItem['ReplaceString'] = value["ID"]
                if 'ItemID' in replaceItem:
                    # BUG FIX: the original read
                    #     if 'ItemFolder' in replaceItem == False:
                    # which Python chains as
                    #     ('ItemFolder' in replaceItem) and (replaceItem == False)
                    # and is never true for a dict, so the folder lookup below
                    # never executed. The intent is to resolve the folder only
                    # when it was not supplied.
                    if 'ItemFolder' not in replaceItem:
                        itemId = replaceItem['ItemID']
                        itemInfo = admin.content.getItem(itemId=itemId)
                        if itemInfo.owner == self._securityHandler.username and itemInfo.ownerFolder:
                            # Consistency fix: use attribute access, matching the
                            # check on the line above (original mixed in
                            # itemInfo['ownerFolder']).
                            replaceItem['ItemFolder'] = itemInfo.ownerFolder
                        else:
                            replaceItem['ItemFolder'] = None
        if 'ReplaceTag' in appDet:
            itemInfo = {"ReplaceTag": appDet['ReplaceTag']}
        else:
            itemInfo = {"ReplaceTag": "{App}"}
        # Dispatch on the configured item type; anything unrecognized is
        # treated as a plain web mapping application.
        if appDet['Type'] == 'Web Mapping Application':
            itemInfo['AppInfo'] = self._publishApp(config=appDet,
                                                   replaceInfo=replaceInfo)
        elif appDet['Type'] == 'Operation View':
            itemInfo['AppInfo'] = self._publishDashboard(config=appDet,
                                                         replaceInfo=replaceInfo)
        else:
            itemInfo['AppInfo'] = self._publishApp(config=appDet,
                                                   replaceInfo=replaceInfo)
        if itemInfo['AppInfo'] is not None:
            if 'error' not in itemInfo['AppInfo']['Results']:
                print("%s app created" % itemInfo['AppInfo']['Name'])
            else:
                print(str(itemInfo['AppInfo']['Results']))
        else:
            print("App was not created")
        return itemInfo
    except common.ArcRestHelperError as e:
        raise e
    except Exception as e:
        line, filename, synerror = trace()
        raise common.ArcRestHelperError({
            "function": "_publishAppLogic",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        # Module convention: aggressively release locals and collect.
        replaceInfo = None
        replaceItem = None
        mapDet = None
        lay = None
        itemId = None
        admin = None
        del admin
        del replaceInfo
        del replaceItem
        del mapDet
        del lay
        del itemId
        gc.collect()
#----------------------------------------------------------------------
def publishApp(self, app_info, map_info=None, fsInfo=None):
    """Publishes apps to AGOL/Portal.

    Args:
        app_info (list): A list of JSON configuration apps to publish.
        map_info (list): Defaults to ``None``.
        fsInfo (list): Defaults to ``None``.
    Returns:
        dict: A dictionary of results objects.
    """
    if self.securityhandler is None:
        print ("Security handler required")
        return
    appDet = None
    try:
        app_results = []
        # A single config dict is processed exactly like a one-element list.
        configs = app_info if isinstance(app_info, list) else [app_info]
        for appDet in configs:
            app_results.append(
                self._publishAppLogic(appDet=appDet,
                                      map_info=map_info,
                                      fsInfo=fsInfo))
        return app_results
    except (common.ArcRestHelperError) as e:
        raise e
    except Exception as e:
        line, filename, synerror = trace()
        raise common.ArcRestHelperError({
            "function": "publishApp",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        appDet = None
        del appDet
        gc.collect()
#----------------------------------------------------------------------
def _publishApp(self, config, replaceInfo):
resultApp = None
name = None
tags = None
description = None
extent = None
itemJson = None
admin = None
json_data = None
itemData = None
replaceItem = None
loc_df = None
datestring = None
snippet = None
everyone = None
org = None
groupNames = None
folderName = None
url = None
thumbnail = None
itemType = None
typeKeywords = None
itemParams = None
userCommunity = None
userContent = None
res = None
folderId = None
folderContent = None
itemId = None
group_ids = None
shareResults = None
updateParams = None
url = None
updateResults = None
portal = None
try:
resultApp = {'Results':{}}
name = ''
tags = ''
description = ''
extent = ''
itemJson = config['ItemJSON']
if os.path.exists(itemJson) == False:
return {"Results":{"error": "%s does not exist" % itemJson} }
admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
portalself = admin.portals.portalSelf
if portalself.urlKey is None or portalself.customBaseUrl is None:
parsedURL = urlparse.urlparse(url=self._securityHandler.org_url, scheme='', allow_fragments=True)
orgURL = parsedURL.netloc + parsedURL.path
else:
orgURL = portalself.urlKey + '.' + portalself.customBaseUrl
content = admin.content
userInfo = content.users.user()
userCommunity = admin.community
folderName = config['Folder']
if folderName is not None and folderName != "":
if self.folderExist(name=folderName,folders=userInfo.folders) is None:
res = userInfo.createFolder(name=folderName)
userInfo.currentFolder = folderName
if 'id' in userInfo.currentFolder:
folderId = userInfo.currentFolder['id']
if os.path.exists(itemJson):
with open(itemJson) as json_data:
try:
itemData = json.load(json_data)
except:
raise ValueError("%s is not a valid JSON File" % itemJson)
for replaceItem in replaceInfo:
if replaceItem['ReplaceType'] == 'Map' and 'ItemID' in replaceItem:
if 'values' in itemData:
if 'webmap' in itemData['values']:
if itemData['values']['webmap'] == replaceItem['SearchString']:
itemData['values']['webmap'] = replaceItem['ItemID']
if 'folderId' in itemData:
itemData['folderId'] = replaceItem['ItemFolder']
if 'map' in itemData:
if 'itemId' in itemData['map']:
if itemData['map']['itemId'] == replaceItem['SearchString']:
itemData['map']['itemId'] = replaceItem['ItemID']
elif replaceItem['ReplaceType'] == 'Layer' and 'ReplaceString' in replaceItem:
itemData = common.find_replace(itemData,replaceItem['SearchString'],replaceItem['ReplaceString'])
elif replaceItem['ReplaceType'] == 'Folder':
if 'id' in userInfo.currentFolder:
folderID = userInfo.currentFolder['id']
else:
folderID = None
itemData = common.find_replace(itemData,replaceItem['SearchString'],folderID)
elif replaceItem['ReplaceType'] == 'Org':
itemData = common.find_replace(itemData,replaceItem['SearchString'],orgURL)
elif replaceItem['ReplaceType'] == 'GeoService':
if 'geometry' in portalself.helperServices:
if 'url' in portalself.helperServices["geometry"]:
itemData = common.find_replace(itemData,replaceItem['SearchString'],portalself.helperServices["geometry"]['url'])
elif replaceItem['ReplaceType'] == 'Global':
itemData = common.find_replace(itemData,replaceItem['SearchString'],replaceItem['ReplaceString'])
else:
print ("%s does not exist." % itemJson)
itemData = None
name = config['Title']
if 'DateTimeFormat' in config:
loc_df = config['DateTimeFormat']
else:
loc_df = dateTimeFormat
datestring = datetime.datetime.now().strftime(loc_df)
name = name.replace('{DATE}',datestring)
name = name.replace('{Date}',datestring)
description = config['Description']
tags = config['Tags']
snippet = config['Summary']
everyone = config['ShareEveryone']
org = config['ShareOrg']
groupNames = config['Groups'] #Groups are by ID. Multiple groups comma separated
url = config['Url']
thumbnail = config['Thumbnail']
itemType = config['Type']
typeKeywords = config['typeKeywords']
itemParams = arcrest.manageorg.ItemParameter()
itemParams.title = name
itemParams.thumbnail = thumbnail
itemParams.type = itemType
itemParams.overwrite = True
itemParams.description = description
itemParams.tags = tags
itemParams.snippet = snippet
itemParams.description = description
itemParams.typeKeywords = ",".join(typeKeywords)
sea = arcrest.find.search(securityHandler=self._securityHandler)
items = sea.findItem(title=name,
itemType=
["Web Mapping Application",
"Application"],
searchorg=False)
if items['total'] >= 1:
for res in items['results']:
if 'type' in res and res['type'] == itemType:
if 'name' in res and res['name'] == name:
itemId = res['id']
break
if 'title' in res and res['title'] == name:
itemId = res['id']
break
if not itemId is None:
item = content.getItem(itemId).userItem
results = item.updateItem(itemParameters=itemParams,
text=json.dumps(itemData))
if 'error' in results:
return results
if item.ownerFolder != folderId:
if folderId is None:
folderId = "/"
moveRes = userInfo.moveItems(items=item.id,folder=folderId)
else:
try:
item = userInfo.addItem(
itemParameters=itemParams,
overwrite=True,
relationshipType=None,
originItemId=None,
destinationItemId=None,
serviceProxyParams=None,
metadata=None,
text=json.dumps(itemData))
except Exception as e:
print (e)
if item is None:
return "App could not be added"
group_ids = userCommunity.getGroupIDs(groupNames=groupNames)
shareResults = userInfo.shareItems(items=item.id,
groups=','.join(group_ids),
everyone=everyone,
org=org)
updateParams = arcrest.manageorg.ItemParameter()
updateParams.title = name
url = url.replace("{AppID}",item.id)
url = url.replace("{OrgURL}",orgURL)
#if portalself.urlKey is None or portalself.customBaseUrl is None:
#parsedURL = urlparse.urlparse(url=self._securityHandler.org_url, scheme='', allow_fragments=True)
#else:
#url = url.replace("{OrgURL}", portalself.urlKey + '.' + portalself.customBaseUrl)
updateParams.url = url
updateResults = item.updateItem(itemParameters=updateParams)
resultApp['Results']['itemId'] = item.id
resultApp['folderId'] = folderId
resultApp['Name'] = name
return resultApp
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "_publishApp",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
name = None
tags = None
description = None
extent = None
itemJson = None
admin = None
adminusercontent = None
json_data = None
itemData = None
replaceItem = None
loc_df = None
datestring = None
snippet = None
everyone = None
org = None
groupNames = None
folderName = None
url = None
thumbnail = None
itemType = None
typeKeywords = None
itemParams = None
userCommunity = None
userContent = None
res = None
folderId = None
folderContent = None
itemId = None
group_ids = None
shareResults = None
updateParams = None
url = None
updateResults = None
portal = None
del name
del portal
del tags
del description
del extent
del itemJson
del admin
del adminusercontent
del json_data
del itemData
del replaceItem
del loc_df
del datestring
del snippet
del everyone
del org
del groupNames
del folderName
del url
del thumbnail
del itemType
del typeKeywords
del itemParams
del userCommunity
del userContent
del res
del folderId
del folderContent
del itemId
del group_ids
del shareResults
del updateParams
del updateResults
gc.collect()
#----------------------------------------------------------------------
def _publishDashboard(self, config, replaceInfo):
resultApp = None
tags = None
description = None
extent = None
itemJson = None
layerIDSwitch = None
admin = None
adminusercontent = None
json_data = None
itemData = None
replaceItem = None
item = None
response = None
layerNamesID = None
layerIDs = None
tableNamesID = None
tableIDs = None
opLayer = None
widget = None
widgets = None
mapTool = None
dataSource = None
configFileAsString = None
repl = None
name = None
loc_df = None
datestring = None
snippet = None
everyone = None
org = None
groupNames = None
folderName = None
thumbnail = None
itemType = None
typeKeywords = None
itemParams = None
adminusercontent = None
userCommunity = None
userContent = None
folderId = None
res = None
folderContent = None
itemId = None
group_ids = None
shareResults = None
updateParams = None
resultApp = None
updateResults = None
try:
resultApp = {'Results':{}}
tags = ''
description = ''
extent = ''
itemJson = config['ItemJSON']
if os.path.exists(itemJson) == False:
return {"Results":{"error": "%s does not exist" % itemJson} }
admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
content = admin.content
userInfo = content.users.user()
userCommunity = admin.community
folderName = config['Folder']
if folderName is not None and folderName != "":
if self.folderExist(name=folderName,folders=userInfo.folders) is None:
res = userInfo.createFolder(name=folderName)
userInfo.refresh()
userInfo.currentFolder = folderName
if 'id' in userInfo.currentFolder:
folderId = userInfo.currentFolder['id']
layerIDSwitch = []
if os.path.exists(itemJson):
with open(itemJson) as json_data:
try:
itemData = json.load(json_data)
except:
raise ValueError("%s is not a valid JSON File" % itemJson)
for replaceItem in replaceInfo:
if replaceItem['ReplaceType'] == 'Global':
itemData = common.find_replace(itemData,replaceItem['SearchString'],replaceItem['ReplaceString'])
elif replaceItem['ReplaceType'] == 'Map' and 'ItemID' in replaceItem:
item = admin.content.getItem(itemId=replaceItem['ItemID'])
response = item.itemData()
layerNamesID = {}
layerIDs =[]
tableNamesID = {}
tableIDs =[]
if 'operationalLayers' in response:
for opLayer in response['operationalLayers']:
#if 'LayerInfo' in replaceItem:
#for layers in replaceItem['LayerInfo']:
layerNamesID[opLayer['title']] = opLayer['id']
layerIDs.append(opLayer['id'])
if 'tables' in response:
for opLayer in response['tables']:
tableNamesID[opLayer['title']] = opLayer['id']
tableIDs.append(opLayer['id'])
widgets = itemData['widgets']
dataSourceIDToFields = {}
for widget in widgets:
if 'mapId' in widget:
if replaceItem['SearchString'] == widget['mapId']:
widget['mapId'] = replaceItem['ItemID']
if 'mapTools' in widget:
for mapTool in widget['mapTools']:
if 'layerIds' in mapTool:
mapTool['layerIds'] = layerIDs
if 'dataSources' in widget:
for dataSource in widget['dataSources']:
if 'layerId' in dataSource:
if 'LayerInfo' in replaceItem:
if dataSource['layerId'] in replaceItem['LayerInfo']:
layerIDSwitch.append({"OrigID":dataSource['layerId'],
"NewID":replaceItem['LayerInfo'][dataSource['layerId']]['ID']})
#'FieldInfo':replaceItem['LayerInfo'][dataSource['layerId']]['FieldInfo']})
#dataSourceIDToFields[dataSource['id']] = {'NewID': replaceItem['LayerInfo'][dataSource['layerId']]['ID'],
#'FieldInfo': replaceItem['LayerInfo'][dataSource['layerId']]['FieldInfo']}
dataSource['layerId'] = replaceItem['LayerInfo'][dataSource['layerId']]['ID']
elif dataSource['name'] in layerNamesID:
layerIDSwitch.append({"OrigID":dataSource['layerId'],"NewID":layerNamesID[dataSource['name']] })
dataSource['layerId'] = layerNamesID[dataSource['name']]
for dataSource in widget['dataSources']:
if 'filter' in dataSource:
if dataSource['parentDataSourceId'] in dataSourceIDToFields:
if 'whereClause' in dataSource['filter']:
whercla = str(dataSource['filter']['whereClause'])
if pyparsingInstall:
try:
selectResults = select_parser.select_stmt.parseString("select * from xyzzy where " + whercla)
whereElements = list(selectResults['where_expr'])
for h in range(len(whereElements)):
for field in dataSourceIDToFields[dataSource['parentDataSourceId']]['FieldInfo']['fields']:
if whereElements[h] == field['PublishName']:
whereElements[h] = field['ConvertName']
#whercla = whercla.replace(
#old=field['PublishName'],
#new=field['ConvertName'])
dataSource['filter']['whereClause'] = " ".join(whereElements)
except select_parser.ParseException as pe:
for field in dataSourceIDToFields[dataSource['parentDataSourceId']]['FieldInfo']['fields']:
if whercla.contains(field['PublishName']):
whercla = whercla.replace(
old=field['PublishName'],
new=field['ConvertName'])
else:
for field in dataSourceIDToFields[dataSource['parentDataSourceId']]['FieldInfo']['fields']:
if whercla.contains(field['PublishName']):
whercla = whercla.replace(
old=field['PublishName'],
new=field['ConvertName'])
configFileAsString = json.dumps(itemData)
for repl in layerIDSwitch:
configFileAsString.replace(repl['OrigID'],repl['NewID'])
itemData = json.loads(configFileAsString)
name = config['Title']
if 'DateTimeFormat' in config:
loc_df = config['DateTimeFormat']
else:
loc_df = dateTimeFormat
datestring = datetime.datetime.now().strftime(loc_df)
name = name.replace('{DATE}',datestring)
name = name.replace('{Date}',datestring)
description = config['Description']
tags = config['Tags']
snippet = config['Summary']
everyone = config['ShareEveryone']
org = config['ShareOrg']
groupNames = config['Groups'] #Groups are by ID. Multiple groups comma separated
folderName = config['Folder']
thumbnail = config['Thumbnail']
itemType = config['Type']
typeKeywords = config['typeKeywords']
itemParams = arcrest.manageorg.ItemParameter()
itemParams.title = name
itemParams.thumbnail = thumbnail
itemParams.type = itemType
itemParams.overwrite = True
itemParams.description = description
itemParams.snippet = snippet
itemParams.typeKeywords = ",".join(typeKeywords)
sea = arcrest.find.search(securityHandler=self._securityHandler)
items = sea.findItem(title=name, itemType=
["Web Mapping Application",
"Application",
"Operation View"],
searchorg=False)
if items['total'] >= 1:
for res in items['results']:
if 'type' in res and res['type'] == itemType:
if 'name' in res and res['name'] == name:
itemId = res['id']
break
if 'title' in res and res['title'] == name:
itemId = res['id']
break
if not itemId is None:
item = content.getItem(itemId).userItem
results = item.updateItem(itemParameters=itemParams,
text=json.dumps(itemData))
if 'error' in results:
return results
if item.ownerFolder != folderId:
if folderId is None:
folderId = "/"
moveRes = userInfo.moveItems(items=item.id,folder=folderId)
else:
try:
item = userInfo.addItem(
itemParameters=itemParams,
relationshipType=None,
originItemId=None,
destinationItemId=None,
serviceProxyParams=None,
metadata=None,
text=json.dumps(itemData))
except Exception as e:
print (e)
if item is None:
return "Dashboard could not be added"
group_ids = userCommunity.getGroupIDs(groupNames=groupNames)
shareResults = userInfo.shareItems(items=item.id,
groups=','.join(group_ids),
everyone=everyone,
org=org)
updateParams = arcrest.manageorg.ItemParameter()
updateParams.title = name
updateResults = item.updateItem(itemParameters=updateParams)
resultApp['Results']['itemId'] = item.id
resultApp['folderId'] = folderId
resultApp['Name'] = name
return resultApp
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "_publishDashboard",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
tags = None
description = None
extent = None
itemJson = None
layerIDSwitch = None
admin = None
adminusercontent = None
json_data = None
itemData = None
replaceItem = None
item = None
response = None
layerNamesID = None
layerIDs = None
tableNamesID = None
tableIDs = None
opLayer = None
widget = None
widgets = None
mapTool = None
dataSource = None
configFileAsString = None
repl = None
name = None
loc_df = None
datestring = None
snippet = None
everyone = None
org = None
groupNames = None
folderName = None
thumbnail = None
itemType = None
typeKeywords = None
itemParams = None
adminusercontent = None
userCommunity = None
userContent = None
folderId = None
res = None
folderContent = None
itemId = None
group_ids = None
shareResults = None
updateParams = None
updateResults = None
del tags
del description
del extent
del itemJson
del layerIDSwitch
del admin
del json_data
del itemData
del replaceItem
del item
del response
del layerNamesID
del layerIDs
del tableNamesID
del tableIDs
del opLayer
del widget
del widgets
del mapTool
del dataSource
del configFileAsString
del repl
del name
del loc_df
del datestring
del snippet
del everyone
del org
del groupNames
del folderName
del thumbnail
del itemType
del typeKeywords
del itemParams
del adminusercontent
del userCommunity
del userContent
del folderId
del res
del folderContent
del itemId
del group_ids
del shareResults
del updateParams
del updateResults
gc.collect()
#----------------------------------------------------------------------
def updateFeatureService(self, efs_config):
    """Updates a feature service.

    For each config: optionally deletes features matching ``DeleteInfo``,
    then appends features from ``FeatureClass``. The target layer URL is
    resolved from ``ItemId``/``LayerName`` when given, else from ``URL``.

    Args:
        efs_config (list): A list of JSON configuration feature service
            details to update (a single dict is also accepted).
    Returns:
        dict: A dictionary of results objects.
    """
    if self.securityhandler is None:
        print ("Security handler required")
        return
    fsRes = None
    fst = None
    fURL = None
    resItm = None
    try:
        fsRes = []
        fst = featureservicetools.featureservicetools(securityinfo=self)
        if isinstance(efs_config, list):
            for ext_service in efs_config:
                fURL = None
                cs = 0
                # ChunkSize is optional and best-effort; ignore bad values.
                try:
                    if 'ChunkSize' in ext_service:
                        if common.is_number(ext_service['ChunkSize']):
                            cs = ext_service['ChunkSize']
                except Exception as e:
                    pass
                resItm = {"DeleteDetails": None, "AddDetails": None}
                if 'ItemId' in ext_service and 'LayerName' in ext_service:
                    fs = fst.GetFeatureService(itemId=ext_service['ItemId'], returnURLOnly=False)
                    if not fs is None:
                        fURL = fst.GetLayerFromFeatureService(fs=fs, layerName=ext_service['LayerName'], returnURLOnly=True)
                if fURL is None and 'URL' in ext_service:
                    fURL = ext_service['URL']
                if fURL is None:
                    print("Item and layer not found or URL not in config")
                    continue
                if 'DeleteInfo' in ext_service:
                    if str(ext_service['DeleteInfo']['Delete']).upper() == "TRUE":
                        resItm['DeleteDetails'] = fst.DeleteFeaturesFromFeatureLayer(url=fURL, sql=ext_service['DeleteInfo']['DeleteSQL'], chunksize=cs)
                        if not 'error' in resItm['DeleteDetails']:
                            print ("Delete Successful: %s" % fURL)
                        else:
                            print (str(resItm['DeleteDetails']))
                resItm['AddDetails'] = fst.AddFeaturesToFeatureLayer(url=fURL, pathToFeatureClass=ext_service['FeatureClass'], chunksize=cs)
                fsRes.append(resItm)
                if not 'error' in resItm['AddDetails']:
                    print ("Add Successful: %s " % fURL)
                else:
                    print (str(resItm['AddDetails']))
        else:
            resItm = {"DeleteDetails": None, "AddDetails": None}
            # BUG FIX: the original read efs_config['URL'] unconditionally,
            # raising KeyError for configs that supply only
            # ItemId/LayerName. Mirror the list branch: start at None and
            # fall back to 'URL' only when present.
            fURL = efs_config['URL'] if 'URL' in efs_config else None
            cs = 0
            try:
                if 'ChunkSize' in efs_config:
                    if common.is_number(efs_config['ChunkSize']):
                        cs = efs_config['ChunkSize']
            except Exception as e:
                pass
            if 'ItemId' in efs_config and 'LayerName' in efs_config:
                fs = fst.GetFeatureService(itemId=efs_config['ItemId'], returnURLOnly=False)
                if not fs is None:
                    fURL = fst.GetLayerFromFeatureService(fs=fs, layerName=efs_config['LayerName'], returnURLOnly=True)
            if fURL is None and 'URL' in efs_config:
                fURL = efs_config['URL']
            if fURL is None:
                print("Item and layer not found or URL not in config")
                return None
            if 'DeleteInfo' in efs_config:
                if str(efs_config['DeleteInfo']['Delete']).upper() == "TRUE":
                    resItm['DeleteDetails'] = fst.DeleteFeaturesFromFeatureLayer(url=fURL, sql=efs_config['DeleteInfo']['DeleteSQL'], chunksize=cs)
                    if not 'error' in resItm['DeleteDetails']:
                        print ("  Delete Successful: %s" % fURL)
                    else:
                        print ("  " + str(resItm['DeleteDetails']))
            resItm['AddDetails'] = fst.AddFeaturesToFeatureLayer(url=fURL, pathToFeatureClass=efs_config['FeatureClass'], chunksize=cs)
            fsRes.append(resItm)
            if not 'error' in resItm['AddDetails']:
                print ("  Add Successful: %s " % fURL)
            else:
                print ("  " + str(resItm['AddDetails']))
        return fsRes
    except common.ArcRestHelperError as e:
        raise e
    except Exception as e:
        line, filename, synerror = trace()
        raise common.ArcRestHelperError({
            "function": "updateFeatureService",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        fst = None
        fURL = None
        resItm = None
        del fst
        del fURL
        del resItm
        gc.collect()
#----------------------------------------------------------------------
def _publishFeatureCollection(self, config):
    """Generate and publish a Feature Collection item from a zipped shapefile.

    Args:
        config (dict): Configuration: ``Zip`` (path to shapefile zip),
            ``Title``, ``Tags``, ``Summary``, ``Extent`` ("xmin,ymin,xmax,ymax"),
            sharing options, ``Folder``, ``Thumbnail``, ``typeKeywords``.

    Returns:
        dict: The portal add/update result for the feature-collection item.

    Raises:
        ValueError: If the zip file does not exist.
        common.ArcRestHelperError: If generation fails or on any unexpected error.
    """
    try:
        # Service settings
        zipfile = config['Zip']
        service_name = config['Title']
        if 'DateTimeFormat' in config:
            loc_df = config['DateTimeFormat']
        else:
            loc_df = dateTimeFormat
        description = ""
        if 'Description' in config:
            description = config['Description']
        tags = config['Tags']
        snippet = config['Summary']
        extent = config['Extent']
        everyone = config['ShareEveryone']
        org = config['ShareOrg']
        groupNames = config['Groups']  # Groups are by ID. Multiple groups comma separated
        folderName = config['Folder']
        thumbnail = config['Thumbnail']
        typeKeywords = config['typeKeywords']
        datestring = datetime.datetime.now().strftime(loc_df)
        service_name = service_name.replace('{DATE}', datestring)
        service_name = service_name.replace('{Date}', datestring)
        service_name_safe = service_name.replace(' ', '_')
        service_name_safe = service_name_safe.replace(':', '_')
        service_name_safe = service_name_safe.replace('-', '_')
        if os.path.exists(path=zipfile) == False:
            # Typo fix: original message read "Zip does not exit".
            raise ValueError("Zip does not exist")
        admin = arcrest.manageorg.Administration(securityHandler=self.securityhandler)
        content = admin.content
        feature_content = content.FeatureContent
        publishParameters = arcrest.manageorg.GenerateParameter(
            name=service_name, maxRecordCount=4000
        )
        fcResults = feature_content.generate(publishParameters=publishParameters,
                                             itemId=None,
                                             filePath=zipfile,
                                             fileType='shapefile')
        if not 'featureCollection' in fcResults:
            raise common.ArcRestHelperError({
                "function": "_publishFeatureCollection",
                "line": lineno(),
                "filename": 'publishingtools.py',
                "synerror": fcResults
            })
        if not 'layers' in fcResults['featureCollection']:
            raise common.ArcRestHelperError({
                "function": "_publishFeatureCollection",
                "line": lineno(),
                "filename": 'publishingtools.py',
                "synerror": fcResults
            })
        fcJson = {'visibility': True,
                  'showLegend': True,
                  'opacity': 1}
        for layer in fcResults['featureCollection']['layers']:
            oidFldName = ''
            highOID = -1
            # NOTE(review): popInfo is assembled below but never attached to
            # the layer — presumably meant to become the layer's popupInfo;
            # confirm before wiring it in.
            popInfo = {'title': '',
                       'description': None,
                       'showAttachments': False,
                       'mediaInfo': [],
                       'fieldInfos': []
                       }
            if 'layerDefinition' in layer:
                # Extent values stay as strings from split(); the portal
                # accepts them as-is.
                extVal = extent.split(',')
                layer['layerDefinition']['extent'] = {'type': 'extent',
                                                      'xmin': extVal[0],
                                                      'ymin': extVal[1],
                                                      'xmax': extVal[2],
                                                      'ymax': extVal[3]
                                                      }
                layer['layerDefinition']['spatialReference'] = {'wkid': 102100}
                if 'fields' in layer['layerDefinition']:
                    for field in layer['layerDefinition']['fields']:
                        fieldInfos = None
                        if field['type'] == 'esriFieldTypeOID':
                            oidFldName = field['name']
                            fieldInfos = {
                                'fieldName': field['name'],
                                'label': field['alias'],
                                'isEditable': False,
                                'tooltip': '',
                                'visible': False,
                                'format': None,
                                'stringFieldOption': 'textbox'
                            }
                        elif field['type'] == 'esriFieldTypeInteger':
                            fieldInfos = {
                                'fieldName': field['name'],
                                'label': field['alias'],
                                'isEditable': True,
                                'tooltip': '',
                                'visible': True,
                                'format': {
                                    'places': 0,
                                    'digitSeparator': True
                                },
                                'stringFieldOption': 'textbox'
                            }
                        elif field['type'] == 'esriFieldTypeDouble':
                            fieldInfos = {
                                'fieldName': field['name'],
                                'label': field['alias'],
                                'isEditable': True,
                                'tooltip': '',
                                'visible': True,
                                'format': {
                                    'places': 2,
                                    'digitSeparator': True
                                },
                                'stringFieldOption': 'textbox'
                            }
                        elif field['type'] == 'esriFieldTypeString':
                            fieldInfos = {
                                'fieldName': field['name'],
                                'label': field['alias'],
                                'isEditable': True,
                                'tooltip': '',
                                'visible': True,
                                'format': None,
                                'stringFieldOption': 'textbox'
                            }
                        else:
                            fieldInfos = {
                                'fieldName': field['name'],
                                'label': field['alias'],
                                'isEditable': True,
                                'tooltip': '',
                                'visible': True,
                                'format': None,
                                'stringFieldOption': 'textbox'
                            }
                        if fieldInfos is not None:
                            popInfo['fieldInfos'].append(fieldInfos)
            if 'featureSet' in layer:
                if 'features' in layer['featureSet']:
                    for feature in layer['featureSet']['features']:
                        if 'attributes' in feature:
                            if feature['attributes'][oidFldName] > highOID:
                                # BUG FIX: original read feature[oidFldName],
                                # but the OID lives under 'attributes' (as the
                                # comparison above shows).
                                highOID = feature['attributes'][oidFldName]
            layer['nextObjectId'] = highOID + 1
        fcJson['layers'] = fcResults['featureCollection']['layers']
        itemParams = arcrest.manageorg.ItemParameter()
        itemParams.type = "Feature Collection"
        itemParams.title = service_name
        itemParams.thumbnail = thumbnail
        itemParams.overwrite = True
        itemParams.snippet = snippet
        itemParams.description = description
        itemParams.extent = extent
        itemParams.tags = tags
        itemParams.typeKeywords = ",".join(typeKeywords)
        userInfo = content.users.user()
        userCommunity = admin.community
        if folderName is not None and folderName != "":
            if self.folderExist(name=folderName, folders=userInfo.folders) is None:
                res = userInfo.createFolder(name=folderName)
            userInfo.currentFolder = folderName
        if 'id' in userInfo.currentFolder:
            folderId = userInfo.currentFolder['id']
        # Reuse an existing item with the same title if one is found.
        sea = arcrest.find.search(securityHandler=self._securityHandler)
        items = sea.findItem(title=service_name, itemType='Feature Collection', searchorg=False)
        itemId = None
        if items['total'] >= 1:
            for res in items['results']:
                if 'type' in res and res['type'] == 'Feature Collection':
                    if 'name' in res and res['name'] == service_name:
                        itemId = res['id']
                        break
                    if 'title' in res and res['title'] == service_name:
                        itemId = res['id']
                        break
        if itemId is not None:
            item = content.getItem(itemId).userItem
            resultSD = item.updateItem(itemParameters=itemParams,
                                       text=fcJson)
            if item.ownerFolder != folderId:
                if folderId is None:
                    folderId = "/"
                moveRes = userInfo.moveItems(items=item.id, folder=folderId)
        else:
            resultSD = userInfo.addItem(itemParameters=itemParams,
                                        overwrite=True,
                                        url=None,
                                        text=fcJson,
                                        relationshipType=None,
                                        originItemId=None,
                                        destinationItemId=None,
                                        serviceProxyParams=None,
                                        metadata=None)
        if 'error' in resultSD:
            # Overwrite failed: delete the conflicting item and retry once.
            if itemId is not None:
                print("Attempting to delete")
                delres = userInfo.deleteItems(items=itemId)
                if 'error' in delres:
                    print(delres)
                    return delres
                print("Delete successful")
            else:
                print("Item exist and cannot be found, probably owned by another user.")
                raise common.ArcRestHelperError({
                    "function": "_publishFeatureCollection",
                    "line": lineno(),
                    "filename": 'publishingtools.py',
                    "synerror": "Item exist and cannot be found, probably owned by another user."
                })
            resultSD = userInfo.addItem(itemParameters=itemParams,
                                        overwrite=True,
                                        url=None,
                                        text=fcResults['featureCollection'],
                                        relationshipType=None,
                                        originItemId=None,
                                        destinationItemId=None,
                                        serviceProxyParams=None,
                                        metadata=None)
            return resultSD
        else:
            return resultSD
    except common.ArcRestHelperError as e:
        raise e
    except Exception as e:
        line, filename, synerror = trace()
        raise common.ArcRestHelperError({
            "function": "_publishFeatureCollection",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        gc.collect()
| {
"content_hash": "509bcd499add12d306b79716159c994f",
"timestamp": "",
"source": "github",
"line_count": 3119,
"max_line_length": 201,
"avg_line_length": 42.84834882975313,
"alnum_prop": 0.4404312950824579,
"repo_name": "BrunoCaimar/ArcREST",
"id": "c88a58592072e604c111b8ccedddfa7833cc2874",
"size": "133645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/arcresthelper/publishingtools.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "48383"
},
{
"name": "Python",
"bytes": "2160407"
}
],
"symlink_target": ""
} |
from oslo_config import cfg
from toscaparser.tosca_template import ToscaTemplate
import yaml
from tacker.common import utils
from tacker.tests import constants
from tacker.tests.functional import base
from tacker.tests.utils import read_file
from tacker.vm.tosca import utils as toscautils
CONF = cfg.CONF
class VnfTestToscaMultipleVDU(base.BaseTackerTest):
    """Functional test: VNF lifecycle for a TOSCA template with several VDUs."""

    def test_create_delete_tosca_vnf_with_multiple_vdus(self):
        """Create a VNFD/VNF pair from a multi-VDU template, then tear down.

        Verifies that the VNF reaches ACTIVE and that every VDU declared in
        the template is given a valid IPv4 management address in mgmt_url.
        """
        input_yaml = read_file('sample-tosca-vnfd-multi-vdu.yaml')
        vnfd_name = 'sample-tosca-vnfd-multi-vdu'
        tosca_arg = {'vnfd': {'name': vnfd_name,
                              'attributes': {'vnfd': input_yaml}}}

        # Create vnfd with tosca template
        vnfd_instance = self.client.create_vnfd(body=tosca_arg)
        self.assertIsNotNone(vnfd_instance)

        # Create vnf with vnfd_id
        vnfd_id = vnfd_instance['vnfd']['id']
        vnf_arg = {'vnf': {'vnfd_id': vnfd_id, 'name':
                   "test_tosca_vnf_with_multiple_vdus"}}
        vnf_instance = self.client.create_vnf(body=vnf_arg)

        vnf_id = vnf_instance['vnf']['id']
        self.wait_until_vnf_active(vnf_id,
                                   constants.VNF_CIRROS_CREATE_TIMEOUT,
                                   constants.ACTIVE_SLEEP_TIME)
        self.assertEqual('ACTIVE',
                         self.client.show_vnf(vnf_id)['vnf']['status'])
        self.validate_vnf_instance(vnfd_instance, vnf_instance)

        # Validate mgmt_url against the VDUs declared in the input yaml.
        mgmt_url = self.client.show_vnf(vnf_id)['vnf']['mgmt_url']
        self.assertIsNotNone(mgmt_url)
        # safe_load: plain mappings only -- never run full yaml.load on
        # data that came back from the API (unsafe and deprecated).
        mgmt_dict = yaml.safe_load(str(mgmt_url))

        input_dict = yaml.safe_load(input_yaml)
        toscautils.updateimports(input_dict)

        tosca = ToscaTemplate(parsed_params={}, a_file=False,
                              yaml_dict_tpl=input_dict)

        vdus = toscautils.findvdus(tosca)

        # One management address per declared VDU, each a valid IPv4.
        self.assertEqual(len(vdus), len(mgmt_dict.keys()))
        for vdu in vdus:
            self.assertIsNotNone(mgmt_dict[vdu.name])
            self.assertEqual(True, utils.is_valid_ipv4(mgmt_dict[vdu.name]))

        # Delete vnf_instance with vnf_id
        try:
            self.client.delete_vnf(vnf_id)
        except Exception:
            assert False, "vnf Delete of test_vnf_with_multiple_vdus failed"

        # Cleanups run LIFO: first wait for the VNF to disappear, then
        # delete the vnfd it was built from.
        self.addCleanup(self.client.delete_vnfd, vnfd_id)
        self.addCleanup(self.wait_until_vnf_delete, vnf_id,
                        constants.VNF_CIRROS_DELETE_TIMEOUT)
| {
"content_hash": "0acba684061847de4e158d8cc6b690a9",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 76,
"avg_line_length": 37.869565217391305,
"alnum_prop": 0.6119402985074627,
"repo_name": "trozet/tacker",
"id": "10c83aeeba3d6e844132aa3bee7035fdf0c5bed6",
"size": "3239",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tacker/tests/functional/vnfm/test_tosca_vnf_multiple_vdu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1142"
},
{
"name": "Python",
"bytes": "948830"
},
{
"name": "Shell",
"bytes": "29090"
}
],
"symlink_target": ""
} |
"""
Tests the EventAdmin service
:author: Thomas Calmant
"""
# Standard library
import random
import threading
import time
try:
import unittest2 as unittest
except ImportError:
import unittest
# Pelix
from pelix.ipopo.constants import use_ipopo
import pelix.framework
import pelix.services
# ------------------------------------------------------------------------------
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# ------------------------------------------------------------------------------
class DummyEventHandler(object):
    """
    Minimal EventAdmin handler used as a probe by the tests: records the
    last (topic, properties) pair it was notified with and signals waiters.
    """
    def __init__(self):
        """
        Sets up members
        """
        # Last received topic / properties
        self.last_event = None
        self.last_props = {}
        # Signalled whenever an event is handled
        self.__event = threading.Event()

        # Behavior knobs, configured by the tests
        self.change_props = False
        self.sleep = 0

    def handle_event(self, topic, properties):
        """
        Handles an event received from EventAdmin
        """
        # Optional side effects driven by the test configuration
        if self.change_props:
            properties['change'] = self.change_props
        if self.sleep:
            time.sleep(self.sleep)

        # Record what was received, then wake up any waiter
        self.last_event = topic
        self.last_props = properties
        self.__event.set()

    def pop_event(self):
        """
        Returns the last received topic and forgets it
        """
        # Re-arm the internal event for the next notification
        self.__event.clear()

        # Hand the stored topic out, clearing it at the same time
        popped, self.last_event = self.last_event, None
        return popped

    def wait(self, timeout):
        """
        Blocks until an event is received, or the timeout expires
        """
        self.__event.wait(timeout)
# ------------------------------------------------------------------------------
class EventAdminTest(unittest.TestCase):
    """
    Tests the EventAdmin service: topic matching, property filters,
    asynchronous posting and per-handler property isolation.
    """
    def setUp(self):
        """
        Prepares a framework and a registers a service to export
        """
        # Create the framework
        self.framework = pelix.framework.create_framework(
            ('pelix.ipopo.core',
             'pelix.services.eventadmin'))
        self.framework.start()
        # Instantiate the EventAdmin component
        context = self.framework.get_bundle_context()
        with use_ipopo(context) as ipopo:
            self.eventadmin = ipopo.instantiate(
                pelix.services.FACTORY_EVENT_ADMIN,
                "evtadmin", {})
    def _register_handler(self, topics, evt_filter=None):
        """
        Registers an event handler
        :param topics: Event topics
        :param evt_filter: Event filter
        :return: The DummyEventHandler and its service registration
        """
        svc = DummyEventHandler()
        context = self.framework.get_bundle_context()
        svc_reg = context.register_service(
            pelix.services.SERVICE_EVENT_HANDLER, svc,
            {pelix.services.PROP_EVENT_TOPICS: topics,
             pelix.services.PROP_EVENT_FILTER: evt_filter})
        return svc, svc_reg
    def tearDown(self):
        """
        Cleans up for next test
        """
        # Stop the framework
        pelix.framework.FrameworkFactory.delete_framework(self.framework)
        self.framework = None
    def testNoHandler(self):
        """
        Tests events when no event handler is registered
        """
        # Must not raise even though nobody is listening
        self.eventadmin.send('/titi/toto', {'some.value': 42})
    def testTopics(self):
        """
        Tests the topics filtering
        """
        # Prepare a handler
        handler, _ = self._register_handler('/titi/*')
        # Assert the handler is empty
        self.assertEqual(handler.pop_event(), None)
        # Send events, with a matching topic
        for topic in ('/titi/toto', '/titi/', '/titi/42', '/titi/toto/tata'):
            self.eventadmin.send(topic)
            self.assertEqual(handler.pop_event(), topic)
        # Send events, with a non-matching topic
        for topic in ('/toto/titi/42', '/titi', '/toto/42'):
            self.eventadmin.send(topic)
            self.assertEqual(handler.pop_event(), None)
    def testFilters(self):
        """
        Tests the events filtering
        """
        # Prepare a handler (no topic restriction, LDAP-style filter)
        handler, _ = self._register_handler(None, '(answer=42)')
        # Assert the handler is empty
        self.assertEqual(handler.pop_event(), None)
        # Send event, with matching properties
        for topic in ('/titi/toto', '/toto/', '/titi/42', '/titi/toto/tata'):
            # 42 must match whether scalar, string or inside a container
            for value in (42, '42', [1, 2, 42, 20], {42, 10},
                          (10, 21, 42)):
                evt_props = {'answer': value}
                self.eventadmin.send(topic, evt_props)
                # Check properties
                self.assertDictContainsSubset(evt_props, handler.last_props)
                self.assertIsNot(handler.last_props, evt_props)
                # Check topic
                self.assertEqual(handler.pop_event(), topic)
            # Send events, with a non-matching properties
            for value in (' 42 ', 21, [1, 2, 3], (4, 5, 6), {7, 8, 9}):
                self.eventadmin.send(topic, {'answer': value})
                self.assertEqual(handler.pop_event(), None)
    def testPost(self):
        """
        Tests the post event method
        """
        # Prepare a handler
        handler, _ = self._register_handler('/titi/*')
        # Post a message
        topic = '/titi/toto'
        self.eventadmin.post(topic)
        # Wait a little
        handler.wait(1)
        self.assertEqual(handler.pop_event(), topic)
        # Add a handler
        handler_2, handler_2_reg = self._register_handler('/titi/*')
        handler_3, _ = self._register_handler('/titi/*')
        # Let the first handler sleep
        handler.sleep = 1
        # Post a message
        self.eventadmin.post(topic)
        # Wait a little (so that the list of handlers is prepared)
        time.sleep(.2)
        # Unregister the second handler
        handler_2_reg.unregister()
        # Register a new one
        handler_4, _ = self._register_handler('/titi/*')
        # Wait a little: only handlers present during the call to 'post'
        # and still present during the notification loop must be notified
        handler.wait(2)
        handler_3.wait(2)
        self.assertEqual(handler.pop_event(), topic)
        self.assertEqual(handler_2.pop_event(), None)
        self.assertEqual(handler_3.pop_event(), topic)
        self.assertEqual(handler_4.pop_event(), None)
    def testProperties(self):
        """
        Ensures that each handler get its own copy of the properties
        """
        # Prepare handlers, each mutating the properties it receives
        handler_1, _ = self._register_handler('/titi/*')
        handler_2, _ = self._register_handler('/titi/*')
        handler_3, _ = self._register_handler('/titi/*')
        for handler in (handler_1, handler_2, handler_3):
            handler.change_props = random.randint(1, 10)
        # Send an event
        evt_props = {'answer': 42}
        self.eventadmin.send('/titi/toto', evt_props)
        for handler in (handler_1, handler_2, handler_3):
            # Check that the original properties are kept
            self.assertDictContainsSubset(evt_props, handler.last_props)
            # Check that the handler value has been stored
            self.assertEqual(
                handler.last_props['change'], handler.change_props)
| {
"content_hash": "4afb6c28f3b6e962c6adb1c43e34d351",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 80,
"avg_line_length": 30.146341463414632,
"alnum_prop": 0.5511057173678533,
"repo_name": "tcalmant/ipopo",
"id": "0e24411f97c6b3bd0db90de1c83999190d166c8d",
"size": "7470",
"binary": false,
"copies": "1",
"ref": "refs/heads/v1",
"path": "tests/services/test_eventadmin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2183067"
}
],
"symlink_target": ""
} |
from madrona.features.forms import FeatureForm
from seak.models import Folder, Scenario
from django import forms
class FolderForm(FeatureForm):
    """Feature form bound to the Folder model."""
    class Meta(FeatureForm.Meta):
        model = Folder
class ScenarioForm(FeatureForm):
    """Feature form bound to the Scenario model."""
    # Rendered as a text input that client-side JS turns into a slider.
    input_scalefactor = forms.FloatField(widget=forms.TextInput(attrs={'class': 'slidervalue'}), initial=1.0)
    class Meta(FeatureForm.Meta):
        model = Scenario
        # Start from the base exclusions and also hide every computed
        # output field of the model. Note: this loop runs once, at
        # class-definition time.
        exclude = list(FeatureForm.Meta.exclude)
        for f in model.output_fields():
            exclude.append(f.attname)
| {
"content_hash": "7db79abb70685ce5a5b561433c2f1a7c",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 109,
"avg_line_length": 33.375,
"alnum_prop": 0.7116104868913857,
"repo_name": "Ecotrust/juniper-priorities",
"id": "a7fe2fa00b1a20ecfe28f39a73f4609b6dec7cb3",
"size": "534",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "priorities/seak/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1516821"
},
{
"name": "HTML",
"bytes": "254677"
},
{
"name": "JavaScript",
"bytes": "257314"
},
{
"name": "PHP",
"bytes": "3062"
},
{
"name": "PLpgSQL",
"bytes": "2432"
},
{
"name": "Pascal",
"bytes": "88"
},
{
"name": "Puppet",
"bytes": "14293"
},
{
"name": "Python",
"bytes": "354305"
},
{
"name": "Shell",
"bytes": "19623"
}
],
"symlink_target": ""
} |
import numpy as np
from mock import MagicMock
import pytest
from ..data import Data, Component, ComponentID, DerivedComponent
from ..hub import Hub, HubListener
from ..data_collection import DataCollection
from ..message import (Message, DataCollectionAddMessage,
DataCollectionDeleteMessage,
ComponentsChangedMessage)
from ..component_link import ComponentLink
class HubLog(HubListener):
    """Hub listener that records every broadcast message, for assertions."""
    def __init__(self):
        # Messages received so far, oldest first.
        self.messages = []
    def register_to_hub(self, hub):
        # Subscribe to the base Message class, i.e. to every message type.
        hub.subscribe(self, Message)
    def notify(self, message):
        # HubListener callback -- just log the message.
        self.messages.append(message)
class TestDataCollection(object):
    """Unit tests for DataCollection: membership, hub messages and links."""

    def setup_method(self, method):
        # Fresh fixtures per test: an empty collection, a mock data object,
        # and a hub whose traffic is captured by HubLog.
        self.dc = DataCollection()
        self.data = MagicMock()
        self.hub = Hub()
        self.log = HubLog()
        self.log.register_to_hub(self.hub)

    def test_init_scalar(self):
        """Single data object passed to init adds to collection"""
        d = Data()
        dc = DataCollection(d)
        assert d in dc

    def test_init_list(self):
        """List of data objects passed to init auto-added to collection"""
        d1 = Data()
        d2 = Data()
        dc = DataCollection([d1, d2])
        assert d1 in dc
        assert d2 in dc

    def test_data(self):
        """ data attribute is a list of all appended data"""
        self.dc.append(self.data)
        assert self.dc.data == [self.data]

    def test_append(self):
        """ append method adds to collection """
        self.dc.append(self.data)
        assert self.data in self.dc

    def test_multi_append(self):
        """ append method works with lists """
        d = Data('test1', x=[1, 2, 3])
        d2 = Data('test2', y=[2, 3, 4])
        self.dc.append([d, d2])
        assert d in self.dc
        assert d2 in self.dc

    def test_ignore_multi_add(self):
        """ data only added once, even after multiple calls to append """
        self.dc.append(self.data)
        self.dc.append(self.data)
        assert len(self.dc) == 1

    def test_remove(self):
        self.dc.append(self.data)
        self.dc.remove(self.data)
        assert self.data not in self.dc

    def test_ignore_multi_remove(self):
        """ removing twice is a silent no-op """
        self.dc.append(self.data)
        self.dc.remove(self.data)
        self.dc.remove(self.data)
        assert self.data not in self.dc

    def test_append_broadcast(self):
        """ Call to append generates a DataCollectionAddMessage """
        self.dc.register_to_hub(self.hub)
        self.dc.append(self.data)
        msg = self.log.messages[-1]
        assert msg.sender == self.dc
        assert isinstance(msg, DataCollectionAddMessage)
        assert msg.data is self.data

    def test_remove_broadcast(self):
        """ call to remove generates a DataCollectionDeleteMessage """
        self.dc.register_to_hub(self.hub)
        self.dc.append(self.data)
        self.dc.remove(self.data)
        msg = self.log.messages[-1]
        assert msg.sender == self.dc
        assert isinstance(msg, DataCollectionDeleteMessage)
        assert msg.data is self.data

    def test_register_adds_hub(self):
        """ register_to_hub stores the hub on the collection """
        self.dc.register_to_hub(self.hub)
        assert self.dc.hub is self.hub

    def test_invalid_register(self):
        """Type error is raised if hub is not a Hub object"""
        with pytest.raises(TypeError) as exc:
            self.dc.register_to_hub(3)
        # NOTE(review): this message pins the Python 2 repr of int
        # ("<type 'int'>"); under Python 3 it would be "<class 'int'>".
        assert exc.value.args[0] == "Input is not a Hub object: <type 'int'>"

    def test_register_assigns_hub_of_data(self):
        """ registering the collection propagates the hub to its data """
        self.dc.append(self.data)
        self.dc.register_to_hub(self.hub)
        self.data.register_to_hub.assert_called_once_with(self.hub)

    def test_get_item(self):
        self.dc.append(self.data)
        assert self.dc[0] is self.data

    def test_iter(self):
        self.dc.append(self.data)
        assert set(self.dc) == set([self.data])

    def test_len(self):
        assert len(self.dc) == 0
        self.dc.append(self.data)
        assert len(self.dc) == 1
        self.dc.append(self.data)
        assert len(self.dc) == 1
        self.dc.remove(self.data)
        assert len(self.dc) == 0

    def test_derived_links_autoadd(self):
        """When appending a data set, its DerivedComponents
        should be ingested into the LinkManager"""
        d = Data()
        id1 = ComponentID("id1")
        id2 = ComponentID("id2")
        link = ComponentLink([id1], id2)
        # 'derived' (was 'dc') avoids shadowing the DataCollection below.
        derived = DerivedComponent(d, link)
        d.add_component(Component(np.array([1, 2, 3])), id1)
        d.add_component(derived, id2)

        dc = DataCollection()
        dc.append(d)
        assert link in dc._link_manager

    def test_catch_data_add_component_message(self):
        """DerivedComponents added to a dataset in a collection
        should generate messages that the collection catches.
        """
        d = Data()
        id1 = ComponentID("id1")
        id2 = ComponentID("id2")
        link = ComponentLink([id1], id2)
        derived = DerivedComponent(d, link)

        self.dc.register_to_hub(self.hub)
        self.dc.append(d)
        # The plain component alone must not introduce the link...
        d.add_component(Component(np.array([1, 2, 3])), id1)
        assert link not in self.dc._link_manager
        # ...but adding the derived component broadcasts a change that does.
        d.add_component(derived, id2)
        msg = self.log.messages[-1]
        assert isinstance(msg, ComponentsChangedMessage)
        assert link in self.dc._link_manager

    def test_coordinate_links_auto_added(self):
        id1 = ComponentID("id1")
        id2 = ComponentID("id2")
        link = ComponentLink([id1], id2)
        self.data.coordinate_links = [link]
        self.dc.append(self.data)
        assert link in self.dc.links

    def test_add_links(self):
        """ links attribute behaves like an editable list """
        id1 = ComponentID("id1")
        id2 = ComponentID("id2")
        link = ComponentLink([id1], id2)
        self.dc.set_links([link])
        assert link in self.dc.links

    def test_add_links_updates_components(self):
        """setting links attribute automatically adds components to data"""
        d = Data()
        comp = MagicMock(spec_set=Component)
        id1 = ComponentID("id1")
        d.add_component(comp, id1)
        id2 = ComponentID("id2")
        self.dc.append(d)
        link = ComponentLink([id1], id2, using=lambda x: None)
        self.dc.set_links([link])
        assert id2 in d.components

    def test_links_propagated(self):
        """Web of links is grown and applied to data automatically"""
        d = Data()
        dc = DataCollection([d])
        cid1 = d.add_component(np.array([1, 2, 3]), 'a')
        cid2 = ComponentID('b')
        cid3 = ComponentID('c')
        dummy = lambda x: None

        dc.add_link(ComponentLink([cid1], cid2, dummy))
        assert cid2 in d.components

        dc.add_link(ComponentLink([cid2], cid3, dummy))
        assert cid3 in d.components

    def test_merge_links(self):
        """Trivial links should be merged, discarding the duplicate ID"""
        d1 = Data(x=[1, 2, 3])
        d2 = Data(x=[2, 3, 4])
        dc = DataCollection([d1, d2])

        duplicated_id = d2.id['x']
        link = ComponentLink([d1.id['x']], d2.id['x'])
        dc.add_link(link)

        # The two datasets now share a single ComponentID...
        assert d1.id['x'] is d2.id['x']
        # ...and the old duplicate is gone from both.
        assert d1.id['x'] is not duplicated_id
        assert duplicated_id not in d1.components
        assert duplicated_id not in d2.components

        np.testing.assert_array_equal(d1[d1.id['x']], [1, 2, 3])
        np.testing.assert_array_equal(d2[d1.id['x']], [2, 3, 4])
| {
"content_hash": "e5cf6ba9bb68e064b34249934f457152",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 77,
"avg_line_length": 32.392405063291136,
"alnum_prop": 0.5960661716816464,
"repo_name": "glue-viz/glue-qt",
"id": "ae154147cb6b05962d3079d1a831e46744806e77",
"size": "7730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glue/core/tests/test_data_collection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4040"
},
{
"name": "Python",
"bytes": "2472826"
},
{
"name": "Shell",
"bytes": "63"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the ``image`` field to the Member plugin."""
    def forwards(self, orm):
        # Adding field 'Member.image'
        db.add_column(u'cmsplugin_member', 'image',
                      self.gf('django.db.models.fields.files.ImageField')(default='/static/img/person-placeholder.jpg', max_length=100),
                      keep_default=False)
    def backwards(self, orm):
        # Deleting field 'Member.image'
        db.delete_column(u'cmsplugin_member', 'image')
    # Frozen ORM state below is auto-generated by South; do not edit by hand.
    models = {
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        u'plugins.credential': {
            'Meta': {'object_name': 'Credential', 'db_table': "u'cmsplugin_credential'", '_ormbases': ['cms.CMSPlugin']},
            u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'default': "'Description'"}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'inverted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "'Name'", 'max_length': '100'})
        },
        u'plugins.member': {
            'Meta': {'object_name': 'Member', 'db_table': "u'cmsplugin_member'", '_ormbases': ['cms.CMSPlugin']},
            u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'default': "'Description'"}),
            'image': ('django.db.models.fields.files.ImageField', [], {'default': "'/static/img/person-placeholder.jpg'", 'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "'Name'", 'max_length': '100'})
        },
        u'plugins.reference': {
            'Meta': {'object_name': 'Reference', 'db_table': "u'cmsplugin_reference'", '_ormbases': ['cms.CMSPlugin']},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'default': "'Description'"}),
            'referee': ('django.db.models.fields.CharField', [], {'default': "'Referee'", 'max_length': '100'})
        },
        u'plugins.service': {
            'Meta': {'object_name': 'Service', 'db_table': "u'cmsplugin_service'", '_ormbases': ['cms.CMSPlugin']},
            u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'default': "'Description'"}),
            'name': ('django.db.models.fields.CharField', [], {'default': "'Name'", 'max_length': '100'})
        },
        u'plugins.team': {
            'Meta': {'object_name': 'Team', 'db_table': "u'cmsplugin_team'", '_ormbases': ['cms.CMSPlugin']},
            u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'default': "'Description'"}),
            'name': ('django.db.models.fields.CharField', [], {'default': "'Name'", 'max_length': '100'})
        }
    }
    complete_apps = ['plugins']
"content_hash": "b608ffe3eb0640de803d6b2b2dbff58b",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 157,
"avg_line_length": 68.88607594936708,
"alnum_prop": 0.5648658581403896,
"repo_name": "robertour/commas",
"id": "a18e935aeb145292a6a81cc012e12ca9f9bc47c9",
"size": "5466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/migrations/0011_auto__add_field_member_image.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "303484"
},
{
"name": "JavaScript",
"bytes": "12522"
},
{
"name": "Python",
"bytes": "151771"
}
],
"symlink_target": ""
} |
"""Update encrypted deploy password in Travis config file
"""
from __future__ import print_function
import base64
import json
import os
from getpass import getpass
import yaml
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
try:
from urllib import urlopen
except:
from urllib.request import urlopen
GITHUB_REPO = 'bernard357/plumbery'
TRAVIS_CONFIG_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '.travis.yml')
def load_key(pubkey):
    """Parse a PEM public RSA key, tolerating a malformed header/footer.

    Some Travis keys carry an 'RSA PUBLIC KEY' marker that the PEM loader
    rejects; strip the 'RSA ' part and retry in that case.
    See https://github.com/travis-ci/travis-api/issues/196 and
    https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/
    """
    backend = default_backend()
    try:
        return load_pem_public_key(pubkey.encode(), backend)
    except ValueError:
        # workaround for https://github.com/travis-ci/travis-api/issues/196
        fixed = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
        return load_pem_public_key(fixed.encode(), backend)
def encrypt(pubkey, password):
    """Encrypt password using given RSA public key and encode it with base64.

    The encrypted password can only be decrypted by someone with the
    private key (in this case, only Travis).

    :param pubkey: PEM-encoded RSA public key (str)
    :param password: password as bytes or text; text is UTF-8 encoded
        first, since RSA encryption operates on bytes (getpass() returns
        str on Python 3, which previously raised a TypeError here)
    :return: base64-encoded ciphertext (bytes)
    """
    key = load_key(pubkey)
    # key.encrypt() requires bytes; callers may hand us a text password.
    if not isinstance(password, bytes):
        password = password.encode('utf-8')
    encrypted_password = key.encrypt(password, PKCS1v15())
    return base64.b64encode(encrypted_password)
def fetch_public_key(repo):
    """Download the RSA public key Travis will use for *repo*.

    Travis API docs: http://docs.travis-ci.com/api/#repository-keys
    """
    url = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
    response = json.loads(urlopen(url).read())
    if 'key' not in response:
        message = ("Could not find public key for repo: {}.\n".format(repo)
                   + "Have you already added your GitHub repo to Travis?")
        raise ValueError(message)
    return response['key']
def prepend_line(filepath, line):
    """Rewrite *filepath* in place so that *line* becomes its first line."""
    # Read the whole file, then write the new line followed by the old body.
    with open(filepath) as src:
        original = src.read()
    with open(filepath, 'w') as dst:
        dst.write(line)
        dst.write(original)
def load_yaml_config(filepath):
    """Load a YAML config file and return the parsed document.

    Uses yaml.safe_load: a .travis.yml never needs arbitrary Python object
    construction, and plain yaml.load without an explicit Loader is unsafe
    and deprecated.
    """
    with open(filepath) as f:
        return yaml.safe_load(f)
def save_yaml_config(filepath, config):
    """Serialize *config* to *filepath* as block-style YAML."""
    with open(filepath, 'w') as stream:
        yaml.dump(config, stream, default_flow_style=False)
def update_travis_deploy_password(encrypted_password):
    """Rewrite .travis.yml so its deploy section carries *encrypted_password*."""
    config = load_yaml_config(TRAVIS_CONFIG_FILE)
    config['deploy']['password'] = {'secure': encrypted_password}
    save_yaml_config(TRAVIS_CONFIG_FILE, config)

    # Warn future readers that the file is generated.
    warning = ('# This file was autogenerated and will overwrite'
               ' each time you run travis_pypi_setup.py\n')
    prepend_line(TRAVIS_CONFIG_FILE, warning)
def main(args):
    """Fetch the repo key, obtain the PyPI password, rewrite .travis.yml."""
    pubkey = fetch_public_key(args.repo)
    # Only prompt when the password was not passed on the command line.
    secret = args.password or getpass('PyPI password: ')
    update_travis_deploy_password(encrypt(pubkey, secret))
    print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
if __name__ == '__main__':
    import argparse

    # Module docstring doubles as the CLI description.
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--repo', default=GITHUB_REPO,
                        help='GitHub repo (default: %s)' % GITHUB_REPO)
    parser.add_argument('--password',
                        help='PyPI password (will prompt if not provided)')

    main(parser.parse_args())
| {
"content_hash": "bc7e0e92f37648f9f6c618dd04c42461",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 79,
"avg_line_length": 30.775,
"alnum_prop": 0.6812889249932305,
"repo_name": "jalbin/plumbery",
"id": "c647f410260affe4aa28ba81b8a436a06a789345",
"size": "3739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "travis_pypi_setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2134"
},
{
"name": "Python",
"bytes": "97748"
},
{
"name": "Shell",
"bytes": "694"
}
],
"symlink_target": ""
} |
import chardet
import six
from django.utils.translation import ugettext_lazy as _
from tablib.compat import BytesIO, StringIO
from tablib.core import Dataset
from tablib.formats import _csv, _json, _xls, _xlsx, _yaml
from typing_extensions import OrderedDict
try:
from yaml import CSafeDumper
except:
CSafeDumper = None
from multi_import.exceptions import InvalidFileError
from multi_import.helpers import strings
class FileFormat(object):
    """Base interface for import/export file formats.

    Subclasses override ``read``/``write`` and may override ``detect`` to
    sniff their format from an uploaded file.
    """

    # Human-readable format name; also the default key and extension.
    title = None

    @property
    def key(self):
        """Identifier used to look this format up (defaults to the title)."""
        return self.title

    @property
    def extension(self):
        """File extension associated with the format (defaults to the key)."""
        return self.key

    def detect(self, file_handler, file_contents):
        """Return True if the file looks like this format (base: never)."""
        return False

    def pre_read(self, file_object):
        """Hook to transform the file object before parsing (base: no-op)."""
        return file_object

    def read(self, file_handler, file_contents):
        """Parse the file into a dataset; must be overridden."""
        raise NotImplementedError()

    def write(self, dataset):
        """Serialize a dataset to a byte stream; must be overridden."""
        raise NotImplementedError()
class TabLibFileFormat(FileFormat):
    """FileFormat backed by a tablib format module (csv, xls, xlsx, ...)."""
    def __init__(
        self,
        file_format,
        content_type,
        read_file_as_string=False,
        empty_file_requires_example_row=False,
    ):
        # The tablib format module (e.g. tablib.formats._csv).
        self.format = file_format
        # MIME type advertised for exports of this format.
        self.content_type = content_type
        # If True, parse the raw contents string instead of the file handle.
        self.read_file_as_string = read_file_as_string
        # If True, an empty export needs one example row to be valid.
        self.empty_file_requires_example_row = empty_file_requires_example_row
    @property
    def key(self):
        # Delegate to the tablib module's title ('csv', 'xls', ...).
        return self.format.title
    def get_file_object(self, file_handler, file_contents):
        """Pick what the tablib parser should consume: contents or handle."""
        if self.read_file_as_string:
            return file_contents
        file_handler.seek(0)
        return file_handler
    def detect(self, file_handler, file_contents):
        """Ask tablib whether the file matches this format.

        Some tablib format modules have no detect(); treat that as
        'not detected' rather than an error.
        """
        file_object = self.get_file_object(file_handler, file_contents)
        try:
            return self.format.detect(file_object)
        except AttributeError:
            pass
        return False
    def pre_read(self, file_object):
        # Default: hand the object to the parser unchanged.
        return file_object
    def read(self, file_handler, file_contents):
        """Parse the file into a tablib Dataset, or raise InvalidFileError."""
        file_object = self.get_file_object(file_handler, file_contents)
        file_object = self.pre_read(file_object)
        try:
            dataset = Dataset()
            try:
                self.format.import_set(dataset, file_object)
            except TypeError:
                # Versions of tablib>=0.11.5 expect a
                # buffer-like object to pass to BytesIO
                self.format.import_set(dataset, file_object.read())
            return dataset
        except (AttributeError, KeyError):
            # tablib raises these on malformed/empty input.
            raise InvalidFileError(_(u"Empty or Invalid File."))
    def export_set(self, dataset):
        """Serialize the dataset with the underlying tablib exporter."""
        return self.format.export_set(dataset)
    def write(self, dataset: Dataset) -> BytesIO:
        """Return a BytesIO stream representing a set of items."""
        data = self.export_set(dataset)
        return self._write_to_bytes(data)
    def _write_to_bytes(self, data) -> BytesIO:
        # Normalize text output to UTF-8 bytes before streaming it out.
        f = BytesIO()
        if isinstance(data, six.text_type):
            data = data.encode("utf-8")
        f.write(data)
        return f
class CsvFormat(TabLibFileFormat):
    """CSV import/export; sniffs the byte encoding of uploaded files."""

    def __init__(self):
        super(CsvFormat, self).__init__(
            _csv, "application/csv", read_file_as_string=True
        )

    @classmethod
    def ensure_unicode(cls, file_contents):
        """Re-encode raw bytes to UTF-8, auto-detecting the source charset.

        Text input is returned untouched; byte input is decoded with the
        charset chardet reports and re-encoded as UTF-8.
        """
        if isinstance(file_contents, six.text_type):
            return file_contents

        detected = chardet.detect(file_contents)
        encoding = detected["encoding"]
        # Only trust the charset guess when chardet is reasonably confident.
        if not encoding or detected["confidence"] <= 0.5:
            raise InvalidFileError(_(u"Unknown file type."))
        return file_contents.decode(encoding.lower()).encode("utf8")

    def pre_read(self, file_object):
        """Normalize the upload's encoding/content before the CSV parser."""
        return strings.normalize_string(self.ensure_unicode(file_object))
class JsonFormat(TabLibFileFormat):
    """JSON import/export with pretty-printed, unicode-preserving output."""

    def __init__(self):
        super(JsonFormat, self).__init__(
            _json,
            "application/json",
            read_file_as_string=True,
            empty_file_requires_example_row=True,
        )

    def export_set(self, dataset):
        """Dump the dataset rows as indented JSON, keeping key order."""
        return _json.json.dumps(
            dataset.dict,
            indent=2,
            sort_keys=False,
            ensure_ascii=False,
            default=_json.date_handler,
        )
class YamlFormat(TabLibFileFormat):
    """YAML import/export; prefers the C-accelerated dumper when available."""

    def __init__(self):
        super(YamlFormat, self).__init__(
            _yaml, "application/x-yaml", empty_file_requires_example_row=True
        )

    def export_set(self, dataset):
        """Dump the dataset as block-style YAML, keeping key order."""
        payload = dataset._package(ordered=False)
        options = dict(
            allow_unicode=True,
            default_flow_style=False,
            sort_keys=False,
        )
        # Use the C-based CSafeDumper when libyaml is present; otherwise
        # fall back to the pure-Python SafeDumper.
        if CSafeDumper:
            return _yaml.yaml.dump(payload, Dumper=CSafeDumper, **options)
        return _yaml.yaml.safe_dump(payload, **options)

    def detect(self, file_handler, file_contents):
        """Sniff YAML, translating parser errors into InvalidFileError."""
        try:
            return super(YamlFormat, self).detect(file_handler, file_contents)
        except _yaml.yaml.error.YAMLError:
            raise InvalidFileError(_(u"Invalid YAML File."))
class TxtFormat(FileFormat):
    """Plain-text export: one underlined heading plus value per field.

    Import is not supported; ``detect`` always returns False so this
    format is only ever chosen explicitly.
    """

    title = "txt"
    content_type = "text/plain"

    def detect(self, file_handler, file_contents):
        """Txt is write-only, so never claim an uploaded file."""
        return False

    def read(self, file_handler, file_contents):
        """Reading txt exports back in is not supported."""
        raise NotImplementedError()

    def write(self, dataset):
        """Render each row as underlined field headings followed by values.

        Returns a BytesIO of UTF-8 encoded text. Fix: the previous
        implementation mixed str and bytes (it wrote ``key.encode(...)``
        into a StringIO and the resulting str into a BytesIO), which
        raises TypeError on Python 3. Everything is now assembled as text
        and encoded exactly once at the end.
        """
        stream = StringIO()
        for row in dataset._package():
            for key, val in row.items():
                underline = "-" * len(key)
                stream.write(underline + "\n")
                stream.write(key + "\n")
                stream.write(underline + "\n")
                stream.write(val + "\n\n")
            # Row separator.
            stream.write("\n" + "*" * 50 + "\n\n\n")

        f = BytesIO()
        f.write(stream.getvalue().encode("utf-8"))
        return f
# Singleton format instances used by the import/export machinery.
# NOTE: `csv`, `json` and `yaml` shadow the stdlib modules of the same
# name within this module.
csv = CsvFormat()
txt = TxtFormat()
xls = TabLibFileFormat(_xls, "application/vnd.ms-excel", read_file_as_string=True)
xlsx = TabLibFileFormat(
    _xlsx, "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
)
json = JsonFormat()
yaml = YamlFormat()

# All supported formats. TxtFormat.detect() always returns False, so txt is
# effectively export-only.
all_formats = (xlsx, xls, csv, json, yaml, txt)

# Upload content types accepted by the importer.
supported_mimetypes = (
    "text/plain",
    "text/csv",
    "application/vnd.ms-excel",
    "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
    "application/json",
    "application/x-yaml",
    "text/yaml",
    # When Content-Type unspecified, defaults to this.
    # https://sdelements.atlassian.net/browse/LIBR-355
    # https://stackoverflow.com/questions/12061030/why-am-i-getting-mime-type-of-csv-file-as-application-octet-stream
    "application/octet-stream",
)
| {
"content_hash": "ece5fd4aeb900ea9471850cbadbd0dba",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 117,
"avg_line_length": 29,
"alnum_prop": 0.6052517674217285,
"repo_name": "sdelements/django-multi-import",
"id": "20181d95839444606e6a1d188aaf0771b3ee8024",
"size": "6931",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "multi_import/formats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "193"
},
{
"name": "Python",
"bytes": "68511"
}
],
"symlink_target": ""
} |
'''
This program will take a /24 IP Network input and print the list regardless of entry
forms: 10.2.2. or 10.2.3 or 10.2.3.0
'''
# Request the IP network from the user (Python 2 raw_input).
a = raw_input("Enter an IP Network: ")
# Split the dotted-quad string into a list of octet strings.
octets = a.split(".")
# Keep only the first three octets; this normalizes all accepted entry forms.
octets = octets[:3]
# Append '0' as the fourth octet to form the /24 network number.
octets.append('0')
# Show the parsed octets.
print "Octets of the IP Network entered are: %s" % (octets)
# Re-join the octets into dotted-quad network notation.
ip_netw = ".".join(octets)
print "%20s %20s %20s" % ('NETWORK_NUMBER','FIRST_OCTET_BINARY','FIRST_OCTET_HEX')
# Show the first octet converted to binary and hexadecimal.
first_bin = bin(int(octets[0]))
first_hex = hex(int(octets[0]))
print "%20s %20s %20s" % (ip_netw,first_bin,first_hex)
| {
"content_hash": "4503443d641fa67935b07b8a4a2100b1",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 85,
"avg_line_length": 20.735294117647058,
"alnum_prop": 0.675177304964539,
"repo_name": "shamurti/Python_Class",
"id": "c6dc2648d179a1beb0668a47be889e198c26e9b4",
"size": "728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "using_slice_conversion_etc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "53853"
}
],
"symlink_target": ""
} |
## Mako template that renders a Python function definition: the signature
## comes from ${function_name} and ${input_names}, the body lines from
## body_code, and the final statement from ${return_code}. Lines starting
## with '##' are Mako comments and are stripped from the rendered output.
def ${function_name}(${', '.join(input_names)}, *args, **kwargs):
%for line in body_code.splitlines():
    ${line}
%endfor
    ${return_code}
| {
"content_hash": "c5858c3da814ee1248c98aeeafbe0704",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 65,
"avg_line_length": 28.4,
"alnum_prop": 0.6056338028169014,
"repo_name": "jcrudy/sklearntools",
"id": "d5bb6fd3e4ff828d738a202dc66e490ac902757b",
"size": "142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sklearntools/sym/resources/python_function_template.mako.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "753"
},
{
"name": "Python",
"bytes": "338122"
}
],
"symlink_target": ""
} |
"""Find the IP address of a VirtualBox virtual machine and ssh into it.\
Add or update /etc/hosts entries upon user interaction."""
import os
import subprocess
import sys
def check_exists(name):
    """Check if the virtual machine exists."""
    listing = subprocess.Popen(["VBoxManage", "list", "vms"], stdout=subprocess.PIPE)
    # Membership test already yields the True/False the callers expect.
    return name in listing.communicate()[0]
def check_up(name):
    """Check if the virtual machine is currently powered on."""
    running = subprocess.Popen(["VBoxManage", "list", "runningvms"], stdout=subprocess.PIPE)
    # Membership test already yields the True/False the callers expect.
    return name in running.communicate()[0]
def find_host(name):
    """Check if an entry already exists in /etc/hosts.

    Fix: the original opened /etc/hosts without ever closing it; a context
    manager guarantees the handle is released.
    """
    with open("/etc/hosts", "r") as hosts:
        return any(name in line for line in hosts)
def host_outdated(address, name):
    """Check if the entry for the virtual machine in /etc/hosts is outdated.

    Returns True when a line mentions *name* but not *address*.
    Fix: the original leaked the file handle; use a context manager.
    """
    with open("/etc/hosts", "r") as hosts:
        for line in hosts:
            if name in line and address not in line:
                return True
    return False
def add_host(address, name):
    """Add an entry in /etc/hosts for the virtual machine.

    Fix: the original never closed/flushed the temp file before handing it
    to `mv`, so buffered data could be lost; it also leaked the /etc/hosts
    handle. Both files are now managed with `with`.
    """
    with open("/etc/hosts", "rt") as hosts:
        hosts_contents = hosts.read() + "\n" + address + "\t" + name + "\n"
    with open("/tmp/etc_hosts.tmp", "wt") as temp_hosts:
        temp_hosts.write(hosts_contents)
    # Move the temp hosts file into place with sudo permissions
    os.system("sudo mv /tmp/etc_hosts.tmp /etc/hosts")
def update_host(address, name):
    """Update an entry in /etc/hosts to have the correct IP address.

    Fix: the original never closed/flushed the temp file before handing it
    to `mv`, so buffered lines could be lost; it also leaked the /etc/hosts
    handle. Both files are now managed with `with`.
    """
    with open("/etc/hosts", "r") as hosts:
        data = hosts.readlines()
    new_hosts = []
    for line in data:
        if name in line:
            # Replace the stale line with the fresh address.
            new_hosts.append(address + "\t" + name + "\n")
        else:
            new_hosts.append(line)
    with open("/tmp/etc_hosts.tmp", "wt") as temp_hosts:
        temp_hosts.writelines(new_hosts)
    # Move the temp hosts file into place with sudo permissions
    os.system("sudo mv /tmp/etc_hosts.tmp /etc/hosts")
def main():  # Define as a function to adhere to style guidelines
    """Ssh into the named VirtualBox VM, keeping /etc/hosts in sync.

    Expects the VM name as the single CLI argument; multi-word names must be
    quoted so they arrive as one argument (enforced below).
    """
    try:
        sys.argv[1]
    except IndexError:
        print "Missing name of virtual machine"
        return
    # Check if the user is supplying the virtual machine's name correctly:
    # a second positional argument means an unquoted multi-word name.
    try:
        sys.argv[2]
    # If the name is correct, run the program
    except IndexError:
        if not check_exists(sys.argv[1]):
            print "The specified virtual machine does not appear to exist."
            return
        if not check_up(sys.argv[1]):
            headless_input = raw_input("The specified virtual machine does not appear to be running. Would you like to start the machine in 'headless' mode? [Y/n] ")
            if len(headless_input) == 0 or headless_input == "Y" or headless_input == "y":  # If the user responds in the affirmative
                subprocess.Popen(["VBoxManage", "startvm", sys.argv[1], "--type", "headless"], stdout=subprocess.PIPE)
                print "Please wait for the machine to boot before trying to connect again."
                return
            else:
                return
        # Ask the VirtualBox guest properties for the VM's IPv4 address.
        virtualbox_ip = subprocess.Popen(["VBoxManage", "guestproperty", "get", sys.argv[1], "/VirtualBox/GuestInfo/Net/0/V4/IP"], stdout=subprocess.PIPE)
        ip_response = virtualbox_ip.communicate()[0]
        if ip_response == "No value set!\n":
            print "Could not find the virtual machine's IP address. Are network settings configured correctly and are VirtualBox Guest additions installed on the virtual machine?"
            return
        # ip_response is presumably of the form "Value: <ip>", so
        # split()[1] extracts the address — TODO confirm on target VBox.
        if find_host(sys.argv[1]):
            if host_outdated(ip_response.split()[1], sys.argv[1]):
                hosts_input = raw_input("/etc/hosts has an outdated entry for this virtual machine. Would you like to update it? [Y/n] ")
                if len(hosts_input) == 0 or hosts_input == "Y" or hosts_input == "y":  # If the user responds in the affirmative
                    update_host(ip_response.split()[1], sys.argv[1])
        else:
            hosts_input = raw_input("/etc/hosts does not have an entry for this virtual machine. Would you like to add one? [Y/n] ")
            if len(hosts_input) == 0 or hosts_input == "Y" or hosts_input == "y":  # If the user responds in the affirmative
                add_host(ip_response.split()[1], sys.argv[1])
        os.system("ssh " + ip_response.split()[1])
    else:
        print "If your virtual machine's name contains spaces, please wrap it in quotes."
        return


main()  # Run the function so the module is useful in a CLI
| {
"content_hash": "a0c57b2ed40c8fe9da283da402507118",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 179,
"avg_line_length": 36.784615384615385,
"alnum_prop": 0.6200334588038477,
"repo_name": "jessemillar/pythonista",
"id": "e43c119d4db52aa64705756ce99b8c481c466fc1",
"size": "4782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Desktop/vssh.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6857"
}
],
"symlink_target": ""
} |
import base64, os
def get_content(texts):
    """Return *texts* as a list of characters, reading from disk if needed.

    If *texts* is a string it is treated as a file path: the file is read and
    its utf-8 decoded contents are split into a character list (Python 2
    semantics — `unicode` and `str.decode`). Any other value is returned
    unchanged.
    """
    if isinstance(texts, str) or isinstance(texts, unicode):
        file_path = texts
        with open(file_path, 'r') as f:
            # `with` closes the file; the explicit close() in the original
            # was redundant.
            return list(f.read().decode("utf-8"))
    return texts
def write_file(path, data):
    """Write *data* to the file at *path*, overwriting any existing content.

    Fix: dropped the explicit close() — `with` already closes the handle.
    """
    with open(path, "w") as f:
        f.write(data)
def read_by_base64(path):
    """Read the file at *path* and return its contents base64-encoded.

    Fix: open in binary mode so arbitrary bytes (e.g. font files) encode
    correctly — text mode mangles binary data on Windows and fails outright
    on Python 3, where b64encode requires bytes. The explicit close() was
    redundant under `with` and has been dropped.
    """
    with open(path, "rb") as f:
        return base64.b64encode(f.read())
def delete_file(path):
    """Remove the file at *path* from the filesystem."""
    os.remove(path)
def delete_files(paths):
    """Remove every file named in *paths*."""
    for file_path in paths:
        delete_file(file_path)
| {
"content_hash": "7a43f45795c082df6336247d7875d539",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 60,
"avg_line_length": 20.59375,
"alnum_prop": 0.5614567526555387,
"repo_name": "eHanlin/font-generator",
"id": "c2064c416dab128850beca360bb53cec76fc04a0",
"size": "659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fontGenerator/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "523"
},
{
"name": "Python",
"bytes": "9782"
},
{
"name": "Shell",
"bytes": "1802"
}
],
"symlink_target": ""
} |
from setuptools import setup
import sys
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
# Distribution metadata for the azure-storage package.
setup(
    name='azure-storage',
    version='0.20.1',
    description='Microsoft Azure Storage Client Library for Python',
    long_description=open('README.rst', 'r').read(),
    license='Apache License 2.0',
    author='Microsoft Corporation',
    author_email='ptvshelp@microsoft.com',
    url='https://github.com/Azure/azure-storage-python',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'License :: OSI Approved :: Apache Software License',
    ],
    zip_safe=False,
    packages=[
        'azure',
        'azure.storage',
        'azure.storage._http',
        'azure.storage.blob',
        'azure.storage.queue',
        'azure.storage.table',
        'azure.storage.file',
    ],
    # 'futures' backports concurrent.futures onto Python 2.
    install_requires=[
        'azure-nspkg',
        'azure-common',
        'python-dateutil',
        'requests',
    ] + (['futures'] if sys.version_info < (3,0) else []),
)
| {
"content_hash": "df345fbaf95869554e29e3a3898a840c",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 73,
"avg_line_length": 30.25925925925926,
"alnum_prop": 0.5899632802937577,
"repo_name": "phonnz/azure-storage-python",
"id": "73648d88f1682d792a779a820f36d2e9bb33e1c1",
"size": "2400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1242"
},
{
"name": "Python",
"bytes": "739574"
}
],
"symlink_target": ""
} |
"""
Firebase Cloud Messaging
Previously known as GCM / C2DM
Documentation is available on the Firebase Developer website:
https://firebase.google.com/docs/cloud-messaging/
"""
import json
from django.core.exceptions import ImproperlyConfigured
from .compat import Request, urlopen
from .conf import get_manager
from .exceptions import NotificationError
from .models import GCMDevice
# Valid keys for FCM messages. Reference:
# https://firebase.google.com/docs/cloud-messaging/http-server-ref
# Target selectors accepted at the top level of an FCM payload.
FCM_TARGETS_KEYS = [
    "to", "condition", "notification_key"
]
# Delivery options accepted at the top level of an FCM payload.
FCM_OPTIONS_KEYS = [
    "collapse_key", "priority", "content_available", "delay_while_idle", "time_to_live",
    "restricted_package_name", "dry_run"
]
# Keys that _cm_send_request moves into the "notification" sub-payload.
FCM_NOTIFICATIONS_PAYLOAD_KEYS = [
    "title", "body", "icon", "sound", "badge", "color", "tag", "click_action",
    "body_loc_key", "body_loc_args", "title_loc_key", "title_loc_args", "android_channel_id"
]
class GCMError(NotificationError):
    """Raised when the GCM/FCM response reports errors other than
    invalid/unregistered registration IDs (see _cm_handle_response)."""
    pass
def _chunks(l, n):
"""
Yield successive chunks from list \a l with a minimum size \a n
"""
for i in range(0, len(l), n):
yield l[i:i + n]
def _gcm_send(data, content_type, application_id):
    """POST *data* to the GCM endpoint and return the decoded response body."""
    api_key = get_manager().get_gcm_api_key(application_id)
    request = Request(
        get_manager().get_post_url("GCM", application_id),
        data,
        {
            "Content-Type": content_type,
            "Authorization": "key=%s" % (api_key),
            "Content-Length": str(len(data)),
        },
    )
    response = urlopen(
        request, timeout=get_manager().get_error_timeout("GCM", application_id)
    )
    return response.read().decode("utf-8")
def _fcm_send(data, content_type, application_id):
    """POST *data* to the FCM endpoint and return the decoded response body."""
    api_key = get_manager().get_fcm_api_key(application_id)
    request = Request(
        get_manager().get_post_url("FCM", application_id),
        data,
        {
            "Content-Type": content_type,
            "Authorization": "key=%s" % (api_key),
            "Content-Length": str(len(data)),
        },
    )
    response = urlopen(
        request, timeout=get_manager().get_error_timeout("FCM", application_id)
    )
    return response.read().decode("utf-8")
def _cm_handle_response(registration_ids, response_data, cloud_type, application_id=None):
    """Process a decoded GCM/FCM JSON response.

    Deactivates devices whose registration IDs are reported dead, migrates
    canonical-ID replacements, and raises GCMError if any other error is
    present. Returns the response dict unchanged.
    # application_id is accepted for call-site symmetry; it is unused here.
    """
    response = response_data
    if response.get("failure") or response.get("canonical_ids"):
        ids_to_remove, old_new_ids = [], []
        throw_error = False
        # Results are positional: results[i] corresponds to
        # registration_ids[i] from the request.
        for index, result in enumerate(response["results"]):
            error = result.get("error")
            if error:
                # https://firebase.google.com/docs/cloud-messaging/http-server-ref#error-codes
                # If error is NotRegistered or InvalidRegistration, then we will deactivate devices
                # because this registration ID is no more valid and can't be used to send messages,
                # otherwise raise error
                if error in ("NotRegistered", "InvalidRegistration"):
                    ids_to_remove.append(registration_ids[index])
                else:
                    throw_error = True
                    result["original_registration_id"] = registration_ids[index]
            # If registration_id is set, replace the original ID with the new value (canonical ID)
            # in your server database. Note that the original ID is not part of the result, you need
            # to obtain it from the list of registration_ids in the request (using the same index).
            new_id = result.get("registration_id")
            if new_id:
                old_new_ids.append((registration_ids[index], new_id))
        if ids_to_remove:
            removed = GCMDevice.objects.filter(
                registration_id__in=ids_to_remove, cloud_message_type=cloud_type
            )
            removed.update(active=False)
        for old_id, new_id in old_new_ids:
            _cm_handle_canonical_id(new_id, old_id, cloud_type)
        # Raise only after the bookkeeping above so dead IDs are still pruned.
        if throw_error:
            raise GCMError(response)
    return response
def _cm_send_request(
    registration_ids, data, cloud_type="GCM", application_id=None,
    use_fcm_notifications=True, **kwargs
):
    """
    Sends a FCM or GCM notification to one or more registration_ids as json data.
    The registration_ids needs to be a list.
    """
    payload = {"registration_ids": registration_ids} if registration_ids else {}
    # Copy before popping keys below so the caller's dict is not mutated.
    data = data.copy()

    # If using FCM, optionally autodiscovers notification related keys
    # https://firebase.google.com/docs/cloud-messaging/concept-options#notifications_and_data_messages
    if cloud_type == "FCM" and use_fcm_notifications:
        notification_payload = {}
        # "message" is this module's alias for the notification body.
        if "message" in data:
            notification_payload["body"] = data.pop("message", None)
        # Notification keys may arrive in `data` or as kwargs; kwargs are
        # applied second and therefore win on conflict.
        for key in FCM_NOTIFICATIONS_PAYLOAD_KEYS:
            value_from_extra = data.pop(key, None)
            if value_from_extra:
                notification_payload[key] = value_from_extra
            value_from_kwargs = kwargs.pop(key, None)
            if value_from_kwargs:
                notification_payload[key] = value_from_kwargs
        if notification_payload:
            payload["notification"] = notification_payload
    if data:
        payload["data"] = data

    # Attach any additional non falsy keyword args (targets, options)
    # See ref : https://firebase.google.com/docs/cloud-messaging/http-server-ref#table1
    payload.update({
        k: v for k, v in kwargs.items() if v and (k in FCM_TARGETS_KEYS or k in FCM_OPTIONS_KEYS)
    })

    # Sort the keys for deterministic output (useful for tests)
    json_payload = json.dumps(payload, separators=(",", ":"), sort_keys=True).encode("utf-8")

    # Sends requests and handles the response
    if cloud_type == "GCM":
        response = json.loads(_gcm_send(
            json_payload, "application/json", application_id=application_id
        ))
    elif cloud_type == "FCM":
        response = json.loads(_fcm_send(
            json_payload, "application/json", application_id=application_id
        ))
    else:
        raise ImproperlyConfigured("cloud_type must be FCM or GCM not %s" % str(cloud_type))
    return _cm_handle_response(registration_ids, response, cloud_type, application_id)
def _cm_handle_canonical_id(canonical_id, current_id, cloud_type):
    """
    Handle situation when FCM server response contains canonical ID
    """
    devices = GCMDevice.objects.filter(cloud_message_type=cloud_type)
    canonical_is_active = devices.filter(
        registration_id=canonical_id, active=True
    ).exists()
    stale = devices.filter(registration_id=current_id)
    if canonical_is_active:
        # The canonical device already exists and is active: retire the stale one.
        stale.update(active=False)
    else:
        # Migrate the stale registration over to the canonical ID.
        stale.update(registration_id=canonical_id)
def send_message(registration_ids, data, cloud_type, application_id=None, **kwargs):
    """
    Sends a FCM (or GCM) notification to one or more registration_ids. The registration_ids
    can be a list or a single string. This will send the notification as json data.

    A reference of extra keyword arguments sent to the server is available here:
    https://firebase.google.com/docs/cloud-messaging/http-server-ref#table1
    """
    if cloud_type in ("FCM", "GCM"):
        max_recipients = get_manager().get_max_recipients(cloud_type, application_id)
    else:
        raise ImproperlyConfigured("cloud_type must be FCM or GCM not %s" % str(cloud_type))

    # Checks for valid recipient: either registration IDs or a topic target.
    if registration_ids is None and "/topics/" not in kwargs.get("to", ""):
        return

    # Bundles the registration_ids in a list if only one is sent
    if not isinstance(registration_ids, list):
        registration_ids = [registration_ids] if registration_ids else None

    # FCM only allows up to 1000 reg ids per bulk message
    # https://firebase.google.com/docs/cloud-messaging/server#http-request
    if registration_ids:
        ret = []
        for chunk in _chunks(registration_ids, max_recipients):
            ret.append(_cm_send_request(
                chunk, data, cloud_type=cloud_type, application_id=application_id, **kwargs
            ))
        return ret[0] if len(ret) == 1 else ret
    else:
        # BUGFIX: this topic-send branch previously omitted application_id,
        # so topic messages were always sent with the default application's
        # credentials instead of the requested one.
        return _cm_send_request(
            None, data, cloud_type=cloud_type, application_id=application_id, **kwargs
        )


send_bulk_message = send_message
| {
"content_hash": "8ef9c92f20b5e9489352343c12c5591d",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 99,
"avg_line_length": 34.64622641509434,
"alnum_prop": 0.718992511912866,
"repo_name": "matthewh/django-push-notifications",
"id": "854e5667c642e31ff9995470b51fcb7d5e2ef24e",
"size": "7345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "push_notifications/gcm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "126422"
}
],
"symlink_target": ""
} |
from ryu.lib.mac import haddr_to_bin
from ryu.lib.stringify import StringifyMixin
from ryu.ofproto.ofproto_v1_0_parser import OFPMatch
class PacketMatch(StringifyMixin):
    """Container for OpenFlow 1.0 match fields."""

    def __init__(self, in_port=None, dl_src=None, dl_dst=None, dl_vlan=None,
                 dl_vlan_pcp=None, dl_type=None, nw_tos=None, nw_proto=None,
                 nw_src=None, nw_dst=None, tp_src=None, tp_dst=None,
                 nw_src_mask=None, nw_dst_mask=None):
        super(PacketMatch, self).__init__()
        self.in_port = in_port
        self.dl_src = dl_src
        self.dl_dst = dl_dst
        self.dl_vlan = dl_vlan
        self.dl_vlan_pcp = dl_vlan_pcp
        self.dl_type = dl_type
        self.nw_tos = nw_tos
        self.nw_proto = nw_proto
        self.nw_src = nw_src
        self.nw_dst = nw_dst
        self.tp_src = tp_src
        self.tp_dst = tp_dst
        self.nw_src_mask = nw_src_mask
        self.nw_dst_mask = nw_dst_mask

    def __add__(self, other):
        assert isinstance(other, self.__class__)
        # Field-wise merge: keep our value when truthy, else take the other's.
        merged = {
            field: getattr(self, field) or getattr(other, field)
            for field in self.__dict__
        }
        return PacketMatch(**merged)

    def to_ofp_match(self):
        """Build a ryu OFPMatch, packing MAC address strings to binary."""
        kwargs = self.__dict__.copy()
        for mac_field in ("dl_src", "dl_dst"):
            if kwargs[mac_field] is not None:
                kwargs[mac_field] = haddr_to_bin(kwargs[mac_field])
        return OFPMatch(**kwargs)
class ACLResult(object):
    """Result of an ACL check: an accept/deny flag plus an associated
    PacketMatch."""

    def __init__(self, accept, match=None):
        super(ACLResult, self).__init__()
        self.accept = accept
        # BUGFIX: the original default was `match=PacketMatch()`, a mutable
        # default shared by every call site; build a fresh instance instead.
        self.match = match if match is not None else PacketMatch()

    def __add__(self, other):
        assert isinstance(other, self.__class__)
        return ACLResult(self.accept and other.accept,
                         self.match + other.match)

    def __bool__(self):
        return self.accept

    # BUGFIX: Python 2 consults __nonzero__ for truthiness; without this
    # alias, `bool(result)` was always True on Python 2.
    __nonzero__ = __bool__
| {
"content_hash": "f72e90f122abcd402dd034f7ae9a12ac",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 76,
"avg_line_length": 30.983606557377048,
"alnum_prop": 0.5650793650793651,
"repo_name": "shimojo-lab/flowsieve",
"id": "df9eb2cee15a5be76412f85c7a0e98c073091bb1",
"size": "1890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flowsieve/acl/acl_result.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "77179"
},
{
"name": "Ruby",
"bytes": "3095"
},
{
"name": "Shell",
"bytes": "256"
}
],
"symlink_target": ""
} |
import unittest
import re
from passgen import passgen
class TestPassgen(unittest.TestCase):
    """Unit tests for the passgen() password generator."""

    def test_errors(self):
        # Invalid argument combinations must raise ValueError.
        with self.assertRaises(ValueError):
            passgen(length=-2)
        with self.assertRaises(ValueError):
            passgen(case="spam")
        with self.assertRaises(ValueError):
            passgen(digits=False, letters=False)

    def test_case(self):
        # case='upper' / case='lower' must constrain letter casing.
        password = passgen(case='upper')
        self.assertEqual(password, password.upper())
        password = passgen(case='lower')
        self.assertEqual(password, password.lower())

    def test_length(self):
        # The generated password honors the requested length.
        password = passgen(length=10)
        self.assertEqual(len(password), 10)
        password = passgen(length=15, case='upper', digits=False)
        self.assertEqual(len(password), 15)

    def test_limit_punctuation(self):
        # Only punctuation listed in limit_punctuation may appear: stripping
        # letters and '.' must leave nothing behind.
        pl = '.'
        password = passgen(letters=True, digits=False, length=6,
                           punctuation=True, limit_punctuation=pl)
        password = re.sub(r"[a-zA-Z.]", "", password)
        self.assertTrue(password == '')

    def test_only_letters(self):
        # With digits disabled, removing letters must leave nothing.
        password = passgen(letters=True, digits=False)
        password = re.sub(r"[a-zA-Z]", "", password)
        self.assertTrue(password == '')

    def test_only_digits(self):
        # With letters disabled, removing digits must leave nothing.
        password = passgen(letters=False, digits=True)
        password = re.sub(r"[0-9]", "", password)
        self.assertTrue(password == '')

    def test_class_always_occurs(self):
        # Generating 4-char password with all classes: each enabled class
        # must appear exactly once, so stripping digits, lowercase, then
        # uppercase removes one character per pass.
        for i in range(1, 100):
            password = passgen(punctuation=True, case='both', length=4)
            password = re.sub(r"[0-9]", "", password)
            self.assertTrue(len(password) == 3)
            password = re.sub(r"[a-z]", "", password)
            self.assertTrue(len(password) == 2)
            password = re.sub(r"[A-Z]", "", password)
            self.assertTrue(len(password) == 1)


if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "6e7de9f1f88a3c6b821592f9efa80401",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 71,
"avg_line_length": 32.274193548387096,
"alnum_prop": 0.5897051474262869,
"repo_name": "soslan/passgen",
"id": "f2cf1fa43e0b7c33730bf6bb688e2d98b1b17508",
"size": "2001",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_passgen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10676"
}
],
"symlink_target": ""
} |
import array
import struct
from impacket import ImpactPacket
from impacket import uuid
from impacket import dcerpc
from impacket.dcerpc import ndrutils
from impacket.dcerpc import transport
# Binary UUID/version pair identifying the DCE/RPC management (MGMT) interface.
MSRPC_UUID_MGMT = uuid.uuidtup_to_bin(("afa8bd80-7d8a-11c9-bef4-08002b102989", "1.0"))
class IfIdsRequestHeader(ImpactPacket.Header):
    """MGMT inq_if_ids request: operation number 0 with an empty body."""
    OP_NUM = 0

    def get_header_size(self):
        # The request carries no payload.
        return 0
class IdIdsResponseHeader(ImpactPacket.Header):
    """Parses the inq_if_ids response: a fixed 12-byte header, then one
    4-byte word per interface, then one 20-byte binary UUID per interface."""

    __SIZE = 12

    def __init__(self, aBuffer = None):
        ImpactPacket.Header.__init__(self, IdIdsResponseHeader.__SIZE)
        # Fields are little-endian.
        self.endianness = '<'
        if aBuffer: self.load_header(aBuffer)

    def get_ifcount(self):
        # Interface count lives at byte offset 4.
        return self.get_long(4, self.endianness)

    def _get_iflists_offset(self):
        # UUID list starts after the fixed header plus one word per interface.
        return 12 + 4 * self.get_ifcount()

    def get_if_binuuid(self, index):
        """Return the 20-byte binary UUID of the interface at *index*."""
        offset = self._get_iflists_offset() + 20*index
        return self.get_bytes()[offset:offset+20]

    def get_header_size(self):
        return IdIdsResponseHeader.__SIZE + 4 * self.get_ifcount() + 20 * self.get_ifcount()
class DCERPCMgmt:
    """Client for the DCE/RPC management (MGMT) interface."""

    def __init__(self, dcerpc):
        self._dcerpc = dcerpc

    def inq_if_ids(self):
        """Send an inq_if_ids request and return the parsed response header."""
        request = IfIdsRequestHeader()
        self._dcerpc.send(request)
        raw_response = self._dcerpc.recv()
        return IdIdsResponseHeader(raw_response)
| {
"content_hash": "001c6f41180d65812a4e9ce7b0b6c702",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 92,
"avg_line_length": 27.442307692307693,
"alnum_prop": 0.6517168885774351,
"repo_name": "hecchi777/S3-SlaacSecuritySolution",
"id": "114576f1183f86925a1d3edd5c78f2ac1bb238f4",
"size": "1743",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "impacket-0.9.11/impacket/dcerpc/mgmt.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1234"
},
{
"name": "C++",
"bytes": "23499"
},
{
"name": "Perl",
"bytes": "6245"
},
{
"name": "Python",
"bytes": "3644642"
},
{
"name": "Shell",
"bytes": "839"
}
],
"symlink_target": ""
} |
"""Test Z-Wave binary sensors."""
import asyncio
import datetime
from unittest.mock import patch
from homeassistant.components.zwave import const
from homeassistant.components.binary_sensor import zwave
from tests.mock.zwave import (
MockNode, MockValue, MockEntityValues, value_changed)
def test_get_device_detects_none(mock_openzwave):
    """Test device is not returned."""
    node = MockNode()
    values = MockEntityValues(primary=MockValue(data=False, node=node))
    assert zwave.get_device(node=node, values=values, node_config={}) is None
def test_get_device_detects_trigger_sensor(mock_openzwave):
    """Test device is a trigger sensor."""
    node = MockNode(
        manufacturer_id='013c', product_type='0002', product_id='0002')
    values = MockEntityValues(primary=MockValue(data=False, node=node))
    device = zwave.get_device(node=node, values=values, node_config={})
    assert isinstance(device, zwave.ZWaveTriggerSensor)
    assert device.device_class == "motion"
def test_get_device_detects_workaround_sensor(mock_openzwave):
    """Test that workaround returns a binary sensor."""
    node = MockNode(manufacturer_id='010f', product_type='0b00')
    alarm_value = MockValue(data=False, node=node,
                            command_class=const.COMMAND_CLASS_SENSOR_ALARM)
    device = zwave.get_device(
        node=node, values=MockEntityValues(primary=alarm_value), node_config={})
    assert isinstance(device, zwave.ZWaveBinarySensor)
def test_get_device_detects_sensor(mock_openzwave):
    """Test that device returns a binary sensor."""
    node = MockNode()
    binary_value = MockValue(data=False, node=node,
                             command_class=const.COMMAND_CLASS_SENSOR_BINARY)
    device = zwave.get_device(
        node=node, values=MockEntityValues(primary=binary_value), node_config={})
    assert isinstance(device, zwave.ZWaveBinarySensor)
def test_binary_sensor_value_changed(mock_openzwave):
    """Test value changed for binary sensor."""
    node = MockNode()
    value = MockValue(data=False, node=node,
                      command_class=const.COMMAND_CLASS_SENSOR_BINARY)
    device = zwave.get_device(
        node=node, values=MockEntityValues(primary=value), node_config={})
    # Initially off; flipping the value and signalling turns it on.
    assert not device.is_on
    value.data = True
    value_changed(value)
    assert device.is_on
@asyncio.coroutine
def test_trigger_sensor_value_changed(hass, mock_openzwave):
    """Test value changed for trigger sensor."""
    node = MockNode(
        manufacturer_id='013c', product_type='0002', product_id='0002')
    value = MockValue(data=False, node=node)
    value_off_delay = MockValue(data=15, node=node)
    values = MockEntityValues(primary=value, off_delay=value_off_delay)
    device = zwave.get_device(node=node, values=values, node_config={})
    assert not device.is_on
    value.data = True
    # device.hass is not set yet — presumably why no invalidation deadline
    # is recorded on this first change (TODO confirm in the zwave platform).
    yield from hass.async_add_job(value_changed, value)
    assert device.invalidate_after is None
    device.hass = hass
    value.data = True
    yield from hass.async_add_job(value_changed, value)
    assert device.is_on
    # One second before the deadline the sensor still reads on...
    test_time = device.invalidate_after - datetime.timedelta(seconds=1)
    with patch('homeassistant.util.dt.utcnow', return_value=test_time):
        assert device.is_on
    # ...and exactly at the deadline it reports off.
    test_time = device.invalidate_after
    with patch('homeassistant.util.dt.utcnow', return_value=test_time):
        assert not device.is_on
| {
"content_hash": "a8b3e375c7f1a8a60802a74daf38addf",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 71,
"avg_line_length": 34.01960784313726,
"alnum_prop": 0.7028818443804035,
"repo_name": "LinuxChristian/home-assistant",
"id": "a5dabf6953a6664e627d278cdc5fb7e65d3a3da0",
"size": "3470",
"binary": false,
"copies": "13",
"ref": "refs/heads/dev",
"path": "tests/components/binary_sensor/test_zwave.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13788"
},
{
"name": "HTML",
"bytes": "1733802"
},
{
"name": "JavaScript",
"bytes": "15192"
},
{
"name": "Python",
"bytes": "7415265"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "15154"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
Exceptions raised by OSBS
"""
from __future__ import print_function, absolute_import, unicode_literals
import json
from traceback import format_tb
class OsbsException(Exception):
    """Base exception for OSBS errors.

    Optionally wraps an underlying *cause* and its *traceback* so the
    original failure can be rendered alongside this exception.
    """

    def __init__(self, message=None, cause=None, traceback=None):
        if message is None and cause is not None:
            message = repr(cause)
        super(OsbsException, self).__init__(message)
        self.message = message
        self.cause = cause
        self.traceback = traceback

    def __str__(self):
        # Exceptions on Python 3 always carry __context__, so the custom
        # rendering below is only reached on Python 2.
        if not (self.cause and self.traceback) or hasattr(self, '__context__'):
            return super(OsbsException, self).__str__()
        rendered = [
            "%s\n\n" % self.message,
            "Original traceback (most recent call last):\n",
            "".join(format_tb(self.traceback)),
            "%r" % self.cause,
        ]
        return "".join(rendered)

    def __repr__(self):
        if self.cause and not hasattr(self, '__context__'):
            return "OsbsException: %s (from %r)" % (self.message, self.cause)
        return super(OsbsException, self).__repr__()
class OsbsResponseException(OsbsException):
    """ OpenShift didn't respond with OK (200) status """

    def __init__(self, message, status_code, *args, **kwargs):
        super(OsbsResponseException, self).__init__(message, *args, **kwargs)
        self.status_code = status_code
        # try decoding openshift Status object
        # https://docs.openshift.org/latest/rest_api/openshift_v1.html#v1-status
        try:
            decoded = json.loads(message)
        except ValueError:
            decoded = None
        self.json = decoded
class OsbsNetworkException(OsbsException):
    """ cURL returned an error """

    def __init__(self, url, message, status_code, *args, **kwargs):
        combined = "(%s) %s" % (status_code, message)
        super(OsbsNetworkException, self).__init__(combined, *args, **kwargs)
        self.url = url
        self.status_code = status_code
class OsbsAuthException(OsbsException):
    """Raised when authentication fails."""
    pass

class OsbsValidationException(OsbsException):
    """Raised when input fails validation."""
    pass

class OsbsWatchBuildNotFound(OsbsException):
    """ watch stream ended and build was not found """
| {
"content_hash": "d34789295c2760d392c6b22b8917beb5",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 80,
"avg_line_length": 31.68831168831169,
"alnum_prop": 0.5967213114754099,
"repo_name": "jpopelka/osbs-client",
"id": "bd58db19220a836cdaa839d21582f1eb694f2d94",
"size": "2440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "osbs/exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "290758"
},
{
"name": "Shell",
"bytes": "2165"
}
],
"symlink_target": ""
} |
"""
Test Resty Codes
Unit test for the resty codes API.
by: Carl J. Nobile
email: carl.nobile@gmail.com
"""
__docformat__ = "restructuredtext en"
import unittest
from unittest import skip
from io import StringIO
from rulesengine import InvalidNodeSizeException
from restycodes import (RESTYARGS, RestyCodes, ConditionHandler,
InvalidConditionNameException, getCodeStatus,)
class TestRestyCodes(unittest.TestCase):
    """
    Tests for the RestyCodes class.

    Each test feeds a set of condition overrides to the rules engine and
    asserts both the resulting HTTP status code and the number of rule
    iterations taken to reach it (see __runTest).

    NOTE(review): this module uses Python 2 print statements
    (see __printCalls) and is not Python 3 compatible as written.
    """
    # Used by the currently commented-out @skip decorators below.
    SKIP_MESSAGE = "This test will fail until all conditions are implemented."
    def __init__(self, name):
        """
        :Parameters:
          name : str
            Unit test name.
        """
        super(TestRestyCodes, self).__init__(name)
    def setUp(self):
        """
        Create the RestyCodes instance.
        """
        # storeSeq=True records the call sequence so __printCalls can dump it.
        self._rc = RestyCodes(storeSeq=True)
    def tearDown(self):
        pass
    def testSetConditions(self):
        # Test for an invalid keyword arg.
        replacements = {'wrongArg': False}
        self.assertRaises(InvalidConditionNameException,
                          self._rc.setConditions,
                          **replacements)
        # Test for proper operation.
        self.__runTest(4, 401, {'authorized': False,
                                'acceptExists': True})
    def test_serviceAvailable(self):
        self.__runTest(1, 503, {'serviceAvailable': False})
    def test_requestUrlTooLong(self):
        self.__runTest(2, 414, {'requestUrlTooLong': True})
    def test_badRequest(self):
        self.__runTest(3, 400, {'badRequest': True})
    def test_authorized(self):
        self.__runTest(4, 401, {'authorized': False})
    def test_forbidden(self):
        self.__runTest(5, 403, {'forbidden': True})
    def test_notImplemented(self):
        self.__runTest(6, 501, {'notImplemented': True})
    def test_unsupportedMediaType(self):
        self.__runTest(7, 415, {'unsupportedMediaType': True})
    def test_requestEntityTooLarge(self):
        self.__runTest(8, 413, {'requestEntityTooLarge': True})
    def test_options(self):
        self.__runTest(9, 200, {'options': True})
    def test_commonMethod(self):
        self.__runTest(11, 405, {'commonMethod': False})
    def test_knownMethod(self):
        self.__runTest(11, 501, {'commonMethod': False,
                                 'knownMethod': False})
    def test_methodAllowedOnResource(self):
        self.__runTest(11, 405, {'methodAllowedOnResource': False})
    def test_acceptExists(self):
        self.__runTest(27, 200, {'acceptExists': True}) #, calls=True)
    def test_acceptMediaTypeAvaliable(self):
        self.__runTest(13, 406, {'acceptExists': True,
                                 'acceptMediaTypeAvaliable': False})
    def test_acceptLanguageExists(self):
        self.__runTest(27, 200, {'acceptLanguageExists': True}) #, calls=True)
    def test_acceptLanguageAvaliable(self):
        self.__runTest(14, 406, {'acceptLanguageExists': True,
                                 'acceptLanguageAvaliable': False})
    def test_acceptCharacterSetExists(self):
        self.__runTest(27, 200, {'acceptCharacterSetExists': True})
    def test_acceptCharacterSetAvaliable(self):
        self.__runTest(15, 406, {'acceptCharacterSetExists': True,
                                 'acceptCharacterSetAvaliable': False})
    def test_acceptEncodingExists(self):
        self.__runTest(27, 200, {'acceptEncodingExists': True})
    def test_acceptEncodingAvaliable(self):
        self.__runTest(16, 406, {'acceptEncodingExists': True,
                                 'acceptEncodingAvaliable': False})
    #@skip(SKIP_MESSAGE)
    def test_resourceExists(self):
        self.__runTest(26, 200, {'resourceExists': True}, calls=False)
        self.__runTest(20, 404, {'resourceExists': False}, calls=False)
    #@skip(SKIP_MESSAGE)
    def test_ifMatchExists(self):
        self.__runTest(28, 200, {'resourceExists': True,
                                 'ifMatchExists': True}, calls=False)
    #@skip(SKIP_MESSAGE)
    def test_ifMatchAnyExists(self):
        self.__runTest(27, 200, {'resourceExists': True,
                                 'ifMatchExists': True,
                                 'ifMatchAnyExists': True}, calls=False)
        self.__runTest(28, 200, {'resourceExists': True,
                                 'ifMatchExists': True,
                                 'ifMatchAnyExists': False}, calls=False)
        self.__runTest(17, 412, {'resourceExists': False,
                                 'ifMatchAnyExists': True}, calls=False)
    def test_eTagInMatch(self):
        self.__runTest(19, 412, {'resourceExists': True,
                                 'ifMatchExists': True,
                                 'ifMatchAnyExists': False,
                                 'eTagInMatch': False}, calls=False)
        self.__runTest(28, 200, {'resourceExists': True,
                                 'ifMatchExists': True,
                                 'ifMatchAnyExists': False,
                                 'eTagInMatch': True}, calls=False)
    #@skip(SKIP_MESSAGE)
    def test_ifUnmodifiedSinceExists(self):
        self.__runTest(27, 200, {'resourceExists': True,
                                 'ifUnmodifiedSinceExists': True}, calls=False)
    #@skip(SKIP_MESSAGE)
    def test_ifUnmodifiedSinceIsValidDate(self):
        self.__runTest(28, 200, {'resourceExists': True,
                                 'ifUnmodifiedSinceExists': True,
                                 'ifUnmodifiedSinceIsValidDate': True},
                       calls=False)
    def test_lastModifiedGtIfUnmodifiedSince(self):
        self.__runTest(20, 412, {'resourceExists': True,
                                 'ifUnmodifiedSinceExists': True,
                                 'ifUnmodifiedSinceIsValidDate': True,
                                 'lastModifiedGtIfUnmodifiedSince': True})
        self.__runTest(28, 200, {'resourceExists': True,
                                 'ifUnmodifiedSinceExists': True,
                                 'ifUnmodifiedSinceIsValidDate': True,
                                 'lastModifiedGtIfUnmodifiedSince': False})
    def test_put(self):
        self.__runTest(27, 200, {'resourceExists': True,
                                 'put': True})
        self.__runTest(21, 201, {'resourceExists': False,
                                 'put': True})
    def test_applyToDifferentURI(self):
        self.__runTest(19, 301, {'resourceExists': False,
                                 'put': True,
                                 'applyToDifferentURI': True})
    def test_conflict(self):
        self.__runTest(24, 409, {'resourceExists': True,
                                 'put': True,
                                 'conflict': True})
        self.__runTest(20, 409, {'resourceExists': False,
                                 'put': True,
                                 'conflict': True}) #, calls=True)
    def test_newResourceCreated(self):
        self.__runTest(22, 202, {'resourceExists': False,
                                 'put': True,
                                 'newResourceCreated': False}) #, calls=True)
        self.__runTest(24, 202, {'resourceExists': False,
                                 'post': True,
                                 'newResourceCreated': False}) #, calls=True)
    def test_resourcePreviouslyExisted(self):
        self.__runTest(22, 410, {'resourceExists': False,
                                 'resourcePreviouslyExisted': True})
    def test_resourceMovedPermanently(self):
        self.__runTest(20, 301, {'resourceExists': False,
                                 'resourcePreviouslyExisted': True,
                                 'resourceMovedPermanently': True})
    def test_resourceMovedTemporarily(self):
        self.__runTest(21, 307, {'resourceExists': False,
                                 'resourcePreviouslyExisted': True,
                                 'resourceMovedTemporarily': True})
    #@skip(SKIP_MESSAGE)
    def test_post(self):
        self.__runTest(23, 201, {'resourceExists': False,
                                 'post': True})
        self.__runTest(25, 201, {'resourceExists': False,
                                 'resourcePreviouslyExisted': True,
                                 'post': True}) #, calls=True)
    def test_permitPostToMissingResource(self):
        self.__runTest(21, 404, {'resourceExists': False,
                                 'post': True,
                                 'permitPostToMissingResource': False},
                       calls=False)
        self.__runTest(23, 410, {'resourceExists': False,
                                 'resourcePreviouslyExisted': True,
                                 'post': True,
                                 'permitPostToMissingResource': False},
                       calls=False)
    def test_redirect(self):
        self.__runTest(23, 303, {'resourceExists': True,
                                 'post': True,
                                 'redirect': True}, calls=False)
        self.__runTest(23, 303, {'resourceExists': True,
                                 'resourcePreviouslyExisted': True,
                                 'post': True,
                                 'permitPostToMissingResource': True,
                                 'redirect': True}, calls=False)
        self.__runTest(23, 303, {'resourceExists': True,
                                 'post': True,
                                 'permitPostToMissingResource': True,
                                 'redirect': True}, calls=False)
    #@skip(SKIP_MESSAGE)
    def test_ifNoneMatchExists(self):
        self.__runTest(28, 200, {'resourceExists': True,
                                 'ifNoneMatchExists': True}, calls=False)
    #@skip(SKIP_MESSAGE)
    def test_ifNoneMatchAnyExists(self):
        self.__runTest(21, 304, {'resourceExists': True,
                                 'ifNoneMatchExists': True,
                                 'ifNoneMatchAnyExists': True}, calls=False)
        self.__runTest(28, 200, {'resourceExists': True,
                                 'ifNoneMatchExists': True,
                                 'ifNoneMatchAnyExists': False}, calls=False)
    #@skip(SKIP_MESSAGE)
    def test_eTagInIfNoneMatch(self):
        self.__runTest(22, 304, {'resourceExists': True,
                                 'ifNoneMatchExists': True,
                                 'eTagInIfNoneMatch': True}, calls=False)
        self.__runTest(28, 200, {'resourceExists': True,
                                 'ifNoneMatchExists': True,
                                 'eTagInIfNoneMatch': False}, calls=False)
    def test_getOrHead(self):
        self.__runTest(21, 304, {'resourceExists': True,
                                 'ifNoneMatchExists': True,
                                 'ifNoneMatchAnyExists': True,
                                 'getOrHead': True}, calls=False)
        self.__runTest(21, 412, {'resourceExists': True,
                                 'ifNoneMatchExists': True,
                                 'ifNoneMatchAnyExists': True,
                                 'getOrHead': False}, calls=False)
        self.__runTest(22, 304, {'resourceExists': True,
                                 'ifNoneMatchExists': True,
                                 'eTagInIfNoneMatch': True,
                                 'getOrHead': True}, calls=False)
        self.__runTest(22, 412, {'resourceExists': True,
                                 'ifNoneMatchExists': True,
                                 'eTagInIfNoneMatch': True,
                                 'getOrHead': False}, calls=False)
    #@skip(SKIP_MESSAGE)
    def test_ifModifiedSinceExists(self):
        self.__runTest(27, 200, {'resourceExists': True,
                                 'ifModifiedSinceExists': True}, calls=False)
    #@skip(SKIP_MESSAGE)
    def test_ifModifiedSinceIsValidDate(self):
        self.__runTest(27, 200, {'resourceExists': True,
                                 'ifModifiedSinceExists': True,
                                 'ifModifiedSinceIsValidDate': False},
                       calls=False)
        self.__runTest(28, 200, {'resourceExists': True,
                                 'ifModifiedSinceExists': True,
                                 'ifModifiedSinceIsValidDate': True},
                       calls=False)
    #@skip(SKIP_MESSAGE)
    def test_ifModifiedSinceGtNow(self):
        self.__runTest(29, 200, {'resourceExists': True,
                                 'ifModifiedSinceExists': True,
                                 'ifModifiedSinceIsValidDate': True,
                                 'ifModifiedSinceGtNow': False}, calls=False)
        self.__runTest(28, 200, {'resourceExists': True,
                                 'ifModifiedSinceExists': True,
                                 'ifModifiedSinceIsValidDate': True,
                                 'ifModifiedSinceGtNow': True}, calls=False)
    def test_lastModifiedGtIfModifiedSince(self):
        self.__runTest(23, 304, {'resourceExists': True,
                                 'ifModifiedSinceExists': True,
                                 'ifModifiedSinceIsValidDate': True,
                                 'ifModifiedSinceGtNow': False,
                                 'lastModifiedGtIfModifiedSince': False})
        self.__runTest(29, 200, {'resourceExists': True,
                                 'ifModifiedSinceExists': True,
                                 'ifModifiedSinceIsValidDate': True,
                                 'ifModifiedSinceGtNow': False,
                                 'lastModifiedGtIfModifiedSince': True})
    #@skip(SKIP_MESSAGE)
    def test_delete(self):
        self.__runTest(24, 200, {'resourceExists': True,
                                 'delete': True}, calls=False)
        self.__runTest(26, 200, {'resourceExists': True,
                                 'delete': False}, calls=False)
    def test_methodEnacted(self):
        self.__runTest(22, 202, {'resourceExists': True,
                                 'delete': True,
                                 'methodEnacted': False}, calls=False)
    def test_responseIncludesAnEntity(self):
        self.__runTest(22, 204, {'resourceExists': False,
                                 'put': True,
                                 'newResourceCreated': False,
                                 'responseIncludesAnEntity': False},
                       calls=False)
        self.__runTest(23, 204, {'resourceExists': True,
                                 'delete': True,
                                 'responseIncludesAnEntity': False},
                       calls=False)
    def test_multipleRepresentation(self):
        self.__runTest(24, 300, {'resourceExists': True,
                                 'delete': True,
                                 'multipleRepresentation': True})#, calls=True)
        self.__runTest(26, 300, {'resourceExists': True,
                                 'post': True,
                                 'multipleRepresentation': True})#, calls=True)
        self.__runTest(27, 300, {'resourceExists': True,
                                 'put': True,
                                 'multipleRepresentation': True})#, calls=True)
        self.__runTest(26, 300, {'resourceExists': True,
                                 'multipleRepresentation': True})#, calls=True)
    def __runTest(self, expect, code, condition, calls=False):
        """
        Run the engine with the given condition overrides and assert both
        the iteration count (expect) and the status code (code).
        """
        kwargs = self._rc.setConditions(**condition)
        result = self._rc.getStatus(**kwargs)
        count = self._rc.getIterationCount()
        self.__printCalls(calls=calls)
        msg = "Iteration count should be {}, but found {}".format(
            expect, count)
        self.assertTrue(count == expect, msg)
        msg = "Status code should be {}, but found {}".format(
            code, result[0])
        self.assertTrue(result[0] == code, msg)
    def __printCalls(self, calls=False):
        """
        Debug helper: dump the recorded call sequence when calls is True.
        """
        # NOTE: Python 2 print statements below.
        if calls:
            seq = self._rc.getCallSequence()
            print
            for call in seq:
                print call
            print "Total Count: {}".format(len(seq))
class TestConditionHandler(unittest.TestCase):
    """
    Tests for the ConditionHandler class.
    """
    def __init__(self, name):
        """
        :Parameters:
          name : str
            Unit test name.
        """
        super(TestConditionHandler, self).__init__(name)
    def setUp(self):
        """
        Create the ConditionHandler instance.
        """
        self._ch = ConditionHandler()
    def tearDown(self):
        pass
    def test_requestUrlTooLong(self):
        # The URL is 19 characters: limits 20 and 19 pass, 18 overflows.
        for size, code in ((20, 200), (19, 200), (18, 414)):
            self._ch.requestUrlTooLong("someverylongurl.com", size)
            self.__runTest(code, "with size: {}".format(size))
    def test_requestEntityTooLarge(self):
        # Build a 61-byte raw HTTP request; limits 62 and 61 pass, 60 overflows.
        rawEntity = StringIO()
        rawEntity.write(u"GET / http/1.1\r\n")
        rawEntity.write(u"Host: example.org\r\n")
        rawEntity.write(u"\r\n")
        rawEntity.write(u"Some entity body text.\r\n")
        result = rawEntity.getvalue()
        rawEntity.close()
        for size, code in ((62, 200), (61, 200), (60, 413)):
            self._ch.requestEntityTooLarge(result, size)
            self.__runTest(code, "with size: {}".format(size))
    def test_method(self):
        # Common methods -> 200, known-but-disallowed -> 405, unknown -> 501.
        for method, code in (('DELETE', 200), ('GET', 200), ('HEAD', 200),
                             ('PUT', 200), ('POST', 200), ('OPTIONS', 200),
                             ('TRACE', 405), ('CONNECT', 405), ('MOVE', 405),
                             ('PROPPATCH', 405), ('MKCOL', 405), ('COPY', 405),
                             ('UNLOCK', 405), ('UNKNOWN', 501)):
            self._ch.method(method)
            self.__runTest(code, "with method: {}".format(method))
    def __runTest(self, code, message=""):
        """
        Assert the handler's current status equals the status for code.
        """
        msg = "Invalid status: found {}, should be {}"
        found = self._ch.getStatus()
        status = getCodeStatus(code)
        msg += ", " + message
        self.assertTrue(found == status, msg.format(found, status))
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "2747274eab49ca93f620415e0a166077",
"timestamp": "",
"source": "github",
"line_count": 443,
"max_line_length": 79,
"avg_line_length": 41.62979683972912,
"alnum_prop": 0.5041210280880598,
"repo_name": "cnobile2012/restycodes",
"id": "0b72b8aa119e06417bd20fd084238992ece16cc4",
"size": "18490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "restycodes/tests.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1850"
},
{
"name": "Python",
"bytes": "55682"
}
],
"symlink_target": ""
} |
__author__ = "chenghao"
from util import config
import os, logging, re, uuid, json
from logging import handlers
from datetime import datetime, date
from dogpile.cache import make_region
# Basic configuration
ADMIN_PREFIX = "/admin"  # URL prefix for the admin site
UEDITOR_PREFIX = "/ueditor"  # URL prefix for the UEditor rich-text editor
ROW = 10  # rows displayed per page
DB_NAME = "blog"  # database name
BLOG_COOKIE = "blog.cookie"  # cookie name
def get_current_date(pattern="%Y-%m-%d %H:%M:%S", datetime_s=None):
    """Format a date/datetime value (or the current time) as a string.

    :param pattern: strftime pattern applied to datetime values.
    :param datetime_s: value to format; None means "now". date instances are
        always rendered as "%Y-%m-%d"; any other value is returned unchanged.
    :return: the formatted string, or datetime_s itself when unrecognised.
    """
    if datetime_s is None:
        return datetime.now().strftime(pattern)
    # datetime must be tested before date: datetime is a subclass of date.
    if isinstance(datetime_s, datetime):
        return datetime_s.strftime(pattern)
    if isinstance(datetime_s, date):
        return datetime_s.strftime("%Y-%m-%d")
    return datetime_s
def ver_mobile(data):
    """Validate a Chinese mobile number.

    Accepts 11-digit numbers whose first two digits are 13/14/15/17/18.
    :return: a truthy match object on success, None otherwise.
    """
    pattern = re.compile(r"((13|14|15|17|18)\d{9}$)")
    return pattern.match(data)
def ver_email(data):
    """Loosely validate an e-mail address (word@word.word prefix).

    :return: a truthy match object on success, None otherwise.
    """
    pattern = re.compile(r"(\w+[@]\w+[.]\w+)")
    return pattern.match(data)
def random_num():
    """Return a random 32-character lowercase hexadecimal string (UUID4)."""
    token = uuid.uuid4().hex
    return token.lower()
class ComplexEncoder(json.JSONEncoder):
    """JSON encoder that serialises datetime/date objects as strings."""

    def default(self, obj):
        # datetime must be tested first: datetime is a subclass of date.
        if isinstance(obj, datetime):
            return obj.strftime('%Y-%m-%d %H:%M:%S')
        if isinstance(obj, date):
            return obj.strftime('%Y-%m-%d')
        return json.JSONEncoder.default(self, obj)
def total_page(total_rows, rows=ROW):
    """Convert a total row count into a page count (rows per page, default ROW)."""
    pages = (total_rows - 1) / rows + 1
    return int(pages)
class SingletonLog(type):
    """
    Metaclass implementing a singleton logger.

    Calling any class that uses this metaclass returns one shared,
    lazily-configured logging.Logger built from the [log] config section.
    """
    def __init__(cls, name, bases, dict):
        super(SingletonLog, cls).__init__(name, bases, dict)
        # Cached Logger; created on first call.
        cls._instances = None
    def __call__(cls, *args, **kwargs):
        if cls._instances is None:
            # NOTE(review): the instance created here is discarded; the
            # cached "instance" is the configured Logger itself.
            super(SingletonLog, cls).__call__(*args, **kwargs)
            conf = config.Config()
            # Time-rotated log file per the [log] section (Linux path style).
            log_file_path = conf.get("log", "path")
            if not os.path.exists(log_file_path):
                os.makedirs(log_file_path)
            log_handler = handlers.TimedRotatingFileHandler(log_file_path + "/blog", conf.get("log", "when"),
                                                            conf.getint("log", "interval"))
            # Log record format.
            format_ = "%(asctime)s %(pathname)-5s %(funcName)-5s %(lineno)-5s %(levelname)-5s %(message)s"
            log_formatter = logging.Formatter(format_)
            log_handler.setFormatter(log_formatter)
            # Named logger.
            log = logging.getLogger('blog')
            log.addHandler(log_handler)
            # Log level from config.
            log.setLevel(conf.get("log", "level"))
            cls._instances = log
        return cls._instances
class GetLogging(object):
    """
    Accessor for the singleton log instance: GetLogging() returns the Logger.
    """
    # NOTE: Python 2-style metaclass declaration; ignored under Python 3.
    __metaclass__ = SingletonLog
class SingletonDogpile(type):
    """
    Metaclass implementing singleton dogpile cache regions.

    Calling a class with this metaclass returns a (cache, session) pair of
    file-backed dogpile regions configured from the [dogpile] config section.
    """
    def __init__(cls, name, bases, dict):
        super(SingletonDogpile, cls).__init__(name, bases, dict)
        # Cached regions; created together on first call.
        cls._instances_cache = None
        cls._instances_session = None
    def __call__(cls, *args, **kwargs):
        if cls._instances_cache is None and cls._instances_session is None:
            super(SingletonDogpile, cls).__call__(*args, **kwargs)
            conf = config.Config()
            # Ensure the backing file's directory exists.
            file_path = conf.get("dogpile", "cache.file.arguments.filename")
            parent_path = os.path.dirname(file_path)
            if not os.path.exists(parent_path):
                os.makedirs(parent_path)
            # NOTE(review): session.file.expiration_time uses conf.get (str)
            # while every other expiration uses conf.getint — confirm intended.
            dogpile_conf = {
                "cache.memory.backend": conf.get("dogpile", "cache.memory.backend"),
                "cache.memory.expiration_time": conf.getint("dogpile", "cache.memory.expiration_time"),
                "cache.file.backend": conf.get("dogpile", "cache.file.backend"),
                "cache.file.expiration_time": conf.getint("dogpile", "cache.file.expiration_time"),
                "cache.file.arguments.filename": conf.get("dogpile", "cache.file.arguments.filename"),
                "session.memory.backend": conf.get("dogpile", "session.memory.backend"),
                "session.memory.expiration_time": conf.getint("dogpile", "session.memory.expiration_time"),
                "session.file.backend": conf.get("dogpile", "session.file.backend"),
                "session.file.expiration_time": conf.get("dogpile", "session.file.expiration_time"),
                "session.file.arguments.filename": conf.get("dogpile", "session.file.arguments.filename")
            }
            cls._instances_cache = make_region().configure_from_config(dogpile_conf, "cache.file.")
            cls._instances_session = make_region().configure_from_config(dogpile_conf, "session.file.")
        return cls._instances_cache, cls._instances_session
class GetDogpile(object):
    """
    Accessor for the singleton cache: GetDogpile() returns (cache, session).
    """
    # NOTE: Python 2-style metaclass declaration; ignored under Python 3.
    __metaclass__ = SingletonDogpile
# Manual smoke test (Python 2 print statements).
if __name__ == "__main__":
    print total_page(24)
    dogpiles = GetDogpile()
    dogpile_cache = dogpiles[1]
    dogpile_cache.set("chenghao", "222333444")
    print dogpile_cache.get("chenghao")
    dogpile_cache.delete("chenghao")
    print dogpile_cache.get("chenghao")
| {
"content_hash": "7c7e1720819c95def347a43318c0e03f",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 100,
"avg_line_length": 26.627906976744185,
"alnum_prop": 0.6689956331877729,
"repo_name": "chenghao/blog",
"id": "c2c3aaebfbf708ca5b9cf314b6b8de6e17b8d8eb",
"size": "4843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "154235"
},
{
"name": "HTML",
"bytes": "105778"
},
{
"name": "JavaScript",
"bytes": "1582501"
},
{
"name": "Python",
"bytes": "23011"
}
],
"symlink_target": ""
} |
"""
Merge two dictionaries into one.
"""
from typing import Dict
import copy
def merge2(dict1, dict2):
    # type: (Dict, Dict) -> Dict
    """Recursively merge two dictionaries.

    The two input dictionaries can have arbitrary depths. Nested dicts are
    merged recursively and lists are concatenated (dict1's elements first).
    On any other conflict, values from dict2 override dict1. Neither input
    is mutated.

    Args:
        dict1 (dict): input dictionary 1
        dict2 (dict): input dictionary 2

    Returns:
        A new dictionary that combines both inputs.
    """
    merged = copy.deepcopy(dict1)
    for key, value in dict2.items():
        base = dict1.get(key)
        if isinstance(base, dict) and isinstance(value, dict):
            merged[key] = merge2(base, value)       # merge dictionaries
        elif isinstance(base, list) and isinstance(value, list):
            merged[key] = base + value              # merge lists
        else:
            merged[key] = value                     # dict2 wins
    return merged
| {
"content_hash": "248b936f2c40e295007a299cba10c05b",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 57,
"avg_line_length": 27.75,
"alnum_prop": 0.5805805805805806,
"repo_name": "yuhangwang/ninjag-python",
"id": "d2212c5009ebd847bee2d69f51a1d18c12a0648b",
"size": "999",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ninjag/tk/dictTK/merge2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24739"
}
],
"symlink_target": ""
} |
from setuptools import setup
# Package metadata for the c_ext translator; installs a `c_ext` console
# script entry point bound to c_ext.main:main.
setup(
    name='c_ext',
    version='0.1',
    packages=['c_ext'],
    url='https://github.com/KivApple/c_ext',
    license='MIT',
    author='Ivan Kolesnikov',
    author_email='kiv.apple@gmail.com',
    description='Translator from extended C to normal C',
    entry_points={
        'console_scripts': [
            'c_ext = c_ext.main:main'
        ]
    },
    install_requires=[
        'six',
        'pycparser',
        'pycparserext',
        'appdirs'
    ]
)
| {
"content_hash": "a29ca917b177b609d15c84122bf5662b",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 57,
"avg_line_length": 21.82608695652174,
"alnum_prop": 0.547808764940239,
"repo_name": "KivApple/c_ext",
"id": "da1eed3cf799df3c3f3b6dd62229fe71b445c25a",
"size": "524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2239"
},
{
"name": "Python",
"bytes": "98389"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: Architecture, Classifier, DistributionType,
    PlatformName and PythonVersion lookup models (PythonVersion is unique
    per (major, minor))."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Architecture',
            fields=[
                ('key', models.CharField(max_length=16, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=64)),
            ],
            options={
                'ordering': ('name',),
                'verbose_name': 'architecture',
                'verbose_name_plural': 'architectures',
            },
        ),
        migrations.CreateModel(
            name='Classifier',
            fields=[
                ('name', models.CharField(max_length=255, primary_key=True, serialize=False)),
            ],
            options={
                'ordering': ('name',),
                'verbose_name': 'classifier',
                'verbose_name_plural': 'classifiers',
            },
        ),
        migrations.CreateModel(
            name='DistributionType',
            fields=[
                ('key', models.CharField(max_length=32, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=64)),
            ],
            options={
                'ordering': ('name',),
                'verbose_name': 'distribution type',
                'verbose_name_plural': 'distribution types',
            },
        ),
        migrations.CreateModel(
            name='PlatformName',
            fields=[
                ('key', models.CharField(max_length=32, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=32)),
            ],
            options={
                'ordering': ('name',),
                'verbose_name': 'platform name',
                'verbose_name_plural': 'platform names',
            },
        ),
        migrations.CreateModel(
            name='PythonVersion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('major', models.IntegerField()),
                ('minor', models.IntegerField()),
            ],
            options={
                'ordering': ('major', 'minor'),
                'verbose_name': 'python version',
                'verbose_name_plural': 'python versions',
            },
        ),
        migrations.AlterUniqueTogether(
            name='pythonversion',
            unique_together=set([('major', 'minor')]),
        ),
    ]
| {
"content_hash": "18ce7f606325c3c7adc1fedfc2070dda",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 114,
"avg_line_length": 33.166666666666664,
"alnum_prop": 0.47661383842288363,
"repo_name": "popen2/djangopypi2",
"id": "e68a352c5942dce9ba10218529b370c28149280d",
"size": "2660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangopypi2/apps/pypi_metadata/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1294"
},
{
"name": "HTML",
"bytes": "32170"
},
{
"name": "Python",
"bytes": "150011"
}
],
"symlink_target": ""
} |
"""
Tests For Scheduler Host Filter Drivers.
"""
import json
from nova import exception
from nova import flags
from nova import test
from nova.scheduler import host_filter
# Global nova flag registry (command-line / config options).
FLAGS = flags.FLAGS
class FakeZoneManager:
    """Minimal ZoneManager stand-in; tests attach .service_states to it."""
    pass
class HostFilterTestCase(test.TestCase):
    """Test case for host filter drivers.

    NOTE(review): Python 2 only as written (xrange, `except KeyError, e`).
    """
    def _host_caps(self, multiplier):
        """Return fake compute capabilities that scale with multiplier."""
        # Returns host capabilities in the following way:
        # host1 = memory:free 10 (100max)
        #         disk:available 100 (1000max)
        # hostN = memory:free 10 + 10N
        #         disk:available 100 + 100N
        # in other words: hostN has more resources than host0
        # which means ... don't go above 10 hosts.
        return {'host_name-description': 'XenServer %s' % multiplier,
                'host_hostname': 'xs-%s' % multiplier,
                'host_memory_total': 100,
                'host_memory_overhead': 10,
                'host_memory_free': 10 + multiplier * 10,
                'host_memory_free-computed': 10 + multiplier * 10,
                'host_other-config': {},
                'host_ip_address': '192.168.1.%d' % (100 + multiplier),
                'host_cpu_info': {},
                'disk_available': 100 + multiplier * 100,
                'disk_total': 1000,
                'disk_used': 0,
                'host_uuid': 'xxx-%d' % multiplier,
                'host_name-label': 'xs-%s' % multiplier}
    def setUp(self):
        # Force the default driver flag, remembering the old value for
        # tearDown, and build ten fake hosts host01..host10.
        self.old_flag = FLAGS.default_host_filter_driver
        FLAGS.default_host_filter_driver = \
            'nova.scheduler.host_filter.AllHostsFilter'
        self.instance_type = dict(name='tiny',
                                  memory_mb=50,
                                  vcpus=10,
                                  local_gb=500,
                                  flavorid=1,
                                  swap=500,
                                  rxtx_quota=30000,
                                  rxtx_cap=200)
        self.zone_manager = FakeZoneManager()
        states = {}
        for x in xrange(10):
            states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)}
        self.zone_manager.service_states = states
    def tearDown(self):
        # Restore the flag changed in setUp.
        FLAGS.default_host_filter_driver = self.old_flag
    def test_choose_driver(self):
        # Test default driver ...
        driver = host_filter.choose_driver()
        self.assertEquals(driver._full_name(),
                          'nova.scheduler.host_filter.AllHostsFilter')
        # Test valid driver ...
        driver = host_filter.choose_driver(
            'nova.scheduler.host_filter.FlavorFilter')
        self.assertEquals(driver._full_name(),
                          'nova.scheduler.host_filter.FlavorFilter')
        # Test invalid driver ...
        try:
            host_filter.choose_driver('does not exist')
            self.fail("Should not find driver")
        except exception.SchedulerHostFilterDriverNotFound:
            pass
    def test_all_host_driver(self):
        # AllHostsFilter keeps every host.
        driver = host_filter.AllHostsFilter()
        cooked = driver.instance_type_to_filter(self.instance_type)
        hosts = driver.filter_hosts(self.zone_manager, cooked)
        self.assertEquals(10, len(hosts))
        for host, capabilities in hosts:
            self.assertTrue(host.startswith('host'))
    def test_flavor_driver(self):
        driver = host_filter.FlavorFilter()
        # filter all hosts that can support 50 ram and 500 disk
        name, cooked = driver.instance_type_to_filter(self.instance_type)
        self.assertEquals('nova.scheduler.host_filter.FlavorFilter', name)
        hosts = driver.filter_hosts(self.zone_manager, cooked)
        self.assertEquals(6, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        self.assertEquals('host05', just_hosts[0])
        self.assertEquals('host10', just_hosts[5])
    def test_json_driver(self):
        driver = host_filter.JsonFilter()
        # filter all hosts that can support 50 ram and 500 disk
        name, cooked = driver.instance_type_to_filter(self.instance_type)
        self.assertEquals('nova.scheduler.host_filter.JsonFilter', name)
        hosts = driver.filter_hosts(self.zone_manager, cooked)
        self.assertEquals(6, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        self.assertEquals('host05', just_hosts[0])
        self.assertEquals('host10', just_hosts[5])
        # Try some custom queries
        raw = ['or',
                   ['and',
                      ['<', '$compute.host_memory_free', 30],
                      ['<', '$compute.disk_available', 300]
                   ],
                   ['and',
                      ['>', '$compute.host_memory_free', 70],
                      ['>', '$compute.disk_available', 700]
                   ]
              ]
        cooked = json.dumps(raw)
        hosts = driver.filter_hosts(self.zone_manager, cooked)
        self.assertEquals(5, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        for index, host in zip([1, 2, 8, 9, 10], just_hosts):
            self.assertEquals('host%02d' % index, host)
        raw = ['not',
                  ['=', '$compute.host_memory_free', 30],
              ]
        cooked = json.dumps(raw)
        hosts = driver.filter_hosts(self.zone_manager, cooked)
        self.assertEquals(9, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts):
            self.assertEquals('host%02d' % index, host)
        raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
        cooked = json.dumps(raw)
        hosts = driver.filter_hosts(self.zone_manager, cooked)
        self.assertEquals(5, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        for index, host in zip([2, 4, 6, 8, 10], just_hosts):
            self.assertEquals('host%02d' % index, host)
        # Try some bogus input ...
        raw = ['unknown command', ]
        cooked = json.dumps(raw)
        try:
            driver.filter_hosts(self.zone_manager, cooked)
            self.fail("Should give KeyError")
        except KeyError, e:
            pass
        self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps([])))
        self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps({})))
        self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps(
            ['not', True, False, True, False]
        )))
        try:
            driver.filter_hosts(self.zone_manager, json.dumps(
                'not', True, False, True, False
            ))
            self.fail("Should give KeyError")
        except KeyError, e:
            pass
        self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
            ['=', '$foo', 100]
        )))
        self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
            ['=', '$.....', 100]
        )))
        self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
            ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
        )))
        self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
            ['=', {}, ['>', '$missing....foo']]
        )))
| {
"content_hash": "186043dc95479e86ea95e0b3cb7bb850",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 79,
"avg_line_length": 37.88659793814433,
"alnum_prop": 0.5476190476190477,
"repo_name": "superstack/nova",
"id": "c029d41e6afaba6df42ffc493123e55917a4a1ec",
"size": "7979",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/test_host_filter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "47238"
},
{
"name": "Python",
"bytes": "2491049"
},
{
"name": "Shell",
"bytes": "31698"
}
],
"symlink_target": ""
} |
# Prefer the C-accelerated cPickle on Python 2; fall back to the plain
# pickle module where cPickle does not exist.
try:
    import cPickle as pickle
except ImportError:
    import pickle
import zlib
import time
import argparse
import sys
import MySQLdb as mysqldb
# Worker loop: repeatedly claim one pending row (status=0) from the `pyga`
# table, evaluate its pickled fitness function on its pickled genome, and
# write the pickled fitness back (status=-1 marks "done").
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Genetic Algorithm MySQL Client.')
    parser.add_argument('path', help='path to add for libraries etc', default=".")
    parser.add_argument("servername", help="name of MySQL server")
    parser.add_argument("username", help="username of MySQL server")
    parser.add_argument("password", help="password of MySQL server")
    parser.add_argument("database", help="database of MySQL server")
    args = parser.parse_args()
    # Make project libraries importable so unpickling can resolve classes.
    sys.path[0] =args.path
    #print "Using path", sys.path
    servername = args.servername
    username = args.username
    password = args.password
    database = args.database
    conn = mysqldb.connect(host=servername, user=username, passwd=password, db=database)
    cursor = conn.cursor()
    running = True
    # NOTE(review): `running` is never set to False (the shutdown branch is
    # commented out below), so this loop runs until the process is killed.
    while running:
        cursor.execute("SELECT * FROM pyga WHERE status=0")
        result = cursor.fetchone()
        if result is not None:
            searchid, generationid, individualid, genomeblob, fitnessfuncblob, status = result
            # Claim the row by writing the current timestamp into status;
            # the `and status=0` guard avoids racing other workers.
            currtime = int(time.time())
            cursor.execute("BEGIN;")
            cursor.execute("UPDATE pyga SET status=%s WHERE searchid=%s AND generationid=%s AND individualid = %s and status=0;",
                (currtime, searchid, generationid, individualid))
            cursor.execute("COMMIT;")
            #print searchid, generationid, individualid
            # NOTE(review): pickle.loads on database blobs — only safe if the
            # database contents are fully trusted.
            genome = pickle.loads(zlib.decompress(genomeblob))
            fitnessfunc = pickle.loads(zlib.decompress(fitnessfuncblob))
            #this could take a while...
            fitnessfunc.generationno = generationid
            fitnessfunc.individual = individualid
            fitness = fitnessfunc(genome)
            fitblob = zlib.compress(pickle.dumps(fitness))
            # Store the result only if we still own the claim (status=currtime).
            cursor.execute("BEGIN;")
            cursor.execute("UPDATE pyga SET fitness=%s, status=-1 WHERE searchid=%s AND generationid=%s AND individualid=%s and status=%s;",
                (fitblob, searchid, generationid, individualid, currtime))
            cursor.execute("COMMIT;")
            #print searchid, generationid, individualid, fitness
        else:
            #nothing more to do
            #wait a while and see if there is still nothing
            time.sleep(1)
            cursor.execute("SELECT * FROM pyga WHERE status=0")
            result = cursor.fetchone()
            if result is None:
                #running = False
                pass
| {
"content_hash": "c0ca874e8ac229de5f6d7cbad1e69c66",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 140,
"avg_line_length": 37.108108108108105,
"alnum_prop": 0.6125273124544792,
"repo_name": "afaulconbridge/PyGA",
"id": "96d9b0365bd66a62699b00c742c437bbc40d8cf2",
"size": "2746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sga/mysqlga_client.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "26014"
},
{
"name": "Python",
"bytes": "26411"
}
],
"symlink_target": ""
} |
from os import environ
import six
# Select the stdlib config parser by major version. six.PY3 (not six.PY34)
# is the correct guard: `configparser` is the module name on every Python 3
# release, while six.PY34 is only true on 3.4+ and would make Python
# 3.0-3.3 fall through to the Python 2-only ConfigParser module.
if six.PY3:
    import configparser
else:
    import ConfigParser as configparser
# Defaults for the test config, overridable via environment variables.
base_config = {
    'wait_timeout': environ.get('TENABLEIOTEST_WAIT_TIMEOUT', '800'),
    'scan_template_name': environ.get('TENABLEIOTEST_SCAN_TEMPLATE_NAME', 'basic'),
    'registry_host': environ.get('TENABLEIOTEST_REGISTRY_HOST', 'https://registry.cloud.tenable.com'),
}
# Read tenable_io.ini config. Default to environment variables if exist.
# NOTE(review): SafeConfigParser is a deprecated alias removed in Python
# 3.12; confirm the supported interpreter range before switching to
# configparser.ConfigParser.
config = configparser.SafeConfigParser(base_config)
config.add_section('tenable_io-test')
config.read('tenable_io.ini')
class TenableIOTestConfig(object):
    """Read-only accessor for the [tenable_io-test] configuration section."""

    @staticmethod
    def get(key):
        """Return the configured value for key (env-var defaults apply)."""
        section = 'tenable_io-test'
        return config.get(section, key)
| {
"content_hash": "a85258627011a8cae489ccbc6cc37862",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 102,
"avg_line_length": 28.48,
"alnum_prop": 0.7289325842696629,
"repo_name": "tenable/Tenable.io-SDK-for-Python",
"id": "f8f882f24054bfa0a0aac9f4d481bff57548acef",
"size": "712",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "459766"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from catalog.urls import catalog_url
from catalog.reviews.views import (
ReviewListView, ReviewDetailView, ReviewCreateView, ReviewUpdateView,
ReviewDeleteView)
# Pats are automatically inserted into urlpatterns in catalog urls.py
# so urlpatterns are not specified here.
#
# Shared URL fragments.  NOTE: ``(?P<pk>\d+)`` fixes the original
# ``(?P<pk>[\d+])``, which was a character class matching exactly ONE
# digit or a literal "+", so multi-digit primary keys never matched.
SLUG_PATTERN = r'(?P<slug>[0-9A-Za-z-_.//]+)'
PK_PATTERN = r'(?P<pk>\d+)'

pats = [
    catalog_url('product', ReviewListView.as_view(),
                'review_list',
                SLUG_PATTERN + r'/reviews/'),
    catalog_url('product', ReviewDetailView.as_view(),
                'review_detail',
                SLUG_PATTERN + r'/reviews/' + PK_PATTERN + r'/'),
    catalog_url('product', ReviewCreateView.as_view(),
                'review_create',
                SLUG_PATTERN + r'/reviews/create/'),
    catalog_url('product', ReviewUpdateView.as_view(),
                'review_update',
                SLUG_PATTERN + r'/reviews/' + PK_PATTERN + r'/update/'),
    catalog_url('product', ReviewDeleteView.as_view(),
                'review_delete',
                SLUG_PATTERN + r'/reviews/' + PK_PATTERN + r'/delete/'),
]
| {
"content_hash": "21dd7c0dd35dd1702278311a9ed6a64c",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 77,
"avg_line_length": 36.193548387096776,
"alnum_prop": 0.5650623885918004,
"repo_name": "dinoperovic/django-shop-catalog",
"id": "3b9da192b7882e9a96e5cea4caaa5b3c28d2ea16",
"size": "1146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "catalog/reviews/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "4264"
},
{
"name": "Makefile",
"bytes": "178"
},
{
"name": "Python",
"bytes": "182299"
}
],
"symlink_target": ""
} |
from libcloud.container.types import Provider
from libcloud.container.providers import get_driver
from libcloud.container.base import ContainerImage
# Resolve the Rancher driver class and authenticate against the host.
rancher_cls = get_driver(Provider.RANCHER)
connection = rancher_cls(
    "MYRANCHERACCESSKEY",
    "MYRANCHERSECRETKEY",
    host="17.23.66.4",
    port=443,
)

# Describe the container image the new service will run.
image = ContainerImage(
    "hastebin",
    "hastebin",
    "rlister/hastebin",
    "latest",
    driver=None,
)

# Deploy the service into Rancher environment "1e2".
new_service = connection.ex_deploy_service(
    name="excitingservice",
    image=image,
    environmentid="1e2",
    environment={"STORAGE_TYPE": "file"},
)
| {
"content_hash": "1e3608e4e7f273ca08bf5bc67073436f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 75,
"avg_line_length": 27.2,
"alnum_prop": 0.7389705882352942,
"repo_name": "mistio/libcloud",
"id": "af63c95f6bcf989f35162ec3dfe4fb3ff02bf68f",
"size": "544",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "docs/examples/container/rancher/deploy_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1819"
},
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "PowerShell",
"bytes": "410"
},
{
"name": "Python",
"bytes": "9067225"
},
{
"name": "Shell",
"bytes": "12994"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import oauth2client.django_orm
class Migration(migrations.Migration):
    """Initial schema: Google Analytics Profile and its OAuth2 credentials."""
    dependencies = [
        ('sites', '0001_initial'),
    ]
    operations = [
        # A Google Analytics view (profile); linked one-to-one to a
        # django.contrib.sites Site via the AddField operation below.
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('web_property_id', models.CharField(help_text='The property tracking ID is available when viewing the "Tracking Code" details in the Google Analytics admin.', max_length=25, verbose_name='property tracking ID')),
                ('profile_id', models.CharField(default='', max_length=25, verbose_name='view (profile) ID', blank=True)),
                ('display_features', models.BooleanField(default=False, help_text='Used for remarketing, demographics and interest reporting.', verbose_name='Use Display advertising features?')),
                ('is_enabled', models.BooleanField(default=False, help_text='Is Google Analytics tracking enabled on the website?', verbose_name='enabled')),
            ],
            options={
                'ordering': ['site'],
                'verbose_name': 'view (profile)',
                'verbose_name_plural': 'views (profiles)',
            },
        ),
        # OAuth2 credentials for a Profile; shares its primary key with the
        # Profile row via the one-to-one ``id`` field.
        migrations.CreateModel(
            name='ProfileOAuth2Credentials',
            fields=[
                ('id', models.OneToOneField(related_name='_oauth2_credentials', primary_key=True, serialize=False, to='googleanalytics.Profile')),
                ('credentials', oauth2client.django_orm.CredentialsField(null=True)),
            ],
        ),
        # Added separately so the Profile table exists before the FK to
        # sites.Site is attached.
        migrations.AddField(
            model_name='profile',
            name='site',
            field=models.OneToOneField(related_name='+', to='sites.Site'),
        ),
    ]
| {
"content_hash": "5453ea44978ba35b5dfd726a355e6d30",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 229,
"avg_line_length": 46.02439024390244,
"alnum_prop": 0.6030736618971914,
"repo_name": "thecut/thecut-googleanalytics",
"id": "4a01fc65bf751adb385ee27f14149ec365da06af",
"size": "1911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thecut/googleanalytics/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1472"
},
{
"name": "Python",
"bytes": "36928"
}
],
"symlink_target": ""
} |
import os
# try/except added for compatibility with python < 3.8
try:
from unittest import mock
from unittest.mock import AsyncMock # pragma: NO COVER
except ImportError: # pragma: NO COVER
import mock
import math
from google.api_core import (
future,
gapic_v1,
grpc_helpers,
grpc_helpers_async,
operation,
operations_v1,
path_template,
)
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import operation_async # type: ignore
import google.auth
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.protobuf import wrappers_pb2 # type: ignore
from google.type import latlng_pb2 # type: ignore
from google.type import money_pb2 # type: ignore
from google.type import postal_address_pb2 # type: ignore
from google.type import timeofday_pb2 # type: ignore
import grpc
from grpc.experimental import aio
from proto.marshal.rules import wrappers
from proto.marshal.rules.dates import DurationRule, TimestampRule
import pytest
from google.cloud.talent_v4.services.job_service import (
JobServiceAsyncClient,
JobServiceClient,
pagers,
transports,
)
from google.cloud.talent_v4.types import common, filters, histogram
from google.cloud.talent_v4.types import job
from google.cloud.talent_v4.types import job as gct_job
from google.cloud.talent_v4.types import job_service
def client_cert_source_callback():
    """Dummy mTLS callback returning a static (cert, key) byte pair."""
    cert, key = b"cert bytes", b"key bytes"
    return cert, key
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a fake endpoint when ``client`` defaults to localhost."""
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to *.mtls.* hosts."""
    cases = [
        ("example.googleapis.com", "example.mtls.googleapis.com"),
        ("example.mtls.googleapis.com", "example.mtls.googleapis.com"),
        ("example.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("example.mtls.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        # Non-googleapis hosts pass through unchanged.
        ("api.example.com", "api.example.com"),
    ]
    # None maps to None.
    assert JobServiceClient._get_default_mtls_endpoint(None) is None
    for endpoint, expected in cases:
        assert JobServiceClient._get_default_mtls_endpoint(endpoint) == expected
@pytest.mark.parametrize(
    "client_class,transport_name",
    [
        (JobServiceClient, "grpc"),
        (JobServiceAsyncClient, "grpc_asyncio"),
    ],
)
def test_job_service_client_from_service_account_info(client_class, transport_name):
    """A client built from service-account info holds those credentials."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info, transport=transport_name)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        # Default host for the talent Job Service.
        assert client.transport._host == ("jobs.googleapis.com:443")
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.JobServiceGrpcTransport, "grpc"),
        (transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_job_service_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """with_always_use_jwt_access is invoked iff always_use_jwt_access=True."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class,transport_name",
    [
        (JobServiceClient, "grpc"),
        (JobServiceAsyncClient, "grpc_asyncio"),
    ],
)
def test_job_service_client_from_service_account_file(client_class, transport_name):
    """Both from_service_account_file and _json produce credentialed clients."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file(
            "dummy/file/path.json", transport=transport_name
        )
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        # from_service_account_json is an alias for the file-based constructor.
        client = client_class.from_service_account_json(
            "dummy/file/path.json", transport=transport_name
        )
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == ("jobs.googleapis.com:443")
def test_job_service_client_get_transport_class():
    """The default transport is gRPC, and "grpc" resolves to it explicitly."""
    default_transport = JobServiceClient.get_transport_class()
    assert default_transport in [
        transports.JobServiceGrpcTransport,
    ]
    named_transport = JobServiceClient.get_transport_class("grpc")
    assert named_transport == transports.JobServiceGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"),
        (
            JobServiceAsyncClient,
            transports.JobServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)
)
@mock.patch.object(
    JobServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(JobServiceAsyncClient),
)
def test_job_service_client_client_options(
    client_class, transport_class, transport_name
):
    """client_options and GOOGLE_API_USE_MTLS_ENDPOINT drive transport setup."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(JobServiceClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(JobServiceClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
                api_audience=None,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
                api_audience=None,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
    # Check the case api_endpoint is provided
    options = client_options.ClientOptions(
        api_audience="https://language.googleapis.com"
    )
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience="https://language.googleapis.com",
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "true"),
        (
            JobServiceAsyncClient,
            transports.JobServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "false"),
        (
            JobServiceAsyncClient,
            transports.JobServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)
)
@mock.patch.object(
    JobServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(JobServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_job_service_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """mTLS endpoint autoswitch honors GOOGLE_API_USE_CLIENT_CERTIFICATE."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
                api_audience=None,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                        api_audience=None,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                    api_audience=None,
                )
@pytest.mark.parametrize("client_class", [JobServiceClient, JobServiceAsyncClient])
@mock.patch.object(
    JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)
)
@mock.patch.object(
    JobServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(JobServiceAsyncClient),
)
def test_job_service_client_get_mtls_endpoint_and_cert_source(client_class):
    """get_mtls_endpoint_and_cert_source resolves per env vars and options."""
    mock_client_cert_source = mock.Mock()
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"),
        (
            JobServiceAsyncClient,
            transports.JobServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_job_service_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes from client_options are forwarded to the transport constructor."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(
        scopes=["1", "2"],
    )
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", grpc_helpers),
        (
            JobServiceAsyncClient,
            transports.JobServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_job_service_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """A credentials_file in client_options is forwarded to the transport."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
def test_job_service_client_client_options_from_dict():
    """client_options given as a plain dict is honored like a ClientOptions."""
    with mock.patch(
        "google.cloud.talent_v4.services.job_service.transports.JobServiceGrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = JobServiceClient(client_options={"api_endpoint": "squid.clam.whelk"})
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", grpc_helpers),
        (
            JobServiceAsyncClient,
            transports.JobServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_job_service_client_create_channel_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """File credentials reach both the transport and the created channel."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
    # test that the credentials from file are saved and used as the credentials.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        file_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (creds, None)
        client = client_class(client_options=options, transport=transport_name)
        create_channel.assert_called_with(
            "jobs.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/jobs",
            ),
            scopes=None,
            default_host="jobs.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "request_type",
    [
        job_service.CreateJobRequest,
        dict,
    ],
)
def test_create_job(request_type, transport: str = "grpc"):
    """create_job issues one CreateJobRequest and returns the stub's Job."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_job), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gct_job.Job(
            name="name_value",
            company="company_value",
            requisition_id="requisition_id_value",
            title="title_value",
            description="description_value",
            addresses=["addresses_value"],
            job_benefits=[common.JobBenefit.CHILD_CARE],
            degree_types=[common.DegreeType.PRIMARY_EDUCATION],
            department="department_value",
            employment_types=[common.EmploymentType.FULL_TIME],
            incentives="incentives_value",
            language_code="language_code_value",
            job_level=common.JobLevel.ENTRY_LEVEL,
            promotion_value=1635,
            qualifications="qualifications_value",
            responsibilities="responsibilities_value",
            posting_region=common.PostingRegion.ADMINISTRATIVE_AREA,
            visibility=common.Visibility.ACCOUNT_ONLY,
            company_display_name="company_display_name_value",
        )
        response = client.create_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == job_service.CreateJobRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gct_job.Job)
    assert response.name == "name_value"
    assert response.company == "company_value"
    assert response.requisition_id == "requisition_id_value"
    assert response.title == "title_value"
    assert response.description == "description_value"
    assert response.addresses == ["addresses_value"]
    assert response.job_benefits == [common.JobBenefit.CHILD_CARE]
    assert response.degree_types == [common.DegreeType.PRIMARY_EDUCATION]
    assert response.department == "department_value"
    assert response.employment_types == [common.EmploymentType.FULL_TIME]
    assert response.incentives == "incentives_value"
    assert response.language_code == "language_code_value"
    assert response.job_level == common.JobLevel.ENTRY_LEVEL
    assert response.promotion_value == 1635
    assert response.qualifications == "qualifications_value"
    assert response.responsibilities == "responsibilities_value"
    assert response.posting_region == common.PostingRegion.ADMINISTRATIVE_AREA
    assert response.visibility == common.Visibility.ACCOUNT_ONLY
    assert response.company_display_name == "company_display_name_value"
def test_create_job_empty_call():
    """create_job() with no arguments still sends a default CreateJobRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_job), "__call__") as call:
        client.create_job()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == job_service.CreateJobRequest()
@pytest.mark.asyncio
async def test_create_job_async(
    transport: str = "grpc_asyncio", request_type=job_service.CreateJobRequest
):
    """Async create_job issues one CreateJobRequest and awaits the stub's Job."""
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_job), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gct_job.Job(
                name="name_value",
                company="company_value",
                requisition_id="requisition_id_value",
                title="title_value",
                description="description_value",
                addresses=["addresses_value"],
                job_benefits=[common.JobBenefit.CHILD_CARE],
                degree_types=[common.DegreeType.PRIMARY_EDUCATION],
                department="department_value",
                employment_types=[common.EmploymentType.FULL_TIME],
                incentives="incentives_value",
                language_code="language_code_value",
                job_level=common.JobLevel.ENTRY_LEVEL,
                promotion_value=1635,
                qualifications="qualifications_value",
                responsibilities="responsibilities_value",
                posting_region=common.PostingRegion.ADMINISTRATIVE_AREA,
                visibility=common.Visibility.ACCOUNT_ONLY,
                company_display_name="company_display_name_value",
            )
        )
        response = await client.create_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == job_service.CreateJobRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gct_job.Job)
    assert response.name == "name_value"
    assert response.company == "company_value"
    assert response.requisition_id == "requisition_id_value"
    assert response.title == "title_value"
    assert response.description == "description_value"
    assert response.addresses == ["addresses_value"]
    assert response.job_benefits == [common.JobBenefit.CHILD_CARE]
    assert response.degree_types == [common.DegreeType.PRIMARY_EDUCATION]
    assert response.department == "department_value"
    assert response.employment_types == [common.EmploymentType.FULL_TIME]
    assert response.incentives == "incentives_value"
    assert response.language_code == "language_code_value"
    assert response.job_level == common.JobLevel.ENTRY_LEVEL
    assert response.promotion_value == 1635
    assert response.qualifications == "qualifications_value"
    assert response.responsibilities == "responsibilities_value"
    assert response.posting_region == common.PostingRegion.ADMINISTRATIVE_AREA
    assert response.visibility == common.Visibility.ACCOUNT_ONLY
    assert response.company_display_name == "company_display_name_value"
@pytest.mark.asyncio
async def test_create_job_async_from_dict():
    """Run the async create_job test with a plain-dict request body."""
    await test_create_job_async(request_type=dict)
def test_create_job_field_headers():
    """Routing headers derived from the request must be sent with create_job."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the field that feeds the HTTP/1.1 URI so the client has
    # something to turn into a routing header.
    request = job_service.CreateJobRequest()
    request.parent = "parent_value"
    # Patch the transport-level stub and hand it a canned response.
    with mock.patch.object(type(client.transport.create_job), "__call__") as stub:
        stub.return_value = gct_job.Job()
        client.create_job(request)
        # Exactly one RPC, carrying the request we built.
        assert len(stub.mock_calls) == 1
        assert stub.mock_calls[0].args[0] == request
        # The routing header must appear in the call metadata.
        expected_header = (
            "x-goog-request-params",
            "parent=parent_value",
        )
        assert expected_header in stub.mock_calls[0].kwargs["metadata"]
@pytest.mark.asyncio
async def test_create_job_field_headers_async():
    """Async variant: routing headers must be sent with create_job."""
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Seed the URI-bound field so a routing header gets generated.
    request = job_service.CreateJobRequest()
    request.parent = "parent_value"
    # Patch the transport stub with an awaitable canned response.
    with mock.patch.object(type(client.transport.create_job), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_job.Job())
        await client.create_job(request)
        # The stub was invoked with the request we built.
        assert len(stub.mock_calls)
        assert stub.mock_calls[0].args[0] == request
        # The call metadata carries the routing header.
        expected_header = (
            "x-goog-request-params",
            "parent=parent_value",
        )
        assert expected_header in stub.mock_calls[0].kwargs["metadata"]
def test_create_job_flattened():
    """create_job must copy flattened keyword args onto the request proto."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.create_job), "__call__") as stub:
        # Canned response so the call can complete.
        stub.return_value = gct_job.Job()
        # Invoke with a truthy value for each flattened field.
        client.create_job(
            parent="parent_value",
            job=gct_job.Job(name="name_value"),
        )
        # Exactly one RPC whose request mirrors the flattened values.
        assert len(stub.mock_calls) == 1
        sent = stub.mock_calls[0].args[0]
        assert sent.parent == "parent_value"
        assert sent.job == gct_job.Job(name="name_value")
def test_create_job_flattened_error():
    """Passing a request object together with flattened fields must fail."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing the two calling conventions is rejected up front.
    with pytest.raises(ValueError):
        client.create_job(
            job_service.CreateJobRequest(),
            parent="parent_value",
            job=gct_job.Job(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_job_flattened_async():
    """Async create_job must copy flattened keyword args onto the request.

    Fix: the synchronous ``gct_job.Job()`` return value was a dead store,
    immediately overwritten by the awaitable fake call; it is removed. The
    call-count assert is tightened to ``== 1`` to match the sync sibling.
    """
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_job), "__call__") as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_job.Job())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_job(
            parent="parent_value",
            job=gct_job.Job(name="name_value"),
        )
        # Establish that the underlying call was made exactly once with the
        # expected request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].job == gct_job.Job(name="name_value")
@pytest.mark.asyncio
async def test_create_job_flattened_error_async():
    """Async client also rejects a request object mixed with flattened fields."""
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Both calling conventions at once is a usage error.
    with pytest.raises(ValueError):
        await client.create_job(
            job_service.CreateJobRequest(),
            parent="parent_value",
            job=gct_job.Job(name="name_value"),
        )
@pytest.mark.parametrize(
    "request_type",
    [
        job_service.BatchCreateJobsRequest,
        dict,
    ],
)
def test_batch_create_jobs(request_type, transport: str = "grpc"):
    """Smoke-test batch_create_jobs over both proto and dict request forms."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional at runtime, so an empty request is fine.
    request = request_type()
    with mock.patch.object(
        type(client.transport.batch_create_jobs), "__call__"
    ) as stub:
        # The RPC yields a long-running operation.
        stub.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.batch_create_jobs(request)
        # One RPC, carrying a default BatchCreateJobsRequest.
        assert len(stub.mock_calls) == 1
        assert stub.mock_calls[0].args[0] == job_service.BatchCreateJobsRequest()
        # The client surfaces the operation as a future.
        assert isinstance(response, future.Future)
def test_batch_create_jobs_empty_call():
    """Coverage failsafe: a call with no request and no fields must work."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    with mock.patch.object(
        type(client.transport.batch_create_jobs), "__call__"
    ) as stub:
        client.batch_create_jobs()
        # A default request object is synthesized for the RPC.
        stub.assert_called()
        assert stub.mock_calls[0].args[0] == job_service.BatchCreateJobsRequest()
@pytest.mark.asyncio
async def test_batch_create_jobs_async(
    transport: str = "grpc_asyncio", request_type=job_service.BatchCreateJobsRequest
):
    """Async smoke-test: batch_create_jobs returns an LRO future."""
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # An empty request suffices; proto3 makes every field optional.
    request = request_type()
    with mock.patch.object(
        type(client.transport.batch_create_jobs), "__call__"
    ) as stub:
        # Awaitable fake carrying the long-running operation.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.batch_create_jobs(request)
        # The stub fired with a default BatchCreateJobsRequest.
        assert len(stub.mock_calls)
        assert stub.mock_calls[0].args[0] == job_service.BatchCreateJobsRequest()
        # The async client also wraps the operation in a future.
        assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_batch_create_jobs_async_from_dict():
    """Run the async batch_create_jobs test with a plain-dict request body."""
    await test_batch_create_jobs_async(request_type=dict)
def test_batch_create_jobs_field_headers():
    """Routing headers must be attached for batch_create_jobs."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the field that feeds the HTTP/1.1 URI.
    request = job_service.BatchCreateJobsRequest()
    request.parent = "parent_value"
    # Patch the transport stub and hand it a canned operation.
    with mock.patch.object(
        type(client.transport.batch_create_jobs), "__call__"
    ) as stub:
        stub.return_value = operations_pb2.Operation(name="operations/op")
        client.batch_create_jobs(request)
        # Exactly one RPC, carrying the request we built.
        assert len(stub.mock_calls) == 1
        assert stub.mock_calls[0].args[0] == request
        # The routing header must appear in the call metadata.
        expected_header = (
            "x-goog-request-params",
            "parent=parent_value",
        )
        assert expected_header in stub.mock_calls[0].kwargs["metadata"]
@pytest.mark.asyncio
async def test_batch_create_jobs_field_headers_async():
    """Async variant: routing headers must be sent for batch_create_jobs."""
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Seed the URI-bound field so a routing header gets generated.
    request = job_service.BatchCreateJobsRequest()
    request.parent = "parent_value"
    # Patch the transport stub with an awaitable canned operation.
    with mock.patch.object(
        type(client.transport.batch_create_jobs), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.batch_create_jobs(request)
        # The stub was invoked with the request we built.
        assert len(stub.mock_calls)
        assert stub.mock_calls[0].args[0] == request
        # The call metadata carries the routing header.
        expected_header = (
            "x-goog-request-params",
            "parent=parent_value",
        )
        assert expected_header in stub.mock_calls[0].kwargs["metadata"]
def test_batch_create_jobs_flattened():
    """batch_create_jobs must copy flattened kwargs onto the request proto."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(
        type(client.transport.batch_create_jobs), "__call__"
    ) as stub:
        # Canned operation so the call can complete.
        stub.return_value = operations_pb2.Operation(name="operations/op")
        # Invoke with a truthy value for each flattened field.
        client.batch_create_jobs(
            parent="parent_value",
            jobs=[job.Job(name="name_value")],
        )
        # Exactly one RPC whose request mirrors the flattened values.
        assert len(stub.mock_calls) == 1
        sent = stub.mock_calls[0].args[0]
        assert sent.parent == "parent_value"
        assert sent.jobs == [job.Job(name="name_value")]
def test_batch_create_jobs_flattened_error():
    """A request object mixed with flattened fields must raise ValueError."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing the two calling conventions is rejected up front.
    with pytest.raises(ValueError):
        client.batch_create_jobs(
            job_service.BatchCreateJobsRequest(),
            parent="parent_value",
            jobs=[job.Job(name="name_value")],
        )
@pytest.mark.asyncio
async def test_batch_create_jobs_flattened_async():
    """Async batch_create_jobs must copy flattened kwargs onto the request.

    Fix: the synchronous ``Operation(name="operations/op")`` return value was
    a dead store, immediately overwritten by the awaitable fake call; it is
    removed. The call-count assert is tightened to ``== 1`` to match the sync
    sibling.
    """
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.batch_create_jobs), "__call__"
    ) as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.batch_create_jobs(
            parent="parent_value",
            jobs=[job.Job(name="name_value")],
        )
        # Establish that the underlying call was made exactly once with the
        # expected request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].jobs == [job.Job(name="name_value")]
@pytest.mark.asyncio
async def test_batch_create_jobs_flattened_error_async():
    """Async client also rejects a request object mixed with flattened fields."""
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Both calling conventions at once is a usage error.
    with pytest.raises(ValueError):
        await client.batch_create_jobs(
            job_service.BatchCreateJobsRequest(),
            parent="parent_value",
            jobs=[job.Job(name="name_value")],
        )
@pytest.mark.parametrize(
    "request_type",
    [
        job_service.GetJobRequest,
        dict,
    ],
)
def test_get_job(request_type, transport: str = "grpc"):
    """Verify get_job issues a GetJobRequest and maps every Job field through.

    Parametrized over both the proto request type and a plain dict.
    """
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_job), "__call__") as call:
        # Designate an appropriate return value for the call; one value per
        # scalar/enum/repeated field so the pass-through below is exhaustive.
        call.return_value = job.Job(
            name="name_value",
            company="company_value",
            requisition_id="requisition_id_value",
            title="title_value",
            description="description_value",
            addresses=["addresses_value"],
            job_benefits=[common.JobBenefit.CHILD_CARE],
            degree_types=[common.DegreeType.PRIMARY_EDUCATION],
            department="department_value",
            employment_types=[common.EmploymentType.FULL_TIME],
            incentives="incentives_value",
            language_code="language_code_value",
            job_level=common.JobLevel.ENTRY_LEVEL,
            promotion_value=1635,
            qualifications="qualifications_value",
            responsibilities="responsibilities_value",
            posting_region=common.PostingRegion.ADMINISTRATIVE_AREA,
            visibility=common.Visibility.ACCOUNT_ONLY,
            company_display_name="company_display_name_value",
        )
        response = client.get_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == job_service.GetJobRequest()
    # Establish that the response is the type that we expect, and that each
    # field set on the stubbed return value survives the round trip.
    assert isinstance(response, job.Job)
    assert response.name == "name_value"
    assert response.company == "company_value"
    assert response.requisition_id == "requisition_id_value"
    assert response.title == "title_value"
    assert response.description == "description_value"
    assert response.addresses == ["addresses_value"]
    assert response.job_benefits == [common.JobBenefit.CHILD_CARE]
    assert response.degree_types == [common.DegreeType.PRIMARY_EDUCATION]
    assert response.department == "department_value"
    assert response.employment_types == [common.EmploymentType.FULL_TIME]
    assert response.incentives == "incentives_value"
    assert response.language_code == "language_code_value"
    assert response.job_level == common.JobLevel.ENTRY_LEVEL
    assert response.promotion_value == 1635
    assert response.qualifications == "qualifications_value"
    assert response.responsibilities == "responsibilities_value"
    assert response.posting_region == common.PostingRegion.ADMINISTRATIVE_AREA
    assert response.visibility == common.Visibility.ACCOUNT_ONLY
    assert response.company_display_name == "company_display_name_value"
def test_get_job_empty_call():
    """Coverage failsafe: get_job with no arguments at all must work."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    with mock.patch.object(type(client.transport.get_job), "__call__") as stub:
        client.get_job()
        # A default request object is synthesized for the RPC.
        stub.assert_called()
        assert stub.mock_calls[0].args[0] == job_service.GetJobRequest()
@pytest.mark.asyncio
async def test_get_job_async(
    transport: str = "grpc_asyncio", request_type=job_service.GetJobRequest
):
    """Async counterpart of test_get_job: verify the request sent and that
    every Job field is passed through from the stubbed awaitable response."""
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_job), "__call__") as call:
        # Designate an appropriate return value for the call; the Job is
        # wrapped in an awaitable fake so the async client can await it.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            job.Job(
                name="name_value",
                company="company_value",
                requisition_id="requisition_id_value",
                title="title_value",
                description="description_value",
                addresses=["addresses_value"],
                job_benefits=[common.JobBenefit.CHILD_CARE],
                degree_types=[common.DegreeType.PRIMARY_EDUCATION],
                department="department_value",
                employment_types=[common.EmploymentType.FULL_TIME],
                incentives="incentives_value",
                language_code="language_code_value",
                job_level=common.JobLevel.ENTRY_LEVEL,
                promotion_value=1635,
                qualifications="qualifications_value",
                responsibilities="responsibilities_value",
                posting_region=common.PostingRegion.ADMINISTRATIVE_AREA,
                visibility=common.Visibility.ACCOUNT_ONLY,
                company_display_name="company_display_name_value",
            )
        )
        response = await client.get_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == job_service.GetJobRequest()
    # Establish that the response is the type that we expect, and that each
    # stubbed field survives the round trip.
    assert isinstance(response, job.Job)
    assert response.name == "name_value"
    assert response.company == "company_value"
    assert response.requisition_id == "requisition_id_value"
    assert response.title == "title_value"
    assert response.description == "description_value"
    assert response.addresses == ["addresses_value"]
    assert response.job_benefits == [common.JobBenefit.CHILD_CARE]
    assert response.degree_types == [common.DegreeType.PRIMARY_EDUCATION]
    assert response.department == "department_value"
    assert response.employment_types == [common.EmploymentType.FULL_TIME]
    assert response.incentives == "incentives_value"
    assert response.language_code == "language_code_value"
    assert response.job_level == common.JobLevel.ENTRY_LEVEL
    assert response.promotion_value == 1635
    assert response.qualifications == "qualifications_value"
    assert response.responsibilities == "responsibilities_value"
    assert response.posting_region == common.PostingRegion.ADMINISTRATIVE_AREA
    assert response.visibility == common.Visibility.ACCOUNT_ONLY
    assert response.company_display_name == "company_display_name_value"
@pytest.mark.asyncio
async def test_get_job_async_from_dict():
    """Run the async get_job test with a plain-dict request body."""
    await test_get_job_async(request_type=dict)
def test_get_job_field_headers():
    """Routing headers derived from the request must be sent with get_job."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the field that feeds the HTTP/1.1 URI.
    request = job_service.GetJobRequest()
    request.name = "name_value"
    # Patch the transport stub and hand it a canned response.
    with mock.patch.object(type(client.transport.get_job), "__call__") as stub:
        stub.return_value = job.Job()
        client.get_job(request)
        # Exactly one RPC, carrying the request we built.
        assert len(stub.mock_calls) == 1
        assert stub.mock_calls[0].args[0] == request
        # The routing header must appear in the call metadata.
        expected_header = (
            "x-goog-request-params",
            "name=name_value",
        )
        assert expected_header in stub.mock_calls[0].kwargs["metadata"]
@pytest.mark.asyncio
async def test_get_job_field_headers_async():
    """Async variant: routing headers must be sent with get_job."""
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Seed the URI-bound field so a routing header gets generated.
    request = job_service.GetJobRequest()
    request.name = "name_value"
    # Patch the transport stub with an awaitable canned response.
    with mock.patch.object(type(client.transport.get_job), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job.Job())
        await client.get_job(request)
        # The stub was invoked with the request we built.
        assert len(stub.mock_calls)
        assert stub.mock_calls[0].args[0] == request
        # The call metadata carries the routing header.
        expected_header = (
            "x-goog-request-params",
            "name=name_value",
        )
        assert expected_header in stub.mock_calls[0].kwargs["metadata"]
def test_get_job_flattened():
    """get_job must copy the flattened keyword arg onto the request proto."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.get_job), "__call__") as stub:
        # Canned response so the call can complete.
        stub.return_value = job.Job()
        # Invoke with a truthy value for the flattened field.
        client.get_job(
            name="name_value",
        )
        # Exactly one RPC whose request mirrors the flattened value.
        assert len(stub.mock_calls) == 1
        sent = stub.mock_calls[0].args[0]
        assert sent.name == "name_value"
def test_get_job_flattened_error():
    """A request object mixed with flattened fields must raise ValueError."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing the two calling conventions is rejected up front.
    with pytest.raises(ValueError):
        client.get_job(
            job_service.GetJobRequest(),
            name="name_value",
        )
@pytest.mark.asyncio
async def test_get_job_flattened_async():
    """Async get_job must copy the flattened keyword arg onto the request.

    Fix: the synchronous ``job.Job()`` return value was a dead store,
    immediately overwritten by the awaitable fake call; it is removed. The
    call-count assert is tightened to ``== 1`` to match the sync sibling.
    """
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_job), "__call__") as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job.Job())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_job(
            name="name_value",
        )
        # Establish that the underlying call was made exactly once with the
        # expected request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_job_flattened_error_async():
    """Async client also rejects a request object mixed with flattened fields."""
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Both calling conventions at once is a usage error.
    with pytest.raises(ValueError):
        await client.get_job(
            job_service.GetJobRequest(),
            name="name_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        job_service.UpdateJobRequest,
        dict,
    ],
)
def test_update_job(request_type, transport: str = "grpc"):
    """Verify update_job issues an UpdateJobRequest and maps every Job field
    through.

    Parametrized over both the proto request type and a plain dict.
    """
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_job), "__call__") as call:
        # Designate an appropriate return value for the call; one value per
        # scalar/enum/repeated field so the pass-through below is exhaustive.
        call.return_value = gct_job.Job(
            name="name_value",
            company="company_value",
            requisition_id="requisition_id_value",
            title="title_value",
            description="description_value",
            addresses=["addresses_value"],
            job_benefits=[common.JobBenefit.CHILD_CARE],
            degree_types=[common.DegreeType.PRIMARY_EDUCATION],
            department="department_value",
            employment_types=[common.EmploymentType.FULL_TIME],
            incentives="incentives_value",
            language_code="language_code_value",
            job_level=common.JobLevel.ENTRY_LEVEL,
            promotion_value=1635,
            qualifications="qualifications_value",
            responsibilities="responsibilities_value",
            posting_region=common.PostingRegion.ADMINISTRATIVE_AREA,
            visibility=common.Visibility.ACCOUNT_ONLY,
            company_display_name="company_display_name_value",
        )
        response = client.update_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == job_service.UpdateJobRequest()
    # Establish that the response is the type that we expect, and that each
    # field set on the stubbed return value survives the round trip.
    assert isinstance(response, gct_job.Job)
    assert response.name == "name_value"
    assert response.company == "company_value"
    assert response.requisition_id == "requisition_id_value"
    assert response.title == "title_value"
    assert response.description == "description_value"
    assert response.addresses == ["addresses_value"]
    assert response.job_benefits == [common.JobBenefit.CHILD_CARE]
    assert response.degree_types == [common.DegreeType.PRIMARY_EDUCATION]
    assert response.department == "department_value"
    assert response.employment_types == [common.EmploymentType.FULL_TIME]
    assert response.incentives == "incentives_value"
    assert response.language_code == "language_code_value"
    assert response.job_level == common.JobLevel.ENTRY_LEVEL
    assert response.promotion_value == 1635
    assert response.qualifications == "qualifications_value"
    assert response.responsibilities == "responsibilities_value"
    assert response.posting_region == common.PostingRegion.ADMINISTRATIVE_AREA
    assert response.visibility == common.Visibility.ACCOUNT_ONLY
    assert response.company_display_name == "company_display_name_value"
def test_update_job_empty_call():
    """Coverage failsafe: update_job with no arguments at all must work."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    with mock.patch.object(type(client.transport.update_job), "__call__") as stub:
        client.update_job()
        # A default request object is synthesized for the RPC.
        stub.assert_called()
        assert stub.mock_calls[0].args[0] == job_service.UpdateJobRequest()
@pytest.mark.asyncio
async def test_update_job_async(
    transport: str = "grpc_asyncio", request_type=job_service.UpdateJobRequest
):
    """Async counterpart of test_update_job: verify the request sent and that
    every Job field is passed through from the stubbed awaitable response."""
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_job), "__call__") as call:
        # Designate an appropriate return value for the call; the Job is
        # wrapped in an awaitable fake so the async client can await it.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gct_job.Job(
                name="name_value",
                company="company_value",
                requisition_id="requisition_id_value",
                title="title_value",
                description="description_value",
                addresses=["addresses_value"],
                job_benefits=[common.JobBenefit.CHILD_CARE],
                degree_types=[common.DegreeType.PRIMARY_EDUCATION],
                department="department_value",
                employment_types=[common.EmploymentType.FULL_TIME],
                incentives="incentives_value",
                language_code="language_code_value",
                job_level=common.JobLevel.ENTRY_LEVEL,
                promotion_value=1635,
                qualifications="qualifications_value",
                responsibilities="responsibilities_value",
                posting_region=common.PostingRegion.ADMINISTRATIVE_AREA,
                visibility=common.Visibility.ACCOUNT_ONLY,
                company_display_name="company_display_name_value",
            )
        )
        response = await client.update_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == job_service.UpdateJobRequest()
    # Establish that the response is the type that we expect, and that each
    # stubbed field survives the round trip.
    assert isinstance(response, gct_job.Job)
    assert response.name == "name_value"
    assert response.company == "company_value"
    assert response.requisition_id == "requisition_id_value"
    assert response.title == "title_value"
    assert response.description == "description_value"
    assert response.addresses == ["addresses_value"]
    assert response.job_benefits == [common.JobBenefit.CHILD_CARE]
    assert response.degree_types == [common.DegreeType.PRIMARY_EDUCATION]
    assert response.department == "department_value"
    assert response.employment_types == [common.EmploymentType.FULL_TIME]
    assert response.incentives == "incentives_value"
    assert response.language_code == "language_code_value"
    assert response.job_level == common.JobLevel.ENTRY_LEVEL
    assert response.promotion_value == 1635
    assert response.qualifications == "qualifications_value"
    assert response.responsibilities == "responsibilities_value"
    assert response.posting_region == common.PostingRegion.ADMINISTRATIVE_AREA
    assert response.visibility == common.Visibility.ACCOUNT_ONLY
    assert response.company_display_name == "company_display_name_value"
@pytest.mark.asyncio
async def test_update_job_async_from_dict():
    """Run the async update_job test with a plain-dict request body."""
    await test_update_job_async(request_type=dict)
def test_update_job_field_headers():
    """Routing headers derived from the request must be sent with update_job."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the nested field that feeds the HTTP/1.1 URI.
    request = job_service.UpdateJobRequest()
    request.job.name = "name_value"
    # Patch the transport stub and hand it a canned response.
    with mock.patch.object(type(client.transport.update_job), "__call__") as stub:
        stub.return_value = gct_job.Job()
        client.update_job(request)
        # Exactly one RPC, carrying the request we built.
        assert len(stub.mock_calls) == 1
        assert stub.mock_calls[0].args[0] == request
        # The routing header must appear in the call metadata.
        expected_header = (
            "x-goog-request-params",
            "job.name=name_value",
        )
        assert expected_header in stub.mock_calls[0].kwargs["metadata"]
@pytest.mark.asyncio
async def test_update_job_field_headers_async():
    """Async variant: routing headers must be sent with update_job."""
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Seed the nested URI-bound field so a routing header gets generated.
    request = job_service.UpdateJobRequest()
    request.job.name = "name_value"
    # Patch the transport stub with an awaitable canned response.
    with mock.patch.object(type(client.transport.update_job), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_job.Job())
        await client.update_job(request)
        # The stub was invoked with the request we built.
        assert len(stub.mock_calls)
        assert stub.mock_calls[0].args[0] == request
        # The call metadata carries the routing header.
        expected_header = (
            "x-goog-request-params",
            "job.name=name_value",
        )
        assert expected_header in stub.mock_calls[0].kwargs["metadata"]
def test_update_job_flattened():
    """update_job must copy flattened keyword args onto the request proto."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.update_job), "__call__") as stub:
        # Canned response so the call can complete.
        stub.return_value = gct_job.Job()
        # Invoke with a truthy value for each flattened field.
        client.update_job(
            job=gct_job.Job(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Exactly one RPC whose request mirrors the flattened values.
        assert len(stub.mock_calls) == 1
        sent = stub.mock_calls[0].args[0]
        assert sent.job == gct_job.Job(name="name_value")
        assert sent.update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
def test_update_job_flattened_error():
    """A request object mixed with flattened fields must raise ValueError."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing the two calling conventions is rejected up front.
    with pytest.raises(ValueError):
        client.update_job(
            job_service.UpdateJobRequest(),
            job=gct_job.Job(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_job_flattened_async():
    """Async update_job must copy flattened keyword args onto the request.

    Fix: the synchronous ``gct_job.Job()`` return value was a dead store,
    immediately overwritten by the awaitable fake call; it is removed. The
    call-count assert is tightened to ``== 1`` to match the sync sibling.
    """
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_job), "__call__") as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_job.Job())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_job(
            job=gct_job.Job(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made exactly once with the
        # expected request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].job == gct_job.Job(name="name_value")
        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
@pytest.mark.asyncio
async def test_update_job_flattened_error_async():
    """Async client also rejects a request object mixed with flattened fields."""
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Both calling conventions at once is a usage error.
    with pytest.raises(ValueError):
        await client.update_job(
            job_service.UpdateJobRequest(),
            job=gct_job.Job(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.parametrize(
    "request_type",
    [
        job_service.BatchUpdateJobsRequest,
        dict,
    ],
)
def test_batch_update_jobs(request_type, transport: str = "grpc"):
    """batch_update_jobs issues one RPC and returns an operation future."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )

    # Proto3 treats every field as optional at runtime and the API is
    # mocked, so an empty request is sufficient.
    request = request_type()

    # Patch the transport-level callable and hand back a fake LRO.
    with mock.patch.object(
        type(client.transport.batch_update_jobs), "__call__"
    ) as stub:
        stub.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.batch_update_jobs(request)

        # Exactly one RPC went out, carrying a default request message.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == job_service.BatchUpdateJobsRequest()

    # The client wraps the long-running operation in a future.
    assert isinstance(response, future.Future)
def test_batch_update_jobs_empty_call():
    """Coverage failsafe: a call with no request and no kwargs still works."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
    )

    # Patch the transport-level callable so no real RPC is attempted.
    with mock.patch.object(
        type(client.transport.batch_update_jobs), "__call__"
    ) as stub:
        client.batch_update_jobs()
        stub.assert_called()
        _, call_args, _ = stub.mock_calls[0]
        # A default-constructed request message is sent when none is given.
        assert call_args[0] == job_service.BatchUpdateJobsRequest()
@pytest.mark.asyncio
async def test_batch_update_jobs_async(
    transport: str = "grpc_asyncio", request_type=job_service.BatchUpdateJobsRequest
):
    """Async variant: one RPC goes out and an operation future comes back."""
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )

    # Proto3 treats every field as optional at runtime and the API is
    # mocked, so an empty request is sufficient.
    request = request_type()

    # Patch the transport-level callable and return an awaitable fake.
    with mock.patch.object(
        type(client.transport.batch_update_jobs), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.batch_update_jobs(request)

        # An RPC was recorded, carrying a default request message.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == job_service.BatchUpdateJobsRequest()

    # The client wraps the long-running operation in a future.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_batch_update_jobs_async_from_dict():
    """Exercise the async path with the request supplied as a plain dict."""
    await test_batch_update_jobs_async(request_type=dict)
def test_batch_update_jobs_field_headers():
    """Routing fields must be forwarded as x-goog-request-params metadata."""
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials())

    # Populate a field that is part of the HTTP/1.1 URI; it must be echoed
    # back in the request-params header.
    request = job_service.BatchUpdateJobsRequest()
    request.parent = "parent_value"

    # Patch the transport-level callable so no real RPC is attempted.
    with mock.patch.object(
        type(client.transport.batch_update_jobs), "__call__"
    ) as stub:
        stub.return_value = operations_pb2.Operation(name="operations/op")
        client.batch_update_jobs(request)

        # Exactly one RPC went out, carrying the request unchanged.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request

        # The routing header must appear in the outgoing metadata.
        _, _, call_kwargs = stub.mock_calls[0]
        expected_header = ("x-goog-request-params", "parent=parent_value")
        assert expected_header in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_batch_update_jobs_field_headers_async():
    """Async client must also propagate routing fields as header metadata."""
    client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials())

    # Populate a field that is part of the HTTP/1.1 URI; it must be echoed
    # back in the request-params header.
    request = job_service.BatchUpdateJobsRequest()
    request.parent = "parent_value"

    # Patch the transport-level callable and return an awaitable fake.
    with mock.patch.object(
        type(client.transport.batch_update_jobs), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.batch_update_jobs(request)

        # An RPC was recorded, carrying the request unchanged.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request

        # The routing header must appear in the outgoing metadata.
        _, _, call_kwargs = stub.mock_calls[0]
        expected_header = ("x-goog-request-params", "parent=parent_value")
        assert expected_header in call_kwargs["metadata"]
def test_batch_update_jobs_flattened():
    """Flattened keyword arguments must be copied into the request message."""
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials())

    # Patch the transport-level callable so no real RPC is attempted.
    with mock.patch.object(
        type(client.transport.batch_update_jobs), "__call__"
    ) as stub:
        stub.return_value = operations_pb2.Operation(name="operations/op")

        # Invoke the method using only flattened (truthy) keyword arguments.
        client.batch_update_jobs(
            parent="parent_value",
            jobs=[job.Job(name="name_value")],
        )

        # Exactly one RPC went out; each flattened value must appear on the
        # request message that was sent.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        sent_request = call_args[0]
        assert sent_request.parent == "parent_value"
        assert sent_request.jobs == [job.Job(name="name_value")]
def test_batch_update_jobs_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials())

    # Supplying both a fully formed request message and flattened fields
    # is ambiguous, so the client must reject the call outright.
    with pytest.raises(ValueError):
        client.batch_update_jobs(
            job_service.BatchUpdateJobsRequest(),
            jobs=[job.Job(name="name_value")],
            parent="parent_value",
        )
@pytest.mark.asyncio
async def test_batch_update_jobs_flattened_async():
    """Flattened kwargs must be copied into the request (async client).

    Calling ``batch_update_jobs`` with flattened fields should produce one
    underlying RPC whose request message carries those field values.
    """
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.batch_update_jobs), "__call__"
    ) as call:
        # Designate an appropriate awaitable return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.batch_update_jobs(
            parent="parent_value",
            jobs=[job.Job(name="name_value")],
        )
        # Establish that the underlying call was made exactly once with the
        # expected request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].jobs
        mock_val = [job.Job(name="name_value")]
        assert arg == mock_val
@pytest.mark.asyncio
async def test_batch_update_jobs_flattened_error_async():
    """Async client: request object plus flattened kwargs raises ValueError."""
    client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials())

    # Supplying both a fully formed request message and flattened fields
    # is ambiguous, so the client must reject the call outright.
    with pytest.raises(ValueError):
        await client.batch_update_jobs(
            job_service.BatchUpdateJobsRequest(),
            jobs=[job.Job(name="name_value")],
            parent="parent_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        job_service.DeleteJobRequest,
        dict,
    ],
)
def test_delete_job(request_type, transport: str = "grpc"):
    """delete_job issues one RPC and returns None on success."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )

    # Proto3 treats every field as optional at runtime and the API is
    # mocked, so an empty request is sufficient.
    request = request_type()

    # Patch the transport-level callable so no real RPC is attempted.
    with mock.patch.object(type(client.transport.delete_job), "__call__") as stub:
        stub.return_value = None
        response = client.delete_job(request)

        # Exactly one RPC went out, carrying a default request message.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == job_service.DeleteJobRequest()

    # delete_job has no payload to return.
    assert response is None
def test_delete_job_empty_call():
    """Coverage failsafe: a call with no request and no kwargs still works."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
    )

    # Patch the transport-level callable so no real RPC is attempted.
    with mock.patch.object(type(client.transport.delete_job), "__call__") as stub:
        client.delete_job()
        stub.assert_called()
        _, call_args, _ = stub.mock_calls[0]
        # A default-constructed request message is sent when none is given.
        assert call_args[0] == job_service.DeleteJobRequest()
@pytest.mark.asyncio
async def test_delete_job_async(
    transport: str = "grpc_asyncio", request_type=job_service.DeleteJobRequest
):
    """Async variant: one RPC goes out and None comes back."""
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )

    # Proto3 treats every field as optional at runtime and the API is
    # mocked, so an empty request is sufficient.
    request = request_type()

    # Patch the transport-level callable and return an awaitable fake.
    with mock.patch.object(type(client.transport.delete_job), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_job(request)

        # An RPC was recorded, carrying a default request message.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == job_service.DeleteJobRequest()

    # delete_job has no payload to return.
    assert response is None
@pytest.mark.asyncio
async def test_delete_job_async_from_dict():
    """Exercise the async path with the request supplied as a plain dict."""
    await test_delete_job_async(request_type=dict)
def test_delete_job_field_headers():
    """Routing fields must be forwarded as x-goog-request-params metadata."""
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials())

    # Populate a field that is part of the HTTP/1.1 URI; it must be echoed
    # back in the request-params header.
    request = job_service.DeleteJobRequest()
    request.name = "name_value"

    # Patch the transport-level callable so no real RPC is attempted.
    with mock.patch.object(type(client.transport.delete_job), "__call__") as stub:
        stub.return_value = None
        client.delete_job(request)

        # Exactly one RPC went out, carrying the request unchanged.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request

        # The routing header must appear in the outgoing metadata.
        _, _, call_kwargs = stub.mock_calls[0]
        expected_header = ("x-goog-request-params", "name=name_value")
        assert expected_header in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_delete_job_field_headers_async():
    """Async client must also propagate routing fields as header metadata."""
    client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials())

    # Populate a field that is part of the HTTP/1.1 URI; it must be echoed
    # back in the request-params header.
    request = job_service.DeleteJobRequest()
    request.name = "name_value"

    # Patch the transport-level callable and return an awaitable fake.
    with mock.patch.object(type(client.transport.delete_job), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_job(request)

        # An RPC was recorded, carrying the request unchanged.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request

        # The routing header must appear in the outgoing metadata.
        _, _, call_kwargs = stub.mock_calls[0]
        expected_header = ("x-goog-request-params", "name=name_value")
        assert expected_header in call_kwargs["metadata"]
def test_delete_job_flattened():
    """Flattened keyword arguments must be copied into the request message."""
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials())

    # Patch the transport-level callable so no real RPC is attempted.
    with mock.patch.object(type(client.transport.delete_job), "__call__") as stub:
        stub.return_value = None

        # Invoke the method using only flattened (truthy) keyword arguments.
        client.delete_job(
            name="name_value",
        )

        # Exactly one RPC went out; the flattened value must appear on the
        # request message that was sent.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        sent_request = call_args[0]
        assert sent_request.name == "name_value"
def test_delete_job_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials())

    # Supplying both a fully formed request message and flattened fields
    # is ambiguous, so the client must reject the call outright.
    with pytest.raises(ValueError):
        client.delete_job(
            job_service.DeleteJobRequest(),
            name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_job_flattened_async():
    """Flattened kwargs must be copied into the request (async client).

    Calling ``delete_job`` with a flattened ``name`` should produce one
    underlying RPC whose request message carries that value.
    """
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_job), "__call__") as call:
        # Designate an appropriate awaitable return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_job(
            name="name_value",
        )
        # Establish that the underlying call was made exactly once with the
        # expected request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_job_flattened_error_async():
    """Async client: request object plus flattened kwargs raises ValueError."""
    client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials())

    # Supplying both a fully formed request message and flattened fields
    # is ambiguous, so the client must reject the call outright.
    with pytest.raises(ValueError):
        await client.delete_job(
            job_service.DeleteJobRequest(),
            name="name_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        job_service.BatchDeleteJobsRequest,
        dict,
    ],
)
def test_batch_delete_jobs(request_type, transport: str = "grpc"):
    """batch_delete_jobs issues one RPC and returns an operation future."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )

    # Proto3 treats every field as optional at runtime and the API is
    # mocked, so an empty request is sufficient.
    request = request_type()

    # Patch the transport-level callable and hand back a fake LRO.
    with mock.patch.object(
        type(client.transport.batch_delete_jobs), "__call__"
    ) as stub:
        stub.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.batch_delete_jobs(request)

        # Exactly one RPC went out, carrying a default request message.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == job_service.BatchDeleteJobsRequest()

    # The client wraps the long-running operation in a future.
    assert isinstance(response, future.Future)
def test_batch_delete_jobs_empty_call():
    """Coverage failsafe: a call with no request and no kwargs still works."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
    )

    # Patch the transport-level callable so no real RPC is attempted.
    with mock.patch.object(
        type(client.transport.batch_delete_jobs), "__call__"
    ) as stub:
        client.batch_delete_jobs()
        stub.assert_called()
        _, call_args, _ = stub.mock_calls[0]
        # A default-constructed request message is sent when none is given.
        assert call_args[0] == job_service.BatchDeleteJobsRequest()
@pytest.mark.asyncio
async def test_batch_delete_jobs_async(
    transport: str = "grpc_asyncio", request_type=job_service.BatchDeleteJobsRequest
):
    """Async variant: one RPC goes out and an operation future comes back."""
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )

    # Proto3 treats every field as optional at runtime and the API is
    # mocked, so an empty request is sufficient.
    request = request_type()

    # Patch the transport-level callable and return an awaitable fake.
    with mock.patch.object(
        type(client.transport.batch_delete_jobs), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.batch_delete_jobs(request)

        # An RPC was recorded, carrying a default request message.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == job_service.BatchDeleteJobsRequest()

    # The client wraps the long-running operation in a future.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_batch_delete_jobs_async_from_dict():
    """Exercise the async path with the request supplied as a plain dict."""
    await test_batch_delete_jobs_async(request_type=dict)
def test_batch_delete_jobs_field_headers():
    """Routing fields must be forwarded as x-goog-request-params metadata."""
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials())

    # Populate a field that is part of the HTTP/1.1 URI; it must be echoed
    # back in the request-params header.
    request = job_service.BatchDeleteJobsRequest()
    request.parent = "parent_value"

    # Patch the transport-level callable so no real RPC is attempted.
    with mock.patch.object(
        type(client.transport.batch_delete_jobs), "__call__"
    ) as stub:
        stub.return_value = operations_pb2.Operation(name="operations/op")
        client.batch_delete_jobs(request)

        # Exactly one RPC went out, carrying the request unchanged.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request

        # The routing header must appear in the outgoing metadata.
        _, _, call_kwargs = stub.mock_calls[0]
        expected_header = ("x-goog-request-params", "parent=parent_value")
        assert expected_header in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_batch_delete_jobs_field_headers_async():
    """Async client must also propagate routing fields as header metadata."""
    client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials())

    # Populate a field that is part of the HTTP/1.1 URI; it must be echoed
    # back in the request-params header.
    request = job_service.BatchDeleteJobsRequest()
    request.parent = "parent_value"

    # Patch the transport-level callable and return an awaitable fake.
    with mock.patch.object(
        type(client.transport.batch_delete_jobs), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.batch_delete_jobs(request)

        # An RPC was recorded, carrying the request unchanged.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request

        # The routing header must appear in the outgoing metadata.
        _, _, call_kwargs = stub.mock_calls[0]
        expected_header = ("x-goog-request-params", "parent=parent_value")
        assert expected_header in call_kwargs["metadata"]
def test_batch_delete_jobs_flattened():
    """Flattened keyword arguments must be copied into the request message."""
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials())

    # Patch the transport-level callable so no real RPC is attempted.
    with mock.patch.object(
        type(client.transport.batch_delete_jobs), "__call__"
    ) as stub:
        stub.return_value = operations_pb2.Operation(name="operations/op")

        # Invoke the method using only flattened (truthy) keyword arguments.
        client.batch_delete_jobs(
            parent="parent_value",
            names=["names_value"],
        )

        # Exactly one RPC went out; each flattened value must appear on the
        # request message that was sent.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        sent_request = call_args[0]
        assert sent_request.parent == "parent_value"
        assert sent_request.names == ["names_value"]
def test_batch_delete_jobs_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials())

    # Supplying both a fully formed request message and flattened fields
    # is ambiguous, so the client must reject the call outright.
    with pytest.raises(ValueError):
        client.batch_delete_jobs(
            job_service.BatchDeleteJobsRequest(),
            names=["names_value"],
            parent="parent_value",
        )
@pytest.mark.asyncio
async def test_batch_delete_jobs_flattened_async():
    """Flattened kwargs must be copied into the request (async client).

    Calling ``batch_delete_jobs`` with flattened fields should produce one
    underlying RPC whose request message carries those field values.
    """
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.batch_delete_jobs), "__call__"
    ) as call:
        # Designate an appropriate awaitable return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.batch_delete_jobs(
            parent="parent_value",
            names=["names_value"],
        )
        # Establish that the underlying call was made exactly once with the
        # expected request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].names
        mock_val = ["names_value"]
        assert arg == mock_val
@pytest.mark.asyncio
async def test_batch_delete_jobs_flattened_error_async():
    """Async client: request object plus flattened kwargs raises ValueError."""
    client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials())

    # Supplying both a fully formed request message and flattened fields
    # is ambiguous, so the client must reject the call outright.
    with pytest.raises(ValueError):
        await client.batch_delete_jobs(
            job_service.BatchDeleteJobsRequest(),
            names=["names_value"],
            parent="parent_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        job_service.ListJobsRequest,
        dict,
    ],
)
def test_list_jobs(request_type, transport: str = "grpc"):
    """list_jobs issues one RPC and wraps the response in a pager."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )

    # Proto3 treats every field as optional at runtime and the API is
    # mocked, so an empty request is sufficient.
    request = request_type()

    # Patch the transport-level callable and hand back a canned response.
    with mock.patch.object(type(client.transport.list_jobs), "__call__") as stub:
        stub.return_value = job_service.ListJobsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_jobs(request)

        # Exactly one RPC went out, carrying a default request message.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == job_service.ListJobsRequest()

    # The client wraps the raw response in a pager exposing the token.
    assert isinstance(response, pagers.ListJobsPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_jobs_empty_call():
    """Coverage failsafe: a call with no request and no kwargs still works."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
    )

    # Patch the transport-level callable so no real RPC is attempted.
    with mock.patch.object(type(client.transport.list_jobs), "__call__") as stub:
        client.list_jobs()
        stub.assert_called()
        _, call_args, _ = stub.mock_calls[0]
        # A default-constructed request message is sent when none is given.
        assert call_args[0] == job_service.ListJobsRequest()
@pytest.mark.asyncio
async def test_list_jobs_async(
    transport: str = "grpc_asyncio", request_type=job_service.ListJobsRequest
):
    """Async variant: one RPC goes out and an async pager comes back."""
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )

    # Proto3 treats every field as optional at runtime and the API is
    # mocked, so an empty request is sufficient.
    request = request_type()

    # Patch the transport-level callable and return an awaitable fake.
    with mock.patch.object(type(client.transport.list_jobs), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            job_service.ListJobsResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.list_jobs(request)

        # An RPC was recorded, carrying a default request message.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == job_service.ListJobsRequest()

    # The client wraps the raw response in an async pager.
    assert isinstance(response, pagers.ListJobsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_jobs_async_from_dict():
    """Exercise the async path with the request supplied as a plain dict."""
    await test_list_jobs_async(request_type=dict)
def test_list_jobs_field_headers():
    """Routing fields must be forwarded as x-goog-request-params metadata."""
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials())

    # Populate a field that is part of the HTTP/1.1 URI; it must be echoed
    # back in the request-params header.
    request = job_service.ListJobsRequest()
    request.parent = "parent_value"

    # Patch the transport-level callable so no real RPC is attempted.
    with mock.patch.object(type(client.transport.list_jobs), "__call__") as stub:
        stub.return_value = job_service.ListJobsResponse()
        client.list_jobs(request)

        # Exactly one RPC went out, carrying the request unchanged.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request

        # The routing header must appear in the outgoing metadata.
        _, _, call_kwargs = stub.mock_calls[0]
        expected_header = ("x-goog-request-params", "parent=parent_value")
        assert expected_header in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_list_jobs_field_headers_async():
    """Async client must also propagate routing fields as header metadata."""
    client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials())

    # Populate a field that is part of the HTTP/1.1 URI; it must be echoed
    # back in the request-params header.
    request = job_service.ListJobsRequest()
    request.parent = "parent_value"

    # Patch the transport-level callable and return an awaitable fake.
    with mock.patch.object(type(client.transport.list_jobs), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            job_service.ListJobsResponse()
        )
        await client.list_jobs(request)

        # An RPC was recorded, carrying the request unchanged.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request

        # The routing header must appear in the outgoing metadata.
        _, _, call_kwargs = stub.mock_calls[0]
        expected_header = ("x-goog-request-params", "parent=parent_value")
        assert expected_header in call_kwargs["metadata"]
def test_list_jobs_flattened():
    """Flattened keyword arguments must be copied into the request message."""
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials())

    # Patch the transport-level callable so no real RPC is attempted.
    with mock.patch.object(type(client.transport.list_jobs), "__call__") as stub:
        stub.return_value = job_service.ListJobsResponse()

        # Invoke the method using only flattened (truthy) keyword arguments.
        client.list_jobs(
            parent="parent_value",
            filter="filter_value",
        )

        # Exactly one RPC went out; each flattened value must appear on the
        # request message that was sent.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        sent_request = call_args[0]
        assert sent_request.parent == "parent_value"
        assert sent_request.filter == "filter_value"
def test_list_jobs_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials())

    # Supplying both a fully formed request message and flattened fields
    # is ambiguous, so the client must reject the call outright.
    with pytest.raises(ValueError):
        client.list_jobs(
            job_service.ListJobsRequest(),
            filter="filter_value",
            parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_jobs_flattened_async():
    """Flattened kwargs must be copied into the request (async client).

    Calling ``list_jobs`` with flattened fields should produce one
    underlying RPC whose request message carries those field values.
    """
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
        # Designate an appropriate awaitable return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            job_service.ListJobsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_jobs(
            parent="parent_value",
            filter="filter_value",
        )
        # Establish that the underlying call was made exactly once with the
        # expected request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].filter
        mock_val = "filter_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_jobs_flattened_error_async():
    """Async client: request object plus flattened kwargs raises ValueError."""
    client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials())

    # Supplying both a fully formed request message and flattened fields
    # is ambiguous, so the client must reject the call outright.
    with pytest.raises(ValueError):
        await client.list_jobs(
            job_service.ListJobsRequest(),
            filter="filter_value",
            parent="parent_value",
        )
def test_list_jobs_pager(transport_name: str = "grpc"):
    """The sync pager must flatten jobs across all response pages."""
    client = JobServiceClient(
        # The credentials must be an instance; passing the class itself
        # (without parentheses) was a latent bug.
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
        # Set the response to a series of pages (3 + 0 + 1 + 2 jobs).
        call.side_effect = (
            job_service.ListJobsResponse(
                jobs=[
                    job.Job(),
                    job.Job(),
                    job.Job(),
                ],
                next_page_token="abc",
            ),
            job_service.ListJobsResponse(
                jobs=[],
                next_page_token="def",
            ),
            job_service.ListJobsResponse(
                jobs=[
                    job.Job(),
                ],
                next_page_token="ghi",
            ),
            job_service.ListJobsResponse(
                jobs=[
                    job.Job(),
                    job.Job(),
                ],
            ),
            RuntimeError,
        )

        # The pager records the routing metadata it was created with.
        expected_metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_jobs(request={})

        assert pager._metadata == expected_metadata

        # Iterating the pager transparently walks every page.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, job.Job) for i in results)
def test_list_jobs_pages(transport_name: str = "grpc"):
    """The ``pages`` iterator must yield one raw response per page."""
    client = JobServiceClient(
        # The credentials must be an instance; passing the class itself
        # (without parentheses) was a latent bug.
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            job_service.ListJobsResponse(
                jobs=[
                    job.Job(),
                    job.Job(),
                    job.Job(),
                ],
                next_page_token="abc",
            ),
            job_service.ListJobsResponse(
                jobs=[],
                next_page_token="def",
            ),
            job_service.ListJobsResponse(
                jobs=[
                    job.Job(),
                ],
                next_page_token="ghi",
            ),
            job_service.ListJobsResponse(
                jobs=[
                    job.Job(),
                    job.Job(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_jobs(request={}).pages)
        # Each page must expose the raw response with its own token; the
        # final page carries an empty token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_jobs_async_pager():
    """The async pager must flatten jobs across all response pages."""
    client = JobServiceAsyncClient(
        # The credentials must be an instance; passing the class itself
        # (without parentheses) was a latent bug.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_jobs), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages (3 + 0 + 1 + 2 jobs).
        call.side_effect = (
            job_service.ListJobsResponse(
                jobs=[
                    job.Job(),
                    job.Job(),
                    job.Job(),
                ],
                next_page_token="abc",
            ),
            job_service.ListJobsResponse(
                jobs=[],
                next_page_token="def",
            ),
            job_service.ListJobsResponse(
                jobs=[
                    job.Job(),
                ],
                next_page_token="ghi",
            ),
            job_service.ListJobsResponse(
                jobs=[
                    job.Job(),
                    job.Job(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_jobs(
            request={},
        )
        assert async_pager.next_page_token == "abc"
        # Iterating the async pager transparently walks every page.
        responses = []
        async for response in async_pager:  # pragma: no branch
            responses.append(response)

        assert len(responses) == 6
        assert all(isinstance(i, job.Job) for i in responses)
@pytest.mark.asyncio
async def test_list_jobs_async_pages():
    """The async ``pages`` iterator must yield one raw response per page."""
    client = JobServiceAsyncClient(
        # The credentials must be an instance; passing the class itself
        # (without parentheses) was a latent bug.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_jobs), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            job_service.ListJobsResponse(
                jobs=[
                    job.Job(),
                    job.Job(),
                    job.Job(),
                ],
                next_page_token="abc",
            ),
            job_service.ListJobsResponse(
                jobs=[],
                next_page_token="def",
            ),
            job_service.ListJobsResponse(
                jobs=[
                    job.Job(),
                ],
                next_page_token="ghi",
            ),
            job_service.ListJobsResponse(
                jobs=[
                    job.Job(),
                    job.Job(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (
            await client.list_jobs(request={})
        ).pages:  # pragma: no branch
            pages.append(page_)
        # Each page must expose the raw response with its own token; the
        # final page carries an empty token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
    "request_type",
    [
        job_service.SearchJobsRequest,
        dict,
    ],
)
def test_search_jobs(request_type, transport: str = "grpc"):
    """search_jobs sends the request over gRPC and returns the typed response."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.search_jobs), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = job_service.SearchJobsResponse(
            next_page_token="next_page_token_value",
            total_size=1086,
            broadened_query_jobs_count=2766,
        )
        response = client.search_jobs(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == job_service.SearchJobsRequest()
        # Establish that the response is the type that we expect.
        assert response.raw_page is response
        assert isinstance(response, job_service.SearchJobsResponse)
        assert response.next_page_token == "next_page_token_value"
        assert response.total_size == 1086
        assert response.broadened_query_jobs_count == 2766
def test_search_jobs_empty_call():
    """Calling search_jobs() with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.search_jobs), "__call__") as call:
        client.search_jobs()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == job_service.SearchJobsRequest()
@pytest.mark.asyncio
async def test_search_jobs_async(
    transport: str = "grpc_asyncio", request_type=job_service.SearchJobsRequest
):
    """Async variant: search_jobs awaits the stub and returns the typed response."""
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.search_jobs), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            job_service.SearchJobsResponse(
                next_page_token="next_page_token_value",
                total_size=1086,
                broadened_query_jobs_count=2766,
            )
        )
        response = await client.search_jobs(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == job_service.SearchJobsRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, job_service.SearchJobsResponse)
        assert response.next_page_token == "next_page_token_value"
        assert response.total_size == 1086
        assert response.broadened_query_jobs_count == 2766
@pytest.mark.asyncio
async def test_search_jobs_async_from_dict():
    """Delegate to the async test using a plain dict as the request."""
    await test_search_jobs_async(request_type=dict)
def test_search_jobs_field_headers():
    """Routing headers derived from request.parent must be sent as metadata."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = job_service.SearchJobsRequest()
    request.parent = "parent_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.search_jobs), "__call__") as call:
        call.return_value = job_service.SearchJobsResponse()
        client.search_jobs(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "parent=parent_value",
        ) in kw["metadata"]
@pytest.mark.asyncio
async def test_search_jobs_field_headers_async():
    """Async variant: routing headers from request.parent are sent as metadata."""
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = job_service.SearchJobsRequest()
    request.parent = "parent_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.search_jobs), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            job_service.SearchJobsResponse()
        )
        await client.search_jobs(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "parent=parent_value",
        ) in kw["metadata"]
@pytest.mark.parametrize(
    "request_type",
    [
        job_service.SearchJobsRequest,
        dict,
    ],
)
def test_search_jobs_for_alert(request_type, transport: str = "grpc"):
    """search_jobs_for_alert reuses SearchJobsRequest and returns the response."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_jobs_for_alert), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = job_service.SearchJobsResponse(
            next_page_token="next_page_token_value",
            total_size=1086,
            broadened_query_jobs_count=2766,
        )
        response = client.search_jobs_for_alert(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == job_service.SearchJobsRequest()
        # Establish that the response is the type that we expect.
        assert response.raw_page is response
        assert isinstance(response, job_service.SearchJobsResponse)
        assert response.next_page_token == "next_page_token_value"
        assert response.total_size == 1086
        assert response.broadened_query_jobs_count == 2766
def test_search_jobs_for_alert_empty_call():
    """Calling search_jobs_for_alert() with no arguments sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_jobs_for_alert), "__call__"
    ) as call:
        client.search_jobs_for_alert()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == job_service.SearchJobsRequest()
@pytest.mark.asyncio
async def test_search_jobs_for_alert_async(
    transport: str = "grpc_asyncio", request_type=job_service.SearchJobsRequest
):
    """Async variant: search_jobs_for_alert awaits the stub and returns the response."""
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_jobs_for_alert), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            job_service.SearchJobsResponse(
                next_page_token="next_page_token_value",
                total_size=1086,
                broadened_query_jobs_count=2766,
            )
        )
        response = await client.search_jobs_for_alert(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == job_service.SearchJobsRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, job_service.SearchJobsResponse)
        assert response.next_page_token == "next_page_token_value"
        assert response.total_size == 1086
        assert response.broadened_query_jobs_count == 2766
@pytest.mark.asyncio
async def test_search_jobs_for_alert_async_from_dict():
    """Delegate to the async test using a plain dict as the request."""
    await test_search_jobs_for_alert_async(request_type=dict)
def test_search_jobs_for_alert_field_headers():
    """Routing headers derived from request.parent must be sent as metadata."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = job_service.SearchJobsRequest()
    request.parent = "parent_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_jobs_for_alert), "__call__"
    ) as call:
        call.return_value = job_service.SearchJobsResponse()
        client.search_jobs_for_alert(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "parent=parent_value",
        ) in kw["metadata"]
@pytest.mark.asyncio
async def test_search_jobs_for_alert_field_headers_async():
    """Async variant: routing headers from request.parent are sent as metadata."""
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = job_service.SearchJobsRequest()
    request.parent = "parent_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_jobs_for_alert), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            job_service.SearchJobsResponse()
        )
        await client.search_jobs_for_alert(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "parent=parent_value",
        ) in kw["metadata"]
def test_credentials_transport_error():
    """Mutually exclusive client options with a transport must raise ValueError."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.JobServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = JobServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.JobServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = JobServiceClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide an api_key and a transport instance.
    transport = transports.JobServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = JobServiceClient(
            client_options=options,
            transport=transport,
        )
    # It is an error to provide an api_key and a credential.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = JobServiceClient(
            client_options=options, credentials=ga_credentials.AnonymousCredentials()
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.JobServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = JobServiceClient(
            client_options={"scopes": ["1", "2"]},
            transport=transport,
        )
def test_transport_instance():
    """A client built with an explicit transport must keep that exact instance."""
    custom_transport = transports.JobServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert JobServiceClient(transport=custom_transport).transport is custom_transport
def test_transport_get_channel():
    """Both sync and async gRPC transports expose a usable channel."""
    for transport_cls in (
        transports.JobServiceGrpcTransport,
        transports.JobServiceGrpcAsyncIOTransport,
    ):
        transport = transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        assert transport.grpc_channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.JobServiceGrpcTransport,
        transports.JobServiceGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Transports fall back to application-default credentials when none given."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
@pytest.mark.parametrize(
    "transport_name",
    [
        "grpc",
    ],
)
def test_transport_kind(transport_name):
    """get_transport_class resolves a name to a transport reporting that kind."""
    transport_cls = JobServiceClient.get_transport_class(transport_name)
    transport = transport_cls(credentials=ga_credentials.AnonymousCredentials())
    assert transport.kind == transport_name
def test_transport_grpc_default():
    """With no transport argument, the client defaults to the gRPC transport."""
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials())
    assert isinstance(client.transport, transports.JobServiceGrpcTransport)
def test_job_service_base_transport_error():
    """Supplying both credentials and a credentials file is rejected."""
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transports.JobServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_job_service_base_transport():
    """The abstract base transport raises NotImplementedError everywhere."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.talent_v4.services.job_service.transports.JobServiceTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.JobServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "create_job",
        "batch_create_jobs",
        "get_job",
        "update_job",
        "batch_update_jobs",
        "delete_job",
        "batch_delete_jobs",
        "list_jobs",
        "search_jobs",
        "search_jobs_for_alert",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    with pytest.raises(NotImplementedError):
        transport.close()
    # Additionally, the LRO client (a property) should
    # also raise NotImplementedError
    with pytest.raises(NotImplementedError):
        transport.operations_client
    # Catch all for all remaining methods and properties
    remainder = [
        "kind",
    ]
    for r in remainder:
        with pytest.raises(NotImplementedError):
            getattr(transport, r)()
def test_job_service_base_transport_with_credentials_file():
    """A credentials file is loaded with the service's default scopes."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.talent_v4.services.job_service.transports.JobServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.JobServiceTransport(
            credentials_file="credentials.json",
            quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/jobs",
            ),
            quota_project_id="octopus",
        )
def test_job_service_base_transport_with_adc():
    """The base transport falls back to ADC when no credentials are supplied."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.talent_v4.services.job_service.transports.JobServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.JobServiceTransport()
        adc.assert_called_once()
def test_job_service_auth_adc():
    """The client requests ADC with the service's default scopes."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        JobServiceClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/jobs",
            ),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.JobServiceGrpcTransport,
        transports.JobServiceGrpcAsyncIOTransport,
    ],
)
def test_job_service_transport_auth_adc(transport_class):
    """Transports pass user scopes and quota project through to ADC."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/jobs",
            ),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.JobServiceGrpcTransport,
        transports.JobServiceGrpcAsyncIOTransport,
    ],
)
def test_job_service_transport_auth_gdch_credentials(transport_class):
    """GDC-H credentials receive the api_audience (or host when audience unset)."""
    host = "https://language.com"
    api_audience_tests = [None, "https://language2.com"]
    api_audience_expect = [host, "https://language2.com"]
    for t, e in zip(api_audience_tests, api_audience_expect):
        with mock.patch.object(google.auth, "default", autospec=True) as adc:
            gdch_mock = mock.MagicMock()
            type(gdch_mock).with_gdch_audience = mock.PropertyMock(
                return_value=gdch_mock
            )
            adc.return_value = (gdch_mock, None)
            transport_class(host=host, api_audience=t)
            gdch_mock.with_gdch_audience.assert_called_once_with(e)
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.JobServiceGrpcTransport, grpc_helpers),
        (transports.JobServiceGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_job_service_transport_create_channel(transport_class, grpc_helpers):
    """create_channel is invoked with the service host, scopes, and gRPC options."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        create_channel.assert_called_with(
            "jobs.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/jobs",
            ),
            scopes=["1", "2"],
            default_host="jobs.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport],
)
def test_job_service_grpc_transport_client_cert_source_for_mtls(transport_class):
    """mTLS setup honors ssl_channel_credentials, else client_cert_source_for_mtls."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
@pytest.mark.parametrize(
    "transport_name",
    [
        "grpc",
        "grpc_asyncio",
    ],
)
def test_job_service_host_no_port(transport_name):
    """An endpoint without a port gets the default :443 appended."""
    endpoint_options = client_options.ClientOptions(api_endpoint="jobs.googleapis.com")
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=endpoint_options,
        transport=transport_name,
    )
    assert client.transport._host == "jobs.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_name",
    [
        "grpc",
        "grpc_asyncio",
    ],
)
def test_job_service_host_with_port(transport_name):
    """An endpoint with an explicit port is used verbatim."""
    endpoint_options = client_options.ClientOptions(
        api_endpoint="jobs.googleapis.com:8000"
    )
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=endpoint_options,
        transport=transport_name,
    )
    assert client.transport._host == "jobs.googleapis.com:8000"
def test_job_service_grpc_transport_channel():
    """A user-supplied channel is adopted verbatim by the sync gRPC transport."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.JobServiceGrpcTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8: compare against None with "is", not "==".
    assert transport._ssl_channel_credentials is None
def test_job_service_grpc_asyncio_transport_channel():
    """A user-supplied channel is adopted verbatim by the asyncio gRPC transport."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.JobServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8: compare against None with "is", not "==".
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport],
)
def test_job_service_transport_channel_mtls_with_client_cert_source(transport_class):
    """Deprecated mTLS args build SSL creds from the callback and warn."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport],
)
def test_job_service_transport_channel_mtls_with_adc(transport_class):
    """Deprecated mTLS endpoint without a cert source falls back to ADC SSL creds."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_job_service_grpc_lro_client():
    """The sync transport lazily creates and caches one LRO operations client."""
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    transport = client.transport
    # Ensure that we have a api-core operations client.
    assert isinstance(
        transport.operations_client,
        operations_v1.OperationsClient,
    )
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_job_service_grpc_lro_async_client():
    """The async transport lazily creates and caches one LRO operations client."""
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc_asyncio",
    )
    transport = client.transport
    # Ensure that we have a api-core operations client.
    assert isinstance(
        transport.operations_client,
        operations_v1.OperationsAsyncClient,
    )
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_company_path():
    """company_path() builds the canonical company resource name."""
    project, tenant, company = "squid", "clam", "whelk"
    want = f"projects/{project}/tenants/{tenant}/companies/{company}"
    assert JobServiceClient.company_path(project, tenant, company) == want
def test_parse_company_path():
    """parse_company_path() inverts company_path()."""
    parts = {
        "project": "octopus",
        "tenant": "oyster",
        "company": "nudibranch",
    }
    path = JobServiceClient.company_path(**parts)
    # Round-trip: parsing the built path recovers the original components.
    assert JobServiceClient.parse_company_path(path) == parts
def test_job_path():
    """job_path() builds the canonical job resource name."""
    project, tenant, job = "cuttlefish", "mussel", "winkle"
    want = f"projects/{project}/tenants/{tenant}/jobs/{job}"
    assert JobServiceClient.job_path(project, tenant, job) == want
def test_parse_job_path():
    """parse_job_path() inverts job_path()."""
    parts = {
        "project": "nautilus",
        "tenant": "scallop",
        "job": "abalone",
    }
    path = JobServiceClient.job_path(**parts)
    # Round-trip: parsing the built path recovers the original components.
    assert JobServiceClient.parse_job_path(path) == parts
def test_tenant_path():
    """tenant_path() builds the canonical tenant resource name."""
    project, tenant = "squid", "clam"
    want = f"projects/{project}/tenants/{tenant}"
    assert JobServiceClient.tenant_path(project, tenant) == want
def test_parse_tenant_path():
    """parse_tenant_path() inverts tenant_path()."""
    parts = {
        "project": "whelk",
        "tenant": "octopus",
    }
    path = JobServiceClient.tenant_path(**parts)
    # Round-trip: parsing the built path recovers the original components.
    assert JobServiceClient.parse_tenant_path(path) == parts
def test_common_billing_account_path():
    """common_billing_account_path() builds billingAccounts/{id}."""
    billing_account = "oyster"
    want = f"billingAccounts/{billing_account}"
    assert JobServiceClient.common_billing_account_path(billing_account) == want
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path() inverts the builder."""
    parts = {
        "billing_account": "nudibranch",
    }
    path = JobServiceClient.common_billing_account_path(**parts)
    # Round-trip: parsing the built path recovers the original components.
    assert JobServiceClient.parse_common_billing_account_path(path) == parts
def test_common_folder_path():
    """common_folder_path() builds folders/{folder}."""
    folder = "cuttlefish"
    want = f"folders/{folder}"
    assert JobServiceClient.common_folder_path(folder) == want
def test_parse_common_folder_path():
    """parse_common_folder_path() inverts the builder."""
    parts = {
        "folder": "mussel",
    }
    path = JobServiceClient.common_folder_path(**parts)
    # Round-trip: parsing the built path recovers the original components.
    assert JobServiceClient.parse_common_folder_path(path) == parts
def test_common_organization_path():
    """common_organization_path() builds organizations/{organization}."""
    organization = "winkle"
    want = f"organizations/{organization}"
    assert JobServiceClient.common_organization_path(organization) == want
def test_parse_common_organization_path():
    """parse_common_organization_path() inverts the builder."""
    parts = {
        "organization": "nautilus",
    }
    path = JobServiceClient.common_organization_path(**parts)
    # Round-trip: parsing the built path recovers the original components.
    assert JobServiceClient.parse_common_organization_path(path) == parts
def test_common_project_path():
    """common_project_path() builds projects/{project}."""
    project = "scallop"
    want = f"projects/{project}"
    assert JobServiceClient.common_project_path(project) == want
def test_parse_common_project_path():
    """parse_common_project_path() inverts the builder."""
    parts = {
        "project": "abalone",
    }
    path = JobServiceClient.common_project_path(**parts)
    # Round-trip: parsing the built path recovers the original components.
    assert JobServiceClient.parse_common_project_path(path) == parts
def test_common_location_path():
    """common_location_path() builds projects/{project}/locations/{location}."""
    project, location = "squid", "clam"
    want = f"projects/{project}/locations/{location}"
    assert JobServiceClient.common_location_path(project, location) == want
def test_parse_common_location_path():
    """parse_common_location_path() inverts the builder."""
    parts = {
        "project": "whelk",
        "location": "octopus",
    }
    path = JobServiceClient.common_location_path(**parts)
    # Round-trip: parsing the built path recovers the original components.
    assert JobServiceClient.parse_common_location_path(path) == parts
def test_client_with_default_client_info():
    """client_info is forwarded to _prep_wrapped_messages on both client paths."""
    client_info = gapic_v1.client_info.ClientInfo()
    with mock.patch.object(
        transports.JobServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        client = JobServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    with mock.patch.object(
        transports.JobServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = JobServiceClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """Exiting the async client context closes the underlying gRPC channel."""
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(getattr(client.transport, "grpc_channel")), "close"
    ) as close:
        async with client:
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """Exiting the sync client context closes the transport's channel attribute."""
    # NOTE: this local deliberately shadows the imported `transports` module
    # for the duration of this test; it maps transport name -> channel attr.
    transports = {
        "grpc": "_grpc_channel",
    }
    for transport, close_name in transports.items():
        client = JobServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """Using the client as a context manager delegates close() to the transport."""
    # NOTE: this local deliberately shadows the imported `transports` module.
    transports = [
        "grpc",
    ]
    for transport in transports:
        client = JobServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (JobServiceClient, transports.JobServiceGrpcTransport),
        (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    """An api_key client option is exchanged for API-key credentials."""
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        mock_cred = mock.Mock()
        get_api_key_credentials.return_value = mock_cred
        options = client_options.ClientOptions()
        options.api_key = "api_key"
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            patched.assert_called_once_with(
                credentials=mock_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
                api_audience=None,
            )
| {
"content_hash": "61b5158afbe4fb8fdf3c93187a438a60",
"timestamp": "",
"source": "github",
"line_count": 4123,
"max_line_length": 107,
"avg_line_length": 35.97647344166869,
"alnum_prop": 0.6402235540783787,
"repo_name": "googleapis/python-talent",
"id": "b63847b151cbc37d147497c2fa0464cfdf140324",
"size": "148931",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/unit/gapic/talent_v4/test_job_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2538179"
},
{
"name": "Shell",
"bytes": "30660"
}
],
"symlink_target": ""
} |
import os, string, re, java, javax, sys, StringIO
from xml.dom import minidom
from java.awt import Color
from java.awt import FlowLayout
from java.awt import BorderLayout
from java.awt import Rectangle
from java.awt import GridLayout
from java.awt import GridBagLayout
from java.awt import GridBagConstraints
from java.awt import Dimension
from java.awt import Component
from java.awt import Font
from javax.swing import JFrame
from javax.swing import JInternalFrame
from javax.swing import JPanel
from javax.swing import JLabel
from javax.swing import JButton
from javax.swing import JTabbedPane
from javax.swing import BoxLayout
from javax.swing import Box
from nova.library.graphics.novagl import NGLCanvas
from nova.library.core import Game
from nova.library.core import Settings
from nova.library.core import BuiltInCommand
from nova.library.core import PythonCommand
from nova.library.graphics.gui import GUI
from nova.library.graphics.gui import SettingsPanel
from nova.library.graphics.gui import LogsPanel
from nova.library.graphics.gui import CustomKeyListener
from nova.library.graphics.gui import CustomActionListener
from nova.library.graphics.gui import SimpleActionListener
from nova.library.graphics.gui import TulipDialog
from nova.library.utilities import Parser
from java.awt.event import KeyListener
from java.awt import Component
from java.awt.event import KeyEvent
def getChild(tag,node):
	"""Return the first direct child of *node* whose nodeName equals *tag*, or None."""
	for candidate in node.childNodes:
		if candidate.nodeName == tag:
			return candidate
	return None
def addListeners(game,component,node):
	"""Attach key/action listeners to *component* as declared by XML *node*.

	The node's "listeners" attribute is a comma-separated list of listener
	kinds ("key" and/or "action"); its <bindings> child maps <keyevent> /
	<actionevent> elements to built-in or Python commands.
	"""
	keylistener = None
	actionlistener = None
	if node.hasAttribute("listeners"):
		for listener in node.getAttribute("listeners").split(","):
			if listener=="key":
				keylistener = CustomKeyListener()
				component.addKeyListener(keylistener)
			elif listener=="action":
				actionlistener = SimpleActionListener()
				component.addActionListener(actionlistener)
	bindings = getChild("bindings",node)
	if bindings!=None:
		for binding in bindings.childNodes:
			if binding.nodeName!="#text":
				if binding.nodeName=="keyevent":
					# NOTE(review): assumes "key" appeared in the listeners
					# attribute; otherwise keylistener is None here and the
					# calls below raise AttributeError.  Same for "action".
					for command in binding.getElementsByTagName("command"):
						command_type = command.getAttribute("type")
						if command_type=="builtin":
							if(command.hasAttribute("args")):
								keylistener.addKeyBinding(int(binding.getAttribute("type")),int(binding.getAttribute("code")),BuiltInCommand(game,command.getAttribute("name"),command.getAttribute("args")))
							else:
								keylistener.addKeyBinding(int(binding.getAttribute("type")),int(binding.getAttribute("code")),BuiltInCommand(game,command.getAttribute("name"),""))
						elif command_type=="python":
							if(command.hasAttribute("args")):
								keylistener.addKeyBinding(int(binding.getAttribute("type")),int(binding.getAttribute("code")),PythonCommand(game,command.getAttribute("file"),command.getAttribute("args")))
							else:
								keylistener.addKeyBinding(int(binding.getAttribute("type")),int(binding.getAttribute("code")),PythonCommand(game,command.getAttribute("file"),""))
				elif binding.nodeName=="actionevent":
					for command in binding.getElementsByTagName("command"):
						command_type = command.getAttribute("type")
						if command_type=="builtin":
							if(command.hasAttribute("args")):
								actionlistener.addCommand(BuiltInCommand(game,command.getAttribute("name"),command.getAttribute("args")))
							else:
								actionlistener.addCommand(BuiltInCommand(game,command.getAttribute("name"),""))
						elif command_type=="python":
							if(command.hasAttribute("args")):
								actionlistener.addCommand(PythonCommand(game,command.getAttribute("file"),command.getAttribute("args")))
							else:
								actionlistener.addCommand(PythonCommand(game,command.getAttribute("file"),""))
def reload_dialog(game,gui,filename,id):
	"@sig public javax.swing.JPanel load(nova.library.core.Game game, nova.library.gui.GUI gui, java.lang.String filename, java.lang.String id)"
	# Builds a JDialog from the <dialog id=...> element of an XML GUI file.
	# NOTE(review): this function looks dead/broken as written — confirm any
	# caller exists before fixing:
	#  - `JDialog` and `owner` are never imported/defined in this module;
	#  - the width/height guards test `node` (the leftover loop variable)
	#    instead of `xmlJDialog`;
	#  - `reloadComponent` is called with 4 arguments below but is defined
	#    with 3 parameters elsewhere in this file.
	#((javax.swing.plaf.basic.BasicInternalFrameUI)myInternalFrame.getUI()).setNorthPane(null);
	doc = minidom.parse(filename)
	xmlGUI = doc.getElementsByTagName("gui")[0]
	xmlJDialog = None
	# Locate the <dialog> element whose id matches.
	for node in xmlGUI.getElementsByTagName("dialog"):
		if(node.getAttribute("id")==id):
			xmlJDialog = node
			break
	settings = game.getSettings();
	res = Parser.parseDimension(settings.getString(Settings.RESOLUTION))
	# -1 means "fall back to the configured screen resolution" below.
	w = int(xmlJDialog.getAttribute("width")) if node.hasAttribute("width") else -1
	h = int(xmlJDialog.getAttribute("height")) if node.hasAttribute("height") else -1
	jdialog = JDialog(owner,(xmlJDialog.getAttribute("title") if xmlJDialog.hasAttribute("title") else ""),(xmlJDialog.getAttribute("modal") if xmlJDialog.hasAttribute("modal") else "true")=="true")
	#jdialog.setUndecorated((xmlJDialog.getAttribute("undecorated") if xmlJDialog.hasAttribute("undecorated") else "true")=="true")
	jdialog.setSize((w if w >= 0 else int(res.getWidth())),(h if h >= 0 else int(res.getHeight())))
	jdialog.setDefaultCloseOperation(JDialog.HIDE_ON_CLOSE)
	jdialog.setContentPane(reloadComponent(game,gui,int(xmlJDialog.getAttribute("id")),getChild("jpanel",xmlJDialog)))
	return jdialog
def compile_dialog(dialog,game,filename,id):
	"@sig public void compile_dialog(nova.library.graphics.gui.TulipDialog dialog, nova.library.core.Game game, java.lang.String filename, java.lang.String id)"
	# NOTE: the @sig docstring above is a jythonc compiler directive; keep it verbatim.
	# Populate *dialog* from the <dialog id=...> element of the XML GUI file.
	document = minidom.parse(filename)
	gui_root = document.getElementsByTagName("gui")[0]
	target = None
	for candidate in gui_root.getElementsByTagName("dialog"):
		if candidate.getAttribute("id") == id:
			target = candidate
			break
	resolution = Parser.parseDimension(game.getSettings().getString(Settings.RESOLUTION))
	# -1 means "use the configured screen resolution" below.
	width = int(target.getAttribute("width")) if target.hasAttribute("width") else -1
	height = int(target.getAttribute("height")) if target.hasAttribute("height") else -1
	dialog.setDynamicSize(width,height)
	fallback_w = int(resolution.getWidth())
	fallback_h = int(resolution.getHeight())
	dialog.setBounds(0,0,(width if width >= 0 else fallback_w),(height if height >= 0 else fallback_h))
	dialog.add(reloadComponent(game,dialog,getChild("jpanel",target)))
def reloadComponent(game,dialog,node):
	"""Recursively build a Swing component tree from XML *node*.

	Supported node names: jpanel, jlabel, jbutton, settingspanel, logspanel,
	nglcanvas.  Layout/size attributes are applied according to the parent's
	"layout" attribute, and listeners are attached via addListeners().
	"""
	c = None
	text = node.getAttribute("text") if node.hasAttribute("text") else ""
	align = int(node.getAttribute("align")) if node.hasAttribute("align") else 0
	#Components
	if node.nodeName=="jpanel":
		c = JPanel()
		c.setBackground(Color.BLACK)
		layout = createLayoutManager(c,node.getAttribute("layout") if node.hasAttribute("layout") else "flow")
		c.setLayout(layout)
		c.setOpaque(False)
		if node.getAttribute("opaque"):
			c.setOpaque(node.getAttribute("opaque")=="true")
		if node.getAttribute("background"):
			if(node.getAttribute("background")=="orange"):
				c.setBackground(Color.ORANGE)
			elif(node.getAttribute("background")=="green"):
				c.setBackground(Color.GREEN)
		# Recurse into children; gridbag children carry explicit constraints.
		for child in node.childNodes:
			if child.nodeName!="#text":
				if node.getAttribute("layout")=="gridbag":
					c.add(reloadComponent(game,dialog,child),getGridBagConstraints(game,child))
				else:
					c.add(reloadComponent(game,dialog,child))
	elif node.nodeName=="jlabel":
		c = JLabel(text,align)
	elif node.nodeName=="jbutton":
		c = JButton(text)
		if node.hasAttribute("enabled"): c.setEnabled(node.getAttribute("enabled")=="true");
	elif node.nodeName=="settingspanel":
		# Nested structure: group -> heading -> setting.
		c = SettingsPanel(game)
		for g in node.childNodes:
			if g.nodeName=="group":
				c.addGroup(int(g.getAttribute("id")),g.getAttribute("label"))
				for h in g.childNodes:
					if h.nodeName=="heading":
						c.addHeading(int(g.getAttribute("id")),h.getAttribute("label"))
						for s in h.childNodes:
							if s.nodeName=="setting":
								#<setting id="1" name="DISPLAY_REAL_WORLD_TIME" type="checkbox" label="Show Real-World Time"></setting>
								if s.getAttribute("items")!="":
									c.addSettingWithItems(int(g.getAttribute("id")),int(s.getAttribute("id")),s.getAttribute("label"),s.getAttribute("type"),s.getAttribute("items").split(","))
								else:
									c.addSetting(int(g.getAttribute("id")),int(s.getAttribute("id")),s.getAttribute("label"),s.getAttribute("type"))
		c.finalizeSettingsPanel()
		dialog.registerSettingsPanel(int(node.getAttribute("id")),c)
	elif node.nodeName=="logspanel":
		c = LogsPanel(game)
		for g in node.childNodes:
			if g.nodeName=="group":
				c.addLog(int(g.getAttribute("id")),g.getAttribute("label"))
		c.finalizeLogsPanel()
		dialog.registerLogsPanel(int(node.getAttribute("id")),c)
	elif node.nodeName=="nglcanvas":
		c = NGLCanvas(game,int(getCascadingAttribute(game,node,"width")),int(getCascadingAttribute(game,node,"height")))
		dialog.registerCanvas(int(node.getAttribute("id")),c)
	#Layout
	# How the component is sized/placed depends on the PARENT's layout.
	if node.parentNode.getAttribute("layout")=="absolute":
		c.setBounds(getBounds(game,node))
	elif node.parentNode.getAttribute("layout")=="box-y":
		c.setAlignmentX(Component.CENTER_ALIGNMENT);
		if node.hasAttribute("width") and node.hasAttribute("height"):
			c.setPreferredSize(Dimension(int(getCascadingAttribute(game,node,"width")),int(getCascadingAttribute(game,node,"height"))))
		if node.hasAttribute("minWidth") and node.hasAttribute("minHeight"):
			c.setMinimumSize(Dimension(int(node.getAttribute("minWidth")),int(node.getAttribute("minHeight"))))
		if node.hasAttribute("maxWidth") and node.hasAttribute("maxHeight"):
			c.setMaximumSize(Dimension(int(node.getAttribute("maxWidth")),int(node.getAttribute("maxHeight"))))
	elif node.parentNode.getAttribute("layout")=="box-x":
		c.setAlignmentY(Component.CENTER_ALIGNMENT);
		if node.hasAttribute("width") and node.hasAttribute("height"):
			c.setPreferredSize(Dimension(int(getCascadingAttribute(game,node,"width")),int(getCascadingAttribute(game,node,"height"))))
		if node.hasAttribute("minWidth") and node.hasAttribute("minHeight"):
			c.setMinimumSize(Dimension(int(node.getAttribute("minWidth")),int(node.getAttribute("minHeight"))))
		if node.hasAttribute("maxWidth") and node.hasAttribute("maxHeight"):
			c.setMaximumSize(Dimension(int(node.getAttribute("maxWidth")),int(node.getAttribute("maxHeight"))))
	# Panels/canvases/settings panels manage their own listeners.
	if node.nodeName!="nglcanvas" and node.nodeName!="jpanel" and node.nodeName!="settingspanel": addListeners(game,c,node)
	return c;
def createLayoutManager(jpanel,sLayout):
	"""Translate a layout-name string into a Swing LayoutManager (None = absolute)."""
	if sLayout == "absolute":
		return None
	if sLayout == "flow":
		return FlowLayout()
	if sLayout == "border":
		return BorderLayout()
	if sLayout.startswith("grid-"):
		# e.g. "grid-1x1"; Parser yields a Dimension carrying the two counts.
		dims = Parser.parseDimension(sLayout.split("-")[1])
		return GridLayout(int(dims.getWidth()),int(dims.getHeight()))
	if sLayout.startswith("box-y"):
		return BoxLayout(jpanel,BoxLayout.Y_AXIS)
	if sLayout.startswith("box-x"):
		return BoxLayout(jpanel,BoxLayout.X_AXIS)
	if sLayout.startswith("gridbag"):
		return GridBagLayout()
	# Unknown names fall through to None, i.e. the same as "absolute".
def getCascadingAttribute(game,node,attr):
	"""Look up *attr* on *node*, walking up parents; dialogs default to the resolution."""
	res = Parser.parseDimension(game.getSettings().getString(Settings.RESOLUTION))
	if node.hasAttribute(attr):
		return node.getAttribute(attr)
	if node.nodeName == "dialog":
		# Root of the cascade: fall back to the configured screen resolution.
		if attr == "width":
			return str(int(res.getWidth()))
		elif attr == "height":
			return str(int(res.getHeight()))
	else:
		return getCascadingAttribute(game,node.parentNode,attr)
def getBounds(game,n):
	"""Build a Rectangle from a node's x/y (default 0) and cascading width/height."""
	x = int(n.getAttribute("x")) if n.hasAttribute("x") else 0
	y = int(n.getAttribute("y")) if n.hasAttribute("y") else 0
	w = int(getCascadingAttribute(game,n,"width"))
	h = int(getCascadingAttribute(game,n,"height"))
	return Rectangle(x,y,w,h)
def getGridBagConstraints(game,node):
c = GridBagConstraints()
for str in node.getAttribute("constraints").split(","):
a = str.split(":")
if a[0]=="fill":
c.fill = int(a[1])
elif a[0]=="gridwidth":
c.gridwidth = int(a[1])
elif a[0]=="gridheight":
c.gridheight = int(a[1])
elif a[0]=="gridx":
c.gridx = int(a[1])
elif a[0]=="gridy":
c.gridy = int(a[1])
elif a[0]=="weightx":
c.weightx = int(a[1])
elif a[0]=="weighty":
c.weighty = int(a[1])
else:
print a[0]+"was not found"
return c
| {
"content_hash": "ff03263bfbca7d59609a5418c09a063b",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 241,
"avg_line_length": 51.324909747292416,
"alnum_prop": 0.6161637476260815,
"repo_name": "pjdufour/felspar",
"id": "5590cf50ed175ff07a85101092ddad2719736539",
"size": "14217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "felspar/backup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "3652"
},
{
"name": "Python",
"bytes": "41786"
}
],
"symlink_target": ""
} |
"""
Test package backported for python-future.
Its primary purpose is to allow use of "import test.support" for running
the Python standard library unit tests using the new Python 3 stdlib
import location.
Python 3 renamed test.test_support to test.support.
"""
| {
"content_hash": "55afbca8761e63e38bc7cb2a44b2f2b9",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 72,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.7582417582417582,
"repo_name": "noisemaster/AdamTestBot",
"id": "2863cd1bcd15f0bf0f3c8259a8b6c57a6370b1c5",
"size": "273",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "future/backports/test/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "54"
},
{
"name": "Python",
"bytes": "3464312"
},
{
"name": "Shell",
"bytes": "406"
}
],
"symlink_target": ""
} |
"""
Webpack template tags
=====================
To use:
.. code-block:: html
{% load webpack_tags %}
<!-- Render inclusion tag for frontend JS elements -->
{% base_frontend_sync %}
<!-- Render on-demand async inclusion tag for frontend JS elements -->
{% base_frontend_async %}
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from django import template
from .. import hooks
register = template.Library()
@register.simple_tag()
def webpack_asset(unique_id):
    """
    Render the synchronously ('sync') loaded assets for one webpack bundle.

    The bundle must have been declared by subclassing WebpackBundleHook.

    :param unique_id: The unique_id defined as the module_path of the plugin
      concatenated with the bundle_id of the WebpackBundleHook
    :return: Inline Javascript as HTML for insertion into the DOM.
    """
    bundle_hook = hooks.WebpackBundleHook.get_by_unique_id(unique_id)
    return bundle_hook.render_to_page_load_sync_html()
@register.simple_tag()
def webpack_async_asset(unique_id):
    """
    Render on-demand (async) loading markup for one webpack bundle.

    The returned inline script registers the events the KolibriModule listens
    to, plus the JS and CSS assets needed to instantiate it; the module is not
    executed, initialized or registered until one of those events fires.

    The bundle must have been declared by subclassing WebpackBundleHook.

    :param unique_id: The unique_id defined as the module_path of the plugin
      concatenated with the bundle_id of the WebpackBundleHook
    :return: Inline Javascript as HTML for insertion into the DOM.
    """
    bundle_hook = hooks.WebpackBundleHook.get_by_unique_id(unique_id)
    return bundle_hook.render_to_page_load_async_html()
| {
"content_hash": "215f2c171e9b3a939da9dbff3e595f15",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 80,
"avg_line_length": 31.5,
"alnum_prop": 0.699436763952893,
"repo_name": "indirectlylit/kolibri",
"id": "e0d11f34ff31cca11b9f21f4a9ce43128bc0a50f",
"size": "1953",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "kolibri/core/webpack/templatetags/webpack_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2554964"
},
{
"name": "Dockerfile",
"bytes": "4114"
},
{
"name": "Gherkin",
"bytes": "365088"
},
{
"name": "HTML",
"bytes": "24294"
},
{
"name": "JavaScript",
"bytes": "1613945"
},
{
"name": "Makefile",
"bytes": "11953"
},
{
"name": "Python",
"bytes": "2860587"
},
{
"name": "SCSS",
"bytes": "5225"
},
{
"name": "Shell",
"bytes": "5245"
},
{
"name": "Vue",
"bytes": "1604613"
}
],
"symlink_target": ""
} |
"""Internal support module for sre"""
import _sre
import sre_parse
from sre_constants import *
from _sre import MAXREPEAT
# The C engine (_sre) and this compiler must agree on the opcode layout.
assert _sre.MAGIC == MAGIC, "SRE module mismatch"

# Largest value representable in a single code word.
if _sre.CODESIZE == 2:
    MAXCODE = 65535
else:
    MAXCODE = 0xFFFFFFFF

# Opcode groups used for dispatch in _compile below.
_LITERAL_CODES = set([LITERAL, NOT_LITERAL])
_REPEATING_CODES = set([REPEAT, MIN_REPEAT, MAX_REPEAT])
_SUCCESS_CODES = set([SUCCESS, FAILURE])
_ASSERT_CODES = set([ASSERT, ASSERT_NOT])
# Sets of lowercase characters which have the same uppercase.
_equivalences = (
# LATIN SMALL LETTER I, LATIN SMALL LETTER DOTLESS I
(0x69, 0x131), # iı
# LATIN SMALL LETTER S, LATIN SMALL LETTER LONG S
(0x73, 0x17f), # sſ
# MICRO SIGN, GREEK SMALL LETTER MU
(0xb5, 0x3bc), # µμ
# COMBINING GREEK YPOGEGRAMMENI, GREEK SMALL LETTER IOTA, GREEK PROSGEGRAMMENI
(0x345, 0x3b9, 0x1fbe), # \u0345ιι
# GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS, GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA
(0x390, 0x1fd3), # ΐΐ
# GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS, GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA
(0x3b0, 0x1fe3), # ΰΰ
# GREEK SMALL LETTER BETA, GREEK BETA SYMBOL
(0x3b2, 0x3d0), # βϐ
# GREEK SMALL LETTER EPSILON, GREEK LUNATE EPSILON SYMBOL
(0x3b5, 0x3f5), # εϵ
# GREEK SMALL LETTER THETA, GREEK THETA SYMBOL
(0x3b8, 0x3d1), # θϑ
# GREEK SMALL LETTER KAPPA, GREEK KAPPA SYMBOL
(0x3ba, 0x3f0), # κϰ
# GREEK SMALL LETTER PI, GREEK PI SYMBOL
(0x3c0, 0x3d6), # πϖ
# GREEK SMALL LETTER RHO, GREEK RHO SYMBOL
(0x3c1, 0x3f1), # ρϱ
# GREEK SMALL LETTER FINAL SIGMA, GREEK SMALL LETTER SIGMA
(0x3c2, 0x3c3), # ςσ
# GREEK SMALL LETTER PHI, GREEK PHI SYMBOL
(0x3c6, 0x3d5), # φϕ
# LATIN SMALL LETTER S WITH DOT ABOVE, LATIN SMALL LETTER LONG S WITH DOT ABOVE
(0x1e61, 0x1e9b), # ṡẛ
# LATIN SMALL LIGATURE LONG S T, LATIN SMALL LIGATURE ST
(0xfb05, 0xfb06), # ſtst
)
# Maps the lowercase code to lowercase codes which have the same uppercase.
_ignorecase_fixes = {i: tuple(j for j in t if i != j)
for t in _equivalences for i in t}
def _compile(code, pattern, flags):
    """Append the compiled form of parsed (sub)pattern *pattern* to *code*.

    *code* is a flat list of engine code words; skip slots are patched in
    after each sub-sequence is emitted, so statement order is significant.
    """
    # internal: compile a (sub)pattern
    emit = code.append
    _len = len
    LITERAL_CODES = _LITERAL_CODES
    REPEATING_CODES = _REPEATING_CODES
    SUCCESS_CODES = _SUCCESS_CODES
    ASSERT_CODES = _ASSERT_CODES
    # Extra Unicode case-fold equivalences apply only for IGNORECASE+UNICODE
    # without LOCALE.
    if (flags & SRE_FLAG_IGNORECASE and
            not (flags & SRE_FLAG_LOCALE) and
            flags & SRE_FLAG_UNICODE):
        fixes = _ignorecase_fixes
    else:
        fixes = None
    for op, av in pattern:
        if op in LITERAL_CODES:
            if flags & SRE_FLAG_IGNORECASE:
                lo = _sre.getlower(av, flags)
                if fixes and lo in fixes:
                    # The character has case-fold equivalents: emit a small
                    # IN_IGNORE set covering all of them.
                    emit(OPCODES[IN_IGNORE])
                    skip = _len(code); emit(0)
                    if op is NOT_LITERAL:
                        emit(OPCODES[NEGATE])
                    for k in (lo,) + fixes[lo]:
                        emit(OPCODES[LITERAL])
                        emit(k)
                    emit(OPCODES[FAILURE])
                    code[skip] = _len(code) - skip
                else:
                    emit(OPCODES[OP_IGNORE[op]])
                    emit(lo)
            else:
                emit(OPCODES[op])
                emit(av)
        elif op is IN:
            if flags & SRE_FLAG_IGNORECASE:
                emit(OPCODES[OP_IGNORE[op]])
                def fixup(literal, flags=flags):
                    return _sre.getlower(literal, flags)
            else:
                emit(OPCODES[op])
                fixup = None
            skip = _len(code); emit(0)
            _compile_charset(av, flags, code, fixup, fixes)
            code[skip] = _len(code) - skip
        elif op is ANY:
            if flags & SRE_FLAG_DOTALL:
                emit(OPCODES[ANY_ALL])
            else:
                emit(OPCODES[ANY])
        elif op in REPEATING_CODES:
            if flags & SRE_FLAG_TEMPLATE:
                raise error("internal: unsupported template operator")
            elif _simple(av) and op is not REPEAT:
                # Single-width repeat target: use the cheaper *_ONE opcodes.
                if op is MAX_REPEAT:
                    emit(OPCODES[REPEAT_ONE])
                else:
                    emit(OPCODES[MIN_REPEAT_ONE])
                skip = _len(code); emit(0)
                emit(av[0])
                emit(av[1])
                _compile(code, av[2], flags)
                emit(OPCODES[SUCCESS])
                code[skip] = _len(code) - skip
            else:
                emit(OPCODES[REPEAT])
                skip = _len(code); emit(0)
                emit(av[0])
                emit(av[1])
                _compile(code, av[2], flags)
                code[skip] = _len(code) - skip
                if op is MAX_REPEAT:
                    emit(OPCODES[MAX_UNTIL])
                else:
                    emit(OPCODES[MIN_UNTIL])
        elif op is SUBPATTERN:
            # av[0] is the group number (0 for non-capturing).
            if av[0]:
                emit(OPCODES[MARK])
                emit((av[0]-1)*2)
            # _compile_info(code, av[1], flags)
            _compile(code, av[1], flags)
            if av[0]:
                emit(OPCODES[MARK])
                emit((av[0]-1)*2+1)
        elif op in SUCCESS_CODES:
            emit(OPCODES[op])
        elif op in ASSERT_CODES:
            emit(OPCODES[op])
            skip = _len(code); emit(0)
            if av[0] >= 0:
                emit(0) # look ahead
            else:
                lo, hi = av[1].getwidth()
                if lo != hi:
                    raise error("look-behind requires fixed-width pattern")
                emit(lo) # look behind
            _compile(code, av[1], flags)
            emit(OPCODES[SUCCESS])
            code[skip] = _len(code) - skip
        elif op is CALL:
            emit(OPCODES[op])
            skip = _len(code); emit(0)
            _compile(code, av, flags)
            emit(OPCODES[SUCCESS])
            code[skip] = _len(code) - skip
        elif op is AT:
            emit(OPCODES[op])
            if flags & SRE_FLAG_MULTILINE:
                av = AT_MULTILINE.get(av, av)
            if flags & SRE_FLAG_LOCALE:
                av = AT_LOCALE.get(av, av)
            elif flags & SRE_FLAG_UNICODE:
                av = AT_UNICODE.get(av, av)
            emit(ATCODES[av])
        elif op is BRANCH:
            emit(OPCODES[op])
            tail = []
            tailappend = tail.append
            for av in av[1]:
                skip = _len(code); emit(0)
                # _compile_info(code, av, flags)
                _compile(code, av, flags)
                emit(OPCODES[JUMP])
                tailappend(_len(code)); emit(0)
                code[skip] = _len(code) - skip
            emit(0) # end of branch
            # NOTE: the loop below deliberately rebinds the name `tail` while
            # iterating the list of jump slots collected above.
            for tail in tail:
                code[tail] = _len(code) - tail
        elif op is CATEGORY:
            emit(OPCODES[op])
            if flags & SRE_FLAG_LOCALE:
                av = CH_LOCALE[av]
            elif flags & SRE_FLAG_UNICODE:
                av = CH_UNICODE[av]
            emit(CHCODES[av])
        elif op is GROUPREF:
            if flags & SRE_FLAG_IGNORECASE:
                emit(OPCODES[OP_IGNORE[op]])
            else:
                emit(OPCODES[op])
            emit(av-1)
        elif op is GROUPREF_EXISTS:
            emit(OPCODES[op])
            emit(av[0]-1)
            skipyes = _len(code); emit(0)
            _compile(code, av[1], flags)
            if av[2]:
                emit(OPCODES[JUMP])
                skipno = _len(code); emit(0)
                code[skipyes] = _len(code) - skipyes + 1
                _compile(code, av[2], flags)
                code[skipno] = _len(code) - skipno
            else:
                code[skipyes] = _len(code) - skipyes + 1
        else:
            raise ValueError("unsupported operand type", op)
def _compile_charset(charset, flags, code, fixup=None, fixes=None):
    """Append the compiled form of a character-set subprogram to *code*."""
    optimized = _optimize_charset(charset, fixup, fixes,
                                  flags & SRE_FLAG_UNICODE)
    for op, av in optimized:
        code.append(OPCODES[op])
        if op is NEGATE:
            continue
        if op is LITERAL:
            code.append(av)
        elif op is RANGE:
            low, high = av
            code.append(low)
            code.append(high)
        elif op in (CHARSET, BIGCHARSET):
            code.extend(av)
        elif op is CATEGORY:
            if flags & SRE_FLAG_LOCALE:
                category = CH_LOCALE[av]
            elif flags & SRE_FLAG_UNICODE:
                category = CH_UNICODE[av]
            else:
                category = av
            code.append(CHCODES[category])
        else:
            raise error("internal: unsupported set operator")
    code.append(OPCODES[FAILURE])
def _optimize_charset(charset, fixup, fixes, isunicode):
    """Return an optimized (op, av) list for *charset*.

    Strategy: mark members in a byte map, then prefer (in order) one or two
    literal/range items, a 256-bit CHARSET bitmap, or a compressed BIGCHARSET.
    Items the byte map cannot express are kept verbatim in *tail*.
    """
    # internal: optimize character set
    out = []
    tail = []
    charmap = bytearray(256)
    for op, av in charset:
        # The while/try dance lets us retry once after growing the map to
        # cover the whole BMP when a code point > 0xFF is seen.
        while True:
            try:
                if op is LITERAL:
                    if fixup:
                        i = fixup(av)
                        charmap[i] = 1
                        if fixes and i in fixes:
                            for k in fixes[i]:
                                charmap[k] = 1
                    else:
                        charmap[av] = 1
                elif op is RANGE:
                    r = range(av[0], av[1]+1)
                    if fixup:
                        r = map(fixup, r)
                    if fixup and fixes:
                        for i in r:
                            charmap[i] = 1
                            if i in fixes:
                                for k in fixes[i]:
                                    charmap[k] = 1
                    else:
                        for i in r:
                            charmap[i] = 1
                elif op is NEGATE:
                    out.append((op, av))
                else:
                    tail.append((op, av))
            except IndexError:
                if len(charmap) == 256:
                    # character set contains non-UCS1 character codes
                    charmap += b'\0' * 0xff00
                    continue
                # character set contains non-BMP character codes
                if fixup and isunicode and op is RANGE:
                    lo, hi = av
                    ranges = [av]
                    # There are only two ranges of cased astral characters:
                    # 10400-1044F (Deseret) and 118A0-118DF (Warang Citi).
                    _fixup_range(max(0x10000, lo), min(0x11fff, hi),
                                 ranges, fixup)
                    for lo, hi in ranges:
                        if lo == hi:
                            tail.append((LITERAL, hi))
                        else:
                            tail.append((RANGE, (lo, hi)))
                else:
                    tail.append((op, av))
            break
    # compress character map
    runs = []
    q = 0
    while True:
        p = charmap.find(1, q)
        if p < 0:
            break
        if len(runs) >= 2:
            # More than two runs: not worth representing as literals/ranges.
            runs = None
            break
        q = charmap.find(0, p)
        if q < 0:
            runs.append((p, len(charmap)))
            break
        runs.append((p, q))
    if runs is not None:
        # use literal/range
        for p, q in runs:
            if q - p == 1:
                out.append((LITERAL, p))
            else:
                out.append((RANGE, (p, q - 1)))
        out += tail
        # if the case was changed or new representation is more compact
        if fixup or len(out) < len(charset):
            return out
        # else original character set is good enough
        return charset
    # use bitmap
    if len(charmap) == 256:
        data = _mk_bitmap(charmap)
        out.append((CHARSET, data))
        out += tail
        return out
    # To represent a big charset, first a bitmap of all characters in the
    # set is constructed. Then, this bitmap is sliced into chunks of 256
    # characters, duplicate chunks are eliminated, and each chunk is
    # given a number. In the compiled expression, the charset is
    # represented by a 32-bit word sequence, consisting of one word for
    # the number of different chunks, a sequence of 256 bytes (64 words)
    # of chunk numbers indexed by their original chunk position, and a
    # sequence of 256-bit chunks (8 words each).
    # Compression is normally good: in a typical charset, large ranges of
    # Unicode will be either completely excluded (e.g. if only cyrillic
    # letters are to be matched), or completely included (e.g. if large
    # subranges of Kanji match). These ranges will be represented by
    # chunks of all one-bits or all zero-bits.
    # Matching can be also done efficiently: the more significant byte of
    # the Unicode character is an index into the chunk number, and the
    # less significant byte is a bit index in the chunk (just like the
    # CHARSET matching).
    charmap = bytes(charmap) # should be hashable
    comps = {}
    mapping = bytearray(256)
    block = 0
    data = bytearray()
    for i in range(0, 65536, 256):
        chunk = charmap[i: i + 256]
        if chunk in comps:
            mapping[i // 256] = comps[chunk]
        else:
            mapping[i // 256] = comps[chunk] = block
            block += 1
            data += chunk
    data = _mk_bitmap(data)
    data[0:0] = [block] + _bytes_to_codes(mapping)
    out.append((BIGCHARSET, data))
    out += tail
    return out
def _fixup_range(lo, hi, ranges, fixup):
    """Map [lo, hi] through *fixup* and merge each result into *ranges* in place.

    *ranges* is a sorted list of inclusive (lo, hi) pairs; values adjacent to
    an existing pair extend it, others are inserted as singleton pairs.
    """
    for i in map(fixup, range(lo, hi+1)):
        for k, (lo, hi) in enumerate(ranges):
            if i < lo:
                if i == lo - 1:
                    # Extend the current range downwards.  (Bug fix: this
                    # branch previously referenced an undefined name ``l``,
                    # raising NameError whenever it was taken.)
                    ranges[k] = (i, hi)
                else:
                    ranges.insert(k, (i, i))
                break
            elif i > hi:
                if i == hi + 1:
                    ranges[k] = (lo, i)
                    break
            else:
                break
        else:
            ranges.append((i, i))
_CODEBITS = _sre.CODESIZE * 8
# Translation table: byte 0 -> '0', any non-zero byte -> '1'.
_BITS_TRANS = b'0' + b'1' * 255
def _mk_bitmap(bits, _CODEBITS=_CODEBITS, _int=int):
    """Pack a 0/1 byte map into a list of engine code words (LSB first)."""
    reversed_digits = bits.translate(_BITS_TRANS)[::-1]
    words = []
    for end in range(len(reversed_digits), 0, -_CODEBITS):
        words.append(_int(reversed_digits[end - _CODEBITS: end], 2))
    return words
def _bytes_to_codes(b):
    """Reinterpret a bytes-like block mapping as a list of unsigned code words."""
    view = memoryview(b).cast('I')
    # Sanity-check that the 'I' item size matches the engine's code-word size.
    assert view.itemsize == _sre.CODESIZE
    assert len(view) * view.itemsize == len(b)
    return view.tolist()
def _simple(av):
    """True if the repeat target in *av* is single-width and not a group."""
    width_lo, width_hi = av[2].getwidth()
    if width_lo != 1 or width_hi != 1:
        return False
    return av[2][0][0] != SUBPATTERN
def _generate_overlap_table(prefix):
    """Compute the KMP-style self-overlap (failure) table for *prefix*.

    overlap[i] == 0 means prefix[i:] cannot overlap prefix[0:...];
    overlap[i] == k (with 0 < k <= i) means prefix[i-k+1:i+1] overlaps
    prefix[0:k].
    """
    table = [0] * len(prefix)
    for pos in range(1, len(prefix)):
        candidate = table[pos - 1]
        # Shrink the candidate overlap until it can be extended (or is empty).
        while candidate > 0 and prefix[pos] != prefix[candidate]:
            candidate = table[candidate - 1]
        if prefix[pos] == prefix[candidate]:
            table[pos] = candidate + 1
        else:
            table[pos] = 0
    return table
def _compile_info(code, pattern, flags):
    """Emit an optional INFO block (min/max width plus a literal prefix or
    a leading charset) that lets the engine skip impossible match positions.
    """
    # internal: compile an info block. in the current version,
    # this contains min/max pattern width, and an optional literal
    # prefix or a character map
    lo, hi = pattern.getwidth()
    if lo == 0:
        return # not worth it
    # look for a literal prefix
    prefix = []
    prefixappend = prefix.append
    prefix_skip = 0
    charset = [] # not used
    charsetappend = charset.append
    if not (flags & SRE_FLAG_IGNORECASE):
        # look for literal prefix
        for op, av in pattern.data:
            if op is LITERAL:
                if len(prefix) == prefix_skip:
                    prefix_skip = prefix_skip + 1
                prefixappend(av)
            elif op is SUBPATTERN and len(av[1]) == 1:
                op, av = av[1][0]
                if op is LITERAL:
                    prefixappend(av)
                else:
                    break
            else:
                break
        # if no prefix, look for charset prefix
        if not prefix and pattern.data:
            op, av = pattern.data[0]
            if op is SUBPATTERN and av[1]:
                op, av = av[1][0]
                if op is LITERAL:
                    charsetappend((op, av))
                elif op is BRANCH:
                    c = []
                    cappend = c.append
                    for p in av[1]:
                        if not p:
                            break
                        op, av = p[0]
                        if op is LITERAL:
                            cappend((op, av))
                        else:
                            break
                    else:
                        charset = c
            elif op is BRANCH:
                c = []
                cappend = c.append
                for p in av[1]:
                    if not p:
                        break
                    op, av = p[0]
                    if op is LITERAL:
                        cappend((op, av))
                    else:
                        break
                else:
                    charset = c
            elif op is IN:
                charset = av
##     if prefix:
##         print "*** PREFIX", prefix, prefix_skip
##     if charset:
##         print "*** CHARSET", charset
    # add an info block
    emit = code.append
    emit(OPCODES[INFO])
    skip = len(code); emit(0)
    # literal flag
    mask = 0
    if prefix:
        mask = SRE_INFO_PREFIX
        if len(prefix) == prefix_skip == len(pattern.data):
            mask = mask + SRE_INFO_LITERAL
    elif charset:
        mask = mask + SRE_INFO_CHARSET
    emit(mask)
    # pattern length
    if lo < MAXCODE:
        emit(lo)
    else:
        emit(MAXCODE)
        prefix = prefix[:MAXCODE]
    if hi < MAXCODE:
        emit(hi)
    else:
        # 0 means "max width unknown/too large" to the engine.
        emit(0)
    # add literal prefix
    if prefix:
        emit(len(prefix)) # length
        emit(prefix_skip) # skip
        code.extend(prefix)
        # generate overlap table
        code.extend(_generate_overlap_table(prefix))
    elif charset:
        _compile_charset(charset, flags, code)
    code[skip] = len(code) - skip
def isstring(obj):
    """Return True for text or bytes objects (the two accepted pattern types)."""
    return isinstance(obj, str) or isinstance(obj, bytes)
def _code(p, flags):
    """Compile parsed pattern *p* into a flat code list (info + body + SUCCESS)."""
    combined_flags = p.pattern.flags | flags
    code = []
    _compile_info(code, p, combined_flags)   # optional fast-path info block
    _compile(code, p.data, combined_flags)   # the pattern body itself
    code.append(OPCODES[SUCCESS])
    return code
def compile(p, flags=0):
    """Convert a pattern string (or already-parsed pattern) into a compiled
    _sre pattern object.
    """
    # internal: convert pattern list to internal format
    if isstring(p):
        pattern = p
        p = sre_parse.parse(p, flags)
    else:
        pattern = None

    code = _code(p, flags)
    # print code

    # XXX: <fl> get rid of this limitation!
    if p.pattern.groups > 100:
        raise AssertionError(
            "sorry, but this version only supports 100 named groups"
            )

    # map in either direction
    groupindex = p.pattern.groupdict
    indexgroup = [None] * p.pattern.groups
    for name, index in groupindex.items():
        indexgroup[index] = name

    return _sre.compile(
        pattern, flags | p.pattern.flags, code,
        p.pattern.groups - 1,
        groupindex, indexgroup,
        )
| {
"content_hash": "9fbc517cfe01e0fffd484d9ab997d8de",
"timestamp": "",
"source": "github",
"line_count": 582,
"max_line_length": 109,
"avg_line_length": 33.74914089347079,
"alnum_prop": 0.49353426331330824,
"repo_name": "ArcherSys/ArcherSys",
"id": "520d78222eef7d9d7ab2d835d1026d4d2cad086f",
"size": "19904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/sre_compile.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""JSON utility functions."""
from collections import deque
import json
import logging
import os
import tempfile
from typing import Any, Callable, Dict, List, Optional, Type, Union
from homeassistant.core import Event, State
from homeassistant.exceptions import HomeAssistantError
_LOGGER = logging.getLogger(__name__)
class SerializationError(HomeAssistantError):
    """Error serializing the data to JSON."""
    # Raised by save_json when json.dumps cannot encode the payload.
class WriteError(HomeAssistantError):
    """Error writing the data."""
    # Raised by save_json when the temporary file cannot be written/renamed.
def load_json(
    filename: str, default: Union[List, Dict, None] = None
) -> Union[List, Dict]:
    """Load JSON data from a file and return as dict or list.

    Defaults to returning empty dict if file is not found.

    Raises HomeAssistantError if the file exists but cannot be read or parsed.
    """
    try:
        with open(filename, encoding="utf-8") as fdesc:
            return json.loads(fdesc.read())  # type: ignore
    except FileNotFoundError:
        # This is not a fatal error
        _LOGGER.debug("JSON file not found: %s", filename)
    except ValueError as error:
        _LOGGER.exception("Could not parse JSON content: %s", filename)
        # Chain the original error so tracebacks show the root cause.
        raise HomeAssistantError(error) from error
    except OSError as error:
        _LOGGER.exception("JSON file reading failed: %s", filename)
        raise HomeAssistantError(error) from error
    return {} if default is None else default
def save_json(
    filename: str,
    data: Union[List, Dict],
    private: bool = False,
    *,
    encoder: Optional[Type[json.JSONEncoder]] = None,
) -> None:
    """Save JSON data to a file atomically.

    The data is first written to a temporary file in the destination
    directory and then moved into place with os.replace, so readers never
    observe a partially written file.

    Args:
        filename: destination path.
        data: JSON-serializable list or dict.
        private: if False the file is made world-readable (mode 0o644).
        encoder: optional custom JSONEncoder subclass.

    Raises:
        SerializationError: if ``data`` cannot be serialized to JSON.
        WriteError: if writing or replacing the file fails.
    """
    try:
        json_data = json.dumps(data, indent=4, cls=encoder)
    except TypeError as error:
        msg = f"Failed to serialize to JSON: (unknown). Bad data at {format_unserializable_data(find_paths_unserializable_data(data))}"
        _LOGGER.error(msg)
        # BUG FIX: chain the TypeError so the underlying cause is kept.
        raise SerializationError(msg) from error
    tmp_filename = ""
    tmp_path = os.path.split(filename)[0]
    try:
        # Modern versions of Python tempfile create this file with mode 0o600
        with tempfile.NamedTemporaryFile(
            mode="w", encoding="utf-8", dir=tmp_path, delete=False
        ) as fdesc:
            fdesc.write(json_data)
            tmp_filename = fdesc.name
        if not private:
            os.chmod(tmp_filename, 0o644)
        os.replace(tmp_filename, filename)
    except OSError as error:
        _LOGGER.exception("Saving JSON file failed: %s", filename)
        # BUG FIX: chain the OSError so the underlying cause is kept.
        raise WriteError(error) from error
    finally:
        # After a successful os.replace the temp file no longer exists;
        # anything still present here means the write/replace failed.
        if os.path.exists(tmp_filename):
            try:
                os.remove(tmp_filename)
            except OSError as err:
                # If we are cleaning up then something else went wrong, so
                # we should suppress likely follow-on errors in the cleanup
                _LOGGER.error("JSON replacement cleanup failed: %s", err)
def format_unserializable_data(data: Dict[str, Any]) -> str:
    """Format output of find_paths in a friendly way.

    Format is comma separated: <path>=<value>(<type>)
    """
    # BUG FIX: the f-string lacked the closing parenthesis after the type,
    # producing e.g. "$.a=1(<class 'int'>" instead of "$.a=1(<class 'int'>)".
    return ", ".join(f"{path}={value}({type(value)})" for path, value in data.items())
def find_paths_unserializable_data(
    bad_data: Any, *, dump: Callable[[Any], str] = json.dumps
) -> Dict[str, Any]:
    """Find the paths to unserializable data.

    This method is slow! Only use for error handling.
    """
    invalid = {}
    queue = deque([(bad_data, "$")])
    while queue:
        obj, path = queue.popleft()
        try:
            dump(obj)
        except (ValueError, TypeError):
            pass
        else:
            # Serializable as-is; nothing to record for this node.
            continue
        # Expand states and events into dicts so bad data inside them
        # can be located.
        if isinstance(obj, State):
            path += f"(state: {obj.entity_id})"
            obj = obj.as_dict()
        elif isinstance(obj, Event):
            path += f"(event: {obj.event_type})"
            obj = obj.as_dict()
        if isinstance(obj, dict):
            for key, value in obj.items():
                try:
                    # Is the key itself serializable?
                    dump({key: None})
                except TypeError:
                    invalid[f"{path}<key: {key}>"] = key
                else:
                    # Key is fine: descend into the value.
                    queue.append((value, f"{path}.{key}"))
        elif isinstance(obj, list):
            for idx, value in enumerate(obj):
                queue.append((value, f"{path}[{idx}]"))
        else:
            invalid[path] = obj
    return invalid
| {
"content_hash": "7aa81c28c2762c328afd17f15391db25",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 136,
"avg_line_length": 32.064748201438846,
"alnum_prop": 0.597487098945479,
"repo_name": "titilambert/home-assistant",
"id": "7b6da837c491a103b405d2acc2fae60d049dd69d",
"size": "4457",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/util/json.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "25849092"
},
{
"name": "Shell",
"bytes": "4410"
}
],
"symlink_target": ""
} |
"""
Assumes a bare-bones pengines service running on 4242
You can use this:
docker run -p 4242:9083 -e PORT=9083 --name sparqlprog cmungall/sparqlprog
"""
import unittest
from pengines.Builder import PengineBuilder
from pengines.Pengine import Pengine
from tests.log_testing import log_tests
import logging
# Enable verbose (DEBUG) logging for the whole test session.
log_tests(logging.DEBUG)
class PenginesTestCase(unittest.TestCase):
    """Integration tests against a local pengines/sparqlprog service.

    The service described at the top of this module must be listening on
    localhost:4242 for these tests to pass.
    """

    def test_member(self):
        """member/2 over a literal list yields all three bindings."""
        q = "member(X,[1,2,3])"
        factory = PengineBuilder(urlserver="http://localhost:4242", destroy=False, ask=q)
        pengine = Pengine(builder=factory)
        results = pengine.currentQuery.availProofs
        print(results)
        self.assertTrue(len(results) == 3)
        self.assertTrue( {'X':1} in results )
        self.assertTrue( {'X':2} in results )
        self.assertTrue( {'X':3} in results )

    def test_src(self):
        """Facts supplied via srctext are queryable."""
        src = "foo(a).\nfoo(b).\nfoo(c)."
        q = "foo(X)"
        factory = PengineBuilder(urlserver="http://localhost:4242", destroy=False, srctext=src, ask=q)
        pengine = Pengine(builder=factory)
        results = pengine.currentQuery.availProofs
        print(results)
        self.assertTrue(len(results) == 3)
        self.assertTrue( {'X':'a'} in results )
        self.assertTrue( {'X':'b'} in results )
        self.assertTrue( {'X':'c'} in results )

    def test_chunk(self):
        """chunk=1 returns one proof at a time until hasMore is False."""
        src = "foo(a).\nfoo(b).\nfoo(c)."
        q = "foo(X)"
        factory = PengineBuilder(urlserver="http://localhost:4242", destroy=False, srctext=src, chunk=1, ask=q)
        pengine = Pengine(builder=factory)
        results = pengine.currentQuery.availProofs
        print('INIT Results={}'.format(results))
        self.assertTrue(len(results) == 1)
        while pengine.currentQuery.hasMore:
            pengine.doNext(pengine.currentQuery)
            results = pengine.currentQuery.availProofs
            print('NEXT Results={}'.format(results))
        self.assertTrue( {'X':'a'} in results )
        self.assertTrue( {'X':'b'} in results )
        self.assertTrue( {'X':'c'} in results )
        # BUG FIX: assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(len(results), 3)

    def test_iterator(self):
        """Iterating the query yields every solution regardless of chunking."""
        q = "member(X,[1,2,3,4,5,6,7,8,9,10])"
        chunk_sizes = [1,2,3,4,100]
        for chunk in chunk_sizes:
            factory = PengineBuilder(urlserver="http://localhost:4242", destroy=False, chunk=chunk, ask=q)
            pengine = Pengine(builder=factory)
            results = []
            for r in pengine.currentQuery:
                print('ITER={}'.format(r))
                results.append(r)
            self.assertTrue(len(results) == 10)
            self.assertTrue( {'X':1} in results )
            self.assertTrue( {'X':2} in results )
            self.assertTrue( {'X':3} in results )
# Allow running this test module directly: ``python srctest_test.py``.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "e5e0561feabf46330c004c9b333102dd",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 111,
"avg_line_length": 31.451612903225808,
"alnum_prop": 0.5801709401709402,
"repo_name": "ian-andrich/PythonPengines",
"id": "faa3019687f62a57918d047263bb0c6dd9588c8b",
"size": "2925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/srctest_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "222"
},
{
"name": "Prolog",
"bytes": "219"
},
{
"name": "Python",
"bytes": "29766"
}
],
"symlink_target": ""
} |
"""Library for loading train and eval data.
This libary contains two functions, make_train_iterator for generating a
training data iterator from multiple sources of different formats and
make_eval_function for creating an evaluation function that evaluates on
data from multiple sources of different formats.
"""
# pylint:disable=g-importing-member
from functools import partial
import tensorflow as tf
from uflow import uflow_augmentation
from uflow.data import generic_flow_dataset as flow_dataset
from uflow.data import kitti
from uflow.data import sintel
# pylint:disable=g-long-lambda
def make_train_iterator(
    train_on,
    height,
    width,
    shuffle_buffer_size,
    batch_size,
    seq_len,
    crop_instead_of_resize=False,
    apply_augmentation=True,
    include_ground_truth=False,
    resize_gt_flow=True,
    include_occlusions=False,
    seed=41,
    mode='train',
):
  """Build joint training iterator for all data in train_on.
  Args:
    train_on: string of the format 'format0:path0;format1:path1', e.g.
      'kitti:/usr/local/home/...'.
    height: int, height to which the images will be resized or cropped.
    width: int, width to which the images will be resized or cropped.
    shuffle_buffer_size: int, size that will be used for the shuffle buffer.
    batch_size: int, batch size for the iterator.
    seq_len: int, number of frames per sequences (at the moment this should
      always be 2)
    crop_instead_of_resize: bool, indicates if cropping should be used instead
      of resizing
    apply_augmentation: bool, indicates if geometric and photometric data
      augmentation shall be activated (paramaters are gin configurable)
    include_ground_truth: bool, if True, return ground truth optical flow with
      the training images. This only exists for some datasets (Kitti, Sintel).
    resize_gt_flow: bool, indicates if ground truth flow should be resized (only
      important if resizing and supervised training is used)
    include_occlusions: bool, indicates if ground truth occlusions should be
      loaded (currently not supported in combination with augmentation)
    seed: A seed for a random number generator, controls shuffling of data.
    mode: str, will be passed on to the data iterator class. Can be used to
      specify different settings within the data iterator.
  Returns:
    A tf.data.Iterator that produces batches of images of shape [batch
    size, sequence length=3, height, width, channels=3]
  """
  train_datasets = []
  # Split strings according to pattern "format0:path0;format1:path1".
  for format_and_path in train_on.split(';'):
    data_format, path = format_and_path.split(':')
    # NOTE(review): ``mode`` is mutated inside this loop, so when several
    # datasets are listed the suffixes accumulate on every following
    # iteration — confirm this is intended for the multi-dataset case.
    if include_occlusions:
      mode += '-include-occlusions'
    if include_ground_truth:
      mode += '-supervised'
    if include_occlusions and 'sintel' not in data_format:
      raise ValueError('The parameter include_occlusions is only supported for'
                       'sintel data.')
    if include_ground_truth and ('chairs' not in data_format and
                                 'sintel' not in data_format and
                                 'kitti' not in data_format):
      raise NotImplementedError('The parameter include_ground_truth is only'
                                'supported for flying_chairs, sintel, kitti and'
                                'wod data at the moment.')
    # Add a dataset based on format and path.
    # When cropping, height/width are passed as None so the loaders do not
    # resize; the crop to (height, width) happens in the augmentation below.
    if 'kitti' in data_format:
      dataset = kitti.make_dataset(
          path,
          mode=mode,
          seq_len=seq_len,
          shuffle_buffer_size=shuffle_buffer_size,
          height=None if crop_instead_of_resize else height,
          width=None if crop_instead_of_resize else width,
          resize_gt_flow=resize_gt_flow,
          seed=seed,
      )
    elif 'chairs' in data_format:
      dataset = flow_dataset.make_dataset(
          path,
          mode=mode,
          seq_len=seq_len,
          shuffle_buffer_size=shuffle_buffer_size,
          height=None if crop_instead_of_resize else height,
          width=None if crop_instead_of_resize else width,
          resize_gt_flow=resize_gt_flow,
          gt_flow_shape=[384, 512, 2],
          seed=seed,
      )
    elif 'sintel' in data_format:
      dataset = sintel.make_dataset(
          path,
          mode=mode,
          seq_len=seq_len,
          shuffle_buffer_size=shuffle_buffer_size,
          height=None if crop_instead_of_resize else height,
          width=None if crop_instead_of_resize else width,
          resize_gt_flow=resize_gt_flow,
          seed=seed,
      )
    else:
      print('Unknown data format "{}"'.format(data_format))
      continue
    train_datasets.append(dataset)
  # prepare augmentation function
  # in case no crop is desired set it to the size images have been resized to
  # This will fail if none or both are specified.
  augmentation_fn = partial(
      uflow_augmentation.apply_augmentation,
      crop_height=height,
      crop_width=width)
  # returns a function to apply ensure_shape on all the available data
  def _ensure_shapes():
    # shape of the data
    imgs_shape = (batch_size, seq_len, height, width, 3)
    if resize_gt_flow:
      flow_shape = (batch_size, height, width, 2)
      valid_shape = (batch_size, height, width, 1)
    else:
      # Ground-truth flow keeps its native resolution, so only the batch
      # and channel dimensions can be pinned here.
      flow_shape = (batch_size, None, None, 2)
      valid_shape = (batch_size, None, None, 1)
    occ_shape = (batch_size, height, width, 1)
    # different cases of data combinations
    # NOTE(review): the combination include_ground_truth + augmentation +
    # occlusions has no dedicated branch; the first matching case wins.
    if include_ground_truth and apply_augmentation:
      return lambda imgs, imgs_na, flow, valid: (tf.ensure_shape(
          imgs, imgs_shape), {
              'images_without_photo_aug': tf.ensure_shape(imgs_na, imgs_shape),
              'flow_uv': tf.ensure_shape(flow, flow_shape),
              'flow_valid': tf.ensure_shape(valid, valid_shape)
          })
    elif include_ground_truth and include_occlusions:
      return lambda imgs, flow, valid, occ: (tf.ensure_shape(
          imgs, imgs_shape), {
              'flow_uv': tf.ensure_shape(flow, flow_shape),
              'flow_valid': tf.ensure_shape(valid, valid_shape),
              'occlusions': tf.ensure_shape(occ, occ_shape)
          })
    elif include_ground_truth:
      return lambda imgs, flow, valid: (tf.ensure_shape(imgs, imgs_shape), {
          'flow_uv': tf.ensure_shape(flow, flow_shape),
          'flow_valid': tf.ensure_shape(valid, valid_shape)
      })
    elif include_occlusions:
      return lambda imgs, occ: (tf.ensure_shape(imgs, imgs_shape), {
          'occlusions': tf.ensure_shape(occ, occ_shape)
      })
    elif apply_augmentation:
      return lambda imgs, imgs_na: (tf.ensure_shape(imgs, imgs_shape), {
          'images_without_photo_aug': tf.ensure_shape(imgs_na, imgs_shape)
      })
    else:
      return lambda imgs: (tf.ensure_shape(imgs, imgs_shape), {})
  # NOTE(review): despite the docstring promising a *joint* iterator over
  # all sources, only the first dataset parsed from ``train_on`` is used
  # here — confirm whether the others should be interleaved/sampled.
  train_ds = train_datasets[0]
  # Perform data augmentation
  # This cannot handle occlusions at the moment.
  if apply_augmentation:
    train_ds = train_ds.map(augmentation_fn)
  train_ds = train_ds.batch(batch_size)
  train_ds = train_ds.prefetch(1)
  train_ds = train_ds.map(_ensure_shapes())
  train_it = tf.compat.v1.data.make_one_shot_iterator(train_ds)
  return train_it
def make_eval_function(eval_on, height, width, progress_bar, plot_dir,
                       num_plots):
  """Build an evaluation function for uflow.
  Args:
    eval_on: string of the format 'format0:path0;format1:path1', e.g.
      'kitti:/usr/local/home/...'.
    height: int, the height to which the images should be resized for inference.
    width: int, the width to which the images should be resized for inference.
    progress_bar: boolean, flag to indicate whether the function should print a
      progress_bar during evaluaton.
    plot_dir: string, optional path to a directory in which plots are saved (if
      num_plots > 0).
    num_plots: int, maximum number of qualitative results to plot for the
      evaluation.
  Returns:
    A pair consisting of an evaluation function and a list of strings
    that holds the keys of the evaluation result.
  """
  eval_functions_and_datasets = []
  eval_keys = []
  # Split strings according to pattern "format0:path0;format1:path1".
  for format_and_path in eval_on.split(';'):
    data_format, path = format_and_path.split(':')
    # Add a dataset based on format and path.
    if 'kitti' in data_format:
      if 'benchmark' in data_format:
        dataset = kitti.make_dataset(path, mode='test')
        eval_fn = kitti.benchmark
      else:
        dataset = kitti.make_dataset(path, mode='eval')
        eval_fn = partial(kitti.evaluate, prefix=data_format)
      eval_keys += kitti.list_eval_keys(prefix=data_format)
    elif 'chairs' in data_format or 'custom' in data_format:
      dataset = flow_dataset.make_dataset(path, mode='eval')
      eval_fn = partial(
          flow_dataset.evaluate,
          prefix=data_format,
          max_num_evals=1000,  # We do this to avoid evaluating on 22k samples.
          has_occlusion=False)
      eval_keys += flow_dataset.list_eval_keys(prefix=data_format)
    elif 'sintel' in data_format:
      if 'benchmark' in data_format:
        # Sintel benchmark mode bypasses the aggregation below entirely and
        # returns a self-contained eval function (hence the isolation check).
        # pylint:disable=g-long-lambda
        # pylint:disable=cell-var-from-loop
        eval_fn = lambda uflow: sintel.benchmark(inference_fn=uflow.infer,
                                                 height=height, width=width,
                                                 sintel_path=path,
                                                 plot_dir=plot_dir,
                                                 num_plots=num_plots)
        if len(eval_on.split(';')) != 1:
          raise ValueError('Sintel benchmark should be done in isolation.')
        return eval_fn, []
      dataset = sintel.make_dataset(path, mode='eval-occlusion')
      eval_fn = partial(sintel.evaluate, prefix=data_format)
      eval_keys += sintel.list_eval_keys(prefix=data_format)
    else:
      print('Unknown data format "{}"'.format(data_format))
      continue
    dataset = dataset.prefetch(1)
    eval_functions_and_datasets.append((eval_fn, dataset))
  # Make an eval function that aggregates all evaluations.
  def eval_function(uflow):
    result = dict()
    for eval_fn, ds in eval_functions_and_datasets:
      # NOTE(review): assumes every collected eval_fn accepts this exact
      # positional signature (inference_fn, dataset, height, width,
      # progress_bar, plot_dir, num_plots) — confirm for kitti.benchmark.
      results = eval_fn(
          uflow.infer, ds, height,
          width, progress_bar, plot_dir, num_plots)
      for k, v in results.items():
        result[k] = v
    return result
  return eval_function, eval_keys
| {
"content_hash": "e32f785de746b007ed9379d7ab8a776c",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 80,
"avg_line_length": 39.47940074906367,
"alnum_prop": 0.6433924675078265,
"repo_name": "google-research/google-research",
"id": "02818b8fff60c5fe32283b391e4935c036d6a765",
"size": "11149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uflow/uflow_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
from twilio.rest import TwilioRestClient

# Your Account Sid and Auth Token from twilio.com/user/account
ACCOUNT_SID = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
AUTH_TOKEN = "your_auth_token"

client = TwilioRestClient(ACCOUNT_SID, AUTH_TOKEN)

# Place an outbound call whose call flow is driven by the demo TwiML.
call = client.calls.create(
    to="+14108675309",
    from_="+15005550006",
    url="http://demo.twilio.com/docs/voice.xml",
)
print(call.sid)
| {
"content_hash": "376d7d7fd5f69c833b1bd321a95c41da",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 62,
"avg_line_length": 29.384615384615383,
"alnum_prop": 0.7486910994764397,
"repo_name": "teoreteetik/api-snippets",
"id": "458e3e9f6e5268728d53c55a9c4b780c50dba5b2",
"size": "455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest/test-credentials/test-calls-example-1/test-calls-example-1.5.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "643369"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "943336"
},
{
"name": "JavaScript",
"bytes": "539577"
},
{
"name": "M",
"bytes": "117"
},
{
"name": "Mathematica",
"bytes": "93"
},
{
"name": "Objective-C",
"bytes": "46198"
},
{
"name": "PHP",
"bytes": "538312"
},
{
"name": "Python",
"bytes": "467248"
},
{
"name": "Ruby",
"bytes": "470316"
},
{
"name": "Shell",
"bytes": "1564"
},
{
"name": "Swift",
"bytes": "36563"
}
],
"symlink_target": ""
} |
from datetime import datetime
from pydantic import validator
from pydantic.dataclasses import dataclass
@dataclass
class DemoDataclass:
    """Pydantic dataclass whose timestamp defaults to the current time."""

    ts: datetime = None

    @validator('ts', pre=True, always=True)
    def set_ts_now(cls, v):
        """Fill in ``datetime.now()`` when no timestamp was supplied."""
        if v:
            return v
        return datetime.now()
# Demonstrate the validator: no argument -> ts defaults to "now";
# an ISO string argument -> parsed into that datetime.
print(DemoDataclass())
print(DemoDataclass(ts='2017-11-08T14:00'))
| {
"content_hash": "6eb54486ca331ec31e0c7c75caa55713",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 43,
"avg_line_length": 20,
"alnum_prop": 0.7235294117647059,
"repo_name": "samuelcolvin/pydantic",
"id": "11db5fca3c2c0ce4944d7a0fa190035bf0eefa2c",
"size": "340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/examples/validators_dataclass.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2905"
},
{
"name": "Python",
"bytes": "1140694"
},
{
"name": "Shell",
"bytes": "258"
}
],
"symlink_target": ""
} |
"""
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Audiophiles Music Manager Build 20180119 VER0.0.0PREALPHA *
* (C)2017 Mattijs Snepvangers pegasus.ict@gmail.com *
* lib/ui.py User Interface VER0.0.0PREALPHA *
* License: MIT Please keep my name in the credits *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
"""
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# self.dialog.exitcodes = ("0 DIALOG_OK", "1 DIALOG_CANCEL",
# "-1 DIALOG_ESC", "-1 DIALOG_ERROR",
# "3 DIALOG_EXTRA", "2 DIALOG_HELP",
# "4 DIALOG_ITEM_HELP")
###TODO### check Lynda vid 1203 & 1204!!!!
class UserInterface:
    """Dialog-based user interface for AMM.

    Wraps the pythondialog ``Dialog`` object and exposes helpers for the
    widget types used by the application (message boxes, progress bars,
    lists, forms, ...).

    NOTE(review): most methods below are ``@classmethod``s that reference an
    undefined global ``myInterface``; presumably they should be instance
    methods using the ``self.my_interface`` created in ``__init__`` —
    confirm before relying on them.
    """

    def __init__(self, ui_style="dialog"):
        """Create the underlying Dialog instance.

        Args:
            ui_style: UI backend name; only "dialog" is supported for now.

        Raises:
            TypeError: if an unsupported ui_style is requested.
        """
        self.__ui_style = ui_style
        if self.__ui_style == "dialog":
            from dialog import Dialog
            self.my_interface = Dialog(dialog=self.__ui_style,
                                       DIALOGRC="./dialog.rc",
                                       compat=self.__ui_style, use_stdout=None,
                                       autowidgetsize=True,
                                       pass_args_via_file=True)
        else:
            # BUG FIX: the original split this message with a backslash
            # *inside* the string literal, embedding a long run of spaces
            # mid-sentence in the user-visible text.
            raise TypeError('Other ui styles are not available yet, need to '
                            'use Dialog for now')

    ##########################################################################
    @classmethod
    def ui_builder(self, dialogtype, **kwargs):
        """Construct an element of a user interface.

        Args:
            dialogtype: friendly widget name; must be a key of the
                ``dialogtypes`` mapping below (e.g. "message", "textbox").
            **kwargs: widget-specific options such as text, title or path.

        Raises:
            TypeError: if ``dialogtype`` is not a known widget name.
        """
        dialogtypes = dict(message="msgbox",
                           textbox="textbox",
                           text_editor="editbox_str",
                           announce="infobox",
                           countdown="pause",
                           progress_bar="guage_start",
                           progress_bar_update="guage_update",
                           progress_bar_stop="guage_stop",
                           multi_progress_bar="mixedguage",
                           build_list="buildlist",
                           checklist="checklist",
                           radiolist="radiolist",
                           inputbox="inputbox",
                           inputmenu="inputmenu",
                           passwordbox="passwordbox",
                           form="form",
                           selectdir="dselect",
                           selectfd="fselect",
                           yn_question='yesno')
        if dialogtype not in dialogtypes:
            # BUG FIX: the original raised via an undefined name ``Error``.
            raise TypeError("unknown dialogtype")
        result = None
        # BUG FIX: the original used the non-Python keyword ``elseif`` (a
        # SyntaxError) and compared ``dialogtype`` against the *widget*
        # names ("msgbox", ...) although only friendly names reach here.
        widget = dialogtypes[dialogtype]
        if widget == "msgbox":
            # NOTE(review): 'text'/'title' were undefined locals in the
            # original; presumably they arrive via **kwargs — confirm.
            result = self.message_box(kwargs.get('text'), kwargs.get('title'))
        elif widget == "textbox":
            result = self.text_box(kwargs.get('path'))
        # etc: remaining widget types still to be implemented
        return result

    @classmethod
    def message_box(self, message, title):
        """Display a message box and return the dialog result."""
        # NOTE(review): ``myInterface`` is undefined at this scope (here and
        # in the methods below) — confirm whether ``self.my_interface`` of
        # an instance was intended.
        result = myInterface.msgbox(message, title)
        return result

    @classmethod
    def text_box(self, file_path):
        """Display the contents of ``file_path`` in a text box."""
        result = myInterface.textbox(file_path)
        return result

    @classmethod
    def text_editor(self, initialtext, args, title):
        """Display a text editor.

        Returns:
            tuple: (exitcode, edited text).
        """
        # NOTE(review): the incoming ``args`` argument is overwritten here,
        # exactly as in the original — confirm that is intended.
        args = [None, None]
        result = myInterface.editbox_str(initialtext, args, title)
        return result  # returns a tuple (exitcode, text)

    @classmethod
    def announce(self, message, title):
        """Display an announcement (infobox)."""
        result = myInterface.infobox(message, title)
        return result

    @classmethod
    def countdown(self, message, time_out, title):
        """Display an announcement with a timeout.

        Args:
            time_out: timeout in seconds (int).
        """
        result = myInterface.pause(message, time_out, title)
        return result

    ##########################################################################
    @classmethod
    def progressbar(self, bar_type, message, percent, title, **kwargs):
        """Initialise/update a progressbar.

        Intended to dispatch to _progress_bar, _progress_bar_update,
        _progress_bar_stop or _multi_progress_bar; not implemented yet.
        """
        pass

    @classmethod
    def _progress_bar(self, message, percent, title):
        """Start a simple progress bar (empty percent defaults to 0)."""
        if percent == '':
            percent = 0
        myInterface.guage_start(message, percent, title)

    @classmethod
    def _progress_bar_update(self, percent, message, update_message=False):
        """Update the running progress bar."""
        # NOTE(review): the percent default was applied *after* the update
        # call in the original; kept as-is — confirm the intended order.
        myInterface.guage_update(percent, message, update_message)
        if percent == '':
            percent = 10

    @classmethod
    def _progress_bar_stop(self):
        """Tear down the progress bar and return the dialog exit code."""
        result = myInterface.guage_stop()
        return result

    @classmethod
    def _multi_progress_bar(self, message, percent, elements, title):
        """Display a mixed gauge (one bar per element).

        ``elements`` is a list of (tag, value) tuples; a value is either a
        percentage (-25 equals 25%) or one of: Succeeded, Failed, Passed,
        Completed, Done, Skipped, In Progress, Checked, N/A.

        ToDo: create an object that automatically recalculates
        total_progress and resends the mixedguage command whenever one of
        the element values (and thereby total_progress) is changed.
        """
        if percent == '':
            percent = 0
        result = myInterface.mixedguage(message, percent, elements, title)
        return result

    ##########################################################################
    @classmethod
    def form(self, fields):
        """Generate a form layout from a list of field names.

        Fields are laid out in one column, or two columns when there are
        more than 8 fields.
        """
        numfields = len(fields)
        if numfields > 8:
            cols = 2
        else:
            cols = 1
        col = 1
        row = 1
        # BUG FIX: ``elements`` and ``rows`` were undefined in the original
        # and ``append`` was called with three positional arguments.
        # NOTE(review): ``rows = numfields`` and the ``row`` increment are
        # assumptions about the intended layout — confirm.
        elements = []
        rows = numfields
        for fieldname in fields:
            if row > rows / cols:
                col = 2
                row = row - rows // cols
            elements.append((fieldname, row, col))
            row += 1
        return elements

    ##########################################################################
    @classmethod
    def list_builder(self, list_type, message, items, title):
        """Dispatch to one of the list methods below; not implemented yet."""
        pass

    @classmethod
    def build_list(self, message, items, title):
        """Display a buildlist and return the selected items.

        ``items`` is a list of (tag, item, status) tuples.
        """
        listheight = None
        result = myInterface.buildlist(message, listheight, items, title)
        if result[0] != "DIALOG_OK":
            print("oops, something went wrong...")
        else:
            return result[1]

    @classmethod
    def check_list(self, message, choices, title):
        """Display a checklist and return the checked tags.

        ``choices`` is a list of (tag, item, status) tuples.
        """
        listheight = None
        result = myInterface.checklist(message, listheight, choices, title)
        if result[0] != "DIALOG_OK":
            print("oops, something went wrong...")
        else:
            return result[1]

    @classmethod
    def radio_list(self, message, choices, title):
        """Display a radio list (menu) and return the chosen tag.

        ``choices`` is a list of (tag, item) tuples where tag is the short
        name and item the description.
        """
        list_height = None
        result = myInterface.menu(message, list_height, choices, title)
        if result[0] != "DIALOG_OK":
            print("oops, something went wrong...")
        else:
            return result[1]

    ##########################################################################
    @classmethod
    def select_dir(self, root_dir, title):
        """Select a directory within the given root_dir."""
        selected_dir = myInterface.dselect(root_dir, title)
        # NOTE(review): ``debug_switch``/``debug_log`` are undefined
        # globals, and ``selected_dir[2]`` would overflow the 2-tuple that
        # pythondialog's dselect returns — confirm intent.
        if debug_switch == True:
            debug_log += "select_dir returned %s and %s. \n" % (selected_dir[1],
                                                                selected_dir[2])
        return selected_dir

    ##########################################################################
    @classmethod
    def yn_question(self, question, buttons, title):
        """Ask a two-answer question, defaulting the labels to yes/no.

        ``buttons`` is a dict with 'yes_label' and 'no_label' keys; empty
        labels fall back to the (module-level) ``ui_language`` strings.
        """
        # NOTE(review): ``ui_language`` is undefined at this scope — confirm.
        if buttons['yes_label'] == '':
            buttons['yes_label'] = ui_language['yes']
        if buttons['no_label'] == '':
            buttons['no_label'] = ui_language['no']
        result = myInterface.yesno(question, buttons, title)
        return result
##############################################################################
##############################################################################
def main():
    """Smoke test for running this module stand-alone."""
    print("It works!!! ;-)")
###TODO### do something with the various methods/functions of this file
# standard boilerplate
# Run the smoke test when this file is executed as a script.
if __name__ == '__main__':
    main()
| {
"content_hash": "80cc02acc2e48f2fecab05d2c8b93feb",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 80,
"avg_line_length": 40.019762845849804,
"alnum_prop": 0.4591604938271605,
"repo_name": "pegasusict/AMM",
"id": "0721081441e4109f89494657716a4325848fdf07",
"size": "10148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/ui.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38618"
}
],
"symlink_target": ""
} |
'''
Copyright (C) 2022, WAFW00F Developers.
See the LICENSE file for copying permission.
'''
NAME = 'TransIP Web Firewall (TransIP)'
def is_waf(self):
    """Detect the TransIP balancer by its identifying response headers."""
    backend = self.matchHeader(('X-TransIP-Backend', '.+'))
    balancer = self.matchHeader(('X-TransIP-Balancer', '.+'))
    # Either header alone is enough evidence.
    return any((backend, balancer))
"content_hash": "a3e8c55e21affa4426796af37efeaac4",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 54,
"avg_line_length": 22.125,
"alnum_prop": 0.6129943502824858,
"repo_name": "EnableSecurity/wafw00f",
"id": "ccde146d79af93b6cf4ea6cadcd77b527d9117db",
"size": "376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wafw00f/plugins/transip.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "97"
},
{
"name": "Makefile",
"bytes": "339"
},
{
"name": "Python",
"bytes": "109477"
}
],
"symlink_target": ""
} |
class Station(object):
    """Radio station configuration.

    Attributes:
        id: station id, used to select which station to play
        url: mp3 stream URL
        name: human-readable station name
    """

    def __init__(self, id, url, name):
        self.id, self.url, self.name = id, url, name
| {
"content_hash": "ebc3a3e4c9d2f2d44295ac24ebec4443",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 56,
"avg_line_length": 24.666666666666668,
"alnum_prop": 0.597972972972973,
"repo_name": "petli/codplayer",
"id": "b364dbbd25e3ab7766eb8fa63d0605ac25acf447",
"size": "488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/codplayer/radio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "669"
},
{
"name": "C",
"bytes": "46162"
},
{
"name": "CSS",
"bytes": "2279"
},
{
"name": "HTML",
"bytes": "17734"
},
{
"name": "JavaScript",
"bytes": "87189"
},
{
"name": "Makefile",
"bytes": "163"
},
{
"name": "Python",
"bytes": "376187"
},
{
"name": "Shell",
"bytes": "476"
},
{
"name": "TeX",
"bytes": "4957"
}
],
"symlink_target": ""
} |
"""
This program includes a subclass (MinBalAcct) that inherits from
BankAccount and then overrides __init__, __str__ and withdraw.
Note - class variables are referenced through the original class.
"""
class BankAccount(object):  # Top tier class (super class)
    """A plain bank account with a class-level counter of open accounts."""
    acct_cntr = 0  # class variable: number of accounts currently open

    def __init__(self):
        """Open the account with a zero balance and bump the counter."""
        self.balance = 0
        BankAccount.acct_cntr += 1

    def withdraw(self, amount):
        """Take money out (no overdraft protection at this level)."""
        self.balance = self.balance - amount

    def deposit(self, amount):
        """Put money in."""
        self.balance = self.balance + amount

    def __str__(self):
        return 'The balance for this account is ${:,.2f}'.format(self.balance)

    def __del__(self):
        """Decrement the open-account counter when an account is reclaimed."""
        BankAccount.acct_cntr -= 1
class MinBalAcct(BankAccount):
def __init__(self, deposit, minbal):
self.minimum_bal = minbal
self.balance = deposit
BankAccount.acct_cntr += 1
def withdraw(self, amount):
if self.balance - amount < self.minimum_bal:
print 'Withdrawal request of {:,.2f}'.format(amount)
print ' denied due to minimum balance requirement'
else:
self.balance -= amount
def __str__(self):
return ("The balance for this account is ${:,.2f}\n" +
"The minimum balance is ${:,.0f}").format(self.balance,
self.minimum_bal)
# Demo: exercise the shared account counter and the subclass override.
a = BankAccount() # Create an instance of Bankaccount
b = BankAccount() # Create another instance
print 'Number of accounts -', BankAccount.acct_cntr # print class variable
del a
print 'Number of accounts -', BankAccount.acct_cntr
c = MinBalAcct(5000,1000) # Create a minimum balance instance
print 'Number of accounts -', BankAccount.acct_cntr
print c
# This withdrawal would drop the balance below the $1,000 minimum,
# so the overridden withdraw() prints a denial instead.
c.withdraw(4500)
| {
"content_hash": "224ad189298a6d490d06aa8eb69026b6",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 77,
"avg_line_length": 39.16326530612245,
"alnum_prop": 0.6055237102657635,
"repo_name": "sharkySharks/PythonForDevs",
"id": "2e51877a71e6820fc2fcc0be69fa32bf54ff6a6d",
"size": "1919",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "LabsDone/lab26_classes.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8247"
},
{
"name": "HTML",
"bytes": "40466"
},
{
"name": "JavaScript",
"bytes": "14304"
},
{
"name": "Python",
"bytes": "59829"
}
],
"symlink_target": ""
} |
import os, sys, json
from argparse import ArgumentParser
class Tester:
TESTS_DIRECTORY_NAME = "tests"
JSON_DESCRIPTION_FILE_NAME = "info.json"
ERR_LOG_FILE_NAME = "err.log"
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
TESTS_PATH = os.path.join(ROOT_DIR, TESTS_DIRECTORY_NAME)
def run_test(self, test_name, verbose=True):
import subprocess
test_path = os.path.join(Tester.TESTS_PATH, test_name)
os.chdir(test_path)
process = subprocess.Popen(['./launch.sh'], stdout=subprocess.PIPE, stderr=sys.stderr)
out, err = process.communicate()
os.chdir(Tester.ROOT_DIR)
try:
json_information = json.loads(out)
if 'passed' not in json_information:
raise ValueError("The key 'passed' is missing")
if verbose:
print json.dumps(json_information, indent=4, separators=(',', ': '))
return json_information
except Exception, e:
json_error = {'passed': False, 'error': str(e), 'test_output': out}
if verbose:
print json.dumps(json_error, indent=4, separators=(',', ': '))
return json_error
def run_all(self):
failed_tests = []
available_tests = sorted(self.get_available_tests())
for (test_name, test_dir) in available_tests:
json_file_path = os.path.join(test_dir, Tester.JSON_DESCRIPTION_FILE_NAME)
if self.check_well_formed_info_json(json_file_path) == True:
with open(json_file_path) as data_file:
json_information = json.load(data_file)
enabled = json_information['enabled']
if enabled:
print "~~~~ " + test_name + " ~~~~"
print "Test description: " + json_information['description'] + "\n"
json_info = self.run_test(test_name, False)
if not json_info['passed']:
failed_tests.append(test_name)
print json.dumps(json_info, indent=4, separators=(',', ': '))
print "\n"
if len(failed_tests) == 0:
print "All tests have been passed."
else:
print "The folliwing tests failed:"
print ",\n".join(failed_tests)
def list_all_tests(self):
available_tests = sorted(self.get_available_tests())
enabled_tests = []
disabled_tests = []
malformed_tests = []
if len(available_tests) == 0:
print "No test found."
return
for (test_name, test_dir) in available_tests:
json_file_path = os.path.join(test_dir, Tester.JSON_DESCRIPTION_FILE_NAME)
is_json_valid = self.check_well_formed_info_json(json_file_path)
if is_json_valid == True:
with open(json_file_path) as data_file:
json_information = json.load(data_file)
enabled = json_information['enabled']
info = (test_name, json_information['enabled'], json_information['description'])
if enabled:
enabled_tests.append(info)
else:
disabled_tests.append(info)
else:
malformed_tests.append((test_name, is_json_valid))
# Print information
print "\n## ENABLED TEST ##"
print "#%s\t%s\t%s" % ("Test_name", "Enabled", "Description")
for test in enabled_tests:
print "%s\t%s\t\"%s\"\n" % test
print "\n## DISABLED TEST ##"
print "#%s\t%s\t%s" % ("Test_name", "Enabled", "Description")
for test in disabled_tests:
print "%s\t%s\t\"%s\"\n" % test
print "\n## MALFORMED TEST ##"
print "#%s\t%s\t%s" % ("Test_name", "Status", "Error Description")
for test in malformed_tests:
print "%s\tmalformed\t\"%s\"\n" % test
print "\n"
def change_status_test(self, test_name, status):
test_dir = os.path.join(Tester.TESTS_PATH, test_name)
json_file_path = os.path.join(test_dir, Tester.JSON_DESCRIPTION_FILE_NAME)
is_json_valid = self.check_well_formed_info_json(json_file_path)
if is_json_valid == True:
with open(json_file_path) as data_file:
json_information = json.load(data_file)
json_information['enabled'] = status
new_json_string = json.dumps(json_information, indent=4, separators=(',', ': '))
with open(json_file_path, 'w') as json_file:
json_file.write(new_json_string)
print "SUCCESS"
else:
print is_json_valid
def print_info(self, test_name):
test_dir = os.path.join(Tester.TESTS_PATH, test_name)
json_file_path = os.path.join(test_dir, Tester.JSON_DESCRIPTION_FILE_NAME)
is_json_valid = self.check_well_formed_info_json(json_file_path)
if is_json_valid == True:
with open(json_file_path) as data_file:
json_information = json.load(data_file)
print json.dumps(json_information, indent=4, separators=(',', ': '))
else:
print is_json_valid
def get_available_tests(self):
return [(test, os.path.join(Tester.TESTS_PATH, test)) for test in os.listdir(Tester.TESTS_PATH) if
os.path.isdir(os.path.join(Tester.TESTS_PATH, test))]
def check_well_formed_info_json(self, json_file_path):
try:
with open(json_file_path) as data_file:
json_information = json.load(data_file)
mandatory_keys = ["enabled", "description", "full_description", "author"]
for key in mandatory_keys:
if key not in json_information:
raise ValueError('The key \'' + key + '\' is missing')
if not isinstance(json_information['enabled'], bool):
raise ValueError('The value of the key \'enabled\' must be a boolean')
return True
except ValueError, e:
return str(e)
if __name__ == "__main__":
    # Command-line front end.  The options are mutually exclusive, so
    # exactly one action is performed per invocation.
    parser = ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    # NOTE(review): "-all" is a single-dash long option; "--all" was probably
    # intended -- confirm before changing, as fixing it would alter the CLI.
    group.add_argument('-a', "-all", dest="all", action='store_true',
                       help="Run all the enabled tests")
    group.add_argument('-n', "--name", dest="single", metavar='TEST_NAME',
                       help="Run the specified test")
    group.add_argument('-l', "--list", dest="list", action='store_true',
                       help="List all the available tests")
    group.add_argument('-e', "--enable", dest="enable", metavar='TEST_NAME',
                       help="Enable the specified test")
    group.add_argument('-d', "--disable", dest="disable", metavar='TEST_NAME',
                       help="Disable the specified test")
    group.add_argument('-i', "--info", dest="info", metavar='TEST_NAME',
                       help="Print information about the specified test")
    args = parser.parse_args()
    tester = Tester()
    # Dispatch to the matching Tester method; no option prints the help.
    if args.all:
        tester.run_all()
    elif args.list:
        tester.list_all_tests()
    elif args.single is not None:
        tester.run_test(args.single)
    elif args.enable is not None:
        tester.change_status_test(args.enable, True)
    elif args.disable is not None:
        tester.change_status_test(args.disable, False)
    elif args.info is not None:
        tester.print_info(args.info)
    else:
        parser.print_help()
| {
"content_hash": "172685fcb00a30ed0ff6626fa22b5023",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 106,
"avg_line_length": 44.10526315789474,
"alnum_prop": 0.5613895518430124,
"repo_name": "letconex/MMT",
"id": "a7f4481256ee36a4a68c18eddc518b12af280709",
"size": "7564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/run_tests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "AutoHotkey",
"bytes": "1609"
},
{
"name": "C",
"bytes": "2310"
},
{
"name": "C++",
"bytes": "3479743"
},
{
"name": "CMake",
"bytes": "42508"
},
{
"name": "Java",
"bytes": "810385"
},
{
"name": "Perl",
"bytes": "93820"
},
{
"name": "Protocol Buffer",
"bytes": "947"
},
{
"name": "Python",
"bytes": "232214"
},
{
"name": "Roff",
"bytes": "25856"
},
{
"name": "Shell",
"bytes": "15583"
}
],
"symlink_target": ""
} |
"""Home of the `Sequential` model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras import layers as layer_module
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine.input_layer import Input
from tensorflow.python.keras.engine.input_layer import InputLayer
from tensorflow.python.keras.engine.network import Network
from tensorflow.python.keras.engine.training import Model
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.models.Sequential', 'keras.Sequential')
class Sequential(Model):
  """Linear stack of layers.

  Arguments:
    layers: list of layers to add to the model.

  Example:

  ```python
  # Optionally, the first layer can receive an `input_shape` argument:
  model = Sequential()
  model.add(Dense(32, input_shape=(500,)))
  # Afterwards, we do automatic shape inference:
  model.add(Dense(32))

  # This is identical to the following:
  model = Sequential()
  model.add(Dense(32, input_dim=500))

  # And to the following:
  model = Sequential()
  model.add(Dense(32, batch_input_shape=(None, 500)))

  # Note that you can also omit the `input_shape` argument:
  # In that case the model gets built the first time you call `fit` (or other
  # training and evaluation methods).
  model = Sequential()
  model.add(Dense(32))
  model.add(Dense(32))
  model.compile(optimizer=optimizer, loss=loss)
  # This builds the model for the first time:
  model.fit(x, y, batch_size=32, epochs=10)

  # Note that when using this delayed-build pattern (no input shape specified),
  # the model doesn't have any weights until the first call
  # to a training/evaluation method (since it isn't yet built):
  model = Sequential()
  model.add(Dense(32))
  model.add(Dense(32))
  model.weights  # returns []

  # Whereas if you specify the input shape, the model gets built continuously
  # as you are adding layers:
  model = Sequential()
  model.add(Dense(32, input_shape=(500,)))
  model.add(Dense(32))
  model.weights  # returns list of length 4

  When using the delayed-build pattern (no input shape specified), you can
  choose to manually build your model by calling `build(batch_input_shape)`:
  model = Sequential()
  model.add(Dense(32))
  model.add(Dense(32))
  model.build((None, 500))
  model.weights  # returns list of length 4
  ```
  """

  @checkpointable.no_automatic_dependency_tracking
  def __init__(self, layers=None, name=None):
    super(Sequential, self).__init__(name=name)
    self.supports_masking = True
    self._build_input_shape = None
    self._compute_output_and_mask_jointly = True

    # Add to the model any layers passed to the constructor.
    if layers:
      for layer in layers:
        self.add(layer)

  @property
  def layers(self):
    # Historically, `sequential.layers` only returns layers that were added
    # via `add`, and omits the auto-generated `InputLayer` that comes at the
    # bottom of the stack.
    # `CheckpointableBase` manages the `_layers` attributes and does filtering
    # over it.
    layers = super(Sequential, self).layers
    if layers and isinstance(layers[0], InputLayer):
      return layers[1:]
    return layers[:]

  @checkpointable.no_automatic_dependency_tracking
  def add(self, layer):
    """Adds a layer instance on top of the layer stack.

    Arguments:
      layer: layer instance.

    Raises:
      TypeError: If `layer` is not a layer instance.
      ValueError: In case the `layer` argument does not
        know its input shape.
      ValueError: In case the `layer` argument has
        multiple output tensors, or is already connected
        somewhere else (forbidden in `Sequential` models).
    """
    if not isinstance(layer, base_layer.Layer):
      raise TypeError('The added layer must be '
                      'an instance of class Layer. '
                      'Found: ' + str(layer))
    self.built = False
    set_inputs = False
    if not self._layers:
      if isinstance(layer, InputLayer):
        # Corner case where the user passes an InputLayer layer via `add`.
        assert len(layer._inbound_nodes[-1].output_tensors) == 1
        set_inputs = True
      else:
        batch_shape, dtype = get_input_shape_and_dtype(layer)
        if batch_shape:
          # Instantiate an input layer.
          x = Input(
              batch_shape=batch_shape,
              dtype=dtype,
              name=layer.name + '_input')
          # This will build the current layer
          # and create the node connecting the current layer
          # to the input layer we just created.
          layer(x)
          set_inputs = True

      if set_inputs:
        # If an input layer (placeholder) is available.
        if len(layer._inbound_nodes[-1].output_tensors) != 1:
          raise ValueError('All layers in a Sequential model '
                           'should have a single output tensor. '
                           'For multi-output layers, '
                           'use the functional API.')
        self.outputs = [layer._inbound_nodes[-1].output_tensors[0]]
        self.inputs = layer_utils.get_source_inputs(self.outputs[0])
    elif self.outputs:
      # If the model is being built continuously on top of an input layer:
      # refresh its output.
      output_tensor = layer(self.outputs[0])
      if isinstance(output_tensor, list):
        raise TypeError('All layers in a Sequential model '
                        'should have a single output tensor. '
                        'For multi-output layers, '
                        'use the functional API.')
      self.outputs = [output_tensor]
    if set_inputs or self._is_graph_network:
      self._init_graph_network(self.inputs, self.outputs, name=self.name)
      self.built = True
    else:
      self._layers.append(layer)
    if self._layers:
      self._track_layers(self._layers)

  @checkpointable.no_automatic_dependency_tracking
  def pop(self):
    """Removes the last layer in the model.

    Raises:
      TypeError: if there are no layers in the model.
    """
    if not self.layers:
      raise TypeError('There are no layers in the model.')

    self._layers.pop()
    if not self.layers:
      self.outputs = None
      self.inputs = None
      self.built = False
    elif self._is_graph_network:
      self.layers[-1]._outbound_nodes = []
      self.outputs = [self.layers[-1].output]
      self._init_graph_network(self.inputs, self.outputs, name=self.name)
      self.built = True

  def build(self, input_shape=None):
    """Builds the model's weights.

    For graph networks the topology is simply re-initialized; otherwise each
    layer is built in sequence, propagating the shape through the stack.
    """
    if self._is_graph_network:
      self._init_graph_network(self.inputs, self.outputs, name=self.name)
    else:
      if input_shape is None:
        raise ValueError('You must provide an `input_shape` argument.')
      self._build_input_shape = input_shape
      shape = input_shape
      for layer in self.layers:
        if not layer.built:
          with ops.name_scope(layer._name_scope()):
            layer.build(shape)
          layer.built = True
        shape = layer.compute_output_shape(shape)
    self.built = True

  def call(self, inputs, training=None, mask=None):
    """Runs the stack on `inputs` (delegating to Network for graph mode)."""
    if self._is_graph_network:
      return super(Sequential, self).call(inputs, training=training, mask=mask)

    outputs, _ = self._call_and_compute_mask(
        inputs, training=training, mask=mask)
    return outputs

  def _call_and_compute_mask(self, inputs, training=None, mask=None):
    """Applies every layer to `inputs` and threads the mask through.

    Returns a `(output, mask)` tuple; `mask` is None for layers that do not
    support masking.
    """
    if not self.built:
      self.build(inputs.shape)

    x = inputs
    for layer in self.layers:
      # Only forward `mask`/`training` to layers whose call signature
      # accepts them.
      kwargs = {}
      if 'mask' in tf_inspect.getargspec(layer.call).args:
        kwargs['mask'] = mask
      if 'training' in tf_inspect.getargspec(layer.call).args:
        kwargs['training'] = training

      if isinstance(layer, Network) and layer._compute_output_and_mask_jointly:
        x, mask = layer._call_and_compute_mask(x, **kwargs)
      else:
        x = layer.call(x, **kwargs)
        if layer.supports_masking:
          mask = layer.compute_mask(x, mask)
        else:
          mask = None
      if not context.executing_eagerly():
        x._keras_mask = mask
    return x, mask

  def compute_output_shape(self, input_shape):
    """Propagates `input_shape` through every layer of the stack."""
    shape = input_shape
    for layer in self.layers:
      shape = layer.compute_output_shape(shape)
    return shape

  def compute_mask(self, inputs, mask):
    """Computes the output mask of the stack for `inputs`."""
    _, mask = self._call_and_compute_mask(inputs, mask=mask)
    return mask

  def predict_proba(self, x, batch_size=32, verbose=0):
    """Generates class probability predictions for the input samples.

    The input samples are processed batch by batch.

    Arguments:
        x: input data, as a Numpy array or list of Numpy arrays
            (if the model has multiple inputs).
        batch_size: integer.
        verbose: verbosity mode, 0 or 1.

    Returns:
        A Numpy array of probability predictions.
    """
    preds = self.predict(x, batch_size, verbose)
    if preds.min() < 0. or preds.max() > 1.:
      logging.warning('Network returning invalid probability values. '
                      'The last layer might not normalize predictions '
                      'into probabilities '
                      '(like softmax or sigmoid would).')
    return preds

  def predict_classes(self, x, batch_size=32, verbose=0):
    """Generate class predictions for the input samples.

    The input samples are processed batch by batch.

    Arguments:
        x: input data, as a Numpy array or list of Numpy arrays
            (if the model has multiple inputs).
        batch_size: integer.
        verbose: verbosity mode, 0 or 1.

    Returns:
        A numpy array of class predictions.
    """
    proba = self.predict(x, batch_size=batch_size, verbose=verbose)
    if proba.shape[-1] > 1:
      return proba.argmax(axis=-1)
    else:
      return (proba > 0.5).astype('int32')

  def get_config(self):
    """Returns the model config as a JSON-serializable dict."""
    layer_configs = []
    for layer in self.layers:
      layer_configs.append({
          'class_name': layer.__class__.__name__,
          'config': layer.get_config()
      })
    config = {
        'name': self.name,
        'layers': copy.deepcopy(layer_configs)
    }
    if self._build_input_shape:
      config['build_input_shape'] = self._build_input_shape
    return config

  @classmethod
  def from_config(cls, config, custom_objects=None):
    """Instantiates a `Sequential` model from its config.

    Supports both the dict format produced by `get_config` and the legacy
    format where `config` is simply the list of layer configs.
    """
    if 'name' in config:
      name = config['name']
      build_input_shape = config.get('build_input_shape')
      layer_configs = config['layers']
    else:
      # Legacy config: `config` is the bare list of layer configs.
      # BUGFIX: `layer_configs` was previously left unassigned on this
      # branch, raising a NameError in the loop below.
      name = None
      build_input_shape = None
      layer_configs = config
    model = cls(name=name)
    for layer_config in layer_configs:
      layer = layer_module.deserialize(layer_config,
                                       custom_objects=custom_objects)
      model.add(layer)
    if not model.inputs and build_input_shape:
      model.build(build_input_shape)
    return model
def get_input_shape_and_dtype(layer):
  """Retrieve input shape and input dtype of layer if applicable.

  Args:
    layer: Layer (or model) instance.

  Returns:
    Tuple (input_shape, input_dtype). Both could be None if the layer
      does not have a defined input shape.

  Raises:
    ValueError: in case an empty Sequential or Graph Network is passed.
  """

  def _is_wrapped_model(candidate):
    # A graph-network Model or a Sequential acts as a "layer" wrapping
    # other layers; its input info lives on its innermost first layer.
    return ((isinstance(candidate, Model) and candidate._is_graph_network)
            or isinstance(candidate, Sequential))

  if _is_wrapped_model(layer):
    # We were passed a model as first layer.
    # This requires a specific way to figure out the
    # input shape and dtype.
    if not layer.layers:
      raise ValueError('Cannot add an empty model '
                       'to a `Sequential` model.')
    # In case of nested models: recover the first layer
    # of the deepest model to infer input shape and dtype.
    layer = layer.layers[0]
    while _is_wrapped_model(layer):
      layer = layer.layers[0]

  if hasattr(layer, '_batch_input_shape'):
    return layer._batch_input_shape, layer.dtype
  return None, None
| {
"content_hash": "f3dba9371ed7f8f2b73afd8ae95746b6",
"timestamp": "",
"source": "github",
"line_count": 362,
"max_line_length": 79,
"avg_line_length": 34.28729281767956,
"alnum_prop": 0.648082500805672,
"repo_name": "manipopopo/tensorflow",
"id": "415b15fde1655f43f3c317cfc6e7756859fc9da1",
"size": "13136",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/engine/sequential.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "324704"
},
{
"name": "C#",
"bytes": "8215"
},
{
"name": "C++",
"bytes": "46405377"
},
{
"name": "CMake",
"bytes": "206720"
},
{
"name": "Dockerfile",
"bytes": "6905"
},
{
"name": "Go",
"bytes": "1210133"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "830061"
},
{
"name": "Jupyter Notebook",
"bytes": "2632416"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52525"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99271"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "39882449"
},
{
"name": "Ruby",
"bytes": "551"
},
{
"name": "Shell",
"bytes": "447049"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
"""
Hack to get scripts to run from source checkout without having to set
PYTHONPATH.
"""
import sys
from os.path import dirname, join, abspath
# Directory containing this file.
db_path = dirname(__file__)
# One level up: the project root holding the top-level packages.
project_path = abspath(join(db_path, ".."))
# Prepend (rather than append) so the source checkout takes precedence
# over any installed copy of the project on sys.path.
sys.path.insert(0, project_path)
| {
"content_hash": "28cf2ad90728c159b701e065efcaf11b",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 69,
"avg_line_length": 22.818181818181817,
"alnum_prop": 0.7171314741035857,
"repo_name": "bd4/monster-hunter-scripts",
"id": "d44515eab15e6fff927464a6511d5f1e4b9298d8",
"size": "251",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "db/_pathfix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1404"
},
{
"name": "EJS",
"bytes": "22104"
},
{
"name": "HTML",
"bytes": "1519215"
},
{
"name": "JavaScript",
"bytes": "13604"
},
{
"name": "Python",
"bytes": "303309"
}
],
"symlink_target": ""
} |
from cached import Resource
from lib.paypal.client import get_client
from lib.paypal.errors import PaypalError
from lib.paypal.forms import GetPersonal
class Personal(object):
    """Mixin implementing creation for the PayPal personal-data resources.

    Validates the posted data with the resource's form, invokes the
    configured PayPal client method, optionally cross-checks the returned
    email against the seller's PayPal id, and stores the result on the
    seller object.
    """

    def obj_create(self, bundle, request, **kwargs):
        form = self._meta.form(bundle.data)
        if not form.is_valid():
            raise self.form_errors(form)

        client = get_client()
        result = getattr(client, self._meta.method)(*form.args())

        seller = form.cleaned_data['seller']
        # Guard against PayPal returning data for a different account.
        if (client.check_personal_email and 'email' in result
                and seller.paypal_id != result['email']):
            raise PaypalError('The user data did not match',
                              data={'email': result['email']},
                              id=100001)

        # Persist every returned field onto the seller record.
        for field, value in result.items():
            setattr(seller, field, value)
        seller.save()

        bundle.data = result
        bundle.obj = seller
        return bundle
class CheckPersonalBasic(Personal, Resource):
    """POST-only resource returning basic PayPal personal data; the
    creation flow is implemented by `Personal.obj_create`."""
    class Meta(Resource.Meta):
        resource_name = 'personal-basic'
        list_allowed_methods = ['post']
        form = GetPersonal
        method = 'get_personal_basic'  # client method invoked by obj_create
class CheckPersonalAdvanced(Personal, Resource):
    """POST-only resource returning advanced PayPal personal data; the
    creation flow is implemented by `Personal.obj_create`."""
    class Meta(Resource.Meta):
        resource_name = 'personal-advanced'
        list_allowed_methods = ['post']
        form = GetPersonal
        method = 'get_personal_advanced'  # client method invoked by obj_create
| {
"content_hash": "8c6770e4e1d8597695aa02298a491bba",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 76,
"avg_line_length": 30.916666666666668,
"alnum_prop": 0.5936657681940701,
"repo_name": "muffinresearch/solitude",
"id": "ff2e8e534ba36e7ee9d8b677f906d2b110b6d7ed",
"size": "1484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/paypal/resources/personal.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Puppet",
"bytes": "6677"
},
{
"name": "Python",
"bytes": "405779"
},
{
"name": "Shell",
"bytes": "3235"
}
],
"symlink_target": ""
} |
import logging
import cloudio.common.utils.timestamp_helpers as TimeStampProvider
from cloudio.endpoint.attribute.constraint import CloudioAttributeConstraint as AttributeConstraint
from cloudio.endpoint.attribute.type import CloudioAttributeType as AttributeType
from cloudio.endpoint.exception.cloudio_modification_exception import CloudioModificationException
from cloudio.endpoint.exception.invalid_cloudio_attribute_exception import InvalidCloudioAttributeException
from cloudio.endpoint.interface.attribute_listener import CloudioAttributeListener
from cloudio.endpoint.interface.unique_identifiable import CloudioUniqueIdentifiable
from cloudio.endpoint.topicuuid import TopicUuid
class CloudioAttribute(CloudioUniqueIdentifiable):
    """The leaf information in the cloud.io data model.

    An attribute has a fixed name, type, parent and constraint (none of
    which may change once set), plus a timestamped value that can be
    updated either by the endpoint or from the cloud.
    """

    log = logging.getLogger(__name__)

    def __init__(self):
        self._name = None  # type: str or None
        self._parent = None
        self._topic_uuid = None  # type: TopicUuid or None
        self._constraint = None
        self._type = None  # type: AttributeType or None
        self._timestamp = None
        self._value = None  # type: bool or int or float or str or None
        self._listeners = None  # type: list[CloudioAttributeListener] or None

    def add_listener(self, listener):
        """Adds the given listener to the list of listeners that will get informed about a change of the attribute.

        :param listener: Reference to the object implementing the CloudioAttributeListener interface to add.
        :type listener: CloudioAttributeListener
        """
        if listener is not None:
            # Lazy initialization of the listener list
            if self._listeners is None:
                self._listeners = []

            # Finally add the listener
            self._listeners.append(listener)

    def remove_listener(self, listener):
        """Removes the given listener from the list of listeners.

        :param listener: Reference to the object implementing the CloudioAttributeListener interface to remove.
        :type listener: CloudioAttributeListener
        """
        if listener is not None and self._listeners is not None:
            self._listeners.remove(listener)

    ######################################################################
    # CloudioUniqueIdentifiable implementation
    #
    def get_uuid(self):
        # Lazily create and cache the topic UUID for this attribute.
        if not self._topic_uuid:
            self._topic_uuid = TopicUuid(self)
        return self._topic_uuid

    def set_value(self, value, timestamp=None):
        """Updates the value from the endpoint side and notifies the cloud.

        :param value: New value to set.
        :param timestamp: Value timestamp in milliseconds; defaults to now.
        """
        if not timestamp:
            timestamp = TimeStampProvider.get_time_in_milliseconds()

        # TODO Check constraint.

        # Update value
        self._timestamp = timestamp
        self._set_value_with_type_check(value)

        # Send change to cloud.
        if self.get_parent():
            self.get_parent().attribute_has_changed_by_endpoint(self)

        # TODO Inform all registered listeners.

    def set_value_from_cloud(self, value, timestamp):
        """Updates the value from the cloud.

        Note that this method should not be used by endpoints, as it guarantees
        that only attributes with semantics compatible with cloud updates can be updated.

        :param value: New value to set from cloud.
        :param timestamp: Timestamp of the value from the cloud.
        :return: True if the value was updated, false if not.
        """
        # TODO: Check if the cloud can change the attribute.
        # self.constraint.cloudWillChange()

        # Check if the value from the cloud is older than the actual one and do nothing if that is the case.
        if self._timestamp is not None and self._timestamp >= timestamp:
            print('Warning: Ignoring new value from cloud.iO. Not valid timestamp!')
            return False

        # Update the value
        self._timestamp = timestamp
        self._set_value_with_type_check(value)

        # Notify the cloud.
        if self._parent is not None:
            self._parent.attribute_has_changed_by_cloud(self)

        # Notify all listeners.
        if self._listeners:
            for listener in self._listeners:
                # noinspection unchecked
                listener.attribute_has_changed(self, from_cloud=True)
        else:
            self.log.warning('No listeners connected to attribute \"' + self.get_name() + '\"!')
        return True

    def _set_value_with_type_check(self, value):
        """Assigns a new value and checks the rvalue type.
        """
        if self._type == AttributeType.Boolean:
            self._value = bool(value)
        elif self._type == AttributeType.Integer:
            self._value = int(value)
        elif self._type == AttributeType.Number:
            self._value = float(value)
        elif self._type == AttributeType.String:
            assert isinstance(value, str)
            self._value = value
        else:
            self.log.warning('Need to assign value which has unsupported type!')
            self.set_type(type(value))  # Try to set the type
            self._value = value

    def get_type(self):
        """Returns the actual type of the attribute."""
        if self._type is not None:
            return self._type.type
        else:
            # Fallback for attributes whose type was never set explicitly.
            self.log.warning('Deprecated call to get_type()!')
            return AttributeType.from_raw_type(type(self._value))

    def get_type_as_string(self):
        """Returns the actual type of the attribute as a string."""
        if self._type is not None:
            return self._type.to_string()
        else:
            self.log.warning('Deprecated call to get_type_as_string()!')
            # BUGFIX: the fallback result was computed but never returned
            # (the method implicitly returned None); mirror get_type().
            return AttributeType.from_raw_type_to_string(type(self._value))

    ######################################################################
    # Named item implementation
    #
    def get_name(self):
        return self._name if self._name else 'unknown'

    def set_name(self, name):
        """Sets the attribute name; may be called only once.

        :type name: str
        """
        # If the attribute already has a name (we are renaming the attribute) then fail with a runtime exception.
        if self._name is not None:
            raise CloudioModificationException('The Attribute has already a name (Renaming attributes is forbidden)!')

        assert name and name != '', 'Name not valid!'
        self._name = name

    def get_value(self):
        """Returns the current value of the attribute.

        :return: Attributes current value.
        """
        return self._value

    def set_type(self, the_type: [bool, int, float, str]):
        """Sets the type of the attribute.

        Note that the type of an attribute is not allowed to change over time, so if
        the attribute already has a type, the method fails with an runtime exception.

        :param the_type Python type like bool, int, float and str
        :type [bool, int, float, str]
        """
        if self._value:
            raise CloudioModificationException('The Attribute has already a type (Changing the type is not allowed)!')

        if the_type in (bool, int, float, bytes, str):
            # Initialize the value to the type's default (False, 0, 0.0, '').
            self._value = the_type()

            # Init to invalid
            self._type = AttributeType(AttributeType.Invalid)
            # Set cloudio attribute type accordingly
            if the_type in (bool,):
                self._type = AttributeType(AttributeType.Boolean)
            elif the_type in (int,):
                self._type = AttributeType(AttributeType.Integer)
            elif the_type in (float,):
                self._type = AttributeType(AttributeType.Number)
            else:
                assert the_type in (bytes, str), 'Seems we got a new type!'
                self._type = AttributeType(AttributeType.String)
        else:
            raise InvalidCloudioAttributeException(the_type)

    ######################################################################
    # Public API
    #
    def get_timestamp(self):
        return self._timestamp

    def set_static_value(self, value):
        """Initializes the static value

        This can be only done using static attributes (@StaticAttribute or @Static).
        The value of a static attribute can be changed as often as wanted, the only constraint is that the node
        containing the static attribute has not been registered within the endpoint.

        :param value: The initial value to set
        :return:
        """
        # TODO Check constraint
        # self._constraint.endpointWillChangeStatic()

        self._set_value_with_type_check(value)

    def get_parent(self):
        return self._parent

    def set_parent(self, parent):
        """Sets the parent of the attribute. Note that attributes can not be moved, so this method throws a runtime
        exception if someone tries to move the attribute to a new parent.
        """
        # If the attribute already has a parent (we are moving the attribute) then fail with a runtime exception.
        if self._parent:
            raise CloudioModificationException('The parent of an Attribute can never be changed ' +
                                               '(Attributes can not be moved)!')

        # assert isinstance(parent, CloudioAttributeContainer), 'Wrong type for parent attribute!'
        self._parent = parent

    def get_constraint(self):
        return self._constraint

    def set_constraint(self, constraint):
        """Sets the attribute constraint; may be called only once.

        :param constraint:
        :type constraint: CloudioAttributeConstraint
        :return:
        """
        # Convert to AttributeConstraint if 'constraint' parameter is a string
        if isinstance(constraint, str):
            constraint = AttributeConstraint(constraint)

        assert isinstance(constraint, AttributeConstraint), 'Wrong type'

        if self._constraint:
            raise CloudioModificationException('The Attribute has already a constraint ' +
                                               '(Changing constraints is not allowed)!')

        # Set the constraint
        self._constraint = constraint

    def to_json(self, encoder):
        """Pick out the attributes we want to store / publish.
        """
        # NOTE(review): get_type_as_string() passes type(self._value) to
        # from_raw_type_to_string(), whereas here the raw value is passed
        # directly -- confirm which signature the helper actually expects.
        attr_dict = {
            'type': AttributeType.from_raw_type_to_string(self._value),
            'value': self._value,
            'constraint': self._constraint
        }
        # Name should not be added for @online message
        # attr_dict['name'] = self._name

        # Get the type of the value and convert it to cloud.io attribute type
        # attr_dict['timestamp'] = self._timestamp
        return encoder.default(attr_dict)
| {
"content_hash": "029c3f01230fdef931729b5ceeb2b32e",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 118,
"avg_line_length": 38.846715328467155,
"alnum_prop": 0.6184704998121007,
"repo_name": "cloudio-project/cloudio-endpoint-python",
"id": "ac4513dc3d8e0bb12fce0af2c3768e5bf90e22c7",
"size": "10669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/cloudio/endpoint/attribute/attribute.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "170484"
}
],
"symlink_target": ""
} |
"""Tests for tf.contrib.layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow.compat.v1.losses as losses
from tf_slim import layers as layers_lib
from tf_slim.layers import layers as _layers
from tf_slim.layers import regularizers
from tf_slim.ops import arg_scope as arg_scope_lib
from tf_slim.ops import variables
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
def setUpModule():
  """Runs once before any test in this module: force TF1 graph mode, since
  these layer tests build graphs and run them in explicit sessions."""
  tf.disable_eager_execution()
# Short module-level alias for the arg_scope context manager used below.
arg_scope = arg_scope_lib.arg_scope
class AvgPool2DTest(test.TestCase):
  """Tests for `avg_pool2d`: output shapes, op names and collections."""

  def testInvalidDataFormat(self):
    """An unsupported data_format string must raise ValueError."""
    height, width = 3, 6
    images = np.random.uniform(size=(5, height, width, 3))
    # assertRaisesRegexp was removed in Python 3.12; assertRaisesRegex is the
    # supported spelling and behaves identically.
    with self.assertRaisesRegex(ValueError,
                                'data_format has to be either NCHW or NHWC.'):
      _layers.avg_pool2d(images, [3, 3], data_format='CHWN')

  def testCreateAvgPool(self):
    """Default stride-2 pooling shrinks the spatial dims as expected."""
    height, width = 3, 6
    images = np.random.uniform(size=(5, height, width, 3))
    output = _layers.avg_pool2d(images, [3, 3])
    self.assertEqual(output.op.name, 'AvgPool2D/AvgPool')
    self.assertListEqual(output.get_shape().as_list(), [5, 1, 2, 3])

  def testCreateAvgPoolNCHW(self):
    """With NCHW input, the trailing (spatial) dims are pooled."""
    height, width = 3, 6
    images = np.random.uniform(size=(5, 2, height, width))
    output = _layers.avg_pool2d(images, [3, 3], data_format='NCHW')
    self.assertListEqual(output.get_shape().as_list(), [5, 2, 1, 2])

  def testCollectOutputs(self):
    """outputs_collections records the aliased output tensor."""
    height, width = 3, 6
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    output = _layers.avg_pool2d(images, [3, 3], outputs_collections='outputs')
    output_collected = ops.get_collection('outputs')[0]
    self.assertEqual(output_collected.aliases, ['AvgPool2D'])
    self.assertEqual(output_collected, output)

  def testCreateSquareAvgPool(self):
    """A scalar kernel size expands to a square kernel."""
    height, width = 3, 6
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    output = _layers.avg_pool2d(images, 3)
    self.assertEqual(output.op.name, 'AvgPool2D/AvgPool')
    self.assertListEqual(output.get_shape().as_list(), [5, 1, 2, 3])

  def testCreateAvgPoolWithScope(self):
    """An explicit scope names the resulting op."""
    height, width = 3, 6
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    output = _layers.avg_pool2d(images, [3, 3], scope='pool1')
    self.assertEqual(output.op.name, 'pool1/AvgPool')

  def testCreateAvgPoolWithSamePadding(self):
    """SAME padding rounds the strided output dims up instead of down."""
    height, width = 3, 6
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    output = _layers.avg_pool2d(images, [3, 3], padding='SAME')
    self.assertListEqual(output.get_shape().as_list(), [5, 2, 3, 3])

  def testCreateAvgPoolWithSamePaddingNCHW(self):
    """SAME padding with NCHW input pools the trailing dims."""
    height, width = 3, 6
    images = random_ops.random_uniform((5, 3, height, width), seed=1)
    output = _layers.avg_pool2d(
        images, [3, 3], padding='SAME', data_format='NCHW')
    self.assertListEqual(output.get_shape().as_list(), [5, 3, 2, 3])

  def testCreateAvgPoolStrideWithSamePadding(self):
    """stride=1 with SAME padding preserves the input spatial shape."""
    height, width = 3, 6
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    output = _layers.avg_pool2d(images, [3, 3], stride=1, padding='SAME')
    self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])

  def testGlobalAvgPool(self):
    """Pooling over the full spatial extent yields a 1x1 map per channel."""
    height, width = 3, 6
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    output = _layers.avg_pool2d(images, images.get_shape()[1:3], stride=1)
    self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
class AvgPool3DTest(test.TestCase):
  """Tests for `avg_pool3d`: output shapes, op names and collections."""

  def testInvalidDataFormat(self):
    """An unsupported data_format string must raise ValueError."""
    depth, height, width = 3, 6, 9
    images = np.random.uniform(size=(5, depth, height, width, 3))
    # assertRaisesRegexp was removed in Python 3.12; assertRaisesRegex is the
    # supported spelling and behaves identically.
    with self.assertRaisesRegex(
        ValueError, 'data_format has to be either NCDHW or NDHWC.'):
      _layers.avg_pool3d(images, [3, 3, 3], data_format='CDHWN')

  def testCreateAvgPool(self):
    """Default stride-2 3-D pooling shrinks the spatial dims as expected."""
    depth, height, width = 3, 6, 9
    images = np.random.uniform(size=(5, depth, height, width, 3))
    output = _layers.avg_pool3d(images, [3, 3, 3])
    self.assertEqual(output.op.name, 'AvgPool3D/AvgPool3D')
    self.assertListEqual(output.get_shape().as_list(), [5, 1, 2, 4, 3])

  def testCreateAvgPoolNCDHW(self):
    """NCDHW pooling produces the channel-first shape."""
    depth, height, width = 3, 6, 9
    images = np.random.uniform(size=(5, 2, depth, height, width))
    output = _layers.avg_pool3d(images, [3, 3, 3], data_format='NCDHW')
    # The op name suggests NCDHW is handled via transposes around an NDHWC
    # pool — presumably an implementation detail of avg_pool3d; verify there.
    self.assertEqual(output.op.name, 'AvgPool3D/transpose_1')
    self.assertListEqual(output.get_shape().as_list(), [5, 2, 1, 2, 4])

  def testCollectOutputs(self):
    """outputs_collections records the aliased output tensor."""
    depth, height, width = 3, 6, 9
    images = random_ops.random_uniform((5, depth, height, width, 3), seed=1)
    output = _layers.avg_pool3d(
        images, [3, 3, 3], outputs_collections='outputs')
    output_collected = ops.get_collection('outputs')[0]
    self.assertEqual(output_collected.aliases, ['AvgPool3D'])
    self.assertEqual(output_collected, output)

  def testCreateSquareAvgPool(self):
    """A scalar kernel size expands to a cubic kernel."""
    depth, height, width = 3, 6, 9
    images = random_ops.random_uniform((5, depth, height, width, 3), seed=1)
    output = _layers.avg_pool3d(images, 3)
    self.assertEqual(output.op.name, 'AvgPool3D/AvgPool3D')
    self.assertListEqual(output.get_shape().as_list(), [5, 1, 2, 4, 3])

  def testCreateAvgPoolWithScope(self):
    """An explicit scope names the resulting op."""
    depth, height, width = 3, 6, 9
    images = random_ops.random_uniform((5, depth, height, width, 3), seed=1)
    output = _layers.avg_pool3d(images, [3, 3, 3], scope='pool1')
    self.assertEqual(output.op.name, 'pool1/AvgPool3D')

  def testCreateAvgPoolWithSamePadding(self):
    """SAME padding rounds the strided output dims up instead of down."""
    depth, height, width = 3, 6, 9
    images = random_ops.random_uniform((5, depth, height, width, 3), seed=1)
    output = _layers.avg_pool3d(images, [3, 3, 3], padding='SAME')
    self.assertListEqual(output.get_shape().as_list(), [5, 2, 3, 5, 3])

  def testCreateAvgPoolWithSamePaddingNCDHW(self):
    """SAME padding with NCDHW input pools the trailing dims."""
    depth, height, width = 3, 6, 9
    images = random_ops.random_uniform((5, 3, depth, height, width), seed=1)
    output = _layers.avg_pool3d(
        images, [3, 3, 3], padding='SAME', data_format='NCDHW')
    self.assertListEqual(output.get_shape().as_list(), [5, 3, 2, 3, 5])

  def testCreateAvgPoolStrideWithSamePadding(self):
    """stride=1 with SAME padding preserves the input spatial shape."""
    depth, height, width = 3, 6, 9
    images = random_ops.random_uniform((5, depth, height, width, 3), seed=1)
    output = _layers.avg_pool3d(images, [3, 3, 3], stride=1, padding='SAME')
    self.assertListEqual(output.get_shape().as_list(),
                         [5, depth, height, width, 3])

  def testGlobalAvgPool(self):
    """Pooling over the full spatial extent yields a 1x1x1 map per channel."""
    depth, height, width = 3, 6, 9
    images = random_ops.random_uniform((5, depth, height, width, 3), seed=1)
    output = _layers.avg_pool3d(images, images.get_shape()[1:4], stride=1)
    self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 1, 3])
class PoolTest(test.TestCase):
  """Tests for the generic `pool` layer: AVG/MAX, strides and dilation.

  Consistency fix: shape-list comparisons now uniformly use
  `assertListEqual` (several methods used plain `assertEqual`, which passes
  for equal lists but is inconsistent with the sibling pooling test classes
  and gives weaker failure diagnostics).
  """

  def testCreatePool(self):
    """AVG pooling names its op 'avg_pool' and reduces to 1x1."""
    height, width = 3, 3
    images = np.random.uniform(size=(5, height, width, 3))
    output = _layers.pool(images, [3, 3], pooling_type='AVG')
    self.assertEqual(output.op.name, 'avg_pool')
    self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])

  def testCreatePoolNCHW(self):
    """AVG pooling with NCHW input reduces the trailing dims."""
    height, width = 3, 3
    images = np.random.uniform(size=(5, 3, height, width))
    output = _layers.pool(
        images, [3, 3], pooling_type='AVG', data_format='NCHW')
    self.assertEqual(output.op.name, 'avg_pool')
    self.assertListEqual(output.get_shape().as_list(), [5, 3, 1, 1])

  def testCollectOutputs(self):
    """outputs_collections records the aliased output tensor."""
    height, width = 3, 3
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    output = _layers.pool(
        images, [3, 3], pooling_type='AVG', outputs_collections='outputs')
    output_collected = ops.get_collection('outputs')[0]
    self.assertEqual(output_collected.aliases, ['avg_pool'])
    self.assertEqual(output_collected, output)

  def testCreateSquareAvgPool(self):
    """A scalar kernel size expands to a square kernel."""
    height, width = 3, 3
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    output = _layers.pool(images, 3, pooling_type='AVG')
    self.assertEqual(output.op.name, 'avg_pool')
    self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])

  def testCreateMaxPoolWithScope(self):
    """An explicit scope names the resulting op."""
    height, width = 3, 3
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    output = _layers.pool(images, [3, 3], pooling_type='MAX', scope='pool1')
    self.assertEqual(output.op.name, 'pool1')

  def testCreateMaxPoolWithSamePadding(self):
    """MAX pooling with SAME padding keeps the spatial shape here."""
    height, width = 3, 3
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    output = _layers.pool(images, [3, 3], pooling_type='MAX', padding='SAME')
    self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 3])

  def testCreateAvgPoolStrideWithSamePadding(self):
    """stride=1 with SAME padding preserves the input spatial shape."""
    height, width = 3, 3
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    output = _layers.pool(
        images, [3, 3], stride=1, padding='SAME', pooling_type='AVG')
    self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])

  def testGlobalAvgPool(self):
    """Pooling over the full spatial extent yields a 1x1 map per channel."""
    height, width = 3, 3
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    output = _layers.pool(
        images, images.get_shape()[1:3], stride=1, pooling_type='AVG')
    self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])

  def testAvgPoolWithStride(self):
    """Per-axis strides produce per-axis reductions."""
    height, width = 5, 8
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    output = _layers.pool(images, [2, 3], stride=[1, 2], pooling_type='AVG')
    self.assertListEqual(output.get_shape().as_list(), [5, 4, 3, 3])

  def testAvgPoolWithDilation(self):
    """Dilation widens the effective kernel without striding."""
    height, width = 5, 8
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    output = _layers.pool(
        images, [2, 3], dilation_rate=[1, 2], pooling_type='AVG')
    self.assertListEqual(output.get_shape().as_list(), [5, 4, 4, 3])

  def testAvgPoolWithDilationNCHW(self):
    """Dilation behaves the same for channel-first input."""
    height, width = 5, 8
    images = random_ops.random_uniform((5, 3, height, width), seed=1)
    output = _layers.pool(
        images, [2, 3],
        dilation_rate=[1, 2],
        pooling_type='AVG',
        data_format='NCHW')
    self.assertListEqual(output.get_shape().as_list(), [5, 3, 4, 4])
class BiasAddTest(test.TestCase):
  """Tests for `bias_add`: op naming, activation, and bias shapes."""

  def testCreate(self):
    """bias_add keeps the input shape and names its op 'BiasAdd'."""
    rows, cols = 3, 3
    with self.cached_session():
      inputs = np.random.uniform(size=(5, rows, cols, 3))
      out = _layers.bias_add(inputs)
      self.assertEqual(out.op.name, 'BiasAdd/BiasAdd')
      self.assertListEqual(out.get_shape().as_list(), [5, rows, cols, 3])

  def testCreateWithActivation(self):
    """A supplied activation_fn becomes the final (name-giving) op."""
    rows, cols = 3, 3
    with self.cached_session():
      inputs = random_ops.random_uniform((5, rows, cols, 3), seed=1)
      out = _layers.bias_add(inputs, activation_fn=nn_ops.relu)
      self.assertEqual(out.op.name, 'BiasAdd/Relu')
      self.assertListEqual(out.get_shape().as_list(), [5, rows, cols, 3])

  def testCreateDimensions(self):
    """For each input rank, biases match the last input dimension."""
    full_shape = [5, 2, 3, 4]
    with self.cached_session():
      for rank in (2, 3, 4):
        input_shape = full_shape[:rank]
        inputs = random_ops.random_uniform(input_shape, seed=1)
        out = _layers.bias_add(inputs)
        self.assertListEqual(out.get_shape().as_list(), input_shape)
        # Each loop iteration creates a fresh bias var; check the newest one.
        biases = variables.get_variables_by_name('biases')[-1]
        self.assertListEqual(biases.get_shape().as_list(), [input_shape[-1]])
class ConvolutionTest(test.TestCase):
  """Tests for `convolution2d`/`convolution3d`.

  Covers output shapes and op names, weight/bias variable creation,
  regularization collections, variable reuse, batch norm as normalizer,
  dilation rates, dynamic (placeholder) shapes and NCHW data format.

  Fix: the deprecated `assertRaisesRegexp` alias (removed in Python 3.12)
  is replaced by `assertRaisesRegex` throughout.
  """

  def testInvalidShape(self):
    """Rank-mismatched inputs must be rejected with a clear error."""
    with self.cached_session():
      images_2d = random_ops.random_uniform((5, 7, 9, 3), seed=1)
      with self.assertRaisesRegex(
          ValueError, 'Convolution expects input with rank 5, got 4'):
        layers_lib.convolution3d(images_2d, 32, 3)
      images_3d = random_ops.random_uniform((5, 6, 7, 9, 3), seed=1)
      with self.assertRaisesRegex(
          ValueError, 'Convolution expects input with rank 4, got 5'):
        layers_lib.convolution2d(images_3d, 32, 3)

  def testInvalidDataFormat(self):
    """An unsupported data_format string must raise ValueError."""
    height, width = 7, 9
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      with self.assertRaisesRegex(ValueError, 'data_format'):
        layers_lib.convolution2d(images, 32, 3, data_format='CHWN')

  def testCreateConv(self):
    """A basic conv creates correctly shaped output, weights and biases."""
    height, width = 7, 9
    with self.cached_session():
      images = np.random.uniform(size=(5, height, width, 4)).astype(np.float32)
      output = layers_lib.convolution2d(images, 32, [3, 3])
      self.assertEqual(output.op.name, 'Conv/Relu')
      self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
      weights = variables.get_variables_by_name('weights')[0]
      self.assertListEqual(weights.get_shape().as_list(), [3, 3, 4, 32])
      biases = variables.get_variables_by_name('biases')[0]
      self.assertListEqual(biases.get_shape().as_list(), [32])

  def testCreateConvNCHW(self):
    """Channel-first input yields channel-first output; weights unchanged."""
    height, width = 7, 9
    with self.cached_session():
      images = np.random.uniform(size=(5, 4, height, width)).astype(np.float32)
      output = layers_lib.convolution2d(images, 32, [3, 3], data_format='NCHW')
      self.assertEqual(output.op.name, 'Conv/Relu')
      self.assertListEqual(output.get_shape().as_list(), [5, 32, height, width])
      weights = variables.get_variables_by_name('weights')[0]
      self.assertListEqual(weights.get_shape().as_list(), [3, 3, 4, 32])
      biases = variables.get_variables_by_name('biases')[0]
      self.assertListEqual(biases.get_shape().as_list(), [32])

  def testCreateSquareConv(self):
    """A scalar kernel size expands to a square kernel."""
    height, width = 7, 9
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      output = layers_lib.convolution2d(images, 32, 3)
      self.assertEqual(output.op.name, 'Conv/Relu')
      self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])

  def testCreateConvWithTensorShape(self):
    """Kernel size may be given as a TensorShape slice."""
    height, width = 7, 9
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      output = layers_lib.convolution2d(images, 32, images.get_shape()[1:3])
      self.assertEqual(output.op.name, 'Conv/Relu')
      self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])

  def testCreateFullyConv(self):
    """A kernel covering the whole input acts like a dense layer."""
    height, width = 7, 9
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 32), seed=1)
      output = layers_lib.convolution2d(
          images, 64, images.get_shape()[1:3], padding='VALID')
      self.assertEqual(output.op.name, 'Conv/Relu')
      self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 64])
      biases = variables.get_variables_by_name('biases')[0]
      self.assertListEqual(biases.get_shape().as_list(), [64])

  def testFullyConvWithCustomGetter(self):
    """A custom variable getter sees every variable the layer creates."""
    height, width = 7, 9
    with self.cached_session():
      called = [0]

      def custom_getter(getter, *args, **kwargs):
        called[0] += 1
        return getter(*args, **kwargs)

      with variable_scope.variable_scope('test', custom_getter=custom_getter):
        images = random_ops.random_uniform((5, height, width, 32), seed=1)
        layers_lib.convolution2d(images, 64, images.get_shape()[1:3])
      self.assertEqual(called[0], 2)  # Custom getter called twice.

  def testCreateVerticalConv(self):
    """A [3, 1] kernel produces [3, 1, in, out] weights."""
    height, width = 7, 9
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 4), seed=1)
      output = layers_lib.convolution2d(images, 32, [3, 1])
      self.assertEqual(output.op.name, 'Conv/Relu')
      self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
      weights = variables.get_variables_by_name('weights')[0]
      self.assertListEqual(weights.get_shape().as_list(), [3, 1, 4, 32])
      biases = variables.get_variables_by_name('biases')[0]
      self.assertListEqual(biases.get_shape().as_list(), [32])

  def testCreateHorizontalConv(self):
    """A [1, 3] kernel produces [1, 3, in, out] weights."""
    height, width = 7, 9
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 4), seed=1)
      output = layers_lib.convolution2d(images, 32, [1, 3])
      self.assertEqual(output.op.name, 'Conv/Relu')
      self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
      weights = variables.get_variables_by_name('weights')[0]
      self.assertListEqual(weights.get_shape().as_list(), [1, 3, 4, 32])

  def testCreateConvWithStride(self):
    """stride=2 halves even spatial dims (SAME padding is the default)."""
    height, width = 6, 8
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      output = layers_lib.convolution2d(images, 32, [3, 3], stride=2)
      self.assertEqual(output.op.name, 'Conv/Relu')
      # Use integer division: shape entries are ints (3.0 == 3 would also
      # pass, but the float form was accidental).
      self.assertListEqual(output.get_shape().as_list(),
                           [5, height // 2, width // 2, 32])

  def testCreateConvCreatesWeightsAndBiasesVars(self):
    """The layer creates scoped weight and bias variables lazily."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with self.cached_session():
      self.assertFalse(variables.get_variables('conv1/weights'))
      self.assertFalse(variables.get_variables('conv1/biases'))
      layers_lib.convolution2d(images, 32, [3, 3], scope='conv1')
      self.assertTrue(variables.get_variables('conv1/weights'))
      self.assertTrue(variables.get_variables('conv1/biases'))

  def testCreateConvWithScope(self):
    """An explicit scope names the resulting op."""
    height, width = 7, 9
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      output = layers_lib.convolution2d(images, 32, [3, 3], scope='conv1')
      self.assertEqual(output.op.name, 'conv1/Relu')

  def testCreateConvWithCollection(self):
    """outputs_collections records the aliased conv output."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with ops.name_scope('fe'):
      conv = layers_lib.convolution2d(
          images, 32, [3, 3], outputs_collections='outputs', scope='Conv')
    output_collected = ops.get_collection('outputs')[0]
    self.assertEqual(output_collected.aliases, ['Conv'])
    self.assertEqual(output_collected, conv)

  def testCreateConvWithoutActivation(self):
    """activation_fn=None leaves BiasAdd as the final op."""
    height, width = 7, 9
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      output = layers_lib.convolution2d(images, 32, [3, 3], activation_fn=None)
      self.assertEqual(output.op.name, 'Conv/BiasAdd')

  def testCreateConvValid(self):
    """VALID padding trims kernel-1 from each spatial dim."""
    height, width = 7, 9
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      output = layers_lib.convolution2d(images, 32, [3, 3], padding='VALID')
      self.assertListEqual(output.get_shape().as_list(), [5, 5, 7, 32])

  def testCreateConvWithWD(self):
    """A weights regularizer contributes decay * l2_loss(weights)."""
    height, width = 7, 9
    weight_decay = 0.01
    with self.cached_session() as sess:
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      regularizer = regularizers.l2_regularizer(weight_decay)
      layers_lib.convolution2d(
          images, 32, [3, 3], weights_regularizer=regularizer)
      l2_loss = nn_ops.l2_loss(variables.get_variables_by_name('weights')[0])
      wd = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
      self.assertEqual(wd.op.name, 'Conv/kernel/Regularizer/l2_regularizer')
      sess.run(variables_lib.global_variables_initializer())
      self.assertAlmostEqual(sess.run(wd), weight_decay * l2_loss.eval())

  def testCreateConvNoRegularizers(self):
    """Without a regularizer, the regularization collection stays empty."""
    height, width = 7, 9
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      layers_lib.convolution2d(images, 32, [3, 3])
      self.assertEqual(
          ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES), [])

  def testReuseVars(self):
    """reuse=True shares the existing scope's variables."""
    height, width = 7, 9
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      layers_lib.convolution2d(images, 32, [3, 3], scope='conv1')
      self.assertEqual(len(variables.get_variables()), 2)
      layers_lib.convolution2d(images, 32, [3, 3], scope='conv1', reuse=True)
      self.assertEqual(len(variables.get_variables()), 2)

  def testNonReuseVars(self):
    """Without reuse, a second call creates a second variable pair."""
    height, width = 7, 9
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      layers_lib.convolution2d(images, 32, [3, 3])
      self.assertEqual(len(variables.get_variables()), 2)
      layers_lib.convolution2d(images, 32, [3, 3])
      self.assertEqual(len(variables.get_variables()), 4)

  def testReuseConvWithWD(self):
    """Reusing a regularized conv must not duplicate the reg loss."""
    height, width = 7, 9
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      weight_decay = regularizers.l2_regularizer(0.01)
      with arg_scope(
          [layers_lib.convolution2d], weights_regularizer=weight_decay):
        layers_lib.convolution2d(images, 32, [3, 3], scope='conv1')
        self.assertEqual(len(variables.get_variables()), 2)
        self.assertEqual(
            len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
        layers_lib.convolution2d(images, 32, [3, 3], scope='conv1', reuse=True)
        self.assertEqual(len(variables.get_variables()), 2)
        self.assertEqual(
            len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)

  def testConvWithBatchNorm(self):
    """batch_norm as normalizer adds 3 variables per conv (no biases)."""
    height, width = 7, 9
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 32), seed=1)
      with arg_scope(
          [layers_lib.convolution2d],
          normalizer_fn=_layers.batch_norm,
          normalizer_params={
              'decay': 0.9
          }):
        net = layers_lib.convolution2d(images, 32, [3, 3])
        net = layers_lib.convolution2d(net, 32, [3, 3])
      self.assertEqual(len(variables.get_variables()), 8)
      self.assertEqual(len(variables.get_variables('Conv/BatchNorm')), 3)
      self.assertEqual(len(variables.get_variables('Conv_1/BatchNorm')), 3)

  def testReuseConvWithBatchNorm(self):
    """Reuse shares BatchNorm variables too; no Conv_1 scope is created."""
    height, width = 7, 9
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 32), seed=1)
      with arg_scope(
          [layers_lib.convolution2d],
          normalizer_fn=_layers.batch_norm,
          normalizer_params={
              'decay': 0.9
          }):
        net = layers_lib.convolution2d(images, 32, [3, 3], scope='Conv')
        net = layers_lib.convolution2d(
            net, 32, [3, 3], scope='Conv', reuse=True)
      self.assertEqual(len(variables.get_variables()), 4)
      self.assertEqual(len(variables.get_variables('Conv/BatchNorm')), 3)
      self.assertEqual(len(variables.get_variables('Conv_1/BatchNorm')), 0)

  def testCreateConvCreatesWeightsAndBiasesVarsWithRateTwo(self):
    """Dilated (rate=2) convs create the same scoped variables."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with self.cached_session():
      self.assertFalse(variables.get_variables('conv1/weights'))
      self.assertFalse(variables.get_variables('conv1/biases'))
      layers_lib.convolution2d(images, 32, [3, 3], rate=2, scope='conv1')
      self.assertTrue(variables.get_variables('conv1/weights'))
      self.assertTrue(variables.get_variables('conv1/biases'))

  def testOutputSizeWithRateTwoSamePadding(self):
    """rate=2 with SAME padding keeps the spatial shape."""
    num_filters = 32
    input_size = [5, 10, 12, 3]
    expected_size = [5, 10, 12, num_filters]
    images = random_ops.random_uniform(input_size, seed=1)
    output = layers_lib.convolution2d(
        images, num_filters, [3, 3], rate=2, padding='SAME')
    self.assertListEqual(list(output.get_shape().as_list()), expected_size)
    with self.cached_session() as sess:
      sess.run(variables_lib.global_variables_initializer())
      self.assertEqual(output.op.name, 'Conv/Relu')
      self.assertListEqual(list(output.eval().shape), expected_size)

  def testOutputSizeWithRateTwoValidPadding(self):
    """rate=2 with VALID padding trims 2*(kernel-1) per spatial dim."""
    num_filters = 32
    input_size = [5, 10, 12, 3]
    expected_size = [5, 6, 8, num_filters]
    images = random_ops.random_uniform(input_size, seed=1)
    output = layers_lib.convolution2d(
        images, num_filters, [3, 3], rate=2, padding='VALID')
    self.assertListEqual(list(output.get_shape().as_list()), expected_size)
    with self.cached_session() as sess:
      sess.run(variables_lib.global_variables_initializer())
      self.assertEqual(output.op.name, 'Conv/Relu')
      self.assertListEqual(list(output.eval().shape), expected_size)

  def testOutputSizeWithRateTwoThreeValidPadding(self):
    """Per-axis rates [2, 3] trim each axis independently."""
    num_filters = 32
    input_size = [5, 10, 12, 3]
    expected_size = [5, 6, 6, num_filters]
    images = random_ops.random_uniform(input_size, seed=1)
    output = layers_lib.convolution2d(
        images, num_filters, [3, 3], rate=[2, 3], padding='VALID')
    self.assertListEqual(list(output.get_shape().as_list()), expected_size)
    with self.cached_session() as sess:
      sess.run(variables_lib.global_variables_initializer())
      self.assertEqual(output.op.name, 'Conv/Relu')
      self.assertListEqual(list(output.eval().shape), expected_size)

  def testDynamicOutputSizeWithRateOneValidPadding(self):
    """Unknown spatial dims stay unknown statically, resolve at run time."""
    num_filters = 32
    input_size = [5, 9, 11, 3]
    expected_size = [None, None, None, num_filters]
    expected_size_dynamic = [5, 7, 9, num_filters]
    with self.cached_session():
      images = array_ops.placeholder(np.float32,
                                     [None, None, None, input_size[3]])
      output = layers_lib.convolution2d(
          images, num_filters, [3, 3], rate=1, padding='VALID')
      variables_lib.global_variables_initializer().run()
      self.assertEqual(output.op.name, 'Conv/Relu')
      self.assertListEqual(output.get_shape().as_list(), expected_size)
      eval_output = output.eval({images: np.zeros(input_size, np.float32)})
      self.assertListEqual(list(eval_output.shape), expected_size_dynamic)

  def testDynamicOutputSizeWithRateOneValidPaddingNCHW(self):
    """Same as above with NCHW input; only runs on a CUDA device."""
    if test.is_gpu_available(cuda_only=True):
      num_filters = 32
      input_size = [5, 3, 9, 11]
      expected_size = [None, num_filters, None, None]
      expected_size_dynamic = [5, num_filters, 7, 9]
      with self.session(use_gpu=True):
        images = array_ops.placeholder(np.float32,
                                       [None, input_size[1], None, None])
        output = layers_lib.convolution2d(
            images,
            num_filters, [3, 3],
            rate=1,
            padding='VALID',
            data_format='NCHW')
        variables_lib.global_variables_initializer().run()
        self.assertEqual(output.op.name, 'Conv/Relu')
        self.assertListEqual(output.get_shape().as_list(), expected_size)
        eval_output = output.eval({images: np.zeros(input_size, np.float32)})
        self.assertListEqual(list(eval_output.shape), expected_size_dynamic)

  def testDynamicOutputSizeWithRateTwoValidPadding(self):
    """rate=2 with dynamic input shapes resolves correctly at run time."""
    num_filters = 32
    input_size = [5, 9, 11, 3]
    expected_size = [None, None, None, num_filters]
    expected_size_dynamic = [5, 5, 7, num_filters]
    with self.cached_session():
      images = array_ops.placeholder(np.float32,
                                     [None, None, None, input_size[3]])
      output = layers_lib.convolution2d(
          images, num_filters, [3, 3], rate=2, padding='VALID')
      variables_lib.global_variables_initializer().run()
      self.assertEqual(output.op.name, 'Conv/Relu')
      self.assertListEqual(output.get_shape().as_list(), expected_size)
      eval_output = output.eval({images: np.zeros(input_size, np.float32)})
      self.assertListEqual(list(eval_output.shape), expected_size_dynamic)

  def testWithScope(self):
    """A dilated conv under an explicit scope is named accordingly."""
    num_filters = 32
    input_size = [5, 9, 11, 3]
    expected_size = [5, 5, 7, num_filters]
    images = random_ops.random_uniform(input_size, seed=1)
    output = layers_lib.convolution2d(
        images, num_filters, [3, 3], rate=2, padding='VALID', scope='conv7')
    with self.cached_session() as sess:
      sess.run(variables_lib.global_variables_initializer())
      self.assertEqual(output.op.name, 'conv7/Relu')
      self.assertListEqual(list(output.eval().shape), expected_size)

  def testWithScopeWithoutActivation(self):
    """With activation_fn=None the scoped op ends in BiasAdd."""
    num_filters = 32
    input_size = [5, 9, 11, 3]
    expected_size = [5, 5, 7, num_filters]
    images = random_ops.random_uniform(input_size, seed=1)
    output = layers_lib.convolution2d(
        images,
        num_filters, [3, 3],
        rate=2,
        padding='VALID',
        activation_fn=None,
        scope='conv7')
    with self.cached_session() as sess:
      sess.run(variables_lib.global_variables_initializer())
      self.assertEqual(output.op.name, 'conv7/BiasAdd')
      self.assertListEqual(list(output.eval().shape), expected_size)
class Convolution2dTransposeTests(test.TestCase):
def testTrainableFlagIsPassedOn(self):
for trainable in [True, False]:
with ops.Graph().as_default():
num_filters = 32
input_size = [5, 10, 12, 3]
images = random_ops.random_uniform(input_size, seed=1)
layers_lib.conv2d_transpose(
images, num_filters, [3, 3], stride=1, trainable=trainable)
model_variables = variables.get_model_variables()
trainable_variables = variables_lib.trainable_variables()
for model_variable in model_variables:
self.assertEqual(trainable, model_variable in trainable_variables)
def testInvalidDataFormat(self):
height, width = 7, 9
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(
ValueError, 'data_format has to be either NCHW or NHWC.'):
_layers.convolution2d_transpose(images, 32, 3, data_format='CHWN')
def testOutputSizeWithStrideOneSamePaddingNCHW(self):
# `NCHW` data format is only supported for `GPU` device.
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True) as sess:
num_filters = 32
input_size = [5, 3, 10, 12]
expected_size = [5, num_filters, 10, 12]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [3, 3],
stride=1,
padding='SAME',
data_format='NCHW')
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStrideOneValidPaddingNCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True) as sess:
num_filters = 32
input_size = [5, 3, 10, 12]
expected_size = [5, num_filters, 12, 14]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [3, 3],
stride=1,
padding='VALID',
data_format='NCHW')
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStrideTwoValidPaddingNCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True) as sess:
num_filters = 32
input_size = [5, 3, 9, 11]
expected_size = [5, num_filters, 19, 23]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [3, 3],
stride=[2, 2],
padding='VALID',
data_format='NCHW')
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith1x1StrideTwoSamePaddingNCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True) as sess:
num_filters = 1
input_size = [1, 1, 1, 1]
expected_size = [1, num_filters, 2, 2]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [2, 2],
stride=[2, 2],
padding='SAME',
data_format='NCHW')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith1x1StrideTwoValidPaddingNCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True) as sess:
num_filters = 1
input_size = [1, 1, 1, 1]
expected_size = [1, num_filters, 2, 2]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [2, 2],
stride=[2, 2],
padding='VALID',
data_format='NCHW')
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith2x2StrideTwoSamePaddingNCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True) as sess:
num_filters = 1
input_size = [1, 1, 2, 2]
expected_size = [1, num_filters, 4, 4]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [2, 2],
stride=[2, 2],
padding='SAME',
data_format='NCHW')
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith2x2StrideTwoValidPaddingNCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True) as sess:
num_filters = 1
input_size = [1, 1, 2, 2]
expected_size = [1, num_filters, 4, 4]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [2, 2],
stride=[2, 2],
padding='VALID',
data_format='NCHW')
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStride2x1NCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True) as sess:
num_filters = 1
input_size = [1, 1, 3, 2]
expected_size = [1, num_filters, 6, 5]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [2, 4],
stride=[2, 1],
padding='VALID',
data_format='NCHW')
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStride2x4NCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True) as sess:
num_filters = 1
input_size = [1, 1, 3, 2]
expected_size = [1, num_filters, 6, 8]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [2, 4],
stride=[2, 4],
padding='VALID',
data_format='NCHW')
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStride2x5NCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True) as sess:
num_filters = 1
input_size = [1, 1, 3, 2]
expected_size = [1, num_filters, 6, 10]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [2, 4],
stride=[2, 5],
padding='VALID',
data_format='NCHW')
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStrideOneSamePadding(self):
num_filters = 32
input_size = [5, 10, 12, 3]
expected_size = [5, 10, 12, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [3, 3], stride=1, padding='SAME')
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStrideOneValidPadding(self):
num_filters = 32
input_size = [5, 10, 12, 3]
expected_size = [5, 12, 14, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [3, 3], stride=1, padding='VALID')
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStrideTwoValidPadding(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [5, 19, 23, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [3, 3], stride=[2, 2], padding='VALID')
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith1x1StrideTwoSamePadding(self):
num_filters = 1
input_size = [1, 1, 1, 1]
expected_size = [1, 2, 2, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 2], stride=[2, 2], padding='SAME')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith1x1StrideTwoValidPadding(self):
num_filters = 1
input_size = [1, 1, 1, 1]
expected_size = [1, 2, 2, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 2], stride=[2, 2], padding='VALID')
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith2x2StrideTwoSamePadding(self):
num_filters = 1
input_size = [1, 2, 2, 1]
expected_size = [1, 4, 4, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 2], stride=[2, 2], padding='SAME')
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith2x2StrideTwoValidPadding(self):
num_filters = 1
input_size = [1, 2, 2, 1]
expected_size = [1, 4, 4, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 2], stride=[2, 2], padding='VALID')
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStride2x1(self):
num_filters = 1
input_size = [1, 3, 2, 1]
expected_size = [1, 6, 5, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 4], stride=[2, 1], padding='VALID')
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStride2x4(self):
num_filters = 1
input_size = [1, 3, 2, 1]
expected_size = [1, 6, 8, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 4], stride=[2, 4], padding='VALID')
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStride2x5(self):
num_filters = 1
input_size = [1, 3, 2, 1]
expected_size = [1, 6, 10, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 4], stride=[2, 5], padding='VALID')
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
  def testOutputSizeRandomSizesAndStridesValidPadding(self):
    """conv2d(conv2d_transpose(x)) with matching parameters recovers x's shape.

    For random spatial sizes, kernel sizes, and strides, a VALID-padded
    conv2d applied to the output of a VALID-padded conv2d_transpose with the
    same kernel/stride must produce the original input shape.
    """
    np.random.seed(0)
    max_image_size = 10
    for _ in range(10):
      num_filters = 1
      # Single-image, single-channel NHWC input with spatial dims in [1, 10).
      input_size = [
          1,
          np.random.randint(1, max_image_size),
          np.random.randint(1, max_image_size), 1
      ]
      # Kernel no larger than the input; stride of 1 or 2 per dimension.
      filter_size = [
          np.random.randint(1, input_size[1] + 1),
          np.random.randint(1, input_size[2] + 1)
      ]
      stride = [np.random.randint(1, 3), np.random.randint(1, 3)]
      # Fresh graph per iteration so variable/op names don't accumulate.
      ops.reset_default_graph()
      graph = ops.Graph()
      with graph.as_default():
        images = random_ops.random_uniform(input_size, seed=1)
        transpose = layers_lib.conv2d_transpose(
            images, num_filters, filter_size, stride=stride, padding='VALID')
        conv = layers_lib.conv2d(
            transpose, num_filters, filter_size, stride=stride, padding='VALID')
        with self.session(graph=graph) as sess:
          sess.run(variables_lib.global_variables_initializer())
          self.assertListEqual(list(conv.eval().shape), input_size)
def testDynamicOutputSizeWithStrideTwoValidPadding(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [None, None, None, num_filters]
expected_size_dynamic = [5, 19, 23, num_filters]
images = array_ops.placeholder(np.float32,
[None, None, None, input_size[3]])
output = layers_lib.conv2d_transpose(
images, num_filters, [3, 3], stride=[2, 2], padding='VALID')
self.assertListEqual(output.get_shape().as_list(), expected_size)
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
eval_output = output.eval({images: np.zeros(input_size, np.float32)})
self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testDynamicOutputSizeWithStrideTwoSamePadding(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [None, None, None, num_filters]
expected_size_dynamic = [5, 18, 22, num_filters]
with self.cached_session():
images = array_ops.placeholder(np.float32,
[None, None, None, input_size[3]])
output = layers_lib.conv2d_transpose(
images, num_filters, [3, 3], stride=[2, 2], padding='SAME')
variables_lib.global_variables_initializer().run()
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(output.get_shape().as_list(), expected_size)
eval_output = output.eval({images: np.zeros(input_size, np.float32)})
self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testWithScope(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [5, 19, 23, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [3, 3], stride=2, padding='VALID', scope='conv7')
self.assertEqual(output.op.name, 'conv7/Relu')
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
def testWithScopeWithoutActivation(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [5, 19, 23, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [3, 3],
stride=2,
padding='VALID',
activation_fn=None,
scope='conv7')
self.assertEqual(output.op.name, 'conv7/BiasAdd')
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
  def testDeconvWithoutBiasesProducesConv2dTranspose(self):
    """layers conv2d_transpose (activation_fn=None) matches nn_ops.conv2d_transpose.

    Builds the layer, then feeds the layer's own kernel variable into a raw
    nn_ops.conv2d_transpose call on the same input and checks the outputs are
    numerically close.
    """
    # NOTE(review): the comparison assumes the layer's bias initializes to
    # zero, so the BiasAdd is a no-op at init time — confirm against the
    # layer's default biases_initializer.
    num_filters = 32
    input_size = [5, 9, 11, 3]
    expected_size = [5, 19, 23, num_filters]
    stride = 2
    padding = 'VALID'
    with self.cached_session() as sess:
      images = random_ops.random_uniform(input_size, seed=1)
      output_deconv = layers_lib.conv2d_transpose(
          images,
          num_filters, [3, 3],
          stride=stride,
          padding=padding,
          activation_fn=None,
          scope='conv7')
      # Reuse the layer's kernel so both ops share identical weights.
      weights = variables.get_variables_by_name('conv7/weights')[0]
      output_conv2d_transpose = nn_ops.conv2d_transpose(
          images,
          weights,
          expected_size, [1, stride, stride, 1],
          padding=padding)
      sess.run(variables_lib.global_variables_initializer())
      # Rebind the tensor handles to their evaluated numpy values.
      output_deconv, output_conv2d_transpose = sess.run(
          [output_deconv, output_conv2d_transpose])
      self.assertTrue(
          np.isclose(output_deconv, output_conv2d_transpose, 1e-5, 1e-5).all())
class ConvolutionInPlaneTest(test.TestCase):
  """Tests for conv2d_in_plane, plus spatial-dim inference in convolution.

  conv2d_in_plane applies a single spatial kernel to every channel
  independently; with weights [1, -1] it computes horizontal or vertical
  finite differences.
  """

  def testHorzConvWithBlankImage(self):
    """Horizontal [1, -1] difference of a constant image is all zeros."""
    image = array_ops.ones((1, 10, 10, 1))
    horz_gradients = layers_lib.conv2d_in_plane(
        image,
        weights_initializer=init_ops.constant_initializer([1, -1]),
        kernel_size=[1, 2],
        padding='VALID',
        activation_fn=None)
    init_op = variables_lib.global_variables_initializer()
    with self.cached_session() as sess:
      sess.run(init_op)
      result = sess.run(horz_gradients)
      expected = np.zeros((1, 10, 9, 1))
      self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)

  def testHorzConvWithBlankImageAndPlaceholder(self):
    """Same zero-gradient check, but with a fully dynamic placeholder input."""
    image = array_ops.placeholder(dtypes.float32, shape=(None, None, None, 1))
    horz_gradients = layers_lib.conv2d_in_plane(
        image,
        weights_initializer=init_ops.constant_initializer([1, -1]),
        kernel_size=[1, 2],
        padding='VALID',
        activation_fn=None)
    init_op = variables_lib.global_variables_initializer()
    with self.cached_session() as sess:
      sess.run(init_op)
      result = sess.run(
          horz_gradients, feed_dict={
              image: np.ones((1, 10, 10, 1))
          })
      expected = np.zeros((1, 10, 9, 1))
      self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)

  def testHorzConvWithRandomImageMultiBatch(self):
    """[1, -1] kernel reproduces numpy's horizontal difference on a batch."""
    np.random.seed(1)
    image = np.random.rand(5, 10, 10, 1)
    expected = image[:, :, 0:-1, :] - image[:, :, 1:, :]
    tf_image = constant_op.constant(image, dtype=dtypes.float32)
    horz_gradients = layers_lib.conv2d_in_plane(
        tf_image,
        weights_initializer=init_ops.constant_initializer([1, -1]),
        kernel_size=[1, 2],
        padding='VALID',
        activation_fn=None)
    init_op = variables_lib.global_variables_initializer()
    with self.cached_session() as sess:
      sess.run(init_op)
      result = sess.run(horz_gradients)
      self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)

  def testHorzConvWithRandomImageMultiBatchMultiChannel(self):
    """The single in-plane kernel is applied to each of 7 channels alike."""
    np.random.seed(1)
    image = np.random.rand(5, 10, 10, 7)
    expected = image[:, :, 0:-1, :] - image[:, :, 1:, :]
    tf_image = constant_op.constant(image, dtype=dtypes.float32)
    horz_gradients = layers_lib.conv2d_in_plane(
        tf_image,
        weights_initializer=init_ops.constant_initializer([1, -1]),
        kernel_size=[1, 2],
        padding='VALID',
        activation_fn=None)
    init_op = variables_lib.global_variables_initializer()
    with self.cached_session() as sess:
      sess.run(init_op)
      result = sess.run(horz_gradients)
      self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)

  def testHorzConvWithVaryingImage(self):
    """Hand-computed horizontal differences on a 3x3 image."""
    image = np.asmatrix(('1.0 2.0 3.0;' '1.1 2.0 4.0;' '-4.3 0.0 8.9'))
    expected = np.asmatrix(('-1.0 -1.0;' '-0.9 -2.0;' '-4.3 -8.9'))
    expected = np.reshape(np.asarray(expected), (1, 3, 2, 1))
    tf_image = constant_op.constant(
        image, shape=(1, 3, 3, 1), dtype=dtypes.float32)
    horz_gradients = layers_lib.conv2d_in_plane(
        tf_image,
        weights_initializer=init_ops.constant_initializer([1, -1]),
        kernel_size=[1, 2],
        padding='VALID',
        activation_fn=None)
    init_op = variables_lib.global_variables_initializer()
    with self.cached_session() as sess:
      sess.run(init_op)
      result = sess.run(horz_gradients)
      self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)

  def testVertConvWithBlankImage(self):
    """Vertical [1, -1] difference of a constant image is all zeros."""
    image = array_ops.ones((1, 10, 10, 1))
    vert_gradients = layers_lib.conv2d_in_plane(
        image,
        weights_initializer=init_ops.constant_initializer([1, -1]),
        kernel_size=[2, 1],
        padding='VALID',
        activation_fn=None)
    init_op = variables_lib.global_variables_initializer()
    with self.cached_session() as sess:
      sess.run(init_op)
      result = sess.run(vert_gradients)
      expected = np.zeros((1, 9, 10, 1))
      self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)

  def testVertConvWithVaryingImage(self):
    """Hand-computed vertical differences on a 3x3 image."""
    image = np.asmatrix(('1.0 2.0 3.0;' '1.1 2.0 4.0;' '-4.3 0.0 8.9'))
    expected = np.asmatrix(('-0.1 0.0 -1.0;' ' 5.4 2.0 -4.9'))
    expected = np.reshape(np.asarray(expected), (1, 2, 3, 1))
    tf_image = constant_op.constant(
        image, shape=(1, 3, 3, 1), dtype=dtypes.float32)
    vert_gradients = layers_lib.conv2d_in_plane(
        tf_image,
        weights_initializer=init_ops.constant_initializer([1, -1]),
        kernel_size=[2, 1],
        padding='VALID',
        activation_fn=None)
    init_op = variables_lib.global_variables_initializer()
    with self.cached_session() as sess:
      sess.run(init_op)
      result = sess.run(vert_gradients)
      self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)

  def testConv1dShape(self):
    """convolution1d yields output shape [batch, width, num_filters]."""
    width = 7
    with self.cached_session():
      images = random_ops.random_uniform((5, width, 3), seed=1)
      output = layers_lib.convolution1d(images, 32, 3)
      self.assertEqual(output.op.name, 'Conv/Relu')
      self.assertListEqual(output.get_shape().as_list(), [5, width, 32])

  def testConvInferSpatialDims(self):
    """convolution infers 1-D/2-D/3-D operation from the kernel-size rank."""
    depth, height, width = 7, 9, 11
    with self.cached_session():
      images = np.random.uniform(size=(5, width, 4)).astype(np.float32)
      output = layers_lib.convolution(images, 32, [3])
      self.assertListEqual(output.get_shape().as_list(), [5, width, 32])
      images = np.random.uniform(size=(5, height, width, 4)).astype(np.float32)
      output = layers_lib.convolution(images, 32, [3, 3])
      self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
      images = np.random.uniform(size=(5, depth, height, width,
                                       4)).astype(np.float32)
      output = layers_lib.convolution(images, 32, [3, 3, 3])
      self.assertListEqual(output.get_shape().as_list(),
                           [5, depth, height, width, 32])
class DenseToSparseTest(test.TestCase):
  """Tests for dense_to_sparse."""

  def testDenseFromConstantToSparse(self):
    """Converting dense -> sparse -> dense reproduces the original values."""
    expected_constant = np.reshape(np.arange(24, dtype=np.int64), (3, 4, 2))
    tensor = constant_op.constant(expected_constant)
    sparse = _layers.dense_to_sparse(tensor)
    dense = sparse_ops.sparse_to_dense(sparse.indices, sparse.dense_shape,
                                       sparse.values)
    with self.cached_session() as sess:
      round_tripped = sess.run(dense)
      self.assertAllEqual(expected_constant, round_tripped)
class DropoutTest(test.TestCase):
  """Tests for _layers.dropout: shape preservation, is_training handling,
  output collections, and effective drop rates."""

  def testCreateDropout(self):
    """Dropout's output shape is compatible with its input's shape."""
    height, width = 3, 3
    with self.cached_session():
      images = np.random.uniform(size=(5, height, width, 3))
      output = _layers.dropout(images)
      output.get_shape().assert_is_compatible_with(
          ops.convert_to_tensor(images).get_shape())

  def testCreateDropoutWithConstantTrue(self):
    """A constant-True is_training tensor keeps the output shape-compatible."""
    height, width = 3, 3
    with self.cached_session():
      is_training = constant_op.constant(True)
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      output = _layers.dropout(images, is_training=is_training)
      output.get_shape().assert_is_compatible_with(images.get_shape())

  def testCreateDropoutWithConstantFalse(self):
    """A constant-False is_training tensor keeps the output shape-compatible."""
    height, width = 3, 3
    with self.cached_session():
      is_training = constant_op.constant(False)
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      output = _layers.dropout(images, is_training=is_training)
      output.get_shape().assert_is_compatible_with(images.get_shape())

  def testCreateDropoutWithPlaceholder(self):
    """A tensor-valued is_training must insert a cond op into the graph."""
    height, width = 3, 3
    # NOTE(review): the rest of this file uses ops.reset_default_graph();
    # confirm `tf` is imported at module level.
    tf.reset_default_graph()
    with self.cached_session():
      is_training = array_ops.placeholder(dtype=dtypes.bool, shape=[])
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      # This verifies that we've inserted cond properly.
      output = _layers.dropout(images, is_training=is_training)
      # In control_flow_v2 the op is called "If" and it is behind
      # identity op. In legacy mode cond we just go by name.
      # Might need to do something more robust here eventually.
      is_cond_op = (output.op.inputs[0].op.type == 'If' or
                    output.op.name == 'Dropout/cond/Merge')
      self.assertTrue(is_cond_op,
                      'Expected cond_op got ' + repr(output))
      output.get_shape().assert_is_compatible_with(images.get_shape())

  def testCollectOutputs(self):
    """outputs_collections records the output under the 'Dropout' alias."""
    height, width = 3, 3
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      output = _layers.dropout(images, outputs_collections='outputs')
      c_output = ops.get_collection('outputs')[0]
      self.assertEqual(c_output.aliases, ['Dropout'])
      self.assertEqual(c_output, output)

  def testDropout(self):
    """Default dropout roughly halves the fraction of positive elements."""
    height, width = 10, 10
    with self.cached_session() as sess:
      images = random_ops.random_uniform(
          (5, height, width, 3), seed=1, name='images')
      num_elem_initial = math_ops.reduce_mean(
          math_ops.cast(images > 0, dtypes.float32))
      output = _layers.dropout(images)
      num_elem = math_ops.reduce_mean(math_ops.cast(output > 0, dtypes.float32))
      num_elem, num_elem_initial = sess.run([num_elem, num_elem_initial])
      self.assertLess(num_elem, num_elem_initial / 2 + 0.1)
      self.assertGreater(num_elem, num_elem_initial / 2 - 0.1)

  def testDropoutSeed(self):
    """Test that providing the same seed produces the same result."""
    height, width = 10, 10
    with self.cached_session() as sess:
      images = random_ops.random_uniform(
          (5, height, width, 3), seed=1, name='images')
      output1 = _layers.dropout(images, seed=1)
      output2 = _layers.dropout(images, seed=1)
      self.assertAllEqual(*sess.run([output1, output2]))

  def testCreateDropoutNoTraining(self):
    """With is_training=False dropout acts as the identity."""
    height, width = 3, 3
    with self.cached_session() as sess:
      images = random_ops.random_uniform(
          (5, height, width, 3), seed=1, name='images')
      num_elem_initial = math_ops.reduce_mean(
          math_ops.cast(images > 0, dtypes.float32))
      output = _layers.dropout(images, is_training=False)
      num_elem = math_ops.reduce_mean(math_ops.cast(output > 0, dtypes.float32))
      num_elem, num_elem_initial = sess.run([num_elem, num_elem_initial])
      self.assertEqual(num_elem, num_elem_initial)
      outputs, inputs = sess.run([output, images])
      self.assertAllClose(outputs, inputs)

  def testCreateFCFollowByDropout(self):
    """Dropout after fully_connected roughly halves the active fraction."""
    height, width = 3, 3
    with self.cached_session() as sess:
      images = random_ops.random_uniform(
          (5, height, width, 3), seed=1, name='images')
      output = _layers.fully_connected(images, 50)
      num_elem_initial = math_ops.reduce_mean(
          math_ops.cast(output > 0, dtypes.float32))
      output = _layers.dropout(output)
      num_elem = math_ops.reduce_mean(math_ops.cast(output > 0, dtypes.float32))
      sess.run(variables_lib.global_variables_initializer())
      num_elem, num_elem_initial = sess.run([num_elem, num_elem_initial])
      self.assertLess(num_elem, num_elem_initial / 2 + 0.1)
      self.assertGreater(num_elem, num_elem_initial / 2 - 0.1)

  def testCreateFCWithDropout(self):
    """Dropout as normalizer_fn leaves between 10% and 50% positive outputs."""
    height, width = 3, 3
    with self.cached_session() as sess:
      images = random_ops.random_uniform(
          (5, height, width, 3), seed=1, name='images')
      output = _layers.fully_connected(
          images, 50, normalizer_fn=_layers.dropout)
      num_elem = math_ops.reduce_mean(math_ops.cast(output > 0, dtypes.float32))
      sess.run(variables_lib.global_variables_initializer())
      num_elem = sess.run(num_elem)
      self.assertLess(num_elem, 0.5)
      self.assertGreater(num_elem, 0.1)
class FlattenTest(test.TestCase):
  """Tests for _layers.flatten across static, partial, and unknown shapes."""

  def testUnknownLastDim(self):
    """Flattening (5, None) keeps the batch dim and an unknown tail."""
    with ops.Graph().as_default() as g, self.session(g):
      inputs = array_ops.placeholder(dtype=dtypes.float32)
      inputs.set_shape(tensor_shape.TensorShape((5, None)))
      output = _layers.flatten(inputs)
      self.assertEqual(output.get_shape().as_list(), [5, None])

  def testCollectOutputs(self):
    """outputs_collections records the output under the 'Flatten' alias."""
    height, width = 3, 3
    with self.cached_session():
      images = np.random.uniform(size=(5, height, width, 3))
      output = _layers.flatten(images, outputs_collections='outputs')
      c_output = ops.get_collection('outputs')[0]
      self.assertEqual(c_output.aliases, ['Flatten'])
      self.assertEqual(c_output, output)

  def testFlatten4D(self):
    """4-D input keeps its batch dim; total element count is preserved."""
    height, width = 3, 3
    with self.cached_session():
      images = random_ops.random_uniform(
          (5, height, width, 3), seed=1, name='images')
      output = _layers.flatten(images)
      self.assertEqual(output.get_shape().num_elements(),
                       images.get_shape().num_elements())
      self.assertEqual(output.get_shape()[0], images.get_shape()[0])

  def testFlatten3D(self):
    """3-D input keeps its batch dim; total element count is preserved."""
    height, width = 3, 3
    with self.cached_session():
      images = random_ops.random_uniform(
          (5, height, width), seed=1, name='images')
      output = _layers.flatten(images)
      self.assertEqual(output.get_shape().num_elements(),
                       images.get_shape().num_elements())
      self.assertEqual(output.get_shape()[0], images.get_shape()[0])

  def testFlatten0D(self):
    """A batch of scalars flattens to shape (batch, 1)."""
    with self.cached_session():
      scalars = random_ops.random_uniform((5,), seed=1, name='scalars')
      output = _layers.flatten(scalars)
      self.assertEqual(output.shape, (5, 1))

  def testFlattenBatchSize(self):
    """Unknown batch dim: static shape is [None, h*w*c]; runtime is exact."""
    height, width = 3, 3
    with self.cached_session() as sess:
      images = random_ops.random_uniform(
          (5, height, width, 3), seed=1, name='images')
      inputs = array_ops.placeholder(dtypes.int32, (None, height, width, 3))
      output = _layers.flatten(inputs)
      self.assertEqual(output.get_shape().as_list(), [None, height * width * 3])
      output = sess.run(output, {inputs: images.eval()})
      self.assertEqual(output.size, images.get_shape().num_elements())
      self.assertEqual(output.shape[0], images.get_shape()[0])

  def testUnknownDims(self):
    """A fully unknown input shape still flattens correctly at run time."""
    height = width = depth = 3
    with self.cached_session() as sess:
      images = random_ops.random_uniform(
          (5, height, width, depth), seed=1, name='images')
      inputs = array_ops.placeholder(dtypes.int32, (None, None, None, None))
      output = _layers.flatten(inputs)
      output = sess.run(output, {inputs: images.eval()})
      self.assertEqual(output.size, images.get_shape().num_elements())
      self.assertEqual(output.shape[0], images.get_shape()[0])
def _sparsify(array, threshold=0.5):
array[array < threshold] = 0
non_zero = np.where(array)
indices = np.vstack(non_zero).T
values = array[non_zero]
shape = array.shape
return indices, values, shape
class PartialFlattenTest(test.TestCase):
  """Tests for _layers._inner_flatten on dense and sparse tensors."""

  def testDensePartialFlatten(self):
    """Test `_inner_flatten` on `Tensor`s."""
    shape = [2, 3, 4, 5, 6]
    np.random.seed(5446)
    inputs = np.random.randint(0, 100, size=shape)
    for new_rank in [1, 2, 3, 4, 5]:
      # Keep the first new_rank - 1 dims; collapse the remainder into one.
      expected_new_shape = (
          shape[:new_rank - 1] + [np.prod(shape[new_rank - 1:])])
      expected_flattened = np.reshape(inputs, expected_new_shape)
      flattened_t = _layers._inner_flatten(inputs, new_rank)
      static_shape = flattened_t.get_shape().as_list()
      self.assertEqual(static_shape, expected_new_shape)
      with self.cached_session() as sess:
        flattened = sess.run(flattened_t)
      np.testing.assert_array_equal(expected_flattened, flattened)

  def testSparsePartialFlatten(self):
    """Test `_inner_flatten` on `SparseTensor`s."""
    shape = [4, 3, 11, 6]
    np.random.seed(10301)
    random_ = np.random.rand(*shape)
    # _sparsify zeroes sub-threshold entries in place; repeating it on a
    # reshaped view is therefore idempotent.
    indices, values, _ = _sparsify(random_)
    for new_rank in [1, 2, 3]:
      expected_shape = (shape[:new_rank - 1] + [np.prod(shape[new_rank - 1:])])
      reshaped_random_ = np.reshape(random_, expected_shape)
      expected_indices, expected_values, _ = _sparsify(reshaped_random_)
      inputs_t = sparse_tensor.SparseTensor(indices, values, shape)
      flattened_t = _layers._inner_flatten(inputs_t, new_rank)
      with self.cached_session() as sess:
        flattened = sess.run(flattened_t)
      np.testing.assert_array_equal(expected_indices, flattened.indices)
      np.testing.assert_array_equal(expected_values, flattened.values)
      np.testing.assert_array_equal(expected_shape, flattened.dense_shape)

  def testIncompleteShape(self):
    """Test `_inner_flatten` shape inference for incomplete shapes."""
    shape = [2, None, 4, None, 5, 6]
    inputs = array_ops.placeholder(dtypes.int32)
    inputs.set_shape(shape)
    flattened1 = _layers._inner_flatten(inputs, 1)
    self.assertEqual([None], flattened1.get_shape().as_list())
    flattened2 = _layers._inner_flatten(inputs, 2)
    self.assertEqual([2, None], flattened2.get_shape().as_list())
    flattened3 = _layers._inner_flatten(inputs, 3)
    self.assertEqual([2, None, None], flattened3.get_shape().as_list())
    flattened4 = _layers._inner_flatten(inputs, 4)
    self.assertEqual([2, None, 4, None], flattened4.get_shape().as_list())
    flattened5 = _layers._inner_flatten(inputs, 5)
    # Last two dims (5, 6) are known, so their product 30 is inferred.
    self.assertEqual([2, None, 4, None, 30], flattened5.get_shape().as_list())

  def testDenseFlattenRankAssertion(self):
    """Test `_inner_flatten` rank assertion for dense tensors."""
    shape = [2, 3]
    new_rank = 3
    inputs = array_ops.placeholder(dtypes.int32)
    inputs.set_shape(shape)
    with self.assertRaisesRegexp(ValueError,
                                 'inputs has rank less than new_rank'):
      _layers._inner_flatten(inputs, new_rank)

  def testSparseFlattenRankAssertion(self):
    """Test `_inner_flatten` rank assertion for sparse tensors."""
    shape = [2, 3]
    new_rank = 3
    np.random.seed(10301)
    random_ = np.random.rand(*shape)
    indices, values, _ = _sparsify(random_)
    inputs = sparse_tensor.SparseTensor(indices, values, shape)
    with self.assertRaisesRegexp(ValueError,
                                 'Inputs has rank less than new_rank'):
      _layers._inner_flatten(inputs, new_rank)
class FCTest(test.TestCase):
  """Tests for fully_connected: shapes, scoping, variable reuse,
  regularizers, and batch-norm integration."""

  def testCreateFC(self):
    """fully_connected and its relu alias build the expected variables."""
    height, width = 3, 3
    for layer_fn in (_layers.fully_connected, layers_lib.relu):
      with ops.Graph().as_default() as g, self.session(g):
        inputs = np.random.uniform(size=(5, height * width * 3))
        output = layer_fn(inputs, 32)
        self.assertEqual(output.op.name, 'fully_connected/Relu')
        self.assertListEqual(output.get_shape().as_list(), [5, 32])
        weights = variables.get_variables_by_name('weights')[0]
        self.assertListEqual(weights.get_shape().as_list(), [3 * 3 * 3, 32])
        biases = variables.get_variables_by_name('biases')[0]
        self.assertListEqual(biases.get_shape().as_list(), [32])

  def testCreateFCWithScope(self):
    """An explicit scope renames the output op."""
    height, width = 3, 3
    with self.cached_session():
      inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
      output = _layers.fully_connected(inputs, 32, scope='fc1')
      self.assertEqual(output.op.name, 'fc1/Relu')

  def testCreateFCWithCollection(self):
    """outputs_collections records the layer output under its scope alias."""
    height, width = 3, 3
    inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
    with ops.name_scope('fe'):
      fc = _layers.fully_connected(
          inputs, 7, outputs_collections='outputs', scope='fc')
    output_collected = ops.get_collection('outputs')[0]
    self.assertEqual(output_collected.aliases, ['fc'])
    self.assertEqual(output_collected, fc)

  def testCreateFcCreatesWeightsAndBiasesVars(self):
    """Weights/biases variables appear only after the layer is built."""
    height, width = 3, 3
    inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
    with self.cached_session():
      self.assertFalse(variables.get_variables('fc1/weights'))
      self.assertFalse(variables.get_variables('fc1/biases'))
      _layers.fully_connected(inputs, 32, scope='fc1')
      self.assertTrue(variables.get_variables('fc1/weights'))
      self.assertTrue(variables.get_variables('fc1/biases'))

  def testReuseVars(self):
    """reuse=True shares the existing scope's two variables."""
    height, width = 3, 3
    inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
    with self.cached_session():
      _layers.fully_connected(inputs, 32, scope='fc1')
      self.assertEqual(len(variables.get_variables('fc1')), 2)
      _layers.fully_connected(inputs, 32, scope='fc1', reuse=True)
      self.assertEqual(len(variables.get_variables('fc1')), 2)

  def testNonReuseVars(self):
    """Without reuse each call creates a fresh weights/biases pair."""
    height, width = 3, 3
    inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
    with self.cached_session():
      _layers.fully_connected(inputs, 32)
      self.assertEqual(len(variables.get_variables('fully_connected')), 2)
      _layers.fully_connected(inputs, 32)
      self.assertEqual(len(variables.get_variables('fully_connected')), 4)

  def testReuseWithRegularizer(self):
    """Reusing a regularized layer must not duplicate its reg loss."""
    height, width = 3, 3
    regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
    inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
    _layers.fully_connected(
        inputs, 32, scope='fc1', weights_regularizer=regularizer)
    self.assertEqual(
        len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
    self.assertEqual(len(losses.get_regularization_losses()), 1)
    _layers.fully_connected(
        inputs, 32, scope='fc1', weights_regularizer=regularizer, reuse=True)
    self.assertEqual(
        len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
    self.assertEqual(len(losses.get_regularization_losses()), 1)
    with variable_scope.variable_scope('outer', reuse=False):
      _layers.fully_connected(inputs, 32, weights_regularizer=regularizer)
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 2)
      self.assertEqual(len(losses.get_regularization_losses()), 2)
    with variable_scope.variable_scope('outer', reuse=True):
      _layers.fully_connected(inputs, 32, weights_regularizer=regularizer)
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 2)
      self.assertEqual(len(losses.get_regularization_losses()), 2)

  def testCreateFCWithoutActivation(self):
    """With activation_fn=None the output op is the bias add."""
    height, width = 3, 3
    with self.cached_session():
      inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
      output = _layers.fully_connected(inputs, 32, activation_fn=None)
      self.assertEqual(output.op.name, 'fully_connected/BiasAdd')

  def testCreateFCWithWD(self):
    """A weights regularizer registers a reg loss with a plausible value."""
    height, width = 3, 3
    with self.cached_session() as sess:
      inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
      weight_decay = regularizers.l2_regularizer(0.01)
      _layers.fully_connected(inputs, 32, weights_regularizer=weight_decay)
      wd = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
      self.assertEqual(wd.op.name,
                       'fully_connected/kernel/Regularizer/l2_regularizer')
      sess.run(variables_lib.global_variables_initializer())
      self.assertLess(sess.run(wd), 0.4)

  def testCreateFCWithBD(self):
    """A biases regularizer registers a reg loss with a plausible value."""
    height, width = 3, 3
    with self.cached_session() as sess:
      inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
      bias_decay = regularizers.l2_regularizer(0.01)
      _layers.fully_connected(inputs, 32, biases_regularizer=bias_decay)
      wd = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
      self.assertEqual(wd.op.name,
                       'fully_connected/bias/Regularizer/l2_regularizer')
      sess.run(variables_lib.global_variables_initializer())
      self.assertLess(sess.run(wd), 0.4)

  def testCreateNoRegularizers(self):
    """No regularizers means an empty REGULARIZATION_LOSSES collection."""
    height, width = 3, 3
    with self.cached_session():
      inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
      _layers.fully_connected(inputs, 32)
      self.assertEqual(
          ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES), [])

  def testReuseFCWithWD(self):
    """Reuse keeps both variable and reg-loss counts unchanged."""
    height, width = 3, 3
    with self.cached_session():
      inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
      weight_decay = regularizers.l2_regularizer(0.01)
      _layers.fully_connected(
          inputs, 32, weights_regularizer=weight_decay, scope='FC')
      self.assertEqual(len(variables.get_variables()), 2)
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
      _layers.fully_connected(
          inputs, 32, weights_regularizer=weight_decay, scope='FC', reuse=True)
      self.assertEqual(len(variables.get_variables()), 2)
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)

  def testFCWithBatchNorm(self):
    """batch_norm as normalizer_fn adds 3 BatchNorm variables per layer."""
    height, width = 3, 3
    with self.cached_session():
      images = random_ops.random_uniform((5, height * width * 3), seed=1)
      with arg_scope(
          [_layers.fully_connected],
          normalizer_fn=_layers.batch_norm,
          normalizer_params={
              'decay': 0.9
          }):
        net = _layers.fully_connected(images, 27)
        net = _layers.fully_connected(net, 27)
      self.assertEqual(len(variables.get_variables()), 8)
      self.assertEqual(
          len(variables.get_variables('fully_connected/BatchNorm')), 3)
      self.assertEqual(
          len(variables.get_variables('fully_connected_1/BatchNorm')), 3)

  def testReuseFCWithBatchNorm(self):
    """Reusing an FC+BatchNorm scope shares all four variables."""
    height, width = 3, 3
    with self.cached_session():
      images = random_ops.random_uniform((5, height * width * 3), seed=1)
      with arg_scope(
          [_layers.fully_connected],
          normalizer_fn=_layers.batch_norm,
          normalizer_params={
              'decay': 0.9
          }):
        net = _layers.fully_connected(images, 27, scope='fc1')
        net = _layers.fully_connected(net, 27, scope='fc1', reuse=True)
      self.assertEqual(len(variables.get_variables()), 4)
      self.assertEqual(len(variables.get_variables('fc1/BatchNorm')), 3)
class BatchNormTest(test.TestCase):
def _addBesselsCorrection(self, sample_size, expected_var):
correction_factor = sample_size / (sample_size - 1)
expected_var *= correction_factor
return expected_var, correction_factor
def testBatchNormCenterFalse(self):
a = array_ops.placeholder(dtype=dtypes.float32, shape=(10, 10, 10, 10))
# Test that center=False builds a valid graph.
_layers.batch_norm(
a, center=False, data_format='NCHW', zero_debias_moving_mean=True)
def testUnknownShape(self):
with ops.Graph().as_default() as g, self.session(g):
inputs = array_ops.placeholder(dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError, 'undefined rank'):
_layers.batch_norm(inputs)
def testInvalidDataFormat(self):
with ops.Graph().as_default() as g, self.session(g):
inputs = array_ops.placeholder(dtype=dtypes.float32)
with self.assertRaisesRegexp(
ValueError, 'data_format has to be either NCHW or NHWC.'):
_layers.batch_norm(inputs, data_format='CHWN')
def testUnknownChannelsDimNHWC(self):
    """An undefined channels dimension (last axis in NHWC) is rejected."""
    with ops.Graph().as_default() as g, self.session(g):
      tensor = array_ops.placeholder(dtype=dtypes.float32)
      tensor.set_shape(tensor_shape.TensorShape((5, 3, 3, None)))
      with self.assertRaisesRegexp(ValueError, 'undefined'):
        _layers.batch_norm(tensor, data_format='NHWC')
def testUnknownChannelsDimNCHW(self):
    """An undefined channels dimension (second axis in NCHW) is rejected."""
    with ops.Graph().as_default() as g, self.session(g):
      tensor = array_ops.placeholder(dtype=dtypes.float32)
      tensor.set_shape(tensor_shape.TensorShape((5, None, 3, 3)))
      with self.assertRaisesRegexp(ValueError, 'undefined'):
        _layers.batch_norm(tensor, data_format='NCHW')
def _testCreateOp(self, fused, dtype=None):
    """Build one batch_norm op; check its name, output shape and collections.

    Args:
      fused: whether to use the fused implementation.
      dtype: input dtype; defaults to float32.
    """
    if dtype is None:
      dtype = dtypes.float32
    height, width = 3, 3
    with self.cached_session():
      images = np.random.uniform(size=(5, height, width, 3)).astype(
          dtype.as_numpy_dtype)
      output = _layers.batch_norm(images, fused=fused)
      # The fused and unfused paths emit differently named ops.
      if fused:
        expected_name = 'BatchNorm/FusedBatchNorm'
      else:
        expected_name = 'BatchNorm/batchnorm'
      self.assertTrue(output.op.name.startswith(expected_name))
      self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
      # No regularizers were requested, so no regularization losses exist.
      self.assertEqual(
          ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES), [])
# Thin wrappers dispatching to _testCreateOp with each configuration.
def testCreateOpDefault(self):
    # Unfused batch_norm builds cleanly.
    self._testCreateOp(False)
def testCreateOpFused(self):
    # Fused batch_norm builds cleanly.
    self._testCreateOp(True)
def testCreateOpFusedFloat16(self):
    # Fused batch_norm supports float16 inputs.
    self._testCreateOp(True, dtypes.float16)
def _testCreateOpBetaRegularizer(self, fused=True):
    """A beta regularizer registers exactly one regularization loss."""
    height, width = 3, 3
    with self.cached_session():
      regularizer = lambda x: 0.1 * math_ops.reduce_sum(x)
      images = np.random.uniform(size=(5, height, width, 3)).astype('f')
      _layers.batch_norm(
          images, param_regularizers={'beta': regularizer}, fused=fused)
      losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
      self.assertEqual(len(losses), 1)
      self.assertEqual(losses[0].op.name, 'BatchNorm/beta/Regularizer/mul')
# Dispatch wrappers for the beta-regularizer test.
def testCreateOpBetaRegularizerFused(self):
    self._testCreateOpBetaRegularizer(fused=True)
def testCreateOpBetaRegularizerNonFused(self):
    self._testCreateOpBetaRegularizer(fused=False)
def _testCreateOpGammaRegularizer(self, fused=True):
    """A gamma regularizer registers exactly one regularization loss."""
    height, width = 3, 3
    with self.cached_session():
      regularizer = lambda x: 0.1 * math_ops.reduce_sum(x)
      images = np.random.uniform(size=(5, height, width, 3)).astype('f')
      # gamma only exists with scale=True.
      _layers.batch_norm(
          images, param_regularizers={'gamma': regularizer}, scale=True,
          fused=fused)
      losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
      self.assertEqual(len(losses), 1)
      self.assertEqual(losses[0].op.name, 'BatchNorm/gamma/Regularizer/mul')
# Dispatch wrappers for the gamma-regularizer test.
def testCreateOpGammaRegularizerFused(self):
    self._testCreateOpGammaRegularizer(fused=True)
def testCreateOpGammaRegularizerNonFused(self):
    self._testCreateOpGammaRegularizer(fused=False)
def testCreateVariables(self):
    """batch_norm(scale=True) creates beta, gamma and the moving stats."""
    height, width = 3, 3
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      _layers.batch_norm(images, scale=True)
      beta = variables.get_variables_by_name('beta')[0]
      gamma = variables.get_variables_by_name('gamma')[0]
      self.assertEqual(beta.op.name, 'BatchNorm/beta')
      self.assertEqual(gamma.op.name, 'BatchNorm/gamma')
      moving_mean = variables.get_variables_by_name('moving_mean')[0]
      moving_variance = variables.get_variables_by_name('moving_variance')[0]
      self.assertEqual(moving_mean.op.name, 'BatchNorm/moving_mean')
      self.assertEqual(moving_variance.op.name, 'BatchNorm/moving_variance')
def testMovingAverageVariables(self):
    """All four batch-norm variables register as model variables."""
    height, width = 3, 3
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      _layers.batch_norm(images, scale=True)
      # beta, gamma, moving_mean and moving_variance.
      self.assertEqual(len(variables.get_model_variables()), 4)
      moving_mean = variables.get_variables_by_name('moving_mean')[0]
      moving_variance = variables.get_variables_by_name('moving_variance')[0]
      self.assertEqual(moving_mean.op.name, 'BatchNorm/moving_mean')
      self.assertEqual(moving_variance.op.name, 'BatchNorm/moving_variance')
def testMovingAverageVariablesZeroDebias(self):
    """Zero-debiasing adds 'biased' and 'local_step' model variables."""
    height, width = 3, 3
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      _layers.batch_norm(
          images, scale=True, zero_debias_moving_mean=True, fused=False)
      # 4 regular batch-norm variables plus biased and local_step.
      self.assertEqual(len(variables.get_model_variables()), 6)
      moving_mean = variables.get_variables_by_name('moving_mean')[0]
      moving_variance = variables.get_variables_by_name('moving_variance')[0]
      biased = variables.get_variables_by_name('biased')[0]
      local_step = variables.get_variables_by_name('local_step')[0]
      self.assertEqual(moving_mean.op.name, 'BatchNorm/moving_mean')
      self.assertEqual(moving_variance.op.name, 'BatchNorm/moving_variance')
      self.assertEqual(biased.op.name, 'BatchNorm/BatchNorm/moving_mean/biased')
      self.assertEqual(local_step.op.name,
                       'BatchNorm/BatchNorm/moving_mean/local_step')
def testUpdatesCollection(self):
    """Moving-average update ops land in the requested custom collection."""
    height, width = 3, 3
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      _layers.batch_norm(images, updates_collections='my_update_ops')
      update_layers = ops.get_collection('my_update_ops')
      update_moving_mean = update_layers[0]
      update_moving_variance = update_layers[1]
      self.assertStartsWith(update_moving_mean.op.name,
                            'BatchNorm/AssignMovingAvg')
      self.assertStartsWith(update_moving_variance.op.name,
                            'BatchNorm/AssignMovingAvg_1')
def testVariablesCollections(self):
    """Each batch-norm variable lands in its requested graph collection."""
    variables_collections = {
        'beta': ['beta'],
        'gamma': ['gamma'],
        'moving_mean': ['moving_mean'],
        'moving_variance': ['moving_variance'],
    }
    images = random_ops.random_uniform((5, 5, 5, 3), seed=1)
    _layers.batch_norm(
        images, scale=True, variables_collections=variables_collections)
    for var_name, collection_names in variables_collections.items():
      collection = ops.get_collection(collection_names[0])
      # Exactly one variable per collection, named after the parameter.
      self.assertEqual(len(collection), 1)
      self.assertEqual(collection[0].op.name, 'BatchNorm/' + var_name)
def testReuseVariables(self):
    """reuse=True must not create a second set of variables."""
    height, width = 3, 3
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      _layers.batch_norm(images, scale=True, scope='bn')
      _layers.batch_norm(images, scale=True, scope='bn', reuse=True)
      beta = variables.get_variables_by_name('beta')
      gamma = variables.get_variables_by_name('gamma')
      self.assertEqual(len(beta), 1)
      self.assertEqual(len(gamma), 1)
      moving_mean = variables.get_variables_by_name('moving_mean')
      moving_variance = variables.get_variables_by_name('moving_variance')
      moving_vars = moving_mean + moving_variance
      self.assertEqual(len(moving_vars), 2)
def testReuseUpdateOps(self):
    """Each application, even with reuse=True, appends its own update ops."""
    height, width = 3, 3
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      with arg_scope([_layers.batch_norm], updates_collections='update_ops'):
        _layers.batch_norm(images, scope='bn')
        # 2 ops (mean + variance) after the first application.
        self.assertEqual(len(ops.get_collection('update_ops')), 2)
        _layers.batch_norm(images, scope='bn', reuse=True)
        # 2 more after the reused application.
        self.assertEqual(len(ops.get_collection('update_ops')), 4)
def testCreateMovingVars(self):
    """Moving mean/variance are created under the BatchNorm scope."""
    height, width = 3, 3
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      _ = _layers.batch_norm(images)
      moving_mean = variables.get_variables('BatchNorm/moving_mean')
      self.assertEqual(len(moving_mean), 1)
      self.assertEqual(moving_mean[0].op.name, 'BatchNorm/moving_mean')
      moving_variance = variables.get_variables('BatchNorm/moving_variance')
      self.assertEqual(len(moving_variance), 1)
      self.assertEqual(moving_variance[0].op.name, 'BatchNorm/moving_variance')
def testZeroDebiasMovingMean(self):
    """With zero debias, moving_mean equals the batch mean after each step."""
    height, width = 3, 3
    batch_size = 10
    channels = 3
    np.random.seed(1)
    image_shape = (batch_size, height, width, channels)
    axis = (0, 1, 2)
    image_values = np.random.rand(*image_shape)
    expected_mean = np.mean(image_values, axis=axis)
    expected_var = np.var(image_values, axis=axis)
    images = constant_op.constant(
        image_values, shape=image_shape, dtype=dtypes.float32)
    output = _layers.batch_norm(
        images,
        decay=0.1,
        updates_collections=None,
        zero_debias_moving_mean=True,
        fused=False)
    moving_mean = variables.get_variables_by_name('BatchNorm/moving_mean')[0]
    moving_variance = variables.get_variables_by_name('moving_variance')[0]
    biased = variables.get_variables_by_name('biased')[0]
    local_step = variables.get_variables_by_name('local_step')[0]
    with self.cached_session() as sess:
      sess.run(variables_lib.global_variables_initializer())
      self.assertAllClose(local_step.eval(), 0)
      self.assertAllClose(moving_mean.eval(), [0] * channels)
      self.assertAllClose(biased.eval(), [0] * channels)
      self.assertAllClose(moving_variance.eval(), [1] * channels)
      for i in range(10):
        # local_step counts the updates executed so far.
        self.assertAllClose(local_step.eval(), i)
        sess.run([output])
        # In this case moving_mean == expected_mean after each update
        self.assertAllClose(moving_mean.eval(), expected_mean)
      # After 10 updates with decay 0.1 moving_mean == expected_mean,
      # biased == expected_mean and moving_variance == expected_var.
      self.assertAllClose(moving_mean.eval(), expected_mean)
      self.assertAllClose(moving_variance.eval(), expected_var)
      self.assertAllClose(biased.eval(), expected_mean)
def _testNoneUpdatesCollections(self,
                                fused,
                                data_format='NHWC',
                                zero_debias_moving_mean=False):
    """With updates_collections=None, updates run inline with the output.

    Args:
      fused: whether to use the fused batch-norm implementation.
      data_format: 'NHWC' or 'NCHW'.
      zero_debias_moving_mean: whether to zero-debias the moving mean.
    """
    height, width = 2, 2
    batch_size = 10
    channels = 3
    np.random.seed(1)
    use_gpu = fused
    with self.session(use_gpu=use_gpu) as sess:
      if data_format == 'NHWC':
        image_shape = (batch_size, height, width, channels)
        axis = (0, 1, 2)
      else:
        image_shape = (batch_size, channels, height, width)
        axis = (0, 2, 3)
      image_values = np.random.rand(*image_shape)
      expected_mean = np.mean(image_values, axis=axis)
      expected_var = np.var(image_values, axis=axis)
      if fused:
        # Add Bessel's correction
        expected_var, _ = self._addBesselsCorrection(
            batch_size * height * width, expected_var)
      images = constant_op.constant(
          image_values, shape=image_shape, dtype=dtypes.float32)
      output = _layers.batch_norm(
          images,
          decay=0.1,
          updates_collections=None,
          fused=fused,
          data_format=data_format,
          zero_debias_moving_mean=zero_debias_moving_mean)
      # updates_ops are not added to UPDATE_OPS collection.
      self.assertEqual(ops.get_collection(ops.GraphKeys.UPDATE_OPS), [])
      # Initialize all variables
      sess.run(variables_lib.global_variables_initializer())
      moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
      moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
      mean, variance = sess.run([moving_mean, moving_variance])
      # After initialization moving_mean == 0 and moving_variance == 1.
      self.assertAllClose(mean, [0] * channels)
      self.assertAllClose(variance, [1] * channels)
      for _ in range(10):
        sess.run([output])
        if zero_debias_moving_mean:
          # In this case moving_mean == expected_mean after update
          self.assertAllClose(moving_mean.eval(), expected_mean)
      mean = moving_mean.eval()
      variance = moving_variance.eval()
      # After 10 updates with decay 0.1 moving_mean == expected_mean and
      # moving_variance == expected_var.
      self.assertAllClose(mean, expected_mean)
      self.assertAllClose(variance, expected_var)
def testNoneUpdatesCollectionsNHWC(self):
    """Unfused NHWC batch_norm with updates_collections=None."""
    # A leftover debug statement printing the graph's variables (via the
    # deprecated tf.all_variables()) was removed here; it only added noise
    # to the test log.
    self._testNoneUpdatesCollections(False, data_format='NHWC')
# Thin wrappers dispatching to _testNoneUpdatesCollections per configuration.
def testNoneUpdatesCollectionsNCHW(self):
    self._testNoneUpdatesCollections(False, data_format='NCHW')
def testNoneUpdatesCollectionsNHWCZeroDebias(self):
    self._testNoneUpdatesCollections(
        False, data_format='NHWC', zero_debias_moving_mean=True)
def testNoneUpdatesCollectionsNCHWZeroDebias(self):
    self._testNoneUpdatesCollections(
        False, data_format='NCHW', zero_debias_moving_mean=True)
def testNoneUpdatesCollectionsFusedNCHW(self):
    # Fused NCHW requires a CUDA GPU.
    if test.is_gpu_available(cuda_only=True):
      self._testNoneUpdatesCollections(True, data_format='NCHW')
def testNoneUpdatesCollectionsFusedNHWC(self):
    self._testNoneUpdatesCollections(True, data_format='NHWC')
def testNoneUpdatesCollectionsFusedNCHWZeroDebias(self):
    # Fused NCHW requires a CUDA GPU.
    if test.is_gpu_available(cuda_only=True):
      self._testNoneUpdatesCollections(
          True, data_format='NCHW', zero_debias_moving_mean=True)
def testNoneUpdatesCollectionsFusedNHWCZeroDebias(self):
    self._testNoneUpdatesCollections(
        True, data_format='NHWC', zero_debias_moving_mean=True)
def _testDelayedUpdateMovingVars(self,
                                 fused,
                                 data_format='NHWC',
                                 zero_debias_moving_mean=False):
    """Moving stats converge when updates run via UPDATE_OPS dependencies.

    Args:
      fused: whether to use the fused batch-norm implementation.
      data_format: 'NHWC' or 'NCHW'.
      zero_debias_moving_mean: whether to zero-debias the moving mean.
    """
    height, width = 2, 2
    batch_size = 10
    channels = 3
    np.random.seed(1)
    use_gpu = fused
    with self.session(use_gpu=use_gpu) as sess:
      if data_format == 'NHWC':
        image_shape = (batch_size, height, width, channels)
        axis = (0, 1, 2)
      else:
        image_shape = (batch_size, channels, height, width)
        axis = (0, 2, 3)
      image_values = np.random.rand(*image_shape)
      expected_mean = np.mean(image_values, axis=axis)
      expected_var = np.var(image_values, axis=axis)
      if fused:
        # Add Bessel's correction
        expected_var, correction_factor = self._addBesselsCorrection(
            batch_size * height * width, expected_var)
      images = constant_op.constant(
          image_values, shape=image_shape, dtype=dtypes.float32)
      output = _layers.batch_norm(
          images,
          decay=0.1,
          fused=fused,
          data_format=data_format,
          zero_debias_moving_mean=zero_debias_moving_mean)
      update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
      # updates_ops are added to UPDATE_OPS collection.
      self.assertEqual(len(update_ops), 2)
      # Force the updates to run before the output via a control barrier.
      with ops.control_dependencies(update_ops):
        barrier = control_flow_ops.no_op(name='barrier')
      output = control_flow_ops.with_dependencies([barrier], output)
      # Initialize all variables
      sess.run(variables_lib.global_variables_initializer())
      moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
      moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
      mean, variance = sess.run([moving_mean, moving_variance])
      # After initialization moving_mean == 0 and moving_variance == 1.
      self.assertAllClose(mean, [0] * channels)
      self.assertAllClose(variance, [1] * channels)
      for _ in range(10):
        sess.run([output])
        if zero_debias_moving_mean:
          # In this case moving_mean == expected_mean after update
          self.assertAllClose(moving_mean.eval(), expected_mean)
      mean = moving_mean.eval()
      variance = moving_variance.eval()
      # After 10 updates with decay 0.1 moving_mean == expected_mean and
      # moving_variance == expected_var.
      self.assertAllClose(mean, expected_mean)
      if fused:
        # Add Bessel's correction
        moving_variance_corrected = moving_variance / correction_factor
        correct_moving_variance = state_ops.assign(moving_variance,
                                                   moving_variance_corrected)
        sess.run(correct_moving_variance)
      self.assertAllClose(variance, expected_var)
# Thin wrappers dispatching to _testDelayedUpdateMovingVars per configuration.
def testDelayedUpdateMovingVarsNHWC(self):
    self._testDelayedUpdateMovingVars(False, data_format='NHWC')
def testDelayedUpdateMovingVarsNCHW(self):
    self._testDelayedUpdateMovingVars(False, data_format='NCHW')
def testDelayedUpdateMovingVarsFusedNCHW(self):
    # Fused NCHW requires a CUDA GPU.
    if test.is_gpu_available(cuda_only=True):
      self._testDelayedUpdateMovingVars(True, data_format='NCHW')
def testDelayedUpdateMovingVarsFusedNHWC(self):
    self._testDelayedUpdateMovingVars(True, data_format='NHWC')
def testDelayedUpdateMovingVars(self):
    self._testDelayedUpdateMovingVars(False)
def _testEvalMovingVars(self, zero_debias_moving_mean=False):
    """In eval mode (is_training=False) the moving statistics stay frozen.

    Args:
      zero_debias_moving_mean: forwarded to batch_norm. Previously this
        argument was accepted but silently ignored, so the ZeroDebias
        variant of this test exercised nothing.
    """
    height, width = 3, 3
    with self.cached_session() as sess:
      image_shape = (10, height, width, 3)
      image_values = np.random.rand(*image_shape)
      expected_mean = np.mean(image_values, axis=(0, 1, 2))
      expected_var = np.var(image_values, axis=(0, 1, 2))
      images = constant_op.constant(
          image_values, shape=image_shape, dtype=dtypes.float32)
      output = _layers.batch_norm(
          images,
          decay=0.1,
          is_training=False,
          zero_debias_moving_mean=zero_debias_moving_mean)
      # No update ops are created in eval mode.
      self.assertEqual(ops.get_collection(ops.GraphKeys.UPDATE_OPS), [])
      # Initialize all variables
      sess.run(variables_lib.global_variables_initializer())
      moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
      moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
      mean, variance = sess.run([moving_mean, moving_variance])
      # After initialization moving_mean == 0 and moving_variance == 1.
      self.assertAllClose(mean, [0] * 3)
      self.assertAllClose(variance, [1] * 3)
      # Simulate assignment from saver restore.
      init_assigns = [
          state_ops.assign(moving_mean, expected_mean),
          state_ops.assign(moving_variance, expected_var)
      ]
      sess.run(init_assigns)
      for _ in range(10):
        sess.run([output], {images: np.random.rand(*image_shape)})
      mean = moving_mean.eval()
      variance = moving_variance.eval()
      # Although we feed different images, the moving_mean and moving_variance
      # shouldn't change.
      self.assertAllClose(mean, expected_mean)
      self.assertAllClose(variance, expected_var)
# Dispatch wrappers for the eval-mode moving-variable test.
def testEvalMovingVars(self):
    self._testEvalMovingVars()
def testEvalMovingVarsZeroDebias(self):
    self._testEvalMovingVars(True)
def testEvalMovingVarsWithPartitioner(self):
    """Eval-mode moving variables also work under a variable partitioner."""
    # This test makes sure that the moving-mean and moving-variance logic works
    # when `batch_norm` is called within a variable-scope that has a variable
    # partitioner.
    partitioner = partitioned_variables.fixed_size_partitioner(2, axis=0)
    with variable_scope.variable_scope(
        variable_scope.get_variable_scope(), partitioner=partitioner):
      self.testEvalMovingVars()
def _testReuseVars(self, fused, zero_debias_moving_mean=False):
    """Train and eval outputs built with reuse=True share variables.

    After training to convergence the two outputs must agree.

    Args:
      fused: whether to use the fused batch-norm implementation.
      zero_debias_moving_mean: whether to zero-debias the moving mean.
    """
    height, width = 3, 3
    batch_size = 10
    channels = 3
    with self.cached_session() as sess:
      image_shape = (batch_size, height, width, channels)
      image_values = np.random.rand(*image_shape)
      expected_mean = np.mean(image_values, axis=(0, 1, 2))
      expected_var = np.var(image_values, axis=(0, 1, 2))
      if fused:
        # Add Bessel's correction
        expected_var, correction_factor = self._addBesselsCorrection(
            batch_size * height * width, expected_var)
      images = constant_op.constant(
          image_values, shape=image_shape, dtype=dtypes.float32)
      output_train = _layers.batch_norm(
          images,
          decay=0.1,
          is_training=True,
          scope='BN',
          fused=fused,
          zero_debias_moving_mean=zero_debias_moving_mean)
      output_eval = _layers.batch_norm(
          images,
          decay=0.1,
          is_training=False,
          scope='BN',
          reuse=True,
          fused=fused,
          zero_debias_moving_mean=zero_debias_moving_mean)
      # Initialize all variables
      sess.run(variables_lib.global_variables_initializer())
      moving_mean = variables.get_variables('BN/moving_mean')[0]
      moving_variance = variables.get_variables('BN/moving_variance')[0]
      mean, variance = sess.run([moving_mean, moving_variance])
      # After initialization moving_mean == 0 and moving_variance == 1.
      self.assertAllClose(mean, [0] * channels)
      self.assertAllClose(variance, [1] * channels)
      update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
      with ops.control_dependencies(update_ops):
        barrier = control_flow_ops.no_op(name='barrier')
      train_op = control_flow_ops.with_dependencies([barrier], output_train)
      # Before updates the outputs are different for train and eval.
      self.assertFalse(
          np.allclose(sess.run([output_train]), sess.run([output_eval])))
      for _ in range(10):
        sess.run([train_op])
      mean = moving_mean.eval()
      variance = moving_variance.eval()
      # After 10 updates with decay 0.1 moving_mean == expected_mean and
      # moving_variance == expected_var.
      self.assertAllClose(mean, expected_mean)
      if fused:
        # Add Bessel's correction
        moving_variance_corrected = moving_variance / correction_factor
        correct_moving_variance = state_ops.assign(moving_variance,
                                                   moving_variance_corrected)
        sess.run(correct_moving_variance)
      self.assertAllClose(variance, expected_var)
      # After convergence output_train and output_eval should be the same.
      self.assertAllClose(sess.run([output_train]), sess.run([output_eval]))
# Dispatch wrappers for the variable-reuse test.
def testReuseVarsDefault(self):
    self._testReuseVars(False)
def testReuseVarsFused(self):
    self._testReuseVars(True)
def testReuseVarsDefaultZeroDebias(self):
    self._testReuseVars(False, True)
def testReuseVarsFusedZeroDebias(self):
    self._testReuseVars(True, True)
def _testIsTrainingVariable(self,
                            fused,
                            data_format='NHWC',
                            zero_debias_moving_mean=False):
    """is_training supplied as a graph tensor switches train/eval behavior.

    Args:
      fused: whether to use the fused batch-norm implementation.
      data_format: 'NHWC' or 'NCHW'.
      zero_debias_moving_mean: whether to zero-debias the moving mean.
    """
    height, width = 2, 2
    batch_size = 10
    channels = 3
    # Seed once so image_values (and the derived expected statistics) are
    # deterministic.  A second, redundant np.random.seed(1) call that
    # followed was removed.
    np.random.seed(1)
    use_gpu = fused
    with self.session(use_gpu=use_gpu) as sess:
      if data_format == 'NHWC':
        image_shape = (batch_size, height, width, channels)
        axis = (0, 1, 2)
      else:
        image_shape = (batch_size, channels, height, width)
        axis = (0, 2, 3)
      image_values = np.random.rand(*image_shape)
      expected_mean = np.mean(image_values, axis=axis)
      expected_var = np.var(image_values, axis=axis)
      if fused:
        # Add Bessel's correction
        expected_var, correction_factor = self._addBesselsCorrection(
            batch_size * height * width, expected_var)
      images = constant_op.constant(
          image_values, shape=image_shape, dtype=dtypes.float32)
      # NB: tf.identity is required, because variables can't be fed.
      is_training = tf.identity(tf.Variable(True))
      output = _layers.batch_norm(
          images,
          decay=0.1,
          is_training=is_training,
          fused=fused,
          data_format=data_format,
          zero_debias_moving_mean=zero_debias_moving_mean)
      # Initialize all variables
      sess.run(variables_lib.global_variables_initializer())
      moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
      moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
      mean, variance = sess.run([moving_mean, moving_variance])
      # After initialization moving_mean == 0 and moving_variance == 1.
      self.assertAllClose(mean, [0] * channels)
      self.assertAllClose(variance, [1] * channels)
      # Before updates the outputs are different depending of is_training.
      output_true = sess.run([output], {is_training: True})
      output_false = sess.run([output], {is_training: False})
      self.assertFalse(np.allclose(output_true, output_false))
      update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
      with ops.control_dependencies(update_ops):
        barrier = control_flow_ops.no_op(name='barrier')
      train_op = control_flow_ops.with_dependencies([barrier], output)
      for _ in range(10):
        sess.run([train_op])
      mean = moving_mean.eval()
      variance = moving_variance.eval()
      # After 10 updates with decay 0.1 moving_mean == expected_mean and
      # moving_variance == expected_var.
      self.assertAllClose(mean, expected_mean)
      self.assertAllClose(variance, expected_var)
      # After updates to convergence the outputs don't depend on is_training.
      output_true = sess.run([output], {is_training: True})
      if fused:
        # Add Bessel's correction
        moving_variance_corrected = moving_variance / correction_factor
        correct_moving_variance = state_ops.assign(moving_variance,
                                                   moving_variance_corrected)
        sess.run(correct_moving_variance)
      output_false = sess.run([output], {is_training: False})
      self.assertAllClose(output_true, output_false)
# Thin wrappers dispatching to _testIsTrainingVariable per configuration.
def testIsTrainingVariableNHWC(self):
    self._testIsTrainingVariable(False, data_format='NHWC')
def testIsTrainingVariableNCHW(self):
    self._testIsTrainingVariable(False, data_format='NCHW')
def testIsTrainingVariableNHWCZeroDebias(self):
    self._testIsTrainingVariable(
        False, data_format='NHWC', zero_debias_moving_mean=True)
def testIsTrainingVariableNCHWZeroDebias(self):
    self._testIsTrainingVariable(
        False, data_format='NCHW', zero_debias_moving_mean=True)
def testIsTrainingVariableFusedNCHW(self):
    # Fused NCHW requires a CUDA GPU.
    if test.is_gpu_available(cuda_only=True):
      self._testIsTrainingVariable(True, data_format='NCHW')
def testIsTrainingVariableFusedNHWC(self):
    self._testIsTrainingVariable(True, data_format='NHWC')
def testIsTrainingVariableFusedNCHWZeroDebias(self):
    # Fused NCHW requires a CUDA GPU.
    if test.is_gpu_available(cuda_only=True):
      self._testIsTrainingVariable(
          True, data_format='NCHW', zero_debias_moving_mean=True)
def testIsTrainingVariableFusedNHWCZeroDebias(self):
    self._testIsTrainingVariable(
        True, data_format='NHWC', zero_debias_moving_mean=True)
def testNoUpdatesWhenIsTrainingFalse(self):
    """With is_training=False the moving statistics are never updated."""
    height, width = 3, 3
    with self.cached_session() as sess:
      image_shape = (10, height, width, 3)
      image_values = np.random.rand(*image_shape)
      images = constant_op.constant(
          image_values, shape=image_shape, dtype=dtypes.float32)
      output = _layers.batch_norm(images, decay=0.1, is_training=False)
      update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
      # updates_ops are not added to UPDATE_OPS collection.
      self.assertEqual(len(update_ops), 0)
      # Initialize all variables
      sess.run(variables_lib.global_variables_initializer())
      moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
      moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
      mean, variance = sess.run([moving_mean, moving_variance])
      # After initialization moving_mean == 0 and moving_variance == 1.
      self.assertAllClose(mean, [0] * 3)
      self.assertAllClose(variance, [1] * 3)
      # When is_training is False batch_norm doesn't update moving_vars.
      for _ in range(10):
        sess.run([output])
      self.assertAllClose(moving_mean.eval(), [0] * 3)
      self.assertAllClose(moving_variance.eval(), [1] * 3)
def testNoneUpdatesCollectionNoTraining(self):
    """updates_collections=None combined with is_training=False: no updates."""
    height, width = 3, 3
    with self.cached_session() as sess:
      image_shape = (10, height, width, 3)
      image_values = np.random.rand(*image_shape)
      images = constant_op.constant(
          image_values, shape=image_shape, dtype=dtypes.float32)
      output = _layers.batch_norm(
          images, decay=0.1, updates_collections=None, is_training=False)
      # updates_ops are not added to UPDATE_OPS collection.
      self.assertEqual(ops.get_collection(ops.GraphKeys.UPDATE_OPS), [])
      # Initialize all variables
      sess.run(variables_lib.global_variables_initializer())
      moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
      moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
      mean, variance = sess.run([moving_mean, moving_variance])
      # After initialization moving_mean == 0 and moving_variance == 1.
      self.assertAllClose(mean, [0] * 3)
      self.assertAllClose(variance, [1] * 3)
      # When is_training is False batch_norm doesn't update moving_vars.
      for _ in range(10):
        sess.run([output])
      self.assertAllClose(moving_mean.eval(), [0] * 3)
      self.assertAllClose(moving_variance.eval(), [1] * 3)
def _testNoneUpdatesCollectionIsTrainingVariable(self,
                                                 fused,
                                                 data_format='NHWC'):
    """Inline updates gated by a fed is_training tensor.

    Updates must only happen when is_training is fed as True.

    Args:
      fused: whether to use the fused batch-norm implementation.
      data_format: 'NHWC' or 'NCHW'.
    """
    height, width = 2, 2
    batch_size = 10
    channels = 3
    np.random.seed(1)
    use_gpu = fused
    with self.session(use_gpu=use_gpu) as sess:
      if data_format == 'NHWC':
        image_shape = (batch_size, height, width, channels)
        axis = (0, 1, 2)
      else:
        image_shape = (batch_size, channels, height, width)
        axis = (0, 2, 3)
      image_values = np.random.rand(*image_shape)
      expected_mean = np.mean(image_values, axis=axis)
      expected_var = np.var(image_values, axis=axis)
      if fused:
        # Add Bessel's correction
        expected_var, correction_factor = self._addBesselsCorrection(
            batch_size * height * width, expected_var)
      images = constant_op.constant(
          image_values, shape=image_shape, dtype=dtypes.float32)
      # NB: tf.identity is required because variables cant be fed.
      # Ref: https://github.com/tensorflow/tensorflow/issues/19884
      is_training = tf.identity(tf.Variable(True))
      output = _layers.batch_norm(
          images,
          decay=0.1,
          updates_collections=None,
          is_training=is_training,
          fused=fused,
          data_format=data_format)
      # updates_ops are not added to UPDATE_OPS collection.
      self.assertEqual(ops.get_collection(ops.GraphKeys.UPDATE_OPS), [])
      # Initialize all variables
      sess.run(variables_lib.global_variables_initializer())
      moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
      moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
      mean, variance = sess.run([moving_mean, moving_variance], {
          is_training: True})
      # After initialization moving_mean == 0 and moving_variance == 1.
      self.assertAllClose(mean, [0] * channels)
      self.assertAllClose(variance, [1] * channels)
      # When is_training is False batch_norm doesn't update moving_vars.
      for _ in range(10):
        sess.run([output], {is_training: False})
      self.assertAllClose(moving_mean.eval(), [0] * channels)
      self.assertAllClose(moving_variance.eval(), [1] * channels)
      # Before updates the outputs are different depending of is_training.
      output_true = sess.run([output], {is_training: True})
      output_false = sess.run([output], {is_training: False})
      self.assertFalse(np.allclose(output_true, output_false))
      # When is_training is True update moving_vars.
      for _ in range(10):
        sess.run([output], {is_training: True})
      # After 10 updates with decay 0.1 moving_mean == expected_mean and
      # moving_variance == expected_var.
      self.assertAllClose(moving_mean.eval(), expected_mean)
      self.assertAllClose(moving_variance.eval(), expected_var)
      # After updates to convergence the outputs don't depend on is_training.
      output_true = sess.run([output], {is_training: True})
      if fused:
        # Add Bessel's correction
        moving_variance_corrected = moving_variance / correction_factor
        correct_moving_variance = state_ops.assign(moving_variance,
                                                   moving_variance_corrected)
        sess.run(correct_moving_variance, {is_training: True})
      output_false = sess.run([output], {is_training: False})
      self.assertTrue(np.allclose(output_true, output_false))
# Dispatch wrappers for _testNoneUpdatesCollectionIsTrainingVariable.
def testNoneUpdatesCollectionIsTrainingVariableNHWC(self):
    self._testNoneUpdatesCollectionIsTrainingVariable(False, data_format='NHWC')
def testNoneUpdatesCollectionIsTrainingVariableNCHW(self):
    self._testNoneUpdatesCollectionIsTrainingVariable(False, data_format='NCHW')
def testNoneUpdatesCollectionIsTrainingVariableFusedNCHW(self):
    # Fused NCHW requires a CUDA GPU; run in a fresh graph.
    if test.is_gpu_available(cuda_only=True):
      with tf.Graph().as_default():
        self._testNoneUpdatesCollectionIsTrainingVariable(
            True, data_format='NCHW')
def testNoneUpdatesCollectionIsTrainingVariableFusedNHWC(self):
    self._testNoneUpdatesCollectionIsTrainingVariable(True, data_format='NHWC')
def _testTrainMovingVars(self, fused, data_format='NHWC'):
    """Gradients stay stable while the moving mean converges.

    Args:
      fused: whether to use the fused batch-norm implementation.
      data_format: 'NHWC' or 'NCHW'.
    """
    # Test that the gradients are stable while the moving_mean is updated.
    # Since the moving_mean is used as shift to compute the tf.nn.moments, the
    # gradients could diverge; this test checks that gradients remain stable
    # while the moving_mean is updated.
    height, width = 7, 7
    batch_size = 10
    channels = 32
    np.random.seed(1)
    use_gpu = fused
    with self.session(use_gpu=use_gpu) as sess:
      if data_format == 'NHWC':
        image_shape = (batch_size, height, width, channels)
        axis = (0, 1, 2)
      else:
        image_shape = (batch_size, channels, height, width)
        axis = (0, 2, 3)
      # Large offset (+256) stresses the shift-based moments computation.
      image_values = np.random.rand(*image_shape) + 256
      expected_mean = np.mean(image_values, axis=axis)
      expected_var = np.var(image_values, axis=axis)
      if fused:
        # Add Bessel's correction
        expected_var, _ = self._addBesselsCorrection(
            batch_size * height * width, expected_var)
      images = constant_op.constant(
          image_values, shape=image_shape, dtype=dtypes.float32)
      output = _layers.batch_norm(
          images,
          decay=0.2,
          updates_collections=None,
          is_training=True,
          fused=fused,
          data_format=data_format)
      self.assertEqual(ops.get_collection(ops.GraphKeys.UPDATE_OPS), [])
      objective = math_ops.reduce_sum(output)
      [images_gradients] = gradients_impl.gradients(objective, images)
      # Initialize all variables
      sess.run(variables_lib.global_variables_initializer())
      moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
      moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
      mean, variance = sess.run([moving_mean, moving_variance])
      # After initialization moving_mean == 0 and moving_variance == 1.
      self.assertAllClose(mean, [0] * channels)
      self.assertAllClose(variance, [1] * channels)
      # Initial input gradients.
      images_gradients_value = sess.run(images_gradients)
      for _ in range(10):
        np_output, new_images_gradients = sess.run([output, images_gradients])
        # The outputs should be close to 0.0 mean and 1.0 variance
        self.assertAllClose(
            np.mean(np_output, axis=axis), [0] * channels,
            rtol=0.001,
            atol=0.001)
        self.assertAllClose(
            np.var(np_output, axis=axis), [1] * channels, rtol=0.01, atol=0.01)
        # The gradients should change slowly while updating moving_mean.
        max_diff = np.max(np.abs(images_gradients_value - new_images_gradients))
        self.assertGreaterEqual(max_diff, 0.0)
        self.assertLess(max_diff, 5e-5)
      self.assertAllClose(moving_mean.eval(), expected_mean)
      self.assertAllClose(moving_variance.eval(), expected_var)
# Dispatch wrappers for _testTrainMovingVars.
def testTrainMovingVarsNHWC(self):
    self._testTrainMovingVars(False, data_format='NHWC')
def testTrainMovingVarsNCHW(self):
    self._testTrainMovingVars(False, data_format='NCHW')
def testTrainMovingVarsFusedNCHW(self):
    # Fused NCHW requires a CUDA GPU.
    if test.is_gpu_available(cuda_only=True):
      self._testTrainMovingVars(True, data_format='NCHW')
def testTrainMovingVarsFusedNHWC(self):
    self._testTrainMovingVars(True, data_format='NHWC')
def testCustomInitializer(self):
height, width = 3, 3
channels = 3
with self.cached_session() as sess:
images = (np.ones((5, height, width, channels)) * 9.0).astype('f')
beta = init_ops.constant_initializer(
(np.ones(channels) * 5.0).astype('f'))
gamma = init_ops.constant_initializer(
(np.ones(channels) * 2.0).astype('f'))
mean = init_ops.constant_initializer(
(np.ones(channels) * 5.0).astype('f'))
variance = init_ops.constant_initializer(
(np.ones(channels) * 4.0).astype('f'))
output = _layers.batch_norm(
images,
is_training=False,
scale=True,
epsilon=0.0,
param_initializers={
'beta': beta,
'gamma': gamma,
'moving_mean': mean,
'moving_variance': variance,
})
sess.run(variables_lib.global_variables_initializer())
outs = sess.run(output)
self.assertAllClose(outs, images)
def _runBatchNormalizationWithFormat(self, shape, data_format, is_training):
channels = shape[-1]
with self.session(use_gpu=True) as sess:
images = np.arange(np.product(shape), dtype=np.float32).reshape(shape)
beta = init_ops.constant_initializer(
np.arange(2, channels + 2, dtype=np.float32))
gamma = init_ops.constant_initializer(
np.arange(10, channels + 10, dtype=np.float32) * 2.0)
mean = init_ops.constant_initializer(
np.arange(3, channels + 3, dtype=np.float32) * 5.0)
variance = init_ops.constant_initializer(
np.arange(1, channels + 1, dtype=np.float32) * 4.0)
if data_format == 'NCHW':
# Reshape inputs from NHWC to NCHW format.
images = array_ops.transpose(
images, [0, len(shape) - 1] + list(range(1,
len(shape) - 1)))
output = _layers.batch_norm(
images,
is_training=is_training,
scale=True,
epsilon=0.5,
param_initializers={
'beta': beta,
'gamma': gamma,
'moving_mean': mean,
'moving_variance': variance,
},
data_format=data_format)
if data_format == 'NCHW':
# Reshape outputs from NCHW back to NHWC format.
output = array_ops.transpose(output,
[0] + list(range(2, len(shape))) + [1])
sess.run(variables_lib.global_variables_initializer())
return sess.run(output)
def testNHWCAndNCHWInferenceProduceSameOutput(self):
if test.is_gpu_available(cuda_only=True):
for shape in [[7, 3, 5], [5, 2, 3, 4], [11, 3, 2, 4, 5]]:
nhwc = self._runBatchNormalizationWithFormat(
data_format='NHWC', shape=shape, is_training=False)
nchw = self._runBatchNormalizationWithFormat(
data_format='NCHW', shape=shape, is_training=False)
self.assertAllClose(nhwc, nchw, atol=1e-4, rtol=1e-4)
def testNHWCAndNCHWTrainingProduceSameOutput(self):
if test.is_gpu_available(cuda_only=True):
for shape in [[7, 3, 5], [5, 2, 3, 4], [11, 3, 2, 4, 5]]:
nhwc = self._runBatchNormalizationWithFormat(
data_format='NHWC', shape=shape, is_training=True)
nchw = self._runBatchNormalizationWithFormat(
data_format='NCHW', shape=shape, is_training=True)
self.assertAllClose(nhwc, nchw, atol=1e-4, rtol=1e-4)
  def testBatchNormBeta(self):
    """Regression test: zero_debias_moving_mean builds for float32 and float16.

    Only checks that graph construction and variable initialization
    succeed; no output value is asserted.
    """
    # Test case for 11673
    with self.cached_session() as sess:
      a_32 = array_ops.placeholder(dtypes.float32, shape=(10, 10, 10, 10))
      _layers.batch_norm(
          a_32, center=False, data_format='NCHW', zero_debias_moving_mean=True)
      a_16 = array_ops.placeholder(dtypes.float16, shape=(10, 10, 10, 10))
      _layers.batch_norm(
          a_16, center=False, data_format='NCHW', zero_debias_moving_mean=True)
      sess.run(variables_lib.global_variables_initializer())
def is_float_var(self, v):
if v.dtype == dtypes.float32_ref:
return True
if v.dtype == tf.float32 and v.op.outputs[0].dtype == tf.resource:
return True
return False
def testVariablesAreFloat32(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, dtype=dtypes.float16)
_layers.batch_norm(images, scale=True)
beta = variables.get_variables_by_name('beta')[0]
gamma = variables.get_variables_by_name('gamma')[0]
self.assertTrue(self.is_float_var(beta))
self.assertTrue(self.is_float_var(gamma))
moving_mean = variables.get_variables_by_name('moving_mean')[0]
moving_variance = variables.get_variables_by_name('moving_variance')[0]
self.assertTrue(self.is_float_var(moving_mean))
self.assertTrue(self.is_float_var(moving_variance))
def _runFusedBatchNorm(self, shape, dtype):
channels = shape[1]
images = np.arange(np.product(shape), dtype=dtype).reshape(shape)
beta = init_ops.constant_initializer(
np.arange(2, channels + 2, dtype=np.float32))
gamma = init_ops.constant_initializer(
np.arange(10, channels + 10, dtype=np.float32) * 2.0)
mean = init_ops.constant_initializer(
np.arange(3, channels + 3, dtype=np.float32) * 5.0)
variance = init_ops.constant_initializer(
np.arange(1, channels + 1, dtype=np.float32) * 4.0)
output = _layers.batch_norm(
images,
fused=True,
is_training=True,
scale=True,
epsilon=0.5,
param_initializers={
'beta': beta,
'gamma': gamma,
'moving_mean': mean,
'moving_variance': variance,
},
data_format='NCHW')
with self.session(use_gpu=True) as sess:
sess.run(variables_lib.global_variables_initializer())
return sess.run(output)
  def testFusedBatchNormFloat16MatchesFloat32(self):
    """Fused batch norm in float16 stays close to the float32 result."""
    if test.is_gpu_available(cuda_only=True):
      shape = [5, 4, 2, 3]
      res_32 = self._runFusedBatchNorm(shape, np.float32)
      res_16 = self._runFusedBatchNorm(shape, np.float16)
      # Loose tolerance: float16 carries roughly 3 decimal digits.
      self.assertAllClose(res_32, res_16, rtol=1e-3)
  def testAdjustmentCreated(self):
    """The `adjustment` callable is invoked and its tensors are per-channel."""
    # Tests that the adjustment is appropriately passed to and used by the core
    # BN layer.
    all_adjustments = []

    def _create_adjustment(shape):
      # Records the (scale, bias) adjustment tensors handed to the layer.
      adjustments = [array_ops.ones(shape[-1:]), array_ops.zeros(shape[-1:])]
      all_adjustments.extend(adjustments)
      return adjustments

    depth = 8
    images = array_ops.zeros([10, 5, 5, depth])
    output = _layers.batch_norm(
        images, is_training=True, adjustment=_create_adjustment)
    self.assertListEqual(output.shape.as_list(), images.shape.as_list())
    # Exactly one invocation, producing two per-channel ([depth]) tensors.
    self.assertEqual(len(all_adjustments), 2)
    self.assertListEqual(all_adjustments[0].shape.as_list(), [depth])
    self.assertListEqual(all_adjustments[1].shape.as_list(), [depth])
class LayerNormTest(test.TestCase):
  """Tests for layers.layer_norm."""

  def testUnknownShape(self):
    """layer_norm raises when the input rank is unknown."""
    with ops.Graph().as_default() as g, self.session(g):
      inputs = array_ops.placeholder(dtype=dtypes.float32)
      with self.assertRaisesRegexp(ValueError, 'undefined rank'):
        _layers.layer_norm(inputs)

  def testParamsDimsNotFullyDefined(self):
    """layer_norm raises when the normalized dims are not fully defined."""
    with ops.Graph().as_default() as g, self.session(g):
      inputs = array_ops.placeholder(dtype=dtypes.float32)
      inputs.set_shape(tensor_shape.TensorShape((5, 3, 3, None)))
      with self.assertRaisesRegexp(ValueError, 'is not fully defined'):
        _layers.layer_norm(inputs)

  def testCreateOp(self):
    """The op is created under the LayerNorm scope and keeps input shape."""
    height, width = 3, 3
    with self.cached_session():
      images = np.random.uniform(size=(5, height, width, 3))
      output = _layers.layer_norm(images)
      self.assertTrue(output.op.name.startswith('LayerNorm/batchnorm'))
      self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])

  def testCreateVariables(self):
    """layer_norm creates beta and gamma under its scope."""
    height, width = 3, 3
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      _layers.layer_norm(images)
      beta = variables.get_variables_by_name('beta')[0]
      gamma = variables.get_variables_by_name('gamma')[0]
      self.assertEqual(beta.op.name, 'LayerNorm/beta')
      self.assertEqual(gamma.op.name, 'LayerNorm/gamma')

  def testReuseVariables(self):
    """A second call with reuse=True creates no additional variables."""
    height, width = 3, 3
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      _layers.layer_norm(images, scope='ln')
      _layers.layer_norm(images, scope='ln', reuse=True)
      beta = variables.get_variables_by_name('beta')
      gamma = variables.get_variables_by_name('gamma')
      self.assertEqual(len(beta), 1)
      self.assertEqual(len(gamma), 1)

  def testReuseVars(self):
    """A reused layer produces the same output as the original layer."""
    height, width = 3, 3
    with self.cached_session() as sess:
      image_shape = (10, height, width, 3)
      image_values = np.random.rand(*image_shape)
      images = constant_op.constant(
          image_values, shape=image_shape, dtype=dtypes.float32)
      output_train = _layers.layer_norm(images, scope='LN')
      output_eval = _layers.layer_norm(images, scope='LN', reuse=True)
      # Initialize all variables
      sess.run(variables_lib.global_variables_initializer())
      # output_train and output_eval should be the same.
      self.assertAllClose(sess.run([output_train]), sess.run([output_eval]))

  def doOutputTest(self,
                   input_shape,
                   tol=1e-5,
                   begin_norm_axis=1,
                   dtype=dtypes.float64):
    """Checks layer_norm output statistics against a numpy reference.

    Args:
      input_shape: Shape of the random input to normalize.
      tol: Tolerance for the mean/variance checks; the full-output check
        uses 5 * tol.
      begin_norm_axis: First axis of the normalized span (may be negative).
      dtype: Input dtype; float16 gets a larger epsilon to avoid NaNs.
    """
    eps = 1e-12 if dtype != dtypes.float16 else 1e-3
    expected_mean = np.zeros(input_shape[:begin_norm_axis])
    expected_var_uncorrected = np.ones(input_shape[:begin_norm_axis])
    sigma_list = [1.0, 0.1]
    if dtype == dtypes.float16:
      # This causes the variance to underflow in float16, and requires that
      # variance_epsilon be set appropriately to avoid NaNs in the output.
      sigma_list.append(1e-4)
    # Note that the mean:variance ratio must be limited to the representable
    # range for float16.
    for mu in [0.0, 1e2 if dtype != dtypes.float16 else 1e1]:
      for sigma in sigma_list:
        # The epsilon inside the normalization shrinks the output variance
        # by this factor relative to 1.
        expected_var = expected_var_uncorrected / (1.0 + eps / sigma**2)
        input_values = np.random.randn(*input_shape) * sigma + mu
        with ops.Graph().as_default() as g:
          with self.session(graph=g) as sess:
            inputs = constant_op.constant(
                input_values, shape=input_shape, dtype=dtype)
            output_t = _layers.layer_norm(
                inputs, begin_norm_axis=begin_norm_axis, scope='LN')
            # Initialize all variables
            sess.run(variables_lib.global_variables_initializer())
            # The mean and variance of the output should be close to 0 and 1
            # respectively.
            if begin_norm_axis < 0:
              begin_norm_axis = len(input_shape) + begin_norm_axis
            moments_axis = tuple(range(begin_norm_axis, len(input_shape)))
            with variable_scope.variable_scope('LN', reuse=True):
              beta_var = variable_scope.get_variable('beta', dtype=dtype)
              gamma_var = variable_scope.get_variable('gamma', dtype=dtype)
            outputs, beta, gamma = sess.run((output_t, beta_var, gamma_var))
            # Make sure that there are no NaNs
            self.assertFalse(np.isnan(outputs).any())
            if outputs.dtype != np.float64:
              # Cast to float64 before computing mean/variance to avoid
              # overflow and precision issues.
              outputs = outputs.astype(np.float64)
            mean = np.mean(outputs, axis=moments_axis)
            var = np.var(outputs, axis=moments_axis)
            # Layer-norm implemented in numpy
            expected_out = (
                (gamma * (input_values - np.mean(
                    input_values, axis=moments_axis, keepdims=True)) /
                 np.sqrt(eps + np.var(
                     input_values, axis=moments_axis, keepdims=True))) + beta)
            self.assertAllClose(expected_mean, mean, atol=tol, rtol=tol)
            self.assertAllClose(expected_var, var, atol=tol)
            # The full computation gets a bigger tolerance
            self.assertAllClose(expected_out, outputs, atol=5 * tol)

  def testOutput2DInput(self):
    self.doOutputTest((10, 300))

  def testOutput2DInputDegenerateNormAxis(self):
    """begin_norm_axis must be strictly less than the input rank."""
    with self.assertRaisesRegexp(ValueError, r'must be < rank\(inputs\)'):
      self.doOutputTest((10, 300), begin_norm_axis=2)

  def testOutput4DInput(self):
    self.doOutputTest((100, 10, 10, 3))

  def testOutput4DInputNormOnInnermostAxis(self):
    """begin_norm_axis=3 and begin_norm_axis=-1 must behave identically."""
    # Equivalent tests
    self.doOutputTest(
        (100, 10, 10, 3), begin_norm_axis=3, tol=1e-4, dtype=dtypes.float64)
    self.doOutputTest(
        (100, 10, 10, 3), begin_norm_axis=-1, tol=1e-4, dtype=dtypes.float64)

  def testOutputSmallInput(self):
    self.doOutputTest((10, 10, 10, 30))

  def testOutputSmallInputNormOnInnermostAxis(self):
    self.doOutputTest((10, 10, 10, 30), begin_norm_axis=3)

  def testOutputBigInput(self):
    self.doOutputTest((1, 100, 100, 1))

  def testOutputBigInputFloat32(self):
    self.doOutputTest((1, 100, 1000, 1), tol=1e-4, dtype=dtypes.float32)

  def testOutputBigInputFloat16(self):
    # float16 needs a much looser tolerance (see doOutputTest).
    self.doOutputTest((1, 100, 1000, 1), tol=5e-2, dtype=dtypes.float16)
class GDNTest(test.TestCase):
  """Tests for the generalized divisive normalization (gdn) layer."""

  def _runGDN(self, x, shape, inverse, data_format):
    """Builds a gdn layer with the given options and evaluates it on `x`."""
    placeholder = array_ops.placeholder(dtypes.float32, shape)
    layer_out = _layers.gdn(
        placeholder, inverse=inverse, data_format=data_format)
    with self.cached_session() as sess:
      variables_lib.global_variables_initializer().run()
      result, = sess.run([layer_out], {placeholder: x})
    return result

  def testInvalidDataFormat(self):
    """gdn uses 'channels_*' spellings, not 'NHWC'."""
    data = np.random.uniform(size=(1, 2, 3, 4))
    with self.assertRaises(ValueError):
      self._runGDN(data, data.shape, False, 'NHWC')

  def testUnknownDim(self):
    """The channel dimension must be statically known."""
    data = np.random.uniform(size=(1, 2, 3, 4))
    with self.assertRaises(ValueError):
      self._runGDN(data, 4 * [None], False, 'channels_last')

  def testChannelsLast(self):
    """Output matches x / sqrt(1 + .1 * x**2) for 3-D, 4-D and 5-D inputs."""
    for rank in (3, 4, 5):
      data = np.random.uniform(size=(1, 2, 3, 4)[:rank])
      result = self._runGDN(data, data.shape, False, 'channels_last')
      self.assertEqual(data.shape, result.shape)
      self.assertAllClose(
          result, data / np.sqrt(1 + .1 * (data**2)), rtol=0, atol=1e-6)

  def testChannelsFirst(self):
    """Same check in channels-first layout; needs a GPU."""
    # `bias_add` doesn't support NCHW on CPU.
    if not test.is_gpu_available(cuda_only=True):
      return
    for rank in (3, 4, 5):
      data = np.random.uniform(size=(4, 3, 2, 1)[:rank])
      result = self._runGDN(data, data.shape, False, 'channels_first')
      self.assertEqual(data.shape, result.shape)
      self.assertAllClose(
          result, data / np.sqrt(1 + .1 * (data**2)), rtol=0, atol=1e-6)

  def testWrongDims(self):
    """Inputs of rank 1, 2 or 6 are rejected."""
    for rank in (1, 2, 6):
      data = np.random.uniform(size=(1, 2, 3, 4, 3, 2)[:rank])
      with self.assertRaises(ValueError):
        self._runGDN(data, data.shape, False, 'channels_last')

  def testIGDN(self):
    """Inverse GDN multiplies by sqrt(1 + .1 * x**2) instead of dividing."""
    data = np.random.uniform(size=(1, 2, 3, 4))
    result = self._runGDN(data, data.shape, True, 'channels_last')
    self.assertEqual(data.shape, result.shape)
    self.assertAllClose(
        result, data * np.sqrt(1 + .1 * (data**2)), rtol=0, atol=1e-6)
class ImagesToSequenceTest(test.TestCase):
  """Tests for layers.images_to_sequence."""

  def testInvalidDataFormat(self):
    """Only NCHW and NHWC layouts are accepted."""
    h, w = 7, 11
    imgs = np.random.uniform(size=(5, h, w, 2))
    with self.assertRaisesRegexp(ValueError,
                                 'data_format has to be either NCHW or NHWC.'):
      _layers.images_to_sequence(imgs, data_format='CHWN')

  def testImagesToSequenceDims(self):
    """NHWC images of shape (2, 7, 11, 5) map to an (11, 14, 5) sequence."""
    h, w = 7, 11
    imgs = np.random.uniform(size=(2, h, w, 5)).astype(np.float32)
    seq = _layers.images_to_sequence(imgs)
    self.assertListEqual(seq.get_shape().as_list(), [11, 14, 5])

  def testImagesToSequenceNCHW(self):
    """NCHW images of shape (2, 5, 7, 11) map to an (11, 14, 5) sequence."""
    h, w = 7, 11
    imgs = np.random.uniform(size=(2, 5, h, w)).astype(np.float32)
    seq = _layers.images_to_sequence(imgs, data_format='NCHW')
    self.assertListEqual(seq.get_shape().as_list(), [11, 14, 5])
class MaxPool2DTest(test.TestCase):
  """Tests for layers.max_pool2d."""

  def testInvalidDataFormat(self):
    """Only NCHW and NHWC layouts are accepted."""
    h, w = 3, 6
    imgs = np.random.uniform(size=(5, h, w, 3))
    with self.assertRaisesRegexp(ValueError,
                                 'data_format has to be either NCHW or NHWC.'):
      _layers.max_pool2d(imgs, [3, 3], data_format='CHWN')

  def testCreateMaxPool(self):
    """Default stride-2 VALID pooling of a 3x6 image yields 1x2."""
    h, w = 3, 6
    imgs = np.random.uniform(size=(5, h, w, 3)).astype(np.float32)
    out = _layers.max_pool2d(imgs, [3, 3])
    self.assertEqual(out.op.name, 'MaxPool2D/MaxPool')
    self.assertListEqual(out.get_shape().as_list(), [5, 1, 2, 3])

  def testCreateMaxPoolNCHW(self):
    """The same pooling with the channel axis leading."""
    h, w = 3, 6
    imgs = np.random.uniform(size=(5, 3, h, w)).astype(np.float32)
    out = _layers.max_pool2d(imgs, [3, 3], data_format='NCHW')
    self.assertListEqual(out.get_shape().as_list(), [5, 3, 1, 2])

  def testCollectOutputs(self):
    """The pooled tensor is aliased and collected when requested."""
    h, w = 3, 6
    imgs = random_ops.random_uniform((5, h, w, 3), seed=1)
    out = _layers.max_pool2d(imgs, [3, 3], outputs_collections='outputs')
    collected = ops.get_collection('outputs')[0]
    self.assertEqual(collected.aliases, ['MaxPool2D'])
    self.assertEqual(collected, out)

  def testCreateSquareMaxPool(self):
    """A scalar kernel size is expanded to a square kernel."""
    h, w = 3, 6
    imgs = random_ops.random_uniform((5, h, w, 3), seed=1)
    out = _layers.max_pool2d(imgs, 3)
    self.assertEqual(out.op.name, 'MaxPool2D/MaxPool')
    self.assertListEqual(out.get_shape().as_list(), [5, 1, 2, 3])

  def testCreateMaxPoolWithScope(self):
    """An explicit scope renames the pooling op."""
    h, w = 3, 6
    imgs = random_ops.random_uniform((5, h, w, 3), seed=1)
    out = _layers.max_pool2d(imgs, [3, 3], scope='pool1')
    self.assertEqual(out.op.name, 'pool1/MaxPool')

  def testCreateMaxPoolWithSamePadding(self):
    """SAME padding rounds spatial dims up: ceil(3/2) x ceil(6/2)."""
    h, w = 3, 6
    imgs = random_ops.random_uniform((5, h, w, 3), seed=1)
    out = _layers.max_pool2d(imgs, [3, 3], padding='SAME')
    self.assertListEqual(out.get_shape().as_list(), [5, 2, 3, 3])

  def testCreateMaxPoolWithSamePaddingNCHW(self):
    """SAME padding in NCHW layout."""
    h, w = 3, 6
    imgs = random_ops.random_uniform((5, 3, h, w), seed=1)
    out = _layers.max_pool2d(
        imgs, [3, 3], padding='SAME', data_format='NCHW')
    self.assertListEqual(out.get_shape().as_list(), [5, 3, 2, 3])

  def testCreateMaxPoolStrideWithSamePadding(self):
    """stride=1 with SAME padding preserves the spatial dims."""
    h, w = 3, 6
    imgs = random_ops.random_uniform((5, h, w, 3), seed=1)
    out = _layers.max_pool2d(imgs, [3, 3], stride=1, padding='SAME')
    self.assertListEqual(out.get_shape().as_list(), [5, h, w, 3])

  def testGlobalMaxPool(self):
    """A kernel covering the whole image collapses spatial dims to 1x1."""
    h, w = 3, 6
    imgs = random_ops.random_uniform((5, h, w, 3), seed=1)
    out = _layers.max_pool2d(imgs, imgs.get_shape()[1:3], stride=1)
    self.assertListEqual(out.get_shape().as_list(), [5, 1, 1, 3])
class MaxPool3DTest(test.TestCase):
  """Tests for layers.max_pool3d."""

  def testInvalidDataFormat(self):
    """Only NCDHW and NDHWC layouts are accepted."""
    d, h, w = 3, 6, 9
    vols = np.random.uniform(size=(5, d, h, w, 3))
    with self.assertRaisesRegexp(
        ValueError, 'data_format has to be either NCDHW or NDHWC.'):
      _layers.max_pool3d(vols, [3, 3, 3], data_format='CDHWN')

  def testCreateMaxPool(self):
    """Default stride-2 VALID pooling of a 3x6x9 volume yields 1x2x4."""
    d, h, w = 3, 6, 9
    vols = np.random.uniform(size=(5, d, h, w, 3)).astype(np.float32)
    out = _layers.max_pool3d(vols, [3, 3, 3])
    self.assertEqual(out.op.name, 'MaxPool3D/MaxPool3D')
    self.assertListEqual(out.get_shape().as_list(), [5, 1, 2, 4, 3])

  def testCreateMaxPoolNCDHW(self):
    """NCDHW pooling ends in a transpose op back to channel-first layout."""
    d, h, w = 3, 6, 9
    vols = np.random.uniform(size=(5, 3, d, h, w)).astype(np.float32)
    out = _layers.max_pool3d(vols, [3, 3, 3], data_format='NCDHW')
    self.assertEqual(out.op.name, 'MaxPool3D/transpose_1')
    self.assertListEqual(out.get_shape().as_list(), [5, 3, 1, 2, 4])

  def testCollectOutputs(self):
    """The pooled tensor is aliased and collected when requested."""
    d, h, w = 3, 6, 9
    vols = random_ops.random_uniform((5, d, h, w, 3), seed=1)
    out = _layers.max_pool3d(
        vols, [3, 3, 3], outputs_collections='outputs')
    collected = ops.get_collection('outputs')[0]
    self.assertEqual(collected.aliases, ['MaxPool3D'])
    self.assertEqual(collected, out)

  def testCreateSquareMaxPool(self):
    """A scalar kernel size is expanded to a cube."""
    d, h, w = 3, 6, 9
    vols = random_ops.random_uniform((5, d, h, w, 3), seed=1)
    out = _layers.max_pool3d(vols, 3)
    self.assertEqual(out.op.name, 'MaxPool3D/MaxPool3D')
    self.assertListEqual(out.get_shape().as_list(), [5, 1, 2, 4, 3])

  def testCreateMaxPoolWithScope(self):
    """An explicit scope renames the pooling op."""
    d, h, w = 3, 6, 9
    vols = random_ops.random_uniform((5, d, h, w, 3), seed=1)
    out = _layers.max_pool3d(vols, [3, 3, 3], scope='pool1')
    self.assertEqual(out.op.name, 'pool1/MaxPool3D')

  def testCreateMaxPoolWithSamePadding(self):
    """SAME padding rounds every spatial dim up by the stride."""
    d, h, w = 3, 6, 9
    vols = random_ops.random_uniform((5, d, h, w, 3), seed=1)
    out = _layers.max_pool3d(vols, [3, 3, 3], padding='SAME')
    self.assertListEqual(out.get_shape().as_list(), [5, 2, 3, 5, 3])

  def testCreateMaxPoolWithSamePaddingNCDHW(self):
    """SAME padding in NCDHW layout."""
    d, h, w = 3, 6, 9
    vols = random_ops.random_uniform((5, 3, d, h, w), seed=1)
    out = _layers.max_pool3d(
        vols, [3, 3, 3], padding='SAME', data_format='NCDHW')
    self.assertListEqual(out.get_shape().as_list(), [5, 3, 2, 3, 5])

  def testCreateMaxPoolStrideWithSamePadding(self):
    """stride=1 with SAME padding preserves every spatial dim."""
    d, h, w = 3, 6, 9
    vols = random_ops.random_uniform((5, d, h, w, 3), seed=1)
    out = _layers.max_pool3d(vols, [3, 3, 3], stride=1, padding='SAME')
    self.assertListEqual(out.get_shape().as_list(), [5, d, h, w, 3])

  def testGlobalMaxPool(self):
    """A kernel covering the whole volume collapses spatial dims to 1x1x1."""
    d, h, w = 3, 6, 9
    vols = random_ops.random_uniform((5, d, h, w, 3), seed=1)
    out = _layers.max_pool3d(vols, vols.get_shape()[1:4], stride=1)
    self.assertListEqual(out.get_shape().as_list(), [5, 1, 1, 1, 3])
class OneHotEncodingTest(test.TestCase):
  """Tests for layers.one_hot_encoding."""

  def testOneHotEncodingCreate(self):
    """Op name and output shape for a 3-class encoding."""
    with self.cached_session():
      class_ids = np.array([0, 1, 2])
      encoded = _layers.one_hot_encoding(class_ids, num_classes=3)
      self.assertEqual(encoded.op.name, 'OneHotEncoding/one_hot')
      self.assertListEqual(encoded.get_shape().as_list(), [3, 3])

  def testCollectOutputs(self):
    """The output is aliased and collected when requested."""
    with self.cached_session():
      class_ids = constant_op.constant([0, 1, 2])
      encoded = _layers.one_hot_encoding(
          class_ids, num_classes=3, outputs_collections='outputs')
      collected = ops.get_collection('outputs')[0]
      self.assertEqual(collected.aliases, ['OneHotEncoding'])
      self.assertEqual(collected, encoded)

  def testOneHotEncoding(self):
    """Labels [0, 1, 2] map to the 3x3 identity matrix."""
    with self.cached_session():
      class_ids = constant_op.constant([0, 1, 2])
      expected = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
      encoded = _layers.one_hot_encoding(class_ids, num_classes=3)
      self.assertAllClose(encoded.eval(), expected.eval())

  def testOneHotEncodingInt32(self):
    """Same as testOneHotEncoding with explicitly int32 labels."""
    with self.cached_session():
      class_ids = constant_op.constant([0, 1, 2], dtype=dtypes.int32)
      expected = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
      encoded = _layers.one_hot_encoding(class_ids, num_classes=3)
      self.assertAllClose(encoded.eval(), expected.eval())
class RepeatTests(test.TestCase):
  """Tests for layers.repeat, which stacks a layer several times."""

  def testRepeat(self):
    """Three stacked conv2d layers are auto-scoped under 'Repeat'."""
    h, w = 3, 3
    with self.cached_session():
      imgs = np.random.uniform(size=(5, h, w, 3)).astype(np.float32)
      net = _layers.repeat(imgs, 3, layers_lib.conv2d, 32, [3, 3])
      self.assertEqual(net.op.name, 'Repeat/convolution2d_3/Relu')
      self.assertListEqual(net.get_shape().as_list(), [5, 3, 3, 32])

  def testRepeatWithScope(self):
    """An explicit scope names the stacked layers conv1_1 .. conv1_3."""
    h, w = 3, 3
    with self.cached_session():
      imgs = random_ops.random_uniform(
          (5, h, w, 3), seed=1, name='images')
      net = _layers.repeat(
          imgs, 3, layers_lib.conv2d, 32, [3, 3], scope='conv1')
      self.assertEqual(net.op.name, 'conv1/conv1_3/Relu')
      self.assertListEqual(net.get_shape().as_list(), [5, 3, 3, 32])
class SeparableConv2dTest(test.TestCase):
  """Tests for layers_lib.separable_conv2d (incl. depthwise-only mode)."""

  def testCreateConvInt32(self):
    """Integer inputs are rejected."""
    height, width = 3, 3
    with self.cached_session():
      images = random_ops.random_uniform(
          (5, height, width, 3), seed=1, dtype=dtypes.int32, maxval=12345)
      with self.assertRaisesRegexp(TypeError, 'non-floating point type'):
        layers_lib.separable_conv2d(images, 32, [3, 3], 2)

  def testCreateConvFloat32(self):
    """Default (SAME) separable conv keeps spatial dims, 32 output channels."""
    height, width = 3, 3
    with self.cached_session():
      images = random_ops.random_uniform(
          (5, height, width, 3), seed=1, dtype=dtypes.float32)
      output = layers_lib.separable_conv2d(images, 32, [3, 3], 2)
      self.assertEqual(output.op.name, 'SeparableConv2d/Relu')
      self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])

  def testCreateDepthwiseConv(self):
    """num_outputs=None gives depthwise-only: 3 channels * multiplier 2 = 6."""
    height, width = 3, 3
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      output = layers_lib.separable_conv2d(images, None, [3, 3], 2)
      self.assertEqual(output.op.name, 'SeparableConv2d/Relu')
      self.assertListEqual(output.get_shape().as_list(), [5, height, width, 6])

  def testCreateConvCreatesWeightsAndBiasesVars(self):
    """Depthwise weights, pointwise weights and biases are all created."""
    height, width = 3, 3
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with self.cached_session():
      self.assertFalse(variables.get_variables('conv1/depthwise_weights'))
      self.assertFalse(variables.get_variables('conv1/pointwise_weights'))
      self.assertFalse(variables.get_variables('conv1/biases'))
      layers_lib.separable_conv2d(images, 32, [3, 3], 4, scope='conv1')
      self.assertTrue(variables.get_variables('conv1/depthwise_weights'))
      self.assertTrue(variables.get_variables('conv1/pointwise_weights'))
      self.assertTrue(variables.get_variables('conv1/biases'))

  def testCreateAtrousConvCreatesWeightsAndBiasesVars(self):
    """The same variable set is created for the atrous (rate=2) variant."""
    height, width = 3, 3
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with self.cached_session():
      self.assertFalse(variables.get_variables('conv1/depthwise_weights'))
      self.assertFalse(variables.get_variables('conv1/pointwise_weights'))
      self.assertFalse(variables.get_variables('conv1/biases'))
      layers_lib.separable_conv2d(images, 32, [3, 3], 4, rate=2, scope='conv1')
      self.assertTrue(variables.get_variables('conv1/depthwise_weights'))
      self.assertTrue(variables.get_variables('conv1/pointwise_weights'))
      self.assertTrue(variables.get_variables('conv1/biases'))

  def testCreateDepthwiseConvCreatesWeightsAndBiasesVars(self):
    """Depthwise-only mode must not create pointwise weights."""
    height, width = 3, 3
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with self.cached_session():
      self.assertFalse(variables.get_variables('conv1/depthwise_weights'))
      self.assertFalse(variables.get_variables('conv1/pointwise_weights'))
      self.assertFalse(variables.get_variables('conv1/biases'))
      layers_lib.separable_conv2d(images, None, [3, 3], 4, scope='conv1')
      self.assertTrue(variables.get_variables('conv1/depthwise_weights'))
      self.assertFalse(variables.get_variables('conv1/pointwise_weights'))
      self.assertTrue(variables.get_variables('conv1/biases'))

  def testCreateConvWithScope(self):
    """An explicit scope renames the output op."""
    height, width = 3, 3
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      output = layers_lib.separable_conv2d(images, 32, [3, 3], 6, scope='conv1')
      self.assertEqual(output.op.name, 'conv1/Relu')

  def testCreateConvWithoutActivation(self):
    """With activation_fn=None the final op is the bias add."""
    height, width = 3, 3
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      output = layers_lib.separable_conv2d(
          images, 32, [3, 3], 8, activation_fn=None)
      self.assertEqual(output.op.name, 'SeparableConv2d/BiasAdd')

  def testCreateConvValid(self):
    """VALID padding shrinks a 3x3 input with a 3x3 kernel to 1x1."""
    height, width = 3, 3
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      output = layers_lib.separable_conv2d(
          images, 32, [3, 3], 2, padding='VALID')
      self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 32])

  def testCreateAtrousConvValid(self):
    """rate=2 dilates the 3x3 kernel to 5x5, so a 5x5 input shrinks to 1x1."""
    height, width = 5, 5
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      output = layers_lib.separable_conv2d(
          images, 32, [3, 3], 2, padding='VALID', rate=2)
      self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 32])

  def testCreateDepthwiseConvValid(self):
    """Depthwise-only VALID conv: spatial 1x1, 3 channels * multiplier 2."""
    height, width = 3, 3
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      output = layers_lib.separable_conv2d(
          images, None, [3, 3], 2, padding='VALID')
      self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 6])

  def testCreateAtrousDepthwiseConvValid(self):
    """Depthwise-only atrous VALID conv on a 5x5 input."""
    height, width = 5, 5
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      output = layers_lib.separable_conv2d(
          images, None, [3, 3], 2, padding='VALID', rate=2)
      self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 6])

  def testCreateConvWithWeightDecay(self):
    """Regularization losses scale linearly with the regularizer weight."""
    random_seed.set_random_seed(0)
    height, width = 3, 3
    with self.cached_session() as sess:
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      regularizer = regularizers.l2_regularizer(0.01)
      layers_lib.separable_conv2d(
          images,
          32, [3, 3],
          2,
          weights_regularizer=regularizer,
          weights_initializer=init_ops.ones_initializer())
      # One loss each for the depthwise and pointwise kernels.
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 2)
      weight_decay = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
      self.assertEqual(
          weight_decay.op.name,
          'SeparableConv2d/depthwise_kernel/Regularizer/l2_regularizer')
      sess.run(variables_lib.global_variables_initializer())
      depth_weight_one = sess.run(weight_decay)
      weight_decay = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[1]
      self.assertEqual(
          weight_decay.op.name,
          'SeparableConv2d/pointwise_kernel/Regularizer/l2_regularizer')
      pointwise_weight_one = sess.run(weight_decay)
      # Rebuild with a 100x larger regularizer weight (0.01 -> 1.0).
      regularizer = regularizers.l2_regularizer(1.0)
      layers_lib.separable_conv2d(
          images,
          32, [3, 3],
          2,
          weights_regularizer=regularizer,
          weights_initializer=init_ops.ones_initializer())
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 4)
      weight_decay = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[2]
      sess.run(variables_lib.global_variables_initializer())
      depth_weight_two = sess.run(weight_decay)
      weight_decay = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[3]
      pointwise_weight_two = sess.run(weight_decay)
      # Kernels are ones-initialized in both graphs, so the second pair of
      # losses must be exactly 100x the first pair.
      self.assertAllClose(
          [100.0 * depth_weight_one, 100.0 * pointwise_weight_one],
          [depth_weight_two, pointwise_weight_two])

  def testReuseConvWithWeightDecay(self):
    """Reusing the scope must not add duplicate regularization losses."""
    height, width = 3, 3
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      regularizer = regularizers.l2_regularizer(0.01)
      layers_lib.separable_conv2d(
          images, 32, [3, 3], 2, weights_regularizer=regularizer, scope='conv1')
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 2)
      layers_lib.separable_conv2d(
          images,
          32, [3, 3],
          2,
          weights_regularizer=regularizer,
          scope='conv1',
          reuse=True)
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 2)

  def testConvWithBatchNorm(self):
    """batch_norm normalizer variables land in the requested collections."""
    height, width = 3, 3
    batch_norm_collection = 'moving_vars'
    normalizer_params = {
        'variables_collections': {
            'beta': [batch_norm_collection],
            'gamma': [batch_norm_collection],
            'moving_mean': [batch_norm_collection],
            'moving_variance': [batch_norm_collection],
        }
    }
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    net = layers_lib.separable_conv2d(
        images,
        8, [3, 3],
        2,
        normalizer_fn=_layers.batch_norm,
        normalizer_params=normalizer_params,
        scope='conv1')
    net = layers_lib.separable_conv2d(
        net,
        32, [3, 3],
        2,
        normalizer_fn=_layers.batch_norm,
        normalizer_params=normalizer_params,
        scope='conv2')
    # 3 batch-norm variables per conv layer, 2 layers.
    self.assertEqual(len(ops.get_collection(batch_norm_collection)), 6)
    self.assertEqual(len(variables.get_variables('conv1/BatchNorm')), 3)
    self.assertEqual(len(variables.get_variables('conv2/BatchNorm')), 3)

  def testConvWithInputsViaPlaceHolder(self):
    """The layer builds and runs with unknown batch/spatial dimensions."""
    height, width = 3, 3
    images_placeholder = array_ops.placeholder(
        dtypes.float32, shape=(None, None, None, 3))
    net = layers_lib.separable_conv2d(
        images_placeholder,
        8, [3, 3],
        2,
        normalizer_fn=_layers.batch_norm,
        normalizer_params={},
        scope='conv1')
    init_op = variables_lib.global_variables_initializer()
    with self.cached_session() as sess:
      images = np.random.rand(5, height, width, 3)
      sess.run(init_op)
      sess.run(net, feed_dict={images_placeholder: images})

  def testTrainableFlagIsPassedOn(self):
    """trainable=False keeps model variables out of TRAINABLE_VARIABLES."""
    for trainable in [True, False]:
      for num_filters in [None, 8]:
        with ops.Graph().as_default():
          input_size = [5, 10, 12, 3]
          images = random_ops.random_uniform(input_size, seed=1)
          layers_lib.separable_conv2d(
              images, num_filters, [3, 3], 1, trainable=trainable)
          model_variables = variables.get_model_variables()
          trainable_variables = variables_lib.trainable_variables()
          for model_variable in model_variables:
            self.assertEqual(trainable, model_variable in trainable_variables)

  def testSepConvNCHW(self):
    """Output-shape arithmetic for VALID, stride-2 separable conv in NCHW."""
    # num_filters=None -> depthwise only: 3 channels * multiplier 2 = 6.
    for num_filters, correct_output_filters in zip((None, 5), (6, 5)):
      with self.cached_session():
        batch, height, width = 4, 10, 12
        kernel_dim, stride = 3, 2
        images = random_ops.random_uniform((batch, 3, height, width), seed=1)
        output = layers_lib.separable_conv2d(
            images,
            num_outputs=num_filters,
            kernel_size=[kernel_dim, kernel_dim],
            depth_multiplier=2,
            stride=stride,
            padding='VALID',
            data_format='NCHW')
        self.assertListEqual(output.get_shape().as_list(), [
            batch, correct_output_filters, (height - kernel_dim + 1) // stride,
            (width - kernel_dim + 1) // stride
        ])
class ScaleGradientTests(test.TestCase):
  """Simple tests of the scale_gradient function."""

  def testBasic(self):
    """Forward pass is the identity; backward pass multiplies the gradient."""
    with self.cached_session():
      x = ops.convert_to_tensor(np.array([42], np.float32))
      y = layers_lib.scale_gradient(x, np.array([2], np.float32))
      # Forward: y == x.
      np.testing.assert_array_equal(x.eval(), y.eval())
      # Backward: an incoming gradient of 3 is scaled by 2.
      g_x, = gradients_impl.gradients(y, [x], [np.array([3], np.float32)])
      np.testing.assert_array_equal([3 * 2], g_x.eval())
class SequenceToImagesTest(test.TestCase):
  """Tests for layers.sequence_to_images."""

  def testImagesToSequenceDims(self):
    """An (11, 14, 5) sequence with height 7 becomes (2, 7, 11, 5) images."""
    time_steps, batch, channels = 11, 14, 5
    seq = np.random.uniform(
        size=(time_steps, batch, channels)).astype(np.float32)
    imgs = _layers.sequence_to_images(seq, 7)
    self.assertListEqual(imgs.get_shape().as_list(), [2, 7, 11, 5])

  def testImagesToSequenceNCHW(self):
    """channels_first output puts channels ahead of the spatial dims."""
    time_steps, batch, channels = 11, 14, 5
    seq = np.random.uniform(
        size=(time_steps, batch, channels)).astype(np.float32)
    imgs = _layers.sequence_to_images(
        seq, 7, output_data_format='channels_first')
    self.assertListEqual(imgs.get_shape().as_list(), [2, 5, 7, 11])
class SoftmaxTests(test.TestCase):
    """Tests of _layers.softmax applied over the last dimension."""

    def setUp(self):
        super(SoftmaxTests, self).setUp()
        # softmax of logits (0, 1): 1/(1+e) and e/(1+e).
        self.low = 1 / (1 + math.e)
        self.high = math.e / (1 + math.e)

    def testSoftmax2D(self):
        logits = constant_op.constant([[0.0, 1], [1, 1], [1, 0]])
        prediction = _layers.softmax(logits)
        exp_prediction = np.array([[self.low, self.high], [0.5, 0.5],
                                   [self.high, self.low]])
        with self.cached_session() as sess:
            prediction = sess.run(prediction)
            self.assertAllClose(exp_prediction, prediction)

    def testSoftmax3D(self):
        """Softmax is taken along the last axis of a 3-D tensor."""
        logits = np.ones((2, 3, 2))
        logits[0, 0, 0] = 0
        logits[1, 1, 1] = 0
        logits = constant_op.constant(logits)
        # Equal logits give 0.5/0.5; the two perturbed rows give low/high.
        exp_prediction = 0.5 * np.ones((2, 3, 2))
        exp_prediction[0, 0, 0] = self.low
        exp_prediction[0, 0, 1] = self.high
        exp_prediction[1, 1, 0] = self.high
        exp_prediction[1, 1, 1] = self.low
        prediction = _layers.softmax(logits)
        with self.cached_session() as sess:
            prediction = sess.run(prediction)
            self.assertAllClose(exp_prediction, prediction)

    def testSoftmax3DUnknownSize(self):
        """Softmax works when the leading dimensions are statically unknown."""
        logits = np.ones((2, 3, 2))
        logits[0, 0, 0] = 0
        logits[1, 1, 1] = 0
        logit_placeholder = array_ops.placeholder(
            dtypes.float32, shape=(None, None, 2))
        feed_dict = {logit_placeholder: logits}
        exp_prediction = 0.5 * np.ones((2, 3, 2))
        exp_prediction[0, 0, 0] = self.low
        exp_prediction[0, 0, 1] = self.high
        exp_prediction[1, 1, 0] = self.high
        exp_prediction[1, 1, 1] = self.low
        prediction = _layers.softmax(logit_placeholder)
        with self.cached_session() as sess:
            prediction = sess.run(prediction, feed_dict=feed_dict)
            self.assertAllClose(exp_prediction, prediction)

    def testSoftmaxUndefinedNthDimension(self):
        """A fully unknown shape is rejected at graph-construction time."""
        logits = array_ops.placeholder(dtypes.float32)
        with self.assertRaises(ValueError):
            _layers.softmax(logits)
class SpatialSoftmaxTests(test.TestCase):
    """Tests of _layers.spatial_softmax (soft-argmax keypoint extraction)."""

    def _SpatialSoftmax(self, x_loc, y_loc, height, width, batch_size, nchannels):
        """Build the expected keypoints for activations at (x_loc, y_loc).

        Returns an array of shape [batch_size, nchannels * 2] holding the
        per-channel expected coordinates mapped into [-1, 1].
        """
        # Convert specified activation locations to range [-1, 1].
        height_lin = np.linspace(-1, 1, height)
        width_lin = np.linspace(-1, 1, width)
        x_lin = np.expand_dims(np.array([height_lin[i] for i in x_loc]), 1)
        y_lin = np.expand_dims(np.array([width_lin[i] for i in y_loc]), 1)
        # Same keypoints are expected for every batch entry.
        np_keypoints = np.array(
            [np.concatenate([x_lin, y_lin], axis=1) for i in range(batch_size)])
        np_keypoints = np.reshape(np_keypoints, [-1, nchannels * 2])
        return np_keypoints

    def testSpatialSoftmaxShape(self):
        """NHWC input yields [batch, channels * 2] keypoints."""
        batch_shape = (2, 35, 30, 2)
        features = array_ops.placeholder(dtypes.float32, shape=batch_shape)
        np_features = np.zeros(batch_shape, dtype=np.float32)
        spatial_softmax = _layers.spatial_softmax(features)
        with self.cached_session() as sess:
            sess.run(variables_lib.global_variables_initializer())
            feed_dict = {features: np_features}
            keypoints = sess.run(spatial_softmax, feed_dict)
            self.assertAllEqual(keypoints.shape, (batch_shape[0], batch_shape[3] * 2))

    def testSpatialSoftmaxShapeNCHW(self):
        """NCHW input yields [batch, channels * 2] keypoints (channels at axis 1)."""
        batch_shape = (2, 2, 35, 35)
        features = array_ops.placeholder(dtypes.float32, shape=batch_shape)
        np_features = np.zeros(batch_shape, dtype=np.float32)
        spatial_softmax = _layers.spatial_softmax(features, data_format='NCHW')
        with self.cached_session() as sess:
            sess.run(variables_lib.global_variables_initializer())
            feed_dict = {features: np_features}
            keypoints = sess.run(spatial_softmax, feed_dict)
            self.assertAllEqual(keypoints.shape, (batch_shape[0], batch_shape[1] * 2))

    def testTwoMaxActivationsSameChannel(self):
        """Two equal peaks in one channel average to the midpoint keypoint."""
        batch_size, height, width, nchannels = (2, 35, 35, 1)
        batch_shape = (batch_size, height, width, nchannels)
        # Put high equal activations on different locations in the same channel.
        features = array_ops.placeholder(dtypes.float32, shape=batch_shape)
        spatial_softmax = _layers.spatial_softmax(features)
        np_features = np.zeros(batch_shape, dtype=np.float32)
        x0, y0 = (10, 10)
        x1, y1 = (20, 20)
        avg_x = (x0 + x1) // 2
        avg_y = (y0 + y1) // 2
        np_features[:, x0, y0, :] = 100.
        np_features[:, x1, y1, :] = 100.
        x_loc = [avg_x]
        y_loc = [avg_y]
        np_keypoints = self._SpatialSoftmax(x_loc, y_loc, height, width, batch_size,
                                            nchannels)
        # Make sure expected location keypoints matches actual location keypoints.
        with self.cached_session() as sess:
            sess.run(variables_lib.global_variables_initializer())
            feed_dict = {features: np_features}
            keypoints = sess.run(spatial_softmax, feed_dict)
            self.assertAllClose(keypoints, np_keypoints)

    def testMaxActivationsAtEdges(self):
        """Peaks at the four corners map to the [-1, 1] coordinate extremes."""
        batch_size, height, width, nchannels = (2, 35, 35, 4)
        batch_shape = (batch_size, height, width, nchannels)
        # Put high activations on edges of spatial extent.
        features = array_ops.placeholder(dtypes.float32, shape=batch_shape)
        spatial_softmax = _layers.spatial_softmax(features)
        np_features = np.zeros(batch_shape, dtype=np.float32)
        edges = [(0, 0), (0, width - 1), (height - 1, 0), (height - 1, width - 1)]
        x_loc, y_loc = zip(*edges)
        # One corner peak per channel.
        for c in range(nchannels):
            np_features[:, x_loc[c], y_loc[c], c] = 100.
        np_keypoints = self._SpatialSoftmax(x_loc, y_loc, height, width, batch_size,
                                            nchannels)
        # Make sure expected location keypoints matches actual location keypoints.
        with self.cached_session() as sess:
            sess.run(variables_lib.global_variables_initializer())
            feed_dict = {features: np_features}
            keypoints = sess.run(spatial_softmax, feed_dict)
            self.assertAllClose(keypoints, np_keypoints)

    def testSpatialSoftmaxVariableSized(self):
        """The same graph handles inputs of different spatial sizes."""
        batch_size = 2
        nchannels = 2
        height1, width1 = (35, 30)
        height2, width2 = (20, 20)
        batch_shape1 = (batch_size, height1, width1, nchannels)
        batch_shape2 = (batch_size, height2, width2, nchannels)
        variable_sized_shape = (None, None, None, 2)
        # Put high activations on single spatial locations.
        features = array_ops.placeholder(dtypes.float32, shape=variable_sized_shape)
        spatial_softmax = _layers.spatial_softmax(features)
        np_features1 = np.zeros(batch_shape1, dtype=np.float32)
        np_features2 = np.zeros(batch_shape2, dtype=np.float32)
        x_loc = [15, 2]
        y_loc = [10, 9]
        for c in range(nchannels):
            np_features1[:, x_loc[c], y_loc[c], c] = 100.
            np_features2[:, x_loc[c], y_loc[c], c] = 100.
        np_keypoints1 = self._SpatialSoftmax(x_loc, y_loc, height1, width1,
                                             batch_size, nchannels)
        np_keypoints2 = self._SpatialSoftmax(x_loc, y_loc, height2, width2,
                                             batch_size, nchannels)
        # Make sure expected location keypoints matches actual location keypoints.
        with self.cached_session() as sess:
            sess.run(variables_lib.global_variables_initializer())
            feed_dict = {features: np_features1}
            tf_keypoints1 = sess.run(spatial_softmax, feed_dict)
            self.assertAllClose(tf_keypoints1, np_keypoints1)
            feed_dict = {features: np_features2}
            tf_keypoints2 = sess.run(spatial_softmax, feed_dict)
            self.assertAllClose(tf_keypoints2, np_keypoints2)

    def testSpatialSoftmax(self):
        """A single strong peak per channel maps to that location's keypoint."""
        batch_size, height, width, nchannels = (2, 35, 35, 2)
        batch_shape = (batch_size, height, width, nchannels)
        # Put high activations on single spatial locations.
        features = array_ops.placeholder(dtypes.float32, shape=batch_shape)
        spatial_softmax = _layers.spatial_softmax(features)
        np_features = np.zeros(batch_shape, dtype=np.float32)
        x_loc = [15, 2]
        y_loc = [10, 28]
        for c in range(nchannels):
            np_features[:, x_loc[c], y_loc[c], c] = 100.
        np_keypoints = self._SpatialSoftmax(x_loc, y_loc, height, width, batch_size,
                                            nchannels)
        # Make sure expected location keypoints matches actual location keypoints.
        with self.cached_session() as sess:
            sess.run(variables_lib.global_variables_initializer())
            feed_dict = {features: np_features}
            keypoints = sess.run(spatial_softmax, feed_dict)
            self.assertAllClose(keypoints, np_keypoints)

    def testSpatialSoftmaxNCHW(self):
        """Same single-peak check with channels-first input layout."""
        batch_size, nchannels, height, width = (2, 2, 35, 35)
        batch_shape = (batch_size, nchannels, height, width)
        # Put high activations on single spatial locations.
        features = array_ops.placeholder(dtypes.float32, shape=batch_shape)
        spatial_softmax = _layers.spatial_softmax(features, data_format='NCHW')
        np_features = np.zeros(batch_shape, dtype=np.float32)
        x_loc = [15, 2]
        y_loc = [10, 28]
        for c in range(nchannels):
            np_features[:, c, x_loc[c], y_loc[c]] = 100.
        np_keypoints = self._SpatialSoftmax(x_loc, y_loc, height, width, batch_size,
                                            nchannels)
        # Make sure expected location keypoints matches actual location keypoints.
        with self.cached_session() as sess:
            sess.run(variables_lib.global_variables_initializer())
            feed_dict = {features: np_features}
            keypoints = sess.run(spatial_softmax, feed_dict)
            self.assertAllClose(keypoints, np_keypoints)

    def testSpatialSoftmaxToFullyConnected(self):
        """Keypoint output composes with a fully connected layer without error."""
        batch_shape = (2, 35, 35, 2)
        features = array_ops.placeholder(dtypes.float32, shape=batch_shape)
        spatial_softmax = _layers.spatial_softmax(features)
        net = _layers.fully_connected(spatial_softmax, 10)
        np_features = np.zeros(batch_shape, dtype=np.float32)
        with self.cached_session() as sess:
            sess.run(variables_lib.global_variables_initializer())
            feed_dict = {features: np_features}
            sess.run(net, feed_dict)
class StackTests(test.TestCase):
    """Tests for _layers.stack, which chains one layer op over a list of sizes."""

    def testStackFullyConnected(self):
        height, width = 3, 3
        with self.cached_session():
            images = np.random.uniform(size=(5, height * width * 3))
            output = _layers.stack(images, _layers.fully_connected, [10, 20, 30])
            # Three stacked layers -> third op instance under the 'Stack' scope.
            self.assertEqual(output.op.name, 'Stack/fully_connected_3/Relu')
            self.assertListEqual(output.get_shape().as_list(), [5, 30])

    def testStackFullyConnectedFailOnReuse(self):
        """Stacking inside a reuse=True scope fails: the variables don't exist yet."""
        height, width = 3, 3
        with self.cached_session():
            with variable_scope.variable_scope('test', reuse=True):
                images = np.random.uniform(size=(5, height * width * 3))
                with self.assertRaises(ValueError):
                    _layers.stack(images, _layers.fully_connected, [10, 20, 30])

    def testStackRelu(self):
        height, width = 3, 3
        with self.cached_session():
            images = random_ops.random_uniform(
                (5, height * width * 3), seed=1, name='images')
            output = _layers.stack(images, layers_lib.relu, [10, 20, 30])
            self.assertEqual(output.op.name, 'Stack/fully_connected_3/Relu')
            self.assertListEqual(output.get_shape().as_list(), [5, 30])

    def testStackElu(self):
        height, width = 3, 3
        with self.cached_session():
            images = random_ops.random_uniform(
                (5, height * width * 3), seed=1, name='images')
            output = _layers.stack(images, layers_lib.elu, [10, 20, 30])
            self.assertEqual(output.op.name, 'Stack/fully_connected_3/Elu')
            self.assertListEqual(output.get_shape().as_list(), [5, 30])

    def testStackConvolution2d(self):
        height, width = 3, 3
        with self.cached_session():
            images = random_ops.random_uniform(
                (5, height, width, 3), seed=1, name='images')
            output = _layers.stack(
                images,
                layers_lib.convolution2d, [10, 20, 30],
                kernel_size=[3, 3],
                padding='SAME')
            self.assertEqual(output.op.name, 'Stack/convolution2d_3/Relu')
            self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 30])

    def testStackWithScope(self):
        """A custom scope name replaces the default 'Stack' prefix in op names."""
        height, width = 3, 3
        with self.cached_session():
            images = random_ops.random_uniform(
                (5, height, width, 3), seed=1, name='images')
            output = _layers.stack(
                images,
                layers_lib.convolution2d, [10, 20, 30],
                kernel_size=[3, 3],
                padding='SAME',
                scope='conv1')
            self.assertEqual(output.op.name, 'conv1/conv1_3/Relu')
            self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 30])
class UnitNormTests(test.TestCase):
    """Tests for _layers.unit_norm (normalizing to unit norm along one axis)."""

    def testUnitNormWithRandomMatrix(self):
        """After unit_norm along `dim`, norms along that axis are all 1."""
        height, width = 2, 3
        for dim in range(3):
            random_seed.set_random_seed(0)
            image = random_ops.random_uniform((height, width, 3))
            output = _layers.unit_norm(image, dim=dim, epsilon=1e-6)
            norms = math_ops.sqrt(
                math_ops.reduce_sum(math_ops.square(output), axis=dim))
            # Reducing over `dim` leaves the other two axes.
            shape = [height, width, 3]
            del shape[dim]
            expected = np.ones(shape)
            with self.cached_session():
                actual = norms.eval()
                self.assertAllClose(expected, actual, 1e-4, 1e-4)

    def testDimEqualToRankRaisesError(self):
        """dim must be strictly less than the input rank."""
        height, width = 2, 3
        random_seed.set_random_seed(0)
        image = random_ops.random_uniform((height, width, 3))
        with self.assertRaises(ValueError):
            _layers.unit_norm(image, dim=3, epsilon=1e-6)

    def testUnknownRankRaisesError(self):
        image = array_ops.placeholder(dtypes.float32)
        with self.assertRaises(ValueError):
            _layers.unit_norm(image, dim=2)

    def testKnownRankUnknownDimsSucceeds(self):
        """Known rank with unknown leading dimensions is accepted."""
        height, width = 2, 3
        for dim in range(3):
            placeholder_value = np.ones((height, width, 3))
            shape = [height, width, 3]
            del shape[dim]
            expected = np.ones(shape)
            image = array_ops.placeholder(dtypes.float32, (None, None, 3))
            output = _layers.unit_norm(image, dim=dim, epsilon=1e-6)
            norms = math_ops.sqrt(
                math_ops.reduce_sum(math_ops.square(output), axis=dim))
            with self.cached_session():
                actual = norms.eval({image: placeholder_value})
                self.assertAllClose(expected, actual, 1e-4, 1e-4)
class PoincareNormalizeTest(test.TestCase):
    """Tests for _layers.poincare_normalize (projection into the unit ball)."""

    def _PoincareNormalize(self, x, dim, epsilon=1e-5):
        """NumPy reference: rescale x to norm 1 - epsilon where it exceeds that."""
        if isinstance(dim, list):
            norm = np.linalg.norm(x, axis=tuple(dim))
            # Re-insert the reduced axes so the norm broadcasts against x.
            for d in dim:
                norm = np.expand_dims(norm, d)
            norm_x = ((1. - epsilon) * x) / norm
        else:
            norm = np.expand_dims(np.apply_along_axis(np.linalg.norm, dim, x), dim)
            norm_x = ((1. - epsilon) * x) / norm
        # Only points with norm > 1 - epsilon are rescaled; others pass through.
        return np.where(norm > 1.0 - epsilon, norm_x, x)

    def testPoincareNormalize(self):
        """TF output matches the NumPy reference for each single axis."""
        x_shape = [20, 7, 3]
        epsilon = 1e-5
        tol = 1e-6
        np.random.seed(1)
        x_np = np.random.random_sample(x_shape).astype(np.float32)
        for dim in range(len(x_shape)):
            y_np = self._PoincareNormalize(x_np, dim, epsilon)
            with self.cached_session():
                x_tf = constant_op.constant(x_np, name='x')
                y_tf = _layers.poincare_normalize(x_tf, dim, epsilon)
                y_tf_eval = y_tf.eval()
                # Both reference and TF results must lie inside the epsilon-ball.
                norm = np.linalg.norm(y_np, axis=dim)
                self.assertLessEqual(norm.max(), 1. - epsilon + tol)
                norm = np.linalg.norm(y_tf_eval, axis=dim)
                self.assertLessEqual(norm.max(), 1. - epsilon + tol)
                self.assertAllClose(y_np, y_tf_eval)

    def testPoincareNormalizeDimArray(self):
        """Normalization over a list of axes matches the NumPy reference."""
        x_shape = [20, 7, 3]
        epsilon = 1e-5
        tol = 1e-6
        np.random.seed(1)
        x_np = np.random.random_sample(x_shape).astype(np.float32)
        dim = [1, 2]
        y_np = self._PoincareNormalize(x_np, dim, epsilon)
        with self.cached_session():
            x_tf = constant_op.constant(x_np, name='x')
            y_tf = _layers.poincare_normalize(x_tf, dim, epsilon)
            y_tf_eval = y_tf.eval()
            norm = np.linalg.norm(y_np, axis=tuple(dim))
            self.assertLess(norm.max(), 1. - epsilon + tol)
            norm = np.linalg.norm(y_tf_eval, axis=tuple(dim))
            self.assertLess(norm.max(), 1. - epsilon + tol)
            self.assertAllClose(y_np, y_tf_eval, rtol=1e-6, atol=1e-6)

    def testPoincareNormalizeGradient(self):
        """Numeric gradient check for each normalization axis."""
        x_shape = [20, 7, 3]
        np.random.seed(1)
        # float64 for accurate finite-difference gradients.
        x_np = np.random.random_sample(x_shape).astype(np.float64)
        for dim in range(len(x_shape)):
            with self.cached_session():
                x_tf = constant_op.constant(x_np, name='x')
                y_tf = _layers.poincare_normalize(x_tf, dim)
                err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf,
                                                              x_shape)
            print('PoinCareNormalize gradient err = %g ' % err)
            self.assertLess(err, 1e-4)
# TODO(b/28426988): Add separate tests for non-legacy versions.
class LegacyFullyConnectedTest(test.TestCase):
    """Tests for the legacy_fully_connected / legacy_relu layer wrappers."""

    def setUp(self):
        super(LegacyFullyConnectedTest, self).setUp()
        random_seed.set_random_seed(1234)
        # 2-D input: batch of 2, 3 features each.
        self.input = constant_op.constant([[1., 2., 3.], [-4., 15., -6.]])
        # 3-D input of shape [2, 4, 3].
        self.input_3_dim_arr = [[[1., 1.1, 1.2], [2., 2.1, 2.2], [3., 3.1, 3.2],
                                 [4., 4.1, 4.2]], [[5., 5.1, 5.2], [6., 6.1, 6.2],
                                                   [7., 7.1, 7.2], [8., 8.1, 8.2]]]
        self.input_3_dim = constant_op.constant(self.input_3_dim_arr)
        # The layer must not create summaries as a side effect.
        assert not ops.get_collection(ops.GraphKeys.SUMMARIES)

    def _fully_connected_basic_use(self, x, num_output_units, expected_shape):
        """Run legacy_fully_connected with relu; check shape and collections."""
        output = _layers.legacy_fully_connected(
            x, num_output_units, activation_fn=nn_ops.relu)
        with session.Session() as sess:
            # Running before initialization must fail.
            with self.assertRaises(errors_impl.FailedPreconditionError):
                sess.run(output)
            variables_lib.global_variables_initializer().run()
            out_value, shape_value = sess.run([output, array_ops.shape(output)])
        self.assertAllClose(shape_value, expected_shape)
        self.assertEqual(output.get_shape().as_list(), expected_shape)
        self.assertTrue(np.all(out_value >= 0), 'Relu should have all values >= 0.')
        # Exactly one weight and one bias variable; no regularization losses.
        self.assertEqual(2,
                         len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)))
        self.assertEqual(
            0, len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)))

    def test_fully_connected_basic_use(self):
        self._fully_connected_basic_use(self.input, 8, [2, 8])

    def test_fully_connected_basic_use_multi_dim(self):
        """3-D input: the layer applies along the last dimension only."""
        for last_dim in [1, 3]:
            # Re-run setUp to get a fresh graph per iteration.
            self.setUp()
            self._fully_connected_basic_use(self.input_3_dim, last_dim,
                                            [2, 4, last_dim])

    def test_relu_layer_basic_use(self):
        output = layers_lib.legacy_relu(self.input, 8)
        with session.Session() as sess:
            with self.assertRaises(errors_impl.FailedPreconditionError):
                sess.run(output)
            variables_lib.global_variables_initializer().run()
            out_value = sess.run(output)
        self.assertEqual(output.get_shape().as_list(), [2, 8])
        self.assertTrue(np.all(out_value >= 0), 'Relu should have all values >= 0.')
        self.assertEqual(2,
                         len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)))
        self.assertEqual(
            0, len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)))

    def test_variable_reuse_with_scope(self):
        """Reusing a variable scope shares weights; fresh calls do not."""
        with variable_scope.variable_scope('test') as vs:
            output1 = layers_lib.legacy_relu(self.input, 8)
            output2 = layers_lib.legacy_relu(self.input, 8)
        with variable_scope.variable_scope(vs, reuse=True):
            output3 = layers_lib.legacy_relu(self.input, 8)
        with session.Session() as sess:
            variables_lib.global_variables_initializer().run()
            out_value1, out_value2, out_value3 = sess.run([output1, output2, output3])
        # output2 used fresh variables; output3 reused output1's.
        self.assertFalse(np.allclose(out_value1, out_value2))
        self.assertAllClose(out_value1, out_value3)

    def test_variable_reuse_with_template(self):
        """make_template shares variables across calls of the same template."""
        tmpl1 = template.make_template(
            'test', _layers.legacy_fully_connected, num_output_units=8)
        output1 = tmpl1(self.input)
        output2 = tmpl1(self.input)
        with session.Session() as sess:
            variables_lib.global_variables_initializer().run()
            out_value1, out_value2 = sess.run([output1, output2])
        self.assertAllClose(out_value1, out_value2)

    def _custom_initializers(self, x, num_output_units, expected_outputs):
        """With constant initializers the output is exactly predictable."""
        output = layers_lib.legacy_relu(
            x,
            num_output_units,
            weight_init=init_ops.constant_initializer(2.0),
            bias_init=init_ops.constant_initializer(1.0))
        with session.Session() as sess:
            variables_lib.global_variables_initializer().run()
            out_value = sess.run(output)
        self.assertAllClose(np.array(expected_outputs), out_value)

    def test_custom_initializers(self):
        # Each output unit is 2 * sum(features) + 1, clamped by relu.
        self._custom_initializers(self.input, 2, [[13.0, 13.0], [11.0, 11.0]])

    def test_custom_initializers_multi_dim(self):
        self._custom_initializers(
            self.input_3_dim, 2,
            [[[7.6, 7.6], [13.6, 13.6], [19.6, 19.6], [25.6, 25.6]],
             [[31.6, 31.6], [37.6, 37.6], [43.6, 43.6], [49.6, 49.6]]])

    def test_custom_collections(self):
        """Weights, biases and outputs land in the requested collections."""
        layers_lib.legacy_relu(
            self.input,
            2,
            weight_collections=['unbiased'],
            bias_collections=['biased'],
            output_collections=['output'])
        self.assertEqual(1, len(ops.get_collection('unbiased')))
        self.assertEqual(1, len(ops.get_collection('biased')))
        self.assertEqual(1, len(ops.get_collection('output')))
        self.assertEqual(2, len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))

    def test_all_custom_collections(self):
        layers_lib.legacy_relu(
            self.input,
            2,
            weight_collections=['unbiased', 'all'],
            bias_collections=['biased', 'all'])
        self.assertEqual(1, len(ops.get_collection('unbiased')))
        self.assertEqual(1, len(ops.get_collection('biased')))
        # 'all' received both variables, matching the trainable collection.
        self.assertEqual(
            ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES),
            ops.get_collection('all'))

    def test_no_bias(self):
        # bias_init=None suppresses the bias variable entirely.
        layers_lib.legacy_relu(self.input, 2, bias_init=None)
        self.assertEqual(1, len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))

    def test_no_activation(self):
        y = _layers.legacy_fully_connected(self.input, 2)
        self.assertEqual(2, len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
        # Without an activation the final op is the bias addition.
        self.assertEqual('BiasAdd', y.op.type)

    def test_no_activation_no_bias(self):
        y = _layers.legacy_fully_connected(self.input, 2, bias_init=None)
        self.assertEqual(1, len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
        self.assertEqual('MatMul', y.op.type)

    def test_regularizer(self):
        """The weight regularizer's output is added to REGULARIZATION_LOSSES."""
        tensor = constant_op.constant(5.0)

        def test_fn(_):
            return tensor

        _layers.legacy_fully_connected(self.input, 2, weight_regularizer=test_fn)
        regs = [
            ops.convert_to_tensor(r)
            for r in ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
        ]
        self.assertEqual([tensor], regs)

    def test_regularizer_with_multiple_variables(self):
        tensor = constant_op.constant(5.0)

        def test_fn(_):
            return tensor

        _layers.legacy_fully_connected(self.input, 2, weight_regularizer=test_fn)
        _layers.legacy_fully_connected(self.input, 2, weight_regularizer=test_fn)
        # Two layers -> two registered regularization losses.
        regs = [
            ops.convert_to_tensor(r)
            for r in ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
        ]
        self.assertEqual([tensor, tensor], regs)

    def test_regularizer_with_variable_reuse(self):
        """Reused variables must not register a second regularization loss."""
        tensor = constant_op.constant(5.0)

        def test_fn(_):
            return tensor

        with variable_scope.variable_scope('test') as vs:
            _layers.legacy_fully_connected(self.input, 2, weight_regularizer=test_fn)
        with variable_scope.variable_scope(vs, reuse=True):
            _layers.legacy_fully_connected(self.input, 2, weight_regularizer=test_fn)
        regs = [
            ops.convert_to_tensor(r)
            for r in ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
        ]
        self.assertEqual([tensor], regs)

    def test_empty_x_results_in_empty_output(self):
        # Empty x is common if someone masks their input with tf.boolean_mask in
        # order to drop missing entries, and in a particular batch all entries are
        # missing.
        with self.cached_session():
            x = np.array([]).reshape(0, 3)
            self.assertEqual(0, array_ops.size(x).eval())
            y = _layers.legacy_fully_connected(x, 2, activation_fn=nn_ops.softmax)
            variables_lib.global_variables_initializer().run()
            expected_y = np.array([]).reshape(0, 2)
            np.testing.assert_array_equal(expected_y, y.eval())

    def test_shapes_variable_first_dim(self):
        # first dimension is not known statically.
        x = array_ops.placeholder(dtypes.float32, shape=[None, 4, 3])
        y = _layers.legacy_fully_connected(x, 1)
        # in the output we still only know the 2nd and 3rd dimensions statically.
        self.assertEqual(y.get_shape().as_list(), [None, 4, 1])
        with self.cached_session() as sess:
            variables_lib.global_variables_initializer().run()
            # we can feed in input with first dimension 2
            shape_value = sess.run(
                array_ops.shape(y), feed_dict={
                    x: self.input_3_dim_arr
                })
            self.assertAllClose(shape_value, [2, 4, 1])
            # we can feed in input with first dimension 1
            shape_value = sess.run(
                array_ops.shape(y), feed_dict={
                    x: [self.input_3_dim_arr[0]]
                })
            self.assertAllClose(shape_value, [1, 4, 1])
            # we cannot feed in input with inconsistent dimensions
            with self.assertRaises(ValueError):
                sess.run(array_ops.shape(y), feed_dict={x: [[[]]]})

    def _unknown_dim_invalid_input(self, last_dim):
        x = array_ops.placeholder(dtypes.float32, shape=[3, last_dim])
        _layers.legacy_fully_connected(x, 2, activation_fn=None)

    def test_known_dim_valid_input(self):
        self._unknown_dim_invalid_input(last_dim=3)

    def test_unknown_dim_invalid_input(self):
        """An unknown last dimension is rejected at graph-construction time."""
        with self.assertRaisesRegexp(
                ValueError, 'last dimension of x must be known but is None'):
            self._unknown_dim_invalid_input(last_dim=None)

    def test_1d_invalid_input(self):
        """Rank-1 input is rejected: the layer needs at least [batch, features]."""
        with self.cached_session():
            with self.assertRaisesRegexp(ValueError,
                                         'rank of x must be at least 2 not: 1'):
                x = constant_op.constant([[]], shape=[0])
                _layers.legacy_fully_connected(x, 2, activation_fn=nn_ops.softmax)
class MaxOutTest(test.TestCase):
    """Tests for _layers.maxout (max over groups of the last dimension)."""

    def test_simple(self):
        inputs = random_ops.random_uniform((64, 10, 36), seed=1)
        # 36 features reduced to 3 units (max over groups of 12).
        graph = _layers.maxout(inputs, num_units=3)
        self.assertEqual(graph.get_shape().as_list(), [64, 10, 3])

    def test_fully_connected(self):
        inputs = random_ops.random_uniform((64, 50), seed=1)
        graph = _layers.fully_connected(inputs, 50)
        graph = _layers.maxout(graph, num_units=10)
        self.assertEqual(graph.get_shape().as_list(), [64, 10])

    def test_nchw(self):
        # NOTE(review): despite the name, the input here is NHWC-shaped
        # (channels last) — conv2d is called with its default data format.
        inputs = random_ops.random_uniform((10, 100, 100, 3), seed=1)
        graph = _layers.conv2d(inputs, 10, 3, padding='SAME')
        graph = _layers.maxout(graph, num_units=1)
        self.assertEqual(graph.get_shape().as_list(), [10, 100, 100, 1])

    def test_invalid_shape(self):
        inputs = random_ops.random_uniform((10, 100, 100, 3), seed=1)
        graph = _layers.conv2d(inputs, 3, 10)
        # num_units must evenly divide the number of features (3 here).
        with self.assertRaisesRegexp(ValueError, 'number of features'):
            graph = _layers.maxout(graph, num_units=2)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
    test.main()
| {
"content_hash": "4fb8b90ebfa6fb8ba86536268373b221",
"timestamp": "",
"source": "github",
"line_count": 4238,
"max_line_length": 80,
"avg_line_length": 41.77394997640396,
"alnum_prop": 0.6495046261254646,
"repo_name": "google-research/tf-slim",
"id": "0aa91d3ba2c57df5c25806438c011760ba14da8d",
"size": "177742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tf_slim/layers/layers_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1856010"
},
{
"name": "Shell",
"bytes": "3404"
}
],
"symlink_target": ""
} |
"""
Invert a binary tree.
4
/ \
2 7
/ \ / \
1 3 6 9
to
4
/ \
7 2
/ \ / \
9 6 3 1
Trivia:
This problem was inspired by this original tweet by Max Howell:
Google: 90% of our engineers use the software you wrote (Homebrew), but you can’t invert a binary tree on a whiteboard so fuck off.
"""
# Definition for a binary tree node.
class TreeNode:
    """A binary tree node holding a value and optional left/right children."""

    def __init__(self, x):
        self.val = x       # payload value
        self.left = None   # left child (TreeNode or None)
        self.right = None  # right child (TreeNode or None)
class Solution:
    # @param {TreeNode} root
    # @return {TreeNode}
    def invertTree(self, root):
        """Invert a binary tree in place and return its root.

        Recursively swaps the left and right child of every node.
        Time O(n); space O(h) for the recursion stack (h = tree height).
        """
        # Base case: empty subtree. Use identity comparison for None (PEP 8),
        # not `== None`.
        if root is None:
            return None
        # Invert both subtrees and swap them in a single step.
        root.left, root.right = self.invertTree(root.right), self.invertTree(root.left)
        # return root with reverted children
        return root
"content_hash": "0d0fab0ce597a6f86af26505a33443e2",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 131,
"avg_line_length": 18.27659574468085,
"alnum_prop": 0.6321303841676368,
"repo_name": "Ahmed--Mohsen/leetcode",
"id": "9ea8f2f917feead30d0729995ea1b563c0e99c1d",
"size": "885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invert_binary_tree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "317482"
}
],
"symlink_target": ""
} |
""" Configuration handler """
from dynamic_dynamodb import config
CONFIGURATION = config.get_configuration()
def get_configured_tables():
    """ Returns a list of all configured tables

    :returns: list -- List of tables
    """
    try:
        # list() keeps the documented return type on Python 3, where
        # dict.keys() is a view object rather than a list.
        return list(CONFIGURATION['tables'].keys())
    except KeyError:
        # No 'tables' section configured at all.
        return []
def get_global_option(option):
    """ Returns the value of the option

    :returns: str or None
    """
    # A missing 'global' section or unknown option simply means "not set".
    return CONFIGURATION.get('global', {}).get(option)
def get_gsi_option(table_key, gsi_key, option):
    """ Returns the value of the option

    :type table_key: str
    :param table_key: Table key name
    :type gsi_key: str
    :param gsi_key: GSI key name
    :returns: str or None
    """
    # Walk the nested configuration; any missing level yields None.
    gsis = CONFIGURATION.get('tables', {}).get(table_key, {}).get('gsis', {})
    return gsis.get(gsi_key, {}).get(option)
def get_logging_option(option):
    """ Returns the value of the option

    :returns: str or None
    """
    # Unset logging options resolve to None rather than raising.
    return CONFIGURATION.get('logging', {}).get(option)
def get_monitoring_option(option):
    """ Returns the value of the option

    :returns: str or None
    """
    # Unset monitoring options resolve to None rather than raising.
    return CONFIGURATION.get('monitoring', {}).get(option)
def get_table_option(table_name, option):
    """ Returns the value of the option

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :returns: str or None
    """
    # An unknown table or unset option resolves to None rather than raising.
    return CONFIGURATION.get('tables', {}).get(table_name, {}).get(option)
| {
"content_hash": "3be2523b635c9d0a7299f32670b5970b",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 74,
"avg_line_length": 21.394736842105264,
"alnum_prop": 0.6260762607626076,
"repo_name": "tellybug/dynamic-dynamodb",
"id": "42713a2a2a5318f18299440bafedc5025ba62aaf",
"size": "1650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dynamic_dynamodb/config_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "152"
},
{
"name": "Python",
"bytes": "180589"
}
],
"symlink_target": ""
} |
import time
from prometheus_api_client import PrometheusConnect
class PrometheusChecker:
def __init__(self):
self.prometheus_client = PrometheusConnect(url="http://localhost:9090", disable_ssl=True)
def wait_for_metric_class_on_prometheus(self, metric_class, timeout_seconds):
start_time = time.perf_counter()
while (time.perf_counter() - start_time) < timeout_seconds:
if self.verify_metric_class(metric_class):
return True
time.sleep(1)
return False
def wait_for_processor_metric_on_prometheus(self, metric_class, timeout_seconds, processor_name):
start_time = time.perf_counter()
while (time.perf_counter() - start_time) < timeout_seconds:
if self.verify_processor_metric(metric_class, processor_name):
return True
time.sleep(1)
return False
def verify_processor_metric(self, metric_class, processor_name):
if metric_class == "GetFileMetrics":
return self.verify_getfile_metrics(metric_class, processor_name)
else:
return self.verify_general_processor_metrics(metric_class, processor_name)
def verify_metric_class(self, metric_class):
if metric_class == "RepositoryMetrics":
return self.verify_repository_metrics()
elif metric_class == "QueueMetrics":
return self.verify_queue_metrics()
elif metric_class == "FlowInformation":
return self.verify_flow_information_metrics()
elif metric_class == "DeviceInfoNode":
return self.verify_device_info_node_metrics()
elif metric_class == "AgentStatus":
return self.verify_agent_status_metrics()
else:
raise Exception("Metric class '%s' verification is not implemented" % metric_class)
def verify_repository_metrics(self):
label_list = [{'repository_name': 'provenance'}, {'repository_name': 'flowfile'}]
return all((self.verify_metrics_exist(['minifi_is_running', 'minifi_is_full', 'minifi_repository_size'], 'RepositoryMetrics', labels) for labels in label_list))
def verify_queue_metrics(self):
return self.verify_metrics_exist(['minifi_queue_data_size', 'minifi_queue_data_size_max', 'minifi_queue_size', 'minifi_queue_size_max'], 'QueueMetrics')
def verify_general_processor_metrics(self, metric_class, processor_name):
labels = {'processor_name': processor_name}
return self.verify_metrics_exist(['minifi_average_onTrigger_runtime_milliseconds', 'minifi_last_onTrigger_runtime_milliseconds'], metric_class, labels) and \
self.verify_metrics_larger_than_zero(['minifi_onTrigger_invocations', 'minifi_transferred_flow_files', 'minifi_transferred_to_success', 'minifi_transferred_bytes'], metric_class, labels)
def verify_getfile_metrics(self, metric_class, processor_name):
labels = {'processor_name': processor_name}
return self.verify_general_processor_metrics(metric_class, processor_name) and \
self.verify_metrics_exist(['minifi_input_bytes', 'minifi_accepted_files'], metric_class, labels)
def verify_flow_information_metrics(self):
return self.verify_metrics_exist(['minifi_queue_data_size', 'minifi_queue_data_size_max', 'minifi_queue_size', 'minifi_queue_size_max'], 'FlowInformation') and \
self.verify_metric_exists('minifi_is_running', 'FlowInformation', {'component_name': 'FlowController'})
def verify_device_info_node_metrics(self):
return self.verify_metrics_exist(['minifi_physical_mem', 'minifi_memory_usage', 'minifi_cpu_utilization'], 'DeviceInfoNode')
def verify_agent_status_metrics(self):
label_list = [{'repository_name': 'provenance'}, {'repository_name': 'flowfile'}]
for labels in label_list:
if not (self.verify_metric_exists('minifi_is_running', 'AgentStatus', labels)
and self.verify_metric_exists('minifi_is_full', 'AgentStatus', labels)
and self.verify_metric_exists('minifi_repository_size', 'AgentStatus', labels)):
return False
return self.verify_metric_exists('minifi_uptime_milliseconds', 'AgentStatus') and \
self.verify_metric_exists('minifi_agent_memory_usage_bytes', 'AgentStatus') and \
self.verify_metric_exists('minifi_agent_cpu_utilization', 'AgentStatus')
def verify_metric_exists(self, metric_name, metric_class, labels={}):
labels['metric_class'] = metric_class
labels['agent_identifier'] = "Agent1"
return len(self.prometheus_client.get_current_metric_value(metric_name=metric_name, label_config=labels)) > 0
def verify_metrics_exist(self, metric_names, metric_class, labels={}):
return all((self.verify_metric_exists(metric_name, metric_class, labels) for metric_name in metric_names))
def verify_metric_larger_than_zero(self, metric_name, metric_class, labels={}):
labels['metric_class'] = metric_class
result = self.prometheus_client.get_current_metric_value(metric_name=metric_name, label_config=labels)
return len(result) > 0 and int(result[0]['value'][1]) > 0
def verify_metrics_larger_than_zero(self, metric_names, metric_class, labels=None):
    """Return True only if every metric in *metric_names* has a positive current value.

    Fix: mutable default argument (``labels={}``) replaced by None-sentinel;
    the callee mutates the dict it receives, so sharing one default dict
    across calls was unsafe.
    """
    return all(self.verify_metric_larger_than_zero(name, metric_class, labels or {})
               for name in metric_names)
| {
"content_hash": "fb4c8c5dbc17fa54f9a4614b1a873661",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 198,
"avg_line_length": 57.723404255319146,
"alnum_prop": 0.6763730187983782,
"repo_name": "dtrodrigues/nifi-minifi-cpp",
"id": "14dc8eda6ef3f1388c1d93d1192624193b1b7311",
"size": "5426",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "docker/test/integration/minifi/core/PrometheusChecker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12771"
},
{
"name": "C",
"bytes": "194133"
},
{
"name": "C++",
"bytes": "7328208"
},
{
"name": "CMake",
"bytes": "454842"
},
{
"name": "Dockerfile",
"bytes": "14380"
},
{
"name": "Gherkin",
"bytes": "159174"
},
{
"name": "Java",
"bytes": "86800"
},
{
"name": "LLVM",
"bytes": "2594"
},
{
"name": "Lua",
"bytes": "3132"
},
{
"name": "Python",
"bytes": "330016"
},
{
"name": "Shell",
"bytes": "91629"
},
{
"name": "Yacc",
"bytes": "7064"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import unicode_literals
import ctypes
import struct
class Packet(object):
    """One fixed-size binary protocol packet for the Zalpha base controller.

    The packet is a SIZE-byte buffer: a command word (native-order uint16,
    per struct format 'H' with no byte-order prefix) at offset 0, and a
    MAX_PAYLOAD-byte payload starting at DATA_OFFSET.  The u8/s8/.../f/d
    properties expose the payload as lazily-created typed element views.
    """

    # -- Command opcodes (0xFAxx = requests, 0xF9xx = replies) --
    # General
    VERSION_INFO = 0xFA00
    # Differential Base
    SET_ACCELERATION = 0xFA10
    GET_ACCELERATION = 0xFA11
    SET_TARGET_SPEED = 0xFA12
    GET_TARGET_SPEED = 0xFA13
    MOVE_STRAIGHT = 0xFA14
    MOVE_BEZIER = 0xFA15
    ROTATE = 0xFA16
    GET_ACTION_STATUS = 0xFA17
    PAUSE_ACTION = 0xFA18
    RESUME_ACTION = 0xFA19
    STOP_ACTION = 0xFA1A
    RESET_ENCODER = 0xFA1B
    GET_ENCODER = 0xFA1C
    GET_RAW_ENCODER = 0xFA1D
    # Safety
    GET_SAFETY_FLAG = 0xFA30
    GET_ENCODER_AND_SAFETY_FLAG = 0xFA31
    GET_RAW_ENCODER_AND_SAFETY_FLAG = 0xFA32
    # Power
    GET_BATTERY = 0xFA40
    SET_CHARGING = 0xFA41
    GET_CHARGING = 0xFA42
    # IO
    GET_INPUTS = 0xFA50
    SET_OUTPUTS = 0xFA51
    GET_OUTPUTS = 0xFA52
    # Replies
    RESULT_OK = 0xF900
    RESULT_ERROR_INVALID_COMMAND = 0xF901
    RESULT_ERROR_BUSY = 0xF902
    # Sizes
    DATA_OFFSET = 8  # payload begins after the 8-byte header area
    MAX_PAYLOAD = 64  # must be a multiple of 8
    SIZE = DATA_OFFSET + MAX_PAYLOAD
    # Structure: pre-compiled struct codecs (native byte order / alignment).
    # Name-mangled to _Packet__* so they stay private to this class.
    __packet_t = struct.Struct('%is' % SIZE)
    __command_t = struct.Struct('H')
    __data_t = struct.Struct('%is' % MAX_PAYLOAD)
    __u8_t = struct.Struct('B')
    __s8_t = struct.Struct('b')
    __u16_t = struct.Struct('H')
    __s16_t = struct.Struct('h')
    __u32_t = struct.Struct('I')
    __s32_t = struct.Struct('i')
    __u64_t = struct.Struct('Q')
    __s64_t = struct.Struct('q')
    __f_t = struct.Struct('f')
    __d_t = struct.Struct('d')

    # Byte array
    class ByteArray(object):
        """Typed, indexed view over the payload area of a shared packet buffer.

        Element *key* maps to byte offset DATA_OFFSET + key * element_size,
        so reads and writes go straight into the packet's buffer.
        """

        def __init__(self, buf, fmt):
            # buf: the packet's ctypes buffer; fmt: a struct.Struct element codec.
            self.__buf = buf
            self.__fmt = fmt

        def __getitem__(self, key):
            offset = self.__get_offset(key)
            return self.__fmt.unpack_from(self.__buf, offset)[0]

        def __setitem__(self, key, value):
            offset = self.__get_offset(key)
            self.__fmt.pack_into(self.__buf, offset, value)

        def __get_offset(self, key):
            # Validate the index and translate it to a byte offset; elements
            # must lie entirely within the MAX_PAYLOAD payload area.
            if not isinstance(key, int):
                raise TypeError()
            if key < 0 or key >= Packet.MAX_PAYLOAD // self.__fmt.size:
                raise IndexError()
            return Packet.DATA_OFFSET + self.__fmt.size * key

    def __init__(self, raw=None):
        """Create a zeroed packet, optionally initialized from *raw* bytes.

        Raises:
            TypeError: if *raw* is given but is not a bytes object.
        """
        self.__buffer = ctypes.create_string_buffer(self.SIZE)
        if raw is not None:
            if not isinstance(raw, bytes):
                raise TypeError()
            # '%is' packing zero-pads raw if it is shorter than SIZE.
            self.__packet_t.pack_into(self.__buffer, 0, raw)

    def raw(self):
        """Return the underlying ctypes buffer (shared and mutable, not a copy)."""
        return self.__buffer

    @property
    def command(self):
        # Command/reply code stored at offset 0.
        return self.__command_t.unpack_from(self.__buffer)[0]

    @command.setter
    def command(self, value):
        return self.__command_t.pack_into(self.__buffer, 0, value)

    @property
    def data(self):
        # The full MAX_PAYLOAD-byte payload as a bytes object.
        return self.__data_t.unpack_from(self.__buffer, self.DATA_OFFSET)[0]

    @data.setter
    def data(self, value):
        return self.__data_t.pack_into(self.__buffer, self.DATA_OFFSET, value)

    # Typed payload views below are created lazily on first access and cached
    # on the instance (_u8, _s8, ...); all share the same underlying buffer.
    @property
    def u8(self):
        # Payload as unsigned 8-bit integers.
        if not hasattr(self, '_u8'):
            self._u8 = self.ByteArray(self.__buffer, self.__u8_t)
        return self._u8

    @property
    def s8(self):
        # Payload as signed 8-bit integers.
        if not hasattr(self, '_s8'):
            self._s8 = self.ByteArray(self.__buffer, self.__s8_t)
        return self._s8

    @property
    def u16(self):
        # Payload as unsigned 16-bit integers.
        if not hasattr(self, '_u16'):
            self._u16 = self.ByteArray(self.__buffer, self.__u16_t)
        return self._u16

    @property
    def s16(self):
        # Payload as signed 16-bit integers.
        if not hasattr(self, '_s16'):
            self._s16 = self.ByteArray(self.__buffer, self.__s16_t)
        return self._s16

    @property
    def u32(self):
        # Payload as unsigned 32-bit integers.
        if not hasattr(self, '_u32'):
            self._u32 = self.ByteArray(self.__buffer, self.__u32_t)
        return self._u32

    @property
    def s32(self):
        # Payload as signed 32-bit integers.
        if not hasattr(self, '_s32'):
            self._s32 = self.ByteArray(self.__buffer, self.__s32_t)
        return self._s32

    @property
    def u64(self):
        # Payload as unsigned 64-bit integers.
        if not hasattr(self, '_u64'):
            self._u64 = self.ByteArray(self.__buffer, self.__u64_t)
        return self._u64

    @property
    def s64(self):
        # Payload as signed 64-bit integers.
        if not hasattr(self, '_s64'):
            self._s64 = self.ByteArray(self.__buffer, self.__s64_t)
        return self._s64

    @property
    def f(self):
        # Payload as single-precision floats.
        if not hasattr(self, '_f'):
            self._f = self.ByteArray(self.__buffer, self.__f_t)
        return self._f

    @property
    def d(self):
        # Payload as double-precision floats.
        if not hasattr(self, '_d'):
            self._d = self.ByteArray(self.__buffer, self.__d_t)
        return self._d
| {
"content_hash": "a71cb586e1e19a7c8f540a63ef340b86",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 78,
"avg_line_length": 27.727810650887573,
"alnum_prop": 0.5633802816901409,
"repo_name": "dfautomation/zalpha-api",
"id": "6161a891e98598040eb5a6e8e4ecb4b50056f8b6",
"size": "5331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/zalpha_api/packet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "30686"
},
{
"name": "CMake",
"bytes": "3459"
},
{
"name": "Python",
"bytes": "26499"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Redefine Category/Product name and description fields with explicit verbose names."""

    dependencies = [
        ('registrasion', '0004_groupmemberdiscount_groupmemberflag'),
    ]

    operations = [
        # Category: required description and 255-char name.
        migrations.AlterField(
            model_name='category',
            name='description',
            field=models.TextField(verbose_name='Description'),
        ),
        migrations.AlterField(
            model_name='category',
            name='name',
            field=models.CharField(max_length=255, verbose_name='Name'),
        ),
        # Product: description is optional (blank and null allowed).
        migrations.AlterField(
            model_name='product',
            name='description',
            field=models.TextField(blank=True, null=True, verbose_name='Description'),
        ),
        migrations.AlterField(
            model_name='product',
            name='name',
            field=models.CharField(max_length=255, verbose_name='Name'),
        ),
    ]
| {
"content_hash": "570f5573c276fade2e5bda9ba2026948",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 86,
"avg_line_length": 29.21212121212121,
"alnum_prop": 0.5778008298755186,
"repo_name": "chrisjrn/registrasion",
"id": "e590939469bf3e56db41e06bd1559206bd232625",
"size": "1036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "registrasion/migrations/0005_auto_20160905_0945.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "37061"
},
{
"name": "Python",
"bytes": "373937"
}
],
"symlink_target": ""
} |
import os, time
# MJPEG part separator; the same value is advertised in the Content-Type header.
boundary = '--boundarydonotcross'


def request_headers():
    """Return the HTTP response headers that start an MJPEG (multipart/x-mixed-replace) stream."""
    content_type = 'multipart/x-mixed-replace;boundary=%s' % boundary
    headers = {
        'Cache-Control': 'no-store, no-cache, must-revalidate, pre-check=0, post-check=0, max-age=0',
        'Connection': 'close',
        'Content-Type': content_type,
        'Expires': 'Mon, 3 Jan 2000 12:34:56 GMT',
        'Pragma': 'no-cache',
    }
    return headers
def image_headers(filename):
    """Return the per-frame part headers for *filename* in the MJPEG stream."""
    frame_size = os.path.getsize(filename)
    # FIXME: mime-type must be set according to the file content
    headers = {
        'X-Timestamp': time.time(),
        'Content-Length': frame_size,
        'Content-Type': 'image/jpeg',
    }
    return headers
# FIXME: should take a binary stream
def image(filename):
    """Yield the contents of *filename* one byte at a time (1-byte bytes objects).

    Fix: the original issued one read() syscall per byte; the file is now read
    in 64 KiB chunks, while still yielding single bytes so the streaming
    interface seen by callers is unchanged.
    """
    with open(filename, "rb") as f:
        # iter(callable, sentinel) stops when read() returns b'' at EOF.
        for chunk in iter(lambda: f.read(65536), b''):
            for offset in range(len(chunk)):
                yield chunk[offset:offset + 1]
| {
"content_hash": "17aed9ea75fef1b38307024efe054e01",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 101,
"avg_line_length": 29.5,
"alnum_prop": 0.5785310734463277,
"repo_name": "HackaRobot/rpi",
"id": "bd23c7d87eb211ff475d1ba39eb1b986f796d070",
"size": "885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pirover/webserver/cgi-bin/pymjpeg.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "62635"
},
{
"name": "Shell",
"bytes": "834"
}
],
"symlink_target": ""
} |
def f(
a,
**kwargs,
) -> A:
with cache_dir():
if something:
result = (
CliRunner().invoke(black.main, [str(src1), str(src2), "--diff", "--check"])
)
limited.append(-limited.pop()) # negate top
return A(
very_long_argument_name1=very_long_value_for_the_argument,
very_long_argument_name2=-very.long.value.for_the_argument,
**kwargs,
)
def g():
"Docstring."
def inner():
pass
print("Inner defs should breathe a little.")
def h():
def inner():
pass
print("Inner defs should breathe a little.")
if os.name == "posix":
import termios
def i_should_be_followed_by_only_one_newline():
pass
elif os.name == "nt":
try:
import msvcrt
def i_should_be_followed_by_only_one_newline():
pass
except ImportError:
def i_should_be_followed_by_only_one_newline():
pass
elif False:
class IHopeYouAreHavingALovelyDay:
def __call__(self):
print("i_should_be_followed_by_only_one_newline")
else:
def foo():
pass
with hmm_but_this_should_get_two_preceding_newlines():
pass
# output
def f(
a,
**kwargs,
) -> A:
with cache_dir():
if something:
result = CliRunner().invoke(
black.main, [str(src1), str(src2), "--diff", "--check"]
)
limited.append(-limited.pop()) # negate top
return A(
very_long_argument_name1=very_long_value_for_the_argument,
very_long_argument_name2=-very.long.value.for_the_argument,
**kwargs,
)
def g():
"Docstring."
def inner():
pass
print("Inner defs should breathe a little.")
def h():
def inner():
pass
print("Inner defs should breathe a little.")
if os.name == "posix":
import termios
def i_should_be_followed_by_only_one_newline():
pass
elif os.name == "nt":
try:
import msvcrt
def i_should_be_followed_by_only_one_newline():
pass
except ImportError:
def i_should_be_followed_by_only_one_newline():
pass
elif False:
class IHopeYouAreHavingALovelyDay:
def __call__(self):
print("i_should_be_followed_by_only_one_newline")
else:
def foo():
pass
with hmm_but_this_should_get_two_preceding_newlines():
pass
| {
"content_hash": "665b60234f2ca939096d51b3ba0359e7",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 91,
"avg_line_length": 19.99173553719008,
"alnum_prop": 0.5638693675072344,
"repo_name": "psf/black",
"id": "5bb36c26318cbd0b9992b55fd73da3485cf500c7",
"size": "2419",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/data/simple_cases/function2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "929"
},
{
"name": "Jupyter Notebook",
"bytes": "2848"
},
{
"name": "Python",
"bytes": "4932376"
},
{
"name": "Vim Script",
"bytes": "9445"
}
],
"symlink_target": ""
} |
from pants.build_graph.target import Target
class ToolsJar(Target):
    """A private target type injected by the JavacPlugin to represent the JDK's tools.jar.

    The classpath for this target is provided by the ProvideToolsJar task.
    """

    def __init__(self, *args, **kwargs):
        # Pin the scope to "compile"; everything else is forwarded untouched.
        super().__init__(*args, scope="compile", **kwargs)
| {
"content_hash": "c8d03b1c32aced98e4ab41b3e17ec586",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 90,
"avg_line_length": 31.454545454545453,
"alnum_prop": 0.6791907514450867,
"repo_name": "tdyas/pants",
"id": "407c13c4407024ae2d1a4eb0244dfad6e3e75ed3",
"size": "478",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/jvm/targets/tools_jar.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "2010"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "5596"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2765"
},
{
"name": "HTML",
"bytes": "44381"
},
{
"name": "Java",
"bytes": "518180"
},
{
"name": "JavaScript",
"bytes": "22906"
},
{
"name": "Python",
"bytes": "7955590"
},
{
"name": "Rust",
"bytes": "1031208"
},
{
"name": "Scala",
"bytes": "106520"
},
{
"name": "Shell",
"bytes": "109904"
},
{
"name": "Starlark",
"bytes": "502255"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
} |
"""User interface for Win32 terminals."""
#
# (C) Pywikibot team, 2003-2019
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
from pywikibot.tools import ModuleDeprecationWrapper
from pywikibot.userinterfaces import (
terminal_interface_base,
win32_unicode,
)
import ctypes
# Map pywikibot color names to Win32 console text-attribute values
# (low nibble = foreground intensity/color; shifted left 4 bits for background).
windowsColors = dict(
    default=7,
    black=0,
    blue=1,
    green=2,
    aqua=3,
    red=4,
    purple=5,
    yellow=6,
    lightgray=7,
    gray=8,
    lightblue=9,
    lightgreen=10,
    lightaqua=11,
    lightred=12,
    lightpurple=13,
    lightyellow=14,
    white=15,
)
class Win32BaseUI(terminal_interface_base.UI):

    """DEPRECATED. Pre-ctypes user interface for Win32 terminals.

    Kept only so old imports keep working; use Win32UI instead.
    """

    def __init__(self):
        """Initialize the base UI and force the legacy 'ascii' encoding."""
        super(Win32BaseUI, self).__init__()
        # Plain Win32 consoles without the unicode helpers get ASCII only.
        self.encoding = 'ascii'
class Win32UI(terminal_interface_base.UI):

    """User interface for Win32 terminals using ctypes."""

    def __init__(self):
        """Initializer: wrap the console streams with the unicode helpers."""
        # Fix: this previously called super(Win32CtypesUI, self), leaning on
        # the backward-compat alias defined *after* this class (a leftover
        # from the class rename); reference the class's own name instead.
        super(Win32UI, self).__init__()
        (stdin, stdout, stderr, argv) = win32_unicode.get_unicode_console()
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.argv = argv
        self.encoding = 'utf-8'

    def support_color(self, target_stream):
        """Return whether the target stream actually supports color."""
        # Only streams wrapped around a real console handle can be colored.
        return getattr(target_stream, '_hConsole', None) is not None

    def encounter_color(self, color, target_stream):
        """Set the new color on the console via the Win32 API."""
        fg, bg = self.divide_color(color)
        windows_color = windowsColors[fg]
        # Merge foreground/background color if needed; background occupies
        # the high nibble of the console attribute word.
        if bg is not None:
            windows_color = windowsColors[bg] << 4 | windows_color
        ctypes.windll.kernel32.SetConsoleTextAttribute(
            target_stream._hConsole, windows_color)

    def _raw_input(self):
        """Read one line from stdin, treating Ctrl-Z (0x1a) as EOF."""
        data = self.stdin.readline()
        # data is in both Python versions str but '\x1a' is unicode in Python 2
        # so explicitly convert into str as it otherwise tries to decode data
        if str('\x1a') in data:
            raise EOFError()
        return data.strip()
# Backward-compatible alias for the pre-2019 class name.
Win32CtypesUI = Win32UI

# Emit DeprecationWarnings when the old class names are accessed through
# this module; both point users at Win32UI.
wrapper = ModuleDeprecationWrapper(__name__)
wrapper._add_deprecated_attr('Win32CtypesUI',
                             replacement_name='Win32UI',
                             since='20190217')
wrapper._add_deprecated_attr('Win32BaseUI',
                             replacement_name='Win32UI',
                             since='20190217')
| {
"content_hash": "c9c5112a07f9cae119586500e14ab4bd",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 79,
"avg_line_length": 28.97872340425532,
"alnum_prop": 0.5888399412628488,
"repo_name": "PersianWikipedia/pywikibot-core",
"id": "78f18c9a749a0f38822c1bea255eaf9490340bdd",
"size": "2748",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pywikibot/userinterfaces/terminal_interface_win32.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "Python",
"bytes": "4021871"
}
],
"symlink_target": ""
} |
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from mock import Mock, ANY, call, patch
import six
from six import BytesIO
import time
from threading import Lock
from cassandra import OperationTimedOut
from cassandra.cluster import Cluster
from cassandra.connection import (Connection, HEADER_DIRECTION_TO_CLIENT, ProtocolError,
locally_supported_compressions, ConnectionHeartbeat, _Frame, Timer, TimerManager,
ConnectionException)
from cassandra.marshal import uint8_pack, uint32_pack, int32_pack
from cassandra.protocol import (write_stringmultimap, write_int, write_string,
SupportedMessage, ProtocolHandler)
class ConnectionTest(unittest.TestCase):
    """Unit tests for Connection frame parsing and startup option negotiation.

    Every test runs against a Connection whose socket is mocked; hand-built
    native-protocol frames are fed through the parsing entry points.
    """

    def make_connection(self):
        # Connection whose mocked socket reports every byte as sent.
        c = Connection('1.2.3.4')
        c._socket = Mock()
        c._socket.send.side_effect = lambda x: len(x)
        return c

    def make_header_prefix(self, message_class, version=Connection.protocol_version, stream_id=0):
        # Build response frame header bytes; protocol v3+ carries a two-byte
        # stream id, earlier versions a single byte.
        if Connection.protocol_version < 3:
            return six.binary_type().join(map(uint8_pack, [
                0xff & (HEADER_DIRECTION_TO_CLIENT | version),
                0,  # flags (compression)
                stream_id,
                message_class.opcode  # opcode
            ]))
        else:
            return six.binary_type().join(map(uint8_pack, [
                0xff & (HEADER_DIRECTION_TO_CLIENT | version),
                0,  # flags (compression)
                0,  # MSB for v3+ stream
                stream_id,
                message_class.opcode  # opcode
            ]))

    def make_options_body(self):
        # Body of a SUPPORTED message advertising CQL 3.0.1, no compression.
        options_buf = BytesIO()
        write_stringmultimap(options_buf, {
            'CQL_VERSION': ['3.0.1'],
            'COMPRESSION': []
        })
        return options_buf.getvalue()

    def make_error_body(self, code, msg):
        # Body of an ERROR message: int code followed by a string message.
        buf = BytesIO()
        write_int(buf, code)
        write_string(buf, msg)
        return buf.getvalue()

    def make_msg(self, header, body=""):
        # Complete frame: header + 4-byte body length + body.
        # NOTE(review): the default body="" is a str; concatenating it with a
        # bytes header would fail on Python 3 -- callers always pass bytes.
        return header + uint32_pack(len(body)) + body

    def test_bad_protocol_version(self, *args):
        # An out-of-range version byte must defunct the connection.
        c = self.make_connection()
        c._requests = Mock()
        c.defunct = Mock()

        # read in a SupportedMessage response
        header = self.make_header_prefix(SupportedMessage, version=0x7f)
        options = self.make_options_body()
        message = self.make_msg(header, options)
        c._iobuf = BytesIO()
        c._iobuf.write(message)
        c.process_io_buffer()

        # make sure it errored correctly
        c.defunct.assert_called_once_with(ANY)
        args, kwargs = c.defunct.call_args
        self.assertIsInstance(args[0], ProtocolError)

    def test_negative_body_length(self, *args):
        # A frame claiming a negative body length must defunct the connection.
        c = self.make_connection()
        c._requests = Mock()
        c.defunct = Mock()

        # read in a SupportedMessage response
        header = self.make_header_prefix(SupportedMessage)
        message = header + int32_pack(-13)
        c._iobuf = BytesIO()
        c._iobuf.write(message)
        c.process_io_buffer()

        # make sure it errored correctly
        c.defunct.assert_called_once_with(ANY)
        args, kwargs = c.defunct.call_args
        self.assertIsInstance(args[0], ProtocolError)

    def test_unsupported_cql_version(self, *args):
        # Requesting CQL 3.0.3 when the server only offers 7.8.9 must error.
        c = self.make_connection()
        c._requests = {0: (c._handle_options_response, ProtocolHandler.decode_message, [])}
        c.defunct = Mock()
        c.cql_version = "3.0.3"

        # read in a SupportedMessage response
        header = self.make_header_prefix(SupportedMessage)
        options_buf = BytesIO()
        write_stringmultimap(options_buf, {
            'CQL_VERSION': ['7.8.9'],
            'COMPRESSION': []
        })
        options = options_buf.getvalue()

        c.process_msg(_Frame(version=4, flags=0, stream=0, opcode=SupportedMessage.opcode, body_offset=9, end_pos=9 + len(options)), options)

        # make sure it errored correctly
        c.defunct.assert_called_once_with(ANY)
        args, kwargs = c.defunct.call_args
        self.assertIsInstance(args[0], ProtocolError)

    def test_prefer_lz4_compression(self, *args):
        # With no explicit preference, lz4 wins when the server offers both.
        c = self.make_connection()
        c._requests = {0: (c._handle_options_response, ProtocolHandler.decode_message, [])}
        c.defunct = Mock()
        c.cql_version = "3.0.3"

        # Replace the real codecs with sentinels so we can assert selection.
        locally_supported_compressions.pop('lz4', None)
        locally_supported_compressions.pop('snappy', None)
        locally_supported_compressions['lz4'] = ('lz4compress', 'lz4decompress')
        locally_supported_compressions['snappy'] = ('snappycompress', 'snappydecompress')

        # read in a SupportedMessage response
        options_buf = BytesIO()
        write_stringmultimap(options_buf, {
            'CQL_VERSION': ['3.0.3'],
            'COMPRESSION': ['snappy', 'lz4']
        })
        options = options_buf.getvalue()
        c.process_msg(_Frame(version=4, flags=0, stream=0, opcode=SupportedMessage.opcode, body_offset=9, end_pos=9 + len(options)), options)

        self.assertEqual(c.decompressor, locally_supported_compressions['lz4'][1])

    def test_requested_compression_not_available(self, *args):
        # Explicitly requesting a codec the server lacks must defunct.
        c = self.make_connection()
        c._requests = {0: (c._handle_options_response, ProtocolHandler.decode_message, [])}
        c.defunct = Mock()
        # request lz4 compression
        c.compression = "lz4"

        locally_supported_compressions.pop('lz4', None)
        locally_supported_compressions.pop('snappy', None)
        locally_supported_compressions['lz4'] = ('lz4compress', 'lz4decompress')
        locally_supported_compressions['snappy'] = ('snappycompress', 'snappydecompress')

        # read in a SupportedMessage response
        header = self.make_header_prefix(SupportedMessage)

        # the server only supports snappy
        options_buf = BytesIO()
        write_stringmultimap(options_buf, {
            'CQL_VERSION': ['3.0.3'],
            'COMPRESSION': ['snappy']
        })
        options = options_buf.getvalue()

        c.process_msg(_Frame(version=4, flags=0, stream=0, opcode=SupportedMessage.opcode, body_offset=9, end_pos=9 + len(options)), options)

        # make sure it errored correctly
        c.defunct.assert_called_once_with(ANY)
        args, kwargs = c.defunct.call_args
        self.assertIsInstance(args[0], ProtocolError)

    def test_use_requested_compression(self, *args):
        # An explicitly requested codec is honored over the preference order.
        c = self.make_connection()
        c._requests = {0: (c._handle_options_response, ProtocolHandler.decode_message, [])}
        c.defunct = Mock()
        # request snappy compression
        c.compression = "snappy"

        locally_supported_compressions.pop('lz4', None)
        locally_supported_compressions.pop('snappy', None)
        locally_supported_compressions['lz4'] = ('lz4compress', 'lz4decompress')
        locally_supported_compressions['snappy'] = ('snappycompress', 'snappydecompress')

        # read in a SupportedMessage response
        header = self.make_header_prefix(SupportedMessage)

        # the server only supports snappy
        options_buf = BytesIO()
        write_stringmultimap(options_buf, {
            'CQL_VERSION': ['3.0.3'],
            'COMPRESSION': ['snappy', 'lz4']
        })
        options = options_buf.getvalue()

        c.process_msg(_Frame(version=4, flags=0, stream=0, opcode=SupportedMessage.opcode, body_offset=9, end_pos=9 + len(options)), options)

        self.assertEqual(c.decompressor, locally_supported_compressions['snappy'][1])

    def test_disable_compression(self, *args):
        # compression=False must leave the connection uncompressed.
        c = self.make_connection()
        c._requests = {0: (c._handle_options_response, ProtocolHandler.decode_message)}
        c.defunct = Mock()

        # disable compression
        c.compression = False

        locally_supported_compressions.pop('lz4', None)
        locally_supported_compressions.pop('snappy', None)
        locally_supported_compressions['lz4'] = ('lz4compress', 'lz4decompress')
        locally_supported_compressions['snappy'] = ('snappycompress', 'snappydecompress')

        # read in a SupportedMessage response
        header = self.make_header_prefix(SupportedMessage)

        # the server only supports snappy
        options_buf = BytesIO()
        write_stringmultimap(options_buf, {
            'CQL_VERSION': ['3.0.3'],
            'COMPRESSION': ['snappy', 'lz4']
        })
        options = options_buf.getvalue()
        message = self.make_msg(header, options)
        # NOTE(review): unlike the other tests this still uses the old
        # process_msg(message, size) calling convention and a 2-tuple
        # _requests entry -- looks out of date with the
        # process_msg(_Frame(...), body) form used above; verify.
        c.process_msg(message, len(message) - 8)

        self.assertEqual(c.decompressor, None)

    def test_not_implemented(self):
        """
        Ensure the following methods throw NIE's. If not, come back and test them.
        """
        c = self.make_connection()

        self.assertRaises(NotImplementedError, c.close)

    def test_set_keyspace_blocking(self):
        # Setting the keyspace to its current value is a no-op shortcut.
        c = self.make_connection()

        self.assertEqual(c.keyspace, None)
        c.set_keyspace_blocking(None)
        self.assertEqual(c.keyspace, None)

        c.keyspace = 'ks'
        c.set_keyspace_blocking('ks')
        self.assertEqual(c.keyspace, 'ks')

    def test_set_connection_class(self):
        # Cluster stores whatever connection_class it is given.
        cluster = Cluster(connection_class='test')
        self.assertEqual('test', cluster.connection_class)
@patch('cassandra.connection.ConnectionHeartbeat._raise_if_stopped')
class ConnectionHeartbeatTest(unittest.TestCase):
    """Tests for the ConnectionHeartbeat background keepalive thread.

    The class-level patch disables the stop check so each heartbeat loop
    runs for the full sleep window; every test therefore takes a *args
    catch-all for the injected mock.
    """

    @staticmethod
    def make_get_holders(len):
        # Build a mocked get_holders callable returning *len* holders, each
        # with an empty connection list by default.
        # NOTE(review): the parameter shadows the builtin len().
        holders = []
        for _ in range(len):
            holder = Mock()
            holder.get_connections = Mock(return_value=[])
            holders.append(holder)
        get_holders = Mock(return_value=holders)
        return get_holders

    def run_heartbeat(self, get_holders_fun, count=2, interval=0.05):
        # Let the heartbeat thread run roughly count*interval seconds, then stop.
        ch = ConnectionHeartbeat(interval, get_holders_fun)
        time.sleep(interval * count)
        ch.stop()
        self.assertTrue(get_holders_fun.call_count)

    def test_empty_connections(self, *args):
        # Heartbeat keeps polling holders even when they have no connections.
        count = 3
        get_holders = self.make_get_holders(1)
        self.run_heartbeat(get_holders, count)
        self.assertGreaterEqual(get_holders.call_count, count - 1)  # lower bound to account for thread spinup time
        self.assertLessEqual(get_holders.call_count, count)
        holder = get_holders.return_value[0]
        holder.get_connections.assert_has_calls([call()] * get_holders.call_count)

    def test_idle_non_idle(self, *args):
        # Only idle connections get an OPTIONS probe; busy ones are skipped.
        request_id = 999

        # connection.send_msg(OptionsMessage(), connection.get_request_id(), self._options_callback)
        def send_msg(msg, req_id, msg_callback):
            # Immediately answer the probe with a SUPPORTED response.
            msg_callback(SupportedMessage([], {}))

        idle_connection = Mock(spec=Connection, host='localhost',
                               max_request_id=127,
                               lock=Lock(),
                               in_flight=0, is_idle=True,
                               is_defunct=False, is_closed=False,
                               get_request_id=lambda: request_id,
                               send_msg=Mock(side_effect=send_msg))
        non_idle_connection = Mock(spec=Connection, in_flight=0, is_idle=False, is_defunct=False, is_closed=False)
        get_holders = self.make_get_holders(1)
        holder = get_holders.return_value[0]
        holder.get_connections.return_value.append(idle_connection)
        holder.get_connections.return_value.append(non_idle_connection)
        self.run_heartbeat(get_holders)
        holder.get_connections.assert_has_calls([call()] * get_holders.call_count)
        self.assertEqual(idle_connection.in_flight, 0)
        self.assertEqual(non_idle_connection.in_flight, 0)
        idle_connection.send_msg.assert_has_calls([call(ANY, request_id, ANY)] * get_holders.call_count)
        self.assertEqual(non_idle_connection.send_msg.call_count, 0)

    def test_closed_defunct(self, *args):
        # Closed or defunct connections must never be probed.
        get_holders = self.make_get_holders(1)
        closed_connection = Mock(spec=Connection, in_flight=0, is_idle=False, is_defunct=False, is_closed=True)
        defunct_connection = Mock(spec=Connection, in_flight=0, is_idle=False, is_defunct=True, is_closed=False)
        holder = get_holders.return_value[0]
        holder.get_connections.return_value.append(closed_connection)
        holder.get_connections.return_value.append(defunct_connection)
        self.run_heartbeat(get_holders)
        holder.get_connections.assert_has_calls([call()] * get_holders.call_count)
        self.assertEqual(closed_connection.in_flight, 0)
        self.assertEqual(defunct_connection.in_flight, 0)
        self.assertEqual(closed_connection.send_msg.call_count, 0)
        self.assertEqual(defunct_connection.send_msg.call_count, 0)

    def test_no_req_ids(self, *args):
        # A connection with all stream ids in flight is defuncted and returned.
        in_flight = 3
        get_holders = self.make_get_holders(1)
        max_connection = Mock(spec=Connection, host='localhost',
                              lock=Lock(),
                              max_request_id=in_flight - 1, in_flight=in_flight,
                              is_idle=True, is_defunct=False, is_closed=False)
        holder = get_holders.return_value[0]
        holder.get_connections.return_value.append(max_connection)
        self.run_heartbeat(get_holders)
        holder.get_connections.assert_has_calls([call()] * get_holders.call_count)
        self.assertEqual(max_connection.in_flight, in_flight)
        self.assertEqual(max_connection.send_msg.call_count, 0)
        # NOTE(review): duplicated assertion (same as the line above).
        self.assertEqual(max_connection.send_msg.call_count, 0)
        max_connection.defunct.assert_has_calls([call(ANY)] * get_holders.call_count)
        holder.return_connection.assert_has_calls([call(max_connection)] * get_holders.call_count)

    def test_unexpected_response(self, *args):
        # A probe answered with something other than SUPPORTED defuncts the
        # connection with a ConnectionException.
        request_id = 999
        get_holders = self.make_get_holders(1)

        def send_msg(msg, req_id, msg_callback):
            msg_callback(object())

        connection = Mock(spec=Connection, host='localhost',
                          max_request_id=127,
                          lock=Lock(),
                          in_flight=0, is_idle=True,
                          is_defunct=False, is_closed=False,
                          get_request_id=lambda: request_id,
                          send_msg=Mock(side_effect=send_msg))
        holder = get_holders.return_value[0]
        holder.get_connections.return_value.append(connection)
        self.run_heartbeat(get_holders)
        self.assertEqual(connection.in_flight, get_holders.call_count)
        connection.send_msg.assert_has_calls([call(ANY, request_id, ANY)] * get_holders.call_count)
        connection.defunct.assert_has_calls([call(ANY)] * get_holders.call_count)
        exc = connection.defunct.call_args_list[0][0][0]
        self.assertIsInstance(exc, ConnectionException)
        self.assertRegexpMatches(exc.args[0], r'^Received unexpected response to OptionsMessage.*')
        holder.return_connection.assert_has_calls([call(connection)] * get_holders.call_count)

    def test_timeout(self, *args):
        # A probe that never gets a response times out and defuncts the
        # connection with OperationTimedOut.
        request_id = 999
        get_holders = self.make_get_holders(1)

        def send_msg(msg, req_id, msg_callback):
            pass

        connection = Mock(spec=Connection, host='localhost',
                          max_request_id=127,
                          lock=Lock(),
                          in_flight=0, is_idle=True,
                          is_defunct=False, is_closed=False,
                          get_request_id=lambda: request_id,
                          send_msg=Mock(side_effect=send_msg))
        holder = get_holders.return_value[0]
        holder.get_connections.return_value.append(connection)
        self.run_heartbeat(get_holders)
        self.assertEqual(connection.in_flight, get_holders.call_count)
        connection.send_msg.assert_has_calls([call(ANY, request_id, ANY)] * get_holders.call_count)
        connection.defunct.assert_has_calls([call(ANY)] * get_holders.call_count)
        exc = connection.defunct.call_args_list[0][0][0]
        self.assertIsInstance(exc, OperationTimedOut)
        self.assertEqual(exc.errors, 'Connection heartbeat timeout after 0.05 seconds')
        self.assertEqual(exc.last_host, 'localhost')
        holder.return_connection.assert_has_calls([call(connection)] * get_holders.call_count)
class TimerTest(unittest.TestCase):
    """Regression test for Timer heap ordering (issue #466)."""

    def test_timer_collision(self):
        # simple test demonstrating #466
        # Give both timers the exact same deadline so the heap comparison
        # falls through from the timestamp to the Timer objects themselves.
        first = Timer(0, lambda: None)
        second = Timer(0, lambda: None)
        second.end = first.end

        manager = TimerManager()
        manager.add_timer(first)
        manager.add_timer(second)

        # Prior to #466: "TypeError: unorderable types: Timer() < Timer()"
        manager.service_timeouts()
| {
"content_hash": "1a067f2441ca7df0ccc483fd46307e06",
"timestamp": "",
"source": "github",
"line_count": 421,
"max_line_length": 141,
"avg_line_length": 40.13776722090261,
"alnum_prop": 0.6185347378387975,
"repo_name": "coldeasy/python-driver",
"id": "3209d312eff9aa812411d19776268454bd7580ee",
"size": "17477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/test_connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "28924"
},
{
"name": "PowerShell",
"bytes": "3817"
},
{
"name": "Python",
"bytes": "2238540"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import numbers
import os
import sys
import textwrap
import uuid
import prettytable
import six
import yaml
from monascaclient import exc
from oslo_serialization import jsonutils
from oslo_utils import importutils
# Serializers used by format_output(); keys are the accepted
# (lower-cased) output format names.
supported_formats = {
    "json": lambda x: jsonutils.dumps(x, indent=2),
    "yaml": yaml.safe_dump
}
# Decorator for cli-args
def arg(*args, **kwargs):
    """Attach an argparse argument spec (args, kwargs) to the decorated CLI function.

    Specs accumulate on ``func.arguments``.  Because stacked decorators apply
    bottom-up, each spec is inserted at the front so the list ends up in
    source order.
    """
    def _decorator(func):
        arguments = func.__dict__.setdefault('arguments', [])
        arguments.insert(0, (args, kwargs))
        return func
    return _decorator
def link_formatter(links):
    """Render a list of {'href': ...} dicts as newline-separated URLs ('' for missing hrefs)."""
    return '\n'.join(link.get('href', '') for link in links or [])
def json_formatter(js):
    """Serialize *js* as 2-space-indented JSON and return it UTF-8 encoded."""
    rendered = jsonutils.dumps(js, indent=2, ensure_ascii=False)
    return rendered.encode('utf-8')
def text_wrap_formatter(d):
    """Wrap *d* to 55 columns, joining the wrapped lines with newlines."""
    wrapped_lines = textwrap.wrap(d or '', 55)
    return '\n'.join(wrapped_lines)
def newline_list_formatter(r):
    """Join a list of strings with newlines; None or empty becomes ''."""
    items = r if r else []
    return '\n'.join(items)
def print_list(objs, fields, field_labels=None, formatters=None, sortby=None):
    """Print *objs* as a left-aligned PrettyTable.

    Args:
        objs: iterable of row objects (attribute access) or sequences
            (index access for integer fields).
        fields: attribute names or integer indexes pulled from each object.
        field_labels: column headings; defaults to *fields*.
        formatters: optional {field: callable(obj) -> cell} overrides.
        sortby: index into field_labels of the sort column, or None to keep
            insertion order.

    Fix: ``formatters={}`` was a mutable default argument; use a
    None-sentinel instead (it was only read, but the shared default is
    still unsafe).
    """
    formatters = formatters or {}
    field_labels = field_labels or fields
    pt = prettytable.PrettyTable([f for f in field_labels],
                                 caching=False, print_empty=False)
    pt.align = 'l'
    for o in objs:
        row = []
        for field in fields:
            if field in formatters:
                row.append(formatters[field](o))
            elif isinstance(field, int):
                row.append(o[field])
            else:
                # Missing/None attributes render as empty cells.
                data = getattr(o, field, None) or ''
                row.append(data)
        pt.add_row(row)
    # NOTE: .encode('utf-8') is a Python 2 console idiom; on Python 3 this
    # prints a bytes repr -- presumably this module still targets Python 2.
    if sortby is None:
        print(pt.get_string().encode('utf-8'))
    else:
        print(pt.get_string(sortby=field_labels[sortby]).encode('utf-8'))
def print_dict(d, formatters=None):
    """Print dict *d* as a two-column Property/Value table sorted by property name.

    Args:
        d: mapping of property name -> value.
        formatters: optional {key: callable(value) -> cell} overrides.

    Fix: ``formatters={}`` was a mutable default argument; replaced with the
    None-sentinel idiom.
    """
    formatters = formatters or {}
    pt = prettytable.PrettyTable(['Property', 'Value'],
                                 caching=False, print_empty=False)
    pt.align = 'l'
    for field in d.keys():
        if field in formatters:
            pt.add_row([field, formatters[field](d[field])])
        else:
            pt.add_row([field, d[field]])
    # NOTE: .encode('utf-8') prints a bytes repr on Python 3 -- Python 2 idiom.
    print(pt.get_string(sortby='Property').encode('utf-8'))
def find_resource(manager, name_or_id):
    """Helper for the _find_* methods: resolve by integer id, then UUID, then name."""
    # 1. Integer id (either an int or an all-digit string).
    try:
        if isinstance(name_or_id, int) or name_or_id.isdigit():
            return manager.get(int(name_or_id))
    except exc.NotFound:
        pass

    # 2. UUID string.
    try:
        uuid.UUID(str(name_or_id))
        return manager.get(name_or_id)
    except (ValueError, exc.NotFound):
        pass

    # 3. Lookup by name; report a CommandError if nothing matches.
    try:
        return manager.find(name=name_or_id)
    except exc.NotFound:
        resource_kind = manager.resource_class.__name__.lower()
        raise exc.CommandError("No %s with a name or ID of '%s' exists."
                               % (resource_kind, name_or_id))
def env(*vars, **kwargs):
    """Search for the first defined of possibly many env vars.

    Returns the first environment variable in *vars* with a non-empty value,
    otherwise kwargs['default'] (None when absent).
    """
    for var_name in vars:
        var_value = os.environ.get(var_name)
        if var_value:
            return var_value
    return kwargs.get('default', None)
def import_versioned_module(version, submodule=None):
    """Import ``monascaclient.v<version>`` or one of its submodules.

    :param version: API version suffix, e.g. ``'2_0'``.
    :param submodule: optional submodule name appended after the version.
    :returns: the imported module object.
    """
    name = 'monascaclient.v%s' % version
    if submodule:
        name = '%s.%s' % (name, submodule)
    return importutils.import_module(name)
def exit(msg=''):
    """Write ``msg`` (if any) to stderr and abort the process with status 1.

    NOTE: intentionally shadows the ``exit`` builtin within this module.
    """
    if msg:
        print(msg.encode('utf-8'), file=sys.stderr)
    sys.exit(1)
def format_parameters(params):
    """Reformat parameters into dict of format expected by the API.

    Accepts multiple ``key=value`` entries, or a single entry carrying
    several pairs separated by ';' (preferred) or ','.  Repeated keys are
    collected into a list.  Raises exc.CommandError on a pair without '='.
    """
    if not params:
        return {}
    # expect multiple invocations of --parameters but fall back
    # to ; delimited if only one --parameters is specified
    if len(params) == 1:
        separator = ';' if ';' in params[0] else ','
        params = params[0].split(separator)
    parameters = {}
    for pair in params:
        try:
            name, value = pair.split('=', 1)
        except ValueError:
            raise exc.CommandError(
                'Malformed parameter(%s). Use the key=value format.' % pair)
        if name not in parameters:
            parameters[name] = value
        elif isinstance(parameters[name], list):
            parameters[name].append(value)
        else:
            parameters[name] = [parameters[name], value]
    return parameters
def format_dimensions_query(dims):
    """Convert a list of KEY=VALUE strings into a dimensions dict.

    Accepts multiple entries, or a single entry carrying several pairs
    separated by ';' (preferred) or ','.  A bare KEY maps to ''.
    """
    if not dims:
        return {}
    # A single argument may carry several pairs; split it apart first.
    if len(dims) == 1:
        delimiter = ';' if ';' in dims[0] else ','
        dims = dims[0].split(delimiter)
    dimensions = {}
    for pair in dims:
        # partition() splits on the first '=' only; no '=' yields value ''.
        name, _, value = pair.partition('=')
        dimensions[name] = value
    return dimensions
def format_output(output, format='yaml'):
    """Format the supplied dict as specified.

    :param output: the data structure to render.
    :param format: case-insensitive name of a key in ``supported_formats``.
    :raises exc.HTTPUnsupported: when the format name is unknown.
    """
    output_format = format.lower()
    # Look the formatter up inside the narrow try so that a KeyError raised
    # *inside* the formatter itself is not misreported as an unsupported
    # format (the original wrapped the whole call).
    try:
        formatter = supported_formats[output_format]
    except KeyError:
        raise exc.HTTPUnsupported("The format(%s) is unsupported."
                                  % output_format)
    return formatter(output)
def format_dimensions(dict):
    """Render a dimensions dict as a 'dimensions: {...}' block."""
    return 'dimensions: {\n%s\n}' % format_dict(dict)
def format_expression_data(dict):
    """Format an expression-data dict as newline-joined 'key: value' lines.

    The special key 'dimensions' is rendered via format_dimensions();
    numeric values are stringified, other values are used as-is.
    """
    string_list = []
    for key, value in dict.items():
        if key == 'dimensions':
            string_list.append(format_dimensions(value))
        elif isinstance(value, numbers.Number):
            string_list.append('%s: %s' % (key, str(value)))
        else:
            string_list.append(key + ': ' + value)
    return '\n'.join(string_list)
def format_dictlist(dict_list):
    """Format a list of dicts: one line per dict of comma-joined k:v pairs,
    with pairs sorted by key."""
    lines = []
    for mdict in dict_list:
        pairs = [key + ':' + str(value)
                 for key, value in sorted(mdict.items())]
        lines.append(','.join(pairs))
    return '\n'.join(lines)
def format_dict(dict):
    """Format a dict as newline-separated 'key: value' lines; numeric
    values are stringified, other values are used as-is."""
    lines = []
    for key, value in dict.items():
        if isinstance(value, numbers.Number):
            value = str(value)
        lines.append(key + ': ' + value)
    return '\n'.join(lines)
def format_list(in_list):
    """Format a list of strings as a newline-separated string.

    On Python 2, unicode items are encoded to UTF-8 byte strings first
    (preserving the historical behavior); on Python 3 items are used as-is.
    The original referenced the bare ``unicode`` builtin, which raised
    NameError on Python 3 for any non-empty input.
    """
    string_list = list()
    for k in in_list:
        # 'str is bytes' is only true on Python 2, where the bare 'unicode'
        # name exists; short-circuit keeps Python 3 from a NameError.
        if str is bytes and isinstance(k, unicode):  # noqa: F821
            key = k.encode('utf-8')
        else:
            key = k
        string_list.append(key)
    return '\n'.join(string_list)
def set_env_variables(kwargs):
    """Fill missing auth-related entries of ``kwargs`` from OS_* env vars.

    Mutates ``kwargs`` in place: any key not already present is set to the
    value of its corresponding environment variable (or None when unset).
    """
    environment_variables = {
        'username': 'OS_USERNAME',
        'password': 'OS_PASSWORD',
        'token': 'OS_AUTH_TOKEN',
        'auth_url': 'OS_AUTH_URL',
        'service_type': 'OS_SERVICE_TYPE',
        'endpoint_type': 'OS_ENDPOINT_TYPE',
        'os_cacert': 'OS_CACERT',
        'user_domain_id': 'OS_USER_DOMAIN_ID',
        'user_domain_name': 'OS_USER_DOMAIN_NAME',
        'project_id': 'OS_PROJECT_ID',
        'project_name': 'OS_PROJECT_NAME',
        'domain_id': 'OS_DOMAIN_ID',
        'domain_name': 'OS_DOMAIN_NAME',
        'region_name': 'OS_REGION_NAME'
    }
    for kwarg_name, env_name in six.iteritems(environment_variables):
        if kwarg_name not in kwargs:
            kwargs[kwarg_name] = env(env_name)
| {
"content_hash": "4fb523ca969fd9acbf80a5649972e89c",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 78,
"avg_line_length": 27.772241992882563,
"alnum_prop": 0.566376217324449,
"repo_name": "sapcc/python-monascaclient",
"id": "c4fac77c32715700c515ca1c5207415f700f93a5",
"size": "8428",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "monascaclient/common/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "187283"
},
{
"name": "Shell",
"bytes": "3386"
}
],
"symlink_target": ""
} |
from lisp import Symbol as _S
from proc import _ME, _UME
| {
"content_hash": "8849028507418cbed6b23e63d33dfeff",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 29,
"avg_line_length": 28.5,
"alnum_prop": 0.7368421052631579,
"repo_name": "kuangyh/chord",
"id": "e913e365f22bd3c877f2a0238e6e5b45c0176f49",
"size": "82",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/solo/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Common Lisp",
"bytes": "2597"
},
{
"name": "Python",
"bytes": "46316"
}
],
"symlink_target": ""
} |
import re
from django.conf import settings
from django.utils.encoding import force_unicode
from django.utils.functional import allow_lazy
from django.utils.translation import ugettext_lazy
from htmlentitydefs import name2codepoint
# Capitalizes the first letter of a string; falsy input is returned as-is.
capfirst = lambda x: x and force_unicode(x)[0].upper() + force_unicode(x)[1:]
# allow_lazy defers evaluation so capfirst works on lazy translation strings.
capfirst = allow_lazy(capfirst, unicode)
def wrap(text, width):
    """
    A word-wrap function that preserves existing line breaks and most spaces in
    the text. Expects that existing line breaks are posix newlines.
    """
    text = force_unicode(text)
    def _generator():
        it = iter(text.split(' '))
        # Use the builtin next() instead of it.next() — works on both
        # Python 2.6+ and Python 3.
        word = next(it)
        yield word
        # pos tracks the length of the current output line.
        pos = len(word) - word.rfind('\n') - 1
        for word in it:
            if "\n" in word:
                lines = word.split('\n')
            else:
                lines = (word,)
            # +1 accounts for the space consumed by the split(' ') above.
            pos += len(lines[0]) + 1
            if pos > width:
                yield '\n'
                pos = len(lines[-1])
            else:
                yield ' '
                if len(lines) > 1:
                    pos = len(lines[-1])
            yield word
    return u''.join(_generator())
wrap = allow_lazy(wrap, unicode)
def truncate_words(s, num):
    """Truncates a string after a certain number of words.

    Whitespace is normalized to single spaces; when truncation happens an
    ellipsis '...' is appended unless the last kept word already ends in one.
    """
    words = force_unicode(s).split()
    limit = int(num)
    if len(words) > limit:
        words = words[:limit]
        if not words[-1].endswith('...'):
            words.append('...')
    return u' '.join(words)
truncate_words = allow_lazy(truncate_words, unicode)
def truncate_html_words(s, num):
    """
    Truncates html to a certain number of words (not counting tags and
    comments). Closes opened tags if they were correctly closed in the given
    html.

    ``num`` is coerced with int(); non-positive values yield an empty
    string.  When truncation occurs, ' ...' is appended and any tags still
    open at the cut point are closed.
    """
    s = force_unicode(s)
    length = int(num)
    if length <= 0:
        return u''
    # Elements that never take a closing tag in HTML 4.
    html4_singlets = ('br', 'col', 'link', 'base', 'img', 'param', 'area', 'hr', 'input')
    # Set up regular expressions: re_words matches an entity, a tag, or
    # (captured in group 1) a plain word; re_tag picks a tag apart.
    re_words = re.compile(r'&.*?;|<.*?>|(\w[\w-]*)', re.U)
    re_tag = re.compile(r'<(/)?([^ ]+?)(?: (/)| .*?)?>')
    # Count non-HTML words and keep note of open tags
    pos = 0
    ellipsis_pos = 0  # string index right after the last counted word
    words = 0
    open_tags = []
    while words <= length:
        m = re_words.search(s, pos)
        if not m:
            # Checked through whole string
            break
        pos = m.end(0)
        if m.group(1):
            # It's an actual non-HTML word
            words += 1
            if words == length:
                ellipsis_pos = pos
            continue
        # Check for tag
        tag = re_tag.match(m.group(0))
        if not tag or ellipsis_pos:
            # Don't worry about non tags or tags after our truncate point
            continue
        closing_tag, tagname, self_closing = tag.groups()
        tagname = tagname.lower()  # Element names are always case-insensitive
        if self_closing or tagname in html4_singlets:
            pass
        elif closing_tag:
            # Check for match in open tags list
            try:
                i = open_tags.index(tagname)
            except ValueError:
                pass
            else:
                # SGML: An end tag closes, back to the matching start tag, all unclosed intervening start tags with omitted end tags
                open_tags = open_tags[i+1:]
        else:
            # Add it to the start of the open tags list
            open_tags.insert(0, tagname)
    if words <= length:
        # Don't try to close tags if we don't need to truncate
        return s
    out = s[:ellipsis_pos] + ' ...'
    # Close any tags still open
    for tag in open_tags:
        out += '</%s>' % tag
    # Return string
    return out
truncate_html_words = allow_lazy(truncate_html_words, unicode)
def get_valid_filename(s):
    """
    Returns the given string converted to a string that can be used for a clean
    filename. Specifically, leading and trailing spaces are removed; other
    spaces are converted to underscores; and anything that is not a unicode
    alphanumeric, dash, underscore, or dot, is removed.

    >>> get_valid_filename("john's portrait in 2004.jpg")
    u'johns_portrait_in_2004.jpg'
    """
    cleaned = force_unicode(s).strip().replace(' ', '_')
    # (?u) makes \w match unicode alphanumerics.
    return re.sub(r'(?u)[^-\w.]', '', cleaned)
get_valid_filename = allow_lazy(get_valid_filename, unicode)
def get_text_list(list_, last_word=ugettext_lazy(u'or')):
    """
    >>> get_text_list(['a', 'b', 'c', 'd'])
    u'a, b, c or d'
    >>> get_text_list(['a', 'b', 'c'], 'and')
    u'a, b and c'
    >>> get_text_list(['a', 'b'], 'and')
    u'a and b'
    >>> get_text_list(['a'])
    u'a'
    >>> get_text_list([])
    u''
    """
    if len(list_) == 0:
        return u''
    if len(list_) == 1:
        return force_unicode(list_[0])
    # Comma-join everything but the last item, then attach the connector.
    all_but_last = u', '.join([force_unicode(i) for i in list_][:-1])
    return u'%s %s %s' % (all_but_last, force_unicode(last_word),
                          force_unicode(list_[-1]))
get_text_list = allow_lazy(get_text_list, unicode)
_newline_re = re.compile(r'\r\n|\r|\n')
def normalize_newlines(text):
    """Normalize \\r\\n and \\r line endings in ``text`` to posix \\n."""
    return force_unicode(_newline_re.sub('\n', text))
normalize_newlines = allow_lazy(normalize_newlines, unicode)
def recapitalize(text):
    """Recapitalizes text, placing caps after end-of-sentence punctuation.

    The whole string is lowercased first, then the first letter and any
    letter following '. ', '? ' or '! ' is uppercased.
    """
    lowered = force_unicode(text).lower()
    caps_re = re.compile(r'(?:^|(?<=[\.\?\!] ))([a-z])')
    return caps_re.sub(lambda m: m.group(1).upper(), lowered)
recapitalize = allow_lazy(recapitalize)
# Compiled/built once at import time instead of on every call.
_PHONE_LETTER_RE = re.compile(r'[A-Z]', re.I)
_PHONE_CHAR2NUMBER = {
    'a': '2', 'b': '2', 'c': '2', 'd': '3', 'e': '3', 'f': '3',
    'g': '4', 'h': '4', 'i': '4', 'j': '5', 'k': '5', 'l': '5',
    'm': '6', 'n': '6', 'o': '6', 'p': '7', 'q': '7', 'r': '7',
    's': '7', 't': '8', 'u': '8', 'v': '8', 'w': '9', 'x': '9',
    'y': '9', 'z': '9',
}
def phone2numeric(phone):
    """Converts a phone number with letters into its numeric equivalent."""
    return _PHONE_LETTER_RE.sub(
        lambda m: _PHONE_CHAR2NUMBER.get(m.group(0).lower()), phone)
phone2numeric = allow_lazy(phone2numeric)
# From http://www.xhaus.com/alan/python/httpcomp.html#gzip
# Used with permission.
def compress_string(s):
    """Gzip-compress the byte string ``s`` and return the compressed bytes."""
    # io.BytesIO exists on Python 2.6+ and Python 3, unlike the
    # Python-2-only cStringIO module used previously.
    import io
    import gzip
    zbuf = io.BytesIO()
    zfile = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
    zfile.write(s)
    zfile.close()
    return zbuf.getvalue()
ustring_re = re.compile(u"([\u0080-\uffff])")
def javascript_quote(s, quote_double_quotes=False):
    """
    Escape a string for safe embedding in JavaScript source.

    Backslashes, carriage returns, newlines, tabs and single quotes are
    backslash-escaped; non-ASCII characters become \\uXXXX escapes; and,
    when ``quote_double_quotes`` is set, double quotes are replaced with
    the ``&quot;`` HTML entity.
    """
    def fix(match):
        return r"\u%04x" % ord(match.group(1))
    if type(s) == str:
        s = s.decode('utf-8')
    elif type(s) != unicode:
        # Parenthesized raise form: valid on both Python 2 and 3
        # (the old "raise TypeError, s" is a Python-2-only statement).
        raise TypeError(s)
    s = s.replace('\\', '\\\\')
    s = s.replace('\r', '\\r')
    s = s.replace('\n', '\\n')
    s = s.replace('\t', '\\t')
    s = s.replace("'", "\\'")
    if quote_double_quotes:
        # Restored from upstream Django: the replacement target had been
        # HTML-unescaped into a no-op replace('"', '"').
        s = s.replace('"', '&quot;')
    return str(ustring_re.sub(fix, s))
javascript_quote = allow_lazy(javascript_quote, unicode)
# Matches either a bare token or a token containing quoted sections
# (some_token, some_token="with spaces", 'single-quoted' variants).
smart_split_re = re.compile(r"""
    ((?:
        [^\s'"]*
        (?:
            (?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*')
            [^\s'"]*
        )+
    ) | \S+)
""", re.VERBOSE)
def smart_split(text):
    r"""
    Generator that splits a string by spaces, leaving quoted phrases together.
    Supports both single and double quotes, and supports escaping quotes with
    backslashes. In the output, strings will keep their initial and trailing
    quote marks and escaped quotes will remain escaped (the results can then
    be further processed with unescape_string_literal()).

    >>> list(smart_split(r'This is "a person\'s" test.'))
    [u'This', u'is', u'"a person\\\'s"', u'test.']
    >>> list(smart_split(r"Another 'person\'s' test."))
    [u'Another', u"'person\\'s'", u'test.']
    >>> list(smart_split(r'A "\"funky\" style" test.'))
    [u'A', u'"\\"funky\\" style"', u'test.']
    """
    text = force_unicode(text)
    for match in smart_split_re.finditer(text):
        yield match.group(0)
smart_split = allow_lazy(smart_split, unicode)
def _replace_entity(match):
    """Return the character for one matched HTML entity, or the original
    matched text when the entity cannot be resolved."""
    text = match.group(1)
    if text[0] != u'#':
        # Named entity, e.g. &amp;
        try:
            return unichr(name2codepoint[text])
        except (ValueError, KeyError):
            return match.group(0)
    # Numeric entity: &#NNN; (decimal) or &#xHHH; / &#XHHH; (hex).
    text = text[1:]
    try:
        if text[0] in u'xX':
            codepoint = int(text[1:], 16)
        else:
            codepoint = int(text)
        return unichr(codepoint)
    except ValueError:
        return match.group(0)
# Matches named, decimal and hexadecimal HTML entity references.
_entity_re = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
def unescape_entities(text):
    """Replace every HTML entity in ``text`` with its unicode character."""
    return _entity_re.sub(_replace_entity, text)
unescape_entities = allow_lazy(unescape_entities, unicode)
def unescape_string_literal(s):
    r"""
    Convert quoted string literals to unquoted strings with escaped quotes and
    backslashes unquoted::

        >>> unescape_string_literal('"abc"')
        'abc'
        >>> unescape_string_literal("'abc'")
        'abc'
        >>> unescape_string_literal('"a \"bc\""')
        'a "bc"'
        >>> unescape_string_literal("'\'ab\' c'")
        "'ab' c"
    """
    quote = s[0]
    if quote not in "\"'" or s[-1] != quote:
        raise ValueError("Not a string literal: %r" % s)
    body = s[1:-1]
    return body.replace(r'\%s' % quote, quote).replace(r'\\', '\\')
unescape_string_literal = allow_lazy(unescape_string_literal)
| {
"content_hash": "a8a6eae089c913ecd15934fb31902264",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 132,
"avg_line_length": 34.43065693430657,
"alnum_prop": 0.5590417638329447,
"repo_name": "sanjuro/RCJK",
"id": "7592cead094bd7332bbb9d002a0ecdb8dc2916f2",
"size": "9434",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vendor/django/utils/text.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "26840"
},
{
"name": "Python",
"bytes": "1109105"
},
{
"name": "R",
"bytes": "1277"
},
{
"name": "Shell",
"bytes": "6923"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the nullable ``multiplier`` field to
    ``profiles.Denominator``.

    The ``models`` dict below is South's frozen snapshot of the app's model
    definitions at the time this migration was generated — do not edit it.
    """

    def forwards(self, orm):
        """Apply the migration."""
        # Adding field 'Denominator.multiplier'
        db.add_column('profiles_denominator', 'multiplier', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True), keep_default=False)

    def backwards(self, orm):
        """Reverse the migration."""
        # Deleting field 'Denominator.multiplier'
        db.delete_column('profiles_denominator', 'multiplier')

    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'profiles.datadisplay': {
            'Meta': {'object_name': 'DataDisplay'},
            'html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']", 'null': 'True', 'blank': 'True'}),
            'record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
            'subsubtitle': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
            'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
            'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataDisplayTemplate']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
        },
        'profiles.datadisplaytemplate': {
            'Meta': {'object_name': 'DataDisplayTemplate'},
            'display_type': ('django.db.models.fields.CharField', [], {'default': "'STANDARD'", 'max_length': '11'}),
            'domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.DataDomain']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.Indicator']", 'symmetrical': 'False', 'blank': 'True'}),
            'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.GeoLevel']", 'symmetrical': 'False', 'blank': 'True'}),
            'records': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.GeoRecord']", 'symmetrical': 'False', 'blank': 'True'}),
            'source': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'subsubtitle': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
            'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
        },
        'profiles.datadomain': {
            'Meta': {'object_name': 'DataDomain'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.Indicator']", 'through': "orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
            'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
        },
        'profiles.datapoint': {
            'Meta': {'unique_together': "(('indicator', 'record', 'time'),)", 'object_name': 'DataPoint'},
            'change_from_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_from'", 'null': 'True', 'to': "orm['profiles.Time']"}),
            'change_to_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_to'", 'null': 'True', 'to': "orm['profiles.Time']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
            'record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']"}),
            'time': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Time']", 'null': 'True'})
        },
        'profiles.datasource': {
            'Meta': {'object_name': 'DataSource'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        },
        'profiles.denominator': {
            'Meta': {'object_name': 'Denominator'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'multiplier': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'sort': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
        },
        'profiles.denominatorpart': {
            'Meta': {'object_name': 'DenominatorPart'},
            'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataSource']"}),
            'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Denominator']"}),
            'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
            'part': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.IndicatorPart']"})
        },
        'profiles.geolevel': {
            'Meta': {'object_name': 'GeoLevel'},
            'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.DataSource']", 'symmetrical': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
        },
        'profiles.georecord': {
            'Meta': {'unique_together': "(('slug', 'level'), ('level', 'geo_id', 'custom_name', 'owner'))", 'object_name': 'GeoRecord'},
            'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': "orm['profiles.GeoRecord']"}),
            'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoLevel']"}),
            'mappings': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_rel_+'", 'blank': 'True', 'to': "orm['profiles.GeoRecord']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '100', 'blank': 'True'})
        },
        'profiles.indicator': {
            'Meta': {'object_name': 'Indicator'},
            'data_domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.DataDomain']", 'through': "orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
            'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '10'}),
            'display_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'display_percent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.GeoLevel']", 'symmetrical': 'False'}),
            'limitations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'long_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'purpose': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'routine_use': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'short_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
            'universe': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'})
        },
        'profiles.indicatordomain': {
            'Meta': {'object_name': 'IndicatorDomain'},
            'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataDomain']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"})
        },
        'profiles.indicatorpart': {
            'Meta': {'object_name': 'IndicatorPart'},
            'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataSource']"}),
            'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
            'time': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Time']"})
        },
        'profiles.time': {
            'Meta': {'object_name': 'Time'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
        },
        'profiles.value': {
            'Meta': {'object_name': 'Value'},
            'datapoint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataPoint']"}),
            'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Denominator']", 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
        }
    }

    complete_apps = ['profiles']
| {
"content_hash": "589600ea9e091d84585e10245bc1bb39",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 192,
"avg_line_length": 81.73869346733669,
"alnum_prop": 0.5555760481986967,
"repo_name": "ProvidencePlan/Profiles",
"id": "b6be0268a5fa0e603fd478e80db15c136faa2824",
"size": "16284",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "communityprofiles/profiles/oldmigrations/0035_auto__add_field_denominator_multiplier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "132319"
},
{
"name": "HTML",
"bytes": "146060"
},
{
"name": "JavaScript",
"bytes": "188204"
},
{
"name": "Python",
"bytes": "2668150"
},
{
"name": "Ruby",
"bytes": "4727"
},
{
"name": "Shell",
"bytes": "745"
}
],
"symlink_target": ""
} |
import os
import unittest
from collections import OrderedDict
import time
from nose.plugins.attrib import attr
from parameterized.parameterized import parameterized
from conans import DEFAULT_REVISION_V1, load, ONLY_V2
from conans.client.tools import environment_append
from conans.errors import RecipeNotFoundException, PackageNotFoundException
from conans.model.ref import ConanFileReference
from conans.test.utils.tools import TestServer, TurboTestClient, GenConanfile
from conans.util.env_reader import get_env
@attr("artifactory_ready")
@unittest.skipUnless(get_env("TESTING_REVISIONS_ENABLED", False), "Only revisions")
class InstallingPackagesWithRevisionsTest(unittest.TestCase):
    def setUp(self):
        """Create two test servers plus a revisions-enabled (v2) and a
        legacy (v1) client that both know about the two remotes."""
        self.server = TestServer()
        self.server2 = TestServer()
        self.servers = OrderedDict([("default", self.server),
                                    ("remote2", self.server2)])
        self.c_v2 = TurboTestClient(revisions_enabled=True, servers=self.servers)
        self.c_v1 = TurboTestClient(revisions_enabled=False, servers=self.servers)
        # Reference shared by all test cases in this class.
        self.ref = ConanFileReference.loads("lib/1.0@conan/testing")
    def test_install_missing_prev_deletes_outdated_prev(self):
        """If we have in a local v2 client a RREV with a PREV that doesn't match the RREV, when
        we try to install, it removes the previous outdated PREV even before resolve it"""
        pref = self.c_v2.create(self.ref)
        # Exporting a modified recipe bumps the recipe revision, leaving the
        # cached binary (PREV) pointing at the old revision.
        self.c_v2.export(self.ref, conanfile=GenConanfile().with_build_msg("REV2"))
        self.c_v2.run("install {} --build missing".format(self.ref))
        self.assertIn("WARN: The package {} doesn't belong to the installed recipe revision, "
                      "removing folder".format(pref), self.c_v2.out)
        self.assertIn("Package '{}' created".format(pref.id), self.c_v2.out)
    def test_install_binary_iterating_remotes_same_rrev(self):
        """We have two servers (remote1 and remote2), first with a recipe but the
        second one with a PREV of the binary.
        If a client installs without specifying -r remote1, it will iterate remote2 also"""
        conanfile = GenConanfile().with_package_file("file.txt", env_var="MY_VAR")
        with environment_append({"MY_VAR": "1"}):
            pref = self.c_v2.create(self.ref, conanfile=conanfile)
        self.c_v2.upload_all(self.ref, remote="default")
        # Leave "default" with the recipe only: delete its binary.
        self.c_v2.run("remove {} -p {} -f -r default".format(self.ref, pref.id))
        # Same RREV, different PREV
        with environment_append({"MY_VAR": "2"}):
            pref2 = self.c_v2.create(self.ref, conanfile=conanfile)
        self.c_v2.upload_all(self.ref, remote="remote2")
        self.c_v2.remove_all()
        self.assertEqual(pref.ref.revision, pref2.ref.revision)
        self.c_v2.run("install {}".format(self.ref))
        # Recipe resolved from "default", binary fetched from "remote2".
        self.assertIn("{} from 'default' - Downloaded".format(self.ref), self.c_v2.out)
        self.assertIn("Retrieving package {} from remote 'remote2'".format(pref.id), self.c_v2.out)
    def test_install_binary_iterating_remotes_different_rrev(self):
        """We have two servers (remote1 and remote2), first with a recipe RREV1 but the
        second one with other RREV2 a PREV of the binary.
        If a client installs without specifying -r remote1, it wont find in remote2 the binary"""
        pref = self.c_v2.create(self.ref, conanfile=GenConanfile().with_build_msg("REv1"))
        self.c_v2.upload_all(self.ref, remote="default")
        self.c_v2.run("remove {} -p {} -f -r default".format(self.ref, pref.id))
        # Different RREV this time (the recipe changed: "REv1" vs "REv2"),
        # so remote2's binary belongs to another recipe revision.
        pref = self.c_v2.create(self.ref, conanfile=GenConanfile().with_build_msg("REv2"))
        self.c_v2.upload_all(self.ref, remote="remote2")
        self.c_v2.remove_all()
        # Install, it will iterate remotes, resolving the package from remote2, but the recipe
        # from default
        self.c_v2.run("install {}".format(self.ref), assert_error=True)
        self.assertIn("{} - Missing".format(pref), self.c_v2.out)
def test_update_recipe_iterating_remotes(self):
    """We have two servers (remote1 and remote2), both with a recipe but the second one with a
    new RREV. If a client installs without specifying -r remote1, it WONT iterate
    remote2, because it is associated in the registry and have it in the cache. Unless we
    specify the -r remote2"""
    conanfile = GenConanfile().with_package_file("file.txt", env_var="MY_VAR")
    with environment_append({"MY_VAR": "1"}):
        pref = self.c_v2.create(self.ref, conanfile=conanfile)
    self.c_v2.upload_all(self.ref, remote="default")

    time.sleep(1)  # make sure the second upload gets a later timestamp

    other_v2 = TurboTestClient(revisions_enabled=True, servers=self.servers)
    # Same RREV, different new PREV uploaded to "remote2"
    with environment_append({"MY_VAR": "2"}):
        other_v2.create(self.ref, conanfile=conanfile)
    other_v2.upload_all(self.ref, remote="remote2")

    # Plain --update does not iterate remote2: the ref is associated with
    # "default" in the registry, so the cached binary is kept.
    # FIX: previously the expected line hard-coded the package id hash and
    # applied a dead ``.format(pref.id)`` to a string with no placeholder;
    # build the expected output from the actual reference and package id.
    self.c_v2.run("install {} --update".format(self.ref))
    self.assertIn("{}:{} - Cache".format(self.ref, pref.id), self.c_v2.out)

    # If we force remote2, it will find an update
    self.c_v2.run("install {} --update -r remote2".format(self.ref))
    self.assertIn("{} - Update".format(pref), self.c_v2.out)
    self.assertIn("Retrieving package {} from remote 'remote2' ".format(pref.id),
                  self.c_v2.out)
    # This is not updating the remote in the registry with a --update
    # Is this a bug?
    metadata = self.c_v2.cache.package_layout(self.ref).load_metadata()
    self.assertEqual("default", metadata.recipe.remote)
def test_diamond_revisions_conflict(self):
    """ If we have a diamond because of pinned conflicting revisions in the requirements,
    it gives an error"""
    # Two revisions of "lib1" to the server
    lib1 = ConanFileReference.loads("lib1/1.0@conan/stable")
    lib1_pref = self.c_v2.create(lib1)
    self.c_v2.upload_all(lib1)
    lib1b_pref = self.c_v2.create(lib1, conanfile=GenConanfile().with_build_msg("Rev2"))
    self.c_v2.upload_all(lib1)
    # Lib2 depending of lib1 (pinned to the first revision)
    self.c_v2.remove_all()
    lib2 = ConanFileReference.loads("lib2/1.0@conan/stable")
    self.c_v2.create(lib2, conanfile=GenConanfile().with_requirement(lib1_pref.ref))
    self.c_v2.upload_all(lib2)
    # Lib3 depending of lib1b (pinned to the second revision)
    self.c_v2.remove_all()
    lib3 = ConanFileReference.loads("lib3/1.0@conan/stable")
    self.c_v2.create(lib3, conanfile=GenConanfile().with_requirement(lib1b_pref.ref))
    self.c_v2.upload_all(lib3)
    # Project depending on both lib3 and lib2: lib1 is required with two
    # different pinned revisions -> graph conflict expected
    self.c_v2.remove_all()
    project = ConanFileReference.loads("project/1.0@conan/stable")
    self.c_v2.create(project,
                     conanfile=GenConanfile().with_requirement(lib2).with_requirement(lib3),
                     assert_error=True)
    self.assertIn("Conflict in {}\n ".format(lib3), self.c_v2.out)
    self.assertIn("Different revisions of {} has been requested".format(lib1), self.c_v2.out)
def test_alias_to_a_rrev(self):
    """ If an alias points to a RREV, it resolved that RREV and no other"""
    # Upload one revision
    pref = self.c_v2.create(self.ref)
    self.c_v2.upload_all(self.ref)
    # Upload other revision
    self.c_v2.create(self.ref, conanfile=GenConanfile().with_build_msg("Build Rev 2"))
    self.c_v2.upload_all(self.ref)
    self.c_v2.remove_all()
    # Create an alias to the first revision (full_repr includes "#<rrev>")
    self.c_v2.run("alias lib/latest@conan/stable {}".format(pref.ref.full_repr()))
    alias_ref = ConanFileReference.loads("lib/latest@conan/stable")
    exported = load(self.c_v2.cache.package_layout(alias_ref).conanfile())
    # The generated alias conanfile must pin the revisioned reference
    self.assertIn('alias = "{}"'.format(pref.ref.full_repr()), exported)

    self.c_v2.upload_all(ConanFileReference.loads("lib/latest@conan/stable"))
    self.c_v2.remove_all()

    self.c_v2.run("install lib/latest@conan/stable")
    # Shouldn't be packages in the cache
    self.assertNotIn("doesn't belong to the installed recipe revision", self.c_v2.out)

    # Read current revision: must be the first (aliased) one, not the latest
    self.assertEqual(pref.ref.revision, self.c_v2.recipe_revision(self.ref))
@parameterized.expand([(True,), (False,)])
def test_install_rev0(self, v1):
    """If we upload a revision with a v1 client it is stored as rev0 in the server then:
    0. In the cache the revision is kept, not overwrite with the "0"
    If we install it with a fresh client:
    1. With revisions enabled, it is 0 in the metadata (not supported)
    2. Without revisions, it is 0 in the metadata"""
    # Upload with v1: locally a real revision is computed...
    pref = self.c_v1.create(self.ref)
    self.assertNotEqual(pref.revision, DEFAULT_REVISION_V1)
    self.assertNotEqual(pref.ref.revision, DEFAULT_REVISION_V1)
    # ...but the server stores it under the special "0" revision
    remote_ref = self.c_v1.upload_all(self.ref)
    self.assertEqual(remote_ref.revision, DEFAULT_REVISION_V1)

    # Check remote revision and time
    remote_rev_time = self.server.recipe_revision_time(remote_ref)
    self.assertIsNotNone(remote_rev_time)

    # The local cache keeps the real revision, not "0"
    local_rev = self.c_v1.recipe_revision(self.ref)
    self.assertNotEqual(local_rev, DEFAULT_REVISION_V1)
    self.assertEqual(local_rev, pref.ref.revision)

    # Remove all from c_v1 and install fresh with either client flavor
    self.c_v1.remove_all()

    client = self.c_v1 if v1 else self.c_v2
    client.run("install {}".format(self.ref))
    local_rev = client.recipe_revision(self.ref)
    local_prev = client.package_revision(pref)
    # Either way, the metadata records the server-side "0" revision
    self.assertEqual(local_rev, DEFAULT_REVISION_V1)
    self.assertEqual(local_prev, DEFAULT_REVISION_V1)
def test_revision_metadata_update_on_install(self):
    """If a clean v2 client installs a RREV/PREV from a server, it get
    the revision from upstream"""
    # Upload with v2
    pref = self.c_v2.create(self.ref)
    self.c_v2.upload_all(self.ref)
    # Remove all from c_v2 local: metadata queries must now raise
    self.c_v2.remove_all()
    self.assertRaises(RecipeNotFoundException, self.c_v2.recipe_revision, self.ref)

    self.c_v2.run("install {}".format(self.ref))
    # After install, the cache metadata matches the uploaded RREV/PREV
    local_rev = self.c_v2.recipe_revision(self.ref)
    local_prev = self.c_v2.package_revision(pref)
    self.assertEqual(local_rev, pref.ref.revision)
    self.assertEqual(local_prev, pref.revision)
def test_revision_metadata_update_on_update(self):
    """
    A client v2 upload a recipe revision
    Another client v2 upload a new recipe revision
    The first client can upgrade from the remote"""
    client = TurboTestClient(revisions_enabled=True, servers={"default": self.server})
    client2 = TurboTestClient(revisions_enabled=True, servers={"default": self.server})

    pref1 = client.create(self.ref)
    client.upload_all(self.ref)

    rrev1_time_remote = self.server.recipe_revision_time(pref1.ref)
    prev1_time_remote = self.server.package_revision_time(pref1)

    time.sleep(1)  # Wait a second, to be considered an update

    pref2 = client2.create(self.ref, conanfile=GenConanfile().with_build_msg("REV2"))
    client2.upload_all(self.ref)

    rrev2_time_remote = self.server.recipe_revision_time(pref2.ref)
    prev2_time_remote = self.server.package_revision_time(pref2)

    # Check different revision times
    self.assertNotEqual(rrev1_time_remote, rrev2_time_remote)
    self.assertNotEqual(prev1_time_remote, prev2_time_remote)

    # First client updates to the newer revision uploaded by the second
    client.run("install {} --update".format(self.ref))
    self.assertIn("Package installed {}".format(pref2.id), client.out)

    rrev = client.recipe_revision(self.ref)
    self.assertIsNotNone(rrev)

    prev = client.package_revision(pref2)
    self.assertIsNotNone(prev)
def test_revision_update_on_package_update(self):
    """
    A client v2 upload RREV with PREV1
    Another client v2 upload the same RREV with PREV2
    The first client can upgrade from the remote, only
    in the package, because the recipe is the same and it is not updated"""
    client = TurboTestClient(revisions_enabled=True, servers={"default": self.server})
    client2 = TurboTestClient(revisions_enabled=True, servers={"default": self.server})

    # Package contents depend on MY_VAR, so same recipe -> different PREVs
    conanfile = GenConanfile().with_package_file("file", env_var="MY_VAR")
    with environment_append({"MY_VAR": "1"}):
        pref = client.create(self.ref, conanfile=conanfile)
    client.upload_all(self.ref)

    time.sleep(1)  # Wait a second, to be considered an update

    with environment_append({"MY_VAR": "2"}):
        pref2 = client2.create(self.ref, conanfile=conanfile)
    client2.upload_all(self.ref)

    prev1_time_remote = self.server.package_revision_time(pref)
    prev2_time_remote = self.server.package_revision_time(pref2)
    self.assertNotEqual(prev1_time_remote, prev2_time_remote)  # Two package revisions

    client.run("install {} --update".format(self.ref))
    # Recipe stays cached; only the binary is re-retrieved
    self.assertIn("{} from 'default' - Cache".format(self.ref), client.out)
    self.assertIn("Retrieving package {}".format(pref.id), client.out)

    prev = client.package_revision(pref)
    self.assertIsNotNone(prev)
@parameterized.expand([(True,), (False,)])
def test_revision_mismatch_packages_in_local(self, v1):
    """If we have a recipe that doesn't match the local package:
    1. With revisions enabled, it is not resolved.
    2. Without revisions enabled it is resolved"""
    client = self.c_v1 if v1 else self.c_v2
    pref = client.create(self.ref)
    # Export (not create) a second revision: no binary is built for it
    ref2 = client.export(self.ref, conanfile=GenConanfile().with_build_msg("REV2"))
    # Now we have two RREVs and a PREV corresponding to the first one
    self.assertEqual(pref.ref.copy_clear_rev(), ref2.copy_clear_rev())
    self.assertNotEqual(pref.ref.revision, ref2.revision)

    # Now we try to install the self.ref, the binary is missing when using revisions
    command = "install {}".format(self.ref)
    if v1:
        client.run(command)
        self.assertIn("{} - Cache".format(pref), client.out)
    else:
        client.run(command, assert_error=True)
        self.assertIn("The package {} doesn't belong to the installed "
                      "recipe".format(pref), client.out)
        self.assertIn("ERROR: Missing prebuilt package for '{}'".format(self.ref), client.out)
@parameterized.expand([(True,), (False,)])
def test_revision_install_explicit_mismatch_rrev(self, v1):
    """If we have a recipe in local, but we request to install a different one with RREV
    1. With revisions enabled, it fail and won't look the remotes unless --update
    2. Without revisions enabled it raises an input error"""
    client = self.c_v1 if v1 else self.c_v2
    ref = client.export(self.ref)
    command = "install {}#fakerevision".format(ref)
    if v1:
        # v1 clients cannot express revisions on the command line at all
        client.run(command, assert_error=True)
        self.assertIn("ERROR: Revisions not enabled in the client, "
                      "specify a reference without revision", client.out)
    else:
        # Without --update, only the local cache is checked
        client.run(command, assert_error=True)
        self.assertIn("The recipe in the local cache doesn't match the specified revision. "
                      "Use '--update' to check in the remote", client.out)
        # With --update, the remotes are searched but the revision is bogus
        command = "install {}#fakerevision --update".format(ref)
        client.run(command, assert_error=True)
        self.assertIn("Unable to find '{}#fakerevision' in remotes".format(ref), client.out)

        # Now create a new revision with other client and upload it, we will request it
        new_client = TurboTestClient(revisions_enabled=True, servers=self.servers)
        pref = new_client.create(self.ref, conanfile=GenConanfile().with_build_msg("Rev2"))
        new_client.upload_all(self.ref)

        # Repeat the install --update pointing to the new reference
        client.run("install {} --update".format(pref.ref.full_repr()))
        self.assertIn("{} from 'default' - Downloaded".format(self.ref), client.out)
@parameterized.expand([(True,), (False,)])
def test_revision_mismatch_packages_remote(self, v1):
    """If we have a recipe that doesn't match a remote recipe:
    1. With revisions enabled, it is not resolved in the remote.
    2. Without revisions enabled it is resolved"""
    self.c_v2.create(self.ref)
    self.c_v2.upload_all(self.ref)
    client = self.c_v1 if v1 else self.c_v2
    client.remove_all()
    # Local recipe is a different revision than the one uploaded
    client.export(self.ref, conanfile=GenConanfile().with_build_msg("REV2"))
    command = "install {}".format(self.ref)

    if v1:
        client.run(command)
        self.assertIn("{}: Package installed".format(self.ref), client.out)
    else:
        client.run(command, assert_error=True)
        self.assertIn("Can't find a '{}' package".format(self.ref), client.out)
def test_json_output(self):
    """The --json output of a create must contain the recipe revision as part
    of the installed recipe's reference id."""
    import json  # kept function-local (file top not in view); hoisted from mid-body

    client = TurboTestClient()
    client.save({"conanfile.py": str(GenConanfile())})
    client.run("create . {} --json file.json".format(self.ref.full_repr()))
    json_path = os.path.join(client.current_folder, "file.json")
    data = json.loads(load(json_path))
    # The "id" of the installed recipe must parse as a revisioned reference
    ref = ConanFileReference.loads(data["installed"][0]["recipe"]["id"])
    self.assertIsNotNone(ref.revision)
@unittest.skipUnless(get_env("TESTING_REVISIONS_ENABLED", False), "Only revisions")
class RevisionsInLocalCacheTest(unittest.TestCase):
    """Checks that create/export/remove keep the recipe and package revision
    metadata of the local cache up to date, both with revisions enabled (v2)
    and disabled (v1)."""

    def setUp(self):
        self.server = TestServer()
        # Two clients against the same server: one with revisions, one without
        self.c_v2 = TurboTestClient(revisions_enabled=True, servers={"default": self.server})
        self.c_v1 = TurboTestClient(revisions_enabled=False, servers={"default": self.server})
        self.ref = ConanFileReference.loads("lib/1.0@conan/testing")

    @parameterized.expand([(True,), (False,)])
    def test_create_metadata(self, v1):
        """When a create is executed, the recipe & package revision are updated in the cache"""
        client = self.c_v1 if v1 else self.c_v2
        pref = client.create(self.ref)
        # Check recipe revision
        rev = client.recipe_revision(self.ref)
        self.assertEqual(pref.ref.revision, rev)
        self.assertIsNotNone(rev)
        # Check package revision
        prev = client.package_revision(pref)
        self.assertEqual(pref.revision, prev)
        self.assertIsNotNone(prev)
        # Create new revision, check that it changes
        client.create(self.ref, conanfile=GenConanfile().with_build_msg("Rev2"))
        rev2 = client.recipe_revision(self.ref)
        prev2 = client.package_revision(pref)

        self.assertNotEqual(rev2, rev)
        self.assertNotEqual(prev2, prev)

        self.assertIsNotNone(rev2)
        self.assertIsNotNone(prev2)

    @parameterized.expand([(True,), (False,)])
    def test_new_exported_recipe_clears_outdated_packages(self, v1):
        """With revisions enabled, creating a new recipe revision removes local
        binaries that belong to older revisions; with v1 they are kept."""
        client = self.c_v1 if v1 else self.c_v2
        conanfile = GenConanfile().with_setting("os")
        pref_outdated = client.create(self.ref, conanfile=conanfile, args="-s os=Windows")
        pref_ok = client.create(self.ref, conanfile=conanfile.with_build_msg("rev2"),
                                args="-s os=Linux")
        msg = "Removing the local binary packages from different recipe revisions"
        if v1:
            self.assertNotIn(msg, client.out)
            self.assertTrue(client.package_exists(pref_outdated.copy_clear_rev()))
        else:
            self.assertIn(msg, client.out)
            self.assertFalse(client.package_exists(pref_outdated.copy_clear_rev()))

        self.assertTrue(client.package_exists(pref_ok))

    @parameterized.expand([(True,), (False,)])
    def test_export_metadata(self, v1):
        """When a export is executed, the recipe revision is updated in the cache"""
        client = self.c_v1 if v1 else self.c_v2
        ref = client.export(self.ref)
        # Check recipe revision
        rev = client.recipe_revision(self.ref)
        self.assertEqual(ref.revision, rev)
        self.assertIsNotNone(rev)
        # Export new revision, check that it changes
        client.export(self.ref, conanfile=GenConanfile().with_build_msg("Rev2"))
        rev2 = client.recipe_revision(self.ref)

        self.assertNotEqual(rev2, rev)
        self.assertIsNotNone(rev2)

    def test_remove_metadata(self):
        """If I remove a package, the metadata is cleared"""
        pref = self.c_v2.create(self.ref)
        self.c_v2.upload_all(self.ref)
        self.c_v2.remove_all()
        self.c_v2.run("install {}".format(self.ref))
        # Removing only the package clears the package metadata...
        self.c_v2.run("remove {} -p {} -f".format(pref.ref, pref.id))
        self.assertRaises(PackageNotFoundException, self.c_v2.package_revision, pref)
        # ...but the recipe metadata survives
        rev = self.c_v2.recipe_revision(pref.ref)
        self.assertIsNotNone(rev)
        self.c_v2.remove_all()
        self.assertRaises(RecipeNotFoundException, self.c_v2.recipe_revision, pref.ref)
@unittest.skipUnless(get_env("TESTING_REVISIONS_ENABLED", False), "Only revisions")
class RemoveWithRevisionsTest(unittest.TestCase):
    """Behavior of 'conan remove' (local and remote) when references and
    packages carry recipe/package revisions, with v1 and v2 clients."""

    def setUp(self):
        self.server = TestServer()
        self.c_v2 = TurboTestClient(revisions_enabled=True, servers={"default": self.server})
        self.c_v1 = TurboTestClient(revisions_enabled=False, servers={"default": self.server})
        self.ref = ConanFileReference.loads("lib/1.0@conan/testing")

    def test_remove_oudated_packages_locally_removes_orphan_prevs(self):
        """if we run 'conan remove --outdated' locally, it removes the PREVS belonging to a
        different RREV"""
        # V1 client exports two recipes, no packages are removed in the second
        # create, even belonging to a different recipe
        pref = self.c_v1.create(self.ref, conanfile=GenConanfile().with_setting("os"),
                                args="-s os=Windows")
        pref2 = self.c_v1.create(self.ref, conanfile=GenConanfile().with_setting("os").
                                 with_build_msg("I'm rev 2"),
                                 args="-s os=Linux")
        self.assertNotIn("Removing the local binary packages from different recipe revisions",
                         self.c_v1.out)

        # Now we enable the revisions in the c_v1, to make sure that with revisions
        # the orphan is also removed
        self.c_v1.enable_revisions()
        layout = self.c_v1.cache.package_layout(pref.ref.copy_clear_rev())

        # Assert pref (outdated) is in the cache
        self.assertTrue(layout.package_exists(pref.copy_clear_rev()))
        # Assert pref2 is also in the cache
        self.assertTrue(layout.package_exists(pref2.copy_clear_rev()))

        self.c_v1.run("remove {} --outdated -f".format(pref.ref))
        # Assert pref (outdated) is not in the cache anymore
        self.assertFalse(layout.package_exists(pref.copy_clear_rev()))
        # Assert pref2 is in the cache
        self.assertTrue(layout.package_exists(pref2.copy_clear_rev()))

    @parameterized.expand([(True,), (False,)])
    def test_remove_oudated_packages_remote(self, v1):
        """In a server with revisions uploaded no package is outdated so nothing is done, unless
        a v1 upload mixed packages to a v1 or some hardcoded revision happen"""
        # Both uploads land on the single "0" revision of a v1 server,
        # so the Windows binary becomes outdated w.r.t. the second recipe
        self.c_v1.create(self.ref, conanfile=GenConanfile().
                         with_setting("os").
                         with_build_msg("I'm revision 1"),
                         args="-s os=Windows")
        self.c_v1.upload_all(self.ref)

        # Different revision, different package_id (but everything uploaded to rev0)
        self.c_v1.create(self.ref, conanfile=GenConanfile().
                         with_setting("os").
                         with_build_msg("I'm revision 2"),
                         args="-s os=Linux")
        self.c_v1.upload_all(self.ref)

        # Verify in the server there is only one revision "0"
        revs = self.server.server_store.get_recipe_revisions(self.ref)
        self.assertEqual([r.revision for r in revs], [DEFAULT_REVISION_V1])

        # Verify using v1 we can search for the outdated
        data = self.c_v1.search(self.ref, remote="default", args="--outdated")
        oss = [p["settings"]["os"] for p in data["results"][0]["items"][0]["packages"]]
        self.assertEqual(set(["Windows"]), set(oss))
        self.assertTrue(data["results"][0]["items"][0]["packages"][0]["outdated"])

        # Verify we can remove it both using v1 or v2
        client = self.c_v1 if v1 else self.c_v2
        client.run("remove {} -r default --outdated -f".format(self.ref))

        # The Windows package is not there anymore
        data = self.c_v1.search(self.ref, remote="default", args="--outdated")
        oss = [p["settings"]["os"] for p in data["results"][0]["items"][0]["packages"]]
        self.assertEqual([], oss)

        # But the Linux package is there, not outdated
        data = self.c_v1.search(self.ref, remote="default")
        oss = [p["settings"]["os"] for p in data["results"][0]["items"][0]["packages"]]
        self.assertEqual(set(["Linux"]), set(oss))

    @parameterized.expand([(True,), (False,)])
    def test_remove_local_recipe(self, v1):
        """Locally:
        When I remove a recipe without RREV, everything is removed.
        When I remove a recipe with RREV only if the local revision matches is removed"""
        client = self.c_v1 if v1 else self.c_v2

        # If I remove the ref, the revision is gone, of course
        ref1 = client.export(self.ref)
        client.run("remove {} -f".format(ref1.copy_clear_rev().full_repr()))
        self.assertFalse(client.recipe_exists(self.ref))

        # If I remove a ref with a wrong revision, the revision is not removed
        ref1 = client.export(self.ref)
        fakeref = ref1.copy_with_rev("fakerev")
        # v1 error messages print the reference without the revision part
        full_ref = fakeref.full_repr() if not v1 else str(fakeref)
        client.run("remove {} -f".format(fakeref.full_repr()), assert_error=True)
        self.assertIn("ERROR: Recipe not found: '%s'" % full_ref, client.out)
        self.assertTrue(client.recipe_exists(self.ref))

    @parameterized.expand([(True,), (False,)])
    def test_remove_local_package(self, v1):
        """Locally:
        When I remove a recipe without RREV, the package is removed.
        When I remove a recipe with RREV only if the local revision matches is removed
        When I remove a package with PREV and not RREV it raises an error
        When I remove a package with RREV and PREV only when both matches is removed"""
        client = self.c_v1 if v1 else self.c_v2

        # If I remove the ref without RREV, the packages are also removed
        pref1 = client.create(self.ref)
        client.run("remove {} -f".format(pref1.ref.copy_clear_rev().full_repr()))
        self.assertFalse(client.package_exists(pref1))

        # If I remove the ref with fake RREV, the packages are not removed
        pref1 = client.create(self.ref)
        fakeref = pref1.ref.copy_with_rev("fakerev")
        str_ref = fakeref.full_repr() if not v1 else str(fakeref)
        client.run("remove {} -f".format(fakeref.full_repr()), assert_error=True)
        self.assertTrue(client.package_exists(pref1))
        self.assertIn("Recipe not found: '{}'".format(str_ref), client.out)

        # If I remove the ref with valid RREV, the packages are removed
        pref1 = client.create(self.ref)
        client.run("remove {} -f".format(pref1.ref.full_repr()))
        self.assertFalse(client.package_exists(pref1))

        # If I remove the ref without RREV but specifying PREV it raises
        pref1 = client.create(self.ref)
        command = "remove {} -f -p {}#{}".format(pref1.ref.copy_clear_rev().full_repr(),
                                                 pref1.id, pref1.revision)
        client.run(command, assert_error=True)
        self.assertTrue(client.package_exists(pref1))
        self.assertIn("Specify a recipe revision if you specify a package revision", client.out)

        # A wrong PREV doesn't remove the PREV
        pref1 = client.create(self.ref)
        command = "remove {} -f -p {}#fakeprev".format(pref1.ref.full_repr(), pref1.id)
        client.run(command, assert_error=True)
        self.assertTrue(client.package_exists(pref1))
        self.assertIn("Binary package not found", client.out)

        # Everything correct, removes the unique local package revision
        pref1 = client.create(self.ref)
        command = "remove {} -f -p {}#{}".format(pref1.ref.full_repr(), pref1.id, pref1.revision)
        client.run(command)
        self.assertFalse(client.package_exists(pref1))

    @parameterized.expand([(True, ), (False, )])
    def test_remove_remote_recipe(self, v1):
        """When a client removes a reference, it removes ALL revisions, no matter
        if the client is v1 or v2"""
        # Upload two revisions of the same recipe
        pref1 = self.c_v2.create(self.ref)
        self.c_v2.upload_all(pref1.ref)

        pref2 = self.c_v2.create(self.ref,
                                 conanfile=GenConanfile().with_build_msg("RREV 2!"))
        self.c_v2.upload_all(pref2.ref)

        self.assertNotEqual(pref1, pref2)

        remover_client = self.c_v1 if v1 else self.c_v2

        # Remove ref without revision in a remote
        remover_client.run("remove {} -f -r default".format(self.ref))
        self.assertFalse(self.server.recipe_exists(self.ref))
        self.assertFalse(self.server.recipe_exists(pref1.ref))
        self.assertFalse(self.server.recipe_exists(pref2.ref))
        self.assertFalse(self.server.package_exists(pref1))
        self.assertFalse(self.server.package_exists(pref2))

    @parameterized.expand([(True, ), (False, )])
    def test_remove_remote_recipe_revision(self, v1):
        """If a client removes a recipe with revision:
        - If the client is v1 will fail (it can't send the revision through the API)
        - If the client is v2 will remove only that revision"""
        # Upload two revisions of the same recipe
        pref1 = self.c_v2.create(self.ref)
        self.c_v2.upload_all(pref1.ref)

        pref2 = self.c_v2.create(self.ref,
                                 conanfile=GenConanfile().with_build_msg("RREV 2!"))
        self.c_v2.upload_all(pref2.ref)

        self.assertNotEqual(pref1, pref2)

        remover_client = self.c_v1 if v1 else self.c_v2

        # Remove ref without revision in a remote
        command = "remove {} -f -r default".format(pref1.ref.full_repr())
        if v1:
            remover_client.run(command, assert_error=True)
            self.assertIn("Revisions not enabled in the client", remover_client.out)
        else:
            remover_client.run(command)
            # Only the requested revision is gone; the other survives
            self.assertFalse(self.server.recipe_exists(pref1.ref))
            self.assertTrue(self.server.recipe_exists(pref2.ref))

    @parameterized.expand([(True,), (False,)])
    def test_remove_remote_package(self, v1):
        """When a client removes a package, without RREV, it removes the package from ALL
        RREVs"""
        pref1 = self.c_v2.create(self.ref)
        self.c_v2.upload_all(pref1.ref)

        pref2 = self.c_v2.create(self.ref,
                                 conanfile=GenConanfile().with_build_msg("RREV 2!"))
        self.c_v2.upload_all(pref2.ref)

        # Same package id in both revisions (no settings/options involved)
        self.assertEqual(pref1.id, pref2.id)
        # Locally only one revision exists at the same time
        self.assertFalse(self.c_v2.package_exists(pref1))
        self.assertTrue(self.c_v2.package_exists(pref2))

        remover_client = self.c_v1 if v1 else self.c_v2
        # Remove pref without RREV in a remote
        remover_client.run("remove {} -p {} -f -r default".format(self.ref, pref2.id))
        # Recipes stay; the binary disappears from every revision
        self.assertTrue(self.server.recipe_exists(pref1.ref))
        self.assertTrue(self.server.recipe_exists(pref2.ref))
        self.assertFalse(self.server.package_exists(pref1))
        self.assertFalse(self.server.package_exists(pref2))

    @parameterized.expand([(True,), (False,)])
    def test_remove_remote_package_revision(self, v1):
        """When a client removes a package with PREV
        (conan remove zlib/1.0@conan/stable -p 12312#PREV)
        - If not RREV, the client fails
        - If RREV and PREV:
            - If v1 it fails in the client (cannot transmit revisions with v1)
            - If v2 it removes only that PREV
        """
        # First RREV
        pref1 = self.c_v2.create(self.ref)
        self.c_v2.upload_all(pref1.ref)

        # Second RREV with two PREVS (exactly same conanfile, different package files)
        rev2_conanfile = GenConanfile().with_build_msg("RREV 2!")\
                                       .with_package_file("file", env_var="MY_VAR")
        with environment_append({"MY_VAR": "1"}):
            pref2 = self.c_v2.create(self.ref, conanfile=rev2_conanfile)
            self.c_v2.upload_all(pref2.ref)

        with environment_append({"MY_VAR": "2"}):
            pref2b = self.c_v2.create(self.ref, conanfile=rev2_conanfile)
            self.c_v2.upload_all(pref2b.ref)

        # Check created revisions
        self.assertEqual(pref1.id, pref2.id)
        self.assertEqual(pref2.id, pref2b.id)
        self.assertEqual(pref2.ref.revision, pref2b.ref.revision)
        self.assertNotEqual(pref2.revision, pref2b.revision)

        remover_client = self.c_v1 if v1 else self.c_v2

        # Remove PREV without RREV in a remote, the client has to fail
        command = "remove {} -p {}#{} -f -r default".format(self.ref, pref2.id, pref2.revision)
        remover_client.run(command, assert_error=True)
        self.assertIn("Specify a recipe revision if you specify a package revision",
                      remover_client.out)

        # Remove package with RREV and PREV
        command = "remove {} -p {}#{} -f -r default".format(pref2.ref.full_repr(),
                                                            pref2.id, pref2.revision)
        if v1:
            remover_client.run(command, assert_error=True)
            self.assertIn("Revisions not enabled in the client", remover_client.out)
        else:
            remover_client.run(command)
            # Only pref2's PREV is removed; pref1 and pref2b survive
            self.assertTrue(self.server.recipe_exists(pref1.ref))
            self.assertTrue(self.server.recipe_exists(pref2.ref))
            self.assertTrue(self.server.recipe_exists(pref2b.ref))
            self.assertTrue(self.server.package_exists(pref1))
            self.assertTrue(self.server.package_exists(pref2b))
            self.assertFalse(self.server.package_exists(pref2))

            # Try to remove a missing revision
            command = "remove {} -p {}#fakerev -f -r default".format(pref2.ref.full_repr(),
                                                                     pref2.id)
            remover_client.run(command, assert_error=True)
            fakeref = pref2.copy_with_revs(pref2.ref.revision, "fakerev")
            self.assertIn("Binary package not found: '{}'".format(fakeref.full_repr()),
                          remover_client.out)
@unittest.skipUnless(get_env("TESTING_REVISIONS_ENABLED", False), "Only revisions")
class SearchingPackagesWithRevisions(unittest.TestCase):
def setUp(self):
    # Two remotes so "-r all" searches can be exercised
    self.server = TestServer()
    self.server2 = TestServer()
    servers = OrderedDict([("default", self.server),
                           ("remote2", self.server2)])
    self.c_v2 = TurboTestClient(revisions_enabled=True, servers=servers)
    self.c_v1 = TurboTestClient(revisions_enabled=False, servers=servers)
    self.ref = ConanFileReference.loads("lib/1.0@conan/testing")
@parameterized.expand([(True,), (False,)])
def search_outdated_packages_locally_without_rrev_test(self, v1):
    """If we search for the packages of a ref without specifying the RREV using --outdated:
    it shows the packages not matching the current recipe revision"""
    # Create locally a package outdated, because we export a new recipe revision
    self.c_v1.create(self.ref)
    ref = self.c_v1.export(self.ref, conanfile=GenConanfile().
                           with_build_msg("I'm your father, rev2"))
    # For the v2 variant, flip the same client into revisions mode
    if not v1:
        self.c_v1.enable_revisions()

    data = self.c_v1.search(ref, args="--outdated")
    self.assertTrue(data["results"][0]["items"][0]["packages"][0]["outdated"])
@parameterized.expand([(True,), (False,)])
def search_outdated_packages_locally_with_rrev_test(self, v1):
    """If we search for the packages of a ref specifying the RREV using --outdated:
     - If the RREV do not exists it will raise
     - If the RREV exists it won't show anything, if the recipe is there, is the current one
    """
    # Create locally a package outdated, because we export a new recipe revision
    client = self.c_v1 if v1 else self.c_v2
    client.create(self.ref)
    ref = client.export(self.ref, conanfile=GenConanfile().
                        with_build_msg("I'm your father, rev2"))

    # Existing RREV: nothing is outdated w.r.t. the current recipe
    data = client.search(ref.full_repr(), args="--outdated")
    self.assertEqual([], data["results"][0]["items"][0]["packages"])

    # Non-existing RREV: the search errors out
    client.search("{}#fakerev".format(ref), args="--outdated", assert_error=True)
    self.assertIn("Recipe not found: 'lib/1.0@conan/testing#fakerev'", client.out)
def search_outdated_packages_remote_test(self):
    """If we search for outdated packages in a remote, it has to be
    always empty, unless it is the "0" revision that contain some mixed packages uploaded with
    a client with revisions disabled
    """
    # v1 uploads collapse everything into the single "0" server revision,
    # so the Windows binary ends up outdated w.r.t. the second recipe
    self.c_v1.create(self.ref, conanfile=GenConanfile().
                     with_setting("os").
                     with_build_msg("I'm revision 1"),
                     args="-s os=Windows")
    self.c_v1.upload_all(self.ref)

    # Different revision, different package_id (but everything uploaded to rev0)
    self.c_v1.create(self.ref, conanfile=GenConanfile().
                     with_setting("os").
                     with_build_msg("I'm revision 2"),
                     args="-s os=Linux")
    self.c_v1.upload_all(self.ref)

    # Verify in the server there is only one revision "0"
    revs = self.server.server_store.get_recipe_revisions(self.ref)
    self.assertEqual([r.revision for r in revs], [DEFAULT_REVISION_V1])

    # Verify if we can reach both packages with v1 (The Windows is outdated)
    data = self.c_v1.search(self.ref, remote="default")
    oss = [p["settings"]["os"] for p in data["results"][0]["items"][0]["packages"]]
    self.assertEqual(set(["Windows", "Linux"]), set(oss))

    # Verify using v1 we can search for the outdated
    data = self.c_v1.search(self.ref, remote="default", args="--outdated")
    oss = [p["settings"]["os"] for p in data["results"][0]["items"][0]["packages"]]
    self.assertEqual(set(["Windows"]), set(oss))
    self.assertTrue(data["results"][0]["items"][0]["packages"][0]["outdated"])

    # Verify using v2 if we can get the outdated
    data = self.c_v2.search(self.ref, remote="default", args="--outdated")
    oss = [p["settings"]["os"] for p in data["results"][0]["items"][0]["packages"]]
    self.assertEqual(set(["Windows"]), set(oss))
    self.assertTrue(data["results"][0]["items"][0]["packages"][0]["outdated"])

    # Verify using v2 and specifying RREV we can get the outdated
    data = self.c_v2.search(self.ref.copy_with_rev(DEFAULT_REVISION_V1),
                            remote="default", args="--outdated")
    oss = [p["settings"]["os"] for p in data["results"][0]["items"][0]["packages"]]
    self.assertEqual(set(["Windows"]), set(oss))
    self.assertTrue(data["results"][0]["items"][0]["packages"][0]["outdated"])
@parameterized.expand([(True,), (False,)])
def search_all_remotes_with_rrev_test(self, v1):
    """If we search for the packages of a ref with the RREV in the "all" remote:

     - With an v1 client, it fails
     - With an v2 client, it shows the packages for that specific RREV, in all the remotes,
       in an isolated way, just as we made it calling Conan N times

    No matter how many PREVS are uploaded it returns package references not duplicated"""
    # First revision with 1 binary, Windows
    # Second revision with 1 binary for Macos
    # Third revision with 2 binaries for SunOS and FreeBSD
    revisions = [{"os": "Windows"}], \
                [{"os": "Macos"}], \
                [{"os": "SunOS"}, {"os": "FreeBSD"}]
    refs = self.c_v2.massive_uploader(self.ref, revisions, remote="default", num_prev=2)
    self.c_v2.remove_all()
    # In the second remote only one revision, with one binary (two PREVS)
    revisions = [[{"os": "Linux"}]]
    refs2 = self.c_v2.massive_uploader(self.ref, revisions, remote="remote2", num_prev=2)
    self.c_v2.remove_all()

    # Ensure that the first revision in the first remote is the same than in the second one
    revision_ref = refs[0][0].ref
    self.assertEqual(revision_ref.revision, refs2[0][0].ref.revision)
    self.assertNotEqual(refs[1][0].ref.revision, refs2[0][0].ref.revision)

    # Check that in the remotes there are the packages we expect
    self.assertTrue(self.server.package_exists(refs[0][0]))
    self.assertTrue(self.server2.package_exists(refs2[0][0]))

    client = self.c_v1 if v1 else self.c_v2

    if v1:
        # v1 cannot send the revision through the API
        client.search(revision_ref.full_repr(), remote="all", assert_error=True)
        self.assertIn("ERROR: Revisions not enabled in the client, "
                      "specify a reference without revision", client.out)
    else:
        # One result entry per remote, each restricted to the requested RREV
        data = client.search(revision_ref.full_repr(), remote="all")
        oss_r1 = [p["settings"]["os"] for p in data["results"][0]["items"][0]["packages"]]
        oss_r2 = [p["settings"]["os"] for p in data["results"][1]["items"][0]["packages"]]
        self.assertEqual(["Windows"], oss_r1)
        self.assertEqual(["Linux"], oss_r2)
    @parameterized.expand([(True,), (False,)])
    def search_all_remotes_without_rrev_test(self, v1):
        """If we search for the packages of a ref without specifying the RREV in the "all" remote:
        - With an v1 client, it shows all the packages for all the revisions, in all the remotes
        - With an v2 client, it shows the packages for the latest, in all the remotes,
          in an isolated way, just as we made it calling Conan N times
        No matter how many PREVS are uploaded it returns package references not duplicated"""
        # First revision with 1 binary, Windows
        # Second revision with 1 binary for Macos
        # Third revision with 2 binaries for SunOS and FreeBSD
        revisions = [{"os": "Windows"}], \
                    [{"os": "Macos"}], \
                    [{"os": "SunOS"}, {"os": "FreeBSD"}]
        self.c_v2.massive_uploader(self.ref, revisions, remote="default", num_prev=2)
        self.c_v2.remove_all()
        # In the second remote only one revision, with one binary (two PREVS)
        revisions = [[{"os": "Linux"}]]
        self.c_v2.massive_uploader(self.ref, revisions, remote="remote2", num_prev=2)
        self.c_v2.remove_all()
        client = self.c_v1 if v1 else self.c_v2
        data = client.search(str(self.ref), remote="all")
        # One entry per remote in "results": index 0 is "default", index 1 is "remote2"
        oss_r1 = [p["settings"]["os"] for p in data["results"][0]["items"][0]["packages"]]
        oss_r2 = [p["settings"]["os"] for p in data["results"][1]["items"][0]["packages"]]
        if v1:
            # v1 aggregates the binaries of every revision
            self.assertEqual(set(["Windows", "Macos", "SunOS", "FreeBSD"]), set(oss_r1))
            self.assertEqual(set(["Linux"]), set(oss_r2))
        else:
            # v2 lists only the binaries of the latest (third) revision
            self.assertEqual(set(["SunOS", "FreeBSD"]), set(oss_r1))
            self.assertEqual(set(["Linux"]), set(oss_r2))
    @parameterized.expand([(True,), (False,)])
    def search_a_remote_package_without_rrev_test(self, v1):
        """If we search for the packages of a ref without specifying the RREV:
        - With an v1 client, it shows all the packages for all the revisions
        - With an v2 client, it shows the packages for the latest
        No matter how many PREVS are uploaded it returns package references not duplicated"""
        # Upload to the server 3 RREVS for "lib" each one with 5 package_ids, each one with
        # 2 PREVS
        # First revision with 2 binaries, Windows and Linux
        # Second revision with 1 binary for Macos
        # Third revision with 2 binaries for SunOS and FreeBSD
        revisions = [{"os": "Windows"}, {"os": "Linux"}], \
                    [{"os": "Macos"}], \
                    [{"os": "SunOS"}, {"os": "FreeBSD"}]
        self.c_v2.massive_uploader(self.ref, revisions, num_prev=2)
        client = self.c_v1 if v1 else self.c_v2
        client.remove_all()  # Make sure the search hits the remote, not the local cache
        data = client.search(str(self.ref), remote="default")
        oss = [p["settings"]["os"] for p in data["results"][0]["items"][0]["packages"]]
        if v1:
            # v1 aggregates binaries from every revision
            self.assertEqual(set(["Linux", "Windows", "Macos", "SunOS", "FreeBSD"]), set(oss))
        else:
            # v2 shows only the binaries of the latest (third) revision
            self.assertEqual(set(["SunOS", "FreeBSD"]), set(oss))
    @parameterized.expand([(True,), (False,)])
    def search_a_local_package_without_rrev_test(self, v1):
        """If we search for the packages of a ref without specifying the RREV:
        - With an v1 client, it shows all the packages in local.
        - With an v2 client, it shows the packages in local for the latest, not showing the
          packages that doesn't belong to the recipe"""
        client = self.c_v1 if v1 else self.c_v2
        # Create two RREVs, first with Linux and Windows, second with Mac only (one PREV)
        conanfile = GenConanfile().with_build_msg("Rev1").with_setting("os")
        pref1a = client.create(self.ref, conanfile=conanfile, args="-s os=Linux")
        client.create(self.ref, conanfile=conanfile, args="-s os=Windows")
        conanfile2 = GenConanfile().with_build_msg("Rev2").with_setting("os")
        pref2a = client.create(self.ref, conanfile=conanfile2, args="-s os=Macos")
        # Sanity check: the two creates really produced different recipe revisions
        self.assertNotEqual(pref1a.ref.revision, pref2a.ref.revision)
        # Search without RREV
        data = client.search(self.ref)
        oss = [p["settings"]["os"] for p in data["results"][0]["items"][0]["packages"]]
        if v1:
            self.assertEqual(set(["Linux", "Windows", "Macos"]), set(oss))
        else:
            # Only the binaries of the latest revision (Rev2) are listed
            self.assertEqual(set(["Macos"]), set(oss))
    @parameterized.expand([(True,), (False,)])
    def search_a_remote_package_with_rrev_test(self, v1):
        """If we search for the packages of a ref specifying the RREV:
        1. With v2 client it shows the packages for that RREV
        2. With v1 client it fails, because it cannot propagate the rrev with v1"""
        # Upload to the server two rrevs for "lib" and two rrevs for "lib2"
        conanfile = GenConanfile().with_build_msg("REV1").with_setting("os")
        pref = self.c_v2.create(self.ref, conanfile, args="-s os=Linux")
        self.c_v2.upload_all(self.ref)
        conanfile = GenConanfile().with_build_msg("REV2").with_setting("os")
        pref2 = self.c_v2.create(self.ref, conanfile, args="-s os=Windows")
        self.c_v2.upload_all(self.ref)
        # Ensure we have uploaded two different revisions
        self.assertNotEqual(pref.ref.revision, pref2.ref.revision)
        client = self.c_v1 if v1 else self.c_v2
        client.remove_all()  # Make sure the search hits the remote, not the local cache
        if v1:
            # The v1 protocol cannot carry a revision, so the search must be rejected
            client.search(pref.ref.full_repr(), remote="default", assert_error=True)
            self.assertIn("ERROR: Revisions not enabled in the client, specify "
                          "a reference without revision", client.out)
        else:
            data = client.search(pref.ref.full_repr(), remote="default")
            # Only the single binary belonging to REV1 is returned
            items = data["results"][0]["items"][0]["packages"]
            self.assertEqual(1, len(items))
            oss = items[0]["settings"]["os"]
            self.assertEqual(oss, "Linux")
    @parameterized.expand([(True,), (False,)])
    def search_a_local_package_with_rrev_test(self, v1):
        """If we search for the packages of a ref specifying the RREV in the local cache:
        1. With v2 client it shows the packages for that RREV and only if it is the one
           in the cache, otherwise it is not returned.
        2. With v1 client: the same"""
        client = self.c_v1 if v1 else self.c_v2
        # The second create replaces the first revision in the local cache
        pref1 = client.create(self.ref, GenConanfile().with_setting("os").with_build_msg("Rev1"),
                              args="-s os=Windows")
        pref2 = client.create(self.ref, GenConanfile().with_setting("os").with_build_msg("Rev2"),
                              args="-s os=Linux")
        # Rev1 is no longer in the cache, so searching for it must fail
        client.run("search {}".format(pref1.ref.full_repr()), assert_error=True)
        self.assertIn("Recipe not found: '{}'".format(pref1.ref.full_repr()), client.out)
        # Rev2 is the one in the cache: its packages are listed
        client.run("search {}".format(pref2.ref.full_repr()))
        self.assertIn("Existing packages for recipe {}:".format(pref2.ref), client.out)
        self.assertIn("os: Linux", client.out)
    @parameterized.expand([(True,), (False,)])
    def search_recipes_in_local_by_pattern_test(self, v1):
        """If we search for recipes with a pattern:
        1. With v2 client it return the refs matching, the refs doesn't contain RREV
        2. With v1 client, same"""
        client = self.c_v1 if v1 else self.c_v2
        # Create a couple of recipes locally
        client.export(self.ref)
        ref2 = ConanFileReference.loads("lib2/1.0@conan/testing")
        client.export(ref2)
        # Search for the recipes
        data = client.search("lib*")
        items = data["results"][0]["items"]
        self.assertEqual(2, len(items))
        # Results are plain references without the revision part
        expected = [str(self.ref), str(ref2)]
        self.assertEqual(expected, [i["recipe"]["id"] for i in items])
    @parameterized.expand([(True,), (False,)])
    def search_recipes_in_local_by_revision_pattern_test(self, v1):
        """If we search for recipes with a pattern containing even the RREV:
        1. With v2 client it return the refs matching, the refs doesn't contain RREV
        2. With v1 client, same"""
        client = self.c_v1 if v1 else self.c_v2
        # Create a couple of recipes locally
        client.export(self.ref)
        ref2 = ConanFileReference.loads("lib2/1.0@conan/testing")
        client.export(ref2)
        # Search with the revision embedded in the pattern: only "lib" matches
        data = client.search("{}*".format(self.ref.full_repr()))
        items = data["results"][0]["items"]
        self.assertEqual(1, len(items))
        # The returned id still has no revision part
        expected = [str(self.ref)]
        self.assertEqual(expected, [i["recipe"]["id"] for i in items])
    @parameterized.expand([(True,), (False,)])
    def search_recipes_in_remote_by_pattern_test(self, v1):
        """If we search for recipes with a pattern:
        1. With v2 client it return the refs matching of the latests, the refs doesnt contain RREV
        2. With v1 client it return the refs matching, the refs without the RREV"""
        # Upload to the server two rrevs for "lib" and two rrevs for "lib2"
        self.c_v2.create(self.ref)
        self.c_v2.upload_all(self.ref)
        pref1b = self.c_v2.create(self.ref, conanfile=GenConanfile().with_build_msg("REv2"))
        self.c_v2.upload_all(self.ref)
        ref2 = ConanFileReference.loads("lib2/1.0@conan/testing")
        self.c_v2.create(ref2)
        self.c_v2.upload_all(ref2)
        pref2b = self.c_v2.create(ref2, conanfile=GenConanfile().with_build_msg("REv2"))
        self.c_v2.upload_all(ref2)
        # Search from the client for "lib*"
        client = self.c_v1 if v1 else self.c_v2
        client.remove_all()  # Make sure the search hits the remote, not the local cache
        data = client.search("lib*", remote="default")
        items = data["results"][0]["items"]
        self.assertEqual(2, len(items))
        # One entry per recipe name, without the revision part
        expected = [str(pref1b.ref), str(pref2b.ref)]
        self.assertEqual(expected, [i["recipe"]["id"] for i in items])
    @parameterized.expand([(True,), (False,)])
    @unittest.skipIf(get_env("CONAN_TEST_WITH_ARTIFACTORY", False),
                     "Not implemented in artifactory")
    def search_in_remote_by_revision_pattern_test(self, v1):
        """If we search for recipes with a pattern like "lib/1.0@conan/stable#rev*"
        1. With v2 client: We get the revs without refs matching the pattern
        2. With v1 client: Same
        The same for "lib/*@conan/stable#rev" and "*lib/*@conan/stable#rev"
        But if we search an invalid revision it is not found
        """
        # Upload to the server two rrevs for "lib" and one rrevs for "lib2"
        self.c_v2.create(self.ref)
        self.c_v2.upload_all(self.ref)
        pref2_lib = self.c_v2.create(self.ref, conanfile=GenConanfile().with_build_msg("REv2"))
        self.c_v2.upload_all(self.ref)
        ref2 = ConanFileReference.loads("lib2/1.0@conan/testing")
        self.c_v2.create(ref2)
        self.c_v2.upload_all(ref2)
        client = self.c_v1 if v1 else self.c_v2
        # Exact ref#revision with a trailing wildcard: matches only "lib"
        data = client.search("{}*".format(pref2_lib.ref.full_repr()), remote="default")
        items = data["results"][0]["items"]
        expected = [str(self.ref)]
        self.assertEqual(expected, [i["recipe"]["id"] for i in items])
        # Wildcard in the version field, keeping the revision
        data = client.search("{}".format(pref2_lib.ref.full_repr()).replace("1.0", "*"),
                             remote="default")
        items = data["results"][0]["items"]
        expected = [str(self.ref)]
        self.assertEqual(expected, [i["recipe"]["id"] for i in items])
        # Leading wildcard as well
        data = client.search("*{}".format(pref2_lib.ref.full_repr()).replace("1.0", "*"),
                             remote="default")
        items = data["results"][0]["items"]
        expected = [str(self.ref)]
        self.assertEqual(expected, [i["recipe"]["id"] for i in items])
        # A non-existing revision yields no results at all
        data = client.search("*{}#fakerev".format(pref2_lib.ref),
                             remote="default")
        items = data["results"]
        expected = []
        self.assertEqual(expected, items)
    def search_revisions_locally_with_v1_server_test(self):
        """If I upload a recipe to a v1 server and then I check the revisions locally, it
        will return None to the time because the v1 doesn't support revisions"""
        old_server = TestServer(server_capabilities=[])
        servers = OrderedDict([("default", old_server)])
        c_v1 = TurboTestClient(revisions_enabled=False, servers=servers)
        pref = c_v1.create(self.ref)
        c_v1.upload_all(self.ref)
        # Enable revisions only after the upload, so the local metadata can be queried
        c_v1.enable_revisions()
        # Local searchs: the revision is shown but without an associated time
        c_v1.run("search {} --revisions".format(pref.ref))
        self.assertIn("{} (No time)".format(pref.ref.revision), c_v1.out)
        # Package-level revision search behaves the same
        pref_rev = pref.copy_with_revs(pref.ref.revision, None)
        c_v1.run("search {} --revisions".format(pref_rev.full_repr()))
        self.assertIn("{} (No time)".format(pref.revision), c_v1.out)
    def search_revisions_remotely_with_v1_server_test(self):
        """If I upload a recipe to a v1 server and then I check the revisions remotely, the
        command fails because a v1 server doesn't support revisions.
        NOTE(review): the original docstring said "locally", but the commands below
        search the remote with "-r default"."""
        old_server = TestServer(server_capabilities=[])
        servers = OrderedDict([("default", old_server)])
        c_v1 = TurboTestClient(revisions_enabled=False, servers=servers)
        pref = c_v1.create(self.ref)
        c_v1.upload_all(self.ref)
        c_v1.enable_revisions()
        # Remote searches against the v1 server must be rejected
        c_v1.run("search {} --revisions -r default".format(pref.ref), assert_error=True)
        self.assertIn("The remote doesn't support revisions", c_v1.out)
        # Package-level revision search is rejected the same way
        pref_rev = pref.copy_with_revs(pref.ref.revision, None)
        c_v1.run("search {} --revisions -r default".format(pref_rev.full_repr()),
                 assert_error=True)
        self.assertIn("The remote doesn't support revisions", c_v1.out)
    def search_revisions_regular_results_test(self):
        """If I upload several revisions to a server, we can list the times"""
        server = TestServer()
        servers = OrderedDict([("default", server)])
        c_v2 = TurboTestClient(revisions_enabled=True, servers=servers)
        pref = c_v2.create(self.ref)
        c_v2.upload_all(self.ref)
        pref_rev = pref.copy_with_revs(pref.ref.revision, None)
        c_v2.run("search {} --revisions -r default".format(pref_rev.full_repr()))
        # I don't want to mock here because I want to run this test against Artifactory
        # Fixed hash: presumably the deterministic revision of the default test recipe
        # — confirm against TurboTestClient/GenConanfile if it ever changes
        self.assertIn("83c38d3b4e5f1b8450434436eec31b00 (", c_v2.out)
        self.assertIn(" UTC)", c_v2.out)
@unittest.skipUnless(get_env("TESTING_REVISIONS_ENABLED", False), "Only revisions")
class UploadPackagesWithRevisions(unittest.TestCase):
    """Upload behavior of v1 and v2 clients against the same revisions-capable
    server: which revision each client uploads under and how overwrites and
    outdated binaries are handled."""

    def setUp(self):
        # Both clients share one server: c_v2 with revisions enabled, c_v1
        # emulating an old client (uploads under the fixed "0" revision).
        self.server = TestServer()
        self.c_v2 = TurboTestClient(revisions_enabled=True, servers={"default": self.server})
        self.c_v1 = TurboTestClient(revisions_enabled=False, servers={"default": self.server})
        self.ref = ConanFileReference.loads("lib/1.0@conan/testing")

    @parameterized.expand([(True,), (False,)])
    def upload_a_recipe_test(self, v1):
        """If we upload a package to a server:
        1. Using v1 client it will upload "0" revision to the server. The rev time is NOT updated.
        2. Using v2 client it will upload RREV revision to the server. The rev time is NOT updated.
        """
        client = self.c_v1 if v1 else self.c_v2
        pref = client.create(self.ref)
        client.upload_all(self.ref)
        revs = [r.revision for r in self.server.server_store.get_recipe_revisions(self.ref)]
        if v1:
            # Legacy clients always upload under the fixed "0" revision
            self.assertEqual(revs, [DEFAULT_REVISION_V1])
        else:
            self.assertEqual(revs, [pref.ref.revision])

    @parameterized.expand([(True,), (False,)])
    def upload_discarding_outdated_packages_test(self, v1):
        """If we upload all packages to a server,
        if a package doesn't belong to the current recipe:
        1. Using v1 client it will upload all the binaries as revision "0".
        2. Using v2 client it will upload only the matching packages.
        """
        conanfile = GenConanfile().with_setting("os")
        pref = self.c_v1.create(self.ref, conanfile=conanfile, args=" -s os=Windows")
        if not v1:
            self.c_v1.enable_revisions()  # Ensure we upload with v2 both revisions
        self.c_v1.upload_all(self.ref)
        # To allow creating a non-matching revision binary package
        self.c_v1.disable_revisions()
        pref2 = self.c_v1.create(self.ref, conanfile=conanfile.with_build_msg("rev2"),
                                 args=" -s os=Linux")
        # Now pref is outdated in the client, should not be uploaded with v2
        client = self.c_v1
        if not v1:
            client.enable_revisions()
        client.upload_all(self.ref)
        if not v1:
            # v2 must refuse to upload the Windows binary built from the old recipe
            self.assertIn("Skipping package '%s', "
                          "it doesn't belong to the current recipe revision" % pref.id, client.out)
        revs = [r.revision for r in self.server.server_store.get_recipe_revisions(self.ref)]
        if v1:
            self.assertEqual(revs, [DEFAULT_REVISION_V1])
        else:
            self.assertEqual(set(revs), set([pref.ref.revision, pref2.ref.revision]))

    @parameterized.expand([(True,), (False,)])
    def upload_no_overwrite_recipes_test(self, v1):
        """If we upload a RREV to the server and create a new RREV in the client,
        when we upload with --no-overwrite
        1. Using v1 client it will fail because it cannot overwrite.
        2. Using v2 client it will warn an upload a new revision.
        """
        client = self.c_v1 if v1 else self.c_v2
        pref = client.create(self.ref, conanfile=GenConanfile().with_setting("os"),
                             args=" -s os=Windows")
        client.upload_all(self.ref)
        pref2 = client.create(self.ref,
                              conanfile=GenConanfile().with_setting("os").with_build_msg("rev2"),
                              args=" -s os=Linux")
        if v1:
            # Without revisions the second recipe would replace the first: forbidden
            client.upload_all(self.ref, args="--no-overwrite", assert_error=True)
            self.assertIn("Local recipe is different from the remote recipe. "
                          "Forbidden overwrite.", client.out)
        else:
            # With revisions nothing is overwritten: a new revision becomes latest
            self.assertEqual(self.server.server_store.get_last_revision(self.ref)[0],
                             pref.ref.revision)
            client.upload_all(self.ref, args="--no-overwrite")
            self.assertEqual(self.server.server_store.get_last_revision(self.ref)[0],
                             pref2.ref.revision)

    @parameterized.expand([(True,), (False,)])
    def upload_no_overwrite_packages_test(self, v1):
        """If we upload a PREV to the server and create a new PREV in the client,
        when we upload with --no-overwrite
        1. Using v1 client it will fail because it cannot overwrite.
        2. Using v2 client it will warn and upload a new revision.
        """
        client = self.c_v1 if v1 else self.c_v2
        # The package content depends on MY_VAR, so changing it changes the PREV only
        conanfile = GenConanfile().with_package_file("file", env_var="MY_VAR")
        with environment_append({"MY_VAR": "1"}):
            pref = client.create(self.ref, conanfile=conanfile)
        client.upload_all(self.ref)
        with environment_append({"MY_VAR": "2"}):
            pref2 = client.create(self.ref, conanfile=conanfile)
        self.assertNotEqual(pref.revision, pref2.revision)
        if v1:
            client.upload_all(self.ref, args="--no-overwrite", assert_error=True)
            self.assertIn("Local package is different from the remote package. "
                          "Forbidden overwrite.", client.out)
        else:
            # With revisions the new PREV is uploaded and becomes the latest
            self.assertEqual(self.server.server_store.get_last_package_revision(pref2).revision,
                             pref.revision)
            client.upload_all(self.ref, args="--no-overwrite")
            self.assertEqual(self.server.server_store.get_last_package_revision(pref2).revision,
                             pref2.revision)
@unittest.skipUnless(get_env("TESTING_REVISIONS_ENABLED", False), "Only revisions")
class SCMRevisions(unittest.TestCase):
    """Tests of revision_mode="scm": the recipe revision is taken from the
    repository commit instead of the recipe contents."""

    def auto_revision_even_without_scm_git_test(self):
        """Even without using the scm feature, the revision is detected from repo.
        Also while we continue working in local, the revision doesn't change, so the packages
        can be found"""
        ref = ConanFileReference.loads("lib/1.0@conan/testing")
        client = TurboTestClient()
        conanfile = GenConanfile().with_revision_mode("scm")
        commit = client.init_git_repo(files={"file.txt": "hey"}, origin_url="http://myrepo.git")
        client.create(ref, conanfile=conanfile)
        self.assertEqual(client.recipe_revision(ref), commit)
        # Change the conanfile and make another create, the revision should be the same
        # (no new commit was made, so the scm-derived revision is unchanged)
        client.save({"conanfile.py": str(conanfile.with_build_msg("New changes!"))})
        client.create(ref, conanfile=conanfile)
        self.assertEqual(client.recipe_revision(ref), commit)
        self.assertIn("New changes!", client.out)

    def auto_revision_without_commits_test(self):
        """If we have a repo but without commits, it has to fail when the revision_mode=scm"""
        ref = ConanFileReference.loads("lib/1.0@conan/testing")
        client = TurboTestClient()
        conanfile = GenConanfile().with_revision_mode("scm")
        # Initialize an empty repository: no commit to derive a revision from
        client.runner('git init .', cwd=client.current_folder)
        client.save({"conanfile.py": str(conanfile)})
        client.run("create . {}".format(ref), assert_error=True)
        # It error, because the revision_mode is explicitly set to scm
        self.assertIn("Cannot detect revision using 'scm' mode from repository at "
                      "'{f}': Unable to get git commit from '{f}'".format(f=client.current_folder),
                      client.out)

    @attr("svn")
    def auto_revision_even_without_scm_svn_test(self):
        """Even without using the scm feature, the revision is detected from repo.
        Also while we continue working in local, the revision doesn't change, so the packages
        can be found"""
        ref = ConanFileReference.loads("lib/1.0@conan/testing")
        client = TurboTestClient()
        conanfile = GenConanfile().with_revision_mode("scm")
        commit = client.init_svn_repo("project",
                                      files={"file.txt": "hey", "conanfile.py": str(conanfile)})
        client.current_folder = os.path.join(client.current_folder, "project")
        client.create(ref, conanfile=conanfile)
        self.assertEqual(client.recipe_revision(ref), commit)
        # Change the conanfile and make another create, the revision should be the same
        client.save({"conanfile.py": str(conanfile.with_build_msg("New changes!"))})
        client.create(ref, conanfile=conanfile)
        self.assertEqual(client.recipe_revision(ref), commit)
        self.assertIn("New changes!", client.out)
@unittest.skipUnless(get_env("TESTING_REVISIONS_ENABLED", False), "Only revisions")
class CapabilitiesRevisionsTest(unittest.TestCase):
    """Negotiation between client revision support and server capabilities."""

    def test_server_without_revisions_capability(self):
        """If a server doesn't have the revisions capability, a modern client will still
        talk v1"""
        server = TestServer(server_capabilities=[])
        c_v2 = TurboTestClient(revisions_enabled=True, servers={"default": server})
        ref = ConanFileReference.loads("lib/1.0@conan/testing")
        c_v2.create(ref)
        c_v2.upload_all(ref)
        c_v2.remove_all()
        c_v2.run("install {}".format(ref))
        # Falling back to the v1 protocol stores the recipe under revision "0"
        self.assertEqual(c_v2.recipe_revision(ref), DEFAULT_REVISION_V1)

    def test_server_with_only_v2_capability(self):
        # A server announcing ONLY_V2 must reject clients with revisions disabled
        server = TestServer(server_capabilities=[ONLY_V2])
        c_v2 = TurboTestClient(revisions_enabled=False, servers={"default": server})
        ref = ConanFileReference.loads("lib/1.0@conan/testing")
        c_v2.create(ref)
        c_v2.upload_all(ref, remote="default", assert_error=True)
        self.assertIn("The remote at '{}' only works with revisions enabled. "
                      "Set CONAN_REVISIONS_ENABLED=1 or set 'general.revisions_enabled = 1' "
                      "at the 'conan.conf'. [Remote: default]".format(server.fake_url), c_v2.out)
@unittest.skipUnless(get_env("TESTING_REVISIONS_ENABLED", False), "Only revisions")
class InfoRevisions(unittest.TestCase):
    """Revision information shown by the 'conan info' command."""

    @parameterized.expand([(True,), (False,)])
    def test_info_command_showing_revision(self, v1):
        """If I run 'conan info ref' I get information about the revision only in a v2 client"""
        server = TestServer(server_capabilities=[])
        c_v2 = TurboTestClient(revisions_enabled=True, servers={"default": server})
        c_v1 = TurboTestClient(revisions_enabled=False, servers={"default": server})
        ref = ConanFileReference.loads("lib/1.0@conan/testing")
        client = c_v1 if v1 else c_v2
        client.create(ref)
        client.run("info {}".format(ref))
        revision = client.recipe_revision(ref)
        if v1:
            # Revisions disabled: the info output must not mention revisions at all
            self.assertNotIn("Revision:", client.out)
        else:
            self.assertIn("Revision: {}".format(revision), client.out)
@unittest.skipUnless(get_env("TESTING_REVISIONS_ENABLED", False), "Only revisions")
class ServerRevisionsIndexes(unittest.TestCase):
    """Checks that the server keeps a correct "latest" index while recipe and
    package revisions are uploaded and deleted."""

    def setUp(self):
        self.server = TestServer()
        self.c_v1 = TurboTestClient(revisions_enabled=False, servers={"default": self.server})
        self.c_v2 = TurboTestClient(revisions_enabled=True, servers={"default": self.server})
        self.ref = ConanFileReference.loads("lib/1.0@conan/testing")

    def rotation_deleting_recipe_revisions_test(self):
        """
        - If we have two RREVs in the server and we remove the first one,
          the last one is the latest
        - If we have two RREvs in the server and we remove the second one,
          the first is now the latest
        """
        # Each upload must immediately become the latest revision
        ref1 = self.c_v2.export(self.ref, conanfile=GenConanfile())
        self.c_v2.upload_all(ref1)
        self.assertEqual(self.server.server_store.get_last_revision(self.ref).revision,
                         ref1.revision)
        ref2 = self.c_v2.export(self.ref, conanfile=GenConanfile().with_build_msg("I'm rev2"))
        self.c_v2.upload_all(ref2)
        self.assertEqual(self.server.server_store.get_last_revision(self.ref).revision,
                         ref2.revision)
        ref3 = self.c_v2.export(self.ref, conanfile=GenConanfile().with_build_msg("I'm rev3"))
        self.c_v2.upload_all(ref3)
        self.assertEqual(self.server.server_store.get_last_revision(self.ref).revision,
                         ref3.revision)
        # Revisions are listed newest first
        revs = [r.revision for r in self.server.server_store.get_recipe_revisions(self.ref)]
        self.assertEqual(revs, [ref3.revision, ref2.revision, ref1.revision])
        self.assertEqual(self.server.server_store.get_last_revision(self.ref).revision,
                         ref3.revision)
        # Delete the latest from the server
        self.c_v2.run("remove {} -r default -f".format(ref3.full_repr()))
        revs = [r.revision for r in self.server.server_store.get_recipe_revisions(self.ref)]
        self.assertEqual(revs, [ref2.revision, ref1.revision])
        # After deleting the latest, the index falls back to the previous revision
        self.assertEqual(self.server.server_store.get_last_revision(self.ref).revision,
                         ref2.revision)

    def rotation_deleting_package_revisions_test(self):
        """
        - If we have two PREVs in the server and we remove the first one,
          the last one is the latest
        - If we have two PREVs in the server and we remove the second one,
          the first is now the latest
        """
        # The package content depends on MY_VAR, so each create yields a new PREV
        # under the same recipe revision and package id
        conanfile = GenConanfile().with_package_file("file", env_var="MY_VAR")
        with environment_append({"MY_VAR": "1"}):
            pref1 = self.c_v2.create(self.ref, conanfile=conanfile)
        self.c_v2.upload_all(self.ref)
        self.assertEqual(self.server.server_store.get_last_package_revision(pref1).revision,
                         pref1.revision)
        with environment_append({"MY_VAR": "2"}):
            pref2 = self.c_v2.create(self.ref, conanfile=conanfile)
        self.c_v2.upload_all(self.ref)
        self.assertEqual(self.server.server_store.get_last_package_revision(pref1).revision,
                         pref2.revision)
        with environment_append({"MY_VAR": "3"}):
            pref3 = self.c_v2.create(self.ref, conanfile=conanfile)
        server_pref3 = self.c_v2.upload_all(self.ref)
        self.assertEqual(self.server.server_store.get_last_package_revision(pref1).revision,
                         pref3.revision)
        # Sanity check: all three PREVs share the same recipe revision
        self.assertEqual(pref1.ref.revision, pref2.ref.revision)
        self.assertEqual(pref2.ref.revision, pref3.ref.revision)
        self.assertEqual(pref3.ref.revision, server_pref3.revision)
        # Query with an unset package revision to list them all (newest first)
        pref = pref1.copy_clear_rev().copy_with_revs(pref1.ref.revision, None)
        revs = [r.revision
                for r in self.server.server_store.get_package_revisions(pref)]
        self.assertEqual(revs, [pref3.revision, pref2.revision, pref1.revision])
        self.assertEqual(self.server.server_store.get_last_package_revision(pref).revision,
                         pref3.revision)
        # Delete the latest from the server
        self.c_v2.run("remove {} -p {}#{} -r default -f".format(pref3.ref.full_repr(),
                                                                pref3.id, pref3.revision))
        revs = [r.revision
                for r in self.server.server_store.get_package_revisions(pref)]
        self.assertEqual(revs, [pref2.revision, pref1.revision])
        # After deleting the latest PREV, the index falls back to the previous one
        self.assertEqual(self.server.server_store.get_last_package_revision(pref).revision,
                         pref2.revision)

    def deleting_all_rrevs_test(self):
        """
        If we delete all the recipe revisions in the server. There is no latest.
        If then a client uploads a RREV it is the latest
        """
        ref1 = self.c_v2.export(self.ref, conanfile=GenConanfile())
        self.c_v2.upload_all(ref1)
        ref2 = self.c_v2.export(self.ref, conanfile=GenConanfile().with_build_msg("I'm rev2"))
        self.c_v2.upload_all(ref2)
        ref3 = self.c_v2.export(self.ref, conanfile=GenConanfile().with_build_msg("I'm rev3"))
        self.c_v2.upload_all(ref3)
        self.c_v2.run("remove {} -r default -f".format(ref1.full_repr()))
        self.c_v2.run("remove {} -r default -f".format(ref2.full_repr()))
        self.c_v2.run("remove {} -r default -f".format(ref3.full_repr()))
        # With every revision gone the recipe itself is gone
        self.assertRaises(RecipeNotFoundException,
                          self.server.server_store.get_recipe_revisions, self.ref)
        # A fresh upload repopulates the index and becomes the only/latest revision
        ref4 = self.c_v2.export(self.ref, conanfile=GenConanfile().with_build_msg("I'm rev4"))
        self.c_v2.upload_all(ref4)
        revs = [r.revision for r in self.server.server_store.get_recipe_revisions(self.ref)]
        self.assertEqual(revs, [ref4.revision])

    def deleting_all_prevs_test(self):
        """
        If we delete all the package revisions in the server. There is no latest.
        If then a client uploads a RREV/PREV it is the latest
        """
        # The package content depends on MY_VAR, so each create yields a new PREV
        conanfile = GenConanfile().with_package_file("file", env_var="MY_VAR")
        with environment_append({"MY_VAR": "1"}):
            pref1 = self.c_v2.create(self.ref, conanfile=conanfile)
        self.c_v2.upload_all(self.ref)
        with environment_append({"MY_VAR": "2"}):
            pref2 = self.c_v2.create(self.ref, conanfile=conanfile)
        self.c_v2.upload_all(self.ref)
        with environment_append({"MY_VAR": "3"}):
            pref3 = self.c_v2.create(self.ref, conanfile=conanfile)
        self.c_v2.upload_all(self.ref)
        # Delete the package revisions (all of them have the same ref#rev and id)
        command = "remove {} -p {}#{{}} -r default -f".format(pref3.ref.full_repr(), pref3.id)
        self.c_v2.run(command.format(pref3.revision))
        self.c_v2.run(command.format(pref2.revision))
        self.c_v2.run(command.format(pref1.revision))
        # A fresh upload becomes the only/latest package revision
        with environment_append({"MY_VAR": "4"}):
            pref4 = self.c_v2.create(self.ref, conanfile=conanfile)
        self.c_v2.upload_all(self.ref)
        pref = pref1.copy_clear_rev().copy_with_revs(pref1.ref.revision, None)
        revs = [r.revision
                for r in self.server.server_store.get_package_revisions(pref)]
        self.assertEqual(revs, [pref4.revision])

    def v1_get_always_latest_test(self):
        # Uploads from a v1 client land under revision "0"; whichever upload
        # happened last (v1 "0" or a real v2 RREV) is the server's latest
        conanfile = GenConanfile()
        self.c_v1.create(self.ref, conanfile=conanfile)
        self.c_v1.upload_all(self.ref)
        pref = self.c_v2.create(self.ref, conanfile=conanfile.with_build_msg("Rev2"))
        self.c_v2.upload_all(self.ref)
        latest = self.server.server_store.get_last_revision(self.ref)
        self.assertEqual(latest.revision, pref.ref.revision)
        if get_env("CONAN_TEST_WITH_ARTIFACTORY", False):
            # NOTE(review): presumably waits for Artifactory's time-based
            # "latest" resolution to tick over — confirm
            time.sleep(62)
        self.c_v1.create(self.ref, conanfile=conanfile.with_build_msg("Rev3"))
        self.c_v1.upload_all(self.ref)
        latest = self.server.server_store.get_last_revision(self.ref)
        self.assertEqual(latest.revision, DEFAULT_REVISION_V1)
| {
"content_hash": "f4833cefeb75ad7ffd05f82cf9d18a74",
"timestamp": "",
"source": "github",
"line_count": 1578,
"max_line_length": 99,
"avg_line_length": 48.19391634980989,
"alnum_prop": 0.624707429322814,
"repo_name": "memsharded/conan",
"id": "fa396fec56e1fdb20d8ba6006b695f474e10722a",
"size": "76050",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/test/integration/revisions_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1100"
},
{
"name": "C",
"bytes": "264"
},
{
"name": "C++",
"bytes": "425"
},
{
"name": "CMake",
"bytes": "447"
},
{
"name": "Groovy",
"bytes": "12586"
},
{
"name": "Python",
"bytes": "4334185"
},
{
"name": "Shell",
"bytes": "1864"
}
],
"symlink_target": ""
} |
import json
import os
import re
import xml.etree.ElementTree as etree
def process_node(path, node):
    """Debug stub: print the tag of the visited *node*.

    *path* is currently unused.  Returns None.
    """
    # NOTE(review): an earlier traversal (disabled, formerly parked in the
    # docstring position) walked TestResult/TestSuite children and collected
    # non-passed TestCase nodes into a `failset`; only the print survives.
    print(node.tag)
# Maps a documented class/struct name to the list of its member variables.
name2members_doxygen = {}


def process_class_node(node):
    """Record the member variables of one doxygen <compound> element.

    Reads the <name> child for the class name and every <member kind="variable">
    child for variable names, then stores them in name2members_doxygen.
    """
    class_name = ""
    variables = []
    for child in node:
        if child.tag == "name":
            class_name = child.text
        elif child.tag == "member" and child.attrib.get("kind") == "variable":
            # The first sub-element of a <member> holds the variable's name
            variables.append(child[0].text)
    name2members_doxygen[class_name] = variables
# Build name2members_doxygen from the doxygen-generated class index.
tree = etree.parse("doxygen/xml/index.xml")
root = tree.getroot()
for child in root:
    # Only struct/class compounds carry the member variables we compare
    if (child.tag == "compound") and (child.attrib.get("kind") in ["struct", "class"]):
        process_class_node(child)
# Exclude these member names from the comparison — presumably static/class-level
# ids not covered by FC_REFLECT (suggested by the variable name); verify.
s_static_names = set(["space_id", "type_id"])
for k, v in name2members_doxygen.items():
    name2members_doxygen[k] = [x for x in v if x not in s_static_names]
#with open("stuff/member_enumerator.out", "r") as f:
#    name2members_fc = json.load(f)
# scan for FC_REFLECT( graphene::... in all cpp,hpp files under libraries/ programs/ tests/
# Matches FC_REFLECT(<qualified name>, (member)(member)...):
#   group 1 = fully qualified class name, group 2 = raw "(a)(b)..." member list
re_reflect = re.compile(r"""
FC_REFLECT\s*[(]
\s*(graphene::[a-zA-Z0-9_:]+)
\s*,
((?:\s*[(]\s*[a-zA-Z0-9_]+\s*[)])*)
""", re.VERBOSE)
# Matches FC_REFLECT_DERIVED(<name>, (<base>), (member)...):
#   group 1 = class name, group 2 = base class, group 3 = raw member list
re_reflect_derived = re.compile(r"""
FC_REFLECT_DERIVED\s*[(]
\s*(graphene::[a-zA-Z0-9_:]+)
\s*,
\s*[(]\s*(graphene::[a-zA-Z0-9_:]+)\s*[)]
\s*,
((?:\s*[(]\s*[a-zA-Z0-9_]+\s*[)])*)
""", re.VERBOSE)
# One "( name" chunk of an FC_REFLECT member list, after splitting on ")".
re_bubble_item = re.compile(r"\s*[(]\s*([a-zA-Z0-9_]+)\s*")


def bubble_list(x):
    """Turn an FC_REFLECT member list like "(a)(b)" into ["a", "b"]."""
    names = []
    for chunk in x.split(")")[:-1]:
        names.append(re_bubble_item.match(chunk).group(1))
    return names
# Scan the working tree for FC_REFLECT / FC_REFLECT_DERIVED declarations and
# record the reflected member names per class.
name2members_re = {}
for root, dirs, files in os.walk("."):
    if root == ".":
        # Restrict the walk to the project source directories only
        dirs[:] = ["libraries", "programs", "tests"]
    for filename in files:
        if not (filename.endswith(".cpp") or filename.endswith(".hpp")):
            continue
        # Narrow try to the only OSError-raising statements; the scan is
        # best-effort, so unreadable files are simply skipped.
        # (Fixes: unused `as e` binding and over-broad try block.)
        try:
            with open(os.path.join(root, filename), "r") as f:
                content = f.read()
        except OSError:
            continue
        for m in re_reflect.finditer(content):
            cname = m.group(1)
            members = bubble_list(m.group(2))
            name2members_re[cname] = members
            if cname.endswith("_object"):
                # NOTE(review): presumably *_object types have a reflected base
                # that plain FC_REFLECT misses — flagged, not fixed, here
                print("FC_REFLECT on {} should be FC_REFLECT_DERIVED".format(cname))
        for m in re_reflect_derived.finditer(content):
            cname = m.group(1)
            members = bubble_list(m.group(3))
            name2members_re[cname] = members
def validate_members(name2members_ref, name2members_test):
    """Compare doxygen-derived member lists against FC_REFLECT-derived ones.

    Prints a short report for every name whose member sets disagree.
    Names present in the reference but absent from the test mapping count
    as "not evaluated"; ordering differences are ignored.
    """
    matched = []
    not_evaluated = []
    mismatched = []
    for name in sorted(name2members_ref):
        ref_members = name2members_ref[name]
        if name not in name2members_test:
            not_evaluated.append(name)
            continue
        test_members = name2members_test[name]
        if sorted(ref_members) == sorted(test_members):
            matched.append(name)
        else:
            mismatched.append(name)
            print("")
            print("error in", name)
            print("doxygen:", ref_members)
            print("fc :", test_members)
"""
print("")
print("ok:")
for item in ok_items:
print(item)
print("")
print("not evaluated:")
for item in ne_items:
print(item)
print("")
print("error:")
for item in error_items:
print(item)
"""
validate_members(name2members_doxygen, name2members_re)
| {
"content_hash": "2edd7b8305203563902224c40531350a",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 91,
"avg_line_length": 28.795454545454547,
"alnum_prop": 0.5577479610628782,
"repo_name": "pidiscovery/pi",
"id": "0f41f355f7342082d6285c0ff0d206c72b62a465",
"size": "3825",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "programs/build_helpers/check_reflect.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "3199037"
},
{
"name": "CMake",
"bytes": "35097"
},
{
"name": "Dockerfile",
"bytes": "554"
},
{
"name": "Perl",
"bytes": "4937"
},
{
"name": "Python",
"bytes": "35817"
},
{
"name": "Shell",
"bytes": "2583"
}
],
"symlink_target": ""
} |
import subprocess
from thefuck.utils import for_app, replace_command, eager
@for_app('ifconfig')
def match(command):
    """Trigger when ifconfig reports an unknown interface name."""
    error_message = 'error fetching interface information: Device not found'
    return error_message in command.output
@eager
def _get_possible_interfaces():
    """Yield every interface name reported by ``ifconfig -a``.

    Interface entries begin at column 0; indented lines are continuations
    and are skipped.
    """
    proc = subprocess.Popen(['ifconfig', '-a'], stdout=subprocess.PIPE)
    for raw_line in proc.stdout.readlines():
        decoded = raw_line.decode()
        is_interface_line = (decoded and decoded != '\n'
                             and not decoded.startswith(' '))
        if is_interface_line:
            yield decoded.split(' ')[0]
def get_new_command(command):
    """Suggest the command with the bad interface replaced by a real one."""
    # The failing interface is the first token of the error output,
    # followed by a colon that must be stripped.
    bad_interface = command.output.split(' ')[0][:-1]
    candidates = _get_possible_interfaces()
    return replace_command(command, bad_interface, candidates)
| {
"content_hash": "6675f812d27a5a66317e9ca51f719d4a",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 71,
"avg_line_length": 30.608695652173914,
"alnum_prop": 0.671875,
"repo_name": "scorphus/thefuck",
"id": "f65c77ad6c644f8e49d678e05a9a5856a3428300",
"size": "704",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "thefuck/rules/ifconfig_device_not_found.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "536"
},
{
"name": "Python",
"bytes": "536648"
},
{
"name": "Shell",
"bytes": "134"
}
],
"symlink_target": ""
} |
from pyface.ui.wx.action.status_bar_manager import *
| {
"content_hash": "9c265edf53b2102b66d2a9d76853afa1",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 52,
"avg_line_length": 53,
"alnum_prop": 0.7924528301886793,
"repo_name": "enthought/etsproxy",
"id": "646248756fbd81736e81c6682c88cb0dc90f6c7e",
"size": "68",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/pyface/ui/wx/action/status_bar_manager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
} |
from adafruit import TCS34725
try:
from .component import *
except SystemError:
from component import *
class ColorSensor(LoopedComponent, I2CComponent):
    """Looped I2C component wrapping the Adafruit TCS34725 RGBC sensor."""

    # Loop period in milliseconds.
    _mswait = 20
    # Component/file name used by the framework.
    _FN = "color"

    def init(self):
        """Bring up the sensor with fixed integration time and gain."""
        super().init()
        self.tcs = TCS34725(integrationTime=0xEB, gain=0x01)
        self._set_init()

    def readColor(self):
        """Read raw RGBC data, remember it for derived values, return it."""
        rgb = self.tcs.getRawData()
        self._lastRGB = rgb
        return rgb

    def getColorTemp(self):
        """Color temperature derived from the most recent reading."""
        return self.tcs.calculateColorTemperature(self._lastRGB)

    def getLux(self):
        """Illuminance derived from the most recent reading."""
        return self.tcs.calculateLux(self._lastRGB)

    def cleanup(self):
        """Power the sensor down."""
        self.tcs.disable()

    def tick(self):
        """Per-loop callback: sample the sensor and publish the channels."""
        reading = self.readColor()
        channels = (reading["r"], reading["g"], reading["b"], reading["c"])
        self.writedata(channels)
| {
"content_hash": "4e79876fb6780c556f487571a04ea44a",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 68,
"avg_line_length": 24,
"alnum_prop": 0.6060606060606061,
"repo_name": "PHSCRC/boxbot",
"id": "49f9d16967c23b272aaead25a7aea2564d100814",
"size": "792",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hardware/color.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "2564"
},
{
"name": "Python",
"bytes": "69474"
}
],
"symlink_target": ""
} |
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
$Id: bootstrap.py 102545 2009-08-06 14:49:47Z chrisw $
"""
import os, shutil, sys, tempfile, urllib2
from optparse import OptionParser
tmpeggs = tempfile.mkdtemp()
is_jython = sys.platform.startswith('java')
# parsing arguments
parser = OptionParser()
parser.add_option("-v", "--version", dest="version",
help="use a specific zc.buildout version")
parser.add_option("-d", "--distribute",
action="store_true", dest="distribute", default=True,
help="Use Disribute rather than Setuptools.")
options, args = parser.parse_args()
if options.version is not None:
VERSION = '==%s' % options.version
else:
VERSION = ''
USE_DISTRIBUTE = options.distribute
args = args + ['bootstrap']
to_reload = False
try:
import pkg_resources
if not hasattr(pkg_resources, '_distribute'):
to_reload = True
raise ImportError
except ImportError:
ez = {}
if USE_DISTRIBUTE:
exec urllib2.urlopen(
'http://python-distribute.org/distribute_setup.py').read() in ez
ez['use_setuptools'](to_dir=tmpeggs, download_delay=0, no_fake=True)
else:
exec urllib2.urlopen(
'http://peak.telecommunity.com/dist/ez_setup.py').read() in ez
ez['use_setuptools'](to_dir=tmpeggs, download_delay=0)
if to_reload:
reload(pkg_resources)
else:
import pkg_resources
if sys.platform == 'win32':
def quote(c):
if ' ' in c:
# work around spawn lamosity on windows
return '"%s"' % c
else:
return c
else:
def quote(c):
return c
cmd = 'from setuptools.command.easy_install import main; main()'
ws = pkg_resources.working_set
if USE_DISTRIBUTE:
requirement = 'distribute'
else:
requirement = 'setuptools'
if is_jython:
import subprocess
assert subprocess.Popen(
[sys.executable] + ['-c', quote(cmd), '-mqNxd',
quote(tmpeggs), 'zc.buildout' + VERSION],
env=dict(os.environ,
PYTHONPATH=
ws.find(pkg_resources.Requirement.parse(requirement)).location
),
).wait() == 0
else:
assert os.spawnle(
os.P_WAIT, sys.executable, quote(sys.executable),
'-c', quote(cmd), '-mqNxd', quote(tmpeggs), 'zc.buildout' + VERSION,
dict(
os.environ,
PYTHONPATH=ws.find(
pkg_resources.Requirement.parse(requirement)).location), ) == 0
ws.add_entry(tmpeggs)
ws.require('zc.buildout' + VERSION)
import zc.buildout.buildout
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
| {
"content_hash": "9b2b1447fd4fa5b7fb98b1d4405f07d7",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 79,
"avg_line_length": 27.471153846153847,
"alnum_prop": 0.6272313615680783,
"repo_name": "LPgenerator/pynba",
"id": "30a14cf225f4ba9985f3574f4fc98d189ad7510d",
"size": "3494",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "bootstrap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Protocol Buffer",
"bytes": "843"
},
{
"name": "Python",
"bytes": "74216"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``Topic`` model and add a ``Program.topics`` M2M field."""

    dependencies = [
        ('courses', '0033_blank_edx_key'),
    ]

    operations = [
        migrations.CreateModel(
            name='Topic',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128, unique=True)),
            ],
        ),
        migrations.AddField(
            model_name='program',
            name='topics',
            field=models.ManyToManyField(blank=True, related_name='topics', to='courses.Topic'),
        ),
    ]
| {
"content_hash": "dc762e680ec2eda846a2aff93eb1a21a",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 114,
"avg_line_length": 29.130434782608695,
"alnum_prop": 0.5537313432835821,
"repo_name": "mitodl/micromasters",
"id": "241ee2219e45ab07b2fc9aee8ca8e1c316a3ac77",
"size": "720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "courses/migrations/0034_program_topics.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9764"
},
{
"name": "Dockerfile",
"bytes": "958"
},
{
"name": "HTML",
"bytes": "84519"
},
{
"name": "JavaScript",
"bytes": "1462849"
},
{
"name": "Procfile",
"bytes": "407"
},
{
"name": "Python",
"bytes": "2098424"
},
{
"name": "SCSS",
"bytes": "135082"
},
{
"name": "Shell",
"bytes": "10764"
}
],
"symlink_target": ""
} |
import os, sys

# Adds the fabric_bolt package from the working copy instead of site_packages
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

if __name__ == '__main__':
    # Default to the local settings module unless already configured.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fabric_bolt.core.settings.local')
    sys.path.append(os.getcwd())

    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)

    from django.conf import settings
    # NOTE(review): gevent monkey-patching normally must run before other
    # modules are imported, but here it happens only after the management
    # command has already completed -- confirm this ordering is intentional.
    if getattr(settings, 'SOCKETIO_ENABLED', False):
        from gevent import monkey
        monkey.patch_all()
"content_hash": "4a3166757ac3764ff6e705a265dd7830",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 86,
"avg_line_length": 33.11764705882353,
"alnum_prop": 0.7051509769094139,
"repo_name": "gvangool/fabric-bolt",
"id": "81454aaa4134d36e928a1ca678a1a6266f6e3bbe",
"size": "585",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "fabric_bolt/manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "838"
},
{
"name": "HTML",
"bytes": "63357"
},
{
"name": "JavaScript",
"bytes": "102422"
},
{
"name": "Python",
"bytes": "200391"
}
],
"symlink_target": ""
} |
import sys
import time
import logging
import os
import gc
import datetime
import numpy as np
import pyfits
import scipy
import scipy.interpolate
# Check if we have matplotlib for graphical output
# matplotlib is optional: fall back to text-only output when the import
# fails for any reason (hence the bare except).
has_matplotlib = True
try :
    import matplotlib.pyplot as plt
    import matplotlib.patches
except :
    has_matplotlib = False
# Add script parent directory to python search path to get access to the pyfact package
sys.path.append(os.path.abspath(sys.path[0].rsplit('/', 1)[0]))
import pyfact as pf
#===========================================================================
# Main
def create_spectrum(input_file_names,
                    analysis_position=None,
                    analysis_radius=.125,
                    match_rmf=None,
                    datadir='',
                    write_output_files=False,
                    do_graphical_output=True,
                    loglevel='INFO') :
    """Create ON/OFF spectra (PHA/ARF) from VHE event-list FITS files.

    Parameters
    ----------
    input_file_names : list; either a single data file (plus its ARF and
        RMF given separately by the caller) or a single bankfile whose rows
        are (data, ARF, RMF) triplets.
    analysis_position : str '(RA, Dec)' in degrees (J2000) or None; when
        None the object position from the first file's header is used.
    analysis_radius : ON-region aperture radius in degrees.
    match_rmf : RMF file whose EBOUNDS define the averaged PHA binning.
    datadir : prefix prepended to every bankfile entry.
    write_output_files : write per-run and averaged PHA/ARF files to CWD.
    do_graphical_output : show matplotlib diagnostic plots.
    loglevel : logging level name, e.g. 'INFO' or 'DEBUG'.
    """
    # Time it!
    t_1 = time.clock()

    # Configure logging
    numeric_level = getattr(logging, loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % loglevel)
    logging.basicConfig(level=numeric_level,
                        format='%(asctime)s - %(levelname)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')

    # Welcome user, print out basic information on package versions
    logging.info('This is {0} (pyfact v{1})'.format(os.path.split(__file__)[1], pf.__version__))
    logging.info('We are running with numpy v{0}, scipy v{1}, and pyfits v{2}'.format(
        np.__version__, scipy.__version__, pyfits.__version__
        ))

    #---------------------------------------------------------------------------
    # Loop over the file list, calculate quantities, & fill histograms

    # Exclusion radius [this should be generalized in future versions]
    rexdeg = .3
    logging.warning('pfspec is currently using a single exclusion region for background extraction set on the analysis position (r = {0})'.format(rexdeg))
    logging.warning('This should be improved in future versions (tm).')

    # Intialize some variables
    objra, objdec, pntra, pntdec = None, None, None, None
    if analysis_position :
        # NOTE(review): eval() on a command-line string is unsafe for
        # untrusted input; parsing the tuple explicitly would be safer.
        objra, objdec = eval(analysis_position)
        logging.info('Analysis position: RA {0}, Dec {1}'.format(objra, objdec))
    else :
        logging.info('No analysis position given => will use object position from first file')
    logging.info('Analysis radius: {0} deg'.format(analysis_radius))

    if write_output_files :
        logging.info('The output files can be found in {0}'.format(os.getcwd()))

    # theta^2 histogram settings: 50 bins up to (0.5 deg)^2
    theta2_hist_max, theta2_hist_nbins = .5 ** 2., 50
    theta2_on_hist, theta2_off_hist, theta2_offcor_hist = np.zeros(theta2_hist_nbins), np.zeros(theta2_hist_nbins), np.zeros(theta2_hist_nbins)
    non, noff, noffcor = 0., 0., 0.
    sky_ex_reg = None
    firstloop = True

    # Default spectral binning: 40 bins in log10(E) from -2 to 2
    spec_nbins, spec_emin, spec_emax = 40, -2., 2.
    telescope, instrument = 'DUMMY', 'DUMMY'
    arf_m, arf_m_erange = None, None
    if match_rmf:
        # Override the default binning with the EBOUNDS of the given RMF.
        logging.info('Matching total PHA binning to RMF file: {0}'.format(match_rmf))
        f = pyfits.open(match_rmf)
        rm, erange, ebounds, minprob = pf.rmf_to_np(f)
        f.close()
        spec_nbins = (len(ebounds) - 1)
        spec_emin = np.log10(ebounds[0])
        spec_emax = np.log10(ebounds[-1])
        arf_m_erange = erange
        # NOTE(review): the header is accessed after f.close(); this relies
        # on pyfits caching the header in memory -- confirm, or move the
        # close() after these reads.
        if 'INSTRUME' in f[1].header.keys() :
            instrument = f[1].header['INSTRUME']
        if 'TELESCOP' in f[1].header.keys() :
            telescope = f[1].header['TELESCOP']
    spec_on_hist, spec_off_hist, spec_off_cor_hist = np.zeros(spec_nbins), np.zeros(spec_nbins), np.zeros(spec_nbins)
    spec_hist_ebounds = np.linspace(spec_emin, spec_emax, spec_nbins + 1)
    dstart, dstop = None, None
    exposure = 0. # [s]

    # Read in input file, can be individual fits or bankfile
    logging.info('Opening input file(s) ..')

    # This list will hold the individual file names as strings
    file_list = None

    # Check if we are dealing with a single file or a bankfile
    # and create/read in the file list accordingly
    try :
        f = pyfits.open(input_file_names[0])
        f.close()
        file_list = [input_file_names]
    except :
        logging.info('Reading files from batchfile {0}'.format(input_file_names[0]))
        file_list = np.loadtxt(input_file_names[0], dtype='S')
        # A single-row bankfile loads as a 1D array; promote it to 2D.
        if len(file_list.shape) == 1 :
            file_list = np.array([file_list])

    # Sanity checks on input file(s)
    if len(file_list) < 1 :
        raise RuntimeError('No entries in bankfile')
    if len(file_list[0]) != 3 :
        raise RuntimeError('Bankfile must have three columns (data/arf/rmf)')

    # Shortcuts for commonly used functions
    cci_f, cci_a = pf.circle_circle_intersection_f, pf.circle_circle_intersection_a

    for files in file_list :
        dataf, arf, rmf = datadir + files[0], datadir + files[1], datadir + files[2]
        logging.info('==== Processing file {0}'.format(dataf))

        # Open fits file
        hdulist = pyfits.open(dataf)

        # Print file info
        #hdulist.info()

        # Access header of second extension
        ex1hdr = hdulist[1].header

        # Print header of the first extension as ascardlist
        #print ex1hdr.ascardlist()

        # Access data of first extension
        tbdata = hdulist[1].data # assuming the first extension is a table

        # Print table columns
        #hdulist[1].columns.info()

        #---------------------------------------------------------------------------
        # Calculate some useful quantities and add them to the table

        if firstloop :
            # If skymap center is not set, set it to the target position of the first run
            if objra == None or objdec == None :
                objra, objdec = ex1hdr['RA_OBJ'], ex1hdr['DEC_OBJ']
                logging.info('Analysis position from header: RA {0}, Dec {1}'.format(objra, objdec))

        pntra, pntdec = ex1hdr['RA_PNT'], ex1hdr['DEC_PNT']
        obj_cam_dist = pf.SkyCoord(objra, objdec).dist(pf.SkyCoord(pntra, pntdec))

        # If no exclusion regions are given, use the object position from the first run
        if sky_ex_reg == None :
            sky_ex_reg = [pf.SkyCircle(pf.SkyCoord(objra, objdec), rexdeg)]

        exposure_run = ex1hdr['LIVETIME']
        exposure += exposure_run

        logging.info('RUN Start date/time : {0} {1}'.format(ex1hdr['DATE_OBS'], ex1hdr['TIME_OBS']))
        logging.info('RUN Stop date/time : {0} {1}'.format(ex1hdr['DATE_END'], ex1hdr['TIME_END']))
        logging.info('RUN Exposure : {0:.2f} [s]'.format(exposure_run))
        logging.info('RUN Pointing pos. : RA {0:.4f} [deg], Dec {1:.4f} [deg]'.format(pntra, pntdec))
        logging.info('RUN Obj. cam. dist. : {0:.4f} [deg]'.format(obj_cam_dist))

        run_dstart = datetime.datetime(*[int(x) for x in (ex1hdr['DATE_OBS'].split('-') + ex1hdr['TIME_OBS'].split(':'))])
        run_dstop = datetime.datetime(*[int(x) for x in (ex1hdr['DATE_END'].split('-') + ex1hdr['TIME_END'].split(':'))])
        if firstloop :
            dstart = run_dstart
        # Track the stop time of the most recent run.
        dstop = run_dstop

        # Distance from the camera (FOV) center
        camdist = np.sqrt(tbdata.field('DETX ') ** 2. + tbdata.field('DETY ') ** 2.)

        # Distance from analysis position
        thetadist = pf.SkyCoord(objra, objdec).dist(pf.SkyCoord(tbdata.field('RA '), tbdata.field('DEC ')))

        ## cos(DEC)
        #cosdec = np.cos(tbdata.field('DEC ') * np.pi / 180.)
        #cosdec_col = pyfits.Column(name='XCOSDEC', format='1E', array=cosdec)

        # Add new columns to the table
        coldefs_new = pyfits.ColDefs(
            [pyfits.Column(name='XCAMDIST',format='1E', unit='deg', array=camdist),
             pyfits.Column(name='XTHETA',format='1E', unit='deg', array=thetadist)
             ]
            )
        newtable = pyfits.new_table(hdulist[1].columns + coldefs_new)

        # Print new table columns
        #newtable.columns.info()

        # Build a good-time-interval mask over all events.
        mgit = np.ones(len(tbdata), dtype=np.bool)
        try :
            # Note: according to the eventlist format document v1.0.0 Section 10
            # "The times are expressed in the same units as in the EVENTS
            # table (seconds since mission start in terresterial time)."
            for gti in hdulist['GTI'].data :
                mgit *= (tbdata.field('TIME') >= gti[0]) * (tbdata.field('TIME') <= gti[1])
        except :
            logging.warning('File does not contain a GTI extension')

        # New table data
        tbdata = newtable.data[mgit]

        #---------------------------------------------------------------------------
        # Select signal and background events

        photbdata = tbdata
        # ON: events within the analysis aperture; OFF: ring at the same
        # camera distance, excluding the source exclusion region.
        on_run = photbdata[photbdata.field('XTHETA') < analysis_radius]
        off_run = photbdata[((photbdata.field('XCAMDIST') < obj_cam_dist + analysis_radius)
                             * (photbdata.field('XCAMDIST') > obj_cam_dist - analysis_radius)
                             * np.invert(photbdata.field('XTHETA') < rexdeg))]

        spec_on_run_hist = np.histogram(np.log10(on_run.field('ENERGY')), bins=spec_nbins, range=(spec_emin, spec_emax))[0]
        spec_on_hist += spec_on_run_hist

        non_run, noff_run = len(on_run), len(off_run)

        # ON/OFF area normalization, correcting the ring area for the
        # circle-circle intersection with the exclusion region.
        alpha_run = analysis_radius**2. / ((obj_cam_dist + analysis_radius) ** 2.
                                           - (obj_cam_dist - analysis_radius) ** 2.
                                           - cci_f(obj_cam_dist + analysis_radius, rexdeg, obj_cam_dist) / np.pi
                                           + cci_f(obj_cam_dist - analysis_radius, rexdeg, obj_cam_dist) / np.pi)

        spec_off_run_hist, ebins = np.histogram(np.log10(off_run.field('ENERGY')), bins=spec_nbins, range=(spec_emin, spec_emax))
        spec_off_hist += spec_off_run_hist
        spec_off_cor_hist += spec_off_run_hist * alpha_run

        # DEBUG plot
        #plt.plot(ebins[:-1], spec_on_hist, label='ON')
        #plt.plot(ebins[:-1], spec_off_cor_hist, label='OFF cor.')
        #plt.legend()
        #plt.show()

        def print_stats(non, noff, alpha, pre='') :
            # Log ON/OFF statistics plus excess and Li&Ma significance.
            logging.info(pre + 'N_ON = {0}, N_OFF = {1}, ALPHA = {2:.4f}'.format(non, noff, alpha))
            logging.info(pre + 'EXCESS = {0:.2f}, SIGN = {1:.2f}'.format(non - alpha * noff, pf.get_li_ma_sign(non, noff, alpha)))

        non += non_run
        noff += noff_run
        noffcor += alpha_run * noff_run

        print_stats(non_run, noff_run, alpha_run, 'RUN ')
        print_stats(non, noff, noffcor/noff, 'TOTAL ')

        # theta^2 distributions for ON and (exclusion-corrected) OFF events.
        theta2_on_hist += np.histogram(photbdata.field('XTHETA') ** 2., bins=theta2_hist_nbins, range=(0., theta2_hist_max))[0]
        theta2_off_run_hist, theta2_off_run_hist_edges = np.histogram(np.fabs((photbdata[np.invert(photbdata.field('XTHETA') < rexdeg)].field('XCAMDIST') - obj_cam_dist)** 2.), bins=theta2_hist_nbins, range=(0., theta2_hist_max))
        theta2_off_hist += theta2_off_run_hist

        # Per-bin area normalization of the OFF theta^2 histogram.
        h_edges_r = np.sqrt(theta2_off_run_hist_edges)
        a_tmp = (
            cci_a(obj_cam_dist + h_edges_r,
                  np.ones(theta2_hist_nbins + 1) * rexdeg,
                  np.ones(theta2_hist_nbins + 1) * obj_cam_dist) / np.pi
            - cci_a(obj_cam_dist - h_edges_r,
                    np.ones(theta2_hist_nbins + 1) * rexdeg,
                    np.ones(theta2_hist_nbins + 1) * obj_cam_dist) / np.pi
            )
        theta2_off_hist_alpha = (
            (theta2_off_run_hist_edges[1:] - theta2_off_run_hist_edges[:-1])
            / (4. * obj_cam_dist * (h_edges_r[1:] - h_edges_r[:-1])
               - (a_tmp[1:] - a_tmp[:-1])
               )
            )
        #logging.debug('theta2_off_hist_alpha = {0}'.format( theta2_off_hist_alpha))
        theta2_offcor_hist += theta2_off_run_hist * theta2_off_hist_alpha

        # Read run ARF file
        logging.info('RUN Reading ARF from : {0}'.format(arf))
        f = pyfits.open(arf)
        ea, ea_erange = pf.arf_to_np(f[1])
        f.close()

        # If average ARF is not matched to RMF use first ARF as template
        if firstloop and arf_m_erange is None :
            arf_m_erange = ea_erange

        # NOTE(review): "is not" compares int identity here; "!=" is the
        # intended comparison (works only by CPython small-int caching).
        if (len(ea_erange) is not len(arf_m_erange)) or (np.sum(np.fabs(ea_erange - arf_m_erange)) > 1E-5) :
            logging.debug('Average ARF - ARF binning does not match RMF for file: {0}'.format(arf))
            logging.debug('Average ARF - Resampling ARF to match RMF EBOUNDS binning')
            # Resample in log-log space with a linear spline.
            ea_spl = scipy.interpolate.UnivariateSpline(np.log10(ea_erange[:-1]*ea_erange[1:]) / 2. , np.log10(ea), s=0, k=1)
            ea = 10. ** ea_spl((np.log10(arf_m_erange[:-1]*arf_m_erange[1:]) / 2.))

        # Accumulate the livetime-weighted average ARF.
        if firstloop :
            arf_m = ea * exposure_run
        else :
            arf_m += ea * exposure_run

        ## DEBUG plot
        #plt.errorbar(spec_hist_ebounds[:-1], dat, yerr=dat_err)
        #plt.title(dataf)
        #plt.show()

        # Write run wise data to PHA
        if write_output_files :
            # Create base file name for run wise output files
            run_out_basename = os.path.basename(dataf[:dataf.find('.fits')])

            # Open run RMF file
            logging.info('RUN Reading RMF from : {0}'.format(rmf))
            f = pyfits.open(rmf)
            # Read RM
            rm, erange, ebounds, minprob = pf.rmf_to_np(f)
            f.close()

            # Bin data to match EBOUNDS from RMF
            spec_on_run_hist = np.histogram(on_run.field('ENERGY'), bins=ebounds)[0]
            spec_off_run_hist = np.histogram(off_run.field('ENERGY'), bins=ebounds)[0]

            # Prepare excess data
            dat = spec_on_run_hist - alpha_run * spec_off_run_hist # ON - alpha x OFF = Excess
            dat_err = np.sqrt(spec_on_run_hist + spec_off_run_hist * alpha_run ** 2.)
            quality = np.where(((spec_on_run_hist == 0) | (spec_off_run_hist == 0)), 2, 0) # Set quality flags
            chan = np.arange(len(dat))

            # Signal PHA
            tbhdu = pf.np_to_pha(channel=chan, counts=np.array(spec_on_run_hist, dtype=float),
                                 quality=quality,
                                 exposure=exposure_run, obj_ra=objra, obj_dec=objdec,
                                 dstart=run_dstart, dstop=run_dstop, creator='pfspec', version=pf.__version__,
                                 telescope=telescope, instrument=instrument)
            tbhdu.header.update('ANCRFILE', os.path.basename(arf), 'Ancillary response file (ARF)')
            tbhdu.header.update('RESPFILE', os.path.basename(rmf), 'Redistribution matrix file (RMF)')
            tbhdu.header.update('BACKFILE', run_out_basename + '_bg.pha.fits', 'Bkgr FITS file')
            tbhdu.header.update('BACKSCAL', alpha_run, 'Background scale factor')
            tbhdu.header.update('HDUCLAS2', 'TOTAL', 'Extension contains source + bkgd')
            logging.info('RUN Writing signal PHA file to {0}'.format(run_out_basename + '_signal.pha.fits'))
            tbhdu.writeto(run_out_basename + '_signal.pha.fits')

            # Background PHA
            tbhdu = pf.np_to_pha(channel=chan, counts=np.array(spec_off_run_hist, dtype=float),
                                 exposure=exposure_run, obj_ra=objra, obj_dec=objdec,
                                 dstart=run_dstart, dstop=run_dstop, creator='pfspec', version=pf.__version__,
                                 telescope=telescope, instrument=instrument)
            tbhdu.header.update('ANCRFILE', os.path.basename(arf), 'Ancillary response file (ARF)')
            tbhdu.header.update('RESPFILE', os.path.basename(rmf), 'Redistribution matrix file (RMF)')
            tbhdu.header.update('HDUCLAS2', 'TOTAL', 'Extension contains source + bkgd')
            logging.info('RUN Writing background PHA file to {0}'.format(run_out_basename + '_bg.pha.fits'))
            tbhdu.writeto(run_out_basename + '_bg.pha.fits')

            # Excess PHA
            tbhdu = pf.np_to_pha(channel=chan, counts=dat, stat_err=dat_err, exposure=exposure_run, quality=quality,
                                 obj_ra=objra, obj_dec=objdec,
                                 dstart=run_dstart, dstop=run_dstop, creator='pfspec', version=pf.__version__,
                                 telescope=telescope, instrument=instrument)
            tbhdu.header.update('ANCRFILE', os.path.basename(arf), 'Ancillary response file (ARF)')
            tbhdu.header.update('RESPFILE', os.path.basename(rmf), 'Redistribution matrix file (RMF)')
            logging.info('RUN Writing excess PHA file to {0}'.format(run_out_basename + '_excess.pha.fits'))
            tbhdu.writeto(run_out_basename + '_excess.pha.fits')

        hdulist.close()
        firstloop = False

    #---------------------------------------------------------------------------
    # Write results to file

    # Normalize the livetime-weighted ARF sum to the total exposure.
    arf_m /= exposure

    if write_output_files :
        # Prepare data
        dat = spec_on_hist - spec_off_cor_hist # ON - alpha x OFF = Excess
        dat_err = np.sqrt(spec_on_hist + spec_off_hist* (spec_off_cor_hist / spec_off_hist) ** 2.)
        quality = np.where(((spec_on_hist == 0) | (spec_off_hist == 0)), 1, 0) # Set quality flags
        chan = np.arange(len(dat))

        ## DEBUG plot
        #plt.errorbar(spec_hist_ebounds[:-1], dat, yerr=dat_err)
        #plt.title('Total')
        #plt.show()

        # Data to PHA
        tbhdu = pf.np_to_pha(channel=chan, counts=dat, stat_err=dat_err, exposure=exposure, quality=quality,
                             obj_ra=objra, obj_dec=objdec,
                             dstart=dstart, dstop=dstop, creator='pfspec', version=pf.__version__,
                             telescope=telescope, instrument=instrument)
        # Write PHA to file
        tbhdu.header.update('ANCRFILE', os.path.basename('average.arf.fits'), 'Ancillary response file (ARF)')
        tbhdu.writeto('average.pha.fits')

        # Write ARF
        tbhdu = pf.np_to_arf(arf_m, arf_m_erange, telescope=telescope, instrument=instrument)
        tbhdu.writeto('average.arf.fits')

    #---------------------------------------------------------------------------
    # Plot results

    if has_matplotlib and do_graphical_output :
        import matplotlib
        logging.info('Plotting results (matplotlib v{0})'.format(matplotlib.__version__))
        # NOTE(review): defined but never called in this function.
        def set_title_and_axlabel(label) :
            plt.xlabel('RA (deg)')
            plt.ylabel('Dec (deg)')
            plt.title(label, fontsize='medium')

        # theta^2 distribution of ON vs normalized OFF events
        plt.figure()
        x = np.linspace(0., theta2_hist_max, theta2_hist_nbins + 1)
        x = (x[1:] + x[:-1]) / 2.
        plt.errorbar(x, theta2_on_hist, xerr=(theta2_hist_max / (2. * theta2_hist_nbins)), yerr=np.sqrt(theta2_on_hist),
                     fmt='o', ms=3.5, label=r'N$_{ON}$', capsize=0.)
        plt.errorbar(x, theta2_offcor_hist, xerr=(theta2_hist_max / (2. * theta2_hist_nbins)),
                     yerr=np.sqrt(theta2_off_hist) * theta2_offcor_hist / theta2_off_hist,
                     fmt='+', ms=3.5, label=r'N$_{OFF} \times \alpha$', capsize=0.)
        plt.axvline(analysis_radius ** 2., ls='--', label=r'$\theta^2$ cut')
        plt.xlabel(r'$\theta^2$ (deg$^2$)')
        plt.ylabel(r'N')
        plt.legend(numpoints=1)

        # Count spectra: ON, normalized OFF, and excess
        plt.figure()
        ax = plt.subplot(111)
        ecen = (spec_hist_ebounds[1:] + spec_hist_ebounds[:-1]) / 2.
        plt.errorbar(ecen, spec_on_hist,
                     xerr=(spec_hist_ebounds[1] - spec_hist_ebounds[0]) / 2.,
                     yerr=np.sqrt(spec_on_hist), fmt='o', label='ON')
        plt.errorbar(ecen, spec_off_cor_hist,
                     xerr=(spec_hist_ebounds[1] - spec_hist_ebounds[0]) / 2.,
                     yerr=np.sqrt(spec_off_hist) * spec_off_cor_hist / spec_off_hist, fmt='+', label='OFF cor.')
        dat = spec_on_hist - spec_off_cor_hist
        dat_err = np.sqrt(spec_on_hist + spec_off_hist* (spec_off_cor_hist / spec_off_hist) ** 2.)
        plt.errorbar(ecen, dat, yerr=dat_err, fmt='s', label='ON - OFF cor.')
        plt.xlabel(r'log(E/1 TeV)')
        plt.ylabel(r'N')
        plt.legend(numpoints=1)
        ax.set_yscale('log')

    #----------------------------------------
    # Time it!
    t_2 = time.clock()
    logging.info('Execution took {0}'.format(pf.get_nice_time(t_2 - t_1)))

    logging.info('Thank you for choosing {0}. Have a great day!'.format(os.path.split(__file__)[1]))

    #----------------------------------------
    # NOTE(review): assumes matplotlib imported successfully; raises
    # NameError when has_matplotlib is False -- confirm intended.
    plt.show()
#===========================================================================
# Main function
if __name__ == '__main__':
    # We should switch to argparse soon (python v2.7++)
    # http://docs.python.org/library/argparse.html#module-argparse
    import optparse
    parser = optparse.OptionParser(
        usage='%prog [options] FILE [ARF RMF]\nFILE can either be an indiviual .fits/.fits.gz file or a batch file.\nIn case it is a individual file, the ARF and RMF must also be specified. The bankfile must contain three columns: data file, ARF, and RMF.',
        description='Creates spectra from VHE event lists in FITS format.'
    )
    parser.add_option(
        '-p','--analysis-position',
        dest='analysis_position',
        type='str',
        default=None,
        help='Analysis position in RA and Dec (J2000) in degrees. Format: \'(RA, Dec)\', including the quotation marks. If no center is given, the source position from the first input file is used.'
    )
    parser.add_option(
        '-r','--analysis-radius',
        dest='analysis_radius',
        type='float',
        default=.125,
        help='Aperture for the analysis in degree [default: %default].'
    )
    parser.add_option(
        '-m','--match-average-pha-to-rmf',
        dest='match_rmf',
        type='string',
        default=None,
        help='RMF filename to which the average PHA file binning is matched [default: %default].'
    )
    parser.add_option(
        '-d','--data-dir',
        dest='datadir',
        type='string',
        default='',
        help='Directory in which the data is located. Will be added as prefix to the entries in the bankfile [default: %default].'
    )
    parser.add_option(
        '-w','--write-output-files',
        dest='write_output_files',
        action='store_true',
        default=False,
        help='Write output files [default: %default].'
    )
    parser.add_option(
        '--no-graphical-output',
        dest='graphical_output',
        action='store_false',
        default=True,
        help='Switch off graphical output.'
    )
    parser.add_option(
        '-l','--log-level',
        dest='loglevel',
        default='INFO',
        help='Amount of logging e.g. DEBUG, INFO, WARNING, ERROR [default: %default]'
    )

    options, args = parser.parse_args()

    # Accept either a single bankfile, or one data file plus its ARF and RMF.
    if len(args) == 1 or len(args) == 3 :
        create_spectrum(
            input_file_names=args,
            analysis_position=options.analysis_position,
            analysis_radius=options.analysis_radius,
            match_rmf=options.match_rmf,
            datadir=options.datadir,
            write_output_files=options.write_output_files,
            do_graphical_output=options.graphical_output,
            loglevel=options.loglevel
        )
    else :
        parser.print_help()

#===========================================================================
| {
"content_hash": "be015468a55baae240555c0d721f5f4a",
"timestamp": "",
"source": "github",
"line_count": 531,
"max_line_length": 257,
"avg_line_length": 44.440677966101696,
"alnum_prop": 0.5603864734299517,
"repo_name": "mraue/pyfact",
"id": "30f1020ad32d70e39e479d36b62b24c474966032",
"size": "25355",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "scripts/pfspec.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "255441"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class CoreConfig(AppConfig):
    """Django application configuration for the Aedes core app."""
    name = 'aedes_server.core'
    verbose_name = 'Aedes'
| {
"content_hash": "786d141ec3dd9e240057f61d502e1a7a",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 33,
"avg_line_length": 17.714285714285715,
"alnum_prop": 0.7096774193548387,
"repo_name": "henriquenogueira/aedes",
"id": "e77ae03badd8fdda3d67187d874eab6c89f0335e",
"size": "124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aedes_server/core/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "202099"
},
{
"name": "HTML",
"bytes": "8148"
},
{
"name": "JavaScript",
"bytes": "511548"
},
{
"name": "Python",
"bytes": "38844"
}
],
"symlink_target": ""
} |
import os
import sys
import mezzanine_buffer
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = mezzanine_buffer.__version__

# Shortcut: "python setup.py publish" uploads an sdist and reminds about tagging.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    print("You probably want to also tag the version now:")
    print(" git tag -a %s -m 'version %s'" % (version, version))
    print(" git push --tags")
    sys.exit()

# Read the long-description inputs with context managers so the file
# handles are closed deterministically (they were previously left open).
with open('README.rst') as f:
    readme = f.read()
with open('HISTORY.rst') as f:
    history = f.read().replace('.. :changelog:', '')

setup(
    name='mezzanine-buffer',
    version=version,
    description="""Buffer integration for Mezzanine CMS""",
    long_description=readme + '\n\n' + history,
    author='Alex Tsai',
    author_email='caffodian@gmail.com',
    url='https://github.com/caffodian/mezzanine-buffer',
    packages=[
        'mezzanine_buffer',
    ],
    include_package_data=True,
    install_requires=[
        "requests >= 2.1.0",
        "buffer-python >= 1.08"
    ],
    dependency_links=[
        "https://github.com/vtemian/buffpy/tarball/master#egg=buffer-python-1.08"
    ],
    license="BSD",
    zip_safe=False,
    keywords='mezzanine-buffer',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
    ],
)
"content_hash": "3647257889d535e90bce54fabd0ae0b0",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 81,
"avg_line_length": 29.017543859649123,
"alnum_prop": 0.6124546553808948,
"repo_name": "caffodian/mezzanine-buffer",
"id": "bafadeb610167b48f3240945ad007dcec2456c10",
"size": "1701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1262"
},
{
"name": "Python",
"bytes": "8304"
}
],
"symlink_target": ""
} |
from lxml import etree
import re
import os
import sys
# Regex
# Raw strings so backslash escapes like \d reach the regex engine verbatim
# (non-raw '\d' depends on Python passing unknown escapes through, which is
# deprecated behaviour).
re_transform = re.compile(r'([a-zA-Z]+)\((-?\d+\.?\d*),?\s*(-?\d+\.?\d*)?\)')
re_translate = re.compile(r'\((-?\d+\.?\d*)\s*,?\s*(-?\d+\.?\d*)\)')
re_coord_split = re.compile(r'\s+|,')
re_path_coords = re.compile(r'[a-zA-Z]')
re_path_split = re.compile(r'([ACHLMQSTVZachlmqstvz])')
re_trailing_zeros = re.compile(r'\.(\d*?)(0+)$')
re_length = re.compile(r'^(\d+\.?\d*)\s*(em|ex|px|in|cm|mm|pt|pc|%|\w*)')
# Path commands
# Each command maps to a tuple of per-argument axis indices used when
# translating: 0 = x-like, 1 = y-like, -1 = leave untouched (arc params).
path_commands = {
    "M": (0, 1),
    "L": (0, 1),
    "T": (0, 1),
    # BUG FIX: "H" and "V" must be 1-tuples.  The original bare (0) / (1)
    # are plain ints, so len(d) and d[n % len(d)] in _translatePath /
    # _scalePath raised TypeError for any path containing H or V commands.
    "H": (0,),
    "V": (1,),
    "A": (-1, -1, -1, -1, -1, 0, 1),
    "C": (0, 1, 0, 1, 0, 1)
}
# How relative commands are scaled
scale_commands = {
    "m": (0, 1),
    "l": (0, 1),
    "t": (0, 1),
    "h": (0,),  # 1-tuples here too (same bug as above)
    "v": (1,),
    "a": (0, 1, -1, -1, -1, 0, 1),
    "c": (0, 1, 0, 1, 0, 1)
}
# Absolute commands scale with the same axis mapping they translate with.
scale_commands.update(path_commands)
# Attribute names
value_attributes = ["x", "y", "x1", "y1", "x2", "y2", "cx", "cy", "r", "rx", "ry", "width", "height"]
# (property, value) pairs that equal the SVG defaults and can be dropped.
default_styles = set([
    ("opacity", "1"),
    ("fill-opacity", "1"),
    ("stroke", "none"),
    ("stroke-width", "1"),
    ("stroke-opacity", "1"),
    ("stroke-miterlimit", "4"),
    ("stroke-linecap", "butt"),
    ("stroke-linejoin", "miter"),
    ("stroke-dasharray", "none"),
    ("stroke-dashoffset", "0"),
    ("font-anchor", "start"),
    ("font-style", "normal"),
    ("font-weight", "normal"),
    ("font-stretch", "normal"),
    ("font-variant", "normal")
])
# Attributes shifted when translating each element type.
position_attributes = {"rect": (["x", "y"]),
                       "tspan": (["x", "y"]),
                       "circle": (["cx", "cy"]),
                       "ellipse": (["cx", "cy"]),
                       "line": (["x1", "y1", "x2", "y2"])}
# Attributes multiplied when scaling each element type.
scaling_attributes = {"rect": (["x", "y", "width", "height"]),}
# CSS properties that may appear as SVG presentation attributes; these get
# hoisted into generated CSS classes by CleanSVG.extractStyles().
STYLES = set([
    "alignment-baseline",
    "baseline-shift",
    "clip-path",
    "clip-rule",
    "color-interpolation",
    "color-interpolation-filters",
    "color-profile",
    "color-rendering",
    "direction",
    "dominant-baseline",
    "fill",
    "fill-opacity",
    "fill-rule",
    "font",
    "font-family",
    "font-size",
    "font-size-adjust",
    "font-stretch",
    "font-style",
    "font-variant",
    "font-weight",
    "glyph-orientation-horizontal",
    "glyph-orientation-vertical",
    "image-rendering",
    "kerning",
    "letter-spacing",
    "marker",
    "marker-end",
    "marker-mid",
    "marker-start",
    "mask",
    "opacity",
    "pointer-events",
    "shape-rendering",
    "stop-color",
    "stop-opacity",
    "stroke",
    "stroke-dasharray",
    "stroke-dashoffset",
    "stroke-linecap",
    "stroke-linejoin",
    "stroke-miterlimit",
    "stroke-opacity",
    "stroke-width",
    "text-anchor",
    "text-decoration",
    "text-rendering",
    "unicode-bidi",
    "word-spacing",
    "writing-mode",
])
class CleanSVG:
    """Parses an SVG file with lxml and applies clean-up passes: stripping
    namespaces/attributes/elements/comments, hoisting presentation styles
    into CSS classes, rounding coordinates, and flattening translate/scale
    transforms into element coordinates.

    Python 2 only (print statements, basestring, dict.iteritems).
    """
    def __init__(self, svgfile=None, verbose=False):
        self._verbose = verbose
        self.tree = None
        self.root = None
        # Need to update this if style elements found
        # Maps tuple-of-(property, value)-pairs -> generated class name.
        self.styles = {}
        self.style_counter = 0
        # printf-style format applied to every number; setDecimalPlaces()
        # replaces it with e.g. "%.2f".
        self.num_format = "%s"
        self.removeWhitespace = True
        if svgfile:
            self.parseFile(svgfile)
    def parseFile(self, filename):
        """ Parse |filename| into an element tree; exits the process on I/O failure. """
        try:
            self.tree = etree.parse(filename)
        except IOError:
            print "Unable to open file", filename
            sys.exit(1)
        self.root = self.tree.getroot()
    def analyse(self):
        """ Search for namespaces. Will do more later """
        print "Namespaces:"
        for ns, link in self.root.nsmap.iteritems():
            print " %s: %s" % (ns, link)
    def removeGroups(self):
        """ Remove groups with no attributes """
        # Doesn't work for nested groups
        for element in self.tree.iter():
            # Comments/processing instructions have non-string tags; skip.
            if not isinstance(element.tag, basestring):
                continue
            # Strip the "{namespace}" prefix to get the local tag name.
            element_type = element.tag.split('}')[1]
            if element_type == 'g' and not element.keys():
                parent = element.getparent()
                if parent is not None:
                    parent_postion = parent.index(element)
                    print
                    print parent
                    # Move children outside of group
                    for i, child in enumerate(element, parent_postion):
                        print i
                        print "move %s to %s" % (child, i)
                        parent.insert(i, child)
                        #del parent[i]
    def write(self, filename):
        """ Write current SVG to a file. """
        if not filename.endswith('.svg'):
            filename += '.svg'
        with open(filename, 'w') as f:
            f.write(self.toString(True))
    def toString(self, pretty_print=False):
        """ Return a string of the current SVG """
        if self.styles:
            self._addStyleElement()
        if self.removeWhitespace:
            # Serialise, then strip every newline plus its leading indent.
            svg_string = etree.tostring(self.root)
            svg_string = re.sub(r'\n\s*' , "", svg_string)
        else:
            svg_string = etree.tostring(self.root, pretty_print=pretty_print)
        return svg_string
    def _addStyleElement(self):
        """ Insert a CSS style element containing information
            from self.styles to the top of the file. """
        style_element = etree.SubElement(self.root, "style")
        self.root.insert(0, style_element)
        style_text = '\n'
        # Emit classes ordered by generated class name (style0, style1, ...).
        for styles, style_class in sorted(self.styles.iteritems(), key=lambda (k,v): v):
            style_text += "\t.%s{\n" % style_class
            for (style_id, style_value) in styles:
                style_text += '\t\t%s:\t%s;\n' % (style_id, style_value)
            style_text += "\t}\n"
        style_element.text = style_text
    def setDecimalPlaces(self, decimal_places):
        """ Round attribute numbers to a given number of decimal places. """
        self.num_format = "%%.%df" % decimal_places
        for element in self.tree.iter():
            if not isinstance(element.tag, basestring):
                continue
            tag = element.tag.split('}')[1]
            if tag == "polyline" or tag == "polygon":
                values = re_coord_split.split(element.get("points"))
                formatted_values = [self._formatNumber(x) for x in values if x]
                try:
                    # Re-pair the flat coordinate list as "x,y x,y ...".
                    point_list = " ".join((formatted_values[i] + "," + formatted_values[i+1] for i in range(0, len(formatted_values), 2)))
                    element.set("points", point_list)
                except IndexError:
                    # Odd number of coordinates; leave attribute unchanged.
                    print "Could not parse points list"
                    pass
            elif tag == "path":
                # Command letters fail float() inside _formatNumber and pass
                # through unchanged, so the whole "d" string can be mapped.
                coords = map(self._formatNumber, re_coord_split.split(element.get("d")))
                coord_list = " ".join(coords)
                element.set("d", coord_list)
                #for coord in coords:
                #    if re_path_coords.match(coord):
                #        print coord
            else:
                for attribute in element.attrib.keys():
                    if attribute in value_attributes:
                        element.set(attribute, self._formatNumber(element.get(attribute)))
    def removeAttribute(self, attribute, exception_list=None):
        """ Remove all instances of an attribute ignoring any with a value in the exception list. """
        if exception_list is None: exception_list = []
        if self._verbose: print '\nRemoving attribute: %s' % attribute
        for element in self.tree.iter():
            if attribute in element.attrib.keys() and element.attrib[attribute] not in exception_list:
                if self._verbose: print ' - Removed attribute: %s="%s"' % (attribute, element.attrib[attribute])
                del element.attrib[attribute]
    def removeElement(self, tagName):
        """ Remove all instances of an element. """
        if self._verbose: print '\nRemoving element: %s' % tagName
        for element in self.tree.iter():
            if (isinstance(element.tag, basestring)):
                tag = element.tag.split('}')[1]
                if tag == tagName:
                    element.getparent().remove(element)
    def removeComments(self):
        """ Remove all comments. """
        if self._verbose: print '\nRemoving comments'
        for element in self.tree.iter():
            if element.tag is etree.Comment:
                element.getparent().remove(element)
    def removeNonDefIDAttributes(self):
        """ Go through def elements and find IDs referred to, then remove all IDs except those. """
        def_IDs = []
        for element in self.tree.iter():
            if not isinstance(element.tag, basestring):
                continue
            tag = element.tag.split('}')[1]
            if tag == 'defs':
                for child in element.getchildren():
                    for key, value in child.attrib.iteritems():
                        # Collect (xlink:)href targets referenced from defs.
                        # NOTE(review): href values are typically "#id" while
                        # id attributes lack the "#"; verify the exception
                        # list actually matches against id values.
                        if key.endswith('href'):
                            def_IDs.append(value)
        self.removeAttribute('id', exception_list=def_IDs)
    def removeNamespace(self, namespace):
        """ Remove all attributes of a given namespace. """
        nslink = self.root.nsmap.get(namespace)
        if self._verbose:
            print "\nRemoving namespace, %s" % namespace
            if nslink:
                print " - Link: %s" % nslink
        if nslink:
            # Fully-qualified lxml names look like "{uri}localname".
            nslink = "{%s}" % nslink
            length = len(nslink)
            for element in self.tree.iter():
                if element.tag[:length] == nslink:
                    self.root.remove(element)
                    if self._verbose:
                        print " - removed element: %s" % element.tag[length:]
                for attribute in element.attrib.keys():
                    if attribute[:length] == nslink:
                        del element.attrib[attribute]
                        if self._verbose:
                            print " - removed attribute from tag: %s" % element.tag
            del self.root.nsmap[namespace]
    def extractStyles(self):
        """ Remove style attributes and values of the style attribute and put in <style> element as CSS. """
        for element in self.tree.iter():
            style_list = []
            # Inline style="a:b;c:d" attribute -> list of (prop, value) pairs.
            if "style" in element.keys():
                styles = element.attrib["style"].split(';')
                style_list.extend([tuple(style.split(':')) for style in styles])
                del element.attrib["style"]
            # Presentation attributes (fill=..., stroke=...) become styles too.
            for attribute in STYLES & set(element.attrib.keys()):
                style_list.append((attribute, element.attrib[attribute]))
                del element.attrib[attribute]
            if len(style_list) > 0:
                # Ensure styling is in the form: (key, value)
                style_list = [style for style in style_list if len(style)==2]
                # Remove pointless styles, e.g. opacity = 1
                for default_style in default_styles & set(style_list):
                    style_list.remove(default_style)
                # Clean decimals:
                for i, (style_name, style_value) in enumerate(style_list):
                    number = re_length.search(style_value)
                    if number:
                        clean_number = self._formatNumber(number.group(1))
                        style_list[i] = (style_name, clean_number + number.group(2))
                # Reuse an existing class for an identical style set.
                style_tuple = tuple(style_list)
                if style_tuple not in self.styles:
                    style_class = "style%d" % self.style_counter
                    self.styles[style_tuple] = style_class
                    self.style_counter += 1
                else:
                    style_class = self.styles[style_tuple]
                # Should test to see whether there is already a class
                element.set("class", style_class)
    def applyTransforms(self):
        """ Apply transforms to element coordinates. """
        for element in self.tree.iter():
            if 'transform' in element.keys():
                all_transforms = element.get('transform')
                transform_list = re_transform.findall(all_transforms)
                # Apply the right-most transform first (SVG composition order).
                transform_list.reverse()
                element_type = element.tag.split('}')[1]
                if element_type == 'g':
                    self._applyGroupTransforms(element, transform_list)
                sucessful_transformation = False
                for transformation in transform_list:
                    delta = [float(n) for n in transformation[1:] if n]
                    if transformation[0] == 'translate':
                        sucessful_transformation = self._translateElement(element, delta)
                    elif transformation[0] == 'scale':
                        sucessful_transformation = self._scaleElement(element, delta)
                # Doesn't take into account if one transformation isn't sucessful
                if sucessful_transformation:
                    del element.attrib["transform"]
    def _applyGroupTransforms(self, group_element, transformations):
        # Ensure all child elements are paths
        children = [child for child in group_element if isinstance(child.tag, basestring)]
        if any((child.tag.split('}')[1] != 'path' for child in children)):
            return
        # Dispatch table: transform name -> path-level handler.
        f_dict = {'translate': self._translatePath, 'scale': self._scalePath}
        sucessful_transformation = False
        for transformation in transformations:
            delta = [float(n) for n in transformation[1:] if n]
            trans_f = f_dict.get(transformation[0])
            if trans_f:
                for child in children:
                    sucessful_transformation = trans_f(child, delta)
        if sucessful_transformation:
            del group_element.attrib["transform"]
    def _formatNumber(self, number):
        """ Convert a number to a string representation
            with the appropriate number of decimal places. """
        try:
            number = float(number)
        except ValueError:
            # Not numeric (e.g. a path command letter) - return unchanged.
            return number
        str_number = self.num_format % number
        trailing_zeros = re_trailing_zeros.search(str_number)
        if trailing_zeros:
            # length equals number of trailing zeros + decimal point if no other numbers
            length = (len(trailing_zeros.group(2)) + (len(trailing_zeros.group(1)) == 0))
            str_number = str_number[:-length]
        return str_number
    def _translateElement(self, element, delta):
        #print " - translate by: (%s, %s)" % delta
        element_type = element.tag.split('}')[1]
        coords = position_attributes.get(element_type)
        if coords:
            # Alternating attributes take delta[0] (x) / delta[1] (y).
            for i, coord_name in enumerate(coords):
                new_coord = float(element.get(coord_name, 0)) + delta[i % 2]
                element.set(coord_name, self._formatNumber(new_coord))
            return True
        elif "points" in element.keys():
            values = [float(v) + delta[i % 2] for i, v in enumerate(re_coord_split.split(element.get("points")))]
            str_values = map(self._formatNumber, values)
            point_list = " ".join((str_values[i] + "," + str_values[i+1] for i in range(0, len(str_values), 2)))
            element.set("points", point_list)
            return True
        elif "d" in element.keys():
            self._translatePath(element, delta)
            return True
    def _scaleElement(self, element, delta):
        # A single scale factor applies to both axes.
        if len(delta) == 1:
            delta = [delta[0], delta[0]]
        element_type = element.tag.split('}')[1]
        coords = scaling_attributes.get(element_type)
        if coords:
            for i, coord_name in enumerate(coords):
                new_coord = float(element.get(coord_name, 0)) * delta[i % 2]
                element.set(coord_name, self._formatNumber(new_coord))
            return True
        elif "d" in element.keys():
            self._scalePath(element, delta)
            return True
    def _translatePath(self, path, delta):
        delta.append(0) # add as a null value for flags
        commands = self._parsePath(path.get("d"))
        new_d = ""
        for command, values in commands:
            new_d += command
            if command in path_commands:
                # Offset each value by the delta for its axis; index -1
                # selects the appended 0 so arc flags stay untouched.
                d = path_commands[command]
                for n, value in enumerate(values):
                    new_d += "%s " % self._formatNumber(value + delta[d[n % len(d)]])
            else:
                # Relative commands are unaffected by translation.
                new_d += " ".join(map(self._formatNumber, values))
        path.set("d", new_d)
        return True
    def _scalePath(self, path, delta):
        # Normalise delta to [sx, sy, 0]; trailing 0 is the null slot.
        if len(delta) == 1:
            delta = [delta[0], delta[0], 0]
        elif len(delta) == 2:
            delta.append(0) # add as a null value for flags
        commands = self._parsePath(path.get("d"))
        new_d = ""
        for command, values in commands:
            new_d += command
            if command in scale_commands:
                command_v = scale_commands[command]
                for n, value in enumerate(values):
                    new_d += "%s " % self._formatNumber(value * delta[ command_v[n % len(command_v)]])
            else:
                new_d += " ".join(map(self._formatNumber, values))
        path.set("d", new_d)
        return True
    def _parsePath(self, d):
        commands = []
        # Splitting on command letters (captured) yields
        # ['', cmd, args, cmd, args, ...], hence the range starting at 1.
        split_commands = re_path_split.split(d)
        if len(split_commands) > 2:
            for command, values in [(split_commands[i], split_commands[i+1]) for i in range(1, len(split_commands), 2)]:
                values = [float(value) for value in re_coord_split.split(values) if value != '']
                commands.append((command, values))
        return commands
def main(filename):
    """Run the standard clean-up passes on |filename| and write the result
    alongside it as <name>_test.svg."""
    svg = CleanSVG(filename, verbose=False)
    #svg.removeAttribute('id')
    # Strip editor-specific namespaces before any other processing.
    for namespace in ('sodipodi', 'inkscape', 'xml'):
        svg.removeNamespace(namespace)
    svg.removeNonDefIDAttributes()
    #svg.removeGroups()
    svg.setDecimalPlaces(2)
    #svg.extractStyles()
    svg.applyTransforms()
    #svg.removeWhitespace = False;
    base = os.path.splitext(filename)[0]
    svg.write('%s_test.svg' % base)
if __name__ == "__main__":
    import sys
    # Clean the SVG named on the command line, or the bundled example.
    if len(sys.argv) > 1:
        target = sys.argv[1]
    else:
        target = os.path.join('examples', 'paths.svg')
    main(target)
| {
"content_hash": "3aedd52443ee3452158cb9504aa068d4",
"timestamp": "",
"source": "github",
"line_count": 544,
"max_line_length": 138,
"avg_line_length": 34.786764705882355,
"alnum_prop": 0.522722468822659,
"repo_name": "lamka02sk/wrinche",
"id": "396264b99c553f5f95283fcbd37c9721947eb5eb",
"size": "18947",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "assets/icons/svgoptimizer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "536140"
},
{
"name": "Hack",
"bytes": "6468"
},
{
"name": "JavaScript",
"bytes": "301879"
},
{
"name": "PHP",
"bytes": "409092"
},
{
"name": "Python",
"bytes": "19621"
}
],
"symlink_target": ""
} |
import sys
import random
# Experiment switches; several are overridden by command-line args below.
test=False
calc_evo=True
extinction=True
seed=-1
outfile="out"
nefile="neat.ne"
solve_evals=0
#was 40000
interval=40000
# Usage: prog [e|other] seed outfile nefile [interval]
# First arg 'e' enables periodic extinction events.
if(len(sys.argv)>1):
    extinction = sys.argv[1]=='e'
    seed = int(sys.argv[2])
    outfile= sys.argv[3]
    nefile=sys.argv[4]
    if(len(sys.argv)>5):
        interval=int(sys.argv[5])
# Display settings for the optional pygame visualisation.
disp=False
SZX=SZY=400
screen = None
if test:
    calc_evo=False
    disp=True
if disp:
    import pygame
    from pygame.locals import *
    pygame.init()
    pygame.display.set_caption('Viz')
    screen =pygame.display.set_mode((SZX,SZY))
    background = pygame.Surface(screen.get_size())
    background = background.convert()
    background.fill((250, 250, 250))
def render(pop):
    """Draw every robot's maze end-position as a small red square on the
    pygame surface and flip the display."""
    global screen,background
    screen.blit(background, (0, 0))
    for bot in pop:
        px = int(mazepy.feature_detector.endx(bot) * SZX)
        py = int(mazepy.feature_detector.endy(bot) * SZY)
        pygame.draw.rect(screen, (255, 0, 0), (px, py, 5, 5), 0)
    pygame.display.flip()
# Entropy/evolvability helpers; the star import is also presumed to supply
# mazepy, grid_sz, map_into_grid, fitness, most_populus, defaultdict, etc.
# used below - TODO confirm against entropy.py.
from entropy import *
# Evolvability metric sampled periodically in the main loop ("EVO-CALC").
evo_fnc = calc_evolvability_cnt
def killbot(to_kill, niche, population, whole_population):
    """Remove one robot from its niche and from the global population,
    dropping the niche entirely once it becomes empty."""
    members = population[niche]
    members.remove(to_kill)
    whole_population.remove(to_kill)
    del to_kill
    if not members:
        population.pop(niche)
# Main experiment: niche-based evolution in a hard maze, with optional
# periodic extinction events and evolvability sampling.  (Python 2.)
if(__name__=='__main__'):
    log_file=open(outfile+".log","w")
    evo_file=open(outfile+".evo","w")
    #evo_fnc = calc_evolvability_entropy
    #initialize maze stuff with "medium maze"
    mazepy.mazenav.initmaze("hard_maze_list.txt",nefile)
    #mazepy.mazenav.initmaze("medium_maze_list.txt")
    if(seed==-1):
        mazepy.mazenav.random_seed()
    else:
        random.seed(seed+100)
        mazepy.mazenav.seed(seed+100)
    eflag=False
    robot=None
    # population maps grid niche -> list of robots in that niche;
    # whole_population is the flat list of the same robots.
    population=defaultdict(list)
    whole_population=[]
    psize=2000
    repop=0
    # Seed the population with psize random mutated robots.
    for k in range(psize):
        robot=mazepy.mazenav()
        robot.init_rand()
        robot.mutate()
        robot.map()
        population[map_into_grid(robot)].append(robot)
        whole_population.append(robot)
    solved=False
    evals=0 #psize
    child=None
    max_evals=6000001
    best_fit=-1000000.0
    best_fit_org=None
    best_evo=0
    best_evo_org=None
    while evals < max_evals: #not solved:
        keys=population.keys()
        if(disp and eflag):
            render(whole_population)
            eflag=False
        # Periodic logging of population statistics.
        if(evals%1000==0):
            quant=evals,len(keys),calc_population_entropy(whole_population),complexity(whole_population),best_fit
            print quant
            log_file.write(str(quant)+"\n")
            if(disp):
                render(whole_population)
            sys.stdout.flush()
        # Reproduce: pick a uniform-random niche, then a random parent in it.
        pniche=random.choice(keys)
        parent=random.choice(population[pniche])
        child=parent.copy()
        child.mutate()
        child.map()
        if(fitness(child)>best_fit):
            best_fit=fitness(child)
            best_fit_org=child.copy()
        if(child.solution()):
            if not solved:
                solved=True
                solve_evals=evals
        population[map_into_grid(child)].append(child)
        whole_population.append(child)
        # Steady-state replacement: cull from the most crowded niche unless
        # we are refilling after an extinction (repop > 0).
        if(repop==0):
            niche=most_populus(population)
            to_kill=random.choice(population[niche])
            killbot(to_kill,niche,population,whole_population)
        else:
            repop-=1
        # Periodically sample evolvability over 200 random robots.
        if(calc_evo and evals%250000==0):
            #run genome in the maze simulator
            print "EVO-CALC"
            for org in random.sample(whole_population,200):
                evo=evo_fnc(org,1000)
                if evo>best_evo:
                    best_evo=evo
                    best_evo_org=org.copy()
                print "evolvability:", evo
                evo_file.write(str(evals)+" "+str(evo)+"\n")
            print "EVO-CALC END"
            evo_file.flush()
        # Extinction event: wipe (almost) all niches, keeping 10 random
        # survivor niches/organisms; repop lets the population regrow.
        if extinction and evals>10 and (evals-1)%(interval)==0:
            eflag=True
            xc=random.randint(0,grid_sz)
            yc=random.randint(0,grid_sz)
            rad=grid_sz*0.45
            niches_to_kill=[]
            for x in range(grid_sz):
                for y in range(grid_sz):
                    dx=abs(x-xc)
                    dy=abs(y-yc)
                    #if min(dx,grid_sz-dx)**2+min(dy,grid_sz-dy)**2 < rad**2:
                    niches_to_kill.append((x,y))
            survivors=random.sample(population.keys(),10)
            survivor_orgs=[random.choice(population[x]) for x in survivors]
            #for k in survivors:
            #    niches_to_kill.remove(k)
            repop=0
            for niche in niches_to_kill:
                orgs=population[niche][:]
                repop+=len(orgs)
                for x in orgs:
                    if x not in survivor_orgs:
                        killbot(x,niche,population,whole_population)
                if niche in population and niche not in survivors:
                    population.pop(niche)
        evals+=1
    # NOTE(review): best_evo_org is None when calc_evo is False - the save
    # below would fail in that configuration; confirm intended usage.
    best_evo_org.save(outfile+"_bestevo.dat")
    best_fit_org.save(outfile+"_bestfit.dat")
    if solved:
        best_fit_org.save(outfile+"_%d_solution.dat" % solve_evals)
    """
    robot=mazepy.mazenav()
    robot.init_rand()
    robot.mutate()
    robot.map()
    print "evolvability:", evo_fnc(robot,1000)
    optimize_evolvability(child)
    """
| {
"content_hash": "e56c6a2ead01486faff85b78a6d5af36",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 104,
"avg_line_length": 22,
"alnum_prop": 0.6874437443744374,
"repo_name": "jal278/mazerobot-python",
"id": "f5ac054c57080964be8e2a43c28465482cbc2c0a",
"size": "4444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "population-entropy-e2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1045"
},
{
"name": "C++",
"bytes": "824720"
},
{
"name": "Gnuplot",
"bytes": "650"
},
{
"name": "Python",
"bytes": "53672"
},
{
"name": "Shell",
"bytes": "283"
}
],
"symlink_target": ""
} |
import urllib
import urllib.request

import oauth

# Fill in the application's information in hidden
import hidden
# How to do OAuth signing
def augment(url, parameters):
    """Sign |url| + |parameters| with the app's OAuth1 credentials (from
    hidden.oauth()) and return the fully signed GET URL."""
    secrets = hidden.oauth()
    consumer = oauth.OAuthConsumer(secrets['consumer_key'], secrets['consumer_secret'])
    token = oauth.OAuthToken(secrets['token_key'], secrets['token_secret'])
    request = oauth.OAuthRequest.from_consumer_and_token(
        consumer, token=token, http_method='GET', http_url=url,
        parameters=parameters)
    request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, token)
    return request.to_url()
def test_me():
    """Prompt for a Twitter username and dump their friends list.

    Performs a signed GET against the Twitter 1.1 friends/list endpoint and
    prints the signed URL, the raw JSON body and the response headers.
    """
    # Read the username as a plain string.  The original eval(input(...))
    # executed whatever the user typed (arbitrary code execution) and failed
    # on any unquoted username.
    user = input('Enter username: - ')
    print ('* Calling Twitter...')
    # Count: # of post to load
    url = augment('https://api.twitter.com/1.1/friends/list.json',
                  {'screen_name': user, 'count': '8'} )
    print (url)
    connection = urllib.request.urlopen(url)
    # Gets the body (JSON bytes).
    data = connection.read()
    print (data)
    # Gets dictionary of headers.  Python 3 message objects have no .dict
    # attribute; build a plain dict from the (header, value) pairs instead.
    headers = dict(connection.info().items())
    print (headers)
| {
"content_hash": "369075ca6437d35b25f927fb90ae809f",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 87,
"avg_line_length": 31.314285714285713,
"alnum_prop": 0.6733576642335767,
"repo_name": "lastralab/Statistics",
"id": "604a99f07ab4d14a56171452a3a5470c8998accf",
"size": "1151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Specialization/Personal/twurl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "89"
},
{
"name": "Python",
"bytes": "249488"
},
{
"name": "Shell",
"bytes": "182"
}
],
"symlink_target": ""
} |
"""This script runs an automated Cronet performance benchmark.
This script:
1. Sets up "USB reverse tethering" which allow network traffic to flow from
an Android device connected to the host machine via a USB cable.
2. Starts HTTP and QUIC servers on the host machine.
3. Installs an Android app on the attached Android device and runs it.
4. Collects the results from the app.
Prerequisites:
1. A rooted (i.e. "adb root" succeeds) Android device connected via a USB cable
to the host machine (i.e. the computer running this script).
2. quic_server and quic_client have been built for the host machine, e.g. via:
./build/gyp_chromium
ninja -C out/Release quic_server quic_client
3. cronet_perf_test_apk has been built for the Android device, e.g. via:
./components/cronet/tools/cr_cronet.py gyp
ninja -C out/Release cronet_perf_test_apk
Invocation:
./run.py
Output:
Benchmark timings are output by telemetry to stdout and written to
./results.html
"""
import json
import os
import shutil
import subprocess
import sys
import tempfile
from time import sleep
import urllib
REPOSITORY_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..', '..', '..'))
sys.path.append(os.path.join(REPOSITORY_ROOT, 'tools/telemetry'))
sys.path.append(os.path.join(REPOSITORY_ROOT, 'build/android'))
import lighttpd_server
from pylib import android_commands
from pylib import pexpect
from pylib.device import device_utils
from pylib.device import intent
from telemetry import android
from telemetry import benchmark
from telemetry import benchmark_runner
from telemetry import story
from telemetry.internal import forwarders
from telemetry.internal.forwarders import android_forwarder
from telemetry.value import scalar
from telemetry.web_perf import timeline_based_measurement
# Host-side binaries/APK produced by the Release build.
BUILD_DIR = REPOSITORY_ROOT + '/out/Release/'
QUIC_SERVER = BUILD_DIR + 'quic_server'
QUIC_CLIENT = BUILD_DIR + 'quic_client'
APP_APK = BUILD_DIR + 'apks/CronetPerfTest.apk'
APP_PACKAGE = 'org.chromium.net'
APP_ACTIVITY = '.CronetPerfTestActivity'
APP_ACTION = 'android.intent.action.MAIN'
BENCHMARK_CONFIG = {
  # Control various metric recording for further investigation.
  'CAPTURE_NETLOG': False,
  'CAPTURE_TRACE': False,
  'CAPTURE_SAMPLED_TRACE': False,
  # While running Cronet Async API benchmarks, indicate if callbacks should be
  # run on network thread rather than posted back to caller thread. This allows
  # measuring if thread-hopping overhead is significant.
  'CRONET_ASYNC_USE_NETWORK_THREAD': False,
  # A small resource for device to fetch from host.
  'SMALL_RESOURCE': 'small.html',
  'SMALL_RESOURCE_SIZE': 26,
  # Number of times to fetch SMALL_RESOURCE.
  'SMALL_ITERATIONS': 1000,
  # A large resource for device to fetch from host.
  'LARGE_RESOURCE': 'large.html',
  'LARGE_RESOURCE_SIZE': 10000026,
  # Number of times to fetch LARGE_RESOURCE.
  'LARGE_ITERATIONS': 4,
  # An on-device file containing benchmark timings.  Written by benchmark app.
  'RESULTS_FILE': '/data/data/' + APP_PACKAGE + '/results.txt',
  # An on-device file whose presence indicates benchmark app has terminated.
  'DONE_FILE': '/data/data/' + APP_PACKAGE + '/done.txt',
  # Ports of HTTP and QUIC servers on host.
  'HTTP_PORT': 9000,
  'QUIC_PORT': 9001,
  # Maximum read/write buffer size to use.
  'MAX_BUFFER_SIZE': 16384,
}
# Add benchmark config to global state for easy access.
# NOTE: this injects every key above (HTTP_PORT, DONE_FILE, ...) as a
# module-level name; the rest of this file relies on those globals.
globals().update(BENCHMARK_CONFIG)
def GetDevice():
  """Returns the lone attached Android device; asserts exactly one."""
  attached = android_commands.GetAttachedDevices()
  assert len(attached) == 1
  return device_utils.DeviceUtils(attached[0])
def GetForwarderFactory(device):
  """Forwarder factory configured for USB reverse tethering on |device|."""
  factory = android_forwarder.AndroidForwarderFactory(device, True)
  return factory
def GetServersHost(device):
  """IP address of the host machine as reachable from |device|."""
  factory = GetForwarderFactory(device)
  return factory.host_ip
def GetHttpServerURL(device, resource):
  """URL at which |device| can fetch |resource| from the host HTTP server."""
  host = GetServersHost(device)
  return 'http://%s:%d/%s' % (host, HTTP_PORT, resource)
class CronetPerfTestAndroidStory(android.AndroidStory):
  # Android AppStory implementation wrapping CronetPerfTest app.
  # Launches Cronet perf test app and waits for execution to complete
  # by waiting for presence of DONE_FILE.
  def __init__(self, device):
    self._device = device
    # Remove any stale completion marker from a previous run.
    device.RunShellCommand('rm %s' % DONE_FILE)
    # NOTE: |config| aliases (and mutates) the shared BENCHMARK_CONFIG dict.
    config = BENCHMARK_CONFIG
    config['HOST'] = GetServersHost(device)
    start_intent = intent.Intent(
        package=APP_PACKAGE,
        activity=APP_ACTIVITY,
        action=APP_ACTION,
        # |config| maps from configuration value names to the configured values.
        # |config| is encoded as URL parameter names and values and passed to
        # the Cronet perf test app via the Intent data field.
        data='http://dummy/?'+urllib.urlencode(config),
        extras=None,
        category=None)
    super(CronetPerfTestAndroidStory, self).__init__(
        start_intent, name='CronetPerfTest',
        # No reason to wait for app; Run() will wait for results. By default
        # StartActivity will timeout waiting for CronetPerfTest, so override
        # |is_app_ready_predicate| to not wait.
        is_app_ready_predicate=lambda app: True)

  def Run(self, shared_user_story_state):
    # Poll until the app writes DONE_FILE to signal completion.
    while not self._device.FileExists(DONE_FILE):
      sleep(1.0)
class CronetPerfTestStorySet(story.StorySet):
  # Story set containing the single Cronet perf test story.
  def __init__(self, device):
    super(CronetPerfTestStorySet, self).__init__()
    # Create and add Cronet perf test AndroidStory.
    self.AddStory(CronetPerfTestAndroidStory(device))
class CronetPerfTestMeasurement(
    timeline_based_measurement.TimelineBasedMeasurement):
  # For now AndroidStory's SharedAppState works only with
  # TimelineBasedMeasurements, so implement one that just forwards results from
  # Cronet perf test app.
  def __init__(self, device, options):
    super(CronetPerfTestMeasurement, self).__init__(options)
    self._device = device

  def WillRunStoryForPageTest(self, tracing_controller,
                              synthetic_delay_categories=None):
    # Skip parent implementation which doesn't apply to Cronet perf test app as
    # it is not a browser with a timeline interface.
    pass

  def Measure(self, tracing_controller, results):
    # Reads results from |RESULTS_FILE| on target and adds to |results|.
    jsonResults = json.loads(self._device.ReadFile(RESULTS_FILE))
    # Each entry maps a test name to its duration; recorded in milliseconds.
    for test in jsonResults:
      results.AddValue(scalar.ScalarValue(results.current_page, test,
                                          'ms', jsonResults[test]))
@benchmark.Enabled('android')
class CronetPerfTestBenchmark(benchmark.Benchmark):
  # Benchmark implementation spawning off Cronet perf test measurement and
  # StorySet.
  def __init__(self, max_failures=None):
    super(CronetPerfTestBenchmark, self).__init__(max_failures)
    self._device = GetDevice()

  def CreatePageTest(self, options):
    return CronetPerfTestMeasurement(self._device, options)

  def CreateStorySet(self, options):
    return CronetPerfTestStorySet(self._device)
class QuicServer:
  # Manages the lifetime of the host-side quic_server process.
  def __init__(self, quic_server_doc_root):
    self._process = None
    self._quic_server_doc_root = quic_server_doc_root

  def StartupQuicServer(self, device):
    # Spawn quic_server serving the pre-built in-memory cache directory.
    self._process = pexpect.spawn(QUIC_SERVER,
                                  ['--quic_in_memory_cache_dir=%s' %
                                      self._quic_server_doc_root,
                                   '--port=%d' % QUIC_PORT])
    assert self._process != None
    # Wait for quic_server to start serving.
    # Probe with quic_client until a fetch of SMALL_RESOURCE succeeds.
    waited_s = 0
    while subprocess.call([QUIC_CLIENT,
                           '--host=%s' % GetServersHost(device),
                           '--port=%d' % QUIC_PORT,
                           'http://%s:%d/%s' % (GetServersHost(device),
                               QUIC_PORT, SMALL_RESOURCE)],
                          stdout=open(os.devnull, 'w')) != 0:
      sleep(0.1)
      waited_s += 0.1
      # Give up after ~5 seconds of failed probes.
      assert waited_s < 5, "quic_server failed to start after %fs" % waited_s

  def ShutdownQuicServer(self):
    if self._process:
      self._process.terminate()
def GenerateHttpTestResources():
  """Creates a temp docroot holding the small and large HTTP test resources.

  Returns:
    Path of the temporary document root directory (caller must delete it).
  """
  http_server_doc_root = tempfile.mkdtemp()
  # Create a small test file to serve.
  # Bytes literals + context managers: the writes behave identically on
  # Python 2 and 3, and the files are closed even if a size assert fires.
  small_file_name = os.path.join(http_server_doc_root, SMALL_RESOURCE)
  with open(small_file_name, 'wb') as small_file:
    small_file.write(b'<html><body></body></html>')
  assert SMALL_RESOURCE_SIZE == os.path.getsize(small_file_name)
  # Create a large (10MB) test file to serve.
  large_file_name = os.path.join(http_server_doc_root, LARGE_RESOURCE)
  with open(large_file_name, 'wb') as large_file:
    large_file.write(b'<html><body>')
    # One bulk write instead of 1,000,000 ten-byte writes.
    large_file.write(b'1234567890' * 1000000)
    large_file.write(b'</body></html>')
  assert LARGE_RESOURCE_SIZE == os.path.getsize(large_file_name)
  return http_server_doc_root
def GenerateQuicTestResources(device):
  # Builds a QUIC in-memory-cache docroot by mirroring the HTTP server's
  # resources with wget; returns the temp directory path.
  quic_server_doc_root = tempfile.mkdtemp()
  # Use wget to build up fake QUIC in-memory cache dir for serving.
  # quic_server expects the dir/file layout that wget produces.
  for resource in [SMALL_RESOURCE, LARGE_RESOURCE]:
    assert subprocess.Popen(['wget', '-p', '-q', '--save-headers',
                             GetHttpServerURL(device, resource)],
                            cwd=quic_server_doc_root).wait() == 0
  # wget places results in host:port directory. Adjust for QUIC port.
  os.rename(os.path.join(quic_server_doc_root,
                         "%s:%d" % (GetServersHost(device), HTTP_PORT)),
            os.path.join(quic_server_doc_root,
                         "%s:%d" % (GetServersHost(device), QUIC_PORT)))
  return quic_server_doc_root
def GenerateLighttpdConfig(config_file, http_server_doc_root, http_server):
  """Writes a custom lighttpd config into |config_file|.

  A customized config file is required to allow overriding the server.bind
  setting.
  """
  settings = [
      'server.document-root = "%s"\n' % http_server_doc_root,
      'server.port = %d\n' % HTTP_PORT,
      # The tag and pid-file entries keep lighttpd_server.py's internal
      # test happy.
      'server.tag = "%s"\n' % http_server.server_tag,
      'server.pid-file = "%s"\n' % http_server.pid_file,
      'dir-listing.activate = "enable"\n',
  ]
  config_file.write(''.join(settings))
  config_file.flush()
def main():
  """Entry point: installs the app, starts tethering and the HTTP/QUIC test
  servers, runs CronetPerfTestBenchmark via Telemetry, then tears down."""
  # Install APK
  device = GetDevice()
  device.EnableRoot()
  device.Install(APP_APK)
  # Start USB reverse tethering.
  # Port map is ignored for tethering; must create one to placate assertions.
  named_port_pair_map = {'http': (forwarders.PortPair(0, 0)),
                         'https': None, 'dns': None}
  port_pairs = forwarders.PortPairs(**named_port_pair_map)
  # NOTE(review): `forwarder` is unused below — presumably the handle must be
  # kept alive for tethering to persist during the run; confirm.
  forwarder = GetForwarderFactory(device).Create(port_pairs)
  # Start HTTP server.
  http_server_doc_root = GenerateHttpTestResources()
  config_file = tempfile.NamedTemporaryFile()
  http_server = lighttpd_server.LighttpdServer(http_server_doc_root,
      port=HTTP_PORT, base_config_path=config_file.name)
  GenerateLighttpdConfig(config_file, http_server_doc_root, http_server)
  assert http_server.StartupHttpServer()
  # Server has read its config; the temp config file can go away.
  config_file.close()
  # Start QUIC server.
  quic_server_doc_root = GenerateQuicTestResources(device)
  quic_server = QuicServer(quic_server_doc_root)
  quic_server.StartupQuicServer(device)
  # Launch Telemetry's benchmark_runner on CronetPerfTestBenchmark.
  # By specifying this file's directory as the benchmark directory, it will
  # allow benchmark_runner to in turn open this file up and find the
  # CronetPerfTestBenchmark class to run the benchmark.
  top_level_dir = os.path.dirname(os.path.realpath(__file__))
  runner_config = benchmark_runner.ProjectConfig(
      top_level_dir=top_level_dir,
      benchmark_dirs=[top_level_dir])
  # Inject the runner CLI arguments ahead of any user-supplied ones.
  sys.argv.insert(1, 'run')
  sys.argv.insert(2, 'run.CronetPerfTestBenchmark')
  sys.argv.insert(3, '--android-rndis')
  benchmark_runner.main(runner_config)
  # Shutdown.
  quic_server.ShutdownQuicServer()
  shutil.rmtree(quic_server_doc_root)
  http_server.ShutdownHttpServer()
  shutil.rmtree(http_server_doc_root)
# Script entry point.
if __name__ == '__main__':
  main()
| {
"content_hash": "d2b6803ce6b0bcffbbbbdf21abcec271",
"timestamp": "",
"source": "github",
"line_count": 316,
"max_line_length": 80,
"avg_line_length": 37.901898734177216,
"alnum_prop": 0.6984219754529515,
"repo_name": "lihui7115/ChromiumGStreamerBackend",
"id": "111a907bee8ec99c0974eed756260ce8c042e657",
"size": "12162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "components/cronet/android/test/javaperftests/run.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "37073"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "9508834"
},
{
"name": "C++",
"bytes": "242598549"
},
{
"name": "CSS",
"bytes": "943747"
},
{
"name": "DM",
"bytes": "60"
},
{
"name": "Groff",
"bytes": "2494"
},
{
"name": "HTML",
"bytes": "27281878"
},
{
"name": "Java",
"bytes": "14561064"
},
{
"name": "JavaScript",
"bytes": "20540839"
},
{
"name": "Makefile",
"bytes": "70864"
},
{
"name": "Objective-C",
"bytes": "1745880"
},
{
"name": "Objective-C++",
"bytes": "10008668"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "178732"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "482954"
},
{
"name": "Python",
"bytes": "8626890"
},
{
"name": "Shell",
"bytes": "481888"
},
{
"name": "Standard ML",
"bytes": "5106"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
} |
from optparse import make_option
import subprocess
from multiprocessing import Pool
from bluebottle.common.management.commands.base import Command as BaseCommand
from bluebottle.clients.models import Client
def reindex(schema_name):
    """Rebuild the search index for a single tenant schema.

    Returns a ``(schema_name, exit_code)`` tuple where ``exit_code`` is the
    return code of the ``search_index --rebuild`` management command.
    """
    print(f'reindexing tenant {schema_name}')
    command = (
        f'./manage.py tenant_command -s {schema_name} search_index --rebuild -f'
    )
    exit_code = subprocess.call(command, shell=True)
    return schema_name, exit_code
class Command(BaseCommand):
    """Rebuild the search index of every tenant, fanned out over a pool of
    worker processes."""

    help = 'Reindex all tenants'
    option_list = BaseCommand.options + (
        make_option(
            '--processes',
            # type='int': without it optparse delivers a CLI-supplied value
            # as a string, and Pool(processes='8') raises.
            type='int',
            default=8,
            help='How many processes run in parallel'
        ),
    )

    def handle(self, *args, **options):
        """Run `reindex` for every tenant in parallel and report failures."""
        # int() is a second guard in case the option value bypassed optparse.
        pool = Pool(processes=int(options['processes']))
        tasks = [
            pool.apply_async(reindex, args=[str(tenant.schema_name)])
            for tenant in Client.objects.all()
        ]
        results = [result.get() for result in tasks]
        for tenant, result in results:
            if result != 0:
                print(f'Tenant failed to index: {tenant}')
        pool.close()
        # Wait for the worker processes to exit before returning.
        pool.join()
| {
"content_hash": "d905a26b10a931e7ed33cf3785bdc957",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 110,
"avg_line_length": 26.13953488372093,
"alnum_prop": 0.6165480427046264,
"repo_name": "onepercentclub/bluebottle",
"id": "3c128573a3a4c1e288f9eef17a9b8bfc6d23e73f",
"size": "1124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/clients/management/commands/reindex.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
} |
"""Area/message template importer for CAPCollector project.
Imports area/messages template files to corresponding SQL tables.
File names must end with .xml.
An area template is a single CAP <area> block.
A message template is a single CAP <alert> block with a single <info> block;
<area> blocks are ignored.
Run
$ python manage.py import_templates area /home/user/path/to/templates/
to import area templates or
$ python manage.py import_templates message /home/user/path/to/templates/
to import message templates.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/custom-management-commands/
"""
__author__ = "arcadiy@google.com (Arkadii Yakovets)"
import os
import uuid
from core import models
from core import utils
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
class Command(BaseCommand):
"""Template importer command implementation."""
args = "<templates_type templates_path>"
help = "Imports existing area or message template files to SQL tables."
def handle(self, *args, **options):
if len(args) != 2:
raise CommandError(
"Wrong arguments number! Please use python manage.py import_templates"
" template_type template_path (e.g. python manage.py import_templates"
" area /home/user/path/to/templates/ or python manage.py"
" import_templates message /home/user/path/to/templates/")
templates_type = args[0]
templates_path = args[1]
template_objects = []
for file_name in os.listdir(templates_path):
if not file_name.endswith(".xml"):
print "Ignored file: %s" % file_name
continue
file_path = os.path.join(templates_path, file_name)
with open(file_path, "r") as template_file:
template_content = template_file.read()
template_dict = utils.ParseAlert(template_content, "xml", uuid.uuid4())
if templates_type == "area":
template_model = models.AreaTemplate
elif templates_type == "message":
template_model = models.MessageTemplate
template_obj = template_model()
template_obj.title = file_name.rstrip(".xml").strip()
template_obj.content = template_content
template_objects.append(template_obj)
# Save to DB.
template_model.objects.bulk_create(template_objects)
| {
"content_hash": "2c8001b9f4dc729778c1f81390999c00",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 80,
"avg_line_length": 34.11594202898551,
"alnum_prop": 0.7073067119796091,
"repo_name": "CAPTools/CAPCollector",
"id": "15f67a46d9f0e6586f1e6d176506ac3a77aff264",
"size": "2354",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "core/management/commands/import_templates.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "98989"
},
{
"name": "Shell",
"bytes": "8416"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ambari_commons.os_check import OSCheck
from resource_management.libraries.resources.repository import Repository
from resource_management.core.logger import Logger
import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
# components_lits = repoName + postfix
_UBUNTU_REPO_COMPONENTS_POSTFIX = ["main"]
REPO_FILE_NAME_PREFIX = 'IOP-'
STACK_TO_ROOT_FOLDER = {"IOP": "/usr/iop", "BIGINSIGHTS":"/usr/iop"}
def _alter_repo(action, repo_string, repo_template):
  """Creates or deletes the package repositories described by a JSON string.

  @param action: "delete" or "create"
  @param repo_string: JSON list (or single object) of repository descriptions,
      e.g. "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]"
  @param repo_template: template used to render each repository file
  """
  repos = json.loads(repo_string)
  if not isinstance(repos, list):
    repos = [repos]
  if not repos:
    Logger.info("Repository list is empty. Ambari may not be managing the repositories.")
  else:
    Logger.info("Initializing {0} repositories".format(str(len(repos))))
  for repo in repos:
    # Tolerate descriptions that omit these keys.
    repo.setdefault('baseUrl', None)
    repo.setdefault('mirrorsList', None)
    ubuntu_components = [repo['repoName']] + _UBUNTU_REPO_COMPONENTS_POSTFIX
    Repository(repo['repoId'],
               action=action,
               base_url=repo['baseUrl'],
               mirror_list=repo['mirrorsList'],
               repo_file_name=repo['repoName'],
               repo_template=repo_template,
               components=ubuntu_components,  # ubuntu specific
               )
def install_repos():
  """Creates the stack repositories (and optional service repositories),
  unless the host was prepared outside of Ambari."""
  import params
  if params.host_sys_prepped:
    return
  if OSCheck.is_suse_family() or OSCheck.is_redhat_family():
    template = params.repo_rhel_suse
  else:
    template = params.repo_ubuntu
  _alter_repo("create", params.repo_info, template)
  if params.service_repo_info:
    _alter_repo("create", params.service_repo_info, template)
| {
"content_hash": "6e7a3eef76f0faebc08226b18ec712e3",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 278,
"avg_line_length": 40.714285714285715,
"alnum_prop": 0.7017543859649122,
"repo_name": "arenadata/ambari",
"id": "0de2f5b59150b0dc985e6f5f3d1e791872dee537",
"size": "2850",
"binary": false,
"copies": "2",
"ref": "refs/heads/branch-adh-1.6",
"path": "ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-INSTALL/scripts/repo_initialization.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "46700"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "343739"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "Dockerfile",
"bytes": "6387"
},
{
"name": "EJS",
"bytes": "777"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Gherkin",
"bytes": "990"
},
{
"name": "Groovy",
"bytes": "15882"
},
{
"name": "HTML",
"bytes": "717983"
},
{
"name": "Handlebars",
"bytes": "1819641"
},
{
"name": "Java",
"bytes": "29172298"
},
{
"name": "JavaScript",
"bytes": "18571926"
},
{
"name": "Jinja",
"bytes": "1490416"
},
{
"name": "Less",
"bytes": "412933"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "287501"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "18507704"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Ruby",
"bytes": "38590"
},
{
"name": "SCSS",
"bytes": "40072"
},
{
"name": "Shell",
"bytes": "924115"
},
{
"name": "Stylus",
"bytes": "820"
},
{
"name": "TSQL",
"bytes": "42351"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
} |
"""Helper utilities for Faint file formats."""
import ifaint
def _io_save_error(io_error, file_name):
    """Converts an IOError (which would be reported as an internal error)
    into an ifaint.SaveError, which is reported without a stack-trace."""
    message = ('Faint could not write to "%s".\n\nError code: %d (%s).'
               % (file_name, io_error.errno, io_error.strerror))
    return ifaint.SaveError(message)
def _io_load_error(io_error, file_name):
    """Load equivalent of _io_save_error: wraps an IOError in an
    ifaint.LoadError."""
    message = ('Faint could not read from "%s".\n\nError code: %d (%s).'
               % (file_name, io_error.errno, io_error.strerror))
    return ifaint.LoadError(message)
def open_for_writing_text(file_path):
    """Opens file_path for text writing.

    Returns a File object, or raises ifaint.SaveError on failure."""
    try:
        return open(file_path, 'w')
    except IOError as e:
        raise _io_save_error(e, file_path)
def open_for_writing_binary(file_path):
    """Opens file_path for binary writing.

    Returns a File object, or raises ifaint.SaveError on failure."""
    try:
        return open(file_path, 'wb')
    except IOError as e:
        raise _io_save_error(e, file_path)
def open_for_reading_binary(file_path):
    """Returns a File object opened for binary reading, or raises
    ifaint.LoadError on failure."""
    try:
        # 'rb', not 'r': the original opened in text mode despite the name,
        # docstring and the sibling open_for_writing_binary all indicating
        # binary — text mode corrupts binary data (newline translation and,
        # on Python 3, decoding).
        f = open(file_path, 'rb')
        return f
    except IOError as e:
        raise _io_load_error(e, file_path)
| {
"content_hash": "88f72f4836421539ec7d1face7f905ce",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 75,
"avg_line_length": 30.931818181818183,
"alnum_prop": 0.6296840558412932,
"repo_name": "tectronics/faint-graphics-editor",
"id": "79965edfa46f36c69147fcc713678326b95455a4",
"size": "1982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/faint/formatutil.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "32346"
},
{
"name": "C++",
"bytes": "2585909"
},
{
"name": "Emacs Lisp",
"bytes": "8164"
},
{
"name": "HTML",
"bytes": "26096"
},
{
"name": "NSIS",
"bytes": "2088"
},
{
"name": "Python",
"bytes": "454374"
}
],
"symlink_target": ""
} |
from datadog_checks.base import AgentCheck, ConfigurationError
from datadog_checks.base.errors import CheckException
class KernelcareCheck(AgentCheck):
    """Agent check reporting KernelCare kernel-patching status counters.

    Counters are fetched from the CloudLinux 'nagios' API endpoints, either
    per server key or per reseller login/API token.
    """

    KEY_KCARE_NAGIOS_ENDPOINT = 'https://cln.cloudlinux.com/clweb/api/kcare/nagios/'
    RES_KCARE_NAGIOS_ENDPOINT = 'https://cln.cloudlinux.com/clweb/api/kcare/nagios-res/'

    # Error bodies the API can return; a response starting with any of these
    # is treated as a failed connection even if the HTTP status was OK.
    HTTP_RESPONSE_ERRORS = [
        'Servers not found for key',
        'Reseller not found',
        'Registration token not found for reseller',
    ]

    def _parse_nagios_response(self, text):
        """Parses a nagios-style response into a dict of int counters.

        The counters follow the first '|' of the first line (or make up the
        whole line when no '|' is present) as ';'-separated 'name=value'
        pairs. Raises ValueError on malformed input.
        """
        first_line = text.split('\n')[0]
        # split('|', 1) yields one or two fields; the counters are always in
        # the last one, which unifies the original two-branch handling.
        params = first_line.split('|', 1)[-1]
        counters = {}
        for pair in params.split(';'):
            name, value = pair.split('=', 1)
            counters[name] = int(value)
        return counters

    def get_url(self, instance):
        """Builds the API URL from the instance config.

        Raises ConfigurationError unless `key`, or both `login` and
        `api_token`, are provided.
        """
        key = instance.get('key')
        if key:
            return self.KEY_KCARE_NAGIOS_ENDPOINT + key
        login = instance.get('login')
        api_token = instance.get('api_token')
        if login and api_token:
            return self.RES_KCARE_NAGIOS_ENDPOINT + login + '/' + api_token
        raise ConfigurationError('Configuration error, you must provide `key` or `login` & `api_token`')

    def check(self, instance):
        """Fetches KernelCare status and submits gauges plus the
        `kernelcare.can_connect` service check."""
        url = self.get_url(instance)
        try:
            response = self.http.get(url)
            response.raise_for_status()
        except Exception as e:
            self.service_check('kernelcare.can_connect', self.CRITICAL, message=str(e))
            # Bare `raise` preserves the original traceback; the original
            # `raise e` re-raised from this frame, losing it.
            raise
        for prefix in self.HTTP_RESPONSE_ERRORS:
            if response.text.startswith(prefix):
                self.service_check('kernelcare.can_connect', self.CRITICAL,
                                   message=response.text)
                raise CheckException(response.text)
        try:
            data = self._parse_nagios_response(response.text)
        except ValueError:
            message = 'Kernelcare API: Invalid Response'
            self.service_check('kernelcare.can_connect', self.CRITICAL,
                               message=message)
            raise CheckException(message)
        self.service_check('kernelcare.can_connect', self.OK)
        # Submit only the counters present in the response.
        for metric in ('uptodate', 'outofdate', 'unsupported', 'inactive'):
            if metric in data:
                self.gauge('kernelcare.' + metric, data[metric])
| {
"content_hash": "cf6c32c94f41605365aa353f82317a4d",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 104,
"avg_line_length": 33.03846153846154,
"alnum_prop": 0.5968180054326736,
"repo_name": "DataDog/integrations-extras",
"id": "911e04149f7e46bd08d02801e51ab1c859b1fdf7",
"size": "2577",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kernelcare/datadog_checks/kernelcare/check.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "4265"
},
{
"name": "Go",
"bytes": "4119"
},
{
"name": "PHP",
"bytes": "3192"
},
{
"name": "Python",
"bytes": "1219552"
},
{
"name": "Ruby",
"bytes": "8005"
},
{
"name": "Shell",
"bytes": "4237"
}
],
"symlink_target": ""
} |
import logging
from django import template
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from trove_dashboard import api
from trove_dashboard.content.database_configurations \
import config_param_manager
from trove_dashboard.content.databases import db_capability
from trove_dashboard.content.databases.logs import tables as log_tables
from trove_dashboard.content.databases import tables
from troveclient import exceptions as trove_exceptions
LOG = logging.getLogger(__name__)
class OverviewTab(tabs.Tab):
    name = _("Overview")
    slug = "overview"

    def get_context_data(self, request):
        """Builds the overview context, including whether the database root
        user has been enabled (or a message when that cannot be determined)."""
        instance = self.tab_group.kwargs['instance']
        context = {"instance": instance}
        try:
            root_show = api.trove.root_show(request, instance.id)
            context["root_enabled"] = template.defaultfilters.yesno(
                root_show.rootEnabled)
        except Exception:
            context["root_enabled"] = _('Unable to obtain information on '
                                        'root user')
        return context

    def get_template_name(self, request):
        """Returns a datastore-specific overview template when one exists,
        otherwise the generic overview template."""
        instance = self.tab_group.kwargs['instance']
        datastore_type = self._get_template_type(instance.datastore['type'])
        template_file = ('project/databases/_detail_overview_%s.html'
                         % datastore_type)
        try:
            template.loader.get_template(template_file)
        except template.TemplateDoesNotExist:
            # This datastore type has no dedicated template file; fall back
            # to the base template.
            return 'project/databases/_detail_overview.html'
        return template_file

    def _get_template_type(self, datastore):
        """Maps compatible datastore families onto a shared template suffix."""
        if db_capability.is_mysql_compatible(datastore):
            return 'mysql'
        if db_capability.is_oracle_ra_datastore(datastore):
            return 'oracle'
        if db_capability.is_datastax_enterprise(datastore):
            return 'cassandra'
        return datastore
class UserTab(tabs.TableTab):
    """Lists the database users of an instance, annotated with their
    database-access information when the datastore supports it."""
    table_classes = [tables.UsersTable]
    name = _("Users")
    slug = "users_tab"
    instance = None
    template_name = "horizon/common/_detail_table.html"
    preload = False

    def get_users_data(self):
        """Fetches the user list; on failure returns an empty list after
        reporting the error through Horizon's exception handler."""
        instance = self.tab_group.kwargs['instance']
        try:
            data = api.trove.users_list(self.request, instance.id)
            for user in data:
                user.instance = instance
                try:
                    user.access = api.trove.user_list_access(self.request,
                                                             instance.id,
                                                             user.name,
                                                             host=user.host)
                except exceptions.NOT_FOUND:
                    # No access records for this user; leave user.access unset.
                    pass
                except trove_exceptions.BadRequest as e:
                    # Some datastores do not implement 'list_access'; that
                    # case is logged, any other BadRequest is re-raised.
                    if not ("The 'list_access' operation "
                            "is not supported") in e.message:
                        raise
                    LOG.info("List user access is not available. "
                             "Reason: %s", e.message)
                except Exception:
                    msg = _('Unable to get user access data.')
                    exceptions.handle(self.request, msg)
        except Exception:
            msg = _('Unable to get user data.')
            exceptions.handle(self.request, msg)
            data = []
        return data

    def allowed(self, request):
        # Tab is only shown to users permitted to manage database users.
        return tables.has_user_add_perm(request)
class DatabaseTab(tabs.TableTab):
    """Lists the schemas/databases present on an instance."""
    table_classes = [tables.DatabaseTable]
    name = _("Databases")
    slug = "database_tab"
    instance = None
    template_name = "horizon/common/_detail_table.html"
    preload = False

    def get_databases_data(self):
        """Fetches the database list, tagging each entry with its instance.

        Returns an empty list when the datastore does not support
        'list_databases' or when the API call fails.
        """
        instance = self.tab_group.kwargs['instance']
        try:
            data = api.trove.database_list(self.request, instance.id)
            # A plain loop instead of map(): under Python 3 map() is lazy,
            # so the original map(add_instance, data) was never executed and
            # the instance attribute was silently left unset.
            for database in data:
                database.instance = instance
        except trove_exceptions.BadRequest as e:
            data = []
            # Some datastores do not implement 'list_databases'; that case
            # is logged, any other BadRequest is re-raised.
            if not ("The 'list_databases' operation "
                    "is not supported") in e.message:
                raise
            LOG.info("List database is not available. "
                     "Reason: %s", e.message)
        except Exception:
            msg = _('Unable to get databases data.')
            exceptions.handle(self.request, msg)
            data = []
        return data

    def allowed(self, request):
        # Tab is only shown to users permitted to manage databases.
        return tables.has_database_add_perm(request)
class ConfigDefaultsTab(tabs.TableTab):
    table_classes = [tables.ConfigDefaultsTable]
    name = _("Defaults")
    slug = "config_defaults"
    instance = None
    template_name = "horizon/common/_detail_table.html"
    preload = False

    def get_config_defaults_data(self):
        """Returns the instance's default configuration as a name-sorted list
        of ConfigParam objects (empty when defaults are unavailable)."""
        instance = self.tab_group.kwargs['instance']
        try:
            defaults = api.trove.configuration_default(self.request,
                                                       instance.id)
        except trove_exceptions.BadRequest as e:
            defaults = None
            # Datastores without a configuration parser are logged; any
            # other BadRequest is re-raised.
            if not ("No configuration parser found") in e.message:
                raise
            LOG.info("Configuration defaults are not available. "
                     "Reason: %s", e.message)
        if defaults is None:
            return []
        params = [config_param_manager.ConfigParam(None, name, value)
                  for name, value in defaults.configuration.items()]
        return sorted(params, key=lambda config: config.name)
class BackupsTab(tabs.TableTab):
    table_classes = [tables.InstanceBackupsTable]
    name = _("Backups")
    slug = "backups_tab"
    instance = None
    template_name = "horizon/common/_detail_table.html"
    preload = False

    def get_backups_data(self):
        """Returns the instance's backups, or an empty list on failure."""
        instance = self.tab_group.kwargs['instance']
        try:
            return api.trove.instance_backups(self.request, instance.id)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to get database backup data.'))
            return []

    def allowed(self, request):
        """Show the tab only when object storage is available and the
        datastore is capable of backups."""
        has_object_store = request.user.has_perm(
            'openstack.services.object-store')
        return has_object_store and self._has_backup_capability(
            self.tab_group.kwargs)

    def _has_backup_capability(self, kwargs):
        instance = kwargs['instance']
        if instance is None:
            # Without an instance, err on the side of showing the tab.
            return True
        return db_capability.can_backup(instance.datastore['type'])
class LogsTab(tabs.TableTab):
    """Lists the guest logs available on an instance."""
    table_classes = [log_tables.LogsTable]
    name = _("Logs")
    slug = "logs_tab"
    template_name = "horizon/common/_detail_table.html"
    preload = False

    def get_logs_data(self):
        """Returns the instance's log list, or an empty list on failure."""
        instance = self.tab_group.kwargs['instance']
        try:
            return api.trove.log_list(self.request, instance.id)
        except Exception as e:
            # str(e) instead of e.message: BaseException.message was removed
            # in Python 3 (and deprecated since 2.6).
            LOG.exception(
                _('Unable to retrieve list of logs.\n%s') % str(e))
            return []
class InstanceDetailTabs(tabs.TabGroup):
    """Tab group shown on the database-instance detail page."""
    slug = "instance_details"
    # Order here determines the display order of the tabs.
    tabs = (OverviewTab, UserTab, DatabaseTab, BackupsTab, ConfigDefaultsTab,
            LogsTab)
    # NOTE(review): presumably keeps the selected tab active across requests
    # (Horizon TabGroup option) — confirm.
    sticky = True
| {
"content_hash": "8f0a26162528b6642c200e5aa50b362c",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 77,
"avg_line_length": 35.34615384615385,
"alnum_prop": 0.5832426550598476,
"repo_name": "Tesora-Release/tesora-trove-dashboard",
"id": "cfe1ce0d89895a4b0b44eaf0524a5665e3d48bd8",
"size": "7962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trove_dashboard/content/databases/tabs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4087"
},
{
"name": "HTML",
"bytes": "33250"
},
{
"name": "JavaScript",
"bytes": "1933"
},
{
"name": "Python",
"bytes": "432226"
},
{
"name": "Shell",
"bytes": "18300"
}
],
"symlink_target": ""
} |
"""Packaging script for parikstra, a Parisian transport system API client."""

try:
    from setuptools import setup
except ImportError:
    # Fall back to distutils when setuptools is unavailable. The original
    # bare `except:` would also have swallowed SystemExit and
    # KeyboardInterrupt.
    from distutils.core import setup

setup(
    name = "parikstra",
    description = "Parisian transport system API client",
    py_modules = ["parikstra"],
    test_suite = "tests",
    install_requires = [
        "beautifulsoup4",
        "requests",
    ],
    version = "0.1",
    author = "Gawen ARAB",
    author_email = "g@wenarab.com",
    url = "https://github.com/Gawen/parikstra",
    license = "MIT",
    classifiers = [
        "Development Status :: 5 - Production/Stable",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Topic :: Internet",
        "Topic :: Utilities",
    ],
)
| {
"content_hash": "4144dc650510ce987d5823915cdbe858",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 57,
"avg_line_length": 23.46875,
"alnum_prop": 0.5752330226364847,
"repo_name": "Gawen/parikstra",
"id": "018716e678e1e41a52907235dd74766cefeda2da",
"size": "774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14858"
}
],
"symlink_target": ""
} |
from joueur.delta_mergeable import DeltaMergeable
# the base class that every game object within a game inherit from for Python
# manipulation that would be redundant via Creer
class BaseGameObject(DeltaMergeable):
    """Base class every in-game object inherits from, providing generic
    Python-side behavior that would be redundant to generate via Creer."""

    def __init__(self):
        DeltaMergeable.__init__(self)

    def __str__(self):
        return "{} #{}".format(self.game_object_name, self.id)

    def __repr__(self):
        return self.__str__()

    def __hash__(self):
        # Server-side ids are always unique, so hashing on id is safe.
        return hash(self.id)
| {
"content_hash": "3e6df7bf2f2b743c8178d7191dc02018",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 77,
"avg_line_length": 30,
"alnum_prop": 0.6491228070175439,
"repo_name": "siggame/Joueur.py",
"id": "b4db2b9b8e8d3256c86648d9c9224d9502dbd6cd",
"size": "570",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "joueur/base_game_object.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "136"
},
{
"name": "Makefile",
"bytes": "223"
},
{
"name": "Python",
"bytes": "407153"
},
{
"name": "Shell",
"bytes": "472"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the SSO app: identity providers, the email domains
    they authenticate, per-user SSO exemptions, and trust acknowledgements
    linking domains to identity providers."""

    initial = True

    dependencies = [
        ('accounting', '0053_app_user_profiles_advanced'),
    ]

    operations = [
        # An email domain whose users authenticate through an IdentityProvider.
        migrations.CreateModel(
            name='AuthenticatedEmailDomain',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email_domain', models.CharField(db_index=True, max_length=256, unique=True)),
            ],
        ),
        # SAML identity provider configuration, including the IdP certificate
        # and the service-provider cert pair plus a rollover cert pair.
        migrations.CreateModel(
            name='IdentityProvider',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('slug', models.CharField(db_index=True, max_length=256, unique=True)),
                ('idp_type', models.CharField(choices=[('azure_ad', 'Azure AD')], default='azure_ad', max_length=50)),
                ('is_editable', models.BooleanField(default=False)),
                ('is_active', models.BooleanField(default=False)),
                ('entity_id', models.TextField(blank=True, null=True)),
                ('login_url', models.TextField(blank=True, null=True)),
                ('logout_url', models.TextField(blank=True, null=True)),
                ('idp_cert_public', models.TextField(blank=True, null=True)),
                ('date_idp_cert_expiration', models.DateTimeField(blank=True, null=True)),
                ('sp_cert_public', models.TextField(blank=True, null=True)),
                ('sp_cert_private', models.TextField(blank=True, null=True)),
                ('date_sp_cert_expiration', models.DateTimeField(blank=True, null=True)),
                ('sp_rollover_cert_public', models.TextField(blank=True, null=True)),
                ('sp_rollover_cert_private', models.TextField(blank=True, null=True)),
                ('date_sp_rollover_cert_expiration', models.DateTimeField(blank=True, null=True)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('created_by', models.EmailField(max_length=254)),
                ('last_modified', models.DateTimeField(auto_now=True)),
                ('last_modified_by', models.EmailField(max_length=254)),
                # PROTECT: a billing account cannot be deleted while it still
                # owns an identity provider.
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='accounting.BillingAccount')),
            ],
        ),
        # A user explicitly excluded from SSO for an authenticated domain.
        migrations.CreateModel(
            name='UserExemptFromSingleSignOn',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(db_index=True, max_length=128)),
                ('email_domain', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sso.AuthenticatedEmailDomain')),
            ],
        ),
        # Records that a project space (domain) has acknowledged trust in an
        # identity provider.
        migrations.CreateModel(
            name='TrustedIdentityProvider',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('domain', models.CharField(db_index=True, max_length=256)),
                ('date_acknowledged', models.DateTimeField(auto_now_add=True)),
                ('acknowledged_by', models.EmailField(max_length=254)),
                ('identity_provider', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='sso.IdentityProvider')),
            ],
        ),
        # Back-reference from email domains to their identity provider.
        migrations.AddField(
            model_name='authenticatedemaildomain',
            name='identity_provider',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='sso.IdentityProvider'),
        ),
    ]
| {
"content_hash": "6bb3910991a6c941a5689323fa447d5c",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 132,
"avg_line_length": 53.74647887323944,
"alnum_prop": 0.5911949685534591,
"repo_name": "dimagi/commcare-hq",
"id": "55e42ca3fb4c6d562905d4cba393593334ed5fe4",
"size": "3866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/sso/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
} |
from lxml import etree
from src.Player import Player, Video
import praw
import re
import pickle
r = praw.Reddit("filling the database")
def contains_video_link(text):
    """Returns True when the text mentions any known video host
    (oddshot, youtu.be, twitch.tv or youtube)."""
    video_hosts = ("oddshot", "youtu.be", "twitch.tv", "youtube")
    return any(host in text for host in video_hosts)
def get_url(submission):
    """Extracts the first video URL from a self post's body, falling back to
    the submission's own URL when no video link appears in the text."""
    text = submission.selftext.lower()
    pattern = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    for candidate in re.findall(pattern, text):
        if contains_video_link(candidate):
            return candidate
    # No video link found in the body; use the submission URL instead.
    return submission.url
def is_video(submission):
    """Returns True when a submission links to (or, for self posts,
    mentions) a known video host."""
    if submission.is_self:
        return contains_video_link(submission.selftext.lower())
    return contains_video_link(submission.domain)
class DatabaseManager:
    """Builds a pro-player database from saved roster HTML tables and
    collects highly-upvoted reddit video submissions for each player."""

    def __init__(self):
        # player name (lower-cased) -> Player
        self.database = {}
        # reddit permalink -> Video; de-duplicates videos found while
        # searching for different players
        self.videos_dict = {}
        self.CURRENT_VERSION = "1.0"
        self.build()
        # ids of submissions already processed (see add_submission/is_done)
        self.done_submissions = []

    def build(self):
        """Populates self.database from the per-region HTML roster files."""
        # build_database mutates and returns self.database, so each call
        # accumulates players from one more region.
        self.database = self.build_database("../Player_Info/NALCS.html", "NA")
        self.database = self.build_database("../Player_Info/EULCS.html", "EU")
        self.database = self.build_database("../Player_Info/LCK.html", "LCK")
        self.database = self.build_database("../Player_Info/LPL.html", "LPL")

    def find_videos(self):
        """Searches reddit for each player's top submissions and attaches
        video links (score >= 50) to the player and the shared video dict."""
        for player in self.database.keys():
            player = player.lower()
            submissions = r.search("title:" + player, subreddit="leagueoflegends", sort="top", period="all", limit=50)
            for submission in submissions:
                # Skip low-engagement submissions.
                if submission.score < 50:
                    continue
                if submission.is_self:
                    curr_url = get_url(submission)
                else:
                    curr_url = submission.url
                if is_video(submission):
                    if submission.permalink in self.videos_dict:
                        # Video already known from another player's search;
                        # just cross-link it.
                        video = self.videos_dict.get(submission.permalink)
                        self.database[player].add_video(video)
                        video.add_player(self.database[player])
                        print(video.to_table_row(self.database[player]))
                    else:
                        video = Video(title=submission.title, link=curr_url, players_list=[self.database[player]],
                                      upvotes=submission.score)
                        self.database[player].add_video(video)
                        self.videos_dict[submission.permalink] = video
                        print(video.to_table_row(self.database[player]))
        return self.database

    def save_db(self, filename):
        """Pickles this whole manager (players, videos, done ids) to disk."""
        with open(filename, 'wb') as save_file:
            pickle.dump(self, save_file, pickle.HIGHEST_PROTOCOL)

    def add_submission(self, submission):
        # Mark a submission as processed so is_done() can skip it later.
        self.done_submissions.append(submission.id)

    def is_done(self, submission):
        """Returns True when the submission was already processed."""
        return submission.id in self.done_submissions

    def build_database(self, filename, region):
        """Parses one saved roster HTML table into Player objects.

        Assumes the table layout of the saved pages: player link in column 0,
        team link in column 2, position in column 4, stream link in column 5
        — TODO(review): confirm against the saved HTML files.
        """
        with open(filename, "r", encoding="UTF-8") as html_file:
            table_string = html_file.read()
        # recover=True lets lxml tolerate the imperfect saved markup.
        parser = etree.XMLParser(recover=True)
        # Turn the string into an actual table
        table = etree.XML(table_string, parser=parser)
        table.xpath('//tr/td//text()')
        rows = iter(table)
        for row in rows:
            element = row[0].find("a")
            if element is not None:
                # hrefs start with a leading '/'; strip it to get the name.
                name = element.get("href")
                player = Player(name[1:])
                element = row[2].find("a")
                if element is not None:
                    team_name = element.get("href")
                    team_name = team_name[1:].replace('_', " ")
                    player.team_name = team_name
                player.position = row[4].text
                element = row[5].find("a")
                if element is not None:
                    player.stream_link = element.get("href")
                    if "azubu" in player.stream_link.lower():
                        player.stream_site = "Azubu"
                    elif "twitch" in player.stream_link.lower():
                        player.stream_site = "Twitch"
                # put the player in the dictionary
                player.region = region
                self.database[name[1:].lower()] = player
        return self.database
def load_db(filename):
    """Deserialize and return a previously pickled database from *filename*."""
    with open(filename, 'rb') as db_file:
        return pickle.load(db_file)
| {
"content_hash": "8e7a817725076496cec2dbbc9666a811",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 118,
"avg_line_length": 35.22727272727273,
"alnum_prop": 0.54,
"repo_name": "Ashvio/ProPlayerInfoBot",
"id": "8195789f774b372cc57e6ded1373afa3e2dd0c40",
"size": "4650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/DatabaseManager.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from bs4 import BeautifulSoup
import urllib
#csv is for the csv writer
import csv
# Accumulates one [title, url, doc_num, prom_date, release_date] list per
# scraped document.  (NOTE: despite the original comment, this is a list,
# not a dictionary.)
holder = []
# Page to scrape -- a locally saved HTML file here, not a live URL.
target_url = "target.html"
def scraper(url):
    """Scrape document titles, links, and date metadata from *url*.

    Results accumulate in the module-level ``holder`` list; each entry ends
    up as [title, href, doc_number, promulgation_date, internet_release_date].
    (Python 2 code: uses ``urllib.urlopen`` and byte-encodes text.)
    """
    # Fetch the page and hand it to BeautifulSoup.
    page_source = urllib.urlopen(url).read()
    soup = BeautifulSoup(page_source, 'lxml')

    # --- Titles: each <h3> holds one document title plus its link. ---
    for heading in soup.find_all('h3'):
        title_text = heading.text.encode('utf-8')
        # The link lives on the nested <a> tag's href attribute.
        link = heading.find('a').attrs['href']
        holder.append([title_text, link])

    # --- Other fields: <span class="sp"> elements repeat in groups of
    # three (doc number, promulgation date, internet release date), in the
    # same document order as the titles above. ---
    extra_values = [span.text.encode('utf-8')
                    for span in soup.find_all('span', {'class': 'sp'})]

    # Attach each group of three values to the matching title entry:
    # entry i gets extra_values[3*i], [3*i+1], [3*i+2].
    for doc_index, entry in enumerate(holder):
        base = doc_index * 3
        entry.append(extra_values[base])
        entry.append(extra_values[base + 1])
        entry.append(extra_values[base + 2])
# Run the scraper against the target page, then dump everything to CSV.
scraper(target_url)
# Write holder to a CSV file ('wb' mode: Python 2 csv idiom).
with open('output.csv', 'wb') as f:
    writer = csv.writer(f)
    writer.writerows(holder)
| {
"content_hash": "cf00e4fa8fda3b4cbeda9ef20fd03322",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 90,
"avg_line_length": 30.275229357798164,
"alnum_prop": 0.6260606060606061,
"repo_name": "mwweinberg/guofa-scraper",
"id": "cf988f9800f233b15dc01177636cf1786b534c39",
"size": "3300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alt_simple_fiveelements.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28687"
}
],
"symlink_target": ""
} |
from oslo.config import cfg
import webob.exc
from glance.common import exception
from glance.openstack.common import excutils
from glance.common import utils
import glance.db
import glance.openstack.common.log as logging
import glance.registry.client.v1.api as registry
import glance.store
# Module-level configuration and logger handles.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def initiate_deletion(req, location, id, delayed_delete=False):
    """
    Deletes image data from the backend store.

    :param req: The WSGI/Webob Request object
    :param location: URL to the image data in a data store
    :param id: Opaque image identifier.  (NOTE: shadows the ``id`` builtin;
        the original docstring misnamed this parameter ``image_id``.)
    :param delayed_delete: whether data deletion will be delayed
    """
    if delayed_delete:
        # Queue the data for the scrubber instead of deleting inline.
        glance.store.schedule_delayed_delete_from_backend(req.context,
                                                          location, id)
    else:
        glance.store.safe_delete_from_backend(req.context, location, id)
def _kill(req, image_id):
    """
    Marks the image status to `killed`.

    :param req: The WSGI/Webob Request object
    :param image_id: Opaque image identifier
    """
    metadata_update = {'status': 'killed'}
    registry.update_image_metadata(req.context, image_id, metadata_update)
def safe_kill(req, image_id):
    """
    Mark image killed without raising exceptions if it fails.

    Since _kill is meant to be called from exceptions handlers, it should
    not raise itself, rather it should just log its error.

    :param req: The WSGI/Webob Request object
    :param image_id: Opaque image identifier
    """
    try:
        _kill(req, image_id)
    except Exception:
        # Deliberate best-effort: log with traceback, never propagate.
        LOG.exception(_("Unable to kill image %(id)s: ") % {'id': image_id})
def upload_data_to_store(req, image_meta, image_data, store, notifier):
    """
    Upload image data to specified store.

    Upload image data to the store and cleans up on error.

    :param req: The WSGI/Webob Request object
    :param image_meta: Mapping of image metadata; must contain 'id' and
        'size'; supplied 'size'/'checksum' values are verified against
        what the backend store computed
    :param image_data: File-like object with the raw image bytes
    :param store: Backend store that will receive the data
    :param notifier: Notifier used to emit 'image.upload' error events
    :returns: Tuple of (image_meta, location, locations_metadata)
    :raises: webob.exc.HTTPError subclasses on any failure
    """
    image_id = image_meta['id']

    db_api = glance.db.get_api()
    image_size = image_meta.get('size', None)

    try:
        # Enforce the per-tenant storage quota before reading any data; if
        # only part of the quota remains, cap the stream with a
        # LimitingReader.
        remaining = glance.api.common.check_quota(
            req.context, image_size, db_api, image_id=image_id)
        if remaining is not None:
            image_data = utils.LimitingReader(image_data, remaining)

        (location,
         size,
         checksum,
         locations_metadata) = glance.store.store_add_to_backend(
             image_meta['id'],
             utils.CooperativeReader(image_data),
             image_meta['size'],
             store)

        try:
            # recheck the quota in case there were simultaneous uploads that
            # did not provide the size
            glance.api.common.check_quota(
                req.context, size, db_api, image_id=image_id)
        except exception.StorageQuotaFull:
            # BUG FIX: the original message had two '%s' placeholders but a
            # single argument, so this line raised TypeError instead of
            # logging.
            LOG.info(_('Cleaning up %s after exceeding the quota') % image_id)
            # BUG FIX: arguments were passed as (location, context, id);
            # made consistent with initiate_deletion() above, which calls
            # safe_delete_from_backend(context, location, id).
            glance.store.safe_delete_from_backend(
                req.context, location, image_meta['id'])
            raise

        def _kill_mismatched(image_meta, attr, actual):
            # Abort the upload if a client-supplied size/checksum does not
            # match what the backend computed while storing the data.
            supplied = image_meta.get(attr)
            if supplied and supplied != actual:
                msg = _("Supplied %(attr)s (%(supplied)s) and "
                        "%(attr)s generated from uploaded image "
                        "(%(actual)s) did not match. Setting image "
                        "status to 'killed'.") % locals()
                LOG.error(msg)
                safe_kill(req, image_id)
                initiate_deletion(req, location, image_id, CONF.delayed_delete)
                raise webob.exc.HTTPBadRequest(explanation=msg,
                                               content_type="text/plain",
                                               request=req)

        # Verify any supplied size/checksum value matches size/checksum
        # returned from store when adding image
        _kill_mismatched(image_meta, 'size', size)
        _kill_mismatched(image_meta, 'checksum', checksum)

        # Update the database with the checksum returned
        # from the backend store
        LOG.debug(_("Updating image %(image_id)s data. "
                    "Checksum set to %(checksum)s, size set "
                    "to %(size)d"), locals())
        update_data = {'checksum': checksum,
                       'size': size}
        try:
            image_meta = registry.update_image_metadata(req.context,
                                                        image_id,
                                                        update_data)

        except exception.NotFound:
            msg = _("Image %s could not be found after upload. The image may "
                    "have been deleted during the upload.") % image_id
            LOG.info(msg)

            # NOTE(jculp): we need to clean up the datastore if an image
            # resource is deleted while the image data is being uploaded
            #
            # We get "location" from above call to store.add(), any
            # exceptions that occur there handle this same issue internally,
            # Since this is store-agnostic, should apply to all stores.
            initiate_deletion(req, location, image_id, CONF.delayed_delete)
            raise webob.exc.HTTPPreconditionFailed(explanation=msg,
                                                   request=req,
                                                   content_type='text/plain')

    except exception.Duplicate as e:
        msg = _("Attempt to upload duplicate image: %s") % e
        LOG.debug(msg)
        safe_kill(req, image_id)
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPConflict(explanation=msg,
                                     request=req,
                                     content_type="text/plain")

    except exception.Forbidden as e:
        msg = _("Forbidden upload attempt: %s") % e
        LOG.debug(msg)
        safe_kill(req, image_id)
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPForbidden(explanation=msg,
                                      request=req,
                                      content_type="text/plain")

    except exception.StorageFull as e:
        msg = _("Image storage media is full: %s") % e
        LOG.error(msg)
        safe_kill(req, image_id)
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req,
                                                  content_type='text/plain')

    except exception.StorageWriteDenied as e:
        msg = _("Insufficient permissions on image storage media: %s") % e
        LOG.error(msg)
        safe_kill(req, image_id)
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPServiceUnavailable(explanation=msg,
                                               request=req,
                                               content_type='text/plain')

    except exception.ImageSizeLimitExceeded:
        msg = (_("Denying attempt to upload image larger than %d bytes.")
               % CONF.image_size_cap)
        LOG.info(msg)
        safe_kill(req, image_id)
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req,
                                                  content_type='text/plain')

    except exception.StorageQuotaFull as e:
        # BUG FIX: the message previously read "exceeds the .quota" because
        # of a stray period at a string-literal boundary.
        msg = (_("Denying attempt to upload image because it exceeds the "
                 "quota: %s") % e)
        LOG.info(msg)
        safe_kill(req, image_id)
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req,
                                                  content_type='text/plain')

    except webob.exc.HTTPError:
        # NOTE(bcwaldon): Ideally, we would just call 'raise' here,
        # but something in the above function calls is affecting the
        # exception context and we must explicitly re-raise the
        # caught exception.
        msg = _("Received HTTP error while uploading image %s") % image_id
        notifier.error('image.upload', msg)
        with excutils.save_and_reraise_exception():
            LOG.exception(msg)
            safe_kill(req, image_id)

    except (ValueError, IOError):
        # Typically raised when the client disconnects mid-stream.
        msg = _("Client disconnected before sending all data to backend")
        LOG.debug(msg)
        safe_kill(req, image_id)
        raise webob.exc.HTTPBadRequest(explanation=msg,
                                       content_type="text/plain",
                                       request=req)

    except Exception:
        msg = _("Failed to upload image %s") % image_id
        LOG.exception(msg)
        safe_kill(req, image_id)
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPInternalServerError(explanation=msg,
                                                request=req,
                                                content_type='text/plain')

    return image_meta, location, locations_metadata
| {
"content_hash": "38671cd225be0bdcd6c5f317f88cd240",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 79,
"avg_line_length": 40.109170305676855,
"alnum_prop": 0.5502449646162221,
"repo_name": "citrix-openstack-build/glance",
"id": "b718c399555432d6e1af59a03650dcff70a24117",
"size": "9866",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "glance/api/v1/upload_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2464002"
},
{
"name": "Shell",
"bytes": "3488"
}
],
"symlink_target": ""
} |
"""
Copyright 2017 Robin Verschueren
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.expressions import cvxtypes
from cvxpy.problems.objective import Maximize, Minimize
from cvxpy.reductions.reduction import Reduction
class FlipObjective(Reduction):
    """Flip a minimization objective to a maximization and vice versa.
    """

    def accepts(self, problem) -> bool:
        """Any problem's objective can be flipped."""
        return True

    def apply(self, problem):
        """:math:`\\max(f(x)) = -\\min(-f(x))`

        Parameters
        ----------
        problem : Problem
            The problem whose objective is to be flipped.

        Returns
        -------
        Problem
            A problem with a flipped objective.
        list
            The inverse data.
        """
        # IDIOM FIX: isinstance replaces the exact `type(...) ==` comparison;
        # Minimize and Maximize are distinct classes, so the result is the
        # same for the two objective kinds that exist.
        is_maximize = isinstance(problem.objective, Maximize)
        objective = Minimize if is_maximize else Maximize
        problem = cvxtypes.problem()(objective(-problem.objective.expr),
                                     problem.constraints)
        return problem, []

    def invert(self, solution, inverse_data):
        """Map the solution of the flipped problem to that of the original.

        Parameters
        ----------
        solution : Solution
            A solution object.
        inverse_data : list
            The inverse data returned by an invocation to apply.

        Returns
        -------
        Solution
            A solution to the original problem.
        """
        # Negate the optimal value to undo the flip; leave None (e.g.
        # infeasible/unbounded) untouched.
        if solution.opt_val is not None:
            solution.opt_val = -solution.opt_val
        return solution
| {
"content_hash": "c669168eaf64bef916f71f7b614faaf4",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 75,
"avg_line_length": 30.611940298507463,
"alnum_prop": 0.6313993174061433,
"repo_name": "merraksh/cvxpy",
"id": "1a127f81ea2bbcde2dbe2fb28e90bf0f93348de9",
"size": "2051",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cvxpy/reductions/flip_objective.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "120010"
},
{
"name": "C++",
"bytes": "5687983"
},
{
"name": "CMake",
"bytes": "694"
},
{
"name": "Makefile",
"bytes": "6320"
},
{
"name": "Python",
"bytes": "2149670"
},
{
"name": "SWIG",
"bytes": "2403"
},
{
"name": "Shell",
"bytes": "3117"
}
],
"symlink_target": ""
} |
'''Functions for defining, extracting data from, and serializing transform matrices'''
from numpy import *
def MakeTransform(rot, trans):
    """Build a 4x4 homogeneous transform from a rotation and a translation.

    :param rot: rotation, as a 3x3 matrix or a 9x1 column vector (row-major)
    :param trans: translation, as a 3x1 column or 1x3 row matrix
    :returns: 4x4 numpy matrix [[R, t], [0 0 0 1]]
    :raises ValueError: if either argument has an unsupported shape
    """
    if size(rot, 0) == 9 and size(rot, 1) == 1:
        tm = rot.reshape(3, 3)
    elif size(rot, 0) == 3 and size(rot, 1) == 3:
        tm = rot
    else:
        # BUG FIX: the original only printed a message and then crashed
        # later with UnboundLocalError; fail fast with a clear error.
        raise ValueError('rotation improperly specified')
    if size(trans, 0) == 3 and size(trans, 1) == 1:
        col = trans
    elif size(trans, 0) == 1 and size(trans, 1) == 3:
        # BUG FIX: the original used bmat('tm trans.T'), but numpy.bmat's
        # string form resolves plain names only, so 'trans.T' raised a
        # NameError.  Transpose explicitly before stacking instead.
        col = trans.T
    else:
        raise ValueError('translation improperly specified')
    tm = bmat([[tm, col]])
    lastrow = mat([0, 0, 0, 1])
    return bmat([[tm], [lastrow]])
def GetRot(tm):
    """Return the 3x3 rotation block of *tm*, flattened column-major
    (i.e. the transpose is flattened) into a 1x9 row matrix."""
    rotation = tm[0:3, 0:3]
    return mat(rotation.T.reshape(1, 9))
def GetTrans(tm):
    """Return the translation column of *tm* as a 1x3 row matrix."""
    translation = tm[0:3, 3]
    return mat(translation.T)
def SerializeTransform(tm):
    """Serialize *tm* as 12 space-separated floats: the 1x9 rotation
    followed by the 1x3 translation."""
    flattened = bmat([[GetRot(tm), GetTrans(tm)]])
    return Serialize1DMatrix(flattened)
def Serialize1DMatrix(arr):
    """Format a 1xN matrix as space-joined '%.5f' fields.

    Each field carries its own leading space, so fields end up separated
    by two spaces and the string starts with one.
    """
    count = size(arr)
    fields = [' %.5f' % arr[0, idx] for idx in range(count)]
    return ' '.join(fields)
def Serialize1DIntegerMatrix(arr):
    """Format a 1xN matrix as space-joined '%d' fields (each field keeps
    its own leading space, mirroring Serialize1DMatrix)."""
    count = size(arr)
    fields = [' %d' % arr[0, idx] for idx in range(count)]
    return ' '.join(fields)
| {
"content_hash": "7004558be02611ffe1f2789787a542b1",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 86,
"avg_line_length": 29.026315789473685,
"alnum_prop": 0.5947416137805984,
"repo_name": "yzxsunshine/SoCBirrt",
"id": "1d149a3b1f4fd08d17aeee684c606d79a42df732",
"size": "2836",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "TransformMatrix.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "29865"
},
{
"name": "C++",
"bytes": "1248849"
},
{
"name": "CMake",
"bytes": "85716"
},
{
"name": "Makefile",
"bytes": "307323"
},
{
"name": "Matlab",
"bytes": "38800"
},
{
"name": "Python",
"bytes": "81001"
},
{
"name": "Shell",
"bytes": "1607"
}
],
"symlink_target": ""
} |
def get_card_mask(card_type, card_number):
    """Return a masked representation of *card_number* for *card_type*.

    Not implemented yet; presumably intended to hide all but a few digits
    of the card number -- confirm the assignment spec before implementing.

    :raises NotImplementedError: always (stub).
    """
    raise NotImplementedError()
| {
"content_hash": "746dda152333b91a5e704a5bde157aa9",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 42,
"avg_line_length": 37.5,
"alnum_prop": 0.7466666666666667,
"repo_name": "coolshop-com/coolshop-application-assignment",
"id": "c27741779157d4f4a8739fac8b6bf039c6e9113a",
"size": "75",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "card_masks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2930"
}
],
"symlink_target": ""
} |
from flask import Flask
from routing.api import api
def create_app():
    """Application factory: build the Flask app and attach the API blueprint."""
    application = Flask(__name__)
    application.register_blueprint(api)
    return application
| {
"content_hash": "0c8007878084b4d91b4ee0d26af040c1",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 31,
"avg_line_length": 20.571428571428573,
"alnum_prop": 0.6875,
"repo_name": "evz/routing",
"id": "97f99a93838222fb0284437f9222d069ca574990",
"size": "144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "routing/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10265"
},
{
"name": "JavaScript",
"bytes": "1757"
},
{
"name": "Python",
"bytes": "2440"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.postgres.fields.hstore
from django.contrib.postgres.operations import HStoreExtension
class Migration(migrations.Migration):
    # Adds an optional HStore `custom_data` field to both Event and Place,
    # enabling the PostgreSQL hstore extension first.

    dependencies = [
        ('events', '0004_auto_20150607_2003'),
    ]

    operations = [
        # The hstore extension must exist before HStoreField columns are added.
        HStoreExtension(),
        migrations.AddField(
            model_name='event',
            name='custom_data',
            field=django.contrib.postgres.fields.hstore.HStoreField(null=True),
        ),
        migrations.AddField(
            model_name='place',
            name='custom_data',
            field=django.contrib.postgres.fields.hstore.HStoreField(null=True),
        ),
    ]
| {
"content_hash": "3821307f473bc5e5d0a9c97c0d09041b",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 79,
"avg_line_length": 27.96153846153846,
"alnum_prop": 0.6354883081155434,
"repo_name": "tuomas777/linkedevents",
"id": "ebc9340db30d2e27a579d48bbcb09419c0a05cd5",
"size": "751",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "events/migrations/0005_auto_20150607_2005.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1019"
},
{
"name": "Python",
"bytes": "353892"
},
{
"name": "Shell",
"bytes": "2177"
}
],
"symlink_target": ""
} |
from . import tservers
class TestApp(tservers.HTTPProxyTest):
    """Smoke tests for the proxy's built-in web app endpoints."""

    def test_basic(self):
        # The root page must be served successfully.
        response = self.app("/")
        assert response.status_code == 200

    def test_cert(self):
        # The CA certificate must be downloadable in both formats,
        # with a non-empty body.
        for fmt in ["pem", "p12"]:
            response = self.app("/cert/%s" % fmt)
            assert response.status_code == 200
            assert response.content
| {
"content_hash": "1cfa3fc0d611a0625d45ea16284c1251",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 47,
"avg_line_length": 24.692307692307693,
"alnum_prop": 0.5638629283489096,
"repo_name": "gzzhanghao/mitmproxy",
"id": "4c9eff08ea054150005e1f24778056b9b8738312",
"size": "321",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/mitmproxy/test_app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "185129"
},
{
"name": "HTML",
"bytes": "2878"
},
{
"name": "JavaScript",
"bytes": "140781"
},
{
"name": "PowerShell",
"bytes": "362"
},
{
"name": "Python",
"bytes": "1313193"
},
{
"name": "Shell",
"bytes": "3717"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.