max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
neural_style_transfer.py | crisrise/neuralstyledemo | 0 | 12760651 | import numpy as np
import cv2
import tensorflow as tf
import argparse
import time
from PIL import Image
import tf1st
if __name__ == "__main__":
    # Parse command-line arguments.
    parser = argparse.ArgumentParser(description='Neural Style Transfer with OpenCV and Tensorflow')
    parser.add_argument('--input-image', default="./images/federer.jpg", type=str, help='image to style')
    parser.add_argument('--style-image', default="./images/vangogh.jpg", type=str, help='styling image')
    parser.add_argument('--content-weight', default=1000, type=float, help='weight of the content image')
    parser.add_argument('--style-weight', default=0.01, type=float, help='weight of the styling image')
    parser.add_argument('--iterations', default=1000, type=int, help='number of iterations')
    parser.add_argument('--result-image', default="./images/result.jpg", type=str, help='resulting image')
    args = parser.parse_args()

    # Enable eager execution for tensorflow (required for the tape-based
    # gradient computation done in tf1st).
    tf.enable_eager_execution()
    print("Eager execution: {}".format(tf.executing_eagerly()))

    # Build the feature-extraction model and freeze its weights; only the
    # generated image itself is optimized.
    model = tf1st.get_model()
    for layer in model.layers:
        layer.trainable = False

    # Get the style and content feature representations (from our specified intermediate layers)
    style_features, content_features = tf1st.get_feature_representations(model, args.input_image, args.style_image)
    gram_style_features = [tf1st.gram_matrix(style_feature) for style_feature in style_features]

    # The content image is the starting point of the optimization.
    init_image = tf1st.load_and_process_img(args.input_image)
    init_image = tf.Variable(init_image, dtype=tf.float32)

    # Create our optimizer
    opt = tf.train.AdamOptimizer(learning_rate=5, beta1=0.99, epsilon=1e-1)

    # Track the best (lowest total loss) result seen so far.
    best_loss, best_img = float('inf'), None

    loss_weights = (args.style_weight, args.content_weight)
    cfg = {
        'model': model,
        'loss_weights': loss_weights,
        'init_image': init_image,
        'gram_style_features': gram_style_features,
        'content_features': content_features
    }

    # VGG mean pixel values (BGR order) used by the pre-processing; clipping
    # keeps the optimized image inside the valid de-processed range [0, 255].
    norm_means = np.array([103.939, 116.779, 123.68])
    min_vals = -norm_means
    max_vals = 255 - norm_means

    global_start = time.time()
    for i in range(args.iterations):
        # Time the whole iteration (the original reset the timer mid-iteration,
        # so the reported time excluded the gradient computation).
        iter_start = time.time()
        grads, all_loss = tf1st.compute_grads(cfg)
        loss, style_score, content_score = all_loss
        opt.apply_gradients([(grads, init_image)])
        clipped = tf.clip_by_value(init_image, min_vals, max_vals)
        init_image.assign(clipped)
        if loss < best_loss:
            # Update best loss and best image from total loss.
            best_loss = loss
            best_img = tf1st.deprocess_img(init_image.numpy())
        # Show and save the current stylized image. deprocess_img already
        # returns a numpy uint8 array, so no PIL round-trip is needed.
        plot_img = tf1st.deprocess_img(init_image.numpy())
        final_img = cv2.cvtColor(plot_img, cv2.COLOR_BGR2RGB)
        cv2.imshow('Actual Styled Image', final_img)
        cv2.imwrite(args.result_image, final_img)
        cv2.waitKey(1)
        print('Iteration: {}'.format(i))
        print('Total loss: {:.4e}, '
              'style loss: {:.4e}, '
              'content loss: {:.4e}, '
              'time: {:.4f}s'.format(loss, style_score, content_score, time.time() - iter_start))
    # Persist the best image seen over all iterations (previously tracked but
    # never used; the result file only held the last iteration's image).
    if best_img is not None:
        cv2.imwrite(args.result_image, cv2.cvtColor(best_img, cv2.COLOR_BGR2RGB))
    print('Total time: {:.4f}s'.format(time.time() - global_start))
    time.sleep(5)
    print('Done')
| 2.65625 | 3 |
towerlib/towerlib.py | lttmtins/towerlib | 0 | 12760652 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: towerlib.py
#
# Copyright 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# pylint: disable=too-many-lines
"""
Main code for towerlib
.. _Google Python Style Guide:
http://google.github.io/styleguide/pyguide.html
"""
import json
import logging
import sys
import math
import concurrent.futures
from requests import Session
from cachetools import TTLCache, cached
from .entities import (Config, # pylint: disable=unused-import # NOQA
LicenseInfo,
LicenseFeatures,
Organization,
User,
Project,
Team,
Group,
Inventory,
Host,
CredentialType,
Credential,
JobTemplate,
CERTIFICATE_TYPE_KINDS,
JOB_TYPES,
VERBOSITY_LEVELS,
Cluster,
ClusterInstance,
EntityManager)
from .towerlibexceptions import (AuthFailed,
InvalidOrganization,
InvalidInventory,
InvalidVariables,
InvalidCredentialTypeKind,
InvalidUser,
InvalidTeam,
InvalidCredential,
InvalidHost,
InvalidProject,
InvalidGroup,
InvalidCredentialType,
InvalidPlaybook,
InvalidInstanceGroup,
InvalidJobType,
InvalidVerbosity,
InvalidJobTemplate)
# Module metadata (author/maintainer placeholders were scrubbed during anonymisation).
__author__ = '''<NAME> <<EMAIL>>'''
__docformat__ = '''google'''
__date__ = '''2018-01-02'''
__copyright__ = '''Copyright 2018, Costas Tyfoxylos'''
__credits__ = ['<NAME>']
__license__ = '''MIT'''
__maintainer__ = '''<NAME>'''
__email__ = '''<<EMAIL>>'''
__status__ = '''Development'''  # 'Prototype', 'Development', 'Production'.
# This is the main prefix used for logging
LOGGER_BASENAME = '''towerlib'''
LOGGER = logging.getLogger(LOGGER_BASENAME)
# Library default: emit nothing unless the consuming application configures logging.
LOGGER.addHandler(logging.NullHandler())
# Page size used for every paginated API request (see Tower._get_paginated_response).
PAGINATION_LIMIT = 25
# TTLs (seconds) for the cached `cluster` and `configuration` properties.
CLUSTER_STATE_CACHING_SECONDS = 10
CONFIGURATION_STATE_CACHING_SECONDS = 60
CLUSTER_STATE_CACHE = TTLCache(maxsize=1, ttl=CLUSTER_STATE_CACHING_SECONDS)
CONFIGURATION_STATE_CACHE = TTLCache(maxsize=1, ttl=CONFIGURATION_STATE_CACHING_SECONDS)
class Tower: # pylint: disable=too-many-public-methods
"""Models the api of ansible tower"""
def __init__(self, host, username, password, secure=False, ssl_verify=True):  # pylint: disable=too-many-arguments
    """Initializes the client and authenticates against a tower instance.

    Args:
        host: Hostname (or host:port) of the tower instance, without scheme.
        username: The username for basic authentication.
        password: The password for basic authentication.
        secure: If True use https, otherwise http.
        ssl_verify: Certificate verification setting, only applied when secure.

    Raises:
        AuthFailed: If the credentials are rejected (raised by _setup_session).
    """
    logger_name = u'{base}.{suffix}'.format(base=LOGGER_BASENAME,
                                            suffix=self.__class__.__name__)
    self._logger = logging.getLogger(logger_name)
    protocol = 'https' if secure else 'http'
    self.host = '{protocol}://{host}'.format(protocol=protocol, host=host)
    self.api = '{host}/api/v2'.format(host=self.host)
    self.username = username
    self.password = password
    self.session = self._setup_session(secure, ssl_verify)
def _setup_session(self, secure, ssl_verify):
    """Creates an authenticated requests session towards the tower api.

    Args:
        secure: If True, apply the ssl_verify setting to the session.
        ssl_verify: Verification setting passed to requests (bool or CA bundle path).

    Returns:
        Session: The authenticated session.

    Raises:
        AuthFailed: If the '/me/' probe returns HTTP 401.
    """
    session = Session()
    if secure:
        session.verify = ssl_verify
    # Initial unauthenticated GET against the root host; presumably primes
    # session cookies before authenticating -- TODO confirm it is required.
    session.get(self.host)
    session.auth = (self.username, self.password)
    session.headers.update({'content-type': 'application/json'})
    # Probe an authenticated endpoint to validate the credentials up front.
    url = '{api}/me/'.format(api=self.api)
    response = session.get(url)
    if response.status_code == 401:
        raise AuthFailed(response.content)
    return session
@property
@cached(CONFIGURATION_STATE_CACHE)
def configuration(self):
    """The configuration of the tower instance (cached for 60 seconds).

    Returns:
        Config: The configuration of the tower instance
    """
    url = '{api}/config/'.format(api=self.api)
    results = self.session.get(url)
    config = results.json()
    # Collect the raw values for each namedtuple, in declared field order.
    features = [config.get('license_info',
                           {}).get('features',
                                   {}).get(key) for key in LicenseFeatures._fields]  # noqa
    info = [config.get('license_info',
                       {}).get(key) for key in LicenseInfo._fields]  # noqa
    # we overwrite the entry of "features" with the namedtuple of it.
    # NOTE(review): assumes 'features' is the third field of LicenseInfo -- confirm against entities.py.
    info[2] = LicenseFeatures(*features)
    configuration = [config.get(key) for key in Config._fields]  # noqa
    # we overwrite the entry of "license_info" with the namedtuple of it.
    # NOTE(review): assumes 'license_info' is the second field of Config -- confirm against entities.py.
    configuration[1] = LicenseInfo(*info)
    return Config(*configuration)
@property
@cached(CLUSTER_STATE_CACHE)
def cluster(self):
    """The cluster status of tower (cached for 10 seconds).

    Returns:
        Cluster: The information about the state of the cluster
    """
    def get_instance(name, instance_list):
        """Getting an instance nametuple from an instance list"""
        node = next((instance for instance in instance_list
                     if instance.get('node') == name), None)
        # NOTE(review): if the name is not found, node is None and the next
        # line raises AttributeError -- assumes '/ping/' is self-consistent.
        data = [node.get(key_) for key_ in ('node', 'heartbeat')]
        return ClusterInstance(self, *data)
    url = '{api}/ping/'.format(api=self.api)
    results = self.session.get(url)
    ping = results.json()
    # NOTE(review): [0] assumes at least one instance group exists; an empty
    # list would raise IndexError despite the default.
    instance_groups = ping.get('instance_groups', [])[0]
    instance_list = ping.get('instances')
    capacity = instance_groups.get('capacity', 0)
    name = instance_groups.get('name', 'Unset')
    ha_enabled = ping.get('ha', False)
    version = ping.get('version', 'Unknown')
    instances = [get_instance(name_, instance_list)
                 for name_ in instance_groups.get('instances', [])]
    active_node = get_instance(ping.get('active_node'), instance_list)
    return Cluster(instances, capacity, name, ha_enabled, version, active_node)
@property
def organizations(self):
    """Manager for the organization entities of tower.

    Returns:
        EntityManager: The manager object for organizations.
    """
    return EntityManager(self,
                         entity_name='organizations',
                         entity_object='Organization',
                         primary_match_field='name')

def get_organization_by_name(self, name):
    """Looks up a single organization by name (case insensitive).

    Args:
        name: The name of the organization to retrieve.

    Returns:
        Organization: The matching organization, or None when nothing matches.
    """
    matches = self.organizations.filter({'name__iexact': name})
    return next(matches, None)

def get_organization_by_id(self, id_):
    """Looks up a single organization by its numeric id.

    Args:
        id_: The id of the organization to retrieve.

    Returns:
        Organization: The matching organization, or None when nothing matches.
    """
    matches = self.organizations.filter({'id': id_})
    return next(matches, None)
def create_organization(self, name, description):
    """Creates an organization in tower.

    Args:
        name: The name of the organization to create.
        description: The description of the organization to create.

    Returns:
        Organization: The created organization on success, None otherwise.
    """
    url = '{api}/organizations/'.format(api=self.api)
    payload = {'name': name, 'description': description}
    response = self.session.post(url, data=json.dumps(payload))
    if not response.ok:
        return None
    return Organization(self, response.json())
def delete_organization(self, name):
    """Deletes an organization from tower.

    Args:
        name: The name of the organization to delete.

    Returns:
        bool: True on success, False otherwise.

    Raises:
        InvalidOrganization: No organization with the given name exists.
    """
    organization = self.get_organization_by_name(name)
    if organization:
        return organization.delete()
    raise InvalidOrganization(name)
@staticmethod
def add_slash(url):
"""Adds a final slash to a url if there is not any"""
return url + '/' if not url.endswith('/') else url
def _get_paginated_response(self, url, params=None):
    """Yields every entry of a paginated tower endpoint.

    The first page is fetched synchronously to learn the total count; the
    remaining pages are then fetched concurrently in a thread pool.

    Args:
        url: The endpoint url, with or without a trailing slash.
        params: Optional dictionary of extra query parameters.

    Yields:
        dict: Entries of the 'results' attribute of every retrieved page.
    """
    url = '{url}?page_size={limit}'.format(url=self.add_slash(url), limit=PAGINATION_LIMIT)
    if isinstance(params, dict):
        url = url + ''.join(['&{}={}'.format(key, value) for key, value in params.items()])
    elif params:
        self._logger.warning('Argument "params" should be a dictionary, value provided was :%s', params)
    # Pre-bind so the except clause cannot hit an unbound local if
    # session.get itself raised one of the caught exception types.
    response = None
    try:
        response = self.session.get(url)
        response_data = response.json()
        response.close()
    except (ValueError, AttributeError, TypeError):
        text = response.text if response is not None else 'no response received'
        self._logger.exception('Could not retrieve first page, response was %s', text)
        response_data = {}
    count = response_data.get('count', 0)
    page_count = int(math.ceil(float(count) / PAGINATION_LIMIT))
    # Lazy %-style logging: the message is only rendered if DEBUG is enabled.
    self._logger.debug('Calculated that there are %s pages to get', page_count)
    for result in response_data.get('results', []):
        yield result
    if page_count:
        with concurrent.futures.ThreadPoolExecutor(max_workers=30) as executor:
            # Page 1 was already consumed above; fetch pages 2..page_count.
            futures = [executor.submit(self.session.get, '{url}&page={page_index}'.format(url=url,
                                                                                          page_index=index))
                       for index in range(page_count, 1, -1)]
            for future in concurrent.futures.as_completed(futures):
                try:
                    response = future.result()
                    response_data = response.json()
                    response.close()
                    # Default to [] so a malformed page does not raise TypeError.
                    for result in response_data.get('results', []):
                        yield result
                except Exception:  # pylint: disable=broad-except
                    self._logger.exception('Future failed...')
def get_external_users(self):
    """Retrieves only the users created by an external system.

    Returns:
        generator: Lazily yields User objects whose account comes from an
            external ('social') system.
    """
    return (user_ for user_ in self.users if user_.external_account == 'social')

def get_local_users(self):
    """Retrieves only the users created locally in tower.

    Returns:
        generator: Lazily yields User objects with no external account set.
    """
    return (user_ for user_ in self.users if not user_.external_account)
@property
def users(self):
    """Manager for the user entities of tower.

    Returns:
        EntityManager: The manager object for users.
    """
    return EntityManager(self,
                         entity_name='users',
                         entity_object='User',
                         primary_match_field='username')

def get_user_by_username(self, name):
    """Looks up a single user by username (case insensitive).

    Args:
        name: The username of the user to retrieve.

    Returns:
        User: The matching user, or None when nothing matches.
    """
    matches = self.users.filter({'username__iexact': name})
    return next(matches, None)

def get_user_by_id(self, id_):
    """Looks up a single user by its numeric id.

    Args:
        id_: The id of the user to retrieve.

    Returns:
        User: The matching user, or None when nothing matches.
    """
    matches = self.users.filter({'id': id_})
    return next(matches, None)
def delete_user(self, username):
    """Deletes a user from tower.

    Args:
        username: The username of the user to delete.

    Returns:
        bool: True on success, False otherwise.

    Raises:
        InvalidUser: No user with the given username exists.
    """
    user = self.get_user_by_username(username)
    if user:
        return user.delete()
    raise InvalidUser(username)
def create_user_in_organization(self, organization, first_name, last_name, email, username, password, level='standard'):  # pylint: disable=too-many-arguments
    """Creates a user under an organization.

    Args:
        organization: The name of the organization to create the user under.
        first_name: The user's first name.
        last_name: The user's last name.
        email: The user's email.
        username: The user's username.
        password: The user's password.
        level: The user's level. One of ('standard', 'system_auditor', 'system_administrator').

    Returns:
        User: The created user on success, None otherwise.

    Raises:
        InvalidOrganization: The organization provided as argument does not exist.
    """
    organization_ = self.get_organization_by_name(organization)
    if not organization_:
        raise InvalidOrganization(organization)
    return organization_.create_user(first_name, last_name, email, username, password, level)
@property
def projects(self):
    """Manager for the project entities of tower.

    Returns:
        EntityManager: The manager object for projects.
    """
    return EntityManager(self,
                         entity_name='projects',
                         entity_object='Project',
                         primary_match_field='name')

def get_project_by_name(self, name):
    """Looks up a single project by name (case insensitive).

    Args:
        name: The name of the project to retrieve.

    Returns:
        Project: The matching project, or None when nothing matches.
    """
    matches = self.projects.filter({'name__iexact': name})
    return next(matches, None)

def get_project_by_id(self, id_):
    """Looks up a single project by its numeric id.

    Args:
        id_: The id of the project to retrieve.

    Returns:
        Project: The matching project, or None when nothing matches.
    """
    matches = self.projects.filter({'id': id_})
    return next(matches, None)
def create_project_in_organization(self,  # pylint: disable=too-many-arguments
                                   organization,
                                   name,
                                   description,
                                   credential,
                                   scm_url,
                                   scm_branch='master',
                                   scm_type='git',
                                   scm_clean=True,
                                   scm_delete_on_update=False,
                                   scm_update_on_launch=True,
                                   scm_update_cache_timeout=0):
    """Creates a project in an organization

    Args:
        organization: The name of the organization to create the project under
        name: The name of the project
        description: The description of the project
        credential: The name of the credential to use for the project
        scm_url: The url of the scm
        scm_branch: The default branch of the scm
        scm_type: The type of the scm
        scm_clean: Clean scm or not Boolean
        scm_delete_on_update: Delete scm on update Boolean
        scm_update_on_launch: Update scm on launch Boolean
        scm_update_cache_timeout: Scm cache update integer

    Returns:
        Project: The created project on success, None otherwise

    Raises:
        InvalidOrganization: The organization provided as argument does not exist.
        InvalidCredential: The credential provided as argument does not exist.
    """
    organization_ = self.get_organization_by_name(organization)
    if not organization_:
        raise InvalidOrganization(organization)
    # The lookup only validates that the credential exists; see note below.
    credential_ = self.get_credential_by_name(credential)
    if not credential_:
        raise InvalidCredential(credential)
    # NOTE(review): forwards the credential *name* rather than the resolved
    # credential_ object -- confirm Organization.create_project expects a name.
    return organization_.create_project(name,
                                        description,
                                        credential,
                                        scm_url,
                                        scm_branch,
                                        scm_type,
                                        scm_clean,
                                        scm_delete_on_update,
                                        scm_update_on_launch,
                                        scm_update_cache_timeout)
def delete_project(self, name):
    """Deletes a project from tower.

    Args:
        name: The name of the project to delete.

    Returns:
        bool: True on success, False otherwise.

    Raises:
        InvalidProject: No project with the given name exists.
    """
    project = self.get_project_by_name(name)
    if project:
        return project.delete()
    raise InvalidProject(name)
@property
def teams(self):
    """Manager for the team entities of tower.

    Returns:
        EntityManager: The manager object for teams.
    """
    return EntityManager(self,
                         entity_name='teams',
                         entity_object='Team',
                         primary_match_field='name')

def get_team_by_name(self, name):
    """Looks up a single team by name (case insensitive).

    Args:
        name: The name of the team to retrieve.

    Returns:
        Team: The matching team, or None when nothing matches.
    """
    matches = self.teams.filter({'name__iexact': name})
    return next(matches, None)

def get_team_by_id(self, id_):
    """Looks up a single team by its numeric id.

    Args:
        id_: The id of the team to retrieve.

    Returns:
        Team: The matching team, or None when nothing matches.
    """
    matches = self.teams.filter({'id': id_})
    return next(matches, None)
def create_team_in_organization(self, organization, team_name, description):
    """Creates a team under an organization.

    Args:
        organization: The name of the organization to create the team under
        team_name: The name of the team to create
        description: The description of the team to create

    Returns:
        Team: The created team on success, None otherwise

    Raises:
        InvalidOrganization: The organization provided as argument does not exist.
    """
    # Use a distinct name for the resolved object: the original shadowed the
    # 'organization' argument, so a failed lookup raised
    # InvalidOrganization(None) instead of naming the missing organization.
    organization_ = self.get_organization_by_name(organization)
    if not organization_:
        raise InvalidOrganization(organization)
    return organization_.create_team(team_name, description)
def delete_team(self, name):
    """Deletes a team from tower.

    Args:
        name: The name of the team to delete

    Returns:
        bool: True on success, False otherwise

    Raises:
        InvalidTeam: The team provided as argument does not exist.
    """
    team = self.get_team_by_name(name)
    if not team:
        # Original raised InvalidTeam(team) -- i.e. InvalidTeam(None); raise
        # with the requested name like every other delete_* method does.
        raise InvalidTeam(name)
    return team.delete()
@property
def groups(self):
    """Manager for the group entities of tower.

    Returns:
        EntityManager: The manager object for groups.
    """
    return EntityManager(self,
                         entity_name='groups',
                         entity_object='Group',
                         primary_match_field='name')

def get_group_by_name(self, name):
    """Looks up a single group by name (case insensitive).

    Args:
        name: The name of the group to retrieve.

    Returns:
        Group: The matching group, or None when nothing matches.
    """
    matches = self.groups.filter({'name__iexact': name})
    return next(matches, None)

def get_group_by_id(self, id_):
    """Looks up a single group by its numeric id.

    Args:
        id_: The id of the group to retrieve.

    Returns:
        Group: The matching group, or None when nothing matches.
    """
    matches = self.groups.filter({'id': id_})
    return next(matches, None)
def delete_group(self, name):
    """Deletes a group from tower.

    Args:
        name: The name of the group to delete.

    Returns:
        bool: True on success, False otherwise.

    Raises:
        InvalidGroup: No group with the given name exists.
    """
    group = self.get_group_by_name(name)
    if group:
        return group.delete()
    raise InvalidGroup(name)
@property
def inventories(self):
    """Manager for the inventory entities of tower.

    Returns:
        EntityManager: The manager object for inventories.
    """
    return EntityManager(self,
                         entity_name='inventories',
                         entity_object='Inventory',
                         primary_match_field='name')

def get_inventory_by_name(self, name):
    """Looks up a single inventory by name (case insensitive).

    Args:
        name: The name of the inventory to retrieve.

    Returns:
        Inventory: The matching inventory, or None when nothing matches.
    """
    matches = self.inventories.filter({'name__iexact': name})
    return next(matches, None)

def get_inventory_by_id(self, id_):
    """Looks up a single inventory by its numeric id.

    Args:
        id_: The id of the inventory to retrieve.

    Returns:
        Inventory: The matching inventory, or None when nothing matches.
    """
    matches = self.inventories.filter({'id': id_})
    return next(matches, None)
def create_inventory_in_organization(self,
                                     organization,
                                     name,
                                     description,
                                     variables='{}'):
    """Creates an inventory under an organization.

    Args:
        organization: The name of the organization to create the inventory under
        name: The name of the inventory
        description: The description of the inventory
        variables: A json of the variables to be set on the inventory

    Returns:
        Inventory: The created inventory on success, None otherwise

    Raises:
        InvalidOrganization: The organization provided as argument does not exist.
    """
    # Use a distinct name for the resolved object: the original shadowed the
    # 'organization' argument, so a failed lookup raised
    # InvalidOrganization(None) instead of naming the missing organization.
    organization_ = self.get_organization_by_name(organization)
    if not organization_:
        raise InvalidOrganization(organization)
    return organization_.create_inventory(name, description, variables)
def delete_inventory(self, name):
    """Deletes an inventory from tower.

    Args:
        name: The name of the inventory to delete.

    Returns:
        bool: True on success, False otherwise.

    Raises:
        InvalidInventory: No inventory with the given name exists.
    """
    inventory = self.get_inventory_by_name(name)
    if inventory:
        return inventory.delete()
    raise InvalidInventory(name)
@property
def hosts(self):
    """Manager for the host entities of tower.

    Returns:
        EntityManager: The manager object for hosts.
    """
    return EntityManager(self,
                         entity_name='hosts',
                         entity_object='Host',
                         primary_match_field='name')

def get_host_by_name(self, name):
    """Looks up a single host by name (case insensitive).

    Args:
        name: The name of the host to retrieve.

    Returns:
        Host: The matching host, or None when nothing matches.
    """
    matches = self.hosts.filter({'name__iexact': name})
    return next(matches, None)

def get_host_by_id(self, id_):
    """Looks up a single host by its numeric id.

    Args:
        id_: The id of the host to retrieve.

    Returns:
        Host: The matching host, or None when nothing matches.
    """
    matches = self.hosts.filter({'id': id_})
    return next(matches, None)
def create_host_in_inventory(self, inventory, name, description, variables='{}'):
    """Creates a host under an inventory.

    Args:
        inventory: The name of the inventory to create the host under.
        name: The name of the host.
        description: The description of the host.
        variables: A json of the variables to be set on the host.

    Returns:
        Host: The created host on success, None otherwise.

    Raises:
        InvalidInventory: The inventory provided as argument does not exist.
    """
    inventory_ = self.get_inventory_by_name(inventory)
    if inventory_:
        return inventory_.create_host(name, description, variables)
    raise InvalidInventory(inventory)
def add_groups_to_host(self, hostname, groups):
    """Associates one or more groups with a host.

    Args:
        hostname: The name of the host to add the groups to.
        groups: A single group name, or a list/tuple of group names, to add.

    Returns:
        bool: True on complete success, False otherwise.

    Raises:
        InvalidHost: The host provided as argument does not exist.
    """
    host = self.get_host_by_name(hostname)
    if host:
        return host.associate_with_groups(groups)
    raise InvalidHost(hostname)

def remove_groups_from_host(self, hostname, groups):
    """Disassociates one or more groups from a host.

    Args:
        hostname: The name of the host to remove the groups from.
        groups: A single group name, or a list/tuple of group names, to remove.

    Returns:
        bool: True on complete success, False otherwise.

    Raises:
        InvalidHost: The host provided as argument does not exist.
    """
    host = self.get_host_by_name(hostname)
    if host:
        return host.disassociate_with_groups(groups)
    raise InvalidHost(hostname)
def delete_host(self, name):
    """Deletes a host from tower.

    Args:
        name: The name of the host to delete.

    Returns:
        bool: True on success, False otherwise.

    Raises:
        InvalidHost: No host with the given name exists.
    """
    host = self.get_host_by_name(name)
    if host:
        return host.delete()
    raise InvalidHost(name)
@property
def instances(self):
    """Manager for the instance entities of tower.

    Returns:
        EntityManager: The manager object for instances.
    """
    return EntityManager(self,
                         entity_name='instances',
                         entity_object='Instance',
                         primary_match_field='name')

@property
def instance_groups(self):
    """Manager for the instance group entities of tower.

    Returns:
        EntityManager: The manager object for instance groups.
    """
    return EntityManager(self,
                         entity_name='instance_groups',
                         entity_object='InstanceGroup',
                         primary_match_field='name')
@property
def credential_types(self):
    """Manager for all credential type entities of tower.

    Returns:
        EntityManager: The manager object for credential types.
    """
    return EntityManager(self,
                         entity_name='credential_types',
                         entity_object='CredentialType',
                         primary_match_field='name')

@property
def tower_credential_types(self):
    """The built-in (tower managed) credential types.

    Returns:
        EntityManager: Iterable of the tower-managed credential types.
    """
    manager = EntityManager(self,
                            entity_name='credential_types',
                            entity_object='CredentialType',
                            primary_match_field='name')
    return manager.filter({'managed_by_tower': 'true'})

@property
def custom_credential_types(self):
    """The user-defined (custom) credential types.

    Returns:
        EntityManager: Iterable of the non-tower-managed credential types.
    """
    manager = EntityManager(self,
                            entity_name='credential_types',
                            entity_object='CredentialType',
                            primary_match_field='name')
    return manager.filter({'managed_by_tower': 'false'})
def get_credential_type_by_name(self, name):
    """Looks up a single credential type by name (case insensitive).

    Args:
        name: The name of the credential type to retrieve.

    Returns:
        CredentialType: The matching credential type, or None when nothing matches.
    """
    matches = self.credential_types.filter({'name__iexact': name})
    return next(matches, None)

def get_credential_type_by_id(self, id_):
    """Looks up a single credential type by its numeric id.

    Args:
        id_: The id of the credential type to retrieve.

    Returns:
        CredentialType: The matching credential type, or None when nothing matches.
    """
    matches = self.credential_types.filter({'id': id_})
    return next(matches, None)
def create_credential_type(self, name, description, kind, inputs_='{}', injectors='{}'):  # pylint: disable=too-many-arguments
    """Creates a credential type in tower.

    Args:
        name: The name of the credential type.
        description: The description of the credential type.
        kind: The kind of the credential type. Valid values are
            (u'scm', u'ssh', u'vault', u'net', u'cloud', u'insights').
        inputs_: A json string of the inputs to set on the credential type.
        injectors: A json string of the injectors to set on the credential type.

    Returns:
        CredentialType: The created credential type on success, None otherwise.

    Raises:
        InvalidCredentialTypeKind: The credential type kind provided as argument does not exist.
        InvalidVariables: The inputs or injectors provided as argument is not valid json.
    """
    if kind.lower() not in CERTIFICATE_TYPE_KINDS:
        raise InvalidCredentialTypeKind(kind)
    payload = {'name': name,
               'description': description,
               'kind': kind.lower()}
    # Both json arguments are parsed the same way; the order matches the
    # original dict iteration (inputs first, injectors second).
    for attribute, value in (('inputs', inputs_), ('injectors', injectors)):
        try:
            payload[attribute] = json.loads(value)
        except (ValueError, TypeError):
            raise InvalidVariables(value)
    url = '{api}/credential_types/'.format(api=self.api)
    response = self.session.post(url, data=json.dumps(payload))
    if not response.ok:
        return None
    return CredentialType(self, response.json())
def delete_credential_type(self, name):
    """Deletes a credential type from tower.

    Args:
        name: The name of the credential type to delete.

    Returns:
        bool: True on success, False otherwise.

    Raises:
        InvalidCredential: No credential type with the given name exists.
            NOTE(review): raising InvalidCredentialType would be more
            consistent, but callers may already catch InvalidCredential.
    """
    credential = self.get_credential_type_by_name(name)
    if credential:
        return credential.delete()
    raise InvalidCredential(name)
@property
def credentials(self):
    """Manager for the credential entities of tower.

    Returns:
        EntityManager: The manager object for credentials.
    """
    return EntityManager(self,
                         entity_name='credentials',
                         entity_object='Credential',
                         primary_match_field='name')

def get_credential_by_name(self, name):
    """Looks up a single credential by name (case insensitive).

    Args:
        name: The name of the credential to retrieve.

    Returns:
        Credential: The matching credential, or None when nothing matches.
    """
    matches = self.credentials.filter({'name__iexact': name})
    return next(matches, None)

def get_credential_by_id(self, id_):
    """Looks up a single credential by its numeric id.

    Args:
        id_: The id of the credential to retrieve.

    Returns:
        Credential: The matching credential, or None when nothing matches.
    """
    matches = self.credentials.filter({'id': id_})
    return next(matches, None)
def create_credential_in_organization(self,  # pylint: disable=too-many-arguments,too-many-locals
                                      organization,
                                      name,
                                      description,
                                      user,
                                      team,
                                      credential_type,
                                      inputs_='{}'):
    """Creates a credential under an organization.

    Args:
        organization: The name of the organization to create a credential under
        name: The name of the credential to create
        description: The description of the credential to create
        user: The username of the user to assign to the credential
        team: The name of the team to assign to the credential
        credential_type: The name of the type of the credential
        inputs_: A json with the values to set to the credential according to what is required by its type

    Returns:
        Credential: The created credential upon success, None otherwise

    Raises:
        InvalidOrganization: The organization provided as argument does not exist.
        InvalidUser: The user provided as argument does not exist.
        InvalidTeam: The team provided as argument does not exist.
        InvalidCredentialType: The credential type provided as argument does not exist.
        InvalidVariables: The inputs provided as argument is not valid json.
    """
    organization_ = self.get_organization_by_name(organization)
    if not organization_:
        raise InvalidOrganization(organization)
    user_ = self.get_user_by_username(user)
    if not user_:
        raise InvalidUser(user)
    team_ = self.get_team_by_name(team)
    if not team_:
        raise InvalidTeam(team)
    credential_type_ = self.get_credential_type_by_name(credential_type)
    if not credential_type_:
        raise InvalidCredentialType(credential_type)
    payload = {'name': name,
               'description': description,
               'organization': organization_.id,
               'user': user_.id,
               'team': team_.id,
               'credential_type': credential_type_.id}
    try:
        payload['inputs'] = json.loads(inputs_)
    except (ValueError, TypeError):
        # Also catch TypeError (non-string inputs_), mirroring
        # create_credential_type; the original only caught ValueError, so a
        # non-string argument escaped as a raw TypeError.
        raise InvalidVariables(inputs_)
    url = '{api}/credentials/'.format(api=self.api)
    response = self.session.post(url, data=json.dumps(payload))
    return Credential(self, response.json()) if response.ok else None
def delete_credential(self, name):
    """Deletes a credential from tower.

    Args:
        name: The name of the credential to delete.

    Returns:
        bool: True on success, False otherwise.

    Raises:
        InvalidCredential: No credential with the given name exists.
    """
    credential = self.get_credential_by_name(name)
    if credential:
        return credential.delete()
    raise InvalidCredential(name)
@property
def job_templates(self):
    """Manager for the job template entities of tower.

    Returns:
        EntityManager: The manager object for job templates.
    """
    return EntityManager(self,
                         entity_name='job_templates',
                         entity_object='JobTemplate',
                         primary_match_field='name')

def get_job_template_by_name(self, name):
    """Looks up a single job template by name (case insensitive).

    Args:
        name: The name of the job template to retrieve.

    Returns:
        JobTemplate: The matching job template, or None when nothing matches.
    """
    matches = self.job_templates.filter({'name__iexact': name})
    return next(matches, None)

def get_job_template_by_id(self, id_):
    """Looks up a single job template by its numeric id.

    Args:
        id_: The id of the job template to retrieve.

    Returns:
        JobTemplate: The matching job template, or None when nothing matches.
    """
    matches = self.job_templates.filter({'id': id_})
    return next(matches, None)
def delete_job_template(self, name):
    """Deletes a job template from tower.

    Args:
        name: The name of the job template to delete.

    Returns:
        bool: True on success, False otherwise.

    Raises:
        InvalidJobTemplate: No job template with the given name exists.
    """
    job_template = self.get_job_template_by_name(name)
    if job_template:
        return job_template.delete()
    raise InvalidJobTemplate(name)
def create_job_template(self, # pylint: disable=too-many-arguments,too-many-locals
name,
description,
inventory,
project,
playbook,
credential,
instance_groups=None,
host_config_key=None,
job_type='run',
vault_credential=None,
forks=0,
limit=0,
verbosity=0,
extra_vars='',
job_tags='',
force_handlers=False,
skip_tags='',
start_at_task='',
timeout=0,
use_fact_cache=False,
ask_diff_mode_on_launch=False,
ask_variables_on_launch=False,
ask_limit_on_launch=False,
ask_tags_on_launch=False,
ask_skip_tags_on_launch=False,
ask_job_type_on_launch=False,
ask_verbosity_on_launch=False,
ask_inventory_on_launch=False,
ask_credential_on_launch=False,
survey_enabled=False,
become_enabled=False,
diff_mode=False,
allow_simultaneous=False):
"""Creates a job template
Args:
name: The name of the job template to create
description: The description of the job template to create
inventory: The inventory to use for the template
project: The project to use for the template
playbook: The playbook to run for the template
credential: The credential to use for the template
instance_groups: The instance groups to associate to the template
host_config_key: A host config key
job_type: The job type. Valid values are 'run' and 'check'
vault_credential: A vault credential
forks: The number of parallel or simultaneous processes to use while executing the playbook
limit: A host pattern to constrain the list of hosts that will be managed or affected by the playbook.
verbosity: The level of output ansible will produce as the playbook executes. Values [0-4]
extra_vars: Pass extra command line variables to the playbook.
job_tags: Tags to identify the template
force_handlers:
skip_tags: Skip specific parts of a play or task with tags
start_at_task:
timeout:
use_fact_cache:
ask_diff_mode_on_launch:
ask_variables_on_launch:
ask_limit_on_launch:
ask_tags_on_launch:
ask_skip_tags_on_launch:
ask_job_type_on_launch:
ask_verbosity_on_launch:
ask_inventory_on_launch:
ask_credential_on_launch:
survey_enabled:
become_enabled:
diff_mode:
allow_simultaneous:
Returns:
JobTemplate: The created job template if successful, None otherwise
Raises:
InvalidInventory: The inventory provided as argument does not exist.
InvalidProject: The project provided as argument does not exist.
InvalidPlaybook: The playbook provided as argument does not exist in project.
InvalidInstanceGroup: The instance group provided as argument does not exist.
InvalidJobType: The job type provided as argument does not exist.
InvalidVerbosity: The verbosity provided is not in valid range of 0-4.
"""
inventory_ = self.get_inventory_by_name(inventory)
if not inventory_:
raise InvalidInventory(inventory)
project_ = self.get_project_by_name(project)
if not project_:
raise InvalidProject(project)
if playbook not in project_.playbooks:
raise InvalidPlaybook(playbook)
credential_ = self.get_credential_by_name(credential)
if not credential_:
raise InvalidCredential(credential)
instance_group_ids = []
if instance_groups:
if not isinstance(instance_groups, (list, tuple)):
instance_groups = [instance_groups]
tower_instance_groups = [group_ for group_ in self.instance_groups]
tower_instance_groups_names = [group.name for group in tower_instance_groups]
invalid = set(instance_groups) - set(tower_instance_groups_names)
if invalid:
raise InvalidInstanceGroup(invalid)
for instance_group in set(instance_groups):
group = next((group for group in tower_instance_groups
if group.name == instance_group), None)
instance_group_ids.append(group.id)
if job_type not in JOB_TYPES:
raise InvalidJobType(job_type)
if verbosity not in VERBOSITY_LEVELS:
raise InvalidVerbosity(verbosity)
payload = {'name': name,
'description': description,
'inventory': inventory_.id,
'project': project_.id,
'playbook': playbook,
'credential': credential_.id,
'instance_groups': instance_group_ids,
'job_type': job_type,
'vault_credential': vault_credential,
'forks': forks,
'limit': limit,
'verbosity': verbosity,
'extra_vars': extra_vars,
'job_tags': job_tags,
'force_handlers': force_handlers,
'skip_tags': skip_tags,
'start_at_task': start_at_task,
'timeout': timeout,
'use_fact_cache': use_fact_cache,
'host_config_key': host_config_key,
'ask_diff_mode_on_launch': ask_diff_mode_on_launch,
'ask_variables_on_launch': ask_variables_on_launch,
'ask_limit_on_launch': ask_limit_on_launch,
'ask_tags_on_launch': ask_tags_on_launch,
'ask_skip_tags_on_launch': ask_skip_tags_on_launch,
'ask_job_type_on_launch': ask_job_type_on_launch,
'ask_verbosity_on_launch': ask_verbosity_on_launch,
'ask_inventory_on_launch': ask_inventory_on_launch,
'ask_credential_on_launch': ask_credential_on_launch,
'survey_enabled': survey_enabled,
'become_enabled': become_enabled,
'diff_mode': diff_mode,
'allow_simultaneous': allow_simultaneous}
url = '{api}/job_templates/'.format(api=self.api)
response = self.session.post(url, data=json.dumps(payload))
return JobTemplate(self, response.json()) if response.ok else None
@property
def roles(self):
"""The roles configured in tower
Returns:
EntityManager: The manager object for roles
"""
return EntityManager(self, entity_name='roles', entity_object='Role', primary_match_field='name')
def _get_object_by_url(self, object_type, url):
url = '{host}{url}'.format(host=self.host, url=url)
response = self.session.get(url)
entities = sys.modules['towerlib.entities']
obj = getattr(entities, object_type)
return obj(self, response.json()) if response.ok else None
| 1.53125 | 2 |
mbv1/mobilenetv1.py | klightz/splitting | 9 | 12760653 | '''MobileNetV1 in PyTorch.
See the paper "MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications"
for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import Swish, _make_divisible
class MbBlock(nn.Module):
    """Depthwise 3x3 conv + pointwise 1x1 conv, each followed by
    batch-norm and the chosen activation (Swish or ReLU)."""

    def __init__(self, in_planes, out_planes, stride=1, activation='relu'):
        super(MbBlock, self).__init__()
        # Depthwise: one 3x3 filter per input channel (groups == in_planes).
        self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3,
                               stride=stride, padding=1, groups=in_planes,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(in_planes)
        # Pointwise: 1x1 convolution mixing channels.
        self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                               stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        if activation == 'swish':
            self.nn_act = Swish()
        else:
            self.nn_act = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.nn_act(self.bn1(self.conv1(x)))
        x = self.nn_act(self.bn2(self.conv2(x)))
        return x
class ConvBlock(nn.Module):
    """Plain 3x3 conv + batch-norm + activation (Swish or ReLU)."""

    def __init__(self, in_planes, out_planes, stride=1, activation='swish'):
        super(ConvBlock, self).__init__()
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=3,
                              stride=stride, padding=1, bias=False)
        self.bn = nn.BatchNorm2d(out_planes)
        if activation == 'swish':
            self.nn_act = Swish()
        else:
            self.nn_act = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.nn_act(self.bn(self.conv(x)))
defaultcfg = [32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32]
class MobileNetV1(nn.Module):
    """MobileNetV1 for CIFAR-10/100: a ConvBlock stem, one MbBlock per
    entry of cfg[1:], average pooling and a linear classifier.

    NOTE(review): the ``width_mul`` argument is accepted but never read
    anywhere in this class.
    """

    def __init__(self, dataset='cifar10', cfg=None, width_mul=None, activation='relu'):
        super(MobileNetV1, self).__init__()
        if dataset == 'cifar10':
            num_classes = 10
        elif dataset == 'cifar100':
            num_classes = 100
        else:
            raise NotImplementedError
        self.num_classes = num_classes
        self.activation = activation
        self.cfg = defaultcfg if cfg is None else cfg
        self.conv_block = ConvBlock(3, self.cfg[0], activation=self.activation)
        self.layers = self._make_layers(in_planes=self.cfg[0])
        self.linear = nn.Linear(self.cfg[-1], num_classes)

    def _make_layers(self, in_planes):
        # Blocks 2, 4, 6 and 12 (1-based over cfg[1:]) downsample by 2.
        downsample_at = (2, 4, 6, 12)
        blocks = []
        for idx, width in enumerate(self.cfg[1:], start=1):
            stride = 2 if idx in downsample_at else 1
            blocks.append(MbBlock(in_planes, width, stride,
                                  activation=self.activation))
            in_planes = width
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = self.layers(self.conv_block(x))
        out = F.avg_pool2d(out, 2)
        out = out.view(out.size(0), -1)
        return self.linear(out)
| 2.890625 | 3 |
qpid/tests/messaging/endpoints.py | martpat/qpid-python | 17 | 12760654 | <reponame>martpat/qpid-python<gh_stars>10-100
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# setup, usage, teardown, errors(sync), errors(async), stress, soak,
# boundary-conditions, config
import errno, os, socket, sys, time
from qpid import compat
from qpid.compat import set
from qpid.messaging import *
from qpid.messaging.transports import TRANSPORTS
from qpid.tests.messaging import Base
from threading import Thread
class SetupTests(Base):
  """Connection setup tests: establish/open, tcp_nodelay, connect errors,
  file-descriptor leak checks and reconnect via a deliberately flaky
  transport."""
  def testEstablish(self):
    # Connection.establish creates and opens the connection in one call.
    self.conn = Connection.establish(self.broker, **self.connection_options())
    self.ping(self.conn.session())
  def testOpen(self):
    self.conn = Connection(self.broker, **self.connection_options())
    self.conn.open()
    self.ping(self.conn.session())
  def testOpenReconnectURLs(self):
    options = self.connection_options()
    options["reconnect_urls"] = [self.broker, self.broker]
    self.conn = Connection(self.broker, **options)
    self.conn.open()
    self.ping(self.conn.session())
  def testTcpNodelay(self):
    self.conn = Connection.establish(self.broker, tcp_nodelay=True)
    assert self.conn._driver._transport.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
  def testConnectError(self):
    try:
      # Specifying port 0 yields a bad address on Windows; port 4 is unassigned
      self.conn = Connection.establish("localhost:4")
      assert False, "connect succeeded"
    except ConnectError, e:
      assert "refused" in str(e)
  def testGetError(self):
    # get_error() must return the same error raised by open().
    self.conn = Connection("localhost:0")
    try:
      self.conn.open()
      assert False, "connect succeeded"
    except ConnectError, e:
      assert self.conn.get_error() == e
  def use_fds(self):
    # Exhaust the process file descriptor table; returns the fds opened
    # so callers can free a few and verify no descriptors leak.
    fds = []
    try:
      while True:
        fds.append(os.open(getattr(os, "devnull", "/dev/null"), os.O_RDONLY))
    except OSError, e:
      if e.errno != errno.EMFILE:
        raise e
      else:
        return fds
  def testOpenCloseResourceLeaks(self):
    fds = self.use_fds()
    try:
      # Free a handful of descriptors, then open/close many connections;
      # a leak would hit EMFILE before the loop completes.
      for i in range(32):
        if fds: os.close(fds.pop())
      for i in xrange(64):
        conn = Connection.establish(self.broker, **self.connection_options())
        conn.close()
    finally:
      while fds:
        os.close(fds.pop())
  def testOpenFailResourceLeaks(self):
    fds = self.use_fds()
    try:
      for i in range(32):
        if fds: os.close(fds.pop())
      for i in xrange(64):
        conn = Connection("localhost:0", **self.connection_options())
        # XXX: we need to force a waiter to be created for this test
        # to work
        conn._lock.acquire()
        conn._wait(lambda: False, timeout=0.001)
        conn._lock.release()
        try:
          conn.open()
        except ConnectError, e:
          pass
    finally:
      while fds:
        os.close(fds.pop())
  def testReconnect(self):
    options = self.connection_options()
    real = TRANSPORTS["tcp"]
    # Transport wrapper that fails after ~2K bytes in each direction to
    # force the driver through its reconnect path.
    class flaky:
      def __init__(self, conn, host, port):
        self.real = real(conn, host, port)
        self.sent_count = 0
        self.recv_count = 0
      def fileno(self):
        return self.real.fileno()
      def reading(self, reading):
        return self.real.reading(reading)
      def writing(self, writing):
        return self.real.writing(writing)
      def send(self, bytes):
        if self.sent_count > 2048:
          raise socket.error("fake error")
        n = self.real.send(bytes)
        self.sent_count += n
        return n
      def recv(self, n):
        if self.recv_count > 2048:
          return ""
        bytes = self.real.recv(n)
        self.recv_count += len(bytes)
        return bytes
      def close(self):
        self.real.close()
    TRANSPORTS["flaky"] = flaky
    options["reconnect"] = True
    options["reconnect_interval"] = 0
    options["reconnect_limit"] = 100
    options["reconnect_log"] = False
    options["transport"] = "flaky"
    self.conn = Connection.establish(self.broker, **options)
    ssn = self.conn.session()
    snd = ssn.sender("test-reconnect-queue; {create: always, delete: always}")
    rcv = ssn.receiver(snd.target)
    msgs = [self.message("testReconnect", i) for i in range(20)]
    for m in msgs:
      snd.send(m)
    # Drain everything, separating first deliveries from duplicates
    # produced by redelivery after reconnects.
    content = set()
    drained = []
    duplicates = []
    try:
      while True:
        m = rcv.fetch(timeout=0)
        if m.content not in content:
          content.add(m.content)
          drained.append(m)
        else:
          duplicates.append(m)
        ssn.acknowledge(m)
    except Empty:
      pass
    # XXX: apparently we don't always get duplicates, should figure out why
    #assert duplicates, "no duplicates"
    assert len(drained) == len(msgs)
    for m, d in zip(msgs, drained):
      # XXX: we should figure out how to provide proper end to end
      # redelivered
      self.assertEcho(m, d, d.redelivered)
class ConnectionTests(Base):
  """Tests for session management, detach/attach and closing a connection
  (including closing concurrently from two threads)."""
  def setup_connection(self):
    return Connection.establish(self.broker, **self.connection_options())
  def testCheckClosed(self):
    assert not self.conn.check_closed()
  def testSessionAnon(self):
    # Anonymous sessions are always distinct objects.
    ssn1 = self.conn.session()
    ssn2 = self.conn.session()
    self.ping(ssn1)
    self.ping(ssn2)
    assert ssn1 is not ssn2
  def testSessionNamed(self):
    # Named sessions are cached: the same name yields the same object.
    ssn1 = self.conn.session("one")
    ssn2 = self.conn.session("two")
    self.ping(ssn1)
    self.ping(ssn2)
    assert ssn1 is not ssn2
    assert ssn1 is self.conn.session("one")
    assert ssn2 is self.conn.session("two")
  def testDetach(self):
    ssn = self.conn.session()
    self.ping(ssn)
    self.conn.detach()
    try:
      self.ping(ssn)
      assert False, "ping succeeded"
    except Detached:
      # this is the expected failure when pinging on a detached
      # connection
      pass
    self.conn.attach()
    self.ping(ssn)
  def testClose(self):
    self.conn.close()
    assert not self.conn.attached()
  def testSimultaneousClose(self):
    ssns = [self.conn.session() for i in range(3)]
    for s in ssns:
      for i in range(3):
        s.receiver("amq.topic")
        s.sender("amq.topic")
    # Close from two threads at once; neither may raise.
    def closer(errors):
      try:
        self.conn.close()
      except:
        _, e, _ = sys.exc_info()
        errors.append(compat.format_exc(e))
    t1_errors = []
    t2_errors = []
    t1 = Thread(target=lambda: closer(t1_errors))
    t2 = Thread(target=lambda: closer(t2_errors))
    t1.start()
    t2.start()
    t1.join(self.delay())
    t2.join(self.delay())
    assert not t1_errors, t1_errors[0]
    assert not t2_errors, t2_errors[0]
class hangable:
  """Transport wrapper that can be frozen: after hang() it keeps claiming
  it wants to read but delivers no data and refuses all writes, while the
  wrapped tcp transport is left untouched."""
  def __init__(self, conn, host, port):
    self.tcp = TRANSPORTS["tcp"](conn, host, port)
    self.hung = False
  def hang(self):
    self.hung = True
  def fileno(self):
    return self.tcp.fileno()
  def reading(self, reading):
    # A hung transport always claims readiness to read...
    return True if self.hung else self.tcp.reading(reading)
  def writing(self, writing):
    # ...and never wants to write.
    return False if self.hung else self.tcp.writing(writing)
  def send(self, bytes):
    # Pretend nothing could be sent while hung.
    return 0 if self.hung else self.tcp.send(bytes)
  def recv(self, n):
    # Simulate a peer that never delivers data while hung.
    return "" if self.hung else self.tcp.recv(n)
  def close(self):
    self.tcp.close()
TRANSPORTS["hangable"] = hangable
class TimeoutTests(Base):
  """Verify that blocking operations honour their timeout when the
  underlying ('hangable') transport stops making progress."""
  def setup_connection(self):
    options = self.connection_options()
    options["transport"] = "hangable"
    return Connection.establish(self.broker, **options)
  def setup_session(self):
    return self.conn.session()
  def setup_sender(self):
    return self.ssn.sender("amq.topic")
  def setup_receiver(self):
    return self.ssn.receiver("amq.topic; {link: {reliability: unreliable}}")
  def teardown_connection(self, conn):
    try:
      conn.detach(timeout=0)
    except Timeout:
      pass
  def hang(self):
    # Freeze the transport so subsequent blocking operations never finish.
    self.conn._driver._transport.hang()
  def timeoutTest(self, method):
    # Hang the transport, then check the operation raises Timeout rather
    # than blocking forever.
    self.hang()
    try:
      method(timeout=self.delay())
      assert False, "did not time out"
    except Timeout:
      pass
  def testSenderSync(self):
    self.snd.send(self.content("testSenderSync"), sync=False)
    self.timeoutTest(self.snd.sync)
  def testSenderClose(self):
    self.snd.send(self.content("testSenderClose"), sync=False)
    self.timeoutTest(self.snd.close)
  def testReceiverClose(self):
    self.timeoutTest(self.rcv.close)
  def testSessionSync(self):
    self.snd.send(self.content("testSessionSync"), sync=False)
    self.timeoutTest(self.ssn.sync)
  def testSessionClose(self):
    self.timeoutTest(self.ssn.close)
  def testConnectionDetach(self):
    self.timeoutTest(self.conn.detach)
  def testConnectionClose(self):
    self.timeoutTest(self.conn.close)
  def testConnectionOpen(self):
    options = self.connection_options()
    options["reconnect"] = True
    options["reconnect_timeout"] = self.delay()
    try:
      bad_conn = Connection.establish("badhostname", **options)
      assert False, "did not time out"
    except Timeout:
      pass
# Addresses for the ack tests: ACK_QC creates the queue on first use,
# ACK_QD deletes it when the last test receiver closes.
ACK_QC = 'test-ack-queue; {create: always}'
ACK_QD = 'test-ack-queue; {delete: always}'
class SessionTests(Base):
  """Tests for sender/receiver creation, acknowledgement (sync/async,
  ack_capacity, release/reject dispositions) and transactional sessions."""
  def setup_connection(self):
    return Connection.establish(self.broker, **self.connection_options())
  def setup_session(self):
    return self.conn.session()
  def testSender(self):
    snd = self.ssn.sender('test-snd-queue; {create: sender, delete: receiver}',
                          durable=self.durable())
    snd2 = self.ssn.sender(snd.target, durable=self.durable())
    assert snd is not snd2
    snd2.close()
    content = self.content("testSender")
    snd.send(content)
    rcv = self.ssn.receiver(snd.target)
    msg = rcv.fetch(0)
    assert msg.content == content
    self.ssn.acknowledge(msg)
  def testReceiver(self):
    rcv = self.ssn.receiver('test-rcv-queue; {create: always}')
    rcv2 = self.ssn.receiver(rcv.source)
    assert rcv is not rcv2
    rcv2.close()
    content = self.content("testReceiver")
    snd = self.ssn.sender(rcv.source, durable=self.durable())
    snd.send(content)
    msg = rcv.fetch(0)
    assert msg.content == content
    self.ssn.acknowledge(msg)
    snd2 = self.ssn.receiver('test-rcv-queue; {delete: always}')
  def testDetachedReceiver(self):
    # Receivers may be created while the connection is detached.
    self.conn.detach()
    rcv = self.ssn.receiver("test-dis-rcv-queue; {create: always, delete: always}")
    m = self.content("testDetachedReceiver")
    self.conn.attach()
    snd = self.ssn.sender("test-dis-rcv-queue")
    snd.send(m)
    self.drain(rcv, expected=[m])
  def testNextReceiver(self):
    ADDR = 'test-next-rcv-queue; {create: always, delete: always}'
    rcv1 = self.ssn.receiver(ADDR, capacity=UNLIMITED)
    rcv2 = self.ssn.receiver(ADDR, capacity=UNLIMITED)
    rcv3 = self.ssn.receiver(ADDR, capacity=UNLIMITED)
    snd = self.ssn.sender(ADDR)
    msgs = []
    for i in range(10):
      content = self.content("testNextReceiver", i)
      snd.send(content)
      msgs.append(content)
    fetched = []
    try:
      while True:
        rcv = self.ssn.next_receiver(timeout=self.delay())
        assert rcv in (rcv1, rcv2, rcv3)
        assert rcv.available() > 0
        fetched.append(rcv.fetch().content)
    except Empty:
      pass
    assert msgs == fetched, "expecting %s, got %s" % (msgs, fetched)
    self.ssn.acknowledge()
    #we set the capacity to 0 to prevent the deletion of the queue -
    #triggered the deletion policy when the first receiver is closed -
    #resulting in session exceptions being issued for the remaining
    #active subscriptions:
    for r in [rcv1, rcv2, rcv3]:
      r.capacity = 0
  # XXX, we need a convenient way to assert that required queues are
  # empty on setup, and possibly also to drain queues on teardown
  def ackTest(self, acker, ack_capacity=None):
    # send a bunch of messages
    snd = self.ssn.sender(ACK_QC, durable=self.durable())
    contents = [self.content("ackTest", i) for i in range(15)]
    for c in contents:
      snd.send(c)
    # drain the queue, verify the messages are there and then close
    # without acking
    rcv = self.ssn.receiver(ACK_QC)
    self.drain(rcv, expected=contents)
    self.ssn.close()
    # drain the queue again, verify that they are all the messages
    # were requeued, and ack this time before closing
    self.ssn = self.conn.session()
    if ack_capacity is not None:
      self.ssn.ack_capacity = ack_capacity
    rcv = self.ssn.receiver(ACK_QC)
    self.drain(rcv, expected=contents)
    acker(self.ssn)
    self.ssn.close()
    # drain the queue a final time and verify that the messages were
    # dequeued
    self.ssn = self.conn.session()
    rcv = self.ssn.receiver(ACK_QD)
    self.assertEmpty(rcv)
  def testAcknowledge(self):
    self.ackTest(lambda ssn: ssn.acknowledge())
  def testAcknowledgeAsync(self):
    self.ackTest(lambda ssn: ssn.acknowledge(sync=False))
  def testAcknowledgeAsyncAckCap0(self):
    try:
      try:
        self.ackTest(lambda ssn: ssn.acknowledge(sync=False), 0)
        assert False, "acknowledge shouldn't succeed with ack_capacity of zero"
      except InsufficientCapacity:
        pass
    finally:
      # Restore capacity and clean the queue regardless of outcome.
      self.ssn.ack_capacity = UNLIMITED
      self.drain(self.ssn.receiver(ACK_QD))
      self.ssn.acknowledge()
  def testAcknowledgeAsyncAckCap1(self):
    self.ackTest(lambda ssn: ssn.acknowledge(sync=False), 1)
  def testAcknowledgeAsyncAckCap5(self):
    self.ackTest(lambda ssn: ssn.acknowledge(sync=False), 5)
  def testAcknowledgeAsyncAckCapUNLIMITED(self):
    self.ackTest(lambda ssn: ssn.acknowledge(sync=False), UNLIMITED)
  def testRelease(self):
    msgs = [self.message("testRelease", i) for i in range(3)]
    snd = self.ssn.sender("test-release-queue; {create: always, delete: always}")
    for m in msgs:
      snd.send(m)
    rcv = self.ssn.receiver(snd.target)
    echos = self.drain(rcv, expected=msgs)
    self.ssn.acknowledge(echos[0])
    # Released messages are redelivered; set_redelivered controls the flag.
    self.ssn.acknowledge(echos[1], Disposition(RELEASED, set_redelivered=True))
    self.ssn.acknowledge(echos[2], Disposition(RELEASED))
    self.drain(rcv, limit=1, expected=msgs[1:2], redelivered=True)
    self.drain(rcv, expected=msgs[2:3])
    self.ssn.acknowledge()
  def testReject(self):
    msgs = [self.message("testReject", i) for i in range(3)]
    # Rejected messages are routed to the queue's alternate exchange.
    snd = self.ssn.sender("""
      test-reject-queue; {
        create: always,
        delete: always,
        node: {
          x-declare: {
            alternate-exchange: 'amq.topic'
          }
        }
      }
      """)
    for m in msgs:
      snd.send(m)
    rcv = self.ssn.receiver(snd.target)
    rej = self.ssn.receiver("amq.topic")
    echos = self.drain(rcv, expected=msgs)
    self.ssn.acknowledge(echos[0])
    self.ssn.acknowledge(echos[1], Disposition(REJECTED))
    self.ssn.acknowledge(echos[2],
                         Disposition(REJECTED, code=0, text="test-reject"))
    self.drain(rej, expected=msgs[1:])
    self.ssn.acknowledge()
  def send(self, ssn, target, base, count=1):
    # Helper: send `count` generated messages to `target`, return them.
    snd = ssn.sender(target, durable=self.durable())
    messages = []
    for i in range(count):
      c = self.message(base, i)
      snd.send(c)
      messages.append(c)
    snd.close()
    return messages
  def txTest(self, commit):
    TX_Q = 'test-tx-queue; {create: sender, delete: receiver}'
    TX_Q_COPY = 'test-tx-queue-copy; {create: always, delete: always}'
    txssn = self.conn.session(transactional=True)
    messages = self.send(self.ssn, TX_Q, "txTest", 3)
    txrcv = txssn.receiver(TX_Q)
    txsnd = txssn.sender(TX_Q_COPY, durable=self.durable())
    rcv = self.ssn.receiver(txrcv.source)
    copy_rcv = self.ssn.receiver(txsnd.target)
    self.assertEmpty(copy_rcv)
    # Move the messages inside the transaction; nothing is visible on the
    # copy queue until commit.
    for i in range(3):
      m = txrcv.fetch(0)
      txsnd.send(m)
      self.assertEmpty(copy_rcv)
    txssn.acknowledge()
    if commit:
      txssn.commit()
      self.assertEmpty(rcv)
      self.drain(copy_rcv, expected=messages)
    else:
      txssn.rollback()
      self.drain(rcv, expected=messages, redelivered=True)
      self.assertEmpty(copy_rcv)
    self.ssn.acknowledge()
  def testCommit(self):
    self.txTest(True)
  def testRollback(self):
    self.txTest(False)
  def txTestSend(self, commit):
    TX_SEND_Q = 'test-tx-send-queue; {create: sender, delete: receiver}'
    txssn = self.conn.session(transactional=True)
    messages = self.send(txssn, TX_SEND_Q, "txTestSend", 3)
    rcv = self.ssn.receiver(TX_SEND_Q)
    self.assertEmpty(rcv)
    if commit:
      txssn.commit()
      self.drain(rcv, expected=messages)
      self.ssn.acknowledge()
    else:
      txssn.rollback()
      self.assertEmpty(rcv)
      txssn.commit()
      self.assertEmpty(rcv)
  def testCommitSend(self):
    self.txTestSend(True)
  def testRollbackSend(self):
    self.txTestSend(False)
  def txTestAck(self, commit):
    TX_ACK_QC = 'test-tx-ack-queue; {create: always}'
    TX_ACK_QD = 'test-tx-ack-queue; {delete: always}'
    txssn = self.conn.session(transactional=True)
    txrcv = txssn.receiver(TX_ACK_QC)
    self.assertEmpty(txrcv)
    messages = self.send(self.ssn, TX_ACK_QC, "txTestAck", 3)
    self.drain(txrcv, expected=messages)
    if commit:
      txssn.acknowledge()
    else:
      txssn.rollback()
      self.drain(txrcv, expected=messages, redelivered=True)
      txssn.acknowledge()
      txssn.rollback()
      self.drain(txrcv, expected=messages, redelivered=True)
      txssn.commit() # commit without ack
    self.assertEmpty(txrcv)
    txssn.close()
    # Reopen: acks not committed before close are rolled back, so the
    # messages come back redelivered.
    txssn = self.conn.session(transactional=True)
    txrcv = txssn.receiver(TX_ACK_QC)
    self.drain(txrcv, expected=messages, redelivered=True)
    txssn.acknowledge()
    txssn.commit()
    rcv = self.ssn.receiver(TX_ACK_QD)
    self.assertEmpty(rcv)
    txssn.close()
    self.assertEmpty(rcv)
  def testCommitAck(self):
    self.txTestAck(True)
  def testRollbackAck(self):
    self.txTestAck(False)
  def testDoubleCommit(self):
    ssn = self.conn.session(transactional=True)
    snd = ssn.sender("amq.direct/doubleCommit")
    rcv = ssn.receiver("amq.direct/doubleCommit")
    msgs = [self.message("testDoubleCommit", i, subject="doubleCommit") for i in range(3)]
    for m in msgs:
      snd.send(m)
    ssn.commit()
    self.drain(rcv, expected=msgs)
    ssn.acknowledge()
    ssn.commit()
  def testClose(self):
    self.ssn.close()
    try:
      self.ping(self.ssn)
      assert False, "ping succeeded"
    except Detached:
      pass
  def testRxCallback(self):
    """Verify that the callback is invoked when a message is received.
    """
    ADDR = 'test-rx_callback-queue; {create: always, delete: receiver}'
    class CallbackHandler:
      def __init__(self):
        self.handler_called = False
      def __call__(self, ssn):
        self.handler_called = True
        self.ssn = ssn
    cb = CallbackHandler()
    self.ssn.set_message_received_notify_handler(cb)
    rcv = self.ssn.receiver(ADDR)
    rcv.capacity = UNLIMITED
    snd = self.ssn.sender(ADDR)
    assert not cb.handler_called
    snd.send("Ping")
    # Busy-wait until the handler fires or the timeout expires.
    deadline = time.time() + self.timeout()
    while time.time() < deadline:
      if cb.handler_called:
        break;
    assert cb.handler_called
    assert cb.ssn == self.ssn
    snd.close()
    rcv.close()
RECEIVER_Q = 'test-receiver-queue; {create: always, delete: always}'
class ReceiverTests(Base):
  """Tests for fetch semantics (timeouts, closed/concurrently-closed
  endpoints), prefetch capacity/threshold, browse vs consume modes and
  unreliable links."""
  def setup_connection(self):
    return Connection.establish(self.broker, **self.connection_options())
  def setup_session(self):
    return self.conn.session()
  def setup_sender(self):
    return self.ssn.sender(RECEIVER_Q)
  def setup_receiver(self):
    return self.ssn.receiver(RECEIVER_Q)
  def send(self, base, count = None, sync=True):
    # Helper: send one generated message via the fixture sender.
    content = self.content(base, count)
    self.snd.send(content, sync=sync)
    return content
  def testFetch(self):
    # Empty queue: fetch(0) fails immediately...
    try:
      msg = self.rcv.fetch(0)
      assert False, "unexpected message: %s" % msg
    except Empty:
      pass
    # ...and a timed fetch waits at least the requested delay.
    try:
      start = time.time()
      msg = self.rcv.fetch(self.delay())
      assert False, "unexpected message: %s" % msg
    except Empty:
      elapsed = time.time() - start
      assert elapsed >= self.delay()
    one = self.send("testFetch", 1)
    two = self.send("testFetch", 2)
    three = self.send("testFetch", 3)
    msg = self.rcv.fetch(0)
    assert msg.content == one
    msg = self.rcv.fetch(self.delay())
    assert msg.content == two
    msg = self.rcv.fetch()
    assert msg.content == three
    self.ssn.acknowledge()
  def fetchFromClosedTest(self, entry):
    # Closing the receiver/session/connection must turn fetch into
    # LinkClosed, not Empty.
    entry.close()
    try:
      msg = self.rcv.fetch(0)
      assert False, "unexpected result: %s" % msg
    except Empty, e:
      assert False, "unexpected exception: %s" % e
    except LinkClosed, e:
      pass
  def testFetchFromClosedReceiver(self):
    self.fetchFromClosedTest(self.rcv)
  def testFetchFromClosedSession(self):
    self.fetchFromClosedTest(self.ssn)
  def testFetchFromClosedConnection(self):
    self.fetchFromClosedTest(self.conn)
  def fetchFromConcurrentCloseTest(self, entry):
    # Close from another thread while a fetch is blocked.
    def closer():
      self.sleep()
      entry.close()
    t = Thread(target=closer)
    t.start()
    try:
      msg = self.rcv.fetch()
      assert False, "unexpected result: %s" % msg
    except Empty, e:
      assert False, "unexpected exception: %s" % e
    except LinkClosed, e:
      pass
    t.join()
  def testFetchFromConcurrentCloseReceiver(self):
    self.fetchFromConcurrentCloseTest(self.rcv)
  def testFetchFromConcurrentCloseSession(self):
    self.fetchFromConcurrentCloseTest(self.ssn)
  def testFetchFromConcurrentCloseConnection(self):
    self.fetchFromConcurrentCloseTest(self.conn)
  def testCapacityIncrease(self):
    # With capacity 0 nothing is prefetched until capacity is raised.
    content = self.send("testCapacityIncrease")
    self.sleep()
    assert self.rcv.available() == 0
    self.rcv.capacity = UNLIMITED
    self.sleep()
    assert self.rcv.available() == 1
    msg = self.rcv.fetch(0)
    assert msg.content == content
    assert self.rcv.available() == 0
    self.ssn.acknowledge()
  def testCapacityDecrease(self):
    self.rcv.capacity = UNLIMITED
    one = self.send("testCapacityDecrease", 1)
    self.sleep()
    assert self.rcv.available() == 1
    msg = self.rcv.fetch(0)
    assert msg.content == one
    self.rcv.capacity = 0
    two = self.send("testCapacityDecrease", 2)
    self.sleep()
    assert self.rcv.available() == 0
    msg = self.rcv.fetch(0)
    assert msg.content == two
    self.ssn.acknowledge()
  def capacityTest(self, capacity, threshold=None):
    # Send twice the capacity, then drain in halves, checking prefetch
    # is replenished each time.
    if threshold is not None:
      self.rcv.threshold = threshold
    self.rcv.capacity = capacity
    self.assertAvailable(self.rcv, 0)
    for i in range(2*capacity):
      self.send("capacityTest(%s, %s)" % (capacity, threshold), i, sync=False)
    self.snd.sync()
    self.sleep()
    self.assertAvailable(self.rcv)
    first = capacity/2
    second = capacity - first
    self.drain(self.rcv, limit = first)
    self.sleep()
    self.assertAvailable(self.rcv)
    self.drain(self.rcv, limit = second)
    self.sleep()
    self.assertAvailable(self.rcv)
    drained = self.drain(self.rcv)
    assert len(drained) == capacity, "%s, %s" % (len(drained), drained)
    self.assertAvailable(self.rcv, 0)
    self.ssn.acknowledge()
  def testCapacity5(self):
    self.capacityTest(5)
  def testCapacity5Threshold1(self):
    self.capacityTest(5, 1)
  def testCapacity10(self):
    self.capacityTest(10)
  def testCapacity10Threshold1(self):
    self.capacityTest(10, 1)
  def testCapacity100(self):
    self.capacityTest(100)
  def testCapacity100Threshold1(self):
    self.capacityTest(100, 1)
  def testCapacityUNLIMITED(self):
    self.rcv.capacity = UNLIMITED
    self.assertAvailable(self.rcv, 0)
    for i in range(10):
      self.send("testCapacityUNLIMITED", i)
    self.sleep()
    self.assertAvailable(self.rcv, 10)
    self.drain(self.rcv)
    self.assertAvailable(self.rcv, 0)
    self.ssn.acknowledge()
  def testAvailable(self):
    self.rcv.capacity = UNLIMITED
    assert self.rcv.available() == 0
    for i in range(3):
      self.send("testAvailable", i)
    self.sleep()
    assert self.rcv.available() == 3
    for i in range(3, 10):
      self.send("testAvailable", i)
    self.sleep()
    assert self.rcv.available() == 10
    self.drain(self.rcv, limit=3)
    assert self.rcv.available() == 7
    self.drain(self.rcv)
    assert self.rcv.available() == 0
    self.ssn.acknowledge()
  def testDoubleClose(self):
    # Two subscribers on a topic node; closing one must not disturb the other.
    m1 = self.content("testDoubleClose", 1)
    m2 = self.content("testDoubleClose", 2)
    snd = self.ssn.sender("""test-double-close; {
      create: always,
      delete: sender,
      node: {
        type: topic
      }
    }
    """)
    r1 = self.ssn.receiver(snd.target)
    r2 = self.ssn.receiver(snd.target)
    snd.send(m1)
    self.drain(r1, expected=[m1])
    self.drain(r2, expected=[m1])
    r1.close()
    snd.send(m2)
    self.drain(r2, expected=[m2])
    r2.close()
  # XXX: need testClose
  def testMode(self):
    # browse mode leaves the messages on the queue; consume removes them.
    msgs = [self.content("testMode", 1),
            self.content("testMode", 2),
            self.content("testMode", 3)]
    for m in msgs:
      self.snd.send(m)
    rb = self.ssn.receiver('test-receiver-queue; {mode: browse}')
    rc = self.ssn.receiver('test-receiver-queue; {mode: consume}')
    self.drain(rb, expected=msgs)
    self.drain(rc, expected=msgs)
    rb2 = self.ssn.receiver(rb.source)
    self.assertEmpty(rb2)
    self.drain(self.rcv, expected=[])
  def testUnsettled(self):
    # just tests the code path and not the value
    rcv = self.ssn.receiver('test-receiver-unsettled-queue; {create: always, delete: always}')
    rcv.unsettled()
  def unreliabilityTest(self, mode="unreliable"):
    msgs = [self.message("testUnreliable", i) for i in range(3)]
    snd = self.ssn.sender("test-unreliability-queue; {create: sender, delete: receiver}")
    rcv = self.ssn.receiver(snd.target)
    for m in msgs:
      snd.send(m)
    # close without ack on reliable receiver, messages should be requeued
    ssn = self.conn.session()
    rrcv = ssn.receiver("test-unreliability-queue")
    self.drain(rrcv, expected=msgs)
    ssn.close()
    # close without ack on unreliable receiver, messages should not be requeued
    ssn = self.conn.session()
    urcv = ssn.receiver("test-unreliability-queue; {link: {reliability: %s}}" % mode)
    self.drain(urcv, expected=msgs, redelivered=True)
    ssn.close()
    self.assertEmpty(rcv)
  def testUnreliable(self):
    self.unreliabilityTest(mode="unreliable")
  def testAtMostOnce(self):
    self.unreliabilityTest(mode="at-most-once")
class AddressTests(Base):
    # Address-string behaviour: option parsing/validation, node create/delete,
    # x-bindings at node and link level, subjects, and link reliability.

    def setup_connection(self):
        return Connection.establish(self.broker, **self.connection_options())

    def setup_session(self):
        return self.conn.session()

    def badOption(self, options, error):
        # Both sender and receiver creation must reject `options` with an
        # InvalidOption whose text matches `error` exactly.
        try:
            self.ssn.sender("test-bad-options-snd; %s" % options)
            assert False
        except InvalidOption, e:
            assert "error in options: %s" % error == str(e), e

        try:
            self.ssn.receiver("test-bad-options-rcv; %s" % options)
            assert False
        except InvalidOption, e:
            assert "error in options: %s" % error == str(e), e

    def testIllegalKey(self):
        self.badOption("{create: always, node: "
                       "{this-property-does-not-exist: 3}}",
                       "node: this-property-does-not-exist: "
                       "illegal key")

    def testWrongValue(self):
        self.badOption("{create: asdf}", "create: asdf not in "
                       "('always', 'sender', 'receiver', 'never')")

    def testWrongType1(self):
        self.badOption("{node: asdf}",
                       "node: asdf is not a map")

    def testWrongType2(self):
        self.badOption("{node: {durable: []}}",
                       "node: durable: [] is not a bool")

    def testCreateQueue(self):
        # create-on-demand queue must exist for both the sender and a later
        # plain receiver on the same name
        snd = self.ssn.sender("test-create-queue; {create: always, delete: always, "
                              "node: {type: queue, durable: False, "
                              "x-declare: {auto_delete: true}}}")
        content = self.content("testCreateQueue")
        snd.send(content)
        rcv = self.ssn.receiver("test-create-queue")
        self.drain(rcv, expected=[content])

    def createExchangeTest(self, props=""):
        # Create a topic exchange (extra x-declare props via `props`), then
        # verify subject-based routing to subscribers on "first"/"second".
        addr = """test-create-exchange; {
          create: always,
          delete: always,
          node: {
            type: topic,
            durable: False,
            x-declare: {auto_delete: true, %s}
          }
        }""" % props
        snd = self.ssn.sender(addr)
        snd.send("ping")
        rcv1 = self.ssn.receiver("test-create-exchange/first")
        rcv2 = self.ssn.receiver("test-create-exchange/first")
        rcv3 = self.ssn.receiver("test-create-exchange/second")
        # the pre-subscription "ping" must not be delivered to anyone
        for r in (rcv1, rcv2, rcv3):
            try:
                r.fetch(0)
                assert False
            except Empty:
                pass
        msg1 = Message(self.content("testCreateExchange", 1), subject="first")
        msg2 = Message(self.content("testCreateExchange", 2), subject="second")
        snd.send(msg1)
        snd.send(msg2)
        self.drain(rcv1, expected=[msg1.content])
        self.drain(rcv2, expected=[msg1.content])
        self.drain(rcv3, expected=[msg2.content])

    def testCreateExchange(self):
        self.createExchangeTest()

    def testCreateExchangeDirect(self):
        self.createExchangeTest("type: direct")

    def testCreateExchangeTopic(self):
        self.createExchangeTest("type: topic")

    def testDeleteBySender(self):
        # {delete: always} on a sender removes the node when the sender closes
        snd = self.ssn.sender("test-delete; {create: always}")
        snd.send("ping")
        snd.close()
        snd = self.ssn.sender("test-delete; {delete: always}")
        snd.send("ping")
        snd.close()
        try:
            self.ssn.sender("test-delete")
        except NotFound, e:
            assert "no such queue" in str(e)

    def testDeleteByReceiver(self):
        # {delete: always} on a receiver removes the node when it closes
        rcv = self.ssn.receiver("test-delete; {create: always, delete: always}")
        try:
            rcv.fetch(0)
        except Empty:
            pass
        rcv.close()

        try:
            self.ssn.receiver("test-delete")
            assert False
        except NotFound, e:
            assert "no such queue" in str(e)

    def testDeleteSpecial(self):
        # deleting a built-in exchange must be refused by the broker
        snd = self.ssn.sender("amq.topic; {delete: always}")
        snd.send("asdf")
        try:
            snd.close()
            assert False, "successfully deleted amq.topic"
        except SessionError, e:
            assert e.code == 530
        # XXX: need to figure out close after error
        self.conn._remove_session(self.ssn)

    def testNodeBindingsQueue(self):
        # node-level x-bindings: the queue receives from all bound exchanges
        snd = self.ssn.sender("""
        test-node-bindings-queue; {
          create: always,
          delete: always,
          node: {
            x-bindings: [{exchange: "amq.topic", key: "a.#"},
                         {exchange: "amq.direct", key: "b"},
                         {exchange: "amq.topic", key: "c.*"}]
          }
        }
        """)
        snd.send("one")

        snd_a = self.ssn.sender("amq.topic/a.foo")
        snd_b = self.ssn.sender("amq.direct/b")
        snd_c = self.ssn.sender("amq.topic/c.bar")

        snd_a.send("two")
        snd_b.send("three")
        snd_c.send("four")

        rcv = self.ssn.receiver("test-node-bindings-queue")
        self.drain(rcv, expected=["one", "two", "three", "four"])

    def testNodeBindingsTopic(self):
        # node-level x-bindings on a topic exchange: each queue gets only the
        # subjects its key matches ("#" gets everything)
        rcv = self.ssn.receiver("test-node-bindings-topic-queue; {create: always, delete: always}")
        rcv_a = self.ssn.receiver("test-node-bindings-topic-queue-a; {create: always, delete: always}")
        rcv_b = self.ssn.receiver("test-node-bindings-topic-queue-b; {create: always, delete: always}")
        rcv_c = self.ssn.receiver("test-node-bindings-topic-queue-c; {create: always, delete: always}")
        snd = self.ssn.sender("""
        test-node-bindings-topic; {
          create: always,
          delete: always,
          node: {
            type: topic,
            x-bindings: [{queue: test-node-bindings-topic-queue, key: "#"},
                         {queue: test-node-bindings-topic-queue-a, key: "a.#"},
                         {queue: test-node-bindings-topic-queue-b, key: "b"},
                         {queue: test-node-bindings-topic-queue-c, key: "c.*"}]
          }
        }
        """)
        m1 = Message("one")
        m2 = Message(subject="a.foo", content="two")
        m3 = Message(subject="b", content="three")
        m4 = Message(subject="c.bar", content="four")
        snd.send(m1)
        snd.send(m2)
        snd.send(m3)
        snd.send(m4)
        self.drain(rcv, expected=[m1, m2, m3, m4])
        self.drain(rcv_a, expected=[m2])
        self.drain(rcv_b, expected=[m3])
        self.drain(rcv_c, expected=[m4])

    def testLinkBindings(self):
        # link-level x-bindings exist only for the lifetime of the link, and
        # can be attached from either the sender or the receiver side
        m_a = self.message("testLinkBindings", 1, subject="a")
        m_b = self.message("testLinkBindings", 2, subject="b")
        self.ssn.sender("test-link-bindings-queue; {create: always, delete: always}")
        snd = self.ssn.sender("amq.topic")
        snd.send(m_a)
        snd.send(m_b)
        snd.close()
        rcv = self.ssn.receiver("test-link-bindings-queue")
        # no binding yet, so nothing arrived
        self.assertEmpty(rcv)
        snd = self.ssn.sender("""
        amq.topic; {
          link: {
            x-bindings: [{queue: test-link-bindings-queue, key: a}]
          }
        }
        """)
        snd.send(m_a)
        snd.send(m_b)
        self.drain(rcv, expected=[m_a])
        rcv.close()
        rcv = self.ssn.receiver("""
        test-link-bindings-queue; {
          link: {
            x-bindings: [{exchange: "amq.topic", key: b}]
          }
        }
        """)
        snd.send(m_a)
        snd.send(m_b)
        # key "a" via the sender's binding plus key "b" via the receiver's
        self.drain(rcv, expected=[m_a, m_b])

    def testSubjectOverride(self):
        # an explicit Message subject overrides the subject in the address
        snd = self.ssn.sender("amq.topic/a")
        rcv_a = self.ssn.receiver("amq.topic/a")
        rcv_b = self.ssn.receiver("amq.topic/b")
        m1 = self.content("testSubjectOverride", 1)
        m2 = self.content("testSubjectOverride", 2)
        snd.send(m1)
        snd.send(Message(subject="b", content=m2))
        self.drain(rcv_a, expected=[m1])
        self.drain(rcv_b, expected=[m2])

    def testSubjectDefault(self):
        # messages without a subject inherit the sender's address subject
        m1 = self.content("testSubjectDefault", 1)
        m2 = self.content("testSubjectDefault", 2)
        snd = self.ssn.sender("amq.topic/a")
        rcv = self.ssn.receiver("amq.topic")
        snd.send(m1)
        snd.send(Message(subject="b", content=m2))
        e1 = rcv.fetch(timeout=0)
        e2 = rcv.fetch(timeout=0)
        assert e1.subject == "a", "subject: %s" % e1.subject
        assert e2.subject == "b", "subject: %s" % e2.subject
        self.assertEmpty(rcv)

    def doReliabilityTest(self, reliability, messages, expected):
        # detach/attach the connection and check what the link retains
        snd = self.ssn.sender("amq.topic")
        rcv = self.ssn.receiver("amq.topic; {link: {reliability: %s}}" % reliability)
        for m in messages:
            snd.send(m)
        self.conn.detach()
        self.conn.attach()
        self.drain(rcv, expected=expected)

    def testReliabilityUnreliable(self):
        # unreliable link loses in-flight messages over a detach
        msgs = [self.message("testReliabilityUnreliable", i) for i in range(3)]
        self.doReliabilityTest("unreliable", msgs, [])

    def testReliabilityAtLeastOnce(self):
        # at-least-once link retains them across the detach
        msgs = [self.message("testReliabilityAtLeastOnce", i) for i in range(3)]
        self.doReliabilityTest("at-least-once", msgs, msgs)

    def testLinkName(self):
        # the named subscription queue is directly addressable by link name
        msgs = [self.message("testLinkName", i) for i in range(3)]
        snd = self.ssn.sender("amq.topic")
        trcv = self.ssn.receiver("amq.topic; {link: {name: test-link-name}}")
        qrcv = self.ssn.receiver("test-link-name")
        for m in msgs:
            snd.send(m)
        self.drain(qrcv, expected=msgs)

    def testAssert1(self):
        # asserting the wrong node type must fail one way or another
        try:
            snd = self.ssn.sender("amq.topic; {assert: always, node: {type: queue}}")
            assert 0, "assertion failed to trigger"
        except AssertionFailed, e:
            pass
        except NotFound, e: # queue named amq.topic not found
            pass

    def testAssert2(self):
        # asserting the correct (default) type must succeed
        snd = self.ssn.sender("amq.topic; {assert: always}")
# Fixture addresses shared by the AddressErrorTests below.
NOSUCH_Q = "this-queue-should-not-exist"          # valid name, but no such node
UNPARSEABLE_ADDR = "name/subject; {bad options"   # lexes, but fails to parse
UNLEXABLE_ADDR = "\0x0\0x1\0x2\0x3"               # characters the lexer rejects
class AddressErrorTests(Base):
    # Negative tests: malformed or unknown addresses must raise the right
    # exception type (and message) from both sender and receiver creation.

    def setup_connection(self):
        return Connection.establish(self.broker, **self.connection_options())

    def setup_session(self):
        return self.conn.session()

    def senderErrorTest(self, addr, exc, check=lambda e: True):
        # Creating a sender for `addr` must raise `exc`; `check` may inspect
        # the exception text further.
        try:
            self.ssn.sender(addr, durable=self.durable())
            assert False, "sender creation succeeded"
        except exc, e:
            assert check(e), "unexpected error: %s" % compat.format_exc(e)

    def receiverErrorTest(self, addr, exc, check=lambda e: True):
        # Same contract as senderErrorTest, but for receiver creation.
        try:
            self.ssn.receiver(addr)
            assert False, "receiver creation succeeded"
        except exc, e:
            assert check(e), "unexpected error: %s" % compat.format_exc(e)

    def testNoneTarget(self):
        self.senderErrorTest(None, MalformedAddress)

    def testNoneSource(self):
        self.receiverErrorTest(None, MalformedAddress)

    def testNoTarget(self):
        self.senderErrorTest(NOSUCH_Q, NotFound, lambda e: NOSUCH_Q in str(e))

    def testNoSource(self):
        self.receiverErrorTest(NOSUCH_Q, NotFound, lambda e: NOSUCH_Q in str(e))

    def testUnparseableTarget(self):
        self.senderErrorTest(UNPARSEABLE_ADDR, MalformedAddress,
                             lambda e: "expecting COLON" in str(e))

    def testUnparseableSource(self):
        self.receiverErrorTest(UNPARSEABLE_ADDR, MalformedAddress,
                               lambda e: "expecting COLON" in str(e))

    def testUnlexableTarget(self):
        self.senderErrorTest(UNLEXABLE_ADDR, MalformedAddress,
                             lambda e: "unrecognized characters" in str(e))

    def testUnlexableSource(self):
        self.receiverErrorTest(UNLEXABLE_ADDR, MalformedAddress,
                               lambda e: "unrecognized characters" in str(e))

    def testInvalidMode(self):
        self.receiverErrorTest('name; {mode: "this-is-a-bad-receiver-mode"}',
                               InvalidOption,
                               lambda e: "not in ('browse', 'consume')" in str(e))
# Queue used by all SenderTests: created on demand, deleted when released.
SENDER_Q = 'test-sender-q; {create: always, delete: always}'
class SenderTests(Base):
    # Sender behaviour: payload types, asynchronous send at various
    # capacities, capacity timeouts, and resilience to interrupted syscalls.

    def setup_connection(self):
        return Connection.establish(self.broker, **self.connection_options())

    def setup_session(self):
        return self.conn.session()

    def setup_sender(self):
        return self.ssn.sender(SENDER_Q)

    def setup_receiver(self):
        return self.ssn.receiver(SENDER_Q)

    def checkContent(self, content):
        # Round-trip `content` both as a bare payload and wrapped in a
        # Message; all views of the content must compare equal.
        self.snd.send(content)
        msg = self.rcv.fetch(0)
        assert msg.content == content

        out = Message(content)
        self.snd.send(out)
        echo = self.rcv.fetch(0)
        assert out.content == echo.content
        assert echo.content == msg.content
        self.ssn.acknowledge()

    def testSendString(self):
        self.checkContent(self.content("testSendString"))

    def testSendList(self):
        self.checkContent(["testSendList", 1, 3.14, self.test_id])

    def testSendMap(self):
        self.checkContent({"testSendMap": self.test_id, "pie": "blueberry", "pi": 3.14})

    def asyncTest(self, capacity):
        # fire 15 unsynced sends through a sender with the given capacity,
        # then verify everything arrived
        self.snd.capacity = capacity
        msgs = [self.content("asyncTest", i) for i in range(15)]
        for m in msgs:
            self.snd.send(m, sync=False)
        self.drain(self.rcv, timeout=self.delay(), expected=msgs)
        self.ssn.acknowledge()

    def testSendAsyncCapacity0(self):
        try:
            self.asyncTest(0)
            assert False, "send shouldn't succeed with zero capacity"
        except InsufficientCapacity:
            # this is expected
            pass

    def testSendAsyncCapacity1(self):
        self.asyncTest(1)

    def testSendAsyncCapacity5(self):
        self.asyncTest(5)

    def testSendAsyncCapacityUNLIMITED(self):
        self.asyncTest(UNLIMITED)

    def testCapacityTimeout(self):
        # With capacity 1 and a zero timeout, a growing unacked backlog must
        # eventually raise InsufficientCapacity rather than block forever.
        self.snd.capacity = 1
        msgs = []
        caught = False
        while len(msgs) < 100:
            m = self.content("testCapacity", len(msgs))
            try:
                self.snd.send(m, sync=False, timeout=0)
                msgs.append(m)
            except InsufficientCapacity:
                caught = True
                break
        self.snd.sync()
        self.drain(self.rcv, expected=msgs)
        self.ssn.acknowledge()
        assert caught, "did not exceed capacity"

    def testEINTR(self):
        # os.setuid is expected to fail here (tests don't run as root); the
        # point of the test is that sends on either side of the interrupted
        # call still succeed (see test name: EINTR handling).
        m1 = self.content("testEINTR", 0)
        m2 = self.content("testEINTR", 1)

        self.snd.send(m1, timeout=self.timeout())
        try:
            os.setuid(500)
            assert False, "setuid should fail"
        except:
            pass
        self.snd.send(m2, timeout=self.timeout())
class ErrorCallbackTests(Base):
    # Async exception notifications must fire on the most specific object
    # (connection / session / sender / receiver) and on no other level.

    class Callback:
        # Records the object and exception passed to a notify handler.
        def __init__(self, name):
            self.name = name
            self.obj = None
            self.exc = None

        def __call__(self, obj, exc):
            self.obj = obj
            self.exc = exc

    def testConnectErrorCallback(self):
        cb = ErrorCallbackTests.Callback("connection")
        # presumably nothing listens on port 4, so the connect must fail
        self.conn = Connection("localhost:4")
        self.conn.set_async_exception_notify_handler(cb)
        try:
            self.conn.open()
            assert False, "connect succeeded"
        except Exception:
            assert self.conn == cb.obj, cb.obj
            assert cb.name == "connection"
            assert cb.exc is not None

    def testSessionErrorCallback(self):
        ccb = ErrorCallbackTests.Callback("connection")
        self.conn = Connection.establish(self.broker, **self.connection_options())
        self.conn.set_async_exception_notify_handler(ccb)
        scb = ErrorCallbackTests.Callback("session")
        self.ssn = self.conn.session(transactional=True)
        self.ssn.set_async_exception_notify_handler(scb)
        # detaching the connection makes the next session operation fail
        self.conn.detach()
        try:
            self.ping(self.ssn)
            assert False, "session succeeded"
        except Exception:
            assert self.ssn == scb.obj, scb.obj
            assert scb.name == "session"
            assert scb.exc is not None
            # connection callback should be empty
            assert ccb.obj == None, ccb.obj

    def testSenderErrorCallback(self):
        ccb = ErrorCallbackTests.Callback("connection")
        conn = Connection(self.broker, **self.connection_options())
        conn.set_async_exception_notify_handler(ccb)
        scb = ErrorCallbackTests.Callback("session")
        ssn = conn.session()
        ssn.set_async_exception_notify_handler(scb)
        # sender on a nonexistent queue: the send below must fail
        snd = ssn.sender(NOSUCH_Q)
        sndcb = ErrorCallbackTests.Callback("sender")
        snd.set_async_exception_notify_handler(sndcb)
        conn.open()
        try:
            snd.send(self.message("HI"))
            assert False, "send worked"
        except Exception:
            assert snd == sndcb.obj, sndcb.obj
            assert sndcb.name == "sender"
            assert sndcb.exc is not None
            # connection and session callbacks are empty
            assert ccb.obj == None, ccb.obj
            assert scb.obj == None, scb.obj

    def testReceiverErrorCallback(self):
        ccb = ErrorCallbackTests.Callback("connection")
        self.conn = Connection(self.broker, **self.connection_options())
        self.conn.set_async_exception_notify_handler(ccb)
        scb = ErrorCallbackTests.Callback("session")
        self.ssn = self.conn.session()
        self.ssn.set_async_exception_notify_handler(scb)
        # receiver on a nonexistent queue: the fetch below must fail
        self.recv = self.ssn.receiver(NOSUCH_Q)
        rcb = ErrorCallbackTests.Callback("receiver")
        self.recv.set_async_exception_notify_handler(rcb)
        self.conn.open()
        try:
            self.recv.fetch()
            assert False, "fetch worked"
        except Exception:
            assert self.recv == rcb.obj, rcb.obj
            assert rcb.name == "receiver"
            assert rcb.exc is not None
            # connection and session callbacks are empty
            assert ccb.obj == None, ccb.obj
            assert scb.obj == None, scb.obj
| 2.0625 | 2 |
python/days/d06/__init__.py | tamaroth/advent-of-code-2018 | 1 | 12760655 | <reponame>tamaroth/advent-of-code-2018
"""
Day 6: Chronal Coordinates
"""
from days import Day
from utils.file import read_lines_of_datafile
def get_day_06():
    """Build the Day 6 solver from the bundled puzzle input file."""
    input_lines = read_lines_of_datafile('day_06_data.txt')
    return Day06(input_lines)
class Day06(Day):
    """A solution to Day 6: Chronal Coordinates."""

    def __init__(self, data, max_distance=None):
        # `data`: iterable of "x, y" coordinate strings.
        # `max_distance`: part-two total-distance bound (puzzle default 10000).
        super().__init__('Day 6: Chronal Coordinates')
        self._points = Points.from_string_lines(data)
        self._min_x, self._min_y = self._points.min()
        self._max_x, self._max_y = self._points.max()
        self._max_distance = max_distance if max_distance is not None else 10000

    def solve_part_one(self):
        """Solves the first part of the task."""
        # Answer: the size of the largest finite closest-point region.
        grid = self._get_distance_grid()
        finite_points = self._get_finite_points(grid)
        return max([point.get_score_on_grid(grid) for point in finite_points])

    def solve_part_two(self):
        """Solves the second part of the task."""
        # Answer: number of cells whose summed distance to every point is
        # strictly below the configured bound.
        assert self._max_distance, 'total distance is not set'
        region_size = 0
        for x in self._get_x_range():
            for y in self._get_y_range():
                if self._get_distance_to_all_coords(x, y) < self._max_distance:
                    region_size += 1
        return region_size

    def _get_distance_grid(self):
        # Map each board cell (x, y) to the id of its closest point, or -1
        # when two points tie for the minimum distance.
        grid = {}
        # generate_id of the board corner is used as an effectively-infinite
        # initial distance (large relative to any on-board Manhattan distance).
        init_max = Point.generate_id(self._max_x, self._max_y)
        for x in self._get_x_range():
            for y in self._get_y_range():
                max_distance = init_max
                for point in self._points:
                    distance = point.distance_from(x, y)
                    if distance < max_distance:
                        grid[(x, y)] = point.id
                        max_distance = distance
                    elif distance == max_distance and distance != init_max:
                        grid[(x, y)] = -1  # tie: the cell belongs to nobody
        return grid

    def _get_finite_points(self, grid):
        # Points whose region reaches the board border extend to infinity
        # and are excluded from part one.
        return [point for point in self._points if not self._is_point_infinite(point, grid)]

    def _is_point_infinite(self, point, grid):
        # A point owning any border cell has an unbounded region.
        for x in self._get_x_range():
            if grid[(x, self._min_y)] == point.id or grid[(x, self._max_y - 1)] == point.id:
                return True
        for y in self._get_y_range():
            if grid[(self._min_x, y)] == point.id or grid[(self._max_x - 1, y)] == point.id:
                return True
        return False

    def _get_distance_to_all_coords(self, x, y):
        # Sum of Manhattan distances from (x, y) to every point.
        score = 0
        for point in self._points:
            score += point.distance_from(x, y)
        return score

    def _get_x_range(self):
        # Horizontal board extent (margins already included by Points.min/max).
        return range(self._min_x, self._max_x)

    def _get_y_range(self):
        # Vertical board extent.
        return range(self._min_y, self._max_y)
class Point:
    """A single coordinate on the board with a unique integer id."""

    def __init__(self, x, y):
        self._x, self._y = x, y
        self._id = Point.generate_id(x, y)

    @property
    def x(self):
        """X coordinate."""
        return self._x

    @property
    def y(self):
        """Y coordinate."""
        return self._y

    @property
    def id(self):
        """Unique id derived from the coordinates."""
        return self._id

    def distance_from(self, x, y):
        """Return the Manhattan distance from this point to ``(x, y)``."""
        return abs(x - self._x) + abs(y - self._y)

    def get_score_on_grid(self, grid):
        """Count how many cells of ``grid`` are owned by this point."""
        return sum(1 for owner in grid.values() if owner == self._id)

    @classmethod
    def from_string(cls, string):
        """Parse a point from a line of the form ``"12, 34"``."""
        x_part, _, y_part = string.partition(', ')
        return cls(int(x_part), int(y_part))

    @staticmethod
    def generate_id(x, y):
        """Map ``(x, y)`` onto a unique non-negative integer.

        Enumerates the plane in expanding square shells, so distinct
        coordinates always receive distinct ids.
        """
        if x > y:
            return x * x + y
        return (y + 1) ** 2 - (x + 1)
class Points(list):
    """A list of ``Point`` objects with board-extent helpers."""

    def min(self):
        """Return ``(min_x - 1, min_y - 1)``: the board's top-left corner
        including a one-cell margin."""
        xs = [point.x for point in self]
        ys = [point.y for point in self]
        return min(xs) - 1, min(ys) - 1

    def max(self):
        """Return ``(max_x + 2, max_y + 2)``: the board's bottom-right
        corner including a margin (suitable as an exclusive range end)."""
        xs = [point.x for point in self]
        ys = [point.y for point in self]
        return max(xs) + 2, max(ys) + 2

    @classmethod
    def from_string_lines(cls, lines):
        """Build a ``Points`` collection from an iterable of "x, y" lines."""
        return cls(map(Point.from_string, lines))
| 3.390625 | 3 |
LOSO_Split.py | wzhlearning/fNIRS-Transformer | 0 | 12760656 | <gh_stars>0
import numpy as np
def Split_Dataset_A(sub, feature, label, channels):
    """
    Leave-one-subject-out split for Dataset A.

    Subjects 1-3 contributed 36 trials each; subjects 4-8 contributed 48.
    The held-out subject becomes the test set, everyone else the training set.

    Args:
        sub: subject (1-8) to leave out.
        feature: flattened fNIRS signals, one row per trial.
        label: trial labels aligned with `feature`.
        channels: number of fNIRS channels (used for reshaping).

    Returns:
        X_train, y_train, X_test, y_test; the X arrays are reshaped to
        (trials, 2, channels, time).
    """
    # Trial boundaries of the held-out subject.
    if sub <= 3:
        start = 36 * (sub - 1)
        stop = start + 36
    else:
        start = 108 + 48 * (sub - 4)
        stop = start + 48
    if sub == 8:
        stop = None  # last subject: take everything up to the end

    X_test, y_test = feature[start:stop], label[start:stop]
    if stop is None:
        X_train, y_train = feature[:start], label[:start]
    else:
        # Training data is everything before and after the test slice.
        X_train = np.append(feature[:start], feature[stop:], axis=0)
        y_train = np.append(label[:start], label[stop:], axis=0)

    X_train = X_train.reshape((X_train.shape[0], 2, channels, -1))
    X_test = X_test.reshape((X_test.shape[0], 2, channels, -1))
    return X_train, y_train, X_test, y_test
def Split_Dataset_A_Res(sub, feature, label, channels):
    """
    Extract one subject's data to evaluate the LOSO-CV results on Dataset A.

    Args:
        sub: subject (1-8) whose trials to extract.
        feature: flattened fNIRS signals, one row per trial.
        label: trial labels aligned with `feature`.
        channels: number of fNIRS channels (used for reshaping).

    Returns:
        X_test, y_test; X_test is reshaped to (trials, 2, channels, time).
    """
    # Same per-subject boundaries as Split_Dataset_A: 36 trials for
    # subjects 1-3, 48 for subjects 4-8.
    if sub <= 3:
        start = 36 * (sub - 1)
        stop = start + 36
    else:
        start = 108 + 48 * (sub - 4)
        stop = start + 48
    if sub == 8:
        stop = None  # last subject: take everything up to the end

    X_test = feature[start:stop]
    y_test = label[start:stop]
    X_test = X_test.reshape((X_test.shape[0], 2, channels, -1))
    return X_test, y_test
def Split_Dataset_B(sub, feature, label, channels):
    """
    Leave-one-subject-out split for Dataset B (29 subjects, 60 trials each).

    Args:
        sub: subject (1-29) to leave out.
        feature: flattened fNIRS signals, one row per trial.
        label: trial labels aligned with `feature`.
        channels: number of fNIRS channels (used for reshaping).

    Returns:
        X_train, y_train, X_test, y_test; the X arrays are reshaped to
        (trials, 2, channels, time).
    """
    start = 60 * (sub - 1)
    stop = None if sub == 29 else start + 60  # last subject runs to the end

    X_test, y_test = feature[start:stop], label[start:stop]
    if stop is None:
        X_train, y_train = feature[:start], label[:start]
    else:
        # Training data is everything before and after the test slice.
        X_train = np.append(feature[:start], feature[stop:], axis=0)
        y_train = np.append(label[:start], label[stop:], axis=0)

    X_train = X_train.reshape((X_train.shape[0], 2, channels, -1))
    X_test = X_test.reshape((X_test.shape[0], 2, channels, -1))
    return X_train, y_train, X_test, y_test
def Split_Dataset_B_Res(sub, feature, label, channels):
    """
    Extract one subject's data to evaluate the LOSO-CV results on Dataset B.

    Args:
        sub: subject (1-29) whose trials to extract.
        feature: flattened fNIRS signals, one row per trial.
        label: trial labels aligned with `feature`.
        channels: number of fNIRS channels (used for reshaping).

    Returns:
        X_test, y_test; X_test is reshaped to (trials, 2, channels, time).
    """
    start = 60 * (sub - 1)
    stop = None if sub == 29 else start + 60  # last subject runs to the end

    X_test = feature[start:stop]
    y_test = label[start:stop]
    X_test = X_test.reshape((X_test.shape[0], 2, channels, -1))
    return X_test, y_test
def Split_Dataset_C(sub, feature, label, channels):
    """
    Leave-one-subject-out split for Dataset C (30 subjects, 75 trials each).

    Args:
        sub: subject (1-30) to leave out.
        feature: flattened fNIRS signals, one row per trial.
        label: trial labels aligned with `feature`.
        channels: number of fNIRS channels (used for reshaping).

    Returns:
        X_train, y_train, X_test, y_test; the X arrays are reshaped to
        (trials, 2, channels, time).
    """
    start = 75 * (sub - 1)
    stop = None if sub == 30 else start + 75  # last subject runs to the end

    X_test, y_test = feature[start:stop], label[start:stop]
    if stop is None:
        X_train, y_train = feature[:start], label[:start]
    else:
        # Training data is everything before and after the test slice.
        X_train = np.append(feature[:start], feature[stop:], axis=0)
        y_train = np.append(label[:start], label[stop:], axis=0)

    X_train = X_train.reshape((X_train.shape[0], 2, channels, -1))
    X_test = X_test.reshape((X_test.shape[0], 2, channels, -1))
    return X_train, y_train, X_test, y_test
def Split_Dataset_C_Res(sub, feature, label, channels):
    """
    Extract one subject's data to evaluate the LOSO-CV results on Dataset C.

    Args:
        sub: subject (1-30) whose trials to extract.
        feature: flattened fNIRS signals, one row per trial.
        label: trial labels aligned with `feature`.
        channels: number of fNIRS channels (used for reshaping).

    Returns:
        X_test, y_test; X_test is reshaped to (trials, 2, channels, time).
    """
    start = 75 * (sub - 1)
    stop = None if sub == 30 else start + 75  # last subject runs to the end

    X_test = feature[start:stop]
    y_test = label[start:stop]
    X_test = X_test.reshape((X_test.shape[0], 2, channels, -1))
    return X_test, y_test
backend/apps/subscriptions/services.py | Zimodra/foodgram-project-react | 0 | 12760657 | import logging
from typing import Optional
from django.conf import settings
from django.db.models.query import QuerySet
from rest_framework.exceptions import ValidationError
import subscriptions.interfaces as interface
from .models import Subscription
from .serializers import SubscriptionsSerializer
from utils.base_services import BaseService
logger = logging.getLogger(__name__)
class SubscriptionsService(BaseService):
    """Business logic for following/unfollowing recipe authors."""

    instance = Subscription
    serializer_class = SubscriptionsSerializer

    # REST API logic
    def list_subs(self) -> dict:
        """Return the (optionally paginated) list of authors the current
        user follows, serialized together with their recipe info."""
        logger.info('Метод SubscriptionsService list_subs вызван')
        queryset = self.get_queryset()
        page = self.paginate_queryset(queryset)
        # NOTE(review): `page` is iterated before the `page is not None`
        # check below -- if pagination is disabled this raises; confirm
        # paginate_queryset never returns None in this project.
        users = [interface.UserInterface().get_user(pk=obj.author, request=self.request) for obj in page]  # noqa
        if page is not None:
            serializer = self.get_serializer(users, many=True)
            return self.get_paginated_data(serializer.data)
        serializer = SubscriptionsSerializer(
            [interface.UserInterface().get_user(pk=obj.author, request=self.request) for obj in queryset],  # noqa
            many=True,
        )
        return serializer.data

    def subscribe(self, pk: int = None) -> dict:
        """Subscribe the current user to author `pk` and return the
        serialized author with `is_subscribed` forced to True."""
        logger.info('Метод SubscriptionsService subscribe вызван')
        author = interface.UserInterface().get_user(pk=pk, request=self.request)
        serializer = self.get_serializer(data=author)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        author['is_subscribed'] = True
        serializer.instance = author
        return serializer.data

    def unsubscribe(self, pk: int = None) -> bool:
        """Remove the current user's subscription to author `pk`.

        Raises ValidationError (via the validator below) when the request
        is a self-unsubscription or no such subscription exists.
        """
        logger.info('Метод SubscriptionsService unsubscribe вызван')
        self._validate_unsubscribe_request(self.request.user.id, pk)
        self.instance.objects.get(follower=self.request.user.id, author=pk).delete()
        return True

    # APP API logic
    def check_is_subscribed(self, user: int, author: int) -> bool:
        """Return True when `user` follows `author`."""
        logger.info('Метод SubscriptionsService check_is_subscribed вызван')
        context = {'follower': user, 'author': author}
        return self.check_is_in(context)

    # Interface logic
    def get_author_recipes(self, author: int) -> QuerySet:
        """Delegate to the recipes interface for the author's recipes."""
        logger.info('Метод SubscriptionsService get_recipes вызван')
        return interface.RecipesInrerface().get_author_recipes(author=author)

    def get_count_author_recipes(self, author: int) -> int:
        """Delegate to the recipes interface for the author's recipe count."""
        logger.info('Метод SubscriptionsService get_count_recipes вызван')
        return interface.RecipesInrerface().get_count_author_recipes(author=author)

    # Service logic
    def get_queryset(self):
        # Restrict the base queryset to the current user's own subscriptions.
        queryset = super().get_queryset()
        return queryset.filter(follower=self.request.user.id)

    # local functions
    def _validate_unsubscribe_request(self, follower: int, author: int) -> None:
        # Raises ValidationError on self-unsubscription or when the
        # subscription does not exist; returns None otherwise.
        if follower == author:
            raise ValidationError(
                {'errors': settings.ERROR_MESSAGE.get('self_unsubscription')}
            )
        if not self.instance.objects.filter(follower=follower, author=author).exists():
            raise ValidationError(
                {'errors': settings.ERROR_MESSAGE.get('not_subscribe')}
            )
class SubscriptionsAdminService:
    """Admin-facing read helpers that delegate to the users interface."""

    def get_user(self, pk: int) -> QuerySet:
        """Fetch a single user by primary key via the admin interface."""
        logger.info('Метод SubscriptionsAdminService get_user вызван')
        return interface.UsersAdminInterface().get_user(pk=pk)

    def get_users(self) -> QuerySet:
        """Fetch all users via the admin interface."""
        logger.info('Метод SubscriptionsAdminService get_users вызван')
        return interface.UsersAdminInterface().get_users()
| 2 | 2 |
deal_data.py | haishuowang/nl2sql_baseline-python3 | 0 | 12760658 | import os
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
import re
date_now = datetime.now()  # run date, used to build "M月D日" date strings
#
# data = pd.read_csv('/home/haishuowang/spider_data/2019-07-17/兰格钢铁网', sep='|', header=None)
# data.columns = ['Title', 'w_time', 'n_time', 'Link', 'Info']
# data = data[~data['Title'].duplicated(keep='first')]
#
# # x =
# data['deal_title'] = data.apply(lambda x: x['Title'].replace(f'{int(date_now.month)}月', '')
#                                 .replace(f'{int(date_now.day)}日', '')
#                                 , axis=1)
#
# Sentiment keyword lists: neutral / bullish / bearish markers that may
# appear in headlines or article bodies (e.g. 稳 "steady", 涨 "rise",
# 跌 "fall").
mid_word = ['稳', '→', '震荡', '平', ]
buy_word = ['涨', '上调', '↑', '上行', '强势', '走高']
sell_word = ['跌', '降', '下调', '探低', '↓', '下行', '弱势', '走低']
# # Fangda Special Steel
#
#
# Spider source sites whose dump files this script can process.
file_name_list = ['全球金属网', '兰格钢铁网', '大宗内参', '海鑫钢网', '瑞达期货', '生意社', '西本新干线']
def contain(x, key_word, label=1):
    """Return `label` if any keyword occurs in the text `x`, else NaN.

    Args:
        x: text to scan (a news title or article body).
        key_word: iterable of substrings to look for.
        label: sentiment value returned on a hit (default 1).

    Returns:
        `label` on the first matching keyword, otherwise ``np.nan``.
    """
    # The original for/if/else layout made the miss case easy to misread
    # (an `else` bound to the `if` would bail out after the first keyword);
    # the explicit fallthrough return below is unambiguous: NaN only when
    # NO keyword matched.
    for key in key_word:
        if key in x:
            return label
    return np.nan
def load_spot_data(read_path):
    """Load a '|'-separated spider dump and index it by the news timestamp.

    Args:
        read_path: path to a headerless dump with columns
            Title | write time | news time | link | body.

    Returns:
        DataFrame with named columns, indexed by the parsed `n_time`.
    """
    column_names = ['Title', 'w_time', 'n_time', 'Link', 'Info']
    frame = pd.read_csv(read_path, sep='|', header=None, names=column_names)
    frame.index = pd.to_datetime(frame['n_time'])
    return frame
def filer_target_word(raw_df):
    """Return only the rows whose Title mentions steel ('钢')."""
    steel_mask = raw_df['Title'].str.contains('钢')
    return raw_df[steel_mask]
def get_file_pos(file_name):
    """Scan every dated dump of `file_name` under ~/temp, score each headline
    and body against the sentiment keyword lists, and persist the combined
    table to CSV.

    NOTE(review): all paths are hard-coded to this machine's home directory.
    """
    root_path = '/home/haishuowang/temp'
    date_list = sorted(os.listdir(root_path))
    # file_name = '兰格钢铁网'
    data_list = []
    for target_date in date_list:
        read_path = f'{root_path}/{target_date}/{file_name}'
        if os.path.exists(f'{root_path}/{target_date}/{file_name}'):
            file_data = pd.read_csv(read_path, sep='|', header=None)
            file_data.columns = ['Title', 'w_time', 'n_time', 'Link', 'Info']
            # Shift the index 10 minutes past the news timestamp (presumably
            # a reaction-time buffer -- TODO confirm with downstream users).
            file_data.index = pd.to_datetime(file_data['n_time']) + timedelta(minutes=10)
            file_data = file_data.sort_index()
            # Title-based sentiment flags: 0 neutral, 1 bullish, -1 bearish,
            # NaN when no keyword matched.
            mid = file_data['Title'].apply(lambda x: contain(x, mid_word, label=0))
            mid.name = 'mid'
            buy = file_data['Title'].apply(lambda x: contain(x, buy_word, label=1))
            buy.name = 'buy'
            sell = file_data['Title'].apply(lambda x: contain(x, sell_word, label=-1))
            sell.name = 'sell'

            # Same flags computed from the article body.
            mid_info = file_data['Info'].apply(lambda x: contain(x, mid_word, label=0))
            mid_info.name = 'mid_info'
            buy_info = file_data['Info'].apply(lambda x: contain(x, buy_word, label=1))
            buy_info.name = 'buy_info'
            sell_info = file_data['Info'].apply(lambda x: contain(x, sell_word, label=-1))
            sell_info.name = 'sell_info'
            # no_info = mid_info.isna() & buy_info.isna() & sell_info.isna()
            part_info = pd.concat([file_data['Title'], mid, buy, sell, mid_info, buy_info, sell_info], axis=1)
            data_list.append(part_info)
        else:
            # dump missing for this date -- log it and move on
            print(target_date)
            pass
    all_info = pd.concat(data_list, axis=0)
    all_info.to_csv(f'/home/haishuowang/PycharmProjects/{file_name}.csv')
    return all_info
def get_spider_file_pos(file_name='生意社'):
    """Like get_file_pos, but over the ~/spider_data dumps after 2019-07-18,
    keeping only steel-related headlines (via filer_target_word)."""
    root_path = '/home/haishuowang/spider_data'
    # Only date-named directories (YYYY-MM-DD) past the crawler switchover.
    date_list = sorted([x for x in os.listdir(root_path) if len(x) == 10 and '-' in x and x > '2019-07-18'])
    data_list = []
    for target_date in date_list:
        read_path = f'/home/haishuowang/spider_data/{target_date}/{file_name}'
        if os.path.exists(f'{root_path}/{target_date}/{file_name}'):
            file_data = load_spot_data(read_path)
            file_data = filer_target_word(file_data)
            # Shift the index 10 minutes past the news timestamp (same
            # buffer as get_file_pos).
            file_data.index = pd.to_datetime(file_data['n_time']) + timedelta(minutes=10)
            file_data = file_data.sort_index()
            # Title-based sentiment flags: 0 neutral, 1 bullish, -1 bearish.
            mid = file_data['Title'].apply(lambda x: contain(x, mid_word, label=0))
            mid.name = 'mid'
            buy = file_data['Title'].apply(lambda x: contain(x, buy_word, label=1))
            buy.name = 'buy'
            sell = file_data['Title'].apply(lambda x: contain(x, sell_word, label=-1))
            sell.name = 'sell'

            # Same flags computed from the article body.
            mid_info = file_data['Info'].apply(lambda x: contain(x, mid_word, label=0))
            mid_info.name = 'mid_info'
            buy_info = file_data['Info'].apply(lambda x: contain(x, buy_word, label=1))
            buy_info.name = 'buy_info'
            sell_info = file_data['Info'].apply(lambda x: contain(x, sell_word, label=-1))
            sell_info.name = 'sell_info'

            part_info = pd.concat([file_data['Title'], mid, buy, sell, mid_info, buy_info, sell_info], axis=1)
            data_list.append(part_info)
        else:
            # dump missing for this date -- log it and move on
            print(target_date)
            pass
    all_info = pd.concat(data_list, axis=0)
    all_info.to_csv(f'/home/haishuowang/PycharmProjects/{file_name}_spider.csv')
    return all_info
# data_0717 = load_spot_data('/home/haishuowang/spider_data/2019-07-17/生意社')
# data_0719 = load_spot_data('/home/haishuowang/spider_data/2019-07-19/生意社')
# # data_0720 = load_spot_data('/home/haishuowang/spider_data/2019-07-20/生意社')
# data_0722 = load_spot_data('/home/haishuowang/spider_data/2019-07-22/生意社')
# data_0723 = load_spot_data('/home/haishuowang/spider_data/2019-07-23/生意社')
#
# data_0717 = filer_target_word(data_0717)
# data_0719 = filer_target_word(data_0719)
# data_0722 = filer_target_word(data_0722)
# data_0723 = filer_target_word(data_0723)
# all_info = get_spider_file_pos(file_name='生意社')
# for file_name in file_name_list:
# all_info = get_file_pos(file_name)
# if __name__ == '__main__':
def deal_jd_data(fut_name='鸡蛋', file_name='金谷高科'):
    """Print the daily `file_name` dumps for the egg (鸡蛋) future since
    2019-06-25.

    NOTE(review): the Liaoning (辽宁) row collection below is commented out,
    so `result_list` stays empty and the final `pd.concat` raises
    ValueError -- as written, the function is only usable for its printed
    output; confirm before re-enabling.
    """
    root_path = f'/home/haishuowang/PycharmProjects/dat_whs/{fut_name}/temp'
    target_date_list = sorted([x for x in os.listdir(root_path) if x >= '2019-06-25'])
    print(target_date_list)
    result_list = []
    for target_date in target_date_list:
        if os.path.exists(f'{root_path}/{target_date}/{file_name}'):
            print('_______________')
            print(target_date)
            info_data = load_spot_data(f'{root_path}/{target_date}/{file_name}')
            print(info_data)
            # print(info_data[info_data['Title'].str.contains('辽宁')])
            # result_list.append(info_data[info_data['Title'].str.contains('辽宁')])
        else:
            pass
    return pd.concat(result_list, axis=0)
def deal_cf_data(fut_name='棉花', file_name='金谷高科'):
    """Load one spot-price dump for `fut_name`, time-sorted and de-duplicated."""
    dump_path = f'/home/haishuowang/PycharmProjects/dat_whs/{fut_name}/temp/{file_name}'
    frame = load_spot_data(dump_path)
    return frame.sort_index().drop_duplicates()
# '/home/haishuowang/PycharmProjects/dat_whs/甲醛/temp/生意社'
# info_data = deal_cf_data(fut_name='甲醛', file_name='生意社')
# Script entry: load the formaldehyde (甲醛) spot dump from 生意社.
fut_name = '甲醛'
file_name = '生意社'
info_data = deal_cf_data(fut_name, file_name)
def title_filter(info_sr):
    """Return True when the row's Title mentions the row's own date.

    ``info_sr`` is expected to be a one-column Series (``['Title']``) whose
    ``name`` is a date-like index label. The date is rendered as
    ``'<month>月<day>日'`` with leading zeros stripped and matched as a
    substring of the title. Debug prints are preserved from the original.
    """
    print(info_sr)
    headline = info_sr.iloc[0]
    print(info_sr.name)
    row_date = pd.to_datetime(info_sr.name)
    month_t, day_t = row_date.strftime('%m/%d').split('/')
    print(headline, month_t, day_t)
    date_str = f'{int(month_t)}月{int(day_t)}日'
    return date_str in headline
# Keep only rows whose title actually quotes the row's own date.
info_data = info_data[info_data[['Title']].apply(title_filter, axis=1)]
# Label title/body text against keyword lists. NOTE(review): ``contain``,
# ``mid_word``, ``buy_word`` and ``sell_word`` are defined elsewhere in this
# file — presumably contain() returns the label when a keyword is found;
# confirm against their definitions.
mid = info_data['Title'].apply(lambda x: contain(x, mid_word, label=0))
mid.name = 'mid'
buy = info_data['Title'].apply(lambda x: contain(x, buy_word, label=1))
buy.name = 'buy'
sell = info_data['Title'].apply(lambda x: contain(x, sell_word, label=-1))
sell.name = 'sell'
mid_info = info_data['Info'].apply(lambda x: contain(x, mid_word, label=0))
mid_info.name = 'mid_info'
buy_info = info_data['Info'].apply(lambda x: contain(x, buy_word, label=1))
buy_info.name = 'buy_info'
sell_info = info_data['Info'].apply(lambda x: contain(x, sell_word, label=-1))
sell_info.name = 'sell_info'
# Combine labels; pos_1 uses title-only signals, pos_2 adds body signals.
part_info = pd.concat([info_data['Title'], mid, buy, sell, mid_info, buy_info, sell_info], axis=1)
part_info['pos_1'] = part_info[['mid', 'buy', 'sell']].sum(1)
part_info['pos_2'] = part_info[['mid', 'buy', 'sell', 'mid_info', 'buy_info', 'sell_info']].sum(1)
# '|' separator because titles routinely contain commas.
part_info.to_csv(f'~/PycharmProjects/dat_whs/{fut_name}_{file_name}.csv', sep='|')
# result_list = []
# for n_time, part_a in a.iterrows():
# city_info_list = part_a['Info'].split(':')[1].split('。')[:-1]
# for city_info in city_info_list:
# # print(city_info)
# city_name = city_info.split('市')[0]
# price_info = city_info.split('市')[1].split(',')[0]
# price_num = float(re.findall('(?<=为).*?(?=元)', price_info)[0])
# print(n_time, city_name, price_num)
# result_list.append([n_time, city_name, price_num])
#
# result_info = pd.DataFrame(result_list, columns=['n_time', 'city_name', 'price_num']).set_index(['n_time', 'city_name'])
# print(result_info.xs('大连', level=1))
| 2.734375 | 3 |
mkapi/core/structure.py | simbuerg/mkapi | 72 | 12760659 | """This module provides base class of [Node](mkapi.core.node.Node) and
[Module](mkapi.core.module.Module)."""
from dataclasses import dataclass, field
from typing import Any, Iterator, List, Union
from mkapi.core.base import Base, Type
from mkapi.core.docstring import Docstring, get_docstring
from mkapi.core.object import (get_origin, get_qualname,
get_sourcefile_and_lineno,
split_prefix_and_name)
from mkapi.core.signature import Signature, get_signature
"a.b.c".rpartition(".")
@dataclass
class Object(Base):
    """A documented Python object (module, class, function, ...).

    Args:
        name: Object name.
        prefix: Object prefix.
        qualname: Qualified name.
        kind: Object kind such as 'class', 'function', *etc.*
        signature: Signature if object is module or callable.

    Attributes:
        id: ID attribute of HTML.
        type: Type for missing Returns and Yields sections.
    """

    prefix: str = ""
    qualname: str = ""
    kind: str = ""
    signature: Signature = field(default_factory=Signature)
    module: str = field(init=False)
    markdown: str = field(init=False)
    id: str = field(init=False)
    type: Type = field(default_factory=Type, init=False)

    def __post_init__(self):
        # Imported here to avoid a circular import at module load time.
        from mkapi.core import linker

        # Fully qualified HTML id: "<prefix>.<name>" when a prefix exists.
        self.id = f"{self.prefix}.{self.name}" if self.prefix else self.name
        # Module path is the id with the qualified name (and its dot) removed.
        if self.qualname:
            self.module = self.id[: -len(self.qualname) - 1]
        else:
            self.module = self.id
        # Build the markdown link(s) lazily, only if not already set.
        if not self.markdown:
            linked_name = linker.link(self.name, self.id)
            if self.prefix:
                linked_prefix = linker.link(self.prefix, self.prefix)
                self.markdown = f"{linked_prefix}.{linked_name}"
            else:
                self.markdown = linked_name

    def __repr__(self):
        return f"{self.__class__.__name__}({self.id!r})"

    def __iter__(self) -> Iterator[Base]:
        yield from self.type
        yield self
@dataclass
class Tree:
    """Base class of [Node](mkapi.core.node.Node) and
    [Module](mkapi.core.module.Module): a documented object plus its members.

    Args:
        obj: Object.

    Attributes:
        sourcefile: Source file path.
        lineno: Line number.
        object: Object instance.
        docstring: Docstring instance.
        parent: Parent instance.
        members: Member instances.
    """

    obj: Any = field()
    sourcefile: str = field(init=False)
    lineno: int = field(init=False)
    object: Object = field(init=False)
    docstring: Docstring = field(init=False)
    parent: Any = field(default=None, init=False)
    members: List[Any] = field(init=False)

    def __post_init__(self):
        obj = get_origin(self.obj)
        self.sourcefile, self.lineno = get_sourcefile_and_lineno(obj)
        prefix, name = split_prefix_and_name(obj)
        self.object = Object(
            prefix=prefix,
            name=name,
            qualname=get_qualname(obj),
            kind=self.get_kind(),
            signature=get_signature(obj),
        )
        self.docstring = get_docstring(obj)
        self.obj = obj
        self.members = self.get_members()
        for member in self.members:
            member.parent = self

    def __repr__(self):
        return (
            f"{self.__class__.__name__}({self.object.id!r}, "
            f"num_sections={len(self.docstring.sections)}, "
            f"num_members={len(self.members)})"
        )

    def __getitem__(self, index: Union[int, str, List[str]]):
        """Returns the member selected by *index*.

        An int indexes ``members`` directly; a plain string selects the member
        whose object name matches; a dotted string (``"a.b"``) or a list of
        names descends recursively.

        Raises:
            IndexError: If no member found.
        """
        if isinstance(index, int):
            return self.members[index]
        if isinstance(index, list):
            node = self
            for name in index:
                node = node[name]
            return node
        if isinstance(index, str) and "." in index:
            return self[index.split(".")]
        for member in self.members:
            if member.object.name == index:
                return member
        raise IndexError

    def __len__(self):
        return len(self.members)

    def __contains__(self, name):
        return any(member.object.name == name for member in self.members)

    def get_kind(self) -> str:
        """Returns kind of self (implemented by subclasses)."""
        raise NotImplementedError

    def get_members(self) -> List["Tree"]:
        """Returns a list of members (implemented by subclasses)."""
        raise NotImplementedError

    def get_markdown(self) -> str:
        """Returns a Markdown source for docstring of self (subclasses)."""
        raise NotImplementedError

    def walk(self) -> Iterator["Tree"]:
        """Yields self, then every descendant, depth-first."""
        yield self
        for member in self.members:
            yield from member.walk()
| 2.5625 | 3 |
iv/Leetcode/easy/e172_factorial_training_zero.py | iamsuman/iv | 2 | 12760660 | <reponame>iamsuman/iv
class Solution:
    def trailingZeroes(self, n: int) -> int:
        """Return the number of trailing zeros in n!.

        Each trailing zero comes from a factor 10 = 2 * 5, and factors of 2
        always outnumber factors of 5 in n!, so only the 5s need counting.
        Legendre's formula gives that count as n//5 + n//25 + n//125 + ...
        This runs in O(log n), replacing the previous O(n) scan that built
        per-integer factor lists for both 2 and 5 (whose min is the same
        value, since v2(n!) >= v5(n!)).
        """
        count = 0
        power = 5
        while power <= n:
            count += n // power
            power *= 5
        return count

    def func(self, n, i):
        """Return how many times *i* divides *n* (the exponent of i in n)."""
        count = 0
        while n > 0:
            if n % i == 0:
                count += 1
                n = n // i
            else:
                break
        return count

    def trailingZeroes2(self, n: int) -> int:
        """Brute-force check: compute n! and count its literal trailing zeros."""
        fact = self.factorial(n)
        count = 0
        while fact > 0:
            if fact % 10 == 0:
                count += 1
                fact = fact // 10
            else:
                break
        return count

    def factorial(self, n: int):
        """Return n! computed iteratively (0! == 1)."""
        if n == 0:
            return 1
        res = 1
        while n > 0:
            res = res * n
            n -= 1
        return res
# Ad-hoc smoke test: each assignment overwrites the previous one, so only
# n = 8785 is actually exercised by the prints below.
n = 3
n = 5
n = 0
n = 10
n = 8785
s = Solution()
print(s.trailingZeroes(n))
# print(s.factorial2(n))
# NOTE(review): trailingZeroes2 computes 8785! exactly — slow but finite.
print(s.trailingZeroes2(n))
| 3.203125 | 3 |
src/snosearch/tests/dummy_requests.py | ENCODE-DCC/snovault-search | 1 | 12760661 | <filename>src/snosearch/tests/dummy_requests.py
from pyramid.request import Request
from snosearch.adapters.flask.requests import RequestAdapter
from snosearch.interfaces import JSONLD_CONTEXT
from werkzeug.datastructures import ImmutableOrderedMultiDict
class FlaskDummyRequestAdapter(RequestAdapter):
    """Flask request adapter for tests: writable environ via item assignment,
    a fake submitter principal, and a minimal ACL-based permission check."""

    def __setitem__(self, key, value):
        # Rebuild the wrapped request so the new environ entry takes effect.
        self._request.__class__.parameter_storage_class = ImmutableOrderedMultiDict
        env = self._request.environ.copy()
        env[key] = value
        self._request = self._request.__class__(env)

    def __getitem__(self, key):
        return self._request.environ[key]

    @property
    def environ(self):
        # Expose self so tests can read/write environ through item access.
        return self

    @property
    def effective_principals(self):
        remote_user = self._request.environ.get('REMOTE_USER')
        if remote_user == 'TEST_SUBMITTER':
            return ['group.submitter']
        return super().effective_principals

    def has_permission(self, action):
        # Walk principals in order; the first matching ACE wins.
        acl_factory = getattr(
            self.context,
            '__acl__',
            lambda: []
        )
        for principal in self.effective_principals:
            for ace in acl_factory():
                if ace[1] == principal and ace[2] == action:
                    return ace[0]
        return False
class PyramidDummyRequest(Request):
    """Pyramid request double with the same fake submitter principal and
    ACL permission check as the Flask adapter, plus a stub route_path."""

    __parent__ = None

    @property
    def effective_principals(self):
        remote_user = self.environ.get('REMOTE_USER')
        if remote_user == 'TEST_SUBMITTER':
            return ['group.submitter']
        return super().effective_principals

    def has_permission(self, action):
        # Walk principals in order; the first matching ACE wins.
        get_acl = getattr(
            self.context,
            '__acl__',
            lambda: []
        )
        for principal in self.effective_principals:
            for ace in get_acl():
                if ace[1] == principal and ace[2] == action:
                    return ace[0]
        return False

    def route_path(self, context):
        # Only the JSON-LD terms route is needed by the tests.
        if context == JSONLD_CONTEXT:
            return '/terms/'
        return None
| 2.015625 | 2 |
src/resource/style.py | AaronWxy/TestCommon | 0 | 12760662 | css = """
body {
font-family: Roboto, Helvetica, Arial, sans-serif;
font-size:14px;
color: #555555;
background-color:#f5f5f5;
margin-left: 5px;
}
.container{
padding-right:5px;
padding-left:5px;
margin-right:auto;
margin-left:auto
}
.panel{
margin-bottom:20px;
background-color:#fff;
border:1px solid #ddd;
border-radius:1px;
}
.panel-footer{
padding:2px 25px;
background-color:#ffffff;
border:1px solid transparent;
font-size:13px;
text-decoration:none !important;
text-decoration:none;
}
.panel-body{
padding:5px;
background-color:#f5f5f5;
}
table {
font-family: arial, sans-serif;
border-collapse: collapse;
background-color: #ffffff;
width:100%;
}
th {
font-size:13px;
background-color: #e2e2e2;
border: 1px solid #f2f2f2;
text-align: left;
padding: 8px;
}
td {
font-size:14px;
border: 1px solid #f2f2f2;
text-align: left;
padding: 8px;
}
tr:nth-child(odd) {
background-color: #f2f2f2;
}
.pass, .pass:link, .pass:visited {
color:#FFFFFF;
background-color:#4CAF50;
text-decoration: none;
padding: 3px 11px;
text-align: center;
font-size:12px;
}
.pass:hover {
background-color: #5ec162;
}
.fail, .fail:link, .fail:visited {
color:#FFFFFF;
background-color:#ba2828;
text-decoration: none;
padding: 3px 11px;
text-align: center;
font-size:12px;
}
.fail:hover, .fail2:hover {
background-color: #d83434;
}
.fail2, .fail2:link, .fail2:visited {
color:#FFFFFF;
background-color:#ba2828;
text-decoration: none;
padding: 3px 8px;
text-align: center;
font-size:12px;
}
.skip, .skip:link, .skip:visited {
color:#FFFFFF;
background-color:#FFC300;
text-decoration: none;
padding: 3px 11px;
text-align: center;
font-size:12px;
}
.skip:hover, .skip2:hover {
background-color: #FF5733;
}
.skip2, .skip2:link, .skip2:visited {
color:#FFFFFF;
background-color:#FFC300;
text-decoration: none;
padding: 3px 8px;
text-align: center;
font-size:12px;
}
.testrail, .testrail:link, .testrail:visited {
color: #555555;
text-decoration: none;
""" | 1.726563 | 2 |
tests/test_rechunk.py | andrewbrettin/rechunker | 0 | 12760663 | from functools import partial
import importlib
import pytest
from pathlib import Path
import zarr
import dask.array as dsa
import dask
import dask.core
import xarray
import numpy
from rechunker import api
_DIMENSION_KEY = "_ARRAY_DIMENSIONS"
def requires_import(module, *args):
    """Wrap *args in a ``pytest.param`` that is skipped when *module*
    cannot be imported (used to gate optional executor backends)."""
    try:
        importlib.import_module(module)
    except ImportError:
        available = False
    else:
        available = True
    mark = pytest.mark.skipif(not available, reason=f"requires {module}")
    return pytest.param(*args, marks=mark)
# Skip-unless-importable parameter wrappers for each optional executor backend.
requires_beam = partial(requires_import, "apache_beam")
requires_prefect = partial(requires_import, "prefect")
requires_pywren = partial(requires_import, "pywren_ibm_cloud")
@pytest.fixture(params=[(8000, 200), {"y": 8000, "x": 200}])
def target_chunks(request):
    """Target chunking expressed both as a tuple and as a dim-name mapping."""
    return request.param
def test_invalid_executor():
    """An unknown executor name must be rejected with a clear error."""
    with pytest.raises(ValueError, match="unrecognized executor"):
        api._get_executor("unknown")
@pytest.mark.parametrize("shape", [(100, 50)])
@pytest.mark.parametrize("source_chunks", [(10, 50)])
@pytest.mark.parametrize("target_chunks", [(20, 10)])
@pytest.mark.parametrize("max_mem", ["10MB"])
@pytest.mark.parametrize("executor", ["dask"])
def test_rechunk_dataset(
tmp_path, shape, source_chunks, target_chunks, max_mem, executor
):
target_store = str(tmp_path / "target.zarr")
temp_store = str(tmp_path / "temp.zarr")
a = numpy.arange(numpy.prod(shape)).reshape(shape).astype("f4")
a[-1] = numpy.nan
ds = xarray.Dataset(
dict(
a=xarray.DataArray(
a, dims=["x", "y"], attrs={"a1": 1, "a2": [1, 2, 3], "a3": "x"}
),
b=xarray.DataArray(numpy.ones(shape[0]), dims=["x"]),
c=xarray.DataArray(numpy.ones(shape[1]), dims=["y"]),
),
coords=dict(
cx=xarray.DataArray(numpy.ones(shape[0]), dims=["x"]),
cy=xarray.DataArray(numpy.ones(shape[1]), dims=["y"]),
),
attrs={"a1": 1, "a2": [1, 2, 3], "a3": "x"},
)
ds = ds.chunk(chunks=dict(zip(["x", "y"], source_chunks)))
options = dict(
a=dict(
compressor=zarr.Blosc(cname="zstd"),
dtype="int32",
scale_factor=0.1,
_FillValue=-9999,
)
)
rechunked = api.rechunk(
ds,
target_chunks=dict(a=target_chunks, b=target_chunks[:1]),
max_mem=max_mem,
target_store=target_store,
target_options=options,
temp_store=temp_store,
executor=executor,
)
assert isinstance(rechunked, api.Rechunked)
rechunked.execute()
# Validate encoded variables
dst = xarray.open_zarr(target_store, decode_cf=False)
assert dst.a.dtype == options["a"]["dtype"]
assert all(dst.a.values[-1] == options["a"]["_FillValue"])
assert dst.a.encoding["compressor"] is not None
# Validate decoded variables
dst = xarray.open_zarr(target_store, decode_cf=True)
assert dst.a.data.chunksize == target_chunks
assert dst.b.data.chunksize == target_chunks[:1]
assert dst.c.data.chunksize == source_chunks[1:]
xarray.testing.assert_equal(ds.compute(), dst.compute())
assert ds.attrs == dst.attrs
@pytest.mark.parametrize("shape", [(8000, 8000)])
@pytest.mark.parametrize("source_chunks", [(200, 8000)])
@pytest.mark.parametrize("dtype", ["f4"])
@pytest.mark.parametrize("max_mem", [25600000, "25.6MB"])
@pytest.mark.parametrize(
"executor",
[
"dask",
"python",
requires_beam("beam"),
requires_prefect("prefect"),
requires_pywren("pywren"),
],
)
@pytest.mark.parametrize(
"dims,target_chunks",
[
(None, (8000, 200)),
# would be nice to support this syntax eventually
pytest.param(None, (-1, 200), marks=pytest.mark.xfail),
(["y", "x"], (8000, 200)),
(["y", "x"], {"y": 8000, "x": 200}),
# can't infer missing dimension chunk specification
pytest.param(["y", "x"], {"x": 200}, marks=pytest.mark.xfail),
# can't use dict syntax without array dims
pytest.param(None, {"y": 8000, "x": 200}, marks=pytest.mark.xfail),
],
)
def test_rechunk_array(
tmp_path, shape, source_chunks, dtype, dims, target_chunks, max_mem, executor
):
### Create source array ###
store_source = str(tmp_path / "source.zarr")
source_array = zarr.ones(
shape, chunks=source_chunks, dtype=dtype, store=store_source
)
# add some attributes
source_array.attrs["foo"] = "bar"
if dims:
source_array.attrs[_DIMENSION_KEY] = dims
### Create targets ###
target_store = str(tmp_path / "target.zarr")
temp_store = str(tmp_path / "temp.zarr")
rechunked = api.rechunk(
source_array,
target_chunks,
max_mem,
target_store,
temp_store=temp_store,
executor=executor,
)
assert isinstance(rechunked, api.Rechunked)
target_array = zarr.open(target_store)
if isinstance(target_chunks, dict):
target_chunks_list = [target_chunks[d] for d in dims]
else:
target_chunks_list = target_chunks
assert target_array.chunks == tuple(target_chunks_list)
assert dict(source_array.attrs) == dict(target_array.attrs)
result = rechunked.execute()
assert isinstance(result, zarr.Array)
a_tar = dsa.from_zarr(target_array)
assert dsa.equal(a_tar, 1).all().compute()
@pytest.mark.parametrize("shape", [(8000, 8000)])
@pytest.mark.parametrize("source_chunks", [(200, 8000), (800, 8000)])
@pytest.mark.parametrize("dtype", ["f4"])
@pytest.mark.parametrize("max_mem", [25600000])
@pytest.mark.parametrize(
"target_chunks", [(200, 8000), (800, 8000), (8000, 200), (400, 8000),],
)
def test_rechunk_dask_array(
tmp_path, shape, source_chunks, dtype, target_chunks, max_mem
):
### Create source array ###
source_array = dsa.ones(shape, chunks=source_chunks, dtype=dtype)
### Create targets ###
target_store = str(tmp_path / "target.zarr")
temp_store = str(tmp_path / "temp.zarr")
rechunked = api.rechunk(
source_array, target_chunks, max_mem, target_store, temp_store=temp_store
)
assert isinstance(rechunked, api.Rechunked)
target_array = zarr.open(target_store)
assert target_array.chunks == tuple(target_chunks)
result = rechunked.execute()
assert isinstance(result, zarr.Array)
a_tar = dsa.from_zarr(target_array)
assert dsa.equal(a_tar, 1).all().compute()
@pytest.mark.parametrize(
"executor",
[
"dask",
"python",
requires_beam("beam"),
requires_prefect("prefect"),
requires_pywren("pywren"),
],
)
def test_rechunk_group(tmp_path, executor):
store_source = str(tmp_path / "source.zarr")
group = zarr.group(store_source)
group.attrs["foo"] = "bar"
# 800 byte chunks
a = group.ones("a", shape=(5, 10, 20), chunks=(1, 10, 20), dtype="f4")
a.attrs["foo"] = "bar"
b = group.ones("b", shape=(20,), chunks=(10,), dtype="f4")
b.attrs["foo"] = "bar"
target_store = str(tmp_path / "target.zarr")
temp_store = str(tmp_path / "temp.zarr")
max_mem = 1600 # should force a two-step plan for a
target_chunks = {"a": (5, 10, 4), "b": (20,)}
rechunked = api.rechunk(
group,
target_chunks,
max_mem,
target_store,
temp_store=temp_store,
executor=executor,
)
assert isinstance(rechunked, api.Rechunked)
target_group = zarr.open(target_store)
assert "a" in target_group
assert "b" in target_group
assert dict(group.attrs) == dict(target_group.attrs)
rechunked.execute()
for aname in target_chunks:
assert target_group[aname].chunks == target_chunks[aname]
a_tar = dsa.from_zarr(target_group[aname])
assert dsa.equal(a_tar, 1).all().compute()
def sample_xarray_dataset():
    """Build a small chunked xarray.Dataset fixture (vars a, b; attrs set)."""
    return xarray.Dataset(
        dict(
            a=xarray.DataArray(
                dsa.ones(shape=(10, 20, 40), chunks=(5, 10, 4), dtype="f4"),
                dims=("x", "y", "z"),
                attrs={"foo": "bar"},
            ),
            b=xarray.DataArray(
                dsa.ones(shape=(8000,), chunks=(200,), dtype="f4"),
                dims="w",
                attrs={"foo": "bar"},
            ),
        ),
        attrs={"foo": "bar"},
    )
def sample_zarr_group(tmp_path):
    """Build an on-disk zarr group fixture with two attributed arrays."""
    path = str(tmp_path / "source.zarr")
    group = zarr.group(path)
    group.attrs["foo"] = "bar"
    # 800 byte chunks
    a = group.ones("a", shape=(10, 20, 40), chunks=(5, 10, 4), dtype="f4")
    a.attrs["foo"] = "bar"
    b = group.ones("b", shape=(8000,), chunks=(200,), dtype="f4")
    b.attrs["foo"] = "bar"
    return group
def sample_zarr_array(tmp_path):
    """Build an on-disk zarr array fixture (8000x8000 f4, row chunks)."""
    shape = (8000, 8000)
    source_chunks = (200, 8000)
    dtype = "f4"
    dims = None
    path = str(tmp_path / "source.zarr")
    array = zarr.ones(shape, chunks=source_chunks, dtype=dtype, store=path)
    # add some attributes
    array.attrs["foo"] = "bar"
    if dims:
        # NOTE(review): dead branch — dims is hard-coded to None above.
        array.attrs[_DIMENSION_KEY] = dims
    return array
@pytest.fixture(params=["Array", "Group", "Dataset"])
def rechunk_args(tmp_path, request):
target_store = str(tmp_path / "target.zarr")
temp_store = str(tmp_path / "temp.zarr")
max_mem = 1600 # should force a two-step plan for a and b
target_chunks = {"a": (10, 5, 4), "b": (100,)}
args = dict(
target_chunks=target_chunks,
max_mem=max_mem,
target_store=target_store,
temp_store=temp_store,
)
if request.param == "Dataset":
ds = sample_xarray_dataset()
args.update({"source": ds})
elif request.param == "Group":
group = sample_zarr_group(tmp_path)
args.update({"source": group})
else:
array = sample_zarr_array(tmp_path)
max_mem = 25600000
target_chunks = (8000, 200)
args.update(
{"source": array, "target_chunks": target_chunks, "max_mem": max_mem,}
)
return args
@pytest.fixture()
def rechunked(rechunk_args):
return api.rechunk(**rechunk_args)
def test_repr(rechunked):
assert isinstance(rechunked, api.Rechunked)
repr_str = repr(rechunked)
assert repr_str.startswith("<Rechunked>")
assert all(thing in repr_str for thing in ["Source", "Intermediate", "Target"])
def test_repr_html(rechunked):
rechunked._repr_html_() # no exceptions
def _is_collection(source):
assert isinstance(
source,
(dask.array.Array, zarr.core.Array, zarr.hierarchy.Group, xarray.Dataset),
)
return isinstance(source, (zarr.hierarchy.Group, xarray.Dataset))
def _wrap_options(source, options):
if _is_collection(source):
options = {v: options for v in source}
return options
def test_rechunk_option_overwrite(rechunk_args):
    """Second rechunk to the same store must fail unless overwrite=True."""
    api.rechunk(**rechunk_args).execute()
    # TODO: make this match more reliable based on outcome of
    # https://github.com/zarr-developers/zarr-python/issues/605
    with pytest.raises(ValueError, match=r"path .* contains an array"):
        api.rechunk(**rechunk_args).execute()
    options = _wrap_options(rechunk_args["source"], dict(overwrite=True))
    api.rechunk(**rechunk_args, target_options=options).execute()
def test_rechunk_passthrough(rechunk_args):
    """target_chunks=None (per variable or overall) must be a clean no-op."""
    # Verify that no errors are raised when the target chunks == source chunks
    if _is_collection(rechunk_args["source"]):
        rechunk_args["target_chunks"] = {v: None for v in rechunk_args["source"]}
    else:
        rechunk_args["target_chunks"] = None
    api.rechunk(**rechunk_args).execute()
def test_rechunk_no_temp_dir_provided_error(rechunk_args):
    """A multi-stage plan without a temp_store must raise a clear error."""
    # Verify that the correct error is raised when no temp_store is given
    # and the chunks to write differ from the chunks to read
    args = {k: v for k, v in rechunk_args.items() if k != "temp_store"}
    with pytest.raises(ValueError, match="A temporary store location must be provided"):
        api.rechunk(**args).execute()
def test_rechunk_option_compression(rechunk_args):
    """A compressor passed via target_options must shrink the on-disk size."""
    def rechunk(compressor):
        # Run a full rechunk with the given compressor and return total bytes
        # written under the target store.
        options = _wrap_options(
            rechunk_args["source"], dict(overwrite=True, compressor=compressor)
        )
        rechunked = api.rechunk(**rechunk_args, target_options=options)
        rechunked.execute()
        return sum(
            file.stat().st_size
            for file in Path(rechunked._target.store.path).rglob("*")
        )
    size_uncompressed = rechunk(None)
    size_compressed = rechunk(
        zarr.Blosc(cname="zstd", clevel=9, shuffle=zarr.Blosc.SHUFFLE)
    )
    assert size_compressed < size_uncompressed
def test_rechunk_invalid_option(rechunk_args):
    """Reserved zarr/xarray option keys must be rejected up front."""
    if isinstance(rechunk_args["source"], xarray.Dataset):
        # Options are essentially unbounded for Xarray (for CF encoding params),
        # so check only options with special error cases
        options = _wrap_options(rechunk_args["source"], {"chunks": 10})
        with pytest.raises(
            ValueError,
            match="Chunks must be provided in ``target_chunks`` rather than options",
        ):
            api.rechunk(**rechunk_args, target_options=options)
    else:
        for o in ["shape", "chunks", "dtype", "store", "name", "unknown"]:
            options = _wrap_options(rechunk_args["source"], {o: True})
            with pytest.raises(ValueError, match=f"Zarr options must not include {o}"):
                api.rechunk(**rechunk_args, temp_options=options)
            with pytest.raises(ValueError, match=f"Zarr options must not include {o}"):
                api.rechunk(**rechunk_args, target_options=options)
def test_rechunk_bad_target_chunks(rechunk_args):
    """Collection sources require dict-form target_chunks."""
    if not _is_collection(rechunk_args["source"]):
        # Tuple chunks are fine for a bare array; nothing to check here.
        return
    rechunk_args = dict(rechunk_args)
    rechunk_args["target_chunks"] = (10, 10)
    with pytest.raises(
        ValueError, match="You must specify ``target-chunks`` as a dict"
    ):
        api.rechunk(**rechunk_args)
def test_rechunk_invalid_source(tmp_path):
    """Plain Python objects are not valid rechunk sources."""
    with pytest.raises(
        ValueError,
        match="Source must be a Zarr Array, Zarr Group, Dask Array or Xarray Dataset",
    ):
        api.rechunk(
            [[1, 2], [3, 4]], target_chunks=(10, 10), max_mem=100, target_store=tmp_path
        )
@pytest.mark.parametrize(
    "source,target_chunks",
    [
        (sample_xarray_dataset(), {"a": (10, 5, 4), "b": (100,)}),
        (dsa.ones((20, 10), chunks=(5, 5)), (10, 10)),
    ],
)
@pytest.mark.parametrize(
    "executor",
    [
        "python",
        requires_beam("beam"),
        requires_prefect("prefect"),
        requires_pywren("pywren"),
    ],
)
def test_unsupported_executor(tmp_path, source, target_chunks, executor):
    """Dataset/dask sources only work with the dask executor; others raise."""
    with pytest.raises(
        NotImplementedError, match="Executor type .* not supported for source",
    ):
        api.rechunk(
            source,
            target_chunks=target_chunks,
            max_mem=1600,
            target_store=str(tmp_path / "target.zarr"),
            temp_store=str(tmp_path / "temp.zarr"),
            executor=executor,
        )
def test_rechunk_no_target_chunks(rechunk_args):
    """Building a plan with all-None target chunks must not raise."""
    rechunk_args = dict(rechunk_args)
    if _is_collection(rechunk_args["source"]):
        rechunk_args["target_chunks"] = {v: None for v in rechunk_args["source"]}
    else:
        rechunk_args["target_chunks"] = None
    api.rechunk(**rechunk_args)
def test_no_intermediate():
    """A plan without an intermediate store must omit it from repr/HTML."""
    a = zarr.ones((4, 4), chunks=(2, 2))
    b = zarr.ones((4, 4), chunks=(4, 1))
    rechunked = api.Rechunked(None, None, source=a, intermediate=None, target=b)
    assert "Intermediate" not in repr(rechunked)
    rechunked._repr_html_()
def test_no_intermediate_fused(tmp_path):
    """A single-stage rechunk should produce a fused (small) dask graph."""
    shape = (8000, 8000)
    source_chunks = (200, 8000)
    dtype = "f4"
    max_mem = 25600000
    target_chunks = (400, 8000)
    store_source = str(tmp_path / "source.zarr")
    source_array = zarr.ones(
        shape, chunks=source_chunks, dtype=dtype, store=store_source
    )
    target_store = str(tmp_path / "target.zarr")
    rechunked = api.rechunk(source_array, target_chunks, max_mem, target_store)
    num_tasks = len([v for v in rechunked.plan.dask.values() if dask.core.istask(v)])
    assert num_tasks < 20  # less than if no fuse
def test_pywren_function_executor(tmp_path):
    """Rechunk through a caller-managed Pywren local function executor."""
    pytest.importorskip("pywren_ibm_cloud")
    from rechunker.executors.pywren import (
        pywren_local_function_executor,
        PywrenExecutor,
    )
    # Create a Pywren function executor that we manage ourselves
    # and pass in to rechunker's PywrenExecutor
    with pywren_local_function_executor() as function_executor:
        executor = PywrenExecutor(function_executor)
        shape = (8000, 8000)
        source_chunks = (200, 8000)
        dtype = "f4"
        max_mem = 25600000
        target_chunks = (400, 8000)
        ### Create source array ###
        store_source = str(tmp_path / "source.zarr")
        source_array = zarr.ones(
            shape, chunks=source_chunks, dtype=dtype, store=store_source
        )
        ### Create targets ###
        target_store = str(tmp_path / "target.zarr")
        temp_store = str(tmp_path / "temp.zarr")
        rechunked = api.rechunk(
            source_array,
            target_chunks,
            max_mem,
            target_store,
            temp_store=temp_store,
            executor=executor,
        )
        assert isinstance(rechunked, api.Rechunked)
        target_array = zarr.open(target_store)
        assert target_array.chunks == tuple(target_chunks)
        result = rechunked.execute()
        assert isinstance(result, zarr.Array)
        a_tar = dsa.from_zarr(target_array)
        assert dsa.equal(a_tar, 1).all().compute()
| 2.0625 | 2 |
apis/v1/tags/interface_tag.py | billijoe/wechat_spider | 0 | 12760664 | # -*- coding: utf-8 -*-
"""
@project : WechatTogether
@Time : 2020/9/9 14:21
@Auth : AJay13
@File :interface_article_list.py
@IDE :PyCharm
@Motto:ABC(Always Be Coding)
"""
# 分类管理接口: 分类列表、删除分类、修改分离、添加分类
__all__ = ['InterFaceWechtTagList','InterfaceArticleFlag']
from flask import views
from sqlalchemy import and_
import config
from exts import db
from apis.common import response_code
from apis.common.api_version import api_version
from apis.common.auth import login_required
from apis.v1.tags.verify_tag import TagListForm,ArticleFlagForm
from apps.admin.models import WechatArticle, WechatArticleList
from models import WechatTag
class InterFaceWechtTagList(views.MethodView):
    """Paginated listing endpoint for WeChat account tags."""

    @api_version
    # @login_required  # authentication intentionally disabled for this endpoint
    def get(self, version):
        """Return one page of tags plus the total count (Layui table shape)."""
        form = TagListForm().validate_for_api()  # validate pagination params
        page = int(form.page.data)
        limit = int(form.limit.data)
        start = (page - 1) * limit
        query = WechatTag.query
        rows = query.slice(start, start + limit)
        total = query.count()
        tag_data = [
            {
                'id': row.id,
                'tag_name': row.tag_name,
                'tag_en': row.tag_en,
                'tag_summary': row.tag_summary,
                'create_time': row.create_time,
            }
            for row in rows
        ]
        return response_code.LayuiSuccess(message='查询成功!', data=tag_data, count=total)
class InterfaceArticleFlag(views.MethodView):
    """Set an article's flag: flag == 1 marks an essence article, any other
    value marks it as ordinary."""

    @api_version
    @login_required  # authentication handled by the decorator
    def post(self, version):
        """Update the flag of one article; guard clauses for the error paths."""
        form = ArticleFlagForm().validate_for_api()  # validate id/flag params
        flag = form.flag.data
        wechat_article = WechatArticle.query.get(form.id.data)
        if not wechat_article:
            return response_code.ParameterException(message='修改失败!')
        if wechat_article.flag == flag:
            # Nothing to change: someone else already set this value.
            return response_code.ParameterException(message='已经被被人修改,刷新看看!!')
        wechat_article.flag = flag
        db.session.commit()
        return response_code.LayuiSuccess(message='文章:“{}”修改成功!'.format(wechat_article.title))
| 2.140625 | 2 |
stellargraph/core/graph.py | timpitman/stellargraph | 0 | 12760665 | <reponame>timpitman/stellargraph<gh_stars>0
# -*- coding: utf-8 -*-
#
# Copyright 2017-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The StellarGraph class that encapsulates information required for
a machine-learning ready graph used by models.
"""
__all__ = ["StellarGraph", "StellarDiGraph", "GraphSchema", "NeighbourWithWeight"]
from typing import Iterable, Any, Mapping, List, Optional, Set
from collections import defaultdict, namedtuple
import pandas as pd
import numpy as np
import scipy.sparse as sps
import warnings
from .. import globalvar
from .schema import GraphSchema, EdgeType
from .experimental import experimental, ExperimentalWarning
from .element_data import NodeData, EdgeData, ExternalIdIndex
from .utils import is_real_iterable
from .validation import comma_sep
from . import convert
# Lightweight (node, weight) pair returned by weighted-neighbour queries.
NeighbourWithWeight = namedtuple("NeighbourWithWeight", ["node", "weight"])
class StellarGraph:
"""
StellarGraph class for directed or undirected graph ML models. It stores both
graph structure and features for machine learning.
To create a StellarGraph object ready for machine learning, at a
minimum pass the graph structure to the StellarGraph as a NetworkX
graph:
For undirected models::
Gs = StellarGraph(nx_graph)
For directed models::
Gs = StellarDiGraph(nx_graph)
To create a StellarGraph object with node features, supply the features
as a numeric feature vector for each node.
To take the feature vectors from a node attribute in the original NetworkX
graph, supply the attribute name to the ``node_features`` argument::
Gs = StellarGraph(nx_graph, node_features="feature")
where the nx_graph contains nodes that have a "feature" attribute containing
the feature vector for the node. All nodes of the same type must have
the same size feature vectors.
Alternatively, supply the node features as Pandas DataFrame objects with
the of the DataFrame set to the node IDs. For graphs with a single node
type, you can supply the DataFrame object directly to StellarGraph::
node_data = pd.DataFrame(
[feature_vector_1, feature_vector_2, ..],
index=[node_id_1, node_id_2, ...])
Gs = StellarGraph(nx_graph, node_features=node_data)
For graphs with multiple node types, provide the node features as Pandas
DataFrames for each type separately, as a dictionary by node type.
This allows node features to have different sizes for each node type::
node_data = {
node_type_1: pd.DataFrame(...),
node_type_2: pd.DataFrame(...),
}
Gs = StellarGraph(nx_graph, node_features=node_data)
You can also supply the node feature vectors as an iterator of `node_id`
and feature vector pairs, for graphs with single and multiple node types::
node_data = zip([node_id_1, node_id_2, ...],
[feature_vector_1, feature_vector_2, ..])
Gs = StellarGraph(nx_graph, node_features=node_data)
.. warning::
The constructor variant using the ``nodes=..., edges=...`` arguments to create a "new"
StellarGraph is experimental: the type is insufficiently documented and does not support
some algorithms.
Args:
graph: The NetworkX graph instance.
node_type_name: str, optional (default=globals.TYPE_ATTR_NAME)
This is the name for the node types that StellarGraph uses
when processing heterogeneous graphs. StellarGraph will
look for this attribute in the nodes of the graph to determine
their type.
node_type_default: str, optional (default=globals.NODE_TYPE_DEFAULT)
This is the default node type to use for nodes that do not have
an explicit type.
edge_type_name: str, optional (default=globals.TYPE_ATTR_NAME)
This is the name for the edge types that StellarGraph uses
when processing heterogeneous graphs. StellarGraph will
look for this attribute in the edges of the graph to determine
their type.
edge_type_default: str, optional (default=globals.EDGE_TYPE_DEFAULT)
This is the default edge type to use for edges that do not have
an explicit type.
node_features: str, dict, list or DataFrame optional (default=None)
This tells StellarGraph where to find the node feature information
required by some graph models. These are expected to be
a numeric feature vector for each node in the graph.
nodes: DataFrame or dict of hashable to DataFrame
Features for every node in the graph. Any columns in the dataframe are taken as numeric
node features of type ``dtype``. If there is only one type of node, a DataFrame can be
passed directly, and the type defaults to the ``node_type_default`` parameter. Nodes
have an ID taken from the index of the dataframe, and they have to be unique across all
types. For nodes with no features, an appropriate DataFrame can be created with
``pandas.DataFrame([], index=node_ids)``, where ``node_ids`` is a list of the node
IDs. This must be used with the ``edges`` argument. This uses the same basic structure as the
``node_features`` argument, described above. **Warning**: this is experimental (see
warning above for more details).
edges: DataFrame or dict of hashable to DataFrame
An edge list for each type of edges as a Pandas DataFrame containing a source, target
and (optionally) weight column (the names of each are taken from the ``source_column``,
``target_column`` and ``edge_weight_label`` parameters). If there is only one type of
edges, a DataFrame can be passed directly, and the type defaults to the
``edge_type_default`` parameter. Edges have an ID taken from the index of the dataframe,
and they have to be unique across all types. This must be used with the ``nodes``
argument. This uses the same basic structure as the ``node_features`` argument,
described above. **Warning**: this is experimental (see warning above for more details).
source_column: str, optional
The name of the column to use as the source node of edges in the ``edges`` edge list
argument.
target_column: str, optional
The name of the column to use as the target node of edges in the ``edges`` edge list
argument.
edge_weight_label: str, optional
The name of the attribute to use as the weight of edges (for the `nodes`/`edges`
DataFrame parameters, this is the name of the column to use).
"""
def __init__(
    self,
    graph=None,
    is_directed=False,
    edge_weight_label="weight",
    node_type_name=globalvar.TYPE_ATTR_NAME,
    edge_type_name=globalvar.TYPE_ATTR_NAME,
    node_type_default=globalvar.NODE_TYPE_DEFAULT,
    edge_type_default=globalvar.EDGE_TYPE_DEFAULT,
    feature_name=globalvar.FEATURE_ATTR_NAME,
    target_name=globalvar.TARGET_ATTR_NAME,
    node_features=None,
    dtype="float32",
    nodes=None,
    edges=None,
    source_column=globalvar.SOURCE,
    target_column=globalvar.TARGET,
):
    """
    Build the internal node and edge stores, either from a NetworkX
    ``graph`` or from the experimental ``nodes``/``edges`` DataFrame
    arguments (see the class docstring for the meaning of each parameter).
    """
    if graph is not None:
        # NetworkX path: convert the graph into node/edge frames. Note any
        # explicitly-passed `nodes`/`edges` values are overwritten here.
        nodes, edges = convert.from_networkx(
            graph,
            node_type_name=node_type_name,
            edge_type_name=edge_type_name,
            node_type_default=node_type_default,
            edge_type_default=edge_type_default,
            edge_weight_label=edge_weight_label,
            node_features=node_features,
            dtype=dtype,
        )
    else:
        # DataFrame path: still experimental, so warn the caller.
        warnings.warn(
            "StellarGraph(nodes=..., edges=...) is experimental: it has not been fully "
            "validated. It may be difficult to use and may have major changes at any time.",
            ExperimentalWarning,
        )
        if nodes is None:
            nodes = {}
        if edges is None:
            edges = {}
    # NOTE(review): `feature_name` and `target_name` are accepted but not used
    # below — presumably kept for backward compatibility; confirm before removal.
    self._is_directed = is_directed
    self._nodes = convert.convert_nodes(
        nodes, name="nodes", default_type=node_type_default, dtype=dtype,
    )
    self._edges = convert.convert_edges(
        edges,
        self._nodes,
        name="edges",
        default_type=edge_type_default,
        source_column=source_column,
        target_column=target_column,
        weight_column=edge_weight_label,
    )
@staticmethod
def from_networkx(
    graph,
    edge_weight_label="weight",
    node_type_name=globalvar.TYPE_ATTR_NAME,
    edge_type_name=globalvar.TYPE_ATTR_NAME,
    node_type_default=globalvar.NODE_TYPE_DEFAULT,
    edge_type_default=globalvar.EDGE_TYPE_DEFAULT,
    node_features=None,
    dtype="float32",
):
    """
    Construct a ``StellarGraph`` object from a NetworkX graph::
        Gs = StellarGraph.from_networkx(nx_graph)
    Node features may be supplied in several forms via ``node_features``:
    the name of a node attribute in ``graph`` holding each node's feature
    vector (``node_features="feature"``); a Pandas DataFrame indexed by node
    ID (a single DataFrame for one node type, or a dict mapping node type to
    DataFrame for heterogeneous graphs, where each type may have a different
    feature size); or an iterator of ``(node_id, feature_vector)`` pairs.
    All nodes of the same type must have feature vectors of the same length.
    Args:
        graph: The NetworkX graph instance.
        node_type_name: str, optional (default=globals.TYPE_ATTR_NAME)
            Node attribute examined to determine each node's type.
        node_type_default: str, optional (default=globals.NODE_TYPE_DEFAULT)
            Type assigned to nodes without an explicit type.
        edge_type_name: str, optional (default=globals.TYPE_ATTR_NAME)
            Edge attribute examined to determine each edge's type.
        edge_type_default: str, optional (default=globals.EDGE_TYPE_DEFAULT)
            Type assigned to edges without an explicit type.
        node_features: str, dict, list or DataFrame optional (default=None)
            Where to find the numeric feature vector for each node (see above).
        edge_weight_label: str, optional
            The name of the attribute to use as the weight of edges.
    Returns:
        A ``StellarGraph`` (if ``graph`` is undirected) or ``StellarDiGraph`` (if ``graph`` is
        directed) instance representing the data in ``graph`` and ``node_features``.
    """
    node_frames, edge_frames = convert.from_networkx(
        graph,
        node_type_name=node_type_name,
        edge_type_name=edge_type_name,
        node_type_default=node_type_default,
        edge_type_default=edge_type_default,
        edge_weight_label=edge_weight_label,
        node_features=node_features,
        dtype=dtype,
    )
    # Pick the matching StellarGraph flavour for the input's directedness.
    graph_cls = StellarDiGraph if graph.is_directed() else StellarGraph
    return graph_cls(
        nodes=node_frames, edges=edge_frames, edge_weight_label=edge_weight_label, dtype=dtype
    )
# customise how a missing attribute is handled to give better error messages for the NetworkX
# -> no NetworkX transition.
def __getattr__(self, item):
    """
    Fallback attribute lookup: when a missing attribute matches a NetworkX
    ``MultiDiGraph`` attribute, raise a more helpful AttributeError that
    points the caller at the replacement StellarGraph API.
    """
    import networkx
    try:
        # do the normal access, in case the attribute actually exists, and to get the native
        # python wording of the error
        return super().__getattribute__(item)
    except AttributeError as e:
        if hasattr(networkx.MultiDiGraph, item):
            # a networkx class has this as an attribute, so let's assume that it's old code
            # from before the conversion and replace (the `from None`) the default exception
            # with one with a more specific message that guides the user to the fix
            type_name = type(self).__name__
            raise AttributeError(
                f"{e.args[0]}. The '{type_name}' type no longer inherits from NetworkX types: use a new StellarGraph method, or, if that is not possible, the `.to_networkx()` conversion function."
            ) from None
        # doesn't look like a NetworkX method so use the default error
        raise
def is_directed(self) -> bool:
    """
    Whether edges in this graph have a direction.
    Returns:
        bool: True for a directed graph, False for an undirected one.
    """
    return self._is_directed
def number_of_nodes(self) -> int:
    """
    Count the nodes in the graph.
    Returns:
        int: The total node count, across all node types.
    """
    return len(self._nodes)
def number_of_edges(self) -> int:
    """
    Count the edges in the graph.
    Returns:
        int: The total edge count, across all edge types.
    """
    return len(self._edges)
def nodes(self) -> Iterable[Any]:
    """
    The collection of all node IDs in the graph.
    Returns:
        The graph nodes, as an index-like iterable.
    """
    return self._nodes.ids.pandas_index
def edges(
    self, include_edge_type=False, include_edge_weight=False
) -> Iterable[Any]:
    """
    List every edge in the graph.
    Args:
        include_edge_type (bool): When True, each edge is a
            (node 1, node 2, edge type) triple instead of a (node 1, node 2) pair.
        include_edge_weight (bool): When True, the edge weights are also
            returned, as a separate sequence alongside the edge list.
    Returns:
        The graph edges. If edge weights are included then a tuple of (edges, weights)
    """
    # FIXME: these would be better returned as the 2 or 3 arrays directly, rather than tuple-ing
    # (the same applies to all other instances of zip in this file)
    sources = self._edges.sources
    targets = self._edges.targets
    if include_edge_type:
        types = self._edges.type_of_iloc(slice(None))
        edge_list = list(zip(sources, targets, types))
    else:
        edge_list = list(zip(sources, targets))
    if include_edge_weight:
        return edge_list, self._edges.weights
    return edge_list
def has_node(self, node: Any) -> bool:
    """
    Check whether a node is present in the graph.
    Args:
        node (any): The node ID to look up.
    Returns:
        bool: True when the node exists in the graph, False otherwise.
    """
    return node in self._nodes
def _transform_edges(
    self, other_node_id, ilocs, include_edge_weight, filter_edge_types
):
    # Shared post-processing for the neighbour queries: optionally attach
    # edge weights, and/or drop edges whose type isn't in `filter_edge_types`.
    # `other_node_id` holds the neighbour endpoint for each edge iloc in `ilocs`.
    if include_edge_weight:
        weights = self._edges.weights[ilocs]
    else:
        weights = None
    if filter_edge_types is not None:
        # Compare integer type ilocs rather than the type values themselves.
        filter_edge_type_ilocs = self._edges.types.to_iloc(filter_edge_types)
        edge_type_ilocs = self._edges.type_ilocs[ilocs]
        correct_type = np.isin(edge_type_ilocs, filter_edge_type_ilocs)
        # Keep only neighbours (and their weights) of the requested types.
        other_node_id = other_node_id[correct_type]
        if weights is not None:
            weights = weights[correct_type]
    # FIXME(#718): it would be better to return these as ndarrays, instead of (zipped) lists
    if weights is not None:
        return [
            NeighbourWithWeight(node, weight)
            for node, weight in zip(other_node_id, weights)
        ]
    return list(other_node_id)
def neighbors(
    self, node: Any, include_edge_weight=False, edge_types=None
) -> Iterable[Any]:
    """
    Find all nodes adjacent to the given node, following edges in
    either direction.
    Args:
        node (any): The node in question.
        include_edge_weight (bool, default False): If True, each neighbour in the
            output is a named tuple with fields `node` (the node ID) and `weight` (the edge weight)
        edge_types (list of hashable, optional): If provided, only traverse the graph
            via the provided edge types when collecting neighbours.
    Returns:
        iterable: The neighbouring nodes.
    """
    edge_ilocs = self._edges.edge_ilocs(node, ins=True, outs=True)
    srcs = self._edges.sources[edge_ilocs]
    tgts = self._edges.targets[edge_ilocs]
    # For each incident edge, pick whichever endpoint is not `node` itself.
    neighbour_ids = np.where(srcs == node, tgts, srcs)
    return self._transform_edges(
        neighbour_ids, edge_ilocs, include_edge_weight, edge_types
    )
def in_nodes(
    self, node: Any, include_edge_weight=False, edge_types=None
) -> Iterable[Any]:
    """
    Find the neighbouring nodes that have an edge pointing *to* the given
    node. In an undirected graph every neighbour counts as both an in-node
    and an out-node.
    Args:
        node (any): The node in question.
        include_edge_weight (bool, default False): If True, each neighbour in the
            output is a named tuple with fields `node` (the node ID) and `weight` (the edge weight)
        edge_types (list of hashable, optional): If provided, only traverse the graph
            via the provided edge types when collecting neighbours.
    Returns:
        iterable: The neighbouring in-nodes.
    """
    if self.is_directed():
        edge_ilocs = self._edges.edge_ilocs(node, ins=True, outs=False)
        srcs = self._edges.sources[edge_ilocs]
        return self._transform_edges(srcs, edge_ilocs, include_edge_weight, edge_types)
    # all edges are both incoming and outgoing for undirected graphs
    return self.neighbors(
        node, include_edge_weight=include_edge_weight, edge_types=edge_types
    )
def out_nodes(
    self, node: Any, include_edge_weight=False, edge_types=None
) -> Iterable[Any]:
    """
    Find the neighbouring nodes that the given node has an edge pointing
    *to*. In an undirected graph every neighbour counts as both an in-node
    and an out-node.
    Args:
        node (any): The node in question.
        include_edge_weight (bool, default False): If True, each neighbour in the
            output is a named tuple with fields `node` (the node ID) and `weight` (the edge weight)
        edge_types (list of hashable, optional): If provided, only traverse the graph
            via the provided edge types when collecting neighbours.
    Returns:
        iterable: The neighbouring out-nodes.
    """
    if self.is_directed():
        edge_ilocs = self._edges.edge_ilocs(node, ins=False, outs=True)
        tgts = self._edges.targets[edge_ilocs]
        return self._transform_edges(tgts, edge_ilocs, include_edge_weight, edge_types)
    # all edges are both incoming and outgoing for undirected graphs
    return self.neighbors(
        node, include_edge_weight=include_edge_weight, edge_types=edge_types
    )
def nodes_of_type(self, node_type=None):
    """
    List the IDs of all nodes carrying the given node type.
    Args:
        node_type (hashable, optional): The node type to select; when None,
            every node in the graph is returned.
    Returns:
        A list of node IDs with type node_type
    """
    if node_type is None:
        return self.nodes()
    type_ilocs = self._nodes.type_range(node_type)
    return list(self._nodes.ids.from_iloc(type_ilocs))
def _key_error_for_missing(self, query_ids, node_ilocs):
valid = self._nodes.ids.is_valid(node_ilocs)
missing_values = np.asarray(query_ids)[~valid]
if len(missing_values) == 1:
return KeyError(missing_values[0])
return KeyError(missing_values)
def node_type(self, node):
    """
    Get the type of the node
    Args:
        node: Node ID
    Returns:
        Node type
    Raises:
        KeyError: If the node ID is not in the graph.
    """
    nodes = [node]
    node_ilocs = self._nodes.ids.to_iloc(nodes)
    try:
        type_sequence = self._nodes.type_of_iloc(node_ilocs)
    except IndexError:
        # An out-of-range iloc means the ID wasn't found; report it by ID.
        raise self._key_error_for_missing(nodes, node_ilocs)
    # We looked up exactly one node, so exactly one type comes back.
    assert len(type_sequence) == 1
    return type_sequence[0]
@property
def node_types(self):
    """
    All distinct node types present in the graph.
    Returns:
        set of types
    """
    return set(self._nodes.types.pandas_index)
def node_feature_sizes(self, node_types=None):
    """
    Get the feature sizes for the specified node types.
    Args:
        node_types (list, optional): A list of node types. If None all current node types
            will be used.
    Returns:
        A dictionary of node type and integer feature size.
    """
    all_sizes = self._nodes.feature_sizes()
    if node_types is None:
        return all_sizes
    node_types = set(node_types)
    # BUG FIX: iterate the (type, size) pairs via `.items()`; iterating the
    # dict directly yields only keys and fails the 2-tuple unpacking.
    # (`check_graph_for_ml` calls `.items()` on this same mapping.)
    return {
        type_name: size
        for type_name, size in all_sizes.items()
        if type_name in node_types
    }
def check_graph_for_ml(self, features=True):
    """
    Checks if all properties required for machine learning training/inference are set up.
    An error will be raised if the graph is not correctly setup.
    Args:
        features (bool): Currently unused; accepted for backward compatibility.
    Raises:
        RuntimeError: If no node type in the graph has numeric features.
    """
    if all(size == 0 for _, size in self.node_feature_sizes().items()):
        # BUG FIX: the two message fragments were concatenated without a
        # separator, producing "...for nodesNode features...".
        raise RuntimeError(
            "This StellarGraph has no numeric feature attributes for nodes. "
            "Node features are required for machine learning"
        )
    # TODO: check the schema
    # TODO: check the feature node_ids against the graph node ids?
def node_features(self, nodes, node_type=None):
    """
    Get the numeric feature vectors for the specified node or nodes.
    If the node type is not specified the node types will be found
    for all nodes. It is therefore important to supply the ``node_type``
    for this method to be fast.
    Args:
        nodes (list or hashable): Node ID or list of node IDs
        node_type (hashable): the type of the nodes.
    Returns:
        Numpy array containing the node features for the requested nodes.
    Raises:
        KeyError: If an ID is neither a known node nor the ``None`` sentinel.
        ValueError: If ``node_type`` is omitted and the valid nodes do not
            all share a single type (or there are none to infer from).
    """
    nodes = np.asarray(nodes)
    node_ilocs = self._nodes.ids.to_iloc(nodes)
    valid = self._nodes.ids.is_valid(node_ilocs)
    all_valid = valid.all()
    valid_ilocs = node_ilocs if all_valid else node_ilocs[valid]
    if node_type is None:
        # infer the type based on the valid nodes
        types = np.unique(self._nodes.type_of_iloc(valid_ilocs))
        if len(types) == 0:
            raise ValueError(
                "must have at least one node for inference, if `node_type` is not specified"
            )
        if len(types) > 1:
            raise ValueError("all nodes must have the same type")
        node_type = types[0]
    if all_valid:
        # Fast path: every requested ID exists, no zero-filling required.
        return self._nodes.features(node_type, valid_ilocs)
    # If there's some invalid values, they get replaced by zeros; this is designed to allow
    # models that build fixed-size structures (e.g. GraphSAGE) based on neighbours to fill out
    # missing neighbours with zeros automatically, using None as a sentinel.
    # FIXME: None as a sentinel forces nodes to have dtype=object even with integer IDs, could
    # instead use an impossible integer (e.g. 2**64 - 1)
    nones = nodes == None
    if not (nones | valid).all():
        # every ID should be either valid or None, otherwise it was a completely unknown ID
        raise self._key_error_for_missing(nodes[~nones], node_ilocs[~nones])
    sampled = self._nodes.features(node_type, valid_ilocs)
    # Zero rows for the `None` sentinels, real feature rows everywhere else.
    features = np.zeros((len(nodes), sampled.shape[1]))
    features[valid] = sampled
    return features
##################################################################
# Computationally intensive methods:
def _edge_type_iloc_triples(self, selector=slice(None), stacked=False):
    # For each selected edge, compute the (source node type iloc,
    # edge type iloc, target node type iloc) triple.
    source_ilocs = self._nodes.ids.to_iloc(self._edges.sources[selector])
    source_type_ilocs = self._nodes.type_ilocs[source_ilocs]
    rel_type_ilocs = self._edges.type_ilocs[selector]
    target_ilocs = self._nodes.ids.to_iloc(self._edges.targets[selector])
    target_type_ilocs = self._nodes.type_ilocs[target_ilocs]
    all_ilocs = source_type_ilocs, rel_type_ilocs, target_type_ilocs
    if stacked:
        # A single (num_edges, 3) array instead of three separate arrays.
        return np.stack(all_ilocs, axis=-1)
    return all_ilocs
def _edge_type_triples(self, selector=slice(None)):
    # Map the iloc triples for the selected edges back to the actual
    # (source node type, edge type, target node type) values.
    src_ilocs, rel_ilocs, tgt_ilocs = self._edge_type_iloc_triples(
        selector, stacked=False
    )
    src_types = self._nodes.types.from_iloc(src_ilocs)
    rel_types = self._edges.types.from_iloc(rel_ilocs)
    tgt_types = self._nodes.types.from_iloc(tgt_ilocs)
    return src_types, rel_types, tgt_types
def _unique_type_triples(self, *, return_counts, selector=slice(None)):
    # Find the distinct (source type, edge type, target type) triples among
    # the selected edges, optionally with the number of edges of each.
    all_type_ilocs = self._edge_type_iloc_triples(selector, stacked=True)
    if len(all_type_ilocs) == 0:
        # FIXME(https://github.com/numpy/numpy/issues/15559): if there's no edges, np.unique is
        # being called on a shape=(0, 3) ndarray, and hits "ValueError: cannot reshape array of
        # size 0 into shape (0,newaxis)", so we manually reproduce what would be returned
        if return_counts:
            ret = None, [], []
        else:
            ret = None, []
    else:
        ret = np.unique(
            all_type_ilocs, axis=0, return_index=True, return_counts=return_counts
        )
    edge_ilocs = ret[1]
    # we've now got the indices for an edge with each triple, along with the counts of them, so
    # we can query to get the actual edge types (this is, at the time of writing, easier than
    # getting the actual type for each type iloc in the triples)
    unique_ets = self._edge_type_triples(edge_ilocs)
    if return_counts:
        # Yields (src_type, rel_type, tgt_type, count) tuples.
        return zip(*unique_ets, ret[2])
    # Yields (src_type, rel_type, tgt_type) tuples.
    return zip(*unique_ets)
def info(self, show_attributes=True, sample=None):
    """
    Return an information string summarizing information on the current graph.
    This includes node and edge type information and their attributes.
    Note: This requires processing all nodes and edges and could take a long
    time for a large graph.
    Args:
        show_attributes (bool, default True): If True, include attributes information
        sample (int): To speed up the graph analysis, use only a random sample of
            this many nodes and edges.
    Returns:
        An information string.
    """
    # NOTE(review): `show_attributes` and `sample` are not used by this
    # implementation (see the sampling comment below) — confirm whether the
    # parameters are kept only for backward compatibility.
    directed_str = "Directed" if self.is_directed() else "Undirected"
    lines = [
        f"{type(self)}: {directed_str} multigraph",
        f" Nodes: {self.number_of_nodes()}, Edges: {self.number_of_edges()}",
    ]
    # Numpy processing is much faster than NetworkX processing, so we don't bother sampling.
    gs = self.create_graph_schema()
    def str_edge_type(et):
        # Render a schema triple as "source_type-relation->target_type".
        n1, rel, n2 = et
        return f"{n1}-{rel}->{n2}"
    lines.append("")
    lines.append(" Node types:")
    for nt in gs.node_types:
        nodes = self.nodes_of_type(nt)
        lines.append(f" {nt}: [{len(nodes)}]")
        edge_types = ", ".join(str_edge_type(et) for et in gs.schema[nt])
        lines.append(f" Edge types: {edge_types}")
    lines.append("")
    lines.append(" Edge types:")
    # FIXME: it would be better for the schema to just include the counts directly
    for src_ty, rel_ty, tgt_ty, count in self._unique_type_triples(
        return_counts=True
    ):
        et = EdgeType(src_ty, rel_ty, tgt_ty)
        lines.append(f" {str_edge_type(et)}: [{count}]")
    return "\n".join(lines)
def create_graph_schema(self, nodes=None):
    """
    Create graph schema in dict of dict format from current graph.
    Note the assumption we make that there is only one
    edge of a particular edge type per node pair.
    This means that specifying an edge by node0, node1 and edge type
    is unique.
    Arguments:
        nodes (list): A list of node IDs to use to build schema. This must
            represent all node types and all edge types in the graph.
            If not specified, all nodes and edges in the graph are used.
    Returns:
        GraphSchema object.
    """
    graph_schema = {nt: set() for nt in self.node_types}
    edge_types = set()
    if nodes is None:
        selector = slice(None)
    else:
        # Restrict to edges where *both* endpoints are in `nodes`.
        selector = np.isin(self._edges.sources, nodes) & np.isin(
            self._edges.targets, nodes
        )
    for n1, rel, n2 in self._unique_type_triples(
        selector=selector, return_counts=False
    ):
        edge_type_tri = EdgeType(n1, rel, n2)
        edge_types.add(edge_type_tri)
        graph_schema[n1].add(edge_type_tri)
        if not self.is_directed():
            # An undirected edge is traversable both ways, so also record
            # the reversed triple against the target node's type.
            edge_type_tri = EdgeType(n2, rel, n1)
            edge_types.add(edge_type_tri)
            graph_schema[n2].add(edge_type_tri)
    # Create ordered list of edge_types
    edge_types = sorted(edge_types)
    # Create keys for node and edge types
    schema = {
        node_label: sorted(node_data)
        for node_label, node_data in graph_schema.items()
    }
    return GraphSchema(
        self.is_directed(), sorted(self.node_types), edge_types, schema
    )
def node_degrees(self) -> Mapping[Any, int]:
    """
    Compute the degree of every node in the graph.
    Returns:
        A mapping from node ID to that node's degree.
    """
    return self._edges.degrees()
def to_adjacency_matrix(self, nodes: Optional[Iterable] = None, weighted=False):
    """
    Obtains a SciPy sparse adjacency matrix of edge weights.
    By default (``weighted=False``), each element of the matrix contains the number
    of edges between the two vertices (only 0 or 1 in a graph without multi-edges).
    Args:
        nodes (iterable): The optional collection of nodes
            comprising the subgraph. If specified, then the
            adjacency matrix is computed for the subgraph;
            otherwise, it is computed for the full graph.
        weighted (bool): If true, use the edge weight column from the graph instead
            of edge counts (weights from multi-edges are summed).
    Returns:
        The weighted adjacency matrix.
    """
    if nodes is None:
        index = self._nodes._id_index
        selector = slice(None)
    else:
        nodes = list(nodes)
        # A fresh index over just the requested nodes: matrix rows/columns
        # follow the order of the `nodes` list.
        index = ExternalIdIndex(nodes)
        # Only keep edges with *both* endpoints inside the subgraph.
        selector = np.isin(self._edges.sources, nodes) & np.isin(
            self._edges.targets, nodes
        )
    # these indices are computed relative to the index above. If `nodes` is None, they'll be the
    # overall ilocs (for the original graph), otherwise they'll be the indices of the `nodes`
    # list.
    src_idx = index.to_iloc(self._edges.sources[selector])
    tgt_idx = index.to_iloc(self._edges.targets[selector])
    if weighted:
        weights = self._edges.weights[selector]
    else:
        # Unweighted: each edge contributes 1, so entries count edges.
        weights = np.ones(src_idx.shape, dtype=self._edges.weights.dtype)
    n = len(index)
    adj = sps.csr_matrix((weights, (src_idx, tgt_idx)), shape=(n, n))
    if not self.is_directed():
        # in an undirected graph, the adjacency matrix should be symmetric: which means counting
        # weights from either "incoming" or "outgoing" edges, but not double-counting self loops
        backward = sps.csr_matrix((weights, (tgt_idx, src_idx)), shape=(n, n))
        backward.setdiag(0)
        adj += backward
    # this is a multigraph, let's eliminate any duplicate entries
    adj.sum_duplicates()
    return adj
def to_networkx(
    self,
    node_type_name=globalvar.TYPE_ATTR_NAME,
    edge_type_name=globalvar.TYPE_ATTR_NAME,
    edge_weight_label=globalvar.WEIGHT,
    feature_name=globalvar.FEATURE_ATTR_NAME,
):
    """
    Create a NetworkX MultiGraph or MultiDiGraph instance representing this graph.
    Args:
        node_type_name (str): the name of the attribute to use to store a node's type (or label).
        edge_type_name (str): the name of the attribute to use to store a edge's type (or label).
        edge_weight_label (str): the name of the attribute to use to store a edge's weight.
        feature_name (str, optional): the name of the attribute to use to store a node's feature
            vector; if ``None``, feature vectors are not stored within each node.
    Returns:
        An instance of `networkx.MultiDiGraph` (if directed) or `networkx.MultiGraph` (if
        undirected) containing all the nodes & edges and their types & features in this graph.
    """
    # Imported lazily so NetworkX is only required when converting.
    import networkx
    if self.is_directed():
        graph = networkx.MultiDiGraph()
    else:
        graph = networkx.MultiGraph()
    # Add nodes type-by-type so features can be fetched per type.
    for ty in self.node_types:
        node_ids = self.nodes_of_type(ty)
        ty_dict = {node_type_name: ty}
        if feature_name is not None:
            # Attach each node's feature vector as a node attribute.
            features = self.node_features(node_ids, node_type=ty)
            for node_id, node_features in zip(node_ids, features):
                graph.add_node(
                    node_id, **ty_dict, **{feature_name: node_features},
                )
        else:
            graph.add_nodes_from(node_ids, **ty_dict)
    # Add every edge with its type and weight as edge attributes.
    iterator = zip(
        self._edges.sources,
        self._edges.targets,
        self._edges.type_of_iloc(slice(None)),
        self._edges.weights,
    )
    graph.add_edges_from(
        (src, dst, {edge_type_name: type_, edge_weight_label: weight})
        for src, dst, type_, weight in iterator
    )
    return graph
# FIXME: Experimental/special-case methods that need to be considered more; the underscores
# denote "package private", not fully private, and so are ok to use in the rest of stellargraph
def _get_index_for_nodes(self, nodes, node_type=None):
"""
Get the indices for the specified node or nodes.
If the node type is not specified the node types will be found
for all nodes. It is therefore important to supply the ``node_type``
for this method to be fast.
Args:
n: (list or hashable) Node ID or list of node IDs
node_type: (hashable) the type of the nodes.
Returns:
Numpy array containing the indices for the requested nodes.
"""
return self._nodes._id_index.to_iloc(nodes)
def _adjacency_types(self, graph_schema: GraphSchema):
    """
    Obtains the edges in the form of the typed mapping:
    {edge_type_triple: {source_node: [target_node, ...]}}
    Args:
        graph_schema: The graph schema.
    Returns:
        The edge types mapping.
    """
    # NOTE(review): `graph_schema` is not referenced in the body below —
    # presumably kept for interface compatibility; confirm before removing.
    source_types, rel_types, target_types = self._edge_type_triples(slice(None))
    triples = defaultdict(lambda: defaultdict(lambda: []))
    iterator = zip(
        source_types,
        rel_types,
        target_types,
        self._edges.sources,
        self._edges.targets,
    )
    for src_type, rel_type, tgt_type, src, tgt in iterator:
        triple = EdgeType(src_type, rel_type, tgt_type)
        triples[triple][src].append(tgt)
        if not self.is_directed() and src != tgt:
            # Undirected non-loop edges are recorded in both directions;
            # self-loops are recorded once to avoid double-counting.
            other_triple = EdgeType(tgt_type, rel_type, src_type)
            triples[other_triple][tgt].append(src)
    for subdict in triples.values():
        for v in subdict.values():
            # each list should be in order, to ensure sampling methods are deterministic
            v.sort(key=str)
    return triples
def _edge_weights(self, source_node: Any, target_node: Any) -> List[Any]:
    """
    Obtains the weights of edges between the given pair of nodes.
    Args:
        source_node (any): The source node.
        target_node (any): The target node.
    Returns:
        list: The edge weights, as plain Python floats.
    """
    # self loops should only be counted once, which means they're effectively always a directed
    # edge at the storage level, unlikely other edges in an undirected graph. This is
    # particularly important with the intersection1d call, where the source_ilocs and
    # target_ilocs will be equal, when source_node == target_node, and thus the intersection
    # will contain all incident edges.
    effectively_directed = self.is_directed() or source_node == target_node
    both_dirs = not effectively_directed
    # Edges incident to each endpoint; the shared ilocs are the edges
    # connecting the pair.
    source_ilocs = self._edges.edge_ilocs(source_node, ins=both_dirs, outs=True)
    target_ilocs = self._edges.edge_ilocs(target_node, ins=True, outs=both_dirs)
    ilocs = np.intersect1d(source_ilocs, target_ilocs, assume_unique=True)
    return [float(x) for x in self._edges.weights[ilocs]]
# A convenience class that merely specifies that edges have direction.
class StellarDiGraph(StellarGraph):
    """
    A directed StellarGraph: identical to :class:`StellarGraph` except that
    ``is_directed`` is fixed to True, so edges always carry a direction.
    """
    def __init__(
        self,
        graph=None,
        edge_weight_label="weight",
        node_type_name=globalvar.TYPE_ATTR_NAME,
        edge_type_name=globalvar.TYPE_ATTR_NAME,
        node_type_default=globalvar.NODE_TYPE_DEFAULT,
        edge_type_default=globalvar.EDGE_TYPE_DEFAULT,
        feature_name=globalvar.FEATURE_ATTR_NAME,
        target_name=globalvar.TARGET_ATTR_NAME,
        node_features=None,
        dtype="float32",
        nodes=None,
        edges=None,
        source_column=globalvar.SOURCE,
        target_column=globalvar.TARGET,
    ):
        # Forward everything to the base class, forcing directedness on.
        super().__init__(
            graph=graph,
            is_directed=True,
            edge_weight_label=edge_weight_label,
            node_type_name=node_type_name,
            edge_type_name=edge_type_name,
            node_type_default=node_type_default,
            edge_type_default=edge_type_default,
            feature_name=feature_name,
            target_name=target_name,
            node_features=node_features,
            dtype=dtype,
            nodes=nodes,
            edges=edges,
            source_column=source_column,
            target_column=target_column,
        )
| 2.21875 | 2 |
caas-cStart.py | abhishekkhandal/py-project | 3 | 12760666 | #!/usr/bin/python2
import commands,cgi
print "content-type: text/html"
containerName = cgi.FormContent()['containerName'][0]
cStopStatus = commands.getstatusoutput("sudo docker start {}".format(containerName))
if cStopStatus[0] == 0:
print "location: dp.py"
print
else:
print "Container could not be stopped"
| 2.375 | 2 |
Chapter05/LEDtest.py | PacktPublishing/Wearable-Projects-with-Raspberry-Pi-Zero | 9 | 12760667 | #! /usr/bin/python3
# Cycle three banks of four LEDs (red, green, blue) through two patterns
# forever: all-on/all-off flashes, then a one-at-a-time fill of each colour.
from gpiozero import LEDBoard
from time import sleep

redLEDs = LEDBoard(15, 18, 17, 27)
greLEDs = LEDBoard(14, 2, 3, 4)
bluLEDs = LEDBoard(23, 24, 22, 25)

while True:
    banks = (redLEDs, greLEDs, bluLEDs)
    # Flash: switch each bank fully on in turn, then fully off in turn.
    for bank in banks:
        bank.on()
        sleep(0.5)
    for bank in banks:
        bank.off()
        sleep(0.5)
    # Fill: light each bank's LEDs one by one, then clear the bank
    # ((1,0,0,0) -> (1,1,1,1) -> (0,0,0,0), a quarter second per step).
    for bank in banks:
        for lit in (1, 2, 3, 4, 0):
            bank.value = (1,) * lit + (0,) * (4 - lit)
            sleep(0.25)
| 2.78125 | 3 |
app/api/operator_api.py | deepzec/Apfell | 1 | 12760668 | from app import apfell, db_objects, auth
from sanic.response import json
from app.database_models.model import Operator
from sanic import response
from sanic.exceptions import abort
from app import crypto
from sanic_auth import User
# ------------ OPERATORS ------------------------
@apfell.route("/api/v1.0/operators/", methods=['GET'])
async def get_all_operators(request):
    """Return every registered operator as a JSON list."""
    operators = await db_objects.execute(Operator.select())
    return json([operator.to_json() for operator in operators])
@apfell.route("/api/v1.0/operators/", methods=['POST'])
async def create_operator(request):
    """
    Create a new operator from a JSON body containing `username` and
    `password`. The password is stored as a SHA-512 hash, never raw.
    """
    data = request.json
    if 'username' not in data:
        return json({'status': 'error',
                     'error': '"username" field is required'})
    if not isinstance(data['username'], str) or not len(data['username']):
        return json({'status': 'error',
                     'error': '"username" must be string with at least one character'})
    # BUG FIX: a request without a "password" key previously raised an
    # unhandled KeyError on the hashing line below.
    if 'password' not in data:
        return json({'status': 'error',
                     'error': '"password" field is required'})
    password = await crypto.hash_SHA512(data['password'])
    # we need to create a new user
    try:
        await db_objects.create(Operator, username=data['username'], password=password)
        return response.json({'status': 'success'})
    except Exception:
        # Most likely a database/uniqueness failure; report it rather than crash.
        return json({'status': 'error',
                     'error': 'failed to add user'})
@apfell.route("/api/v1.0/operators/<id:int>", methods=['GET'])
async def get_one_operator(request, id):
    """Look up a single operator by ID; respond 404 when it does not exist."""
    try:
        op = await db_objects.get(Operator, id=id)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallows
        # KeyboardInterrupt/SystemExit.
        print("Failed in /api/v1.0/operators/id for a GET request")
        return abort(404)
    return json(str(op))
@apfell.route("/api/v1.0/operators/<id:int>", methods=["PUT"])
async def update_operator(request, id):
    """
    Update an operator's username and/or password (each optional in the
    JSON body); respond 404 when the operator does not exist.
    """
    try:
        op = await db_objects.get(Operator, id=id)
        data = request.json
        if 'username' in data:
            op.username = data['username']
        if 'password' in data:
            # Store only the SHA-512 hash, never the raw password.
            op.password = await crypto.hash_SHA512(data['password'])
        await db_objects.update(op)
        return json({'status': 'success'})
    except Exception:
        # BUG FIX: narrowed from a bare `except:`, which also caught
        # KeyboardInterrupt/SystemExit.
        abort(404)
@apfell.route("/api/v1.0/operators/<id:int>", methods=["DELETE"])
async def remove_operator(request, id):
    """Delete an operator by ID; respond 404 when it does not exist."""
    try:
        op = await db_objects.get(Operator, id=id)
        await db_objects.delete(op)
        return json({'status': 'success'})
    except Exception:
        # BUG FIX: narrowed from a bare `except:`, which also caught
        # KeyboardInterrupt/SystemExit.
        abort(404)
src/satyr-train-test.py | negotiatorvivian/PDP-SP | 0 | 12760669 | #!/usr/bin/env python3
"""The main entry point to the PDP trainer/tester/predictor."""
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file
# in the project root for full license information.
import numpy as np
import torch
import torch.optim as optim
import logging
import argparse, os, yaml, csv
from pdp.generator import *
from pdp.trainer import SatFactorGraphTrainer
##########################################################################################################################
def write_to_csv(result_list, file_path):
    """Write one CSV row per result: (name, prediction value at index [1, 0])."""
    with open(file_path, mode = 'w', newline = '') as out_file:
        csv_writer = csv.writer(out_file, delimiter = ',', quotechar = '"', quoting = csv.QUOTE_MINIMAL)
        for entry in result_list:
            csv_writer.writerow([entry[0], entry[1][1, 0]])
def write_to_csv_time(result_list, file_path):
    """Write one CSV row per result: (name, elapsed-time value at index 2)."""
    with open(file_path, mode = 'w', newline = '') as out_file:
        csv_writer = csv.writer(out_file, delimiter = ',', quotechar = '"', quoting = csv.QUOTE_MINIMAL)
        for entry in result_list:
            csv_writer.writerow([entry[0], entry[2]])
def run(random_seed, config_file, is_training, load_model, cpu, reset_step, use_generator, batch_replication):
    """Runs the train/test/predict procedures.

    :param random_seed: seed for numpy/torch (only applied when not using a generator)
    :param config_file: path to the YAML (or JSON-as-YAML) configuration file
    :param is_training: when True, run the training phase before testing
    :param load_model: which checkpoint to load at test time: "last", "best" or None
    :param cpu: force CPU execution (disables CUDA)
    :param reset_step: reset the trainer's global step counter
    :param use_generator: draw training examples from a CNF generator instead of files
    :param batch_replication: batch replication factor used at test time
    """
    if not use_generator:
        # Fixed seeds only make sense when the data itself is deterministic.
        np.random.seed(random_seed)
        torch.manual_seed(random_seed)
    # Set the configurations (from either JSON or YAML file)
    with open(config_file, 'r') as f:
        # NOTE(review): yaml.load without a Loader= is deprecated and unsafe
        # for untrusted files — consider yaml.safe_load.
        config = yaml.load(f)
        config['train_path'] = [os.path.abspath(p) for p in config['train_path']]
        # NOTE(review): this reads config['train_path'], so validation equals
        # the training set — confirm whether 'validation_path' was intended.
        config['validation_path'] = [os.path.abspath(p) for p in config['train_path']]
        config['model_path'] = os.path.abspath(config['model_path'])
    # Set the logger
    format = '[%(levelname)s] %(asctime)s - %(name)s: %(message)s'  # NOTE: shadows builtin format()
    logging.basicConfig(level = logging.DEBUG, format = format)
    logger = logging.getLogger(config['model_name'] + ' (' + config['version'] + ')')
    # Check if the input path is a list or on
    if not isinstance(config['train_path'], list):
        # Expand a directory path into the list of its .json files.
        config['train_path'] = [os.path.join(config['train_path'], f) for f in os.listdir(config['train_path']) if
                                os.path.isfile(os.path.join(config['train_path'], f)) and f.endswith('.json')]
    if not isinstance(config['validation_path'], list):
        config['validation_path'] = [os.path.join(config['validation_path'], f) for f in os.listdir(config['validation_path']) if
                                     os.path.isfile(os.path.join(config['validation_path'], f)) and f.endswith('.json')]
    if config['verbose']:
        if use_generator:
            logger.info("Generating training examples via %s generator." % config['generator'])
        else:
            logger.info("Training file(s): %s" % config['train_path'])
            logger.info("Validation file(s): %s" % config['validation_path'])
    # Checkpoint directories: <model_path>/<model_name>/<version>/{best,last}
    best_model_path_base = os.path.join(os.path.relpath(config['model_path']), config['model_name'], config['version'], "best")
    last_model_path_base = os.path.join(os.path.relpath(config['model_path']), config['model_name'], config['version'], "last")
    if not os.path.exists(best_model_path_base):
        os.makedirs(best_model_path_base)
    if not os.path.exists(last_model_path_base):
        os.makedirs(last_model_path_base)
    trainer = SatFactorGraphTrainer(config = config, use_cuda = not cpu, logger = logger)
    # Training
    if is_training:
        if config['verbose']:
            logger.info("Starting the training phase...")
        generator = None
        if use_generator:
            # Pick the CNF generator family declared in the config.
            if config['generator'] == 'modular':
                generator = ModularCNFGenerator(config['min_k'], config['min_n'], config['max_n'], config['min_q'], config['max_q'], config['min_c'],
                                                config['max_c'], config['min_alpha'], config['max_alpha'])
            elif config['generator'] == 'v-modular':
                generator = VariableModularCNFGenerator(config['min_k'], config['max_k'], config['min_n'], config['max_n'], config['min_q'],
                                                        config['max_q'], config['min_c'], config['max_c'], config['min_alpha'], config['max_alpha'])
            else:
                generator = UniformCNFGenerator(config['min_n'], config['max_n'], config['min_k'], config['max_k'], config['min_alpha'],
                                                config['max_alpha'])
        model_list, errors, losses = trainer.train(train_list = config['train_path'], validation_list = config['validation_path'],
            optimizer = optim.Adam(trainer.get_parameter_list(), lr = config['learning_rate'], weight_decay = config['weight_decay']),
            last_export_path_base = last_model_path_base, best_export_path_base = best_model_path_base, metric_index = config['metric_index'],
            load_model = load_model, reset_step = reset_step, generator = generator, train_epoch_size = config['train_epoch_size'])
    if config['verbose']:
        logger.info("Starting the test phase...")
    for test_files in config['test_path']:
        if config['verbose']:
            logger.info("Testing " + test_files)
        # Select which checkpoint directory to restore from.
        if load_model == "last":
            import_path_base = last_model_path_base
        elif load_model == "best":
            import_path_base = best_model_path_base
        else:
            import_path_base = None
        result = trainer.test(test_list = test_files, import_path_base = import_path_base, batch_replication = batch_replication)
        if config['verbose']:
            for row in result:
                filename, errors, _ = row
                print('Dataset: ' + filename)
                print("Accuracy: \t%s" % (1 - errors[0]))
                print("Recall: \t%s" % (1 - errors[1]))
        if os.path.isdir(test_files):
            # Persist per-dataset metrics and runtimes next to the test data.
            write_to_csv(result,
                         os.path.join(test_files, config['model_type'] + '_' + config['model_name'] + '_' + config['version'] + '-results.csv'))
            write_to_csv_time(result, os.path.join(test_files,
                              config['model_type'] + '_' + config['model_name'] + '_' + config['version'] + '-results-time.csv'))
if __name__ == '__main__':
    # CLI entry point: parse flags and dispatch to run().
    parser = argparse.ArgumentParser()
    parser.add_argument('config', help = 'The configuration JSON file')
    parser.add_argument('-t', '--test', help = 'The test mode', action = 'store_true')
    parser.add_argument('-l', '--load_model', help = 'Load the previous model')
    parser.add_argument('-c', '--cpu_mode', help = 'Run on CPU', action = 'store_true')
    parser.add_argument('-r', '--reset', help = 'Reset the global step', action = 'store_true')
    # NOTE(review): help text below is copy-pasted from --reset; it should
    # describe the generator flag instead.
    parser.add_argument('-g', '--use_generator', help = 'Reset the global step', action = 'store_true')
    parser.add_argument('-b', '--batch_replication', help = 'Batch replication factor', type = int, default = 1)
    args = parser.parse_args()
    # random_seed is fixed at 0; training mode is the default (use -t to test only).
    run(0, args.config, not args.test, args.load_model, args.cpu_mode, args.reset, args.use_generator, args.batch_replication)
| 2.1875 | 2 |
binary_gap.py | lucasp90/codility | 0 | 12760670 | def solution(N):
# write your code in Python 3.6
binary = to_binary(N)
started = False
max_gap = 0
current_gap = 0
for i in range(len(binary)):
if not started:
started = binary[i] == '1'
else:
if binary[i] == '1':
max_gap = max(current_gap, max_gap)
current_gap = 0
else:
current_gap += 1
return max_gap
def to_binary(n):
    """Convert a non-negative integer to its binary string representation.

    Fixes two defects of the original: a leftover debug print of the result,
    and returning '' (empty string) for n == 0 instead of '0'.

    >>> to_binary(5)
    '101'
    """
    if n == 0:
        return '0'
    digits = []
    remaining = n
    while remaining > 0:
        digits.append(str(remaining % 2))
        remaining //= 2
    # Digits were collected least-significant first.
    return ''.join(reversed(digits))
lib_pypy/pyrepl/cmdrepl.py | hausen/pypy3.6-7.3.0-osx | 10 | 12760671 | # Copyright 2000-2007 <NAME> <<EMAIL>>
# <NAME>
#
# All Rights Reserved
#
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose is hereby granted without fee,
# provided that the above copyright notice appear in all copies and
# that both that copyright notice and this permission notice appear in
# supporting documentation.
#
# THE AUTHOR <NAME> DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Wedge pyrepl behaviour into cmd.Cmd-derived classes.
replize, when given a subclass of cmd.Cmd, returns a class that
behaves almost identically to the supplied class, except that it uses
pyrepl instead if raw_input.
It was designed to let you do this:
>>> import pdb
>>> from pyrepl import replize
>>> pdb.Pdb = replize(pdb.Pdb)
which is in fact done by the `pythoni' script that comes with
pyrepl."""
from __future__ import print_function
from pyrepl import completing_reader as cr, reader, completer
from pyrepl.completing_reader import CompletingReader as CR
import cmd
class CmdReader(CR):
    """CompletingReader specialised for cmd.Cmd-style command completion."""

    def collect_keymap(self):
        # Extend the base keymap: a plain newline accepts the line,
        # meta-newline is explicitly rejected.
        return super(CmdReader, self).collect_keymap() + (
            ("\\M-\\n", "invalid-key"),
            ("\\n", "accept"))

    CR_init = CR.__init__
    def __init__(self, completions):
        # NOTE(review): self.CR_init(self) invokes CR.__init__ with `self`
        # passed twice (once via the bound method, once explicitly) — this
        # apparently relies on CR.__init__ tolerating an extra positional
        # argument; confirm against pyrepl's CompletingReader signature.
        self.CR_init(self)
        self.completions = completions

    def get_completions(self, stem):
        # Only complete when the cursor sits right at the end of the stem
        # (i.e. the stem spans the whole line up to the cursor).
        if len(stem) != self.pos:
            return []
        return sorted(set(s for s in self.completions
                          if s.startswith(stem)))
def replize(klass, history_across_invocations=1):
    """Return a subclass of the cmd.Cmd-derived klass that uses
    pyrepl instead of readline.

    Raises a ValueError if klass does not derive from cmd.Cmd.

    The optional history_across_invocations parameter (default 1)
    controls whether instances of the returned class share
    histories."""
    # Validate first (was a bare `raise Exception`; ValueError matches the
    # documented contract and is still caught by callers catching Exception).
    if not issubclass(klass, cmd.Cmd):
        raise ValueError("%r does not derive from cmd.Cmd" % (klass,))
    # Completion candidates: every do_* method, with the "do_" prefix stripped.
    completions = [s[3:]
                   for s in completer.get_class_members(klass)
                   if s.startswith("do_")]
#    if klass.cmdloop.im_class is not cmd.Cmd:
#        print "this may not work"

    class CmdRepl(klass):
        k_init = klass.__init__

        if history_across_invocations:
            # One history list shared by every instance of this subclass.
            _CmdRepl__history = []
            def __init__(self, *args, **kw):
                self.k_init(*args, **kw)
                self.__reader = CmdReader(completions)
                self.__reader.history = CmdRepl._CmdRepl__history
                self.__reader.historyi = len(CmdRepl._CmdRepl__history)
        else:
            def __init__(self, *args, **kw):
                self.k_init(*args, **kw)
                self.__reader = CmdReader(completions)

        def cmdloop(self, intro=None):
            """Drive the command loop, reading input lines via pyrepl."""
            self.preloop()
            if intro is not None:
                self.intro = intro
            if self.intro:
                print(self.intro)
            stop = None
            while not stop:
                if self.cmdqueue:
                    # Queued commands take precedence over interactive input.
                    line = self.cmdqueue[0]
                    del self.cmdqueue[0]
                else:
                    try:
                        self.__reader.ps1 = self.prompt
                        line = self.__reader.readline()
                    except EOFError:
                        line = "EOF"
                line = self.precmd(line)
                stop = self.onecmd(line)
                stop = self.postcmd(stop, line)
            self.postloop()

    CmdRepl.__name__ = "replize(%s.%s)"%(klass.__module__, klass.__name__)
    return CmdRepl
| 2.59375 | 3 |
caer/adorad/_tensor_str.py | kmamine/caer | 3 | 12760672 | <gh_stars>1-10
# _____ ______ _____
# / ____/ /\ | ____ | __ \
# | | / \ | |__ | |__) | Caer - Modern Computer Vision
# | | / /\ \ | __| | _ / Languages: Python, C, C++
# | |___ / ____ \ | |____ | | \ \ http://github.com/jasmcaus/caer
# \_____\/_/ \_ \______ |_| \_\
# Licensed under the MIT License <http://opensource.org/licenses/MIT>
# SPDX-License-Identifier: MIT
# Copyright (c) 2020-21 The Caer Authors <http://github.com/jasmcaus>
import math
import numpy as np
class __PrinterOptions(object):
    """Global print-formatting knobs consulted by the helpers below."""
    precision = 4      # decimals shown for floats in non-scientific mode
    threshold = 1000   # element count above which output is summarized
    edgeitems = 3      # items kept at each edge when summarizing
    linewidth = 80     # target maximum characters per printed line
    sci_mode = None    # None = auto-detect; True/False forces sci notation
    prefix = 'tensor('


# Module-wide singleton; all formatting functions read from this instance.
PRINT_OPTS = __PrinterOptions()
class _Formatter(object):
    """Decides how each element of one tensor should be rendered.

    Scans the tensor once to choose int vs. float display mode, scientific
    notation, and the column width used to align elements.
    """

    def __init__(self, tensor):
        self.floating_dtype = 'float' in str(repr(tensor.dtype))
        self.int_mode = True       # True while every float value is a whole number
        self.sci_mode = False
        self.max_width = 1         # widest rendered element, in characters

        tensor_view = tensor.reshape(-1)

        if not self.floating_dtype:
            for value in tensor_view:
                value_str = '{}'.format(value)
                # d = max(self.max_width, len(value_str))
                # NOTE(review): the commented line above tracked the running
                # maximum; the current code overwrites max_width every
                # iteration, so only the LAST element's width (floored at 2)
                # survives — confirm whether this is intentional.
                d = max(2, len(value_str))
                self.max_width = d
        else:
            # FLOATING POINT
            for value in tensor_view:
                if value != np.ceil(value):
                    self.int_mode = False
                    break

            if self.int_mode:
                # +1 leaves room for the trailing '.' added in format().
                for value in tensor_view:
                    value_str = ('{:.0f}').format(value)
                    self.max_width = max(self.max_width, len(value_str) + 1)

        if PRINT_OPTS.sci_mode is not None:
            # The global override takes precedence over detection.
            self.sci_mode = PRINT_OPTS.sci_mode

    def width(self):
        # Column width (in characters) reserved for one element.
        return self.max_width

    def format(self, value):
        """Render a single element according to the detected display mode."""
        if self.floating_dtype:
            if self.sci_mode:
                ret = ('{{:{}.{}e}}').format(self.max_width, PRINT_OPTS.precision).format(value)
            elif self.int_mode:
                ret = '{:.0f}'.format(value)
                # Keep a trailing '.' so whole floats still read as floats.
                if not (math.isinf(value) or math.isnan(value)):
                    ret += '.'
            else:
                ret = ('{{:.{}f}}').format(PRINT_OPTS.precision).format(value)
        else:
            ret = '{}'.format(value)
        # return (self.max_width - len(ret)) * ' ' + ret
        # NOTE(review): padding is hard-coded to width 2 instead of using
        # max_width (see the commented line above) — confirm intentional.
        return (2 - len(ret)) * ' ' + ret
return (2 - len(ret)) * ' ' + ret
def _scalar_str(self, formatter):
# Usually, we must never come here.
# This is only for when the 'Adorad' library is built.
# Changes may be made to this.
return formatter.format(self.item())
def _vector_str(self, indent, summarize, formatter):
    """Render a 1-D tensor as '[a, b, ...]', wrapped to PRINT_OPTS.linewidth.

    :param indent: columns already consumed on the current output line
    :param summarize: when True, elide the middle and keep only the edges
    :param formatter: _Formatter used to render individual elements
    """
    # length includes spaces and comma between elements
    element_length = formatter.width() + 2
    elements_per_line = max(1, int(math.floor((PRINT_OPTS.linewidth - indent) / (element_length))))
    char_per_line = element_length * elements_per_line  # NOTE(review): currently unused

    def _val_formatter(val, formatter=formatter):
        # Default-argument binding makes the formatter lookup local/fast.
        return formatter.format(val)

    # Preventing the entire tensor from being displayed to the terminal.
    # We (figuratively) "prune" the tensor for output
    if summarize and self.size_dim(0) > 2 * PRINT_OPTS.edgeitems:
        data = ([_val_formatter(val) for val in self[:PRINT_OPTS.edgeitems].tolist()] +
                [' ...'] +
                [_val_formatter(val) for val in self[-PRINT_OPTS.edgeitems:].tolist()])
    else:
        data = [_val_formatter(val) for val in self.tolist()]

    # Chunk the rendered elements into lines, then join with a hanging indent.
    data_lines = [data[i:i + elements_per_line] for i in range(0, len(data), elements_per_line)]
    lines = [', '.join(line) for line in data_lines]
    return '[' + (',' + '\n' + ' ' * (indent + 1)).join(lines) + ']'
def _tensor_str_with_formatter(self, indent, summarize, formatter):
    """Recursively render an n-D tensor body using `formatter`.

    1-D tensors delegate to _vector_str; higher ranks render each slice
    along dim 0 and nest the bracketed results.
    """
    dim = self.dim()
    # dim = self.ndim()
    # if dim == 0:
    #     return _scalar_str(self, formatter)
    if dim == 1:
        return _vector_str(self, indent, summarize, formatter)

    # Preventing the entire tensor from being displayed to the terminal.
    # We (figuratively) "prune" the tensor for output
    if summarize and self.size_dim(0) > 2 * PRINT_OPTS.edgeitems:
        slices = ([_tensor_str_with_formatter(self[i], indent + 1, summarize, formatter)
                   for i in range(0, PRINT_OPTS.edgeitems)] +
                  ['...'] +
                  [_tensor_str_with_formatter(self[i], indent + 1, summarize, formatter)
                   for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))])
    # If tensor is small enough to display to terminal
    else:
        slices = [_tensor_str_with_formatter(self[i], indent + 1, summarize, formatter)
                  for i in range(0, self.size_dim(0))]

    # (dim - 1) newlines between slices visually separates higher dimensions.
    tensor_str = (',' + '\n' * (dim - 1) + ' ' * (indent + 1)).join(slices)
    return '[' + tensor_str + ']'
def _tensor_str(self, indent):
    """Render a whole tensor body (everything inside the outer brackets)."""
    if self.numel() == 0:
        return '[]'
    # Summarize (elide the middle) once the element count crosses the threshold.
    summarize = self.numel() > PRINT_OPTS.threshold
    scan_source = get_summarized_data(self) if summarize else self
    formatter = _Formatter(scan_source)
    return _tensor_str_with_formatter(self, indent, summarize, formatter)
def _add_suffixes(tensor_str, suffixes, indent, force_newline):
    """Append suffixes (e.g. 'dtype=...') after the tensor body and close with ')'.

    Each suffix goes on the current line when it fits within
    PRINT_OPTS.linewidth, otherwise on a fresh indented line.
    """
    pieces = [tensor_str]
    last_line_len = len(tensor_str) - tensor_str.rfind('\n') + 1
    for suffix in suffixes:
        suffix_len = len(suffix)
        needs_break = force_newline or last_line_len + suffix_len + 2 > PRINT_OPTS.linewidth
        if needs_break:
            pieces.append(',\n' + ' ' * indent + suffix)
            last_line_len = indent + suffix_len
            # Only the first suffix can be force-wrapped.
            force_newline = False
        else:
            pieces.append(', ' + suffix)
            last_line_len += suffix_len + 2
    pieces.append(')')
    return ''.join(pieces)
def get_summarized_data(self):
    """Return a reduced copy of `self`, keeping only PRINT_OPTS.edgeitems
    elements at each end of every dimension that exceeds the display limit."""
    dim = self.dim()
    if dim == 0:
        return self
    if dim == 1:
        if self.size_dim(0) > 2 * PRINT_OPTS.edgeitems:
            # Keep only the leading and trailing edge elements.
            return np.concatenate((self[:PRINT_OPTS.edgeitems], self[-PRINT_OPTS.edgeitems:]))
        else:
            return self
    if self.size_dim(0) > 2 * PRINT_OPTS.edgeitems:
        # Keep the leading/trailing slices and summarize each recursively.
        start = [self[i] for i in range(0, PRINT_OPTS.edgeitems)]
        end = ([self[i]
                for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))])
        return np.stack([get_summarized_data(x) for x in (start + end)])
    else:
        return np.stack([get_summarized_data(x) for x in self])
def _str_intern(self):
    """Build the full 'tensor(...)' representation, including suffixes."""
    prefix = PRINT_OPTS.prefix
    indent = len(prefix)
    suffixes = []

    if self.numel() == 0:
        # Explicitly print the shape if it is not (0,), to match NumPy behavior
        if self.dim() != 1:
            suffixes.append('size=' + str(tuple(self.shape)))
        tensor_str = '[]'
    else:
        suffixes.append('dtype=' + str(self.dtype))
        tensor_str = _tensor_str(self, indent)

    return _add_suffixes(prefix + tensor_str, suffixes, indent, force_newline=False)
def _str(self):
    # Public entry point: stringify `self` (a tensor) for __str__/__repr__.
    return _str_intern(self)
| 2.3125 | 2 |
build/scripts/starter_project_release.py | p-podsiadly/ApprovalTests.cpp | 0 | 12760673 | <filename>build/scripts/starter_project_release.py
import os
import shutil
from git import Repo
from scripts.git_utilities import GitUtilities
from scripts.project_details import ProjectDetails
from scripts.release_details import ReleaseDetails
from scripts.utilities import assert_step, replace_text_in_file, use_directory, run, check_url_exists
class PrepareStarterProjectRelease:
    """Prepares the starter-project repo for a new release of the library."""

    @staticmethod
    def check_pre_conditions_for_starter_project_repo(details: ReleaseDetails) -> None:
        # The starter project must be a real (non-bare) checkout, on master.
        repo = Repo(details.locations.starter_project_dir)
        assert_step(not repo.bare)
        GitUtilities.check_branch_name(repo, 'master')

    @staticmethod
    def update_starter_project(details: ReleaseDetails) -> None:
        """Swap the old versioned single header for the new one and update references."""
        STARTER_PATH_OLD_SINGLE_HEADER = F"{details.locations.starter_project_dir}/lib/{details.old_single_header}"
        STARTER_PATH_NEW_SINGLE_HEADER = F"{details.locations.starter_project_dir}/lib/{details.new_single_header}"
        # Make sure starter project folder is clean
        project_dir = details.locations.starter_project_dir
        GitUtilities.reset_and_clean_working_directory(project_dir)
        shutil.copyfile(details.release_new_single_header, STARTER_PATH_NEW_SINGLE_HEADER)
        # Delete the last release:
        if os.path.exists(STARTER_PATH_OLD_SINGLE_HEADER):
            os.remove(STARTER_PATH_OLD_SINGLE_HEADER)
        else:
            raise RuntimeError(F"""
----------------------------------------------------------------
ERROR: Old header file does not exist:
{STARTER_PATH_OLD_SINGLE_HEADER}
Starting state of Starter Project does not match '{details.old_version.get_version_text()}'
Check whether:
1. There were uncommitted changes to version.ini in main project,
from a previous release preparation step.
2. The Starter Project repo needs pulling.
----------------------------------------------------------------
""")

        # Update the version in the "redirect" header:
        replace_text_in_file(
            F"{details.locations.starter_project_dir}/lib/{details.project_details.simulated_single_header_file}",
            details.old_version.get_version_text(),
            details.new_version.get_version_text())
        # Update the version number in the Visual Studio projects:
        PrepareStarterProjectRelease.update_solution_file(details,
            F"{details.locations.starter_project_dir}/visual-studio-2017/StarterProject.vcxproj")
        PrepareStarterProjectRelease.update_solution_file(details,
            F"{details.locations.starter_project_dir}/visual-studio-2019/StarterProject2019.vcxproj")

    @staticmethod
    def update_solution_file(details: ReleaseDetails, visual_studio_sln: str) -> None:
        # Point the project file at the new header name, when the file exists.
        if os.path.isfile(visual_studio_sln):
            replace_text_in_file(visual_studio_sln,
                                 details.old_single_header,
                                 details.new_single_header)
        else:
            print(f"Info: No Visual Studio solution file: {visual_studio_sln}")

    @staticmethod
    def check_starter_project_builds(details: ReleaseDetails) -> None:
        # Smoke test: the pre-configured cmake build directory must still build.
        with use_directory(F"{details.locations.starter_project_dir}/cmake-build-debug"):
            run(["cmake", "--build", "."])
class DeployStarterProjectRelease:
    """Publishes the prepared starter-project release: commit, push, verify."""

    @staticmethod
    def get_url_for_starter_project_single_header_for_version(project_details: ProjectDetails,
                                                              version_without_v: str) -> str:
        # Raw-GitHub URL of the versioned single header on the master branch.
        return F'https://raw.githubusercontent.com/approvals/' \
               F'{project_details.github_project_name}.StarterProject/master/lib/' \
               F'{project_details.library_folder_name}.v.{version_without_v}.hpp'

    @staticmethod
    def commit_starter_project(details: ReleaseDetails) -> None:
        # Commit everything in the starter-project working tree with a release message.
        message = F"Update to {details.project_details.github_project_name} {details.new_version_as_text()}"
        GitUtilities.commit_everything(details.locations.starter_project_dir, message)

    @staticmethod
    def push_starter_project(details: ReleaseDetails) -> None:
        with use_directory(details.locations.starter_project_dir):
            run(["git", "push", "origin", "master"])

    @staticmethod
    def publish_starter_project(details: ReleaseDetails) -> None:
        """Commit, push, then verify the new header is reachable on GitHub."""
        DeployStarterProjectRelease.commit_starter_project(details)
        DeployStarterProjectRelease.push_starter_project(details)
        assert_step(DeployStarterProjectRelease.check_starter_project_published(details),
                    "the starter project is published")

    @staticmethod
    def check_starter_project_published(details: ReleaseDetails) -> bool:
        # True when the raw single-header URL for the new version resolves.
        version = details.new_version.get_version_text_without_v()
        url = DeployStarterProjectRelease.get_url_for_starter_project_single_header_for_version(
            details.project_details, version)
        published = check_url_exists(url)
        return published
| 2.46875 | 2 |
lib/pystatsml/plot_utils.py | gautard/pystatsml | 123 | 12760674 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 29 10:58:31 2016
@author: <EMAIL>
"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.patches import Ellipse
def plot_cov_ellipse(cov, pos, nstd=2, ax=None, **kwargs):
    """
    Plot an `nstd`-sigma error ellipse for the 2x2 covariance matrix `cov`,
    centered at `pos`. Extra keyword arguments are forwarded to the
    matplotlib Ellipse patch.

    Parameters
    ----------
    cov : 2x2 covariance matrix the ellipse is based on
    pos : 2-element sequence [x0, y0] — center of the ellipse
    nstd : radius of the ellipse in standard deviations (default 2)
    ax : axis to draw on (defaults to the current axis)

    Returns
    -------
    The matplotlib ellipse artist that was added.
    """
    if ax is None:
        ax = plt.gca()

    # Principal axes via eigen-decomposition, largest eigenvalue first.
    eigvals, eigvecs = np.linalg.eigh(cov)
    descending = eigvals.argsort()[::-1]
    eigvals = eigvals[descending]
    eigvecs = eigvecs[:, descending]

    # The ellipse is rotated to align with the dominant eigenvector.
    angle = np.degrees(np.arctan2(*eigvecs[:, 0][::-1]))

    # Width and height are "full" widths (diameters), not radii.
    width, height = 2 * nstd * np.sqrt(eigvals)

    ellipse = Ellipse(xy=pos, width=width, height=height, angle=angle, **kwargs)
    ax.add_artist(ellipse)
    return ellipse
| 3.21875 | 3 |
tests/test_decorator_anonymous_dataframe_pandas.py | christianhelle/autofaker | 0 | 12760675 | <gh_stars>0
import unittest
from dataclasses import dataclass
import pandas
from autofaker import autopandas, fakepandas, autodata
class SimpleClassA:
    # Fixture with class-level attributes, used by the autofaker decorators.
    id = -1
    # NOTE(review): `name: 'test'` is only an annotation — no `name` attribute
    # is created at class level. Given `text = 'test'` below, `name = 'test'`
    # was probably intended; confirm whether autofaker reads the annotation
    # or the attribute before changing it.
    name: 'test'
    text = 'test'
class SimpleClassB:
    """Fixture class whose state is supplied entirely through __init__."""

    def __init__(self, id: int, name: str, text: str):
        self.id = id
        self.name = name
        self.text = text
class SimpleClassC:
    """Composite fixture holding one SimpleClassA and one SimpleClassB."""

    def __init__(self, a: SimpleClassA, b: SimpleClassB):
        self.a = a
        self.b = b
class AnonymousPandasDataFrameViaDecoratorTests(unittest.TestCase):
    """Checks that @autopandas / @fakepandas inject populated DataFrames
    built from plain (non-dataclass) fixture classes."""

    @autopandas(SimpleClassA)
    def test_create_anonymous_pandas_dataframe_returns_not_none(self, df: pandas.DataFrame):
        self.assertIsNotNone(df)

    @autopandas(SimpleClassA, 10)
    def test_create_anonymous_pandas_dataframe_with_rowcount_returns_not_empty(self, df: pandas.DataFrame):
        self.assertNotEqual(len(df.index), 0)

    @fakepandas(SimpleClassA)
    def test_create_fake_pandas_dataframe_returns_not_none(self, df: pandas.DataFrame):
        self.assertIsNotNone(df)

    @fakepandas(SimpleClassA, 10)
    def test_create_fake_pandas_dataframe_with_rowcount_returns_not_empty(self, df: pandas.DataFrame):
        self.assertNotEqual(len(df.index), 0)

    @autopandas(SimpleClassA)
    def test_can_create_anonymous_pandas_dataframes(self, cls):
        print(cls)
        self.assertIsNotNone(cls)

    # Fixture classes with __init__ arguments must also be supported.
    @autopandas(SimpleClassB)
    def test_can_create_anonymous_pandas_dataframes_from_class_with_constructor_arguments(self, cls):
        print(cls)
        self.assertIsNotNone(cls)

    # Nested fixture: constructor arguments that are themselves fixture classes.
    @autopandas(SimpleClassC)
    def test_can_create_anonymous_pandas_dataframes_from_class_with_constructor_class_arguments(self, cls):
        print(cls)
        self.assertIsNotNone(cls)
@dataclass
class DataClass:
    """Dataclass fixture mirroring SimpleClassB, for autofaker generation."""
    id: int
    name: str
    text: str
class AnonymousPandasDataFrameViaDecoratorFromDataClassTests(unittest.TestCase):
    """Same DataFrame-injection checks as above, but with a @dataclass fixture."""

    @autopandas(DataClass)
    def test_create_anonymous_pandas_dataframe_returns_not_none(self, df: pandas.DataFrame):
        self.assertIsNotNone(df)

    @autopandas(DataClass, 10)
    def test_create_anonymous_pandas_dataframe_with_rowcount_returns_not_empty(self, df: pandas.DataFrame):
        self.assertNotEqual(len(df.index), 0)

    @fakepandas(DataClass, 10)
    def test_create_fake_pandas_dataframe_with_rowcount_returns_not_empty(self, df: pandas.DataFrame):
        self.assertNotEqual(len(df.index), 0)

    @fakepandas(DataClass)
    def test_create_fake_pandas_dataframe_returns_not_none(self, df: pandas.DataFrame):
        self.assertIsNotNone(df)
class AutodataDecoratorIgnoresPandas(unittest.TestCase):
    """@autodata must leave pandas.DataFrame parameters unfilled (None)
    while still populating other parameter types."""

    @autodata()
    def test_autodata_decorator_ignores_pandas_dataframe(self, df: pandas.DataFrame):
        self.assertIsNone(df)

    @autodata()
    def test_autodata_decorator_ignores_only_pandas_dataframe(self, text: str, df: pandas.DataFrame):
        self.assertIsNotNone(text)
        self.assertIsNone(df)
| 2.875 | 3 |
Layers/reactionattention.py | esmou2/Kylearn-pytorch | 0 | 12760676 | <reponame>esmou2/Kylearn-pytorch<filename>Layers/reactionattention.py
import torch
import torch.nn as nn
import numpy as np
from Layers.bottlenecks import LinearBottleneckLayer
class ReactionDotProduction(nn.Module):
    """Scaled dot-product "reaction" attention.

    Computes softmax(query @ key^T / temperature), applies dropout, and
    gates `value` element-wise with the resulting attention weights.
    """

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, query, key, value):
        '''
        Arguments:
            query {Tensor, shape [n_head * batch, 1, n_depth_per_head]} -- depth
            key {Tensor, shape [n_head * batch, d_features, n_depth_per_head]} -- expansion
            value {Tensor, shape [n_head * batch, 1, d_features]} -- value

        Returns:
            output {Tensor, shape [n_head * batch, 1, d_features]} -- gated value
            attn {Tensor, shape [n_head * batch, 1, d_features]} -- reaction attention
        '''
        # Raw scores over the feature axis, scaled by the temperature.
        scores = torch.bmm(query, key.transpose(1, 2)) / self.temperature
        # Softmax over the last (feature) axis, then dropout.
        weights = self.dropout(self.softmax(scores))
        # Element-wise gating of the value by the attention weights.
        gated = torch.mul(weights, value)
        return gated, weights
class ReactionAttentionLayerV1(nn.Module):
    '''Multi-head reaction attention over a feature vector, conditioned on metadata.'''

    def __init__(self, expansion_layer, n_depth, d_features, d_meta, n_head, dropout,
                 use_bottleneck=True, d_bottleneck=None):
        # expansion_layer: module class producing the per-head key expansion;
        # its output is reshaped below, see the NOTE in forward().
        super().__init__()
        self.d_features = d_features
        self.d_meta = d_meta
        self.n_head = n_head
        self.n_depth = n_depth
        self.use_bottleneck = use_bottleneck
        self.expansion = expansion_layer(d_features=d_features, n_channel=n_head, n_depth=n_depth)  # output [batch, d_features, n_channel * n_depth]
        self.expansion.initialize_param(nn.init.xavier_normal_)
        # query, value map
        self.query = nn.Linear(d_meta, n_head * self.n_depth)
        self.value = nn.Linear(d_features, n_head * d_features)
        nn.init.xavier_normal_(self.query.weight)
        nn.init.xavier_normal_(self.value.weight)
        # Temperature = sqrt(n_depth), as in standard scaled dot-product attention.
        self.attention = ReactionDotProduction(temperature=np.power(self.n_depth, 0.5))
        self.layer_norm = nn.LayerNorm(d_features)
        self.fc = nn.Linear(n_head * d_features, d_features)
        nn.init.xavier_normal_(self.fc.weight)
        self.dropout = nn.Dropout(dropout)
        if use_bottleneck:
            self.bottleneck = LinearBottleneckLayer(d_features, d_bottleneck)

    def forward(self, features, meta):
        '''
        Arguments:
            features {Tensor, shape [batch, d_features]} -- feature vector
            meta {Tensor, shape [batch, d_meta]} -- metadata (may be categorical embeddings)

        Returns:
            output {Tensor, shape [batch, d_features]} -- output
            attn {Tensor, shape [n_head * batch, 1, d_features]} -- attention weights
        '''
        d_features, d_meta, n_head, n_depth_per_head = self.d_features, self.d_meta, self.n_head, self.n_depth
        batch_size, _ = features.size()
        residual = features  # saved for the post-attention skip connection

        query = self.query(meta).view(batch_size, 1, n_head, n_depth_per_head)
        # NOTE(review): the view below orders dims (n_depth, d_features, n_head)
        # while the trailing comment claims [batch, n_depth, n_head, d_features];
        # the permute further down compensates for *some* layout — confirm it
        # matches the expansion layer's actual memory layout.
        key = self.expansion(features).view(batch_size, n_depth_per_head, d_features, n_head)  # [batch, n_depth, n_head, d_features]
        value = self.value(features).view(batch_size, 1, n_head, d_features)
        # value = feature_1.repeat(1, n_head).view(batch_size, 1, n_head, d_features)

        # Fold the head dimension into the batch dimension for bmm.
        query = query.permute(2, 0, 1, 3).contiguous().view(-1, 1, n_depth_per_head)
        key = key.permute(2, 0, 3, 1).contiguous().view(-1, d_features, n_depth_per_head)
        value = value.permute(2, 0, 1, 3).contiguous().view(-1, 1, d_features)

        output, attn = self.attention(query, key, value)

        # Un-fold the heads and concatenate per-head outputs along features.
        output = output.view(n_head, batch_size, 1, d_features)
        output = output.permute(1, 2, 0, 3).contiguous().view(batch_size, -1)

        # Project back to d_features, then residual + layer norm.
        output = self.dropout(self.fc(output))
        output = self.layer_norm(output + residual)
        if self.use_bottleneck:
            output = self.bottleneck(output)
        return output, attn
| 2.484375 | 2 |
tests/strategy/test_memory.py | djmattyg007/freiner | 0 | 12760677 | import time
import pytest
from freiner.limits import RateLimitItemPerMinute, RateLimitItemPerSecond
from freiner.storage.memory import MemoryStorage
from freiner.strategies.fixed_window import FixedWindowRateLimiter
from freiner.strategies.fixed_window_elastic import FixedWindowElasticExpiryRateLimiter
from freiner.strategies.moving_window import MovingWindowRateLimiter
from ..util import freeze_time
@pytest.fixture
def storage() -> MemoryStorage:
    """Fresh in-memory storage backend for each test."""
    return MemoryStorage()
def test_fixed_window_simple(storage: MemoryStorage):
    """A 2-per-second fixed window allows exactly two hits, then rejects."""
    limiter = FixedWindowRateLimiter(storage)
    with freeze_time():
        limit = RateLimitItemPerSecond(2, 1)
        assert limiter.test(limit) is True
        assert limiter.hit(limit) is True
        assert limiter.test(limit) is True
        assert limiter.hit(limit) is True
        assert limiter.test(limit) is False
        assert limiter.hit(limit) is False
def test_fixed_window(storage: MemoryStorage):
    """The fixed window refills only when the 2-second window rolls over."""
    limiter = FixedWindowRateLimiter(storage)
    with freeze_time() as frozen_datetime:
        limit = RateLimitItemPerSecond(10, 2)
        start = time.time()
        assert all([limiter.hit(limit) for _ in range(0, 10)]) is True
        assert limiter.hit(limit) is False
        frozen_datetime.tick(1)
        # Still inside the same window: quota remains exhausted.
        assert limiter.hit(limit) is False
        window_stats = limiter.get_window_stats(limit)
        assert window_stats.reset_time == start + 2
        assert window_stats.remaining_count == 0
        frozen_datetime.tick(1)
        # Window rolled over: full quota is available again.
        assert limiter.get_window_stats(limit).remaining_count == 10
        assert limiter.hit(limit) is True
def test_fixed_window_with_elastic_expiry(storage: MemoryStorage):
    """Elastic expiry extends the window each time a hit is attempted."""
    limiter = FixedWindowElasticExpiryRateLimiter(storage)
    with freeze_time() as frozen_datetime:
        limit = RateLimitItemPerSecond(10, 2)
        start = time.time()
        assert all([limiter.hit(limit) for _ in range(0, 10)]) is True
        assert limiter.hit(limit) is False
        frozen_datetime.tick(1)
        assert limiter.hit(limit) is False
        window_stats = limiter.get_window_stats(limit)
        # three extensions to the expiry
        assert window_stats.reset_time == start + 3
        assert window_stats.remaining_count == 0
        frozen_datetime.tick(1)
        assert limiter.hit(limit) is False
        frozen_datetime.tick(3)
        # Past the (extended) expiry: a hit starts a brand-new window.
        start = time.time()
        assert limiter.hit(limit) is True
        window_stats = limiter.get_window_stats(limit)
        assert window_stats.reset_time == start + 2
        assert window_stats.remaining_count == 9
def test_moving_window_simple(storage: MemoryStorage):
    """A 2-per-second moving window allows exactly two hits, then rejects."""
    limiter = MovingWindowRateLimiter(storage)
    with freeze_time():
        limit = RateLimitItemPerSecond(2, 1)
        assert limiter.test(limit) is True
        assert limiter.hit(limit) is True
        assert limiter.test(limit) is True
        assert limiter.hit(limit) is True
        assert limiter.test(limit) is False
        assert limiter.hit(limit) is False
def test_moving_window(storage: MemoryStorage):
    """The moving window frees capacity continuously as old hits age out."""
    limiter = MovingWindowRateLimiter(storage)
    with freeze_time() as frozen_datetime:
        limit = RateLimitItemPerMinute(10)
        # Two hits every 10 seconds: the 10-per-minute quota is exhausted at 50s.
        for i in range(0, 5):
            assert limiter.hit(limit) is True
            assert limiter.hit(limit) is True
            assert limiter.get_window_stats(limit).remaining_count == 10 - ((i + 1) * 2)
            frozen_datetime.tick(10)
        assert limiter.get_window_stats(limit).remaining_count == 0
        assert limiter.hit(limit) is False
        frozen_datetime.tick(20)
        # The two oldest hit-pairs have aged out of the 60-second window.
        window_stats = limiter.get_window_stats(limit)
        assert window_stats.reset_time == time.time() + 30
        assert window_stats.remaining_count == 4
        frozen_datetime.tick(30)
        assert limiter.get_window_stats(limit).remaining_count == 10
| 1.953125 | 2 |
vsts/vsts/git/v4_1/models/git_fork_operation_status_detail.py | kenkuo/azure-devops-python-api | 0 | 12760678 | <reponame>kenkuo/azure-devops-python-api
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class GitForkOperationStatusDetail(Model):
    """GitForkOperationStatusDetail.

    :param all_steps: All valid steps for the forking process
    :type all_steps: list of str
    :param current_step: Index into AllSteps for the current step
    :type current_step: int
    :param error_message: Error message if the operation failed.
    :type error_message: str
    """

    # msrest wire-format mapping: attribute name -> REST JSON key and type.
    _attribute_map = {
        'all_steps': {'key': 'allSteps', 'type': '[str]'},
        'current_step': {'key': 'currentStep', 'type': 'int'},
        'error_message': {'key': 'errorMessage', 'type': 'str'}
    }

    def __init__(self, all_steps=None, current_step=None, error_message=None):
        super(GitForkOperationStatusDetail, self).__init__()
        self.all_steps = all_steps
        self.current_step = current_step
        self.error_message = error_message
| 1.9375 | 2 |
def soma(num1, num2):
    """Print and return the sum of the two numbers."""
    resultado = num1 + num2
    # Fixed garbled wording ("eh igual é" -> "é igual a").
    print(f'A soma dos numeros é igual a: {resultado}')
    return resultado
def sub(num1, num2):
    """Print and return the difference num1 - num2."""
    resultado = num1 - num2
    # Bug fix: message previously said "A soma" (sum) for a subtraction.
    print(f'A subtração dos numeros é igual a: {resultado}')
    return resultado
def mult(num1, num2):
    """Print and return the product of the two numbers."""
    resultado = num1 * num2
    # Bug fix: message previously said "A soma" (sum) for a multiplication.
    print(f'A multiplicação dos numeros é igual a: {resultado}')
    return resultado
def divi(num1, num2):
    """Print and return the quotient num1 / num2.

    Raises ZeroDivisionError when num2 is 0, like plain division.
    """
    resultado = num1 / num2
    # Bug fix: message previously said "A soma" (sum) for a division.
    print(f'A divisão dos numeros é igual a: {resultado}')
    return resultado
| 3.453125 | 3 |
hsploit/searcher/engine/string.py | nicolas-carolo/hsploit | 10 | 12760680 | import re
def str_contains_numbers(str):
    """
    Tell whether the given string holds at least one decimal digit.

    :param str: the string to inspect.
    :return: True when a digit occurs anywhere in the string, False otherwise.
    """
    match = re.search(r'\d', str)
    return match is not None
def str_is_num_version(str):
    """
    Tell whether the string contains a version number preceded by a space
    (e.g. 'wordpress 4.7.1').

    :param str: the string to inspect.
    :return: True when such a version number is present, False otherwise.
    """
    version_after_space = re.compile(r' \d+((\.\d+)+)?')
    return version_after_space.search(str) is not None
def word_is_num_version(str):
    """
    Tell whether the word contains a version number (digits optionally
    followed by dotted components, no leading space required).

    :param str: the word to inspect.
    :return: True when a version number is present, False otherwise.
    """
    return re.search(r'\d+((\.\d+)+)?', str) is not None
def str_contains_num_version_range(str):
    """
    Tell whether the string contains a version range of the form
    'A < B' where A and B are dotted version numbers.

    :param str: the string to inspect.
    :return: True when such a range is present, False otherwise.
    """
    range_pattern = r'\d+((\.\d+)+)? < \d+((\.\d+)+)?'
    return re.search(range_pattern, str) is not None
def str_contains_num_version_range_with_x(str):
    """
    Tell whether the string contains a version range where either side may
    use a wildcard component, e.g. '2.x < 3.x'.

    :param str: the string to inspect.
    :return: True when such a range is present, False otherwise.
    """
    range_pattern = r'\d+((\.\d+)+)?(\.x)? < \d+((\.\d+)+)?(\.x)?'
    return re.search(range_pattern, str) is not None
def get_vulnerability_extension(vulnerability_file):
    """
    Return the file extension (dot included) of a vulnerability file name.

    Note: the first dot-separated component is returned, so 'a.tar.gz'
    yields '.tar', matching the original behavior.

    :param vulnerability_file: file name such as 'exploit.py'.
    :return: the extension string, e.g. '.py'.
    """
    match = re.search(r'\.(?P<extension>\w+)', vulnerability_file)
    return '.' + match.group('extension')
| 4.125 | 4 |
example_in.py | roberhag/variables-to-argparse | 1 | 12760681 | <reponame>roberhag/variables-to-argparse<gh_stars>1-10
# Here follows example input:
# Audio synthesis parameters used as demo inputs for variables-to-argparse.
fs = 48000 # Sample rate (Hz)
base_freq = 100. # Base frequency (Hz)
bpm = 120 # Beats per minute
| 1.59375 | 2 |
class Solution:
    def is_isomorphic(self, s: str, t: str) -> bool:
        """Return True when s and t are isomorphic.

        Two strings are isomorphic when characters at matching positions
        always share the same first-occurrence index in their own string.
        """
        return all(s.find(ch) == t.find(t[pos]) for pos, ch in enumerate(s))

    def other_solution(self, s, t):
        """Alternative: build an s->t character mapping and reject any
        conflicting or duplicated assignment."""
        mapping = {}
        for pos, ch in enumerate(s):
            if mapping.get(ch):
                if mapping[ch] != t[pos]:
                    return False
            elif t[pos] in mapping.values():
                return False
            mapping[ch] = t[pos]
        return True
if __name__ == "__main__":
    # Bug fix: the class defines is_isomorphic, not test; the original call
    # raised AttributeError at runtime.
    print(Solution().is_isomorphic("abc", "bac"))
| 3.421875 | 3 |
smsymer/evm/exception/__init__.py | Troublor/smSymer | 3 | 12760683 | from .insufficientInputException import InsufficientInputException
from .invalidOperationException import InvalidOperationException
from .evmExecutionException import EvmExecutionException
| 1.15625 | 1 |
flask_request_validator/nested_json.py | reggermont/flask_request_validator | 0 | 12760684 | <filename>flask_request_validator/nested_json.py
from typing import Union, Dict, List, Tuple, Any
from .exceptions import (
JsonError,
RequiredJsonKeyError,
JsonListItemTypeError,
RulesError,
)
from .rules import CompositeRule, AbstractRule
class JsonParam:
    """
    Nested JSON validation.

    A JsonParam describes the expected shape of one JSON node: either a
    dict of key -> (rules / nested JsonParam) or, with ``as_list=True``, a
    list whose items are validated against ``rules_map``.  Validation
    collects JsonError instances instead of failing fast.
    """

    def __init__(
        self,
        rules_map: Union[
            Dict[str, Union[Dict, List, CompositeRule, 'JsonParam']],
            Union[CompositeRule, List[AbstractRule]],
        ],
        required: bool = True,
        as_list: bool = False,
    ) -> None:
        # Plain rule lists are normalized into CompositeRule so that
        # validation below can uniformly call .validate().
        if isinstance(rules_map, list):
            self.rules_map = CompositeRule(*rules_map)
        else:
            for k, rules in rules_map.items():
                if isinstance(rules, list):
                    rules_map[k] = CompositeRule(*rules)
            self.rules_map = rules_map
        self.required = required
        self.as_list = as_list  # JsonParam is list or dict

    def _check_list_item_type(self, nested: 'JsonParam', value: Any):
        """Check that a list item has a type compatible with the rules.

        Scalar rules (CompositeRule) accept None or str/int/float/bool;
        dict-shaped rules require a dict item.

        :raises JsonListItemTypeError
        """
        if isinstance(nested.rules_map, CompositeRule):
            if value is None:
                return
            if not isinstance(value, (str, int, float, bool,)):
                raise JsonListItemTypeError(False)
            return
        if isinstance(nested.rules_map, dict) and not isinstance(value, dict):
            raise JsonListItemTypeError()

    def _validate_list(
        self,
        value: Union[Dict, List],
        nested: 'JsonParam',
        depth: list,
        errors: List[JsonError],
    ) -> Tuple[Union[Dict, List], List]:
        """Validate every item of a list node, indexing errors by position."""
        n_err = {}
        for ix, node in enumerate(value):  # type: int, dict or list
            try:
                self._check_list_item_type(nested, node)
            except JsonListItemTypeError as e:
                n_err[ix] = e
                continue
            if isinstance(node, dict):
                # NOTE(review): this rebinds the outer list ``value`` to
                # _validate_dict's return (the node dict), and when no
                # rules_err occurs control falls through to the scalar
                # branch below where ``nested.rules_map.validate`` would be
                # called on a dict rules_map — confirm intended behavior.
                value, errors, rules_err = self._validate_dict(node, nested, depth, errors)
                if rules_err:
                    n_err[ix] = rules_err
                    continue
            try:
                # Scalar item: run the composite rules and store the
                # possibly-converted value back into the list.
                new_val = nested.rules_map.validate(value[ix])
                value[ix] = new_val
            except RulesError as e:
                n_err[ix] = e
        if n_err:
            errors = self._collect_errors(depth, errors, n_err, nested.as_list)
        return value, errors

    def _collect_errors(
        self,
        depth: list,
        errors: list,
        nested_errors: dict,
        as_list: bool = False,
    ) -> list:
        """Wrap accumulated per-key/per-index errors into one JsonError.

        The raise/except pattern attaches traceback context to the
        appended JsonError instance.
        """
        if nested_errors:
            try:
                raise JsonError(depth, nested_errors, as_list)
            except JsonError as e:
                errors.append(e)
        return errors

    def _validate_dict(
        self,
        value: Union[Dict, List],
        nested: 'JsonParam',
        depth: list,
        errors: List[JsonError],
    ) -> Tuple[Any, List[JsonError], Dict[str, RulesError]]:
        """Validate each declared key of a dict node against its rules."""
        err = dict()
        for key, rules in nested.rules_map.items():
            if key not in value:
                # Missing keys are reported by _check_required in validate().
                continue
            elif isinstance(rules, JsonParam):
                # Recurse into the nested schema, extending the error path.
                # NOTE(review): new_val is discarded here (value[key] is not
                # updated with the validated result) — confirm intended.
                new_val, errors = self.validate(value[key], rules, depth + [key], errors)
                continue
            try:
                new_val = rules.validate(value[key])
                value[key] = new_val
            except RulesError as e:
                err[key] = e
        return value, errors, err

    def _check_required(self, key: str, value: dict, rule: Any):
        """Raise when a required nested JsonParam key is absent.

        :raises RequiredJsonKeyError
        """
        if isinstance(rule, JsonParam) and rule.required and key not in value:
            raise RequiredJsonKeyError(key)

    def validate(
        self,
        value: Union[Dict, List],
        nested: 'JsonParam' = None,
        depth: list = None,
        errors: List[JsonError] = None,
    ) -> Tuple[Union[Dict, List], List]:
        """Entry point: validate ``value`` against this (or ``nested``) schema.

        Returns the (possibly converted) value plus the accumulated list of
        JsonError objects; ``depth`` tracks the JSON path for error reports,
        starting at 'root'.
        """
        depth = depth or ['root']
        errors = errors or []
        node_errors = dict()
        nested = nested or self
        if isinstance(nested.rules_map, dict) and not nested.as_list:
            # Report required-but-missing nested keys before validating.
            for key, rule in nested.rules_map.items():
                try:
                    self._check_required(key, value, rule)
                except RequiredJsonKeyError as e:
                    node_errors[key] = e
        if nested.as_list:
            value, errors = self._validate_list(value, nested, depth, errors)
            return value, errors
        value, errors, nested_errors = self._validate_dict(value, nested, depth, errors)
        node_errors.update(nested_errors)
        errors = self._collect_errors(depth, errors, node_errors)
        return value, errors
| 2.828125 | 3 |
whoville/cloudbreak/apis/v1util_api.py | balazsgaspar/whoville | 30 | 12760685 | # coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class V1utilApi(object):
    """
    Client for the Cloudbreak ``/v1/util`` REST endpoints.

    Originally produced by the swagger code generator; the eight endpoint
    method pairs shared byte-identical request boilerplate, which has been
    factored into :meth:`_invoke`.  Every public method keeps its original
    name, signature, error messages and behavior.
    """

    def __init__(self, api_client=None):
        """Use the given ApiClient, or fall back to the shared default one."""
        config = Configuration()
        if api_client:
            self.api_client = api_client
        else:
            if not config.api_client:
                config.api_client = ApiClient()
            self.api_client = config.api_client

    def _invoke(self, method_name, resource_path, http_method, response_type,
                declared, positional, kwargs, path_map=None, query_map=None,
                multi_query=(), has_body=False, required=()):
        """
        Build and dispatch one REST call; shared by every endpoint method.

        :param str method_name: public method name (used in error messages)
        :param str resource_path: URL template, e.g. '/v1/util/client/{version}'
        :param str http_method: 'GET' or 'POST'
        :param str response_type: swagger model name to deserialize into
        :param list declared: endpoint parameter names accepted via kwargs
        :param dict positional: values of the method's positional parameters
        :param dict kwargs: raw **kwargs forwarded by the caller (may hold
            callback, _return_http_data_only, _preload_content,
            _request_timeout in addition to declared parameters)
        :param dict path_map: python name -> URL placeholder for path params
        :param dict query_map: python name -> query-string key
        :param tuple multi_query: query keys serialized with 'multi' format
        :param tuple required: parameter names that must be present, not None
        :param bool has_body: whether a 'body' parameter is sent as JSON body
        """
        allowed = list(declared) + ['callback', '_return_http_data_only',
                                    '_preload_content', '_request_timeout']
        params = dict(positional)
        for key, val in iteritems(kwargs):
            if key not in allowed:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method %s" % (key, method_name)
                )
            params[key] = val
        for name in required:
            if (name not in params) or (params[name] is None):
                raise ValueError("Missing the required parameter `%s` when calling `%s`" % (name, method_name))

        collection_formats = {}
        path_params = {}
        for py_name, url_name in (path_map or {}).items():
            if py_name in params:
                path_params[url_name] = params[py_name]
        query_params = []
        for py_name, query_name in (query_map or {}).items():
            if py_name in params:
                query_params.append((query_name, params[py_name]))
                if query_name in multi_query:
                    collection_formats[query_name] = 'multi'
        body_params = params['body'] if has_body and 'body' in params else None

        # All /v1/util endpoints speak JSON and use token-based auth.
        header_params = {
            'Accept': self.api_client.select_header_accept(['application/json']),
            'Content-Type': self.api_client.select_header_content_type(['application/json']),
        }

        return self.api_client.call_api(resource_path, http_method,
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=[],
                                        files={},
                                        response_type=response_type,
                                        auth_settings=['tokenAuth'],
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)

    def check_client_version(self, version, **kwargs):
        """
        checks the client version

        Synchronous by default; pass a `callback` keyword to run
        asynchronously (the callback receives the response and the request
        thread is returned).

        :param str version: (required)
        :return: VersionCheckResult
        """
        kwargs['_return_http_data_only'] = True
        return self.check_client_version_with_http_info(version, **kwargs)

    def check_client_version_with_http_info(self, version, **kwargs):
        """
        checks the client version (full HTTP info variant)

        :param str version: (required)
        :return: VersionCheckResult
        """
        return self._invoke(
            'check_client_version', '/v1/util/client/{version}', 'GET',
            'VersionCheckResult',
            declared=['version'], positional={'version': version},
            kwargs=kwargs, path_map={'version': 'version'},
            required=('version',))

    def create_rds_database_util(self, **kwargs):
        """
        create a database for the service in the RDS if the connection could be created

        Synchronous by default; pass a `callback` keyword to run
        asynchronously.

        :param RDSBuildRequest body:
        :param list[str] target:
        :return: RdsBuildResult
        """
        kwargs['_return_http_data_only'] = True
        return self.create_rds_database_util_with_http_info(**kwargs)

    def create_rds_database_util_with_http_info(self, **kwargs):
        """
        create a database for the service in the RDS if the connection could be created
        (full HTTP info variant)

        :param RDSBuildRequest body:
        :param list[str] target:
        :return: RdsBuildResult
        """
        return self._invoke(
            'create_rds_database_util', '/v1/util/rds-database', 'POST',
            'RdsBuildResult',
            declared=['body', 'target'], positional={}, kwargs=kwargs,
            query_map={'target': 'target'}, multi_query=('target',),
            has_body=True)

    def get_cloud_storage_matrix(self, **kwargs):
        """
        returns supported cloud storage for stack version

        Define stack version at least at patch level eg. 2.6.0.
        Synchronous by default; pass a `callback` keyword to run
        asynchronously.

        :param str stack_version:
        :return: list[CloudStorageSupportedResponse]
        """
        kwargs['_return_http_data_only'] = True
        return self.get_cloud_storage_matrix_with_http_info(**kwargs)

    def get_cloud_storage_matrix_with_http_info(self, **kwargs):
        """
        returns supported cloud storage for stack version (full HTTP info variant)

        :param str stack_version:
        :return: list[CloudStorageSupportedResponse]
        """
        return self._invoke(
            'get_cloud_storage_matrix', '/v1/util/cloudstoragematrix', 'GET',
            'list[CloudStorageSupportedResponse]',
            declared=['stack_version'], positional={}, kwargs=kwargs,
            query_map={'stack_version': 'stackVersion'})

    def get_custom_parameters(self, **kwargs):
        """
        returns custom parameters

        Synchronous by default; pass a `callback` keyword to run
        asynchronously.

        :param ParametersQueryRequest body:
        :return: ParametersQueryResponse
        """
        kwargs['_return_http_data_only'] = True
        return self.get_custom_parameters_with_http_info(**kwargs)

    def get_custom_parameters_with_http_info(self, **kwargs):
        """
        returns custom parameters (full HTTP info variant)

        :param ParametersQueryRequest body:
        :return: ParametersQueryResponse
        """
        return self._invoke(
            'get_custom_parameters', '/v1/util/custom-parameters', 'POST',
            'ParametersQueryResponse',
            declared=['body'], positional={}, kwargs=kwargs, has_body=True)

    def get_file_system_parameters(self, **kwargs):
        """
        returns filesystem parameters

        Synchronous by default; pass a `callback` keyword to run
        asynchronously.

        :param StructuredParametersQueryRequest body:
        :return: StructuredParameterQueriesResponse
        """
        kwargs['_return_http_data_only'] = True
        return self.get_file_system_parameters_with_http_info(**kwargs)

    def get_file_system_parameters_with_http_info(self, **kwargs):
        """
        returns filesystem parameters (full HTTP info variant)

        :param StructuredParametersQueryRequest body:
        :return: StructuredParameterQueriesResponse
        """
        return self._invoke(
            'get_file_system_parameters', '/v1/util/filesystem-parameters',
            'POST', 'StructuredParameterQueriesResponse',
            declared=['body'], positional={}, kwargs=kwargs, has_body=True)

    def get_knox_services(self, blueprint_name, **kwargs):
        """
        returns supported knox services

        Synchronous by default; pass a `callback` keyword to run
        asynchronously.

        :param str blueprint_name: (required)
        :return: list[ExposedServiceResponse]
        """
        kwargs['_return_http_data_only'] = True
        return self.get_knox_services_with_http_info(blueprint_name, **kwargs)

    def get_knox_services_with_http_info(self, blueprint_name, **kwargs):
        """
        returns supported knox services (full HTTP info variant)

        :param str blueprint_name: (required)
        :return: list[ExposedServiceResponse]
        """
        return self._invoke(
            'get_knox_services', '/v1/util/knoxservices/{blueprintName}',
            'GET', 'list[ExposedServiceResponse]',
            declared=['blueprint_name'],
            positional={'blueprint_name': blueprint_name}, kwargs=kwargs,
            path_map={'blueprint_name': 'blueprintName'},
            required=('blueprint_name',))

    def get_stack_matrix_util(self, **kwargs):
        """
        returns default ambari details for distinct HDP and HDF

        Synchronous by default; pass a `callback` keyword to run
        asynchronously.

        :return: StackMatrix
        """
        kwargs['_return_http_data_only'] = True
        return self.get_stack_matrix_util_with_http_info(**kwargs)

    def get_stack_matrix_util_with_http_info(self, **kwargs):
        """
        returns default ambari details for distinct HDP and HDF
        (full HTTP info variant)

        :return: StackMatrix
        """
        return self._invoke(
            'get_stack_matrix_util', '/v1/util/stackmatrix', 'GET',
            'StackMatrix', declared=[], positional={}, kwargs=kwargs)

    def test_ambari_database_util(self, **kwargs):
        """
        tests a database connection parameters

        Synchronous by default; pass a `callback` keyword to run
        asynchronously.

        :param AmbariDatabaseDetails body:
        :return: AmbariDatabaseTestResult
        """
        kwargs['_return_http_data_only'] = True
        return self.test_ambari_database_util_with_http_info(**kwargs)

    def test_ambari_database_util_with_http_info(self, **kwargs):
        """
        tests a database connection parameters (full HTTP info variant)

        :param AmbariDatabaseDetails body:
        :return: AmbariDatabaseTestResult
        """
        return self._invoke(
            'test_ambari_database_util', '/v1/util/ambari-database', 'POST',
            'AmbariDatabaseTestResult',
            declared=['body'], positional={}, kwargs=kwargs, has_body=True)
Python3/0987-Vertical-Order-Traversal-of-a-Binary-Tree/soln-1.py | wyaadarsh/LeetCode-Solutions | 5 | 12760686 | <filename>Python3/0987-Vertical-Order-Traversal-of-a-Binary-Tree/soln-1.py
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def verticalTraversal(self, root: 'TreeNode') -> 'List[List[int]]':
        """Return node values grouped by column (left to right).

        BFS tags every node with (column, row); within a column the values
        are ordered by row and then by value, per LeetCode 987's rules.
        """
        columns = collections.defaultdict(list)
        pending = collections.deque([(root, 0, 0)])
        while pending:
            node, col, row = pending.popleft()
            columns[col].append((row, node.val))
            for child, delta in ((node.left, -1), (node.right, 1)):
                if child:
                    pending.append((child, col + delta, row + 1))
        return [[val for _, val in sorted(columns[col])]
                for col in sorted(columns)]
| 4 | 4 |
muak/defines.py | ok65/muak | 0 | 12760687 | <gh_stars>0
# Vertical alignment options.
VALIGN_TOP = "top"
VALIGN_CENTER = "center"
VALIGN_BASELINE = "baseline"
VALIGN_BOTTOM = "bottom"
# Horizontal alignment options.
HALIGN_LEFT = "left"
HALIGN_CENTER = "center"
HALIGN_RIGHT = "right"
# Propagation modes; "local"/"nolocal" presumably suppress propagation or
# the local invoke respectively -- consumers are not visible in this file.
PROPAGATE_UP = "up"
PROPAGATE_DOWN = "down"
NO_PROPAGATION = "local"
NO_LOCAL_INVOKE = "nolocal"
# Weight values (names suggest font weights).
BOLD = "bold"
NORMAL = "normal"
SEMIBOLD = "semibold"
# Bare alignment values (unprefixed variants of HALIGN_*).
LEFT = "left"
CENTER = "center"
RIGHT = "right"
ubpf/asm_parser.py | dongxingshui/ubpf | 466 | 12760688 | #!/usr/bin/env python
from __future__ import print_function
from parcon import *
from collections import namedtuple
# Characters accepted inside a hexadecimal literal (both cases).
hexchars = '0123456789abcdefABCDEF'
# AST node types produced by the parser: Reg is a register operand,
# Imm an immediate value, MemRef a "[reg+offset]" memory reference.
Reg = namedtuple("Reg", ["num"])
Imm = namedtuple("Imm", ["value"])
MemRef = namedtuple("MemRef", ["reg", "offset"])
def keywords(vs):
    """Return a parser matching any one of the literal keywords in *vs*."""
    alternatives = [Keyword(SignificantLiteral(word)) for word in vs]
    return First(*alternatives)
# Numeric literals: "0x..." hex or a run of decimal digits.
hexnum = SignificantLiteral('0x') + +CharIn(hexchars)
decnum = +Digit()
# Offset: mandatory sign; int(x, 0) accepts both hex and decimal forms.
offset = (CharIn("+-") + Exact(hexnum | decnum))[flatten]["".join][lambda x: int(x, 0)]
# Immediate operand: optional sign, wrapped in an Imm node.
imm = (-CharIn("+-") + Exact(hexnum | decnum))[flatten]["".join][lambda x: int(x, 0)][Imm]
# Register operand: "r" followed by an integer, e.g. "r3".
reg = Literal('r') + integer[int][Reg]
# Memory reference "[rN]" / "[rN+off]" / "[rN-off]"; offset defaults to 0.
memref = (Literal('[') + reg + Optional(offset, 0) + Literal(']'))[lambda x: MemRef(*x)]
# ALU mnemonics; each binary op also gets a 32-bit variant ("add32", ...).
unary_alu_ops = ['neg', 'neg32', 'le16', 'le32', 'le64', 'be16', 'be32', 'be64']
binary_alu_ops = ['add', 'sub', 'mul', 'div', 'or', 'and', 'lsh', 'rsh',
                  'mod', 'xor', 'mov', 'arsh']
binary_alu_ops.extend([x + '32' for x in binary_alu_ops])
alu_instruction = \
    (keywords(unary_alu_ops) + reg) | \
    (keywords(binary_alu_ops) + reg + "," + (reg | imm))
# Load/store size suffixes (w/h/b/dw -- presumably word/half/byte/dword).
mem_sizes = ['w', 'h', 'b', 'dw']
mem_store_reg_ops = ['stx' + s for s in mem_sizes]
mem_store_imm_ops = ['st' + s for s in mem_sizes]
mem_load_ops = ['ldx' + s for s in mem_sizes]
mem_instruction = \
    (keywords(mem_store_reg_ops) + memref + "," + reg) | \
    (keywords(mem_store_imm_ops) + memref + "," + imm) | \
    (keywords(mem_load_ops) + reg + "," + memref) | \
    (keywords(["lddw"]) + reg + "," + imm)
# Conditional jumps take reg, reg-or-imm and a branch offset; "ja" only an
# offset; "call" an immediate; "exit" stands alone (wrapped into a tuple).
jmp_cmp_ops = ['jeq', 'jgt', 'jge', 'jlt', 'jle', 'jset', 'jne', 'jsgt', 'jsge', 'jslt', 'jsle']
jmp_instruction = \
    (keywords(jmp_cmp_ops) + reg + "," + (reg | imm) + "," + offset) | \
    (keywords(['ja']) + offset) | \
    (keywords(['call']) + imm) | \
    (keywords(['exit'])[lambda x: (x, )])
instruction = alu_instruction | mem_instruction | jmp_instruction
# A program: instructions optionally separated by ';', then end of input.
start = ZeroOrMore(instruction + Optional(Literal(';'))) + End()
def parse(source):
    """Parse assembly *source* text with the module-level ``start`` grammar.

    Returns the list of instruction tuples built by the parser actions.
    """
    return start.parse_string(source)
# CLI entry point: parse the given assembly file (or stdin via '-') and
# print the repr of every parsed instruction tuple.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Assembly parser", formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('file', type=argparse.FileType('r'), default='-')
    args = parser.parse_args()
    result = parse(args.file.read())
    for inst in result:
        print(repr(inst))
| 2.734375 | 3 |
acme/agents/jax/bc/agent_test.py | ostap-viniavskyi/acme | 2,650 | 12760689 | # Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the BC agent."""
from absl.testing import absltest
from absl.testing import parameterized
from acme import specs
from acme import types
from acme.agents.jax import bc
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.testing import fakes
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from jax.scipy import special
import numpy as np
import optax
def make_networks(
    spec: specs.EnvironmentSpec,
    discrete_actions: bool = False) -> networks_lib.FeedForwardNetwork:
  """Builds the feed-forward policy network used by the BC agent.

  For discrete action spaces the policy is a plain MLP producing logits;
  for continuous ones it is a LayerNorm MLP followed by a tanh-squashed
  Normal distribution head sized to the flattened action dimensionality.
  """
  if discrete_actions:
    head_size = spec.actions.num_values
  else:
    head_size = np.prod(spec.actions.shape, dtype=int)

  def _policy_fn(obs, is_training=False, key=None):
    # is_training/key are accepted (and ignored) so train/test dependent
    # modules such as dropout could be added without changing the signature.
    del is_training, key
    if discrete_actions:
      return hk.nets.MLP([64, 64, head_size])(obs)
    return hk.Sequential([
        networks_lib.LayerNormMLP([64, 64], activate_final=True),
        networks_lib.NormalTanhDistribution(head_size),
    ])(obs)

  transformed = hk.without_apply_rng(hk.transform(_policy_fn))

  # A single batched zero observation is enough to trigger parameter creation.
  batched_dummy_obs = utils.add_batch_dim(utils.zeros_like(spec.observations))
  return networks_lib.FeedForwardNetwork(
      lambda key: transformed.init(key, batched_dummy_obs), transformed.apply)
class BCTest(parameterized.TestCase):
  """Smoke tests: a few BCLearner steps on fake environments per loss type."""

  @parameterized.parameters(
      ('logp',),
      ('mse',),
      ('peerbc',)
  )
  def test_continuous_actions(self, loss_name):
    with chex.fake_pmap_and_jit():

      num_sgd_steps_per_step = 1
      num_steps = 5

      # Create a fake environment to test with.
      environment = fakes.ContinuousEnvironment(
          episode_length=10, bounded=True, action_dim=6)

      spec = specs.make_environment_spec(environment)
      dataset_demonstration = fakes.transition_dataset(environment)
      dataset_demonstration = dataset_demonstration.map(
          lambda sample: types.Transition(*sample.data))
      dataset_demonstration = dataset_demonstration.batch(8).as_numpy_iterator()

      # Construct the agent.
      network = make_networks(spec)

      if loss_name == 'logp':
        loss_fn = bc.logp(
            logp_fn=lambda dist_params, actions: dist_params.log_prob(actions))
      elif loss_name == 'mse':
        loss_fn = bc.mse(
            sample_fn=lambda dist_params, key: dist_params.sample(seed=key))
      elif loss_name == 'peerbc':
        base_loss_fn = bc.logp(
            logp_fn=lambda dist_params, actions: dist_params.log_prob(actions))
        loss_fn = bc.peerbc(base_loss_fn, zeta=0.1)
      else:
        raise ValueError

      learner = bc.BCLearner(
          network=network,
          random_key=jax.random.PRNGKey(0),
          loss_fn=loss_fn,
          optimizer=optax.adam(0.01),
          demonstrations=dataset_demonstration,
          num_sgd_steps_per_step=num_sgd_steps_per_step)

      # Train the agent
      for _ in range(num_steps):
        learner.step()

  @parameterized.parameters(
      ('logp',),
      ('rcal',))
  def test_discrete_actions(self, loss_name):
    with chex.fake_pmap_and_jit():
      num_sgd_steps_per_step = 1
      num_steps = 5

      # Create a fake environment to test with.
      environment = fakes.DiscreteEnvironment(
          num_actions=10, num_observations=100, obs_shape=(10,),
          obs_dtype=np.float32)
      spec = specs.make_environment_spec(environment)
      dataset_demonstration = fakes.transition_dataset(environment)
      dataset_demonstration = dataset_demonstration.map(
          lambda sample: types.Transition(*sample.data))
      dataset_demonstration = dataset_demonstration.batch(8).as_numpy_iterator()

      # Construct the agent.
      network = make_networks(spec, discrete_actions=True)

      def logp_fn(logits, actions):
        # Numerically stable log-softmax: shift logits by their max first.
        max_logits = jnp.max(logits, axis=-1, keepdims=True)
        logits = logits - max_logits
        logits_actions = jnp.sum(
            jax.nn.one_hot(actions, spec.actions.num_values) * logits, axis=-1)
        log_prob = logits_actions - special.logsumexp(logits, axis=-1)
        return log_prob

      if loss_name == 'logp':
        loss_fn = bc.logp(logp_fn=logp_fn)
      elif loss_name == 'rcal':
        base_loss_fn = bc.logp(logp_fn=logp_fn)
        loss_fn = bc.rcal(base_loss_fn, discount=0.99, alpha=0.1)
      else:
        raise ValueError

      learner = bc.BCLearner(
          network=network,
          random_key=jax.random.PRNGKey(0),
          loss_fn=loss_fn,
          optimizer=optax.adam(0.01),
          demonstrations=dataset_demonstration,
          num_sgd_steps_per_step=num_sgd_steps_per_step)

      # Train the agent
      for _ in range(num_steps):
        learner.step()
if __name__ == '__main__':
  # Delegate to absltest so the parameterized cases are discovered and run.
  absltest.main()
| 1.984375 | 2 |
simtool/polymerxtal/struct2lammps/data4lammps/data4Lammps.py | janash/polymerxtal | 1 | 12760690 | import os
import string
from .Gasteiger import getGasteiger_parameters, getGasteigerCharge
from .getForcefield import *
from .handleAtoms import Atomtypes, AtomsInfo, AtomLink
from .handleBonds import *
from .PCFF import (
PCFF_getAngletypes,
PCFF_getDihstypes,
PCFF_getImpstypes,
PCFF_getAtommass,
PCFF_getPairCoeffs,
PCFF_readPairCoeffs,
PCFF_getBondCoeffs,
PCFF_getAngleCoeffs,
getBBCoeffs,
getBACoeffs,
PCFF_getDihsCoeffs,
getMBTCoeffs,
getEBTCoeffs,
getATCoeffs,
getAATCoeffs,
getBB13Coeffs,
PCFF_getImpsCoeffs,
getAACoeffs,
)
from .qeq import Qeq_charge_equilibration
##############################################################################################
def checkAtomtype(inpfile):
    """Validate atom types against the DREIDING parameter file, remapping
    unknown ones onto default DREIDING types.

    Matching runs in three passes of decreasing specificity: first two
    characters of the type name, then the first character, then a final
    fallback to "C_3". When anything is remapped, the new type table is
    written to ``atom_type_reassigned.dat`` and a warning banner to
    ``Datafile_warnings1.txt``.

    Returns ``(atomtypes, changed)`` where *changed* lists
    ``[original_type, replacement_type]`` pairs.
    """
    atomtypes = Atomtypes(inpfile)
    Forcefieldfile = getDreidingParamFile()
    typedefault = ("H_", "C_3", "N_3", "O_3", "F_", "S_3", "Cl", "I_", "Br_")

    # Collect the known atom types from the ATOMTYPES ... END section.
    atypes = []
    found_section = False
    with open(Forcefieldfile, "r") as fin:
        dataline = fin.readline()
        while dataline != "" and dataline != "\n" and not found_section:
            words = dataline[0 : len(dataline) - 1]
            if str(words).upper() == "ATOMTYPES":
                found_section = True
                dataline = fin.readline()
                words = dataline[0 : len(dataline) - 1].split()
                while str(words[0]).upper() != "END":
                    atypes.append(str(words[0]))
                    dataline = fin.readline()
                    words = dataline[0 : len(dataline) - 1].split()
            dataline = fin.readline()

    anychange = "NO"
    changed = []
    # Pass 1: match on the first two characters of the type name.
    for i in range(len(atomtypes)):
        atomtype = atomtypes[i][1]
        if atomtype not in atypes:
            anychange = "YES"
            for deftype in typedefault:
                if atomtype[0:2] == deftype[0:2]:
                    atomtypes[i][1] = deftype
                    changed.append([atomtype, deftype])
    # Pass 2: match on the first character only.
    for i in range(len(atomtypes)):
        atomtype = atomtypes[i][1]
        if atomtype not in atypes:
            anychange = "YES"
            for deftype in typedefault:
                if atomtype[0] == deftype[0]:
                    atomtypes[i][1] = deftype
                    changed.append([atomtype, deftype])
    # Pass 3: anything still unknown falls back to carbon "C_3".
    for i in range(len(atomtypes)):
        atomtype = atomtypes[i][1]
        if atomtype not in atypes:
            anychange = "YES"
            deftype = "C_3"
            atomtypes[i][1] = deftype
            changed.append([atomtype, deftype])

    if anychange == "YES":
        # Persist the remapped type table and a human-readable warning.
        with open("atom_type_reassigned.dat", "w") as fout:
            for i in range(len(atomtypes)):
                fout.write(str(atomtypes[i][0]) + " " + atomtypes[i][1] + "\n")
        banner = "##==============Warning: Force field parameters============================"
        with open("Datafile_warnings1.txt", "w") as wout:
            wout.write(banner + "\n")
            wout.write("## Atom type is re-assigned as following:" + "\n")
            wout.write("##" + str(changed) + "\n")
            wout.write(banner + "\n")
    return atomtypes, changed
##############################################################################################
def printCoeffs(fout, ptitle, ptypes, ptypecoeffs):
    """Write one coefficient section to *fout*: a blank line, *ptitle*, a
    blank line, then one tab-separated row per entry of *ptypes*.

    *ptypecoeffs* is keyed by 1-based type ID, hence the ``i + 1`` lookup.
    """
    fout.write("\n")
    fout.write(ptitle + "\n")
    fout.write("\n")
    for i in range(len(ptypes)):
        # join is linear; the original built the row with repeated +=.
        outline = "".join(str(value) + " \t" for value in ptypecoeffs[i + 1])
        fout.write(outline + "\n")
##############################################################################################
# Write out force field parameters.
def outputDreidingCoeffs(fout, atomtypes, bondtypes, angletypes, dihstypes, impstypes):
    """Append DREIDING force-field coefficient sections to *fout*.

    Writes Pair, Bond, Angle, Dihedral and Improper Coeffs sections. Any
    non-empty warning strings returned by the coefficient getters are
    collected into ``Datafile_warnings2.txt``.
    """
    warning1 = getPairCoeffs(atomtypes)  # coeffs are in file "paircoeffs.txt"
    paircoeffs = readPairCoeffs()
    fout.write("\n")
    fout.write("Pair Coeffs" + "\n")
    fout.write("\n")
    for i in range(len(paircoeffs)):
        fout.write(
            "%3i %12.6f %12.6f %s %s"
            % (
                paircoeffs[i][0],
                paircoeffs[i][1],
                paircoeffs[i][2],
                paircoeffs[i][3],
                paircoeffs[i][4],
            )
            + "\n"
        )
    fout.write("\n")
    fout.write("Bond Coeffs" + "\n")
    fout.write("\n")
    bondcoeffs, warning2 = getBondCoeffs(bondtypes)
    for i in range(len(bondtypes)):
        fout.write(
            "%3i %12.6f %12.6f %s%s%s%s"
            % (
                bondcoeffs[i][0],
                bondcoeffs[i][1],
                bondcoeffs[i][2],
                str(" # "),
                bondcoeffs[i][3],
                str(" "),
                bondcoeffs[i][4],
            )
            + "\n"
        )
    fout.write("\n")
    fout.write("Angle Coeffs" + "\n")
    fout.write("\n")
    anglecoeffs, warning3 = getAngleCoeffs(angletypes)
    for i in range(len(angletypes)):
        fout.write(
            "%3i %12.6f %12.6f %s%s%s"
            % (
                anglecoeffs[i][0],
                anglecoeffs[i][1],
                anglecoeffs[i][2],
                str(" # X "),
                anglecoeffs[i][3],
                str(" X "),
            )
            + "\n"
        )
    fout.write("\n")
    fout.write("Dihedral Coeffs" + "\n")
    fout.write("\n")
    dihscoeffs, warning4 = getDihsCoeffs(dihstypes)
    for i in range(len(dihstypes)):
        # Note: coefficient index 3 is deliberately printed before index 2.
        fout.write(
            "%3i %12.6f %3i %3i %s%s%s%s%s%s%s%s"
            % (
                dihscoeffs[i][0],
                dihscoeffs[i][1],
                dihscoeffs[i][3],
                dihscoeffs[i][2],
                str(" # "),
                dihscoeffs[i][4],
                str(" "),
                dihscoeffs[i][5],
                str(" "),
                dihscoeffs[i][6],
                str(" "),
                dihscoeffs[i][7],
            )
            + "\n"
        )
    fout.write("\n")
    fout.write("Improper Coeffs" + "\n")
    fout.write("\n")
    impscoeffs, warning5 = getImpsCoeffs(impstypes)
    for i in range(len(impscoeffs)):
        fout.write(
            "%3i %12.6f %12.6f %s%s%s"
            % (
                impscoeffs[i][0],
                impscoeffs[i][1],
                impscoeffs[i][2],
                str(" # "),
                impscoeffs[i][3],
                str(" X X X "),
            )
            + "\n"
        )
    if (
        warning1 != ""
        or warning2 != ""
        or warning3 != ""
        or warning4 != ""
        or warning5 != ""
    ):
        with open("Datafile_warnings2.txt", "w") as wout:
            wout.write("##" + warning1 + "\n")
            wout.write("##" + warning2 + "\n")
            wout.write("##" + warning3 + "\n")
            wout.write("##" + warning4 + "\n")
            wout.write("##" + warning5 + "\n")
            wout.write(
                "##==============Warning: Force field parameters============================"
                + "\n"
            )
##############################################################################################
# Write out force field parameters.
def outputPCFFCoeffs(fout, atomtypes, bondtypes, angletypes, dihstypes, impstypes):
    """Append PCFF force-field coefficient sections to *fout*.

    Bond and angle coefficients are computed first because the cross-term
    sections below take those coefficient tables as inputs.
    """
    PCFF_getPairCoeffs(atomtypes)  # coeffs are in file "paircoeffs.txt"
    paircoeffs = PCFF_readPairCoeffs()
    printCoeffs(fout, "Pair Coeffs", atomtypes, paircoeffs)
    bondcoeffs = PCFF_getBondCoeffs(bondtypes)
    printCoeffs(fout, "Bond Coeffs", bondtypes, bondcoeffs)
    anglecoeffs = PCFF_getAngleCoeffs(angletypes)
    printCoeffs(fout, "Angle Coeffs", angletypes, anglecoeffs)
    # Cross terms derived from the bond/angle coefficient tables.
    BBcoeffs = getBBCoeffs(angletypes, bondtypes, bondcoeffs)
    printCoeffs(fout, "BondBond Coeffs", angletypes, BBcoeffs)
    BAcoeffs = getBACoeffs(angletypes, bondtypes, bondcoeffs)
    printCoeffs(fout, "BondAngle Coeffs", angletypes, BAcoeffs)
    dihscoeffs = PCFF_getDihsCoeffs(dihstypes)
    printCoeffs(fout, "Dihedral Coeffs", dihstypes, dihscoeffs)
    MBTcoeffs = getMBTCoeffs(dihstypes, bondtypes, bondcoeffs)
    printCoeffs(fout, "MiddleBondTorsion Coeffs", dihstypes, MBTcoeffs)
    EBTcoeffs = getEBTCoeffs(dihstypes, bondtypes, bondcoeffs)
    printCoeffs(fout, "EndBondTorsion Coeffs", dihstypes, EBTcoeffs)
    ATcoeffs = getATCoeffs(dihstypes, angletypes, anglecoeffs)
    printCoeffs(fout, "AngleTorsion Coeffs", dihstypes, ATcoeffs)
    AATcoeffs = getAATCoeffs(dihstypes, angletypes, anglecoeffs)
    printCoeffs(fout, "AngleAngleTorsion Coeffs", dihstypes, AATcoeffs)
    BB13coeffs = getBB13Coeffs(dihstypes, bondtypes, bondcoeffs)
    printCoeffs(fout, "BondBond13 Coeffs", dihstypes, BB13coeffs)
    impscoeffs = PCFF_getImpsCoeffs(impstypes)
    printCoeffs(fout, "Improper Coeffs", impstypes, impscoeffs)
    AAcoeffs = getAACoeffs(impstypes, angletypes, anglecoeffs)
    printCoeffs(fout, "AngleAngle Coeffs", impstypes, AAcoeffs)
########################################################################
def getFileName():
    """Return the structure name: the first whitespace-delimited token of
    the first line of ``structure.name`` in the working directory.

    Fix: the file handle was never closed; use a context manager.
    """
    with open("structure.name", "r") as fin:
        dataline = fin.readline()
    # Drop the trailing newline (original slice semantics preserved).
    words = dataline[0 : len(dataline) - 1].split()
    structureName = words[0]
    return structureName
########################################################################
def getForcefield():
    """Return the force-field name: the first whitespace-delimited token of
    the first line of ``forcefield.name`` in the working directory.

    Fix: the file handle was never closed; use a context manager.
    """
    with open("forcefield.name", "r") as fin:
        dataline = fin.readline()
    words = dataline[0 : len(dataline) - 1].split()
    forcefieldName = words[0]
    return forcefieldName
########################################################################
def readPairCoeffs():
    """Parse LJ pair coefficients from ``LJpaircoeffs.txt``.

    Each line is ``<ignored> <type> <D0> <R0> <C1> <C2>``; numeric fields
    are parsed with ``ast.literal_eval`` (safe replacement for the original
    ``eval``, identical results for numeric literals). The handle is now
    closed via a context manager. Returns a list of
    ``[atomtype, D0, R0, C1, C2]`` rows.
    """
    from ast import literal_eval
    paircoeffs = []
    with open("LJpaircoeffs.txt", "r") as fin:
        dataline = fin.readline()
        while dataline != "":
            # NOTE(review): like the original, a blank line in the file
            # would raise IndexError -- assumes well-formed input.
            words = dataline[0 : len(dataline) - 1].split()
            atomtype = literal_eval(words[1])
            D0 = literal_eval(words[2])
            R0 = literal_eval(words[3])
            C1 = words[4]
            C2 = words[5]
            paircoeffs.append([atomtype, D0, R0, C1, C2])
            dataline = fin.readline()
    return paircoeffs
########################################################################
# Assumes cubic cell
def createReaxDatafile(
    forcefield, structureName, xlo, xhi, ylo, yhi, zlo, zhi, chargeMethod
):
    """Write a ReaxFF-style LAMMPS data file (atoms only, orthogonal box).

    Reads atom types from ``atom_type.dat`` and atoms from ``atoms.dat``,
    writes ``<structureName>_reaxFF.data`` and returns its name.
    *chargeMethod* is accepted for signature parity but unused here.
    """
    inpfile = "atom_type.dat"
    if str(forcefield).upper() == "DREIDING":
        # DREIDING types may be remapped onto known defaults.
        atomtypes, anychange = checkAtomtype(inpfile)
    else:
        atomtypes = Atomtypes(inpfile)
        anychange = []
    print("Atomtypes total=", len(atomtypes))
    inpfile = "atoms.dat"
    baseatoms = AtomsInfo(inpfile)
    print("Atoms total=", len(baseatoms))
    natomtype = len(atomtypes)
    totalatoms = len(baseatoms)
    atommass = getAtommass(atomtypes)
    datafile = structureName + "_reaxFF.data"
    with open(datafile, "w") as fout:
        # Header, counts and box bounds.
        fout.write("LAMMPS data file using " + forcefield + " for " + structureName + "\n")
        fout.write("\n")
        fout.write(str(totalatoms) + " atoms" + "\n")
        fout.write(str(natomtype) + " atom types" + "\n")
        fout.write("\n")
        fout.write(xlo + " " + xhi + " xlo xhi" + "\n")
        fout.write(ylo + " " + yhi + " ylo yhi" + "\n")
        fout.write(zlo + " " + zhi + " zlo zhi" + "\n")
        fout.write("\n")
        fout.write("Masses" + "\n")
        fout.write("\n")
        for i in range(len(atommass)):
            atomtype = atommass[i][2]
            fout.write(
                "%3i %12.6f %s%s"
                % (atommass[i][0], atommass[i][1], str(" # "), atomtype)
                + "\n"
            )
        fout.write("\n")
        fout.write("Atoms # full" + "\n")
        fout.write("\n")
        for i in range(len(baseatoms)):
            dataline = str(baseatoms[i])
            # Bug fix: string.split(s, sep) was removed in Python 3; use
            # str.split, matching the rest of this module.
            w = dataline[1 : len(dataline) - 1].split(",")
            # NOTE(review): eval on fields of the generated atoms.dat record;
            # acceptable for trusted tool-generated input only.
            fout.write(
                (
                    "%6d %3d %3d %10.5f %15.8f %15.8f %15.8f"
                    % (
                        eval(w[0]),
                        eval(w[1]),
                        eval(w[2]),
                        eval(w[3]),
                        eval(w[4]),
                        eval(w[5]),
                        eval(w[6]),
                    )
                )
                + "\n"
            )
        fout.write("\n")
    print(datafile + " created!")
    return datafile
####################################################################
def _write_connectivity_section(fout, title, records):
    """Write one connectivity section (Bonds/Angles/Dihedrals/Impropers):
    a blank line, *title*, a blank line, then each record's list repr with
    the outer brackets stripped and its comma-separated fields re-emitted
    space-separated (identical to the original inline loops)."""
    fout.write("\n")
    fout.write(title + "\n")
    fout.write("\n")
    for record in records:
        dataline = str(record)
        fields = dataline[1 : len(dataline) - 1].split(",")
        fout.write("".join(str(field) + " " for field in fields) + "\n")


def createDatafile(
    forcefield, structureName, xlo, xhi, ylo, yhi, zlo, zhi, xy, xz, yz, chargeMethod
):
    """Build a full LAMMPS data file ``<structureName>.data``.

    Reads type/atom/bond tables from ``.tmp/types/``, derives angles,
    dihedrals and impropers from the bond graph, equilibrates charges with
    *chargeMethod* ("Gasteiger" or "QEq"), writes all sections plus the
    force-field coefficients for DREIDING or PCFF, and merges any warning
    files. Returns the data file name.
    """
    inpfile = ".tmp/types/newatom_type.dat"
    if str(forcefield).upper() == "DREIDING":
        atomtypes, anychange = checkAtomtype(inpfile)
    else:
        atomtypes = Atomtypes(inpfile)
        anychange = []
    inpfile = ".tmp/types/newatoms.dat"
    baseatoms = AtomsInfo(inpfile)
    # Propagate any DREIDING default-type remapping into the bond types.
    inpfile = ".tmp/types/newbond_type.dat"
    bondtypes = getBondtypes(inpfile)
    for i in range(len(bondtypes)):
        atom1type = bondtypes[i][1]
        atom2type = bondtypes[i][2]
        for j in range(len(anychange)):
            replaced = anychange[j][0]
            defatype = anychange[j][1]
            if atom1type.upper() == replaced.upper():
                bondtypes[i][1] = defatype
            if atom2type.upper() == replaced.upper():
                bondtypes[i][2] = defatype
    inpfile = ".tmp/types/newbonds.dat"
    basebonds = getBonds(inpfile, 0, 1)
    print("Equilibrating charge... \n")
    if chargeMethod == "Gasteiger":
        # NOTE(review): this rebinds the *forcefield* parameter from
        # forcefield.name; the PCFF/DREIDING branches below then use the
        # rebound value -- confirm this is intended.
        forcefield = getForcefield()
        Gparas = getGasteiger_parameters(forcefield)
        Q = getGasteigerCharge(Gparas, atomtypes, baseatoms, basebonds)
    elif chargeMethod == "QEq":
        Q = Qeq_charge_equilibration(baseatoms)
    # NOTE(review): any other chargeMethod leaves Q undefined and raises
    # NameError below -- callers must pass "Gasteiger" or "QEq".
    atomlinks = AtomLink(baseatoms, basebonds)
    baseangles = createAngles(atomlinks)
    if str(forcefield).upper() == "PCFF":
        angletypes, baseangles = PCFF_getAngletypes(baseangles, baseatoms, atomtypes)
    if str(forcefield).upper() == "DREIDING":
        angletypes, baseangles = getAngletypes(baseangles, baseatoms, atomtypes)
    basedihs = createDihedrals(atomlinks, basebonds)
    if str(forcefield).upper() == "PCFF":
        dihstypes, basedihs = PCFF_getDihstypes(basedihs, baseatoms, atomtypes)
    if str(forcefield).upper() == "DREIDING":
        dihstypes, basedihs = getDihstypes(basedihs, baseatoms, atomtypes)
    baseimps = createImpropers(atomlinks)
    if str(forcefield).upper() == "PCFF":
        impstypes, baseimps = PCFF_getImpstypes(baseimps, baseatoms, atomtypes)
    if str(forcefield).upper() == "DREIDING":
        impstypes, baseimps = getImpstypes(baseimps, baseatoms, atomtypes)
    # Total quantities.
    natomtype = len(atomtypes)
    nbondtype = len(bondtypes)
    nangletype = len(angletypes)
    ndihstype = len(dihstypes)
    nimpstype = len(impstypes)
    totalatoms = len(baseatoms)
    totalbonds = len(basebonds)
    totalangles = len(baseangles)
    totaldihs = len(basedihs)
    totalimps = len(baseimps)
    atommass = getAtommass(atomtypes)
    if str(forcefield).upper() == "PCFF":
        atommass = PCFF_getAtommass(atomtypes)
    # Header of the LAMMPS data file.
    datafile = structureName + ".data"
    fout = open(datafile, "w")
    fout.write("LAMMPS data file using " + forcefield + " for " + structureName + "\n")
    fout.write("\n")
    fout.write(str(totalatoms) + " atoms" + "\n")
    fout.write(str(totalbonds) + " bonds" + "\n")
    fout.write(str(totalangles) + " angles" + "\n")
    fout.write(str(totaldihs) + " dihedrals" + "\n")
    fout.write(str(totalimps) + " impropers" + "\n")
    fout.write("\n")
    fout.write(str(natomtype) + " atom types" + "\n")
    fout.write(str(nbondtype) + " bond types" + "\n")
    fout.write(str(nangletype) + " angle types" + "\n")
    fout.write(str(ndihstype) + " dihedral types" + "\n")
    fout.write(str(nimpstype) + " improper types" + "\n")
    fout.write("\n")
    fout.write(xlo + " " + xhi + " xlo xhi" + "\n")
    fout.write(ylo + " " + yhi + " ylo yhi" + "\n")
    fout.write(zlo + " " + zhi + " zlo zhi" + "\n")
    if xy == "0.0" and xz == "0.0" and yz == "0.0":
        fout.write("\n")
    else:
        # Triclinic tilt factors.
        fout.write(xy + " " + xz + " " + yz + " xy xz yz" + "\n")
        fout.write("\n")
    fout.write("Masses" + "\n")
    fout.write("\n")
    for i in range(len(atommass)):
        fout.write(
            "%3i %12.6f %s%s"
            % (atommass[i][0], atommass[i][1], str(" # "), atommass[i][2])
            + "\n"
        )
    # Atom records; charge column is replaced by the equilibrated Q[i+1].
    fout.write("\n")
    fout.write("Atoms # full" + "\n")
    fout.write("\n")
    for i in range(len(baseatoms)):
        dataline = str(baseatoms[i])
        w = dataline[1 : len(dataline) - 1].split(",")
        fout.write(
            (
                "%6d %3d %3d %10.5f %15.8f %15.8f %15.8f %3d %3d %3d"
                % (
                    eval(w[0]),
                    eval(w[1]),
                    eval(w[2]),
                    Q[i + 1],
                    eval(w[4]),
                    eval(w[5]),
                    eval(w[6]),
                    eval(w[7]),
                    eval(w[8]),
                    eval(w[9]),
                )
            )
            + "\n"
        )
    # Connectivity sections (previously four duplicated loops whose inner
    # loop index shadowed the outer one).
    _write_connectivity_section(fout, "Bonds", basebonds)
    _write_connectivity_section(fout, "Angles", baseangles)
    _write_connectivity_section(fout, "Dihedrals", basedihs)
    _write_connectivity_section(fout, "Impropers", baseimps)
    # Force-field coefficient sections.
    if str(forcefield).upper() == "DREIDING":
        outputDreidingCoeffs(
            fout, atomtypes, bondtypes, angletypes, dihstypes, impstypes
        )
    if str(forcefield).upper() == "PCFF":
        outputPCFFCoeffs(fout, atomtypes, bondtypes, angletypes, dihstypes, impstypes)
    fout.close()
    print(datafile + " created!")
    # Merge warning files (uses POSIX cat/rm, as the original did).
    if os.path.exists("Datafile_warnings.txt"):
        cmd2 = "rm Datafile_warnings.txt"
        os.system(cmd2)
    if os.path.exists("Datafile_warnings1.txt"):
        cmd1 = "cat Datafile_warnings1.txt >>Datafile_warnings.txt"
        cmd2 = "rm Datafile_warnings1.txt"
        os.system(cmd1)
        os.system(cmd2)
    if os.path.exists("Datafile_warnings2.txt"):
        cmd1 = "cat Datafile_warnings2.txt >>Datafile_warnings.txt"
        cmd2 = "rm Datafile_warnings2.txt"
        os.system(cmd1)
        os.system(cmd2)
    return datafile
########################################################################
| 2.109375 | 2 |
Chapter02/02_06.py | JeffreyAsuncion/LearningPythonStdLib | 0 | 12760691 | <filename>Chapter02/02_06.py
# Itertools Part 2
import itertools
# Permutations: Order matters - some copies with same inputs but in different order
# NOTE: iterating a dict yields its keys, so the first loop permutes 1, 2, 3.
election = {
    1 : "Barb",
    2 : "Karen",
    3 : "Erin"
}
for p in itertools.permutations(election):
    print(p)
# Permuting the values gives orderings of the candidate names instead.
for p1 in itertools.permutations(election.values()):
    print(p1)
# Combinations: Order does not matter - no copies with same inputs
colorsForPainting = ["Red","Blue","Purple","Orange","Yellow","Pink"]
# All unordered pairs of colors.
for c in itertools.combinations(colorsForPainting,2):
    print(c)
nepugia/compression/huffmanc.py | latot/python-nepugia | 0 | 12760692 | <reponame>latot/python-nepugia
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2015 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from bitarray import bitarray
from ..util import StringIO
from ..util.file_io import chunked_copy
class HuffmanNode(object):
    """Node of a Huffman decoding tree rebuilt from a serialized bitstream.

    Serialization format (as consumed by ``build_tree``): for each leaf, a
    run of 1-bits tells how many times to expand down from the first active
    node, a 0-bit terminates the run, and the next SEGMENT_SIZE bits hold
    the leaf's byte value.
    """

    # Number of payload bits stored per leaf (one byte).
    SEGMENT_SIZE = 8

    @classmethod
    def build_tree(cls, bitstream, cursor=0, root_node=None):
        """Reconstruct a tree from *bitstream* starting at bit *cursor*.

        Returns ``(root_node, cursor)`` with *cursor* just past the
        serialized tree. Raises ValueError if the stream ends mid-parse.
        """
        if root_node is None:
            root_node = cls()
        working_node = root_node.find_first_active()
        while working_node is not None:
            # Count the run of 1-bits: the expansion depth for this leaf.
            left_depth = cursor
            try:
                while bitstream[cursor]:
                    cursor += 1
            except IndexError as err:
                raise ValueError('Tree parsing aborted at bit: 0x%08X' % cursor, err)
            left_depth = cursor - left_depth
            cursor += 1  # skip the terminating 0-bit
            # Read the SEGMENT_SIZE-bit leaf value.
            value = bitstream[cursor:cursor+cls.SEGMENT_SIZE].tobytes()
            cursor += cls.SEGMENT_SIZE
            for _ in range(left_depth):
                working_node = working_node.expand_node()
            working_node.value = value
            working_node = root_node.find_first_active()
        return (root_node, cursor)

    def __init__(self):
        self.L = None  # left child
        self.R = None  # right child
        self.is_leaf = False
        self.is_active = True  # not yet assigned a value or expanded
        self._value = '\x00'

    def __repr__(self):
        return '{cls}(value={v})[{L},{R}]'.format(
            cls=self.__class__.__name__,
            v=repr(self._value),
            L=self.L,
            R=self.R,
        )

    @property
    def value(self):
        """Leaf payload; assigning it marks this node as a settled leaf."""
        return self._value

    @value.setter
    def value(self, new_value):
        self.is_active = False
        self.is_leaf = True
        self._value = new_value

    @property
    def children(self):
        """The ``(left, right)`` child pair."""
        return (self.L, self.R)

    def bilateral_expand(self):
        """Turn this node into an internal node with two fresh active children."""
        self.is_active = False
        self.is_leaf = False
        self.L = self.__class__()
        self.R = self.__class__()
        return self.children

    def expand_node(self):
        """Expand both children and descend into the left one."""
        return self.bilateral_expand()[0]

    def find_first_active(self):
        """Depth-first, left-first search for the next active (unfilled) node.

        Returns None when the tree is complete (no active nodes remain).
        """
        if self.is_leaf:
            return None
        if self.is_active:
            return self
        if self.L.is_active:
            return self.L
        left = self.L.find_first_active()
        if left is not None:
            return left
        if self.R.is_active:
            return self.R
        right = self.R.find_first_active()
        if right is not None:
            return right
        return None
class HuffmanCoding(object):
    """Huffman (de)compressor driven by a serialized tree (see HuffmanNode)."""
    def __init__(self, node_cls=HuffmanNode):
        # Tree-node class used when parsing the serialized Huffman tree.
        self._node_class = node_cls
    def compress_stream(self, input_buffer, output_buffer):
        """Compression is not supported by this codec.

        Bug fix: ``raise NotImplemented`` raised a TypeError (NotImplemented
        is a constant, not an exception class); NotImplementedError is the
        correct exception for an unimplemented method.
        """
        raise NotImplementedError
    def compress(self, input_data):
        """Compress ``input_data`` via compress_stream (currently unsupported)."""
        output_handle = StringIO()
        self.compress_stream(StringIO(input_data), output_handle)
        return output_handle.getvalue()
    def decompress_stream(self, input_buffer, output_buffer, output_size):
        """Decode ``output_size`` bytes from ``input_buffer`` into ``output_buffer``.

        Reads the serialized tree first, then walks the tree one bit at a
        time (1 = right, 0 = left) emitting each leaf's value.  Raises
        ValueError on truncated input or a malformed tree.
        """
        input_bits = bitarray(endian='big')
        chunked_copy(input_buffer.read, input_bits.frombytes)
        output_head = output_buffer.tell()
        root_node, cursor = self._node_class.build_tree(input_bits)
        while (output_buffer.tell() - output_head) < output_size:
            working_node = root_node
            while not working_node.is_leaf:
                try:
                    chu = input_bits[cursor]
                except IndexError as err:
                    raise ValueError('Data parsing aborted at bit: 0x%08X' % cursor, err)
                cursor += 1
                # 1-bit walks right, 0-bit walks left.
                working_node = working_node.R if chu else working_node.L
                if working_node is None:
                    raise ValueError('Data parsing aborted, invalid working node')
            output_buffer.write(working_node.value)
    def decompress(self, input_data, output_size):
        """Decompress ``input_data`` and return ``output_size`` decoded bytes."""
        output_handle = StringIO()
        self.decompress_stream(StringIO(input_data), output_handle, output_size)
        return output_handle.getvalue()
def compress(data):
    """Module-level convenience wrapper around HuffmanCoding.compress."""
    return HuffmanCoding().compress(data)
def decompress(data, data_size):
    """Module-level convenience wrapper around HuffmanCoding.decompress."""
    return HuffmanCoding().decompress(data, data_size)
| 2.015625 | 2 |
application.py | Samb55/AWS | 1 | 12760693 | <reponame>Samb55/AWS
from flask import Flask, render_template, request, redirect, url_for, session
from flaskext.mysql import MySQL
import bcrypt
# Elastic Beanstalk looks for a module-level ``application``; keep it aliased
# to the conventional Flask ``app`` name.
application = app = Flask(__name__, static_url_path='/static')
# Bug fix: the original line was syntactically broken (a redacted secret left
# a dangling quote: ``app.secret_key = <KEY>'``).  Use a placeholder here and
# override it at deploy time; never commit the real key to source control.
app.secret_key = 'change-me'  # TODO(review): load the real key from env/config
#app.config['SESSION_TYPE'] = 'filesystem'
# RDS connection settings -- replace the placeholders with real values.
app.config['MYSQL_DATABASE_HOST'] = 'endpoint of the database from RDS'
app.config['MYSQL_DATABASE_USER'] = 'username of the database from RDS'
app.config['MYSQL_DATABASE_PASSWORD'] = 'Password of the database from RDS'
app.config['MYSQL_DATABASE_DB'] = 'name of the of the database from RDS'
mysql = MySQL(app)
@app.route('/')
def home():
    """Render the landing page."""
    return render_template("home.html")
@app.route('/register', methods=["GET", "POST"])
def register():
    """Render the signup form (GET) or create a new user account (POST)."""
    if request.method == 'GET':
        return render_template("register.html")
    else:
        name = request.form['name']
        email = request.form['email']
        password = request.form['password']
        # Bug fix: this line had been mangled by credential redaction
        # (``<PASSWORD>.hashpw(..., <PASSWORD>())``); the intent is a salted
        # bcrypt hash of the submitted password.
        hash_password = bcrypt.hashpw(password.encode('utf8'), bcrypt.gensalt())
        cur = mysql.get_db().cursor()
        # Parameterized query -- user input is never interpolated into SQL.
        cur.execute("INSERT INTO users (name, email, password) VALUES (%s,%s,%s)", (name, email, hash_password,))
        cur.connection.commit()
        session['name'] = request.form['name']
        session['email'] = request.form['email']
        return redirect(url_for('home'))
@app.route('/login', methods=["GET", "POST"])
def login():
    """Render the login form (GET) or authenticate a user (POST)."""
    if request.method == 'POST':
        email = request.form['email']
        password = request.form['password'].encode('utf-8')
        cur = mysql.get_db().cursor()
        cur.execute("SELECT * FROM users WHERE email=%s", (email,))
        user = cur.fetchone()
        print(user)
        cur.close()
        if user and len(user) > 0:
            # assumes row layout (id, name, email, password) -- TODO confirm
            # against the users table schema.
            if bcrypt.checkpw(password, user[3].encode('utf-8')):
                session['name'] = user[1]
                session['email'] = user[2]
                return render_template("schd.html")
            else:
                return "Error password and email not match"
        else:
            # Unknown email: send the visitor to the registration form.
            return render_template("register.html")
    else:
        return render_template("login.html")
@app.route('/logout', methods=["GET", "POST"])
def logout():
    """Clear the session and show the login page."""
    session.clear()
    return render_template("login.html")
@app.route('/finalschd')
def finalschd():
    """Render the finalized-schedule page."""
    return render_template("finalschd.html")
@app.route('/schd')
def schd():
    """Render the schedule-builder page."""
    return render_template("schd.html")
@app.route('/programs')
def programs():
    """Render the programs listing page."""
    return render_template("programs.html")
if __name__ == "__main__":
    # NOTE(review): debug=True must not be enabled in production.
    app.run(debug=True)
| 2.6875 | 3 |
woid/apps/services/migrations/0006_auto_20150902_1522.py | emognato/project | 229 | 12760694 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: restate Story.content_type choices and make
    Story.date auto-populate on insert (auto_now_add)."""
    dependencies = [
        ('services', '0005_auto_20150901_1253'),
    ]
    operations = [
        migrations.AlterField(
            model_name='story',
            name='content_type',
            field=models.CharField(blank=True, max_length=1, null=True, choices=[(b'T', b'text'), (b'U', b'url'), (b'I', b'image')]),
        ),
        migrations.AlterField(
            model_name='story',
            name='date',
            field=models.DateField(auto_now_add=True),
        ),
    ]
| 1.617188 | 2 |
aoc11.py | UnrelatedString/advent-of-code-2021 | 0 | 12760695 | <reponame>UnrelatedString/advent-of-code-2021<filename>aoc11.py
import sys
# Advent of Code 2021, day 11: simulate 100 steps of "dumbo octopus" energy
# levels on a grid and count the total number of flashes.
ls = [l for l in iter(input,'')]#sys.stdin]
#ns = [n for n in eval(input())]#OPTIONS
# Grid as a dict mapping complex coordinates (x + y*1j) to energy levels.
# NOTE: the name 'os' shadows the stdlib module here -- it is the grid dict.
os = {}
for y,r in enumerate(ls):
    for x,n in enumerate(r):
        os[complex(x,y)] = int(n)
fs = 0
for _ in range(100):
    # Every octopus gains one energy at the start of the step.
    for o in os:
        os[o] += 1
    fd = set()
    # Cascade: flash any cell above 9 that has not flashed this step,
    # bump its 8 neighbours, then rescan from the start ('break' restarts
    # the scan; the for/else exits once no new flash is found).
    while True:
        for o in os:
            if os[o] > 9 and o not in fd:
                fs += 1
                fd.add(o)
                for s in 1,-1,1j,-1j,1+1j,1-1j,-1+1j,-1-1j:
                    if o+s in os:
                        os[o+s] += 1
                break
        else:
            break
    # Flashed cells reset to zero for the next step.
    for o in os:
        if os[o] > 9:
            os[o] = 0
print(fs)
| 2.953125 | 3 |
functions/exercise/02_add_and_subtract.py | Galchov/python-fundamentals | 0 | 12760696 | <gh_stars>0
def sum_numbers(first_int, second_int):
    """Return the sum of the two given integers."""
    return first_int + second_int
def subtract(third_int):
    """Returns the difference between the
    result of sum_numbers and the third integer"""
    # NOTE(review): relies on the module-level globals number_1/number_2
    # instead of parameters, so it only works after those are defined below.
    diff = sum_numbers(first_int=number_1, second_int=number_2) - third_int
    return diff
def add_and_subtract(first_int, second_int, third_int):
    """Run the sum/subtract pipeline and return the final difference.

    Bug fix: the original called both helpers but discarded their results and
    implicitly returned None, contradicting its own docstring ("returns the
    other two functions").  Returning the difference is backward compatible
    because the existing caller ignored the return value.
    """
    sum_numbers(first_int, second_int)
    return subtract(third_int)
# Read the three integers and run the exercise pipeline.
number_1 = int(input())
number_2 = int(input())
number_3 = int(input())
add_and_subtract(number_1, number_2, number_3)
print(subtract(number_3))
# def sum_numbers(num_1: int, num_2: int):
# """Returns the sum of the two arguments"""
#
# total = num_1 + num_2
#
# return total
#
# def subtract(sum_1: int, num_3: int):
# """Returns the difference between sum_numbers
# and num_3"""
#
# difference = sum_1 - num_3
#
# return difference
#
# def add_and_subtract(num_1: int, num_2: int, num_3: int):
# """Receives all the three integers and
# returns the other two functions"""
#
# sum_1 = sum_numbers(num_1, num_2)
# result = subtract(sum_1, num_3)
#
# return result
#
# number_1 = int(input())
# number_2 = int(input())
# number_3 = int(input())
#
# print(add_and_subtract(number_1, number_2, number_3)) | 4.1875 | 4 |
htsinfer_barcode_kmer.v2.py | buffalo0124/python_practice | 0 | 12760697 | <reponame>buffalo0124/python_practice<filename>htsinfer_barcode_kmer.v2.py
"""Infer potential barcode k-mers from sample data"""
from enum import Enum
from typing import List, Tuple
class Outcomes(Enum):
    """Sentinel strings returned when input validation fails."""
    invalid_nuc_requested = "invalid_nucleotide_requested"
    invalid_number_requested = "invalid_number_requested"
def group_kmers(input_kmers: List[str], kmer_size: float) -> Tuple[List[str], int]:
    """Group potential barcodes by k-mer size.

    Args:
        input_kmers: list of potential barcode strings.
        kmer_size: k-mer length to select.

    Returns:
        (selected_barcode, count): the barcodes whose length equals
        ``kmer_size`` and how many there are, or the
        ``invalid_nucleotide_requested`` sentinel string when any barcode
        contains a character other than A/C/G/T.

    Bug fixes: the original validation line was missing its leading ``#``
    (a syntax error) and referenced an undefined ``kmer_input`` name; the
    check also compared the whole barcode list against ``["A","C","T","G"]``,
    which rejected essentially every input.  The intent (per the Outcomes
    enum) appears to be per-character nucleotide validation -- confirmed
    against the module docstring, but worth a second review.
    """
    # Validate that every barcode is made only of valid nucleotide characters.
    for candidate in input_kmers:
        if any(ch not in "ACGT" for ch in candidate):
            return Outcomes.invalid_nuc_requested.value
    # Keep only the barcodes of the requested k-mer length.
    selected_barcode = [b for b in input_kmers if len(b) == kmer_size]
    return selected_barcode, len(selected_barcode)
def similarity_kmers(barcode: List[str]) -> List[Tuple[str, str, int, float, str]]:
    """Position-dependent similarity of a list of same-length barcodes.

    Every unordered pair (including a barcode with itself) is scored by the
    fraction of positions at which the two strings agree.  Pairs scoring
    >= 0.7 are "High score", <= 0.3 "Low score", the rest "Middle".

    Args:
        barcode: list of equal-length barcode strings.

    Returns:
        List of tuples (first, second, kmer_size, frequency, label) for the
        high-scoring pairs.

    Bug fixes: the return annotation used the undefined name ``Tuples``
    (a NameError at definition time), and the function only printed the
    high-scoring pairs while its docstring promised a return value.  The
    prints are kept and the list is now also returned -- backward compatible
    since callers ignored the previous ``None`` result.
    """
    high_pairs = []
    for idx, first in enumerate(barcode):
        # Start the inner scan at ``idx`` so each pair is compared once.
        for second in barcode[idx:]:
            matches = sum(1 for a, b in zip(first, second) if a == b)
            score_percentage = float(matches) / float(len(first))
            # Thresholds chosen by the original author; adjustable if needed.
            if score_percentage >= 0.7:
                score_value = "High score"
            elif score_percentage <= 0.3:
                score_value = "Low score"
            else:
                score_value = "Middle"
            # Potential barcodes are expected to show high position-dependent
            # agreement, so only high-scoring pairs are reported.
            if score_value == "High score":
                print(first, second, len(first), score_percentage, score_value)
                high_pairs.append(
                    (first, second, len(first), score_percentage, score_value))
    return high_pairs
| 3.09375 | 3 |
tools/dia_dll.py | xumoyan/engine | 5,823 | 12760698 | <reponame>xumoyan/engine<filename>tools/dia_dll.py
#!/usr/bin/env python3
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script is based on chromium/chromium/main/tools/clang/scripts/update.py.
It is used on Windows platforms to copy the correct msdia*.dll to the
clang folder, as a "gclient hook".
"""
import os
import shutil
import stat
import sys
# Path constants. (All of these should be absolute paths.)
# Directory containing this script.
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
# Prebuilt clang toolchain the msdia DLL is copied into.
LLVM_BUILD_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', '..', 'buildtools',
                                              'windows-x64', 'clang'))
def GetDiaDll():
  """Get the location of msdia*.dll for the platform."""
  # Bump after VC updates.
  DIA_DLL = {
      '2013': 'msdia120.dll',
      '2015': 'msdia140.dll',
      '2017': 'msdia140.dll',
      '2019': 'msdia140.dll',
  }

  # Don't let vs_toolchain overwrite our environment.
  # Bug fix: the original saved a *reference* (environ_bak = os.environ) and
  # assigned the same object back at the end, so vs_toolchain's in-place
  # mutations were never undone.  Snapshot a copy and restore it instead.
  environ_bak = dict(os.environ)

  sys.path.append(os.path.join(THIS_DIR, '..', '..', 'build'))
  import vs_toolchain
  win_sdk_dir = vs_toolchain.SetEnvironmentAndGetSDKDir()
  msvs_version = vs_toolchain.GetVisualStudioVersion()

  if bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', '1'))):
    dia_path = os.path.join(win_sdk_dir, '..', 'DIA SDK', 'bin', 'amd64')
  else:
    if 'GYP_MSVS_OVERRIDE_PATH' in os.environ:
      vs_path = os.environ['GYP_MSVS_OVERRIDE_PATH']
    else:
      vs_path = vs_toolchain.DetectVisualStudioPath()
    dia_path = os.path.join(vs_path, 'DIA SDK', 'bin', 'amd64')

  # Restore the pre-call environment key-by-key (os.environ cannot be rebound).
  os.environ.clear()
  os.environ.update(environ_bak)
  return os.path.join(dia_path, DIA_DLL[msvs_version])
def CopyFile(src, dst):
  """Copy a file from src to dst."""
  print("Copying %s to %s" % (str(src), str(dst)))
  shutil.copy(src, dst)
def CopyDiaDllTo(target_dir):
  """Copy the 64-bit msdia*.dll for the detected MSVS into target_dir."""
  # This script always wants to use the 64-bit msdia*.dll.
  dia_dll = GetDiaDll()
  CopyFile(dia_dll, target_dir)
def main():
  """Entry point: drop the DIA DLL next to the bundled clang binaries."""
  CopyDiaDllTo(os.path.join(LLVM_BUILD_DIR, 'bin'))
  return 0
if __name__ == '__main__':
  sys.exit(main())
| 1.796875 | 2 |
regparser/utils.py | cfpb/regulations-parser | 36 | 12760699 | <filename>regparser/utils.py
from random import choice
def roman_nums():
    """Yield roman numerals for 1, 2, 3, ... without end."""
    # Subtractive pairs ordered largest-first for a greedy conversion.
    pairs = [
        (1000, 'm'), (900, 'cm'), (500, 'd'), (400, 'cd'),
        (100, 'c'), (90, 'xc'), (50, 'l'), (40, 'xl'),
        (10, 'x'), (9, 'ix'), (5, 'v'), (4, 'iv'), (1, 'i'),
    ]
    n = 1
    while True:
        pieces = []
        remainder = n
        for amount, chars in pairs:
            while remainder >= amount:
                pieces.append(chars)
                remainder -= amount
        yield ''.join(pieces)
        n += 1
def title_body(text):
    """Split text into its first line (the title) and the rest of the text."""
    title, sep, rest = text.partition("\n")
    if not sep:
        # No newline at all: the whole text is the title.
        return text, ""
    # Keep the newline at the start of the body, matching slice semantics.
    return title, sep + rest
def flatten(list_of_lists):
    """List[List[X]] -> List[X]"""
    return [item for sublist in list_of_lists for item in sublist]
# Lowercase ASCII letters a-z.
letters = [chr(i) for i in range(97, 97 + 26)]
# Bug fix: the "uppercase" list previously duplicated the lowercase range
# (97..122); uppercase ASCII letters start at 65 ('A').
ucase_letters = [chr(i) for i in range(65, 65 + 26)]
def random_letters(length):
    """Return a random lowercase string of the given length."""
    return ''.join(choice(letters) for _ in range(length))
def set_of_random_letters(num_items, length):
    """Return a set of ``num_items`` distinct random strings of ``length`` chars."""
    unique = set()
    # Duplicates are simply re-drawn until the set is large enough.
    while len(unique) < num_items:
        unique.add(random_letters(length))
    return unique
def interpolate_string(text, offsets, values):
    """Replace each (start, end) span from ``offsets`` in ``text`` with the
    corresponding entry of ``values``, keeping the text between spans.

    NOTE(review): the encode/decode churn is Python-2-era bytes/unicode
    juggling -- ``values`` appear to be UTF-8 byte strings and the final
    ``result.encode('utf-8')`` on a byte string only works on Python 2.
    Confirm the runtime before modernizing.
    """
    result = ''.encode('utf-8')
    current_pos = 0
    for i, offset in enumerate(offsets):
        start = offset[0]
        end = offset[1]
        # Text between the previous span and this one is copied verbatim.
        fragment = text[current_pos:start].encode('utf-8')
        current_pos = end
        result = (result.decode('utf-8') +
                  fragment.decode('utf-8') +
                  values[i].decode('utf-8')).encode('utf-8')
    # Append whatever follows the last replaced span.
    result = (result.encode('utf-8') +
              text[current_pos:].encode('utf-8')).decode('utf-8')
    return result
| 3.296875 | 3 |
Conv3D/tools.py | SnorlaxSE/CVPR21Chal-SLR | 85 | 12760700 | <gh_stars>10-100
import torch
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import torchvision.utils as utils
import cv2
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
def get_label_and_pred(model, dataloader, device):
    """Run ``model`` over ``dataloader`` and collect labels and predictions.

    Args:
        model: callable mapping a batch tensor to class logits (or a list
            whose first element is the logits).
        dataloader: iterable of dicts with 'data' and 'label' tensors.
        device: torch device batches are moved to.

    Returns:
        (all_label, all_pred): two flat numpy arrays of equal length.
    """
    all_label = []
    all_pred = []
    with torch.no_grad():
        # Bug fix: the original iterated the undefined global ``test_loader``
        # instead of the ``dataloader`` argument (NameError at call time).
        for batch_idx, data in enumerate(dataloader):
            # get the inputs and labels
            inputs, labels = data['data'].to(device), data['label'].to(device)
            # forward
            outputs = model(inputs)
            if isinstance(outputs, list):
                outputs = outputs[0]
            # collect labels & prediction (argmax over class dimension)
            prediction = torch.max(outputs, 1)[1]
            all_label.extend(labels.squeeze())
            all_pred.extend(prediction)
    # Stack the collected 0-d tensors and convert to numpy arrays.
    all_label = torch.stack(all_label, dim=0)
    all_pred = torch.stack(all_pred, dim=0)
    all_label = all_label.squeeze().cpu().data.squeeze().numpy()
    all_pred = all_pred.cpu().data.squeeze().numpy()
    return all_label, all_pred
def plot_confusion_matrix(model, dataloader, device, save_path='confmat.png', normalize=True):
    """Compute, draw and save a confusion matrix for ``model``.

    Also prints the ten worst classes by diagonal score and dumps the raw
    matrix to ``matrix.csv`` in the working directory.

    NOTE(review): ``test_set`` (used below for label_to_word) is not defined
    in this module -- it appears to rely on a global from the calling script;
    confirm before reuse.
    """
    # Get prediction
    all_label, all_pred = get_label_and_pred(model, dataloader, device)
    confmat = confusion_matrix(all_label, all_pred)
    # Normalize the matrix (row-wise, so each row sums to 1)
    if normalize:
        confmat = confmat.astype('float') / confmat.sum(axis=1)[:, np.newaxis]
    # Draw matrix
    plt.figure(figsize=(20,20))
    # confmat = np.random.rand(100,100)
    plt.imshow(confmat, interpolation='nearest', cmap=plt.cm.Blues)
    plt.colorbar()
    # Add ticks (assumes 100 classes -- TODO confirm)
    ticks = np.arange(100)
    plt.xticks(ticks, fontsize=8)
    plt.yticks(ticks, fontsize=8)
    plt.grid(True)
    # Add title & labels
    plt.title('Confusion matrix', fontsize=20)
    plt.xlabel('Predicted label', fontsize=20)
    plt.ylabel('True label', fontsize=20)
    # Save figure
    plt.savefig(save_path)
    # Ranking: classes sorted ascending by diagonal (per-class) score
    sorted_index = np.diag(confmat).argsort()
    for i in range(10):
        # print(type(sorted_index[i]))
        print(test_set.label_to_word(int(sorted_index[i])), confmat[sorted_index[i]][sorted_index[i]])
    # Save to csv
    np.savetxt('matrix.csv', confmat, delimiter=',')
def visualize_attn(I, c):
    """Overlay attention/response maps ``c`` on image grid ``I`` as a heatmap.

    Args:
        I: CHW image tensor (a make_grid output).
        c: NCHW response tensor for one time slice.

    Returns:
        CHW float tensor of the blended visualization (0.6 image + 0.4 heatmap).
    """
    # Image
    img = I.permute((1,2,0)).cpu().numpy()
    # Heatmap: softmax over the spatial positions of each channel
    N, C, H, W = c.size()
    a = F.softmax(c.view(N,C,-1), dim=2).view(N,C,H,W)
    # Upsample the map to the 128-pixel grid cells when it is smaller.
    up_factor = 128/H
    # print(up_factor, I.size(), c.size())
    if up_factor > 1:
        a = F.interpolate(a, scale_factor=up_factor, mode='bilinear', align_corners=False)
    attn = utils.make_grid(a, nrow=4, normalize=True, scale_each=True)
    attn = attn.permute((1,2,0)).mul(255).byte().cpu().numpy()
    attn = cv2.applyColorMap(attn, cv2.COLORMAP_JET)
    attn = cv2.cvtColor(attn, cv2.COLOR_BGR2RGB)
    # Add the heatmap to the image
    vis = 0.6 * img + 0.4 * attn
    return torch.from_numpy(vis).permute(2,0,1)
def plot_attention_map(model, dataloader, device):
    """Log attention-map visualizations for one batch to TensorBoard.

    Takes the first 16 clips of the first batch, runs the model to obtain its
    four intermediate response maps, and writes the original frames plus each
    overlay under runs/attention_<timestamp>.
    """
    # Summary writer
    writer = SummaryWriter("runs/attention_{:%Y-%m-%d_%H-%M-%S}".format(datetime.now()))
    model.eval()
    with torch.no_grad():
        for batch_idx, data in enumerate(dataloader):
            # get images
            inputs = data['data'].to(device)
            if batch_idx == 0:
                images = inputs[0:16,:,:,:,:]
                # Grid of the first time slice of each clip.
                I = utils.make_grid(images[:,:,0,:,:], nrow=4, normalize=True, scale_each=True)
                writer.add_image('origin', I)
                _, c1, c2, c3, c4 = model(images)
                # print(I.shape, c1.shape, c2.shape, c3.shape, c4.shape)
                attn1 = visualize_attn(I, c1[:,:,0,:,:])
                writer.add_image('attn1', attn1)
                attn2 = visualize_attn(I, c2[:,:,0,:,:])
                writer.add_image('attn2', attn2)
                attn3 = visualize_attn(I, c3[:,:,0,:,:])
                writer.add_image('attn3', attn3)
                attn4 = visualize_attn(I, c4[:,:,0,:,:])
                writer.add_image('attn4', attn4)
            # Only the first batch is visualized.
            break
"""
Calculate Word Error Rate
Word Error Rate = (Substitutions + Insertions + Deletions) / Number of Words Spoken
Reference:
https://holianh.github.io/portfolio/Cach-tinh-WER/
https://github.com/imalic3/python-word-error-rate
"""
def wer(r, h):
    """Word Error Rate between reference ``r`` and hypothesis ``h``.

    WER = (substitutions + insertions + deletions) / len(reference) * 100,
    computed via the standard Levenshtein dynamic program.

    Bug fix: the DP table used dtype=np.uint8, which silently wraps modulo
    256 for sequences longer than 255 tokens; a platform integer is used
    instead.  The first-row/column initialization is also written directly
    rather than scanning the whole table.
    """
    # d[i][j] = edit distance between r[:i] and h[:j]
    d = np.zeros((len(r) + 1, len(h) + 1), dtype=np.int64)
    for i in range(len(r) + 1):
        d[i][0] = i
    for j in range(len(h) + 1):
        d[0][j] = j
    # computation
    for i in range(1, len(r) + 1):
        for j in range(1, len(h) + 1):
            if r[i - 1] == h[j - 1]:
                d[i][j] = d[i - 1][j - 1]
            else:
                substitution = d[i - 1][j - 1] + 1
                insertion = d[i][j - 1] + 1
                deletion = d[i - 1][j] + 1
                d[i][j] = min(substitution, insertion, deletion)
    return float(d[len(r)][len(h)]) / len(r) * 100
if __name__ == '__main__':
    # Calculate WER
    # Smoke test: 3 edits over 4 reference words -> expected 75.0.
    r = [1,2,3,4]
    h = [1,1,3,5,6]
    print(wer(r, h))
| 2.265625 | 2 |
hpi_keras/proc/data.py | doublechenching/hpi | 7 | 12760701 | <reponame>doublechenching/hpi
#encoding: utf-8
from __future__ import print_function
import numpy as np
import pandas as pd
import os
import glob
from sklearn.model_selection import train_test_split
def describe_data(df):
    """Print per-class counts and percentages for a one-hot label dataframe.

    Args:
        df: DataFrame with 'Id' and 'Target' columns plus one 0/1 column per
            class name (as produced by load_train_csv / load_extra_csv).
    """
    # Sum each class column and sort the classes by frequency.
    target_counts = df.drop(["Id", "Target"], axis=1).sum(axis=0).sort_values(ascending=False)
    print('%-30s' % 'Class', '\t', 'Count', '\t%s' % 'Percentage')
    for value , percent, item in zip(target_counts.ravel(), target_counts.ravel() / np.sum(target_counts.ravel()), target_counts.index.values):
        print('%-30s' % item, '\t', value, '\t%.2f' % percent)
def load_train_csv(cfg):
    """Load the training CSV and expand 'Target' into one-hot class columns.

    Args:
        cfg: config object providing ``train_csv`` (path) and ``label_names``
            (mapping of class id to class name).

    Returns:
        DataFrame with Id, Target (parsed int array) and one 0/1 column per
        class name; class statistics are printed via describe_data.

    NOTE(review): nearly identical to load_extra_csv below except for the
    CSV path -- candidates for a shared helper.
    """
    def fill_targets(row):
        # 'Target' is a space-separated list of class ids; mark each class
        # column and keep the parsed int array on the row.
        row.Target = np.array(row.Target.split(" ")).astype(np.int)
        for num in row.Target:
            name = cfg.label_names[int(num)]
            row.loc[name] = 1
        return row
    train_labels = pd.read_csv(cfg.train_csv)
    for key in cfg.label_names.keys():
        train_labels[cfg.label_names[key]] = 0
    train_labels = train_labels.apply(fill_targets, axis=1)
    print('*'*100)
    describe_data(train_labels)
    return train_labels
def load_extra_csv(cfg):
    """Load the external-data CSV and expand 'Target' into one-hot columns.

    Args:
        cfg: config object providing ``extra_data_csv`` (path) and
            ``label_names`` (mapping of class id to class name).

    Returns:
        DataFrame with Id, Target (parsed int array) and one 0/1 column per
        class name; class statistics are printed via describe_data.
    """
    def fill_targets(row):
        # 'Target' is a space-separated list of class ids; mark each class
        # column and keep the parsed int array on the row.
        row.Target = np.array(row.Target.split(" ")).astype(np.int)
        for num in row.Target:
            name = cfg.label_names[int(num)]
            row.loc[name] = 1
        return row
    train_labels = pd.read_csv(cfg.extra_data_csv)
    for key in cfg.label_names.keys():
        train_labels[cfg.label_names[key]] = 0
    train_labels = train_labels.apply(fill_targets, axis=1)
    print('*'*100)
    describe_data(train_labels)
    return train_labels
def load_test_csv(cfg):
    """Read the test sample-submission CSV referenced by ``cfg`` into a DataFrame."""
    return pd.read_csv(cfg.test_sample_csv)
def split_train_val(train_val_df, ratio=0.25, seed=42):
    """Randomly split the train/val dataframe.

    Note: this is a row-wise random split, not a patient-wise split.

    Args:
        train_val_df: dataframe with training and validation rows.
        ratio: float, fraction of rows assigned to the validation set.
        seed: int, random seed for a reproducible split.

    Returns:
        (train_df, valid_df); class statistics for both are printed.
    """
    train_df, valid_df = train_test_split(train_val_df,
                                          test_size=ratio,
                                          random_state=seed)
    print('*'*100)
    describe_data(train_df)
    print('*'*100)
    describe_data(valid_df)
    return train_df, valid_df
if __name__ == "__main__":
pass | 2.921875 | 3 |
turbosms/lib.py | pmaigutyak/mp-turbosms | 7 | 12760702 | <gh_stars>1-10
from django.apps import apps
from django.db import connections
from django.template.loader import render_to_string
from turbosms import config
def get_default_sms_recipients():
    """Return the default SMS recipients.

    Prefers the static turbosms config value, falling back to the live
    site_config value when that app is installed.
    """
    if apps.is_installed('site_config'):
        from site_config import config as _config
        return getattr(config, 'SMS_RECIPIENTS', _config.SMS_RECIPIENTS)
    return config.SMS_RECIPIENTS
def send_sms(message, recipients=None):
    """Queue ``message`` for each recipient in the TurboSMS database table.

    No-op when SMS sending is disabled in config.  Rows inserted into the
    'turbosms' database connection are picked up by the gateway.
    """
    if not config.IS_SMS_ENABLED:
        return
    if recipients is None:
        recipients = get_default_sms_recipients()
    # NOTE(review): the table name is interpolated from config.SMS_USERNAME;
    # that value is operator-controlled (not request input), but confirm it
    # can never be sourced from untrusted data, since a table name cannot be
    # parameterized.  The row values themselves are parameterized below.
    query = (
        'INSERT INTO {} (number, message, sign) VALUES (%s, %s, %s)'
    ).format(config.SMS_USERNAME)
    with connections['turbosms'].cursor() as cursor:
        for number in recipients:
            cursor.execute(query, [
                number,
                message,
                config.SMS_SIGNATURE])
def send_sms_from_template(template_name, context=None, recipients=None):
    """Render a Django template to text and queue it as an SMS."""
    message = render_to_string(template_name, context)
    send_sms(message, recipients)
| 2.21875 | 2 |
chameleon/views/query.py | jakebrinkmann/docker-grafana | 0 | 12760703 | <gh_stars>0
import json
import re
import copy
from flask import request
from flask_restplus import Resource
from chameleon.utils import db_instance
from chameleon.metrics import QUERIES
class Query(Resource):
    """Grafana SimpleJSON-style /query endpoint: return metric values.
    """
    def get(self):
        # Health-check style response; real queries arrive via POST.
        return {'hello': 'world'}
    def post(self):
        """ return [{"target": <name>, "datapoints": [[v,t],[v,t]...]},...]

        Request body (Grafana SimpleJSON): {"targets": [...], "range": {...}}.
        NOTE(review): a target with no 'target' key makes the loop ``break``,
        silently dropping every remaining target -- possibly meant to be
        ``continue``; confirm intent.
        """
        data = request.get_json()
        print("JSON: {}".format(data))
        targets_list = data.get('targets')
        time_range = data.get('range')
        metrics_out = list()
        for t in targets_list:
            name = t.get('target')
            if name is None:
                break
            # Dotted target name: first part selects the metric, later parts
            # may carry template arguments ("key=value").
            name_pts = name.split('.')
            tartype = t.get('type')
            print("NAME: {}".format(name))
            print("TYPE: {}".format(tartype))
            # deepcopy so template substitution never mutates QUERIES itself.
            metric = copy.deepcopy(QUERIES.get(name_pts[0]))
            if len(name_pts) > 1:
                metric = metric.get(name_pts[1])
            if metric:
                query = metric.get('query')
                if query:
                    # need to pass these in to the query if ALL, or subset them
                    args = metric.get('templates')
                    # Template: $source templates.metrics.orders_ordered.source
                    # Example: metrics.orders_ordered.source=$source
                    templated = [k for k in args.keys() if k in name]
                    if templated:
                        for k in templated:
                            # Pull "key={v1,v2}" out of the target name and
                            # turn it into a tuple of values for the SQL args.
                            val = [n for n in name_pts if k in n].pop()
                            val_pts = val.split('=')
                            val_pts[1] = val_pts[1].replace('{', '').replace('}', '')
                            args[val_pts[0]] = tuple(val_pts[1].split(','))
                    args.update(time_range)
                    print('ARGS: {}'.format(args))
                    print('SQL: {}'.format(query))
                    sql_res = db_instance.select(query, args)
                    if tartype == 'timeserie':
                        # Grafana timeserie shape: [[value, timestamp], ...]
                        retval = dict(target=name, datapoints=list())
                        retval['datapoints'] = [[r['val'], r['ts']] for r in sql_res]
                    elif tartype == 'table':
                        # Grafana table shape: column metadata + raw rows.
                        retval = dict(columns=list(), rows=list(), type="table")
                        type_lut = {str: "string", int: "number", float: "number"}
                        for c,v in sql_res[0].items():
                            retval['columns'].append(dict(text=c, type=type_lut[type(v)]))
                        retval['rows'] = sql_res[:]
                    print('RES: {}'.format(retval))
                    metrics_out.append(retval)
        return metrics_out
| 2.265625 | 2 |
_unittests/ut_documentation/test_run_notebooks.py | sdpython/cpyquickhelper | 2 | 12760704 | <filename>_unittests/ut_documentation/test_run_notebooks.py
# -*- coding: utf-8 -*-
"""
@brief test log(time=33s)
"""
import os
import unittest
import platform
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import ExtTestCase, skipif_appveyor
from pyquickhelper.ipythonhelper import test_notebook_execution_coverage
import cpyquickhelper
class TestRunNotebooksPython(ExtTestCase):
    """Execute the documentation notebooks and record coverage."""
    @unittest.skipIf(platform.system().lower() == "darwin",
                     reason="no openmp")
    def test_run_notebooks_branching(self):
        """Run the 'branching' notebook (skipped on macOS: no OpenMP)."""
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        import jyquickhelper as jyq
        self.assertNotEmpty(jyq)
        self.assertNotEmpty(cpyquickhelper)
        folder = os.path.join(os.path.dirname(__file__),
                              "..", "..", "_doc", "notebooks")
        test_notebook_execution_coverage(
            __file__, "branching", folder, 'cpyquickhelper', fLOG=fLOG)
    @skipif_appveyor("unstable issue")
    def test_run_notebooks_nobranching(self):
        """Run every documentation notebook except the 'branching' ones."""
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        import jyquickhelper as jyq
        self.assertNotEmpty(jyq)
        self.assertNotEmpty(cpyquickhelper)
        folder = os.path.join(os.path.dirname(__file__),
                              "..", "..", "_doc", "notebooks")
        test_notebook_execution_coverage(
            __file__, "", folder, 'cpyquickhelper', fLOG=fLOG,
            filter_name=lambda name: "branching" not in name)
if __name__ == "__main__":
    unittest.main()
| 2.171875 | 2 |
test_prj/test_prj/tests/test_core.py | quatrope/djmpl | 0 | 12760705 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020, <NAME> & QuatroPe
# License: BSD-3-Clause
# Full Text: https://github.com/quatrope/djmpl/blob/master/LICENSE
# =============================================================================
# DOCS
# =============================================================================
"""Tests for django_matplotlib.core
"""
# =============================================================================
# IMPORTS
# =============================================================================
from django.utils.safestring import SafeString
import django_matplotlib as djmpl
from django_matplotlib import core, settings
import jinja2
import matplotlib.pyplot as plt
from pyquery import PyQuery as pq
import pytest
# =============================================================================
# CONSTANTS
# =============================================================================
# All template-engine names accepted by django_matplotlib (real engines plus
# their aliases).
ALL_ENGINE_NAMES = list(settings.TEMPLATES_FORMATERS) + list(
    settings.TEMPLATE_ALIAS
)
# Many figures are created across the parametrized tests; silence the warning.
plt.rcParams.update({"figure.max_open_warning": 0})
# =============================================================================
# TESTS
# =============================================================================
@pytest.mark.parametrize(
    "engine, safe_type",
    [("django", SafeString), ("jinja2", jinja2.Markup), ("str", str)],
)
def test_png(engine, safe_type):
    """PNG output: one djmpl/djmpl-png div wrapping a base64 <img>."""
    plot = djmpl.subplots(plot_format="png", template_engine=engine)
    html = plot.to_html()
    assert isinstance(html, safe_type)
    div = pq(html)
    assert len(div) == 1
    assert div[0].tag == "div"
    assert div.has_class("djmpl")
    assert div.has_class("djmpl-png")
    children = div[0].getchildren()
    assert len(children) == 1
    img = children[0]
    assert img.tag == "img"
    assert img.attrib["src"].split(",", 1)[0] == "data:image/png;base64"
@pytest.mark.parametrize(
    "engine, safe_type",
    [("django", SafeString), ("jinja2", jinja2.Markup), ("str", str)],
)
def test_svg(engine, safe_type):
    """SVG output: djmpl/djmpl-svg div whose last child is inline <svg>."""
    plot = djmpl.subplots(plot_format="svg", template_engine=engine)
    html = plot.to_html()
    assert isinstance(html, safe_type)
    div = pq(html)
    assert len(div) == 1
    assert div[0].tag == "div"
    assert div.has_class("djmpl")
    assert div.has_class("djmpl-svg")
    children = div[0].getchildren()
    assert len(children) == 3
    img = children[-1]
    assert img.tag == "svg"
@pytest.mark.parametrize(
    "engine, safe_type",
    [("django", SafeString), ("jinja2", jinja2.Markup), ("str", str)],
)
def test_mpld3(engine, safe_type):
    """mpld3 output: djmpl/djmpl-mpld3 div whose last child is a <script>."""
    plot = djmpl.subplots(plot_format="mpld3", template_engine=engine)
    html = plot.to_html()
    assert isinstance(html, safe_type)
    div = pq(html)
    assert len(div) == 1
    assert div[0].tag == "div"
    assert div.has_class("djmpl")
    assert div.has_class("djmpl-mpld3")
    children = div[0].getchildren()
    assert len(children) == 3
    img = children[-1]
    assert img.tag == "script"
@pytest.mark.parametrize("fmt", settings.AVAILABLE_FORMATS)
@pytest.mark.parametrize("engine", ALL_ENGINE_NAMES)
def test_valid_engine_and_format(fmt, engine):
    """Every (format, engine) pair is accepted and normalized by alias."""
    plot = djmpl.subplots(plot_format=fmt, template_engine=engine)
    assert plot.plot_format == fmt
    assert plot.template_engine == core.template_by_alias(engine)
@pytest.mark.parametrize("engine", ALL_ENGINE_NAMES)
def test_invalid_and_format(engine):
    """An unknown plot format raises ValueError for every engine."""
    with pytest.raises(ValueError):
        djmpl.subplots(plot_format="%NOT-EXISTS%", template_engine=engine)
@pytest.mark.parametrize("fmt", settings.AVAILABLE_FORMATS)
def test_invalid_engine(fmt):
    """An unknown template engine raises EngineNotSupported for every format."""
    with pytest.raises(core.EngineNotSupported):
        djmpl.subplots(plot_format=fmt, template_engine="%NOT-EXISTS%")
| 1.523438 | 2 |
setup.py | jaraco/aspen | 1 | 12760706 | <filename>setup.py
# Legacy setup.py; kept Python-2.6/2.7 compatible (see classifiers below).
try:
    import setuptools # noqa
except ImportError:
    # Fall back to the ez_setup bootstrap on environments without setuptools.
    from ez_setup import use_setuptools
    use_setuptools()
from setuptools import find_packages, setup
# Single source of truth for the release version.
version = open('version.txt').read()
classifiers = [ 'Development Status :: 4 - Beta'
              , 'Environment :: Console'
              , 'Intended Audience :: Developers'
              , 'License :: OSI Approved :: MIT License'
              , 'Natural Language :: English'
              , 'Operating System :: OS Independent'
              , 'Programming Language :: Python :: 2.6'
              , 'Programming Language :: Python :: 2.7'
              , 'Programming Language :: Python :: Implementation :: CPython'
              , 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application'
               ]
setup( author = 'Gratipay, LLC'
     , author_email = '<EMAIL>'
     , classifiers = classifiers
     , description = ('Aspen is a Python web framework. '
                      'Simplates are the main attraction.')
     , entry_points = {'console_scripts': ['fcgi_aspen = fcgi_aspen:main [fcgi]']}
     , name = 'aspen'
     , packages = find_packages(exclude=['aspen.tests', 'aspen.tests.*'])
     , py_modules = ['fcgi_aspen']
     , url = 'http://aspen.io/'
     , version = version
     , zip_safe = False
     , package_data = {'aspen': ['www/*', 'configuration/mime.types']}
     , install_requires = [ 'python-mimeparse==0.1.4'
                          , 'first==2.0.1'
                          , 'algorithm>=1.0.0'
                          , 'filesystem_tree>=1.0.0'
                           ]
     , extras_require = {'fcgi': ['flup']}
     , tests_require = [ 'virtualenv>=1.11'
                       , 'py'
                       , 'coverage'
                       , 'pytest'
                       , 'pytest-cov'
                        ]
      )
| 1.429688 | 1 |
library/src/test/unit/detectors/relative_entropy_detector_test.py | unSAD-admin/unSAD | 3 | 12760707 | # Created by <NAME> on 10/7/2019, 12:53 AM
import sys
import os
# Make the library package importable when running this file directly.
project_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
sys.path.append(project_path)
from detectors.relative_entropy_detector import RelativeEntropyDetector
from common.dataset import CSVDataset
from utils.analysis import draw_array
def test_detector():
    """Smoke-test RelativeEntropyDetector on one NAB AWS CloudWatch series."""
    # read in the data
    file_path = project_path + "/../data/NAB_data/data/realAWSCloudwatch/ec2_cpu_utilization_5f5533.csv"
    data = CSVDataset(file_path, header=1, values=1, test_size=0).get_data()[0]["values"]
    # finding min max of the value
    min_value = min(data)
    max_value = max(data)
    # initialize the detector
    detector = RelativeEntropyDetector()
    # set the window_size to be 52 and n_bins to be 5 for testing a normal case
    detector.initialize(input_min=min_value, input_max=max_value, window_size=52, n_bins=5)
    # handle all the record
    result = detector.handle_record_sequence(data)
    # Plot the resulting anomaly scores for visual inspection.
    draw_array(result)
if __name__ == "__main__":
    test_detector()
| 2.375 | 2 |
mediagoblin/submit/views.py | 3rdwiki/mediagoblin | 1 | 12760708 | <gh_stars>1-10
# GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mediagoblin import messages
import mediagoblin.mg_globals as mg_globals
import uuid
from os.path import splitext
from celery import registry
import urllib
import urllib2
import logging
_log = logging.getLogger(__name__)
from werkzeug.utils import secure_filename
from werkzeug.datastructures import FileStorage
from mediagoblin.db.util import ObjectId
from mediagoblin.tools.text import convert_to_tag_list_of_dicts
from mediagoblin.tools.translate import pass_to_ugettext as _
from mediagoblin.tools.response import render_to_response, redirect
from mediagoblin.decorators import require_active_login
from mediagoblin.submit import forms as submit_forms
from mediagoblin.processing import mark_entry_failed
from mediagoblin.processing.task import ProcessMedia
from mediagoblin.messages import add_message, SUCCESS
from mediagoblin.media_types import sniff_media, \
InvalidFileType, FileTypeNotSupported
@require_active_login
def submit_start(request):
    """
    First view for submitting a file.

    On GET (or a failed POST) the submission form is re-rendered.  On a
    valid POST the uploaded file is sniffed for its media type, a
    MediaEntry is created and queued, and an asynchronous processing
    task is kicked off before redirecting to the user's home page.
    """
    submit_form = submit_forms.SubmitStartForm(request.form)

    if request.method == 'POST' and submit_form.validate():
        # The WTForms form does not validate the file part itself, so
        # check for a real uploaded file explicitly.
        if not ('file' in request.files
                and isinstance(request.files['file'], FileStorage)
                and request.files['file'].stream):
            submit_form.file.errors.append(
                _(u'You must provide a file.'))
        else:
            try:
                filename = request.files['file'].filename

                # Sniff the submitted media to determine which
                # media plugin should handle processing
                media_type, media_manager = sniff_media(
                    request.files['file'])

                # create entry and save in database
                entry = request.db.MediaEntry()
                entry.id = ObjectId()
                entry.media_type = unicode(media_type)
                # Fall back to the filename (sans extension) when no
                # title was supplied.
                entry.title = (
                    unicode(request.form['title'])
                    or unicode(splitext(filename)[0]))

                entry.description = unicode(request.form.get('description'))

                entry.license = unicode(request.form.get('license', "")) or None

                entry.uploader = request.user._id

                # Process the user's folksonomy "tags"
                entry.tags = convert_to_tag_list_of_dicts(
                    request.form.get('tags'))

                # Generate a slug from the title
                entry.generate_slug()

                # We generate this ourselves so we know what the task id is for
                # retrieval later.
                # (If we got it off the task's auto-generation, there'd be
                # a risk of a race condition when we'd save after sending
                # off the task)
                task_id = unicode(uuid.uuid4())

                # Now store generate the queueing related filename
                queue_filepath = request.app.queue_store.get_unique_filepath(
                    ['media_entries',
                     task_id,
                     secure_filename(filename)])

                # queue appropriately
                queue_file = request.app.queue_store.get_file(
                    queue_filepath, 'wb')

                with queue_file:
                    queue_file.write(request.files['file'].stream.read())

                # Add queued filename to the entry
                entry.queued_media_file = queue_filepath

                entry.queued_task_id = task_id

                # Save now so we have this data before kicking off processing
                entry.save(validate=True)

                # Pass off to processing
                #
                # (... don't change entry after this point to avoid race
                # conditions with changes to the document via processing code)
                process_media = registry.tasks[ProcessMedia.name]
                try:
                    process_media.apply_async(
                        [unicode(entry._id)], {},
                        task_id=task_id)
                except BaseException as exc:
                    # The purpose of this section is because when running in "lazy"
                    # or always-eager-with-exceptions-propagated celery mode that
                    # the failure handling won't happen on Celery end. Since we
                    # expect a lot of users to run things in this way we have to
                    # capture stuff here.
                    #
                    # ... not completely the diaper pattern because the
                    # exception is re-raised :)
                    mark_entry_failed(entry._id, exc)
                    # re-raise the exception
                    raise

                # Notify any configured PubSubHubbub hubs that the user's
                # atom feed has new content.  Failures here are non-fatal.
                if mg_globals.app_config["push_urls"]:
                    feed_url = request.urlgen(
                        'mediagoblin.user_pages.atom_feed',
                        qualified=True,
                        user=request.user.username)
                    hubparameters = {
                        'hub.mode': 'publish',
                        'hub.url': feed_url}
                    hubdata = urllib.urlencode(hubparameters)
                    hubheaders = {
                        "Content-type": "application/x-www-form-urlencoded",
                        "Connection": "close"}
                    for huburl in mg_globals.app_config["push_urls"]:
                        hubrequest = urllib2.Request(huburl, hubdata, hubheaders)
                        try:
                            hubresponse = urllib2.urlopen(hubrequest)
                        except urllib2.HTTPError as exc:
                            # This is not a big issue, the item will be fetched
                            # by the PuSH server next time we hit it
                            _log.warning(
                                "push url %r gave error %r", huburl, exc.code)
                        except urllib2.URLError as exc:
                            _log.warning(
                                "push url %r is unreachable %r", huburl, exc.reason)

                add_message(request, SUCCESS, _('Woohoo! Submitted!'))

                return redirect(request, "mediagoblin.user_pages.user_home",
                                user=request.user.username)
            except Exception as e:
                '''
                This section is intended to catch exceptions raised in
                mediagoblin.media_types
                '''
                if isinstance(e, InvalidFileType) or \
                        isinstance(e, FileTypeNotSupported):
                    submit_form.file.errors.append(
                        e)
                else:
                    raise

    return render_to_response(
        request,
        'mediagoblin/submit/start.html',
        {'submit_form': submit_form,
         'app_config': mg_globals.app_config})
@require_active_login
def add_collection(request, media=None):
    """
    View to create a new collection.

    On a valid POST, creates a Collection owned by the requesting user,
    unless the user already has a collection with the same title.  On
    success, redirects to the user's home page; otherwise re-renders
    the collection form.
    """
    submit_form = submit_forms.AddCollectionForm(request.form)

    if request.method == 'POST' and submit_form.validate():
        collection = request.db.Collection()
        collection.id = ObjectId()
        collection.title = unicode(request.form['title'])
        collection.description = unicode(request.form.get('description'))
        collection.creator = request.user._id
        collection.generate_slug()

        # Make sure this user isn't duplicating an existing collection
        existing_collection = request.db.Collection.find_one({
            'creator': request.user._id,
            'title': collection.title})

        if existing_collection:
            # Translate the literal format string first, THEN interpolate:
            # interpolating before calling _() would look up the already-
            # substituted string in the catalog and never find a match.
            messages.add_message(
                request, messages.ERROR,
                _('You already have a collection called "%s"!')
                % collection.title)
        else:
            collection.save(validate=True)

            add_message(request, SUCCESS,
                        _('Collection "%s" added!') % collection.title)

            return redirect(request, "mediagoblin.user_pages.user_home",
                            user=request.user.username)

    return render_to_response(
        request,
        'mediagoblin/submit/collection.html',
        {'submit_form': submit_form,
         'app_config': mg_globals.app_config})
| 1.859375 | 2 |
src/visitpy/examples/matexprs.py | visit-dav/vis | 226 | 12760709 | ###############################################################################
#
# Purpose: Use VisIt CLI to iterate over Curves in a material database and
# compute and plot some common difference curves and output the results
# to either a curve or image file format.
#
# Programmer: <NAME>
# Date: Wed May 27 13:15:07 PDT 2009
#
#
# Modifications:
# <NAME>, Mon Jun 15 17:52:15 PDT 2009
# Removed subclassing used to override behavior of Optparse in presence of
# unrecognized options. By using Argv(), VisIt-specific options never wind
# up getting passed to this script.
###############################################################################
import sys, re, os, glob
from optparse import *
#
# Convert '#FFCC13" strings to color tuple
#
def ColorTupleFromHexString(s):
    """Convert an HTML-style color string such as '#FFCC13' into an
    (r, g, b, a) tuple of ints.  Alpha is always 255 (opaque).

    Strings that do not start with '#' -- including the empty string,
    which previously raised IndexError -- yield opaque black.
    """
    if not s or s[0] != '#':
        return (0, 0, 0, 255)
    # int(..., 16) accepts plain hex digit pairs; no "0x" prefix needed.
    return (int(s[1:3], 16),
            int(s[3:5], 16),
            int(s[5:7], 16),
            255)
#
# Command-line options
#
def BuildCommandLineOptions():
    """Build and return the OptionParser for this script.

    Fixes copy-pasted help text: '--data-max' and '--x-max' previously
    described the *minimum*, and several entries misspelled 'Minimum'.
    Option names, dests, types, and defaults are unchanged.
    """
    parser = OptionParser()
    parser.add_option("--image-width",
        help="Set width of images [%default].",
        type="int", dest="image_width", default="500", metavar="INT")
    parser.add_option("--image-height",
        help="Set height of images [%default].",
        type="int", dest="image_height", default="500", metavar="INT")
    parser.add_option("--data-min",
        type="float", dest="data_min", metavar="FLOAT",
        help="Minimum data value to be applied to all plots. If no "
             "value is specified, the minimum will be allowed to vary "
             "as needed from plot to plot.")
    parser.add_option("--data-max",
        type="float", dest="data_max", metavar="FLOAT",
        help="Maximum data value to be applied to all plots. If no "
             "value is specified, the maximum will be allowed to vary "
             "as needed from plot to plot.")
    parser.add_option("--log-data",
        help="Display data (y) axis in log scaling.",
        action="store_true", dest="log_data", default=False)
    parser.add_option("--x-min",
        type="float", dest="x_min", metavar="FLOAT",
        help="Minimum positional (x) value to be applied to all plots. If no "
             "value is specified, the minimum will be allowed to vary "
             "as needed from plot to plot.")
    parser.add_option("--x-max",
        type="float", dest="x_max", metavar="FLOAT",
        help="Maximum positional (x) value to be applied to all plots. If no "
             "value is specified, the maximum will be allowed to vary "
             "as needed from plot to plot.")
    parser.add_option("--log-x",
        help="Display positional (x) axis in log scaling.",
        action="store_true", dest="log_x", default=False)
    parser.add_option("--image-format",
        help="Set output format for images (e.g. 'tiff', 'png', 'jpeg'). "
             "If none specified, no images will be saved.",
        dest="image_format", metavar="STRING")
    parser.add_option("--curve-format",
        help="Set output format for curves (e.g. 'ultra', 'curve'). "
             "If none specified, no curve files will be saved.",
        dest="curve_format", metavar="STRING")
    parser.add_option("--color0",
        help="Set color to be used for first curve plot.",
        dest="color0", metavar="#RRGGBB")
    parser.add_option("--color1",
        help="Set color to be used for second curve plot.",
        dest="color1", metavar="#RRGGBB")
    parser.add_option("--line-width",
        help="Set line width for curves.",
        type="int", default=0, dest="line_width", metavar="INT")
    parser.add_option("--point-density",
        help="Plot symbols representing individual points in curves every Nth point. "
             "A value of zero turns the display of points off [%default].",
        type="int", default=0, dest="point_density", metavar="N")
    parser.add_option("--point-size",
        help="Size of symbols representing individual points in curve plots.",
        type="int", default=5, dest="point_size", metavar="INT")
    parser.add_option("--show-legend",
        help="Display curve plot legends.",
        action="store_true", dest="show_legend", default=False)
    parser.add_option("--show-labels",
        help="Display curve plot labels.",
        action="store_true", dest="show_labels", default=False)
    parser.set_usage("matexprs.py [options] dbname")
    return parser
#
# Iterate through curves, finding all unique 'dirs' containing curves.
#
def GetVarMap(metadata):
    """Group the database's curve names by their 'directory' prefix.

    Returns a dict mapping each directory name to a dict whose keys are
    the variable names found under that directory (values are always 1).
    Curve names with no '/' separator are ignored.
    """
    dir_map = {}
    for idx in range(metadata.GetNumCurves()):
        match = re.search("(.*)/([^/]*)", metadata.GetCurves(idx).name)
        if match is None:
            continue
        var_map = dir_map.setdefault(match.group(1), {})
        var_map[match.group(2)] = 1
    return dir_map
#
# Begin main program
#
# Top-level driver.  NOTE(review): Argv, OpenDatabase, GetMetaData,
# SaveWindowAttributes, CurveAttributes, DefineCurveExpression, AddPlot,
# DrawPlots, GetViewCurve/SetViewCurve, SetActivePlots, SetPlotOptions,
# SetSaveWindowAttributes, SaveWindow and DeleteAllPlots are injected by
# VisIt's CLI environment -- this script must be run via `visit -cli -s`.
parser = BuildCommandLineOptions()

#
# This bit of logic allows users to get usage/help from
# the command 'python matexprs.py --help'. Without it
# using VisIt's cli the '--help' will get interpreted
# in internallauncher and never make it into this script.
#
if "-h" in sys.argv or \
   "--help" in sys.argv or \
   "-help" in sys.argv or \
   "help" in sys.argv:
    parser.print_help()
    sys.exit(1)

#
# Argv() is a function defined by VisIt's cli that
# returns ONLY the options after the argument (filename)
# to the '-s' command-line option. In theory, that
# should be only the arguments that this script itself
# should interpret.
#
(clOpts, clArgs) = parser.parse_args(list(Argv()))

#
# Set the name of the database. It is the only 'positional'
# argument on the command line.
#
dbname = ""
if len(clArgs) > 0:
    dbname = clArgs[0]
# glob returns an empty (falsey) list when the path matches nothing.
if not glob.glob(dbname):
    if dbname == "":
        sys.stderr.write("No database specified.\n")
    else:
        sys.stderr.write("Invalid database, \"%s\", specified.\n"%dbname)
    parser.print_usage()
    sys.exit(1)

#
# Open the database, get metadata, get info on curve 'dirs'
#
OpenDatabase(dbname)
metadata = GetMetaData(dbname)
dirMap = GetVarMap(metadata)

#
# Build up base save window attributes
#
swa = SaveWindowAttributes()
swa.family = 0  # do not auto-number output files; we set fileName per plot
swa.width = clOpts.image_width
swa.height = clOpts.image_height

#
# Build up base curve attributes
#
ca = CurveAttributes()
ca.lineWidth = clOpts.line_width
if clOpts.color0 != None:
    ca.color = ColorTupleFromHexString(clOpts.color0)
    ca.cycleColors = 0
ca.showLabels = clOpts.show_labels
#if clOpts.point_density > 0:
#    ca.showPoints = 1
#ca.pointSize = clOpts.point_size
ca.showLegend = clOpts.show_legend
#ca.symbolDensity = clOpts.point_density
SetDefaultPlotOptions(ca)

#
# Iterate through all curve 'dirs', finding instances where
# all essential variables exist. Create expressions and plot 'em
#
for k in list(dirMap.keys()):
    if not ("Ec" in dirMap[k] and \
            "cEc" in dirMap[k] and \
            "cEc_fit" in dirMap[k]):
        print("Ignoring %s because not all required vars are present."%k)
        #del dirMap[k]
        continue
    # c0 = Ec - cEc_fit, c1 = cEc - cEc_fit: difference curves vs. the fit.
    DefineCurveExpression("%s/c0"%k, "<%s/Ec>-<%s/cEc_fit>"%(k,k))
    DefineCurveExpression("%s/c1"%k, "<%s/cEc>-<%s/cEc_fit>"%(k,k))
    AddPlot("Curve","%s/c0"%k)
    AddPlot("Curve","%s/c1"%k)
    DrawPlots()
    # Apply any user-requested fixed axis limits / log scaling.
    v = GetViewCurve()
    if clOpts.x_min != None:
        v.domainCoords = (clOpts.x_min, v.domainCoords[1])
    if clOpts.x_max != None:
        v.domainCoords = (v.domainCoords[0], clOpts.x_max)
    if clOpts.log_x:
        v.domainScale = v.LOG
    if clOpts.data_min != None:
        v.rangeCoords = (clOpts.data_min, v.rangeCoords[1])
    if clOpts.data_max != None:
        v.rangeCoords = (v.rangeCoords[0], clOpts.data_max)
    if clOpts.log_data:
        v.rangeScale = v.LOG
    SetViewCurve(v)
    # Second curve (plot index 1) gets its own color if requested.
    if clOpts.color1 != None:
        ca2 = CurveAttributes()
        ca2.color = ColorTupleFromHexString(clOpts.color1)
        ca2.cycleColors = 0
        SetActivePlots((1,))
        SetPlotOptions(ca2)
    DrawPlots()
    if clOpts.curve_format != None:
        swa.format = getattr(swa,clOpts.curve_format.upper())
        swa.fileName = k # .curve is added automatically
        SetSaveWindowAttributes(swa)
        SaveWindow()
    if clOpts.image_format != None:
        swa.format = getattr(swa,clOpts.image_format.upper())
        #swa.fileName = "%s.%s"%(k,clOpts.image_format.lower())
        swa.fileName = k
        SetSaveWindowAttributes(swa)
        SaveWindow()
    DeleteAllPlots()
| 2.78125 | 3 |
tests/test_als_implicit.py | DeniseWarncke/Fair-Recommendations | 1 | 12760710 | <gh_stars>1-10
import logging
import pickle
from lenskit import topn
from lenskit.algorithms import als
import pandas as pd
import numpy as np
from pytest import mark
import lk_test_utils as lktu
_log = logging.getLogger(__name__)
# Tiny 3-user / 3-item ratings frame shared by the unit tests below.
simple_df = pd.DataFrame({'item': [1, 1, 2, 3],
                          'user': [10, 12, 10, 13],
                          'rating': [4.0, 3.0, 5.0, 2.0]})
def test_als_basic_build():
    """Fitting on the tiny frame should index every user/item and
    produce factor matrices of the requested width."""
    model = als.ImplicitMF(20, iterations=10)
    model.fit(simple_df)

    assert set(model.user_index_) == {10, 12, 13}
    assert set(model.item_index_) == {1, 2, 3}
    assert model.user_features_.shape == (3, 20)
    assert model.item_features_.shape == (3, 20)
def test_als_predict_basic():
    """A known user/item pair should yield a single, bounded score."""
    model = als.ImplicitMF(20, iterations=10)
    model.fit(simple_df)

    scores = model.predict_for_user(10, [3])

    assert len(scores) == 1
    assert scores.index[0] == 3
    assert -0.1 <= scores.loc[3] <= 5
def test_als_predict_bad_item():
    """An item unseen at training time scores NaN instead of raising."""
    model = als.ImplicitMF(20, iterations=10)
    model.fit(simple_df)

    scores = model.predict_for_user(10, [4])

    assert len(scores) == 1
    assert scores.index[0] == 4
    assert np.isnan(scores.loc[4])
def test_als_predict_bad_user():
    """A user unseen at training time scores NaN instead of raising."""
    model = als.ImplicitMF(20, iterations=10)
    model.fit(simple_df)

    scores = model.predict_for_user(50, [3])

    assert len(scores) == 1
    assert scores.index[0] == 3
    assert np.isnan(scores.loc[3])
@lktu.wantjit
def test_als_train_large():
    """Training on the MovieLens frame indexes every distinct user/item."""
    model = als.ImplicitMF(20, iterations=20)
    ratings = lktu.ml_pandas.renamed.ratings
    model.fit(ratings)

    n_users = ratings.user.nunique()
    n_items = ratings.item.nunique()
    assert len(model.user_index_) == n_users
    assert len(model.item_index_) == n_items
    assert model.user_features_.shape == (n_users, 20)
    assert model.item_features_.shape == (n_items, 20)
def test_als_save_load():
    """A fitted model must survive a pickle round trip unchanged."""
    original = als.ImplicitMF(20, iterations=5)
    ratings = lktu.ml_pandas.renamed.ratings
    original.fit(ratings)

    blob = pickle.dumps(original)
    _log.info('serialized to %d bytes', len(blob))
    clone = pickle.loads(blob)

    assert np.all(clone.user_features_ == original.user_features_)
    assert np.all(clone.item_features_ == original.item_features_)
    assert np.all(clone.item_index_ == original.item_index_)
    assert np.all(clone.user_index_ == original.user_index_)
@lktu.wantjit
def test_als_train_large_noratings():
    """Fitting must also work when the rating column is dropped."""
    model = als.ImplicitMF(20, iterations=20)
    ratings = lktu.ml_pandas.renamed.ratings
    ratings = ratings.loc[:, ['user', 'item']]
    model.fit(ratings)

    n_users = ratings.user.nunique()
    n_items = ratings.item.nunique()
    assert len(model.user_index_) == n_users
    assert len(model.item_index_) == n_items
    assert model.user_features_.shape == (n_users, 20)
    assert model.item_features_.shape == (n_items, 20)
@mark.slow
@mark.eval
@mark.skipif(not lktu.ml100k.available, reason='ML100K data not present')
def test_als_implicit_batch_accuracy():
    """End-to-end eval on ML-100K: nDCG of top-100 recs must be positive."""
    import lenskit.crossfold as xf
    from lenskit import batch
    from lenskit import topn

    ratings = lktu.ml100k.load_ratings()

    algo = als.ImplicitMF(25, iterations=20)

    # Renamed from `eval` so the builtin is not shadowed.
    def eval_fold(train, test):
        """Fit on one fold's training data and recommend for its test users."""
        _log.info('running training')
        train['rating'] = train.rating.astype(np.float_)
        algo.fit(train)
        users = test.user.unique()
        _log.info('testing %d users', len(users))
        candidates = topn.UnratedCandidates(train)
        recs = batch.recommend(algo, users, 100, candidates)
        return recs

    folds = list(xf.partition_users(ratings, 5, xf.SampleFrac(0.2)))
    test = pd.concat(te for (tr, te) in folds)
    recs = pd.concat(eval_fold(train, test) for (train, test) in folds)

    _log.info('analyzing recommendations')
    rla = topn.RecListAnalysis()
    rla.add_metric(topn.ndcg)
    results = rla.compute(recs, test)
    _log.info('nDCG for users is %.4f', results.ndcg.mean())
    assert results.ndcg.mean() > 0
| 2.171875 | 2 |
tests/test_note_pages.py | alexdmoss/alchemyst | 0 | 12760711 | <gh_stars>0
from alchemyst import app
from fakes.fake_note import FakeOrganicNote, FakeInorganicNote, FakePhysicalNote
def test_notes_page(mocker):
    """GET /notes renders the full list with all three fake notes and the filter UI."""
    # Patch the data layer so the view renders our fakes instead of hitting storage.
    mocker.patch('alchemyst.ui.routes.notes')
    notes_list = mocker.patch('alchemyst.ui.routes.notes_from_dicts')
    notes_list.return_value = [FakeInorganicNote(), FakeOrganicNote(), FakePhysicalNote()]
    with app.test_request_context('/notes', method='GET'):
        request = app.dispatch_request()
        response = app.make_response(request)
        assert response.status_code == 200
        assert "Filter By" in response.get_data(as_text=True)
        assert "Advanced Solid State" in response.get_data(as_text=True)
        assert "Applications of Statistical Mechanics" in response.get_data(as_text=True)
        assert "Alicyclic Chemistry" in response.get_data(as_text=True)
def test_notes_by_category(mocker):
    """GET /notes/<category> renders only the notes for that category."""
    mocker.patch('alchemyst.ui.routes.notes_by_category')
    notes_list = mocker.patch('alchemyst.ui.routes.notes_from_dicts')
    notes_list.return_value = [FakePhysicalNote()]
    with app.test_request_context('/notes/physical', method='GET'):
        request = app.dispatch_request()
        response = app.make_response(request)
        assert response.status_code == 200
        assert "Filter By" in response.get_data(as_text=True)
        assert "Applications of Statistical Mechanics" in response.get_data(as_text=True)
        assert "Physical - 3rd Year Undergraduate" in response.get_data(as_text=True)
        assert "Starts with basic revision" in response.get_data(as_text=True)
def test_note_page(mocker):
    """GET /note/<slug> renders a single note's detail view from the fake."""
    # Stub out document retrieval and note construction; only note_view's
    # return value feeds the template.
    mocker.patch('alchemyst.ui.routes.get_document')
    mocker.patch('alchemyst.ui.routes.note')
    mocker.patch('alchemyst.ui.routes.note_from_dict')
    note = mocker.patch('alchemyst.ui.routes.note_view')
    note.return_value = FakePhysicalNote()
    with app.test_request_context('/note/advanced-solid-state', method='GET'):
        request = app.dispatch_request()
        response = app.make_response(request)
        assert response.status_code == 200
        assert "Applications of Statistical Mechanics" in response.get_data(as_text=True)
        assert "Author" in response.get_data(as_text=True)
        assert "<NAME>" in response.get_data(as_text=True)
        assert "basic revision" in response.get_data(as_text=True)
        assert "3rd Year Undergraduate" in response.get_data(as_text=True)
def test_pdf_page(mocker, socket_enabled):
    """GET /pdf/<path> serves the real PDF asset with correct headers.

    NOTE(review): needs the socket_enabled fixture, so this presumably
    fetches the asset over the network or a local server -- confirm.
    """
    with app.test_request_context('/pdf/Inorganic/solid_state_advanced.pdf', method='GET'):
        request = app.dispatch_request()
        response = app.make_response(request)
        assert response.status_code == 200
        assert response.headers['Content-Type'] == 'application/pdf'
        assert int(response.headers['Content-Length']) > 65000
| 2.25 | 2 |
tests/test_flask_without_expected_scopes.py | Simon-Le/layabauth | 4 | 12760712 | import flask
import flask_restx
import flask.testing
import layabauth.flask
from layabauth.testing import *
@pytest.fixture
def app() -> flask.Flask:
    """Build a minimal Flask app exposing one endpoint that requires
    authentication plus the scopes "scope1" and "scope2"."""
    application = flask.Flask(__name__)
    application.testing = True
    api = flask_restx.Api(application)

    @api.route("/requires_scopes")
    class RequiresScopes(flask_restx.Resource):
        @layabauth.flask.requires_authentication("https://test_identity_provider")
        def get(self):
            # Scope check happens inside the handler (not as a decorator):
            # raises if either required scope is absent from the token body.
            layabauth.flask.requires_scopes(
                lambda token, token_body: token_body["scopes"], "scope1", "scope2"
            )
            return flask.g.token_body

    return application
@pytest.fixture
def jwks_uri():
    # Identity-provider URL matching the one in the app fixture's decorator;
    # consumed by layabauth's auth_mock.
    return "https://test_identity_provider"
@pytest.fixture
def token_body():
    # Decoded token payload injected by auth_mock; deliberately carries
    # scope2/scope3 but NOT scope1, so the scope check must fail.
    return {"upn": "<EMAIL>", "scopes": ["scope2", "scope3"]}
def test_auth_mock_with_1_scope_ok_1_missing(
    client: flask.testing.FlaskClient, auth_mock
):
    """Holding only one of the two required scopes must produce HTTP 403
    naming the missing scope."""
    response = client.open(
        method="GET",
        path="/requires_scopes",
        headers={"Authorization": "Bearer my_token"},
    )
    assert response.status_code == 403
    assert response.json == {"message": "The scope1 must be provided in the token."}
| 2.28125 | 2 |
tests/test_package_fetcher.py | UnitedTraders/centos-package-cron | 0 | 12760713 | #!/usr/bin/python
# coding: latin-1
import unittest
import sys
import os
from centos_package_cron import package_fetcher
from mock import Mock
from centos_package_cron import mockable_execute
class ChangeLogParserTestCase(unittest.TestCase):
    """Unit tests for package_fetcher.ChangeLogParser (Python 2).

    The testParse* cases feed canned `yum changelog` output from
    tests/changelog_*.txt fixtures and verify that the entry matching a
    given package/version/release is extracted verbatim.
    """

    def testGet_log_version_nums_selinux(self):
        # arrange
        parser = package_fetcher.ChangeLogParser()
        # act
        result = parser.get_log_version_nums('3.12.1','153.el7_0.11')
        # assert
        assert result == ['3.12.1-153.11', '3.12.1-153', '3.12.1-153.el7_0.11']

    def testGet_log_version_nums_suffix(self):
        # arrange
        parser = package_fetcher.ChangeLogParser()
        # act
        result = parser.get_log_version_nums('1.0.1e','34.el7_0.4')
        # assert
        assert result == ['1.0.1e-34.4', '1.0.1e-34', '1.0.1e-34.el7_0.4']

    def testGet_log_version_nums_no_suffix(self):
        # arrange
        parser = package_fetcher.ChangeLogParser()
        # act
        result = parser.get_log_version_nums('2014.1.98','70.0.el7_0')
        # assert
        assert result == ['2014.1.98-70.0', '2014.1.98-70.0.el7_0']

    def testGet_log_version_nums_rhel_7(self):
        # arrange
        parser = package_fetcher.ChangeLogParser()
        # act
        result = parser.get_log_version_nums('4.2.45','5.el7_0.4')
        # assert
        assert result == ['4.2.45-5.4', '4.2.45-5', '4.2.45-5.el7_0.4']

    def testParseStandardRhel(self):
        # arrange
        parser = package_fetcher.ChangeLogParser()
        output = open('tests/changelog_bash.txt').read()
        # act
        results = parser.parse(output,'bash','4.2.45','5.el7_0.4')
        # assert
        expected_output = """* Thu Sep 25 07:00:00 2014 <NAME> <<EMAIL>> - 4.2.45-5.4
- CVE-2014-7169
Resolves: #1146324
"""
        self.assertEquals(results,expected_output)

    def testParseSeLinux(self):
        # arrange
        parser = package_fetcher.ChangeLogParser()
        output = open('tests/changelog_selinux-policy-targeted.txt').read()
        # act
        results = parser.parse(output,'selinux-policy-targeted', '3.12.1', '153.el7_0.11')
        # assert
        expected_output = """* Fri Aug 22 07:00:00 2014 <NAME> <<EMAIL>> 3.12.1-153.el7_0.11
- Back port OpenStack fixes
- Allow mdadm to connect to own socket created by mdadm running as kernel_t
Resolves:#1132828
"""
        self.assertEquals(results,expected_output)

    def testParseWithRegexCharInPackageName(self):
        # arrange: 'libstdc++' contains regex metacharacters ('+')
        parser = package_fetcher.ChangeLogParser()
        output = open('tests/changelog_stdc.txt').read()
        # act
        results = parser.parse(output,'libstdc++','4.8.2','16.2.el7_0')
        # assert
        expected_output = """* Wed Aug 6 07:00:00 2014 <NAME> <<EMAIL>> 4.8.2-16.2
- backport two further OpenMP 4.0 libgomp tasking fixes (#1121077)
- fix scheduler wrong-code with DEBUG_INSNs containing volatile ASM_OPERANDS
(#1127120, PR rtl-optimization/61801)
"""
        assert results == expected_output

    def testPackageInMiddle(self):
        # arrange
        parser = package_fetcher.ChangeLogParser()
        output = open('tests/changelog_nss-tools.txt').read()
        # act
        results = parser.parse(output,'nss-tools','3.16.2', '7.el7_0')
        # assert
        expected_output = """* Wed Sep 24 07:00:00 2014 <NAME> <<EMAIL>> - 3.16.2-7
- Resolves: Bug 1145433 - CVE-2014-1568
"""
        assert results == expected_output

    def testParseCentos(self):
        # arrange
        parser = package_fetcher.ChangeLogParser()
        output = open('tests/changelog_openssl.txt').read()
        # act
        results = parser.parse(output,'openssl','1.0.1e','34.el7_0.4')
        # assert
        expected_output = """* Fri Aug 8 07:00:00 2014 <NAME> <<EMAIL>> 1.0.1e-34.4
- fix CVE-2014-3505 - doublefree in DTLS packet processing
- fix CVE-2014-3506 - avoid memory exhaustion in DTLS
- fix CVE-2014-3507 - avoid memory leak in DTLS
- fix CVE-2014-3508 - fix OID handling to avoid information leak
- fix CVE-2014-3509 - fix race condition when parsing server hello
- fix CVE-2014-3510 - fix DoS in anonymous (EC)DH handling in DTLS
- fix CVE-2014-3511 - disallow protocol downgrade via fragmentation
"""
        self.assertEquals(results,expected_output)

    def testParseAnotherVersionString(self):
        # arrange
        parser = package_fetcher.ChangeLogParser()
        output = open('tests/changelog_ca-certificates.txt').read()
        # act
        results = parser.parse(output,'ca-certificates','2014.1.98','70.0.el7_0')
        # assert
        expected_output = """* Thu Sep 11 07:00:00 2014 <NAME> <<EMAIL>> - 2014.1.98-70.0
- update to CKBI 1.98 from NSS 3.16.1
- building on RHEL 7 no longer requires java-openjdk
- added more detailed instructions for release numbers on RHEL branches,
to avoid problems when rebasing on both z- and y-stream branches.
"""
        self.assertEquals(results,expected_output)

    def testNotFound(self):
        # arrange: version 4.4 does not appear in the bash fixture
        parser = package_fetcher.ChangeLogParser()
        output = open('tests/changelog_bash.txt').read()
        # act
        result = parser.parse(output,'bash','4.4','5.el7_0.4')
        # assert
        self.assertEquals(result,'Unable to parse changelog for package bash version 4.4 release 5.el7_0.4')
class PackageFetcherTestCase(unittest.TestCase):
    """Tests for package_fetcher.PackageFetcher (Python 2).

    The testGetPackageChangeLogMock* cases verify the exact yum command
    line via mocks; the *Real* cases shell out to yum and therefore
    require a CentOS host with the expected repos configured.
    """

    def testfetch_installed_packages(self):
        # arrange
        ch_log_parser = Mock()
        executor = Mock()
        fetcher = package_fetcher.PackageFetcher(ch_log_parser,executor)
        # act
        result = fetcher.fetch_installed_packages()
        # assert
        expected_min_number = 183
        assert len(result) >= expected_min_number, "Actual size is %d" % (len(result))
        first_package = result[0]
        assert isinstance(first_package, package_fetcher.Package)
        print "1st package name is %s" % (first_package.name)
        assert first_package.name != None
        print "1st package version is %s" % (first_package.version)
        assert first_package.version != None
        print "1st package release is %s" % (first_package.release)
        assert first_package.release != None
        self.assertEquals(first_package.arch, 'x86_64')
        assert first_package.repository != None

    def testFetch_package_updates(self):
        # arrange
        ch_log_parser = Mock()
        executor = Mock()
        fetcher = package_fetcher.PackageFetcher(ch_log_parser,executor)
        # act
        result = fetcher.get_package_updates()
        # assert
        assert len(result) > 0
        first_package = result[0]
        assert isinstance(first_package, package_fetcher.Package)
        print "1st package name is %s" % (first_package.name)
        self.assertNotEquals(first_package.name, None)
        print "1st package version is %s" % (first_package.version)
        self.assertNotEquals(first_package.version, None)
        print "1st package release is %s" % (first_package.release)
        self.assertNotEquals(first_package.release, None)
        self.assertNotEquals(first_package.arch, None)
        assert first_package.repository != None

    def testGet_package_updates_with_exclusions_specific(self):
        # arrange: exclude every repo by name
        ch_log_parser = Mock()
        executor = Mock()
        fetcher = package_fetcher.PackageFetcher(ch_log_parser,executor,['epel', 'extras', 'updates', 'base'])
        # act
        result = fetcher.get_package_updates()
        # assert
        # We should have excluded everything by now
        assert len(result) == 0

    def testGet_package_updates_with_exclusions_wildcard(self):
        # arrange: '*' excludes all repos
        ch_log_parser = Mock()
        executor = Mock()
        fetcher = package_fetcher.PackageFetcher(ch_log_parser,executor,['*'])
        # act
        result = fetcher.get_package_updates()
        # assert
        # We should have excluded everything by now
        assert len(result) == 0

    def testGet_package_updates_with_inclusions_specific(self):
        # arrange: exclude everything, then re-include 'updates'
        ch_log_parser = Mock()
        executor = Mock()
        fetcher = package_fetcher.PackageFetcher(ch_log_parser,executor,['*'],['updates'])
        # act
        result = fetcher.get_package_updates()
        # assert
        assert len(result) > 0

    def testGet_package_updates_with_inclusions_wildcard(self):
        # arrange: exclude 'updates', then re-include everything
        ch_log_parser = Mock()
        executor = Mock()
        fetcher = package_fetcher.PackageFetcher(ch_log_parser,executor,['updates'],['*'])
        # act
        result = fetcher.get_package_updates()
        # assert
        assert len(result) > 0

    def testGetWhatDependsOn(self):
        # arrange
        fetcher = package_fetcher.PackageFetcher(package_fetcher.ChangeLogParser(),mockable_execute.MockableExecute())
        # act
        result = fetcher.get_what_depends_on('nss')
        # assert
        assert len(result) >= 2, "Actual size is %d" % (len(result))
        names = map(lambda p:p.name, result)
        # list includes
        anything_missing = list(set(['rpm-libs']) - set(names))
        assert anything_missing == []

    def testGetWhatDependsOnNone(self):
        # arrange: a package nothing installed depends on
        fetcher = package_fetcher.PackageFetcher(package_fetcher.ChangeLogParser(),mockable_execute.MockableExecute())
        # act
        result = fetcher.get_what_depends_on('postgresql93-devel')
        # assert
        assert result == []

    def testGetPackageChangeLogMock(self):
        # arrange
        ch_log_parser = Mock()
        ch_log_parser.parse = Mock(return_value='the changelog info')
        executor = Mock()
        executor.run_command = Mock(return_value='the raw output')
        fetcher = package_fetcher.PackageFetcher(ch_log_parser,executor)
        # act
        result = fetcher.get_package_changelog('bash', '1.2', '33')
        # assert
        executor.run_command.assert_called_once_with(['/usr/bin/yum', 'changelog', 'updates', 'bash'])
        self.assertEquals(result, 'the changelog info')

    def testGetPackageChangeLogMockWithExclusions(self):
        # arrange: exclusions should surface as a --disablerepo flag
        ch_log_parser = Mock()
        ch_log_parser.parse = Mock(return_value='the changelog info')
        executor = Mock()
        executor.run_command = Mock(return_value='the raw output')
        fetcher = package_fetcher.PackageFetcher(ch_log_parser,executor,['extras','updates'])
        # act
        result = fetcher.get_package_changelog('bash', '1.2', '33')
        # assert
        executor.run_command.assert_called_once_with(['/usr/bin/yum', '--disablerepo=extras,updates' ,'changelog', 'updates', 'bash'])
        self.assertEquals(result, 'the changelog info')

    def testGetPackageChangeLogMockWithInclusions(self):
        # arrange: inclusions should surface as an --enablerepo flag
        ch_log_parser = Mock()
        ch_log_parser.parse = Mock(return_value='the changelog info')
        executor = Mock()
        executor.run_command = Mock(return_value='the raw output')
        fetcher = package_fetcher.PackageFetcher(ch_log_parser,executor,repos_to_include=['epel'])
        # act
        result = fetcher.get_package_changelog('bash', '1.2', '33')
        # assert
        executor.run_command.assert_called_once_with(['/usr/bin/yum', '--enablerepo=epel' ,'changelog', 'updates', 'bash'])
        self.assertEquals(result, 'the changelog info')

    def testGetPackageChangeLogRealBash(self):
        # arrange
        fetcher = package_fetcher.PackageFetcher(package_fetcher.ChangeLogParser(),mockable_execute.MockableExecute())
        # act
        result = fetcher.get_package_changelog('bash', '4.2.45', '5.el7_0.4')
        # assert
        self.assertNotEquals(result, None)

    def testGetPackageChangeLogRealBashExclusionsSetup(self):
        # arrange: with all repos disabled the changelog cannot be found
        fetcher = package_fetcher.PackageFetcher(package_fetcher.ChangeLogParser(),mockable_execute.MockableExecute(),['epel', 'extras', 'updates'])
        # act
        result = fetcher.get_package_changelog('bash', '4.2.45', '5.el7_0.4')
        # assert
        assert result == 'Unable to parse changelog for package bash version 4.2.45 release 5.el7_0.4'

    def testGetPackageChangeLogRealBashInclusionsSetup(self):
        # arrange: re-including '*' restores access to the changelog
        fetcher = package_fetcher.PackageFetcher(package_fetcher.ChangeLogParser(),mockable_execute.MockableExecute(),['epel', 'extras', 'updates'],['*'])
        # act
        result = fetcher.get_package_changelog('bash', '4.2.45', '5.el7_0.4')
        # assert
        self.assertNotEquals(result, None)

    def testGetPackageChangeLogRealOpenssl(self):
        # arrange
        fetcher = package_fetcher.PackageFetcher(package_fetcher.ChangeLogParser(),mockable_execute.MockableExecute())
        # act
        result = fetcher.get_package_changelog('openssl', '1.0.1e', '34.el7_0.4')
        # assert
        self.assertNotEquals(result, None)
# Run the unittest test runner when executed directly.
if __name__ == "__main__":
    unittest.main()
| 2.546875 | 3 |
src/jets/train/train_one_batch.py | isaachenrion/jets | 9 | 12760714 | <gh_stars>1-10
import logging
import time
import torch
from src.data_ops.wrapping import unwrap
from src.admin.utils import log_gpu_usage
from ..loss import loss
def train_one_batch(model, batch, optimizer, administrator, epoch, batch_number, clip):
    """Run one optimization step on a single (x, y) batch.

    Performs forward pass, loss, backward pass, optional gradient-norm
    clipping, and an optimizer step; returns the scalar loss as a float.
    NOTE(review): torch.nn.utils.clip_grad_norm is the pre-0.4 spelling
    (deprecated in favor of clip_grad_norm_) -- confirm the targeted
    torch version before changing it.
    """
    logger = administrator.logger
    (x, y) = batch

    # forward
    model.train()
    optimizer.zero_grad()
    y_pred = model(x, logger=logger, epoch=epoch, iters=batch_number)
    l = loss(y_pred, y)

    # backward
    l.backward()
    if clip is not None:
        torch.nn.utils.clip_grad_norm(model.parameters(), clip)

    # Parameter/gradient snapshot monitoring, currently disabled.
    #if batch_number == 0:
    #    old_params = torch.cat([p.view(-1) for p in model.parameters()], 0)
    #    grads = torch.cat([p.grad.view(-1) for p in model.parameters() if p.grad is not None], 0)

    # Log GPU memory once per epoch, on the second batch (after allocations settle).
    if batch_number == 1:
        log_gpu_usage()

    optimizer.step()

    #if batch_number == 0:
    #    model_params = torch.cat([p.view(-1) for p in model.parameters()], 0)
    #    logdict = dict(
    #        grads=grads,
    #        old_params=old_params,
    #        model_params=model_params
    #    )
    #    administrator.training_only_monitors(**logdict)
    #    administrator.training_only_monitors.visualize()

    #del y; del y_pred; del x; del batch

    # unwrap() pulls the scalar out of the tensor/Variable for plain logging.
    return float(unwrap(l))
| 2.03125 | 2 |
misc/zip/Cura-master/cura/Machines/QualityGroup.py | criscola/G-Gen | 1 | 12760715 | # Copyright (c) 2018 Ultim<NAME>.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import Dict, Optional, List
from PyQt5.QtCore import QObject, pyqtSlot
#
# A QualityGroup represents a group of containers that must be applied to each ContainerStack when it's used.
# Some concrete examples are Quality and QualityChanges: when we select quality type "normal", this quality type
# must be applied to all stacks in a machine, although each stack can have different containers. Use an Ultimaker 3
# as an example, suppose we choose quality type "normal", the actual InstanceContainers on each stack may look
# as below:
# GlobalStack ExtruderStack 1 ExtruderStack 2
# quality container: um3_global_normal um3_aa04_pla_normal um3_aa04_abs_normal
#
# This QualityGroup is mainly used in quality and quality_changes to group the containers that can be applied to
# a machine, so when a quality/custom quality is selected, the container can be directly applied to each stack instead
# of looking them up again.
#
class QualityGroup(QObject):
    """Groups the quality container nodes that apply to one machine.

    One node is held for the global stack and one per extruder position, so a
    selected quality type can be applied to every stack in a single pass.
    """

    def __init__(self, name: str, quality_type: str, parent = None):
        super().__init__(parent)
        self.name = name
        self.node_for_global = None  # container node for the global stack (set by the quality manager)
        self.nodes_for_extruders = {}  # maps extruder position -> container node
        self.quality_type = quality_type
        self.is_available = False

    @pyqtSlot(result = str)
    def getName(self) -> str:
        """Expose the group name to QML."""
        return self.name

    def getAllKeys(self) -> set:
        """Collect every setting key defined by any node's container."""
        keys = set()
        candidate_nodes = [self.node_for_global] + list(self.nodes_for_extruders.values())
        for node in candidate_nodes:
            if node is not None:
                keys.update(node.getContainer().getAllKeys())
        return keys

    def getAllNodes(self) -> List["QualityGroup"]:
        """Return the global node (if set) followed by all extruder nodes."""
        nodes = []
        if self.node_for_global is not None:
            nodes.append(self.node_for_global)
        nodes.extend(self.nodes_for_extruders.values())
        return nodes
| 2.34375 | 2 |
coverage-3.7.1/tests/test_misc.py | I-Valchev/UrPas | 1 | 12760716 | <filename>coverage-3.7.1/tests/test_misc.py
"""Tests of miscellaneous stuff."""
import sys
from coverage.misc import Hasher, file_be_gone
from coverage import __version__, __url__
from tests.coveragetest import CoverageTest
class HasherTest(CoverageTest):
    """Test our wrapper of md5 hashing."""

    run_in_temp_dir = False

    def test_string_hashing(self):
        # Identical inputs must hash identically; different inputs must not.
        first = Hasher()
        first.update("Hello, world!")
        second = Hasher()
        second.update("Goodbye!")
        third = Hasher()
        third.update("Hello, world!")
        self.assertNotEqual(first.digest(), second.digest())
        self.assertEqual(first.digest(), third.digest())

    def test_dict_hashing(self):
        # Dict hashing must be insensitive to key insertion order.
        forward = Hasher()
        forward.update({'a': 17, 'b': 23})
        backward = Hasher()
        backward.update({'b': 23, 'a': 17})
        self.assertEqual(forward.digest(), backward.digest())
class RemoveFileTest(CoverageTest):
    """Tests of misc.file_be_gone."""

    def test_remove_nonexistent_file(self):
        # Removing a missing file is a silent no-op, not an error.
        file_be_gone("not_here.txt")

    def test_remove_actual_file(self):
        # A file that exists really is deleted from disk.
        self.make_file("here.txt", "We are here, we are here, we are here!")
        file_be_gone("here.txt")
        self.assert_doesnt_exist("here.txt")

    def test_actual_errors(self):
        # Genuine OS errors still propagate:
        # ". is a directory" on Unix, or "Access denied" on Windows.
        with self.assertRaises(OSError):
            file_be_gone(".")
class SetupPyTest(CoverageTest):
    """Tests of setup.py"""
    run_in_temp_dir = False
    def test_metadata(self):
        # Run setup.py and check one metadata field per output line, in the
        # order the flags were given on the command line.
        status, output = self.run_command_status(
            "python setup.py --description --version --url --author"
            )
        self.assertEqual(status, 0)
        out = output.splitlines()
        self.assertIn("measurement", out[0])
        self.assertEqual(out[1], __version__)
        self.assertEqual(out[2], __url__)
        self.assertIn("<NAME>", out[3])
    def test_more_metadata(self):
        # Let's be sure we pick up our own setup.py
        # CoverageTest.tearDown restores the original sys.path.
        sys.path.insert(0, '')
        from setup import setup_args
        # Sanity-check the classifiers list: non-trivial, and the development
        # status classifier sorts last.
        classifiers = setup_args['classifiers']
        self.assertGreater(len(classifiers), 7)
        self.assertTrue(classifiers[-1].startswith("Development Status ::"))
        # The long description should be substantial and trimmed of blank edges.
        long_description = setup_args['long_description'].splitlines()
        self.assertGreater(len(long_description), 7)
        self.assertNotEqual(long_description[0].strip(), "")
        self.assertNotEqual(long_description[-1].strip(), "")
| 2.53125 | 3 |
tests/urls.py | marquicus/django-postalcodes-mexico | 0 | 12760717 | <reponame>marquicus/django-postalcodes-mexico<filename>tests/urls.py<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.urls import path, include
from django.contrib import admin
# URL routes for the test project: the Django admin plus the app under test,
# mounted at the site root under its own namespace.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('django_postalcodes_mexico.urls', namespace='django_postalcodes_mexico'))
]
| 1.445313 | 1 |
daily_menu/user/management/commands/generate_recommendations.py | michalkvacek/at-chutna | 0 | 12760718 | from django.core.management import BaseCommand
from django.contrib.auth import get_user_model
from django.db.models import Prefetch, Q
from user.models import Recommender, Friendship, Recommendation
from user.recommendations.saver import RecommendationSaver
class Command(BaseCommand):
    """Management command that (re)generates recommendations for every user."""

    help = 'Generate recommendations for users'

    def handle(self, *args, **kwargs):
        # Prefetch every relation the recommenders touch so the per-user loop
        # below does not issue one query per relation per user.
        users = get_user_model().objects.prefetch_related(
            'automatic_classification',
            'manual_classification',
            'locations',
            # Only friendships explicitly flagged for lunching together matter.
            Prefetch('friends_set', queryset=Friendship.objects.filter(lunch_together=True)),
        ).all()

        saver = RecommendationSaver()
        generators = Recommender.objects.order_by('order').all()

        for generator in generators:
            for user in users:
                # Write progress through the command's stdout so it can be
                # captured/redirected by call_command(), unlike a bare print().
                self.stdout.write(user.email)
                saver.save_recommendations(generator, user)
| 2.234375 | 2 |
corehq/apps/sms/tasks.py | SEL-Columbia/commcare-hq | 1 | 12760719 | <gh_stars>1-10
import math
import pytz
import logging
from datetime import datetime, timedelta
from celery.task import task
from time import sleep
from redis_cache.cache import RedisCache
from corehq.apps.sms.mixin import SMSLoadBalancingMixin
from corehq.apps.sms.models import SMSLog, OUTGOING, INCOMING
from corehq.apps.sms.api import send_message_via_backend, process_incoming
from django.conf import settings
from corehq.apps.domain.models import Domain
from corehq.apps.smsbillables.models import SmsBillable
from dimagi.utils.timezones import utils as tz_utils
from dimagi.utils.couch.cache import cache_core
from threading import Thread
# Error codes recorded on SMSLog.system_error_message when processing gives up.
ERROR_TOO_MANY_UNSUCCESSFUL_ATTEMPTS = "TOO_MANY_UNSUCCESSFUL_ATTEMPTS"
ERROR_MESSAGE_IS_STALE = "MESSAGE_IS_STALE"
ERROR_INVALID_DIRECTION = "INVALID_DIRECTION"
def set_error(msg, system_error_message=None):
    """Mark *msg* as failed, record the reason, and persist the change."""
    msg.system_error_message = system_error_message
    msg.error = True
    msg.save()
def handle_unsuccessful_processing_attempt(msg):
    """Bump the attempt counter, then either schedule a retry or give up."""
    msg.num_processing_attempts += 1
    if msg.num_processing_attempts >= settings.SMS_QUEUE_MAX_PROCESSING_ATTEMPTS:
        # Out of retries: park the message with a permanent error code.
        set_error(msg, ERROR_TOO_MANY_UNSUCCESSFUL_ATTEMPTS)
    else:
        delay_processing(msg, settings.SMS_QUEUE_REPROCESS_INTERVAL)
def handle_successful_processing_attempt(msg):
    """Mark *msg* as processed now; outgoing messages also get their date stamped."""
    now = datetime.utcnow()
    msg.num_processing_attempts += 1
    msg.processed = True
    msg.processed_timestamp = now
    if msg.direction == OUTGOING:
        # Record the actual send time rather than the enqueue time.
        msg.date = now
    msg.save()
def delay_processing(msg, minutes):
    """Push the message's scheduled processing time *minutes* into the future."""
    msg.datetime_to_process = msg.datetime_to_process + timedelta(minutes=minutes)
    msg.save()
def get_lock(client, key):
    """Return a redis lock on *key* with the configured timeout (minutes -> seconds)."""
    timeout_seconds = settings.SMS_QUEUE_PROCESSING_LOCK_TIMEOUT * 60
    return client.lock(key, timeout=timeout_seconds)
def time_within_windows(domain_now, windows):
    """Return True when *domain_now* falls inside any of the given windows.

    A window matches when its day equals the current weekday (or -1, meaning
    every day) and the current time lies between its optional start/end times.
    """
    current_day = domain_now.weekday()
    current_time = domain_now.time()
    for window in windows:
        if window.day not in (current_day, -1):
            continue
        after_start = window.start_time is None or current_time >= window.start_time
        before_end = window.end_time is None or current_time <= window.end_time
        if after_start and before_end:
            return True
    return False
def handle_domain_specific_delays(msg, domain_object, utcnow):
    """
    Checks whether or not we need to hold off on sending an outbound message
    due to any restrictions set on the domain, and delays processing of the
    message if necessary.
    Returns True if a delay was made, False if not.
    """
    # Work in the domain's local timezone: restriction windows are defined locally.
    domain_now = tz_utils.adjust_datetime_to_timezone(utcnow, pytz.utc.zone,
        domain_object.default_timezone)
    # Outside the allowed sending window: push processing back and retry later.
    if len(domain_object.restricted_sms_times) > 0:
        if not time_within_windows(domain_now, domain_object.restricted_sms_times):
            delay_processing(msg, settings.SMS_QUEUE_DOMAIN_RESTRICTED_RETRY_INTERVAL)
            return True
    # During "conversation" windows, postpone automated messages a minute at a
    # time while the recipient has recently sent an inbound SMS, so we don't
    # interrupt an ongoing chat. Chat messages (chat_user_id set) are exempt.
    if msg.chat_user_id is None and len(domain_object.sms_conversation_times) > 0:
        if time_within_windows(domain_now, domain_object.sms_conversation_times):
            sms_conversation_length = domain_object.sms_conversation_length
            conversation_start_timestamp = utcnow - timedelta(minutes=sms_conversation_length)
            if SMSLog.inbound_entry_exists(msg.couch_recipient_doc_type,
                                           msg.couch_recipient,
                                           conversation_start_timestamp,
                                           utcnow):
                delay_processing(msg, 1)
                return True
    return False
def message_is_stale(msg, utcnow):
    """A message is stale when its date is missing, malformed, or too old to send."""
    cutoff = utcnow - timedelta(hours=settings.SMS_QUEUE_STALE_MESSAGE_DURATION)
    if not isinstance(msg.date, datetime):
        # No usable timestamp at all: treat as stale rather than sending late.
        return True
    return msg.date < cutoff
def _wait_and_release_lock(lock, timeout, start_timestamp):
while (datetime.utcnow() - start_timestamp) < timedelta(seconds=timeout):
sleep(0.1)
try:
lock.release()
except:
# The lock could have timed out in the meantime
pass
def wait_and_release_lock(lock, timeout):
    """Release *lock* after *timeout* seconds without blocking the caller."""
    started_at = datetime.utcnow()
    worker = Thread(target=_wait_and_release_lock, args=(lock, timeout, started_at))
    worker.start()
def handle_outgoing(msg):
    """
    Should return a requeue flag, so if it returns True, the message will be
    requeued and processed again immediately, and if it returns False, it will
    not be queued again.
    """
    def onerror():
        # Error callback handed to the backend; logs with the message id.
        logging.exception("Exception while processing SMS %s" % msg._id)
    backend = msg.outbound_backend
    # A non-None interval means this backend is rate-limited.
    sms_interval = backend.get_sms_interval()
    use_rate_limit = sms_interval is not None
    use_load_balancing = (isinstance(backend, SMSLoadBalancingMixin) and
        len(backend.phone_numbers) > 1)
    # Redis is only needed for rate limiting and/or load balancing.
    if use_rate_limit or use_load_balancing:
        client = cache_core.get_redis_client()
    lbi = None
    orig_phone_number = None
    if use_load_balancing:
        lbi = backend.get_next_phone_number(client)
        orig_phone_number = lbi.phone_number
    elif (isinstance(backend, SMSLoadBalancingMixin) and
        len(backend.phone_numbers) == 1):
        # If there's only one phone number, we don't need to go through the
        # load balancing algorithm. But we should always pass an
        # orig_phone_number if it's an instance of SMSLoadBalancingMixin.
        orig_phone_number = backend.phone_numbers[0]
    if use_rate_limit:
        # The rate-limit lock is per backend, or per backend+phone when
        # load balancing across numbers.
        if use_load_balancing:
            lock_key = "sms-backend-%s-rate-limit-phone-%s" % (backend._id,
                lbi.phone_number)
        else:
            lock_key = "sms-backend-%s-rate-limit" % backend._id
        lock = client.lock(lock_key, timeout=30)
    if not use_rate_limit or (use_rate_limit and lock.acquire(blocking=False)):
        if use_load_balancing:
            lbi.finish(save_stats=True)
        result = send_message_via_backend(msg, backend=backend,
            orig_phone_number=orig_phone_number, onerror=onerror)
        if use_rate_limit:
            # Hold the lock for sms_interval seconds on a background thread.
            wait_and_release_lock(lock, sms_interval)
        if result:
            handle_successful_processing_attempt(msg)
        else:
            handle_unsuccessful_processing_attempt(msg)
        return False
    else:
        # We're using rate limiting, but couldn't acquire the lock, so
        # another thread is sending sms with this backend. Rather than wait,
        # we'll just put this message at the back of the queue.
        if use_load_balancing:
            lbi.finish(save_stats=False)
        return True
def handle_incoming(msg):
    """Process one inbound SMS, recording success or scheduling a retry on failure."""
    try:
        process_incoming(msg)
        handle_successful_processing_attempt(msg)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
        logging.exception("Exception while processing SMS %s" % msg._id)
        handle_unsuccessful_processing_attempt(msg)
@task(queue="sms_queue")
def process_sms(message_id):
    """
    message_id - _id of an SMSLog entry
    """
    # Note that Redis error/exception notifications go out from the
    # run_sms_queue command, so no need to send them out here
    # otherwise we'd get too many emails.
    rcache = cache_core.get_redis_default_cache()
    if not isinstance(rcache, RedisCache):
        # Locking requires a real redis-backed cache; silently skip otherwise.
        return
    try:
        client = rcache.raw_client
    except NotImplementedError:
        return
    utcnow = datetime.utcnow()
    # Prevent more than one task from processing this SMS, just in case
    # the message got enqueued twice.
    message_lock = get_lock(client, "sms-queue-processing-%s" % message_id)
    if message_lock.acquire(blocking=False):
        msg = SMSLog.get(message_id)
        if message_is_stale(msg, utcnow):
            # Too old to send meaningfully: record the error and stop.
            set_error(msg, ERROR_MESSAGE_IS_STALE)
            message_lock.release()
            return
        if msg.direction == OUTGOING:
            # Domain-level sending windows may postpone this message.
            domain_object = Domain.get_by_name(msg.domain, strict=True)
            if handle_domain_specific_delays(msg, domain_object, utcnow):
                message_lock.release()
                return
        requeue = False
        # Process inbound SMS from a single contact one at a time
        recipient_block = msg.direction == INCOMING
        if (isinstance(msg.processed, bool)
            and not msg.processed
            and not msg.error
            and msg.datetime_to_process < utcnow):
            if recipient_block:
                recipient_lock = get_lock(client,
                    "sms-queue-recipient-phone-%s" % msg.phone_number)
                recipient_lock.acquire(blocking=True)
            if msg.direction == OUTGOING:
                requeue = handle_outgoing(msg)
            elif msg.direction == INCOMING:
                handle_incoming(msg)
            else:
                set_error(msg, ERROR_INVALID_DIRECTION)
            if recipient_block:
                recipient_lock.release()
        message_lock.release()
        if requeue:
            # handle_outgoing asked for an immediate retry (e.g. the backend's
            # rate-limit lock was held); re-enqueue the same message.
            process_sms.delay(message_id)
@task
def store_billable(msg):
    """Create one SmsBillable per billable SMS segment of *msg* (idempotent)."""
    if msg._id and not SmsBillable.objects.filter(log_id=msg._id).exists():
        try:
            # Latin-1-encodable text fits the single-byte alphabet: 160 chars/SMS.
            msg.text.encode('iso-8859-1')
            chars_per_sms = 160
        except UnicodeEncodeError:
            # This string contains unicode characters, so the allowed
            # per-sms message length is shortened
            chars_per_sms = 70
        num_segments = int(math.ceil(float(len(msg.text)) / chars_per_sms))
        for _ in range(num_segments):
            SmsBillable.create(msg)
| 1.914063 | 2 |
language/common/utils/exporters.py | oja/language | 1 | 12760720 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exporters for tf.estimator training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
from language.common.utils import file_utils
import tensorflow as tf
class BestSavedModelAndCheckpointExporter(tf.estimator.BestExporter):
    """Exporter that saves the best SavedModel and checkpoint."""
    def __init__(self,
                 eval_spec_name,
                 serving_input_receiver_fn,
                 compare_fn=None,
                 metric_name=None,
                 higher_is_better=True):
        """Creates an exporter that compares models on the given eval and metric.
        While the SavedModel is useful for inference, the checkpoint is useful for
        warm-starting another stage of training (e.g., fine-tuning).
        Args:
          eval_spec_name: Name of the EvalSpec to use to compare models.
          serving_input_receiver_fn: Callable that returns a ServingInputReceiver.
          compare_fn: Callable that compares eval metrics of two models. See
            tf.estimator.BestExporter for details on the expected API. Specify
            either this or `metric_name`.
          metric_name: Name of the eval metric to use to compare models. Specify
            either this or `compare_fn`.
          higher_is_better: Whether higher or lower eval metric values are better.
            Only used when `metric_name` is specified.
        """
        # NOTE(review): export() reads self._metric_name unconditionally when
        # writing best_metric.txt; if only compare_fn is supplied this is None
        # and that write would raise KeyError — confirm callers always pass
        # metric_name.
        self._metric_name = metric_name
        def _default_compare_fn(best_eval_result, current_eval_result):
            """Returns True if the current metric is better than the best metric."""
            if higher_is_better:
                return current_eval_result[metric_name] > best_eval_result[metric_name]
            else:
                return current_eval_result[metric_name] < best_eval_result[metric_name]
        super(BestSavedModelAndCheckpointExporter, self).__init__(
            name="best_%s" % eval_spec_name,
            serving_input_receiver_fn=serving_input_receiver_fn,
            event_file_pattern="eval_%s/*.tfevents.*" % eval_spec_name,
            compare_fn=compare_fn or _default_compare_fn)
    def export(self, estimator, export_path, checkpoint_path, eval_result,
               is_the_final_export):
        """Implements Exporter.export()."""
        # Since export() returns None if export was skipped, we can use this to
        # detect when the current model is the new best model.
        exported_dir = super(BestSavedModelAndCheckpointExporter, self).export(
            estimator=estimator,
            export_path=export_path,
            checkpoint_path=checkpoint_path,
            eval_result=eval_result,
            is_the_final_export=is_the_final_export)
        if exported_dir is None:
            return None  # best model unchanged
        # A new best model was exported: also snapshot its checkpoint so a
        # later training stage can warm-start from it.
        checkpoint_dir = os.path.join(export_path, "checkpoint")
        tf.logging.info("Saving new best checkpoint to %s", checkpoint_dir)
        file_utils.make_empty_dir(checkpoint_dir)
        file_utils.copy_files_to_dir(
            source_filepattern=checkpoint_path + ".*", dest_dir=checkpoint_dir)
        # Also save the new best metrics.
        all_metrics = "".join(
            "  %r: %r,\n" % (name, metric)
            for name, metric in sorted(self._best_eval_result.items()))
        file_utils.set_file_contents(
            data="{\n" + all_metrics + "}\n",
            path=os.path.join(export_path, "all_metrics.txt"))
        file_utils.set_file_contents(
            data="%d %r\n" % (self._best_eval_result["global_step"],
                              self._best_eval_result[self._metric_name]),
            path=os.path.join(export_path, "best_metric.txt"))
        file_utils.set_file_contents(
            data=exported_dir + "\n",
            path=os.path.join(export_path, "best_saved_model.txt"))
        return exported_dir
| 1.851563 | 2 |
tests/test_version.py | grdorin/mopidy | 6,700 | 12760721 | import unittest
from distutils.version import StrictVersion
from mopidy import __version__
class VersionTest(unittest.TestCase):
    """Sanity-check the package's version string."""

    def test_current_version_is_parsable_as_a_strict_version_number(self):
        # StrictVersion raises ValueError on anything non-conforming,
        # which fails the test.
        StrictVersion(__version__)
| 2.03125 | 2 |
tests/unit/generate_xml_test.py | menishmueli/minio-py | 0 | 12760722 | <reponame>menishmueli/minio-py
# -*- coding: utf-8 -*-
# MinIO Python Library for Amazon S3 Compatible Cloud Storage,
# (C) 2015, 2016, 2017, 2018, 2019 MinIO, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from nose.tools import eq_
from minio.definitions import Part
from minio.select.options import (CSVInput, CSVOutput, InputSerialization,
OutputSerialization, RequestProgress,
SelectObjectOptions)
from minio.xml_marshal import marshal_complete_multipart, xml_marshal_select
class GenerateRequestTest(TestCase):
    """Tests for the S3 XML request marshalling helpers."""
    def test_generate_complete_multipart_upload(self):
        """marshal_complete_multipart emits parts in order with quoted ETags."""
        expected_string = (b'<CompleteMultipartUpload '
                           b'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
                           b'<Part><PartNumber>1</PartNumber>'
                           b'<ETag>"a54357aff0632cce46d942af68356b38"</ETag>'
                           b'</Part>'
                           b'<Part><PartNumber>2</PartNumber>'
                           b'<ETag>"0c78aef83f66abc1fa1e8477f296d394"</ETag>'
                           b'</Part>'
                           b'<Part><PartNumber>3</PartNumber>'
                           b'<ETag>"acbd18db4cc2f85cedef654fccc4a4d8"</ETag>'
                           b'</Part>'
                           b'</CompleteMultipartUpload>')
        etags = [
            Part(1, 'a54357aff0632cce46d942af68356b38'),
            Part(2, '0c78aef83f66abc1fa1e8477f296d394'),
            Part(3, 'acbd18db4cc2f85cedef654fccc4a4d8'),
        ]
        actual_string = marshal_complete_multipart(etags)
        eq_(expected_string, actual_string)
    def test_xml_marshal_select(self):
        """xml_marshal_select serializes every CSV input/output option verbatim."""
        expected_string = (b'<SelectObjectContentRequest>'
                           b'<Expression>select * from s3object</Expression>'
                           b'<ExpressionType>SQL</ExpressionType>'
                           b'<InputSerialization>'
                           b'<CompressionType>NONE</CompressionType>'
                           b'<CSV><FileHeaderInfo>USE</FileHeaderInfo>'
                           b'<RecordDelimiter>\n</RecordDelimiter>'
                           b'<FieldDelimiter>,</FieldDelimiter>'
                           b'<QuoteCharacter>"</QuoteCharacter>'
                           b'<QuoteEscapeCharacter>"</QuoteEscapeCharacter>'
                           b'<Comments>#</Comments>'
                           b'<AllowQuotedRecordDelimiter>false'
                           b'</AllowQuotedRecordDelimiter></CSV>'
                           b'</InputSerialization>'
                           b'<OutputSerialization><CSV>'
                           b'<QuoteFields>ASNEEDED</QuoteFields>'
                           b'<RecordDelimiter>\n</RecordDelimiter>'
                           b'<FieldDelimiter>,</FieldDelimiter>'
                           b'<QuoteCharacter>"</QuoteCharacter>'
                           b'<QuoteEscapeCharacter>"</QuoteEscapeCharacter>'
                           b'</CSV></OutputSerialization>'
                           b'<RequestProgress>'
                           b'<Enabled>true</Enabled>'
                           b'</RequestProgress>'
                           b'</SelectObjectContentRequest>')
        # Options mirror the expected XML field-for-field.
        options = SelectObjectOptions(
            expression="select * from s3object",
            input_serialization=InputSerialization(
                compression_type="NONE",
                csv=CSVInput(file_header_info="USE",
                             record_delimiter="\n",
                             field_delimiter=",",
                             quote_character='"',
                             quote_escape_character='"',
                             comments="#",
                             allow_quoted_record_delimiter=False),
                ),
            output_serialization=OutputSerialization(
                csv=CSVOutput(quote_fields="ASNEEDED",
                              record_delimiter="\n",
                              field_delimiter=",",
                              quote_character='"',
                              quote_escape_character='"')
                ),
            request_progress=RequestProgress(
                enabled=True
                )
            )
        actual_string = xml_marshal_select(options)
        eq_(expected_string, actual_string)
| 1.398438 | 1 |
scripts/JustFaceNet.py | nfsergiu/PyOpenPose | 300 | 12760723 | <filename>scripts/JustFaceNet.py
"""
Example script using only the Face detector of Openpose.
"""
import PyOpenPose as OP
import time
import cv2
import numpy as np
import os
OPENPOSE_ROOT = os.environ["OPENPOSE_ROOT"]
def ComputeBB(face, padding=0.4):
    """Return (mean keypoint score, padded [x, y, w, h] box) for face keypoints.

    *face* is an (N, 3) array of (x, y, confidence) rows. The tight box around
    the keypoints is grown by *padding* (a fraction of its size, split evenly
    between opposite sides); coordinates are truncated to ints.
    """
    xs = face[:, 0]
    ys = face[:, 1]
    min_x, max_x = np.min(xs), np.max(xs)
    min_y, max_y = np.min(ys), np.max(ys)

    width = max_x - min_x
    height = max_y - min_y

    # Half of the requested padding fraction goes on each side.
    pad_x = width * padding / 2
    pad_y = height * padding / 2

    min_x -= pad_x
    min_y -= pad_y
    width += 2 * pad_x
    height += 2 * pad_y

    score = np.mean(face[:, 2])
    return score, [int(min_x), int(min_y), int(width), int(height)]
def run():
    """Webcam loop: track and render a face with OpenPose's face detector.

    Keys: 'p' toggles pause, 'q' quits, 'r' resets the tracking box.
    """
    cap = cv2.VideoCapture(0)
    ret, frame = cap.read()
    imgSize = list(frame.shape)
    # Net output size as (width, height), i.e. the frame shape reversed.
    outSize = imgSize[1::-1]
    print("Net output size: ", outSize)
    download_heatmaps = False
    with_hands = False
    with_face = True
    op = OP.OpenPose((656, 368), (240, 240), tuple(outSize), "COCO", OPENPOSE_ROOT + os.sep + "models" + os.sep, 0,
                     download_heatmaps, OP.OpenPose.ScaleMode.ZeroToOne, with_face, with_hands)
    actual_fps = 0
    paused = False
    # waitKey delay: 0 blocks while paused, 1 ms otherwise.
    delay = {True: 0, False: 1}
    # Initial tracking box as [x, y, w, h].
    newFaceBB = initFaceBB = faceBB = [240, 120, 150, 150]
    print("Entering main Loop. Put your hand into the box to start tracking")
    while True:
        start_time = time.time()
        try:
            ret, frame = cap.read()
            rgb = frame[:, :outSize[0]]
        except Exception as e:
            print("Failed to grab", e)
            break
        t = time.time()
        # Detect face keypoints inside the current tracking box.
        op.detectFace(rgb, np.array(faceBB, dtype=np.int32).reshape((1, 4)))
        t = time.time() - t
        op_fps = 1.0 / t
        res = op.render(rgb)
        cv2.putText(res, 'UI FPS = %f, OP-FACE FPS = %f. Press \'r\' to reset.' % (actual_fps, op_fps), (20, 20), 0, 0.5,
                    (0, 0, 255))
        # Green: current tracking box; blue: box proposed by the last detection.
        cv2.rectangle(res, (faceBB[0], faceBB[1]), (faceBB[0] + faceBB[2], faceBB[1] + faceBB[3]), [50, 155, 50], 2)
        cv2.rectangle(res, (newFaceBB[0], newFaceBB[1]), (newFaceBB[0] + newFaceBB[2], newFaceBB[1] + newFaceBB[3]),
                      [250, 55, 50], 1)
        cv2.imshow("OpenPose result", res)
        face = op.getKeypoints(op.KeypointType.FACE)[0].reshape(-1, 3)
        score, newFaceBB = ComputeBB(face)
        print("Res Score, faceBB: ", score, newFaceBB)
        if score > 0.5:  # update BB only when score is good.
            faceBB = newFaceBB
        key = cv2.waitKey(delay[paused])
        if key & 255 == ord('p'):
            paused = not paused
        if key & 255 == ord('q'):
            break
        if key & 255 == ord('r'):
            faceBB = initFaceBB
        actual_fps = 1.0 / (time.time() - start_time)
# Run the webcam demo when executed as a script.
if __name__ == '__main__':
    run()
| 2.5625 | 3 |
util/TurtleHare.py | arsonite/codescout | 0 | 12760724 | <filename>util/TurtleHare.py
import time
class TurtleHare:
    """Wall-clock stopwatch: runs a function between banner prints and reports
    the elapsed time in seconds."""

    @staticmethod
    def _timed_call(function, *args):
        """Shared implementation for both the instance and static entry points."""
        print('\nMeasuring execution time with TurtleHare ...')
        print('Print statements of executed function below:')
        print('--------------------------------------------\n')
        start = time.time()
        function(*args)
        end = time.time()
        print('\n---------------------------------------------')
        print('End of print statements of executed function.')
        print('Final execution measurement:')
        print(f'	In seconds: {end - start}')

    def __init__(self, *args, **kwargs):
        # Optional default callable; used when measure() is given function=None.
        self.function = kwargs.get('function')

    def measure(self, function, *args):
        """Time *function* called with *args*.

        Bug fix: the ``function`` argument used to be accepted but ignored —
        ``self.function`` ran instead. The explicit argument now wins, with
        ``self.function`` remaining the fallback when ``function`` is None.
        """
        target = self.function if function is None else function
        TurtleHare._timed_call(target, *args)

    class static:
        """Namespace for the no-instance entry point (kept for compatibility)."""

        @staticmethod
        def measure(function, *args):
            """Time *function* with *args* without constructing a TurtleHare."""
            TurtleHare._timed_call(function, *args)
| 3.46875 | 3 |
_/0349_04_Code/09.py | paullewallencom/javascript-978-1-8495-1034-9 | 0 | 12760725 | <filename>_/0349_04_Code/09.py<gh_stars>0
# NOTE(review): `required` is a *forms* field kwarg; Django model BooleanFields
# normally use `blank=True` / `default=...` instead — confirm intent.
publish_externally = models.BooleanField(required = False)
| 1.140625 | 1 |
petitions/tests.py | sosumi/IowaIdeas | 0 | 12760726 | """
Author: <NAME> (@zujko)
Author: <NAME> (@chrislemelin)
Description: Tests for petition operations.
Date Created: Sept 15 2016
Updated: Feb 17 2017
"""
from datetime import timedelta
from django.contrib.auth.models import User
from django.test import Client, TestCase
from django.test.client import RequestFactory
from django.utils import timezone
from petitions.models import Petition, Response, Tag, Update
from .consumers import serialize_petitions
from .views import (PETITION_DEFAULT_BODY, PETITION_DEFAULT_TITLE, edit_check,
get_petition, petition_edit, petition_sign)
class PetitionTest(TestCase):
    def setUp(self):
        """Build the shared fixture: test clients, users of varying privilege,
        one tag, one unpublished petition (status 0) and one published
        petition (status 1), both expiring in 30 days."""
        self.client = Client()
        self.factory = RequestFactory()
        # Staff user, and a superuser *without* staff rights, to exercise both
        # sides of the permission checks in edit_check/unpublish.
        self.superUser = User.objects.create_user(
            username='txu1267', email='txu1267', is_staff=True)
        self.superUser.set_password('<PASSWORD>')
        self.superUser.save()
        self.superUser2 = User.objects.create_user(
            username='txu1266', email='txu1266', is_superuser=True)
        self.superUser2.set_password('<PASSWORD>')
        self.superUser2.save()
        # Regular users: author of the unpublished petition (user), author of
        # the published one (user2), and an uninvolved account (user3).
        self.user = User.objects.create_user(
            username='axu7254', email='axu7254')
        self.user2 = User.objects.create_user(
            username='cxl1234', email='cxl1234')
        self.user3 = User.objects.create_user(
            username='abc4321', email='abc4321')
        self.tag = Tag(name='Test')
        self.tag.save()
        # Unpublished petition (status 0) owned by self.user.
        self.petition = Petition(title='Test petition',
                                 description='This is a test petition',
                                 author=self.user,
                                 created_at=timezone.now(),
                                 status=0,
                                 expires=timezone.now() + timedelta(days=30)
                                 )
        self.petition.save()
        self.petition.tags.add(self.tag)
        # Published petition (status 1) owned by self.user2.
        self.petitionPublished = Petition(title='Test petition Published',
                                          description='This is a test petition Published',
                                          author=self.user2,
                                          created_at=timezone.now(),
                                          status=1,
                                          expires=timezone.now() + timedelta(days=30)
                                          )
        self.petitionPublished.save()
def test_about_page(self):
response = self.client.get('/about/')
assert response.status_code == 200
self.assertTemplateUsed(response, 'about.html')
def test_maintenance_page(self):
response = self.client.get('/maintenance/')
assert response.status_code == 200
self.assertTemplateUsed(response, 'Something_Special.html')
def test_index_page(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'index.html')
def test_404(self):
response = self.client.get('/doesnotexist')
self.assertEqual(response.status_code, 404)
self.assertTemplateUsed(response, '404.html')
def test_petition_edit(self):
self.client.force_login(self.superUser)
# Change petition title to 'New'
obj = {
"attribute": "title",
"value": "New"
}
response = self.client.post(
'/petition/update/' + str(self.petition.id), obj)
# Check that it doesn't 404
self.assertNotEqual(response.status_code, 404)
# Check that petition was actually changed
pet = Petition.objects.get(pk=self.petition.id)
self.assertEqual(pet.title, 'New')
def test_petition_publish(self):
self.client.force_login(self.user)
tag = Tag(name='test tag')
tag.save()
obj = {
"attribute": "publish",
"value": "foo"
}
self.petition.status = 0
self.petition.tags.add(tag)
self.petition.save()
request = self.factory.post(
'/petition/update/' + str(self.petition.id), obj)
request.META['HTTP_HOST'] = 'localhost'
request.user = self.user
response = petition_edit(request, self.petition.id)
# Make sure there is no 404
self.assertNotEqual(response.status_code, 404)
# Check that petition was published
pet = Petition.objects.get(pk=self.petition.id)
self.assertEqual(pet.status, 1)
def test_sign_petition(self):
self.client.force_login(self.superUser)
response = self.client.post(
'/petition/sign/' + str(self.petitionPublished.id), {'test': 'test'})
pet = Petition.objects.get(pk=self.petitionPublished.id)
self.assertEqual(pet.signatures, 1)
self.assertEqual(response.status_code, 200)
def test_petition_subscribe(self):
self.client.force_login(self.user)
user = User.objects.get(pk=self.user.id)
self.assertEqual(user.profile.subscriptions.filter(
pk=self.petition.id).exists(), False)
response = self.client.post(
'/petition/subscribe/' + str(self.petition.id), {})
user = User.objects.get(pk=self.user.id)
self.assertEqual(user.profile.subscriptions.filter(
pk=self.petition.id).exists(), True)
def test_petition_unsubscribe(self):
self.client.force_login(self.user)
user = User.objects.get(pk=self.user.id)
self.assertEqual(user.profile.subscriptions.filter(
pk=self.petition.id).exists(), False)
response = self.client.post(
'/petition/subscribe/' + str(self.petition.id), {})
user = User.objects.get(pk=self.user.id)
self.assertEqual(user.profile.subscriptions.filter(
pk=self.petition.id).exists(), True)
response = self.client.post(
'/petition/unsubscribe/' + str(self.petition.id), {})
user = User.objects.get(pk=self.user.id)
self.assertEqual(user.profile.subscriptions.filter(
pk=self.petition.id).exists(), False)
def test_petition_unpublish(self):
self.client.force_login(self.superUser)
response = self.client.post(
'/petition/unpublish/' + str(self.petition.id))
self.assertEqual(response.status_code, 200)
pet = Petition.objects.get(pk=self.petition.id)
self.assertEqual(pet.status, 2)
# Test using not super user
self.client.force_login(self.user)
pet.status = 1
pet.save()
response = self.client.post(
'/petition/unpublish/' + str(self.petition.id))
pet = Petition.objects.get(pk=self.petition.id)
self.assertEqual(pet.status, 1)
def test_petition_page(self):
response = self.client.get('/petition/' + str(self.petition.id))
self.assertEqual(response.status_code, 200)
def test_url_redirect_fail(self):
self.client.force_login(self.user)
response = self.client.get('/petition/' + str(666))
self.assertEqual(response.status_code, 404)
def test_create_petition(self):
self.client.force_login(self.user)
response = self.client.post('/petition/create/')
self.assertEqual(response.status_code, 200)
userobj = User.objects.get(pk=self.user.id)
self.assertEqual(userobj.profile.petitions_signed.all()[
0].title, PETITION_DEFAULT_TITLE)
def test_check_edit(self):
self.client.force_login(self.user)
self.assertEqual(edit_check(self.user, self.petition), True)
self.assertEqual(edit_check(self.superUser, self.petition), True)
self.assertEqual(edit_check(self.superUser2, self.petition), False)
self.assertEqual(edit_check(self.user2, self.petition), False)
def test_serialize_petitions(self):
petitions = Petition.objects.all()
json_response = serialize_petitions(petitions)
# TODO: Improve this test to be more thorough
self.assertNotEqual(json_response, None)
def test_url_redirect(self):
self.client.force_login(self.user)
response = self.client.get('/petitions/' + str(self.petition.id))
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, '/?p='+str(self.petition.id),
status_code=302, target_status_code=200)
def test_edit_petition_description(self):
self.client.force_login(self.superUser)
obj = {
"attribute": "description",
"value": "test test test"
}
response = self.client.post(
'/petition/update/' + str(self.petition.id), obj)
self.assertNotEqual(response.status_code, 404)
pet = Petition.objects.get(pk=self.petition.id)
self.assertEqual(pet.description, "test test test")
def test_petition_add_tag(self):
self.client.force_login(self.superUser)
tag = Tag(name='test tag2')
tag.save()
obj = {
"attribute": "add-tag",
"value": tag.id
}
response = self.client.post(
'/petition/update/' + str(self.petition.id), obj)
self.assertNotEqual(response.status_code, 404)
pet = Petition.objects.get(pk=self.petition.id)
if tag not in pet.tags.all():
self.fail("tag not added")
def test_petition_remove_tag(self):
self.client.force_login(self.superUser)
tag = Tag(name='test tag2')
tag.save()
self.petition.tags.add(tag)
self.petition.save()
obj = {
"attribute": "remove-tag",
"value": tag.id
}
response = self.client.post(
'/petition/update/' + str(self.petition.id), obj)
self.assertNotEqual(response.status_code, 404)
pet = Petition.objects.get(pk=self.petition.id)
if tag in pet.tags.all():
self.fail("tag not removed")
def test_petition_add_update(self):
self.client.force_login(self.superUser)
obj = {
"attribute": "add_update",
"value": "test update"
}
request = self.factory.post(
'/petition/update/' + str(self.petition.id), obj)
request.user = self.superUser
request.META['HTTP_HOST'] = "random"
response = petition_edit(request, self.petition.id)
self.assertNotEqual(response.status_code, 404)
pet = Petition.objects.get(pk=self.petition.id)
fail = True
for update in pet.updates.all():
value = update.description
if value == "test update":
fail = False
if fail:
self.fail("did not add update")
def test_petition_add_response(self):
self.client.force_login(self.superUser)
obj = {
"attribute": "response",
"value": "test response"
}
request = self.factory.post(
'/petition/update/' + str(self.petition.id), obj)
request.user = self.superUser
request.META['HTTP_HOST'] = "random"
response = petition_edit(request, self.petition.id)
self.assertNotEqual(response.status_code, 404)
pet = Petition.objects.get(pk=self.petition.id)
if pet.response.description != "test response":
self.fail()
def test_petition_mark_work_in_progress(self):
self.client.force_login(self.superUser)
obj = {
"attribute": "mark-in-progress"
}
self.assertEqual(self.petitionPublished.in_progress, None)
response = self.client.post(
'/petition/update/' + str(self.petitionPublished.id), obj)
self.assertNotEqual(response.status_code, 404)
pet = Petition.objects.get(pk=self.petitionPublished.id)
self.assertEqual(pet.in_progress, True)
def test_petition_unpublish_progress(self):
self.client.force_login(self.superUser)
obj = {
"attribute": "unpublish"
}
self.assertEqual(self.petitionPublished.status, 1)
request = self.factory.post(
'/petition/update/' + str(self.petitionPublished.id), obj)
request.user = self.superUser
request.META['HTTP_HOST'] = "random"
response = petition_edit(request, self.petitionPublished.id)
self.assertNotEqual(response.status_code, 404)
pet = Petition.objects.get(pk=self.petitionPublished.id)
self.assertEqual(pet.status, 2)
def test_get_petition(self):
self.client.force_login(self.superUser)
petition = get_petition(self.petition.id, self.user)
self.assertEqual(petition, self.petition)
def test_get_petition_fail(self):
self.client.force_login(self.superUser)
petition = get_petition(self.petition.id, self.user2)
self.assertEqual(petition, False)
def test_petition_str(self):
assert str(self.petition) == self.petition.title
def test_tag_str(self):
assert str(self.tag) == self.tag.name
def test_response_str(self):
resp = Response.objects.create(
description='Response', created_at=timezone.now(), author='Test Author')
assert str(resp) == 'Test Author'
| 2.46875 | 2 |
akshare/event/sos.py | PKUuu/akshare | 0 | 12760727 | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: <NAME>
date: 2020/1/23 9:07
contact: <EMAIL>
desc: 新增-事件接口
新增-事件接口新型冠状病毒-网易
新增-事件接口新型冠状病毒-丁香园
"""
import json
import time
import requests
import pandas as pd
from bs4 import BeautifulSoup
def epidemic_163():
    """Scrape the NetEase (163.com) epidemic special page.

    Returns a pandas DataFrame with one row per region: the region name
    and a description column whose label embeds the page's 'tit' span text.
    NOTE: fragile by construction — depends on the live page's HTML layout
    (first <ul>, class 'tit'); any site change breaks the parsing.
    """
    url = "https://news.163.com/special/epidemic/?spssid=93326430940df93a37229666dfbc4b96&spsw=4&spss=other&"
    res = requests.get(url)
    soup = BeautifulSoup(res.text, "lxml")
    # Region names live in <strong> tags, descriptions in the <li> items
    # of the same (first) <ul>.
    province_list = [item.get_text() for item in soup.find("ul").find_all("strong")]
    desc_list = [item.get_text() for item in soup.find("ul").find_all("li")]
    temp_df = pd.DataFrame([province_list, desc_list],
                           index=["地区", f"数据-{soup.find(attrs={'class': 'tit'}).find('span').get_text()}"]).T
    return temp_df
def epidemic_dxy(indicator="data"):
    """Scrape DXY's (丁香园) pneumonia page.

    indicator: "data" returns a DataFrame of province short names and tag
    counts; any other value returns the news-timeline DataFrame (title,
    summary, source, province, url).
    NOTE: the payloads are JSON embedded inside <script> tags; the string
    slicing below is tied to the exact "= {...}catch" wrapper on the page.
    """
    url = "https://3g.dxy.cn/newh5/view/pneumonia"
    params = {
        "scene": "2",
        "clicktime": int(time.time()),
        "enterid": int(time.time()),
        "from": "groupmessage",
        "isappinstalled": "0",
    }
    res = requests.get(url, params=params)
    res.encoding = "utf-8"
    soup = BeautifulSoup(res.text, "lxml")
    # News timeline: JSON object between "= {" and "}catch".
    text_data_news = str(soup.find_all("script", attrs={"id": "getTimelineService"}))
    temp_json = text_data_news[text_data_news.find("= {") + 2: text_data_news.rfind("}catch")]
    json_data = pd.DataFrame(json.loads(temp_json)["result"])
    desc_data = json_data[["title", "summary", "infoSource", "provinceName", "sourceUrl"]]
    # Per-province statistics: JSON array between "= [{" and "catch".
    text_data_news = str(soup.find_all("script", attrs={"id": "getListByCountryTypeService1"}))
    temp_json = text_data_news[text_data_news.find("= [{") + 2: text_data_news.rfind("catch") - 1]
    json_data = pd.DataFrame(json.loads(temp_json))
    data = json_data[['tags', 'provinceShortName']]
    dig_data = data[['provinceShortName', 'tags']]
    # text_data_news = str(soup.find_all("script")[6])
    # temp_json = text_data_news[text_data_news.find("= {") + 2: text_data_news.rfind("}catch")]
    # info_data = pd.DataFrame(json.loads(temp_json), index=[0]).T
    if indicator == "data":
        return dig_data
    else:
        return desc_data
if __name__ == '__main__':
    # Manual smoke test: fetch and display both epidemic data sources.
    print(epidemic_dxy(indicator="data"))
    print(epidemic_163())
| 2.8125 | 3 |
demo_sceneanalysis/demo_sceneanalysis.py | angus-ai/angus-demos | 0 | 12760728 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import cv2
import numpy as np
import StringIO
import datetime
import pytz
import angus
import angus_display as ad
import stats as st
def f(stream_index, width, height):
    """Capture frames from a camera, send them to the Angus scene-analysis
    service and draw the returned entities/events on a live OpenCV window.

    stream_index: cv2 capture index (or video path).
    width/height: requested capture resolution in pixels.
    Press 'q' in the display window to save stats and quit.
    NOTE: Python 2 code (StringIO, dict.iteritems, cv2.cv constants).
    """
    camera = cv2.VideoCapture(stream_index)
    camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, int(width))
    camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, int(height))
    camera.set(cv2.cv.CV_CAP_PROP_FPS, 10)
    if not camera.isOpened():
        print("Cannot open stream of index {}".format(stream_index))
        exit(1)
    print("Video stream is of resolution {} x {}".format(camera.get(3), camera.get(4)))
    stats = st.Stats("stats.json")
    animation = []
    engaged = []
    conn = angus.connect()
    service = conn.services.get_service("scene_analysis", version=1)
    service.enable_session()
    while camera.isOpened():
        ret, frame = camera.read()
        if not ret:
            break
        # Grayscale + JPEG-encode the frame before shipping it to the service.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        ret, buff = cv2.imencode(".jpg", gray, [cv2.IMWRITE_JPEG_QUALITY, 80])
        buff = StringIO.StringIO(np.array(buff).tostring())
        t = datetime.datetime.now(pytz.utc)
        # Sensitivity thresholds tune which events the service reports.
        job = service.process({"image": buff,
                               "timestamp" : t.isoformat(),
                               "camera_position": "facing",
                               "sensitivity": {
                                   "appearance": 0.7,
                                   "disappearance": 0.7,
                                   "age_estimated": 0.4,
                                   "gender_estimated": 0.5,
                                   "focus_locked": 0.9,
                                   "emotion_detected": 0.4,
                                   "direction_estimated": 0.8
                               }
                               })
        res = job.result
        events = res["events"]
        entities = res["entities"]
        # Overlay per-person annotations (age, hair, avatar, emotion, gender, gaze).
        for idx, h in entities.iteritems():
            pt = ad.displayAge(frame, idx, h, 0.50, 0.35)
            ch = ad.displayHair(frame, idx, h)
            ad.displayAvatar(frame, h, pt, ch)
            ad.displayEmotion(frame, h, pt)
            ad.displayGender(frame, h, pt)
            ad.displayGaze(frame, idx, h, pt, 0.50)
        # Right-hand side stats panel.
        panel = ((width - 180, 40), (width-20, height - 40))
        ad.blur(frame, panel[0], panel[1], (255, 255, 255), 2)
        ad.computeConversion(res, events, entities, engaged, stats, animation, 0.5, 500)
        ad.displayConversion(frame, stats, (width - 100, int(0.3*height)))
        ad.displayAnimation(frame, animation)
        ad.display_logo(frame, 20, height - 60)
        cv2.imshow('window', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            stats.save()
            break
    service.disable_session()
    camera.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    ### Web cam index might be different from 0 on your setup.
    ### To grab a given video file instead of the host computer cam, try:
    ### main("/path/to/myvideo.avi")
    # f(stream_index, width, height): 640x480 capture from the default camera.
    f(0, 640, 480)
| 2.265625 | 2 |
tests/core/test_dataset.py | MaxCodeXTC/hover | 1 | 12760729 | <filename>tests/core/test_dataset.py
import pytest
from copy import deepcopy
from hover.core.dataset import SupervisableTextDataset
@pytest.mark.core
class TestSupervisableTextDataset:
    """Tests for SupervisableTextDataset: subset handling, embeddings, loaders."""

    TEST_DICTL = [
        {"content": "Aristotle", "mark": "A"},
        {"content": "<NAME>", "mark": "B"},
        {"content": "CRISPR", "mark": "C"},
        {"content": "Doudna", "mark": "D"},
    ]
    DEV_DICTL = [
        {"content": "Doudna", "mark": "D"},
        {"content": "<NAME>", "mark": "E"},
    ]
    TRAIN_DICTL = [
        {"content": "<NAME>", "mark": "e"},
        {"content": "<NAME>", "mark": "F"},
        {"content": "Geralt of Rivia", "mark": "G"},
    ]
    RAW_DICTL = [{"content": "Geralt of Rivia"}, {"content": "Hailstorm"}]

    # Expected subset sizes: smaller than the input lists, presumably because
    # entries duplicated across subsets (e.g. "Doudna") are deduplicated —
    # TODO confirm against SupervisableTextDataset's dedup rules.
    EFFECTIVE_SIZE = {"test": 4, "dev": 1, "train": 2, "raw": 1}
    EFFECTIVE_CLASSES = 7

    def test_init(self):
        """Subset sizes and the class count match the expected effective values."""
        dataset = SupervisableTextDataset(
            self.__class__.RAW_DICTL[:],
            train_dictl=self.__class__.TRAIN_DICTL[:],
            dev_dictl=self.__class__.DEV_DICTL[:],
            test_dictl=self.__class__.TEST_DICTL[:],
            feature_key="content",
            label_key="mark",
        )
        # check the subset sizes
        for _key, _value in self.__class__.EFFECTIVE_SIZE.items():
            assert dataset.dfs[_key].shape[0] == _value
        # check the number of classes
        assert len(dataset.classes) == self.__class__.EFFECTIVE_CLASSES

    @staticmethod
    def test_compute_2d_embedding(mini_supervisable_text_dataset, dummy_vectorizer):
        """Both supported 2-D embedding methods run without error."""
        dataset = deepcopy(mini_supervisable_text_dataset)
        dataset.compute_2d_embedding(dummy_vectorizer, "umap")
        dataset.compute_2d_embedding(dummy_vectorizer, "ivis")

    @staticmethod
    def test_loader(mini_supervisable_text_dataset, dummy_vectorizer):
        """The unlabeled raw subset must not yield a loader; dev must."""
        from torch.utils.data import DataLoader

        dataset = deepcopy(mini_supervisable_text_dataset)
        # pytest.raises is clearer than try/fail/except and cannot
        # accidentally swallow the failure itself.
        with pytest.raises(KeyError):
            dataset.loader("raw", dummy_vectorizer, smoothing_coeff=0.1)
        loader = dataset.loader("dev", dummy_vectorizer, smoothing_coeff=0.1)
        assert isinstance(loader, DataLoader)
| 2.296875 | 2 |
alphamind/exceptions/exceptions.py | iLampard/alpha-mind | 0 | 12760730 | <reponame>iLampard/alpha-mind
# -*- coding: utf-8 -*-
"""
Created on 2018-6-12
@author: cheng.li
"""
class PortfolioBuilderException(Exception):
    """Raised when portfolio construction fails.

    The message is kept on ``self.msg`` for backward compatibility and is
    also passed to ``Exception.__init__`` so that ``e.args`` and
    ``repr(e)`` carry it (the original left ``args`` empty).
    """

    def __init__(self, msg):
        super(PortfolioBuilderException, self).__init__(msg)
        self.msg = msg

    def __str__(self):
        return str(self.msg)
bio/__init__.py | nadrees/PyRosalind | 0 | 12760731 | <reponame>nadrees/PyRosalind
__author__ = 'Nathen'
| 0.855469 | 1 |
workflows/TW-phoSim-r3/helpers/genFocalPlane.py | LSSTDESC/Twinkles | 6 | 12760732 | <gh_stars>1-10
#!/nfs/farm/g/lsst/u1/software/redhat6-x86_64-64bit-gcc44/anaconda/2.3.0/bin/python
"""
genFocalPlane.py - create a text file containing all sets of phoSim
sensor locations for use as command line options, e.g.,"-s
R122_S11" is the sensor in the very center of the focal plane.
Strings are of the format Rxy_Sxy, where Rxy = raft location on focal
plane (x and y = 0-4), and Sxy = sensor location within raft (x and y = 0-2).
Note that R00, R04, R40 and R44 are special 'corner rafts', each with only three sensors.
"""
# Raft grid is 5x5; each raft holds a 3x3 sensor grid. The four corner
# rafts carry guide/wavefront sensors (see module docstring) and are skipped.
raftDim = [0, 1, 2, 3, 4]
sensorDim = [0, 1, 2]
corners = ['00', '04', '40', '44']

sensorTot = 0
# 'with' guarantees the output file is closed even on error (the original
# left it open if an exception occurred mid-loop).
with open('sensorList.txt', 'w') as fd:
    for rx in raftDim:
        for ry in raftDim:
            rxy = str(rx) + str(ry)
            if rxy in corners:
                continue  ## No corner rafts
            # if rxy != '22': continue  ## Center raft only
            for sx in sensorDim:
                for sy in sensorDim:
                    sxy = str(sx) + str(sy)
                    sensorTot += 1
                    sensor = 'R' + rxy + '_' + 'S' + sxy
                    # print() form works under both Python 2 and 3.
                    print(sensor)
                    fd.write(sensor + '\n')

print('sensorTot = %d' % sensorTot)
| 2.625 | 3 |
ch09-templates/python/templates-jinja-simple/render.py | oreilly-npa-book/examples | 0 | 12760733 | <filename>ch09-templates/python/templates-jinja-simple/render.py<gh_stars>0
from jinja2 import Environment, FileSystemLoader
# Templates are looked up relative to the current working directory.
ENV = Environment(loader=FileSystemLoader('.'))
template = ENV.get_template("template.jinja")

# Interface attributes handed to the template under the name 'interface'.
interface_dict = dict(
    name="GigabitEthernet0/1",
    description="Server Port",
    vlan=10,
    uplink=False,
)

print(template.render(interface=interface_dict))
Projects/Online Workouts/w3resource/Dictionary/program-3.py | ivenpoker/Python-Projects | 1 | 12760734 | <reponame>ivenpoker/Python-Projects
#!/usr/bin/env python3
############################################################################################
# #
# Program purpose: Concatenates dictionaries. #
# Program Author : <NAME> <<EMAIL>> #
# Creation Date : November 27, 2019 #
# #
############################################################################################
import operator
import random
def random_dict(low: int, high: int, size: int) -> dict:
    """Build a dict of up to `size` random key/value pairs, all in [low, high].

    Duplicate random keys collapse, so the result may hold fewer than
    `size` entries. Raises ValueError for a negative `size`.
    """
    if size < 0:
        raise ValueError(f'Invalid size [{size}] for dictionary')
    # Draw all keys first, then all values, to keep the RNG call order
    # identical to the original two-list construction.
    keys = [random.randint(low, high) for _ in range(size)]
    values = [random.randint(low, high) for _ in range(size)]
    return dict(zip(keys, values))
def concat_dict(dict_A: dict, dict_B: dict) -> dict:
    """Merge two dicts (B wins on key clashes) and sort the result by key."""
    merged = dict(dict_A)
    merged.update(dict_B)
    # Keys are unique after the merge, so sorting items sorts by key.
    return dict(sorted(merged.items()))
if __name__ == "__main__":
    # Demo: two random dicts with disjoint key ranges, then their sorted merge.
    dict_a = random_dict(low=0, high=10, size=5)
    dict_b = random_dict(low=11, high=20, size=5)
    print(f'Random dictionary A: {dict_a} --> [size: {len(dict_a)}]')
    print(f'Random dictionary B: {dict_b} --> [size: {len(dict_b)}]')
    combined = concat_dict(dict_A=dict_a, dict_B=dict_b)
    print(f'Concatenated (and sorted) merge: {combined} --> [size: {len(combined)}]')
| 3.515625 | 4 |
django_kickstart/template/app/manage.py | theduke/django-kickstart | 5 | 12760735 | <gh_stars>1-10
#!/usr/bin/env python
# Django management entry point for the kickstart PROJECT TEMPLATE.
# "{{ project_name }}" is a placeholder substituted when the template is
# rendered; this file is not valid/importable Python until then.
import os
import sys

if __name__ == "__main__":
    # Make it possible to run from any directory.
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{{ project_name }}.settings")
    # Bundled apps/ and lib/ directories are importable without installation.
    sys.path.append('apps')
    sys.path.append('lib')
    # Now add virtualenv site-packages path.
    from {{ project_name }} import settings
    sys.path.append(os.path.join(settings.VIRTENV_DIR, settings.VIRTENV_PACKAGE_DIR))
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| 1.726563 | 2 |
metadata-ingestion/tests/unit/test_usage_common.py | pramodbiligiri/datahub | 9 | 12760736 | from datetime import datetime
import pytest
from pydantic import ValidationError
from datahub.configuration.time_window_config import BucketDuration, get_time_bucket
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.ingestion.api.workunit import MetadataWorkUnit
from datahub.ingestion.source.usage.usage_common import (
BaseUsageConfig,
GenericAggregatedDataset,
)
from datahub.metadata.schema_classes import DatasetUsageStatisticsClass
_TestTableRef = str
_TestAggregatedDataset = GenericAggregatedDataset[_TestTableRef]
def test_add_one_query_without_columns():
    """A single read entry bumps query/user counts and leaves columns empty."""
    email = "<EMAIL>"
    query = "select * from test"
    bucket = get_time_bucket(datetime(2020, 1, 1), BucketDuration.DAY)
    resource = "test_db.test_schema.test_table"

    agg = _TestAggregatedDataset(bucket_start_time=bucket, resource=resource)
    agg.add_read_entry(email, query, [])

    assert agg.queryCount == 1
    assert agg.queryFreq[query] == 1
    assert agg.userFreq[email] == 1
    assert len(agg.columnFreq) == 0
def test_multiple_query_without_columns():
    """Counts accumulate independently per query and per user."""
    email_a = "<EMAIL>"
    email_b = "<EMAIL>"
    query_a = "select * from test"
    query_b = "select * from test2"
    bucket = get_time_bucket(datetime(2020, 1, 1), BucketDuration.DAY)
    resource = "test_db.test_schema.test_table"

    agg = _TestAggregatedDataset(bucket_start_time=bucket, resource=resource)
    # Two identical reads by user A, one different read by user B.
    agg.add_read_entry(email_a, query_a, [])
    agg.add_read_entry(email_a, query_a, [])
    agg.add_read_entry(email_b, query_b, [])

    assert agg.queryCount == 3
    assert agg.queryFreq[query_a] == 2
    assert agg.userFreq[email_a] == 2
    assert agg.queryFreq[query_b] == 1
    assert agg.userFreq[email_b] == 1
    assert len(agg.columnFreq) == 0
def test_make_usage_workunit():
    """One read entry yields a work unit whose aspect reflects that query."""
    test_email = "<EMAIL>"
    test_query = "select * from test"
    event_time = datetime(2020, 1, 1)
    floored_ts = get_time_bucket(event_time, BucketDuration.DAY)
    resource = "test_db.test_schema.test_table"
    ta = _TestAggregatedDataset(bucket_start_time=floored_ts, resource=resource)
    ta.add_read_entry(
        test_email,
        test_query,
        [],
    )
    wu: MetadataWorkUnit = ta.make_usage_workunit(
        bucket_duration=BucketDuration.DAY, urn_builder=lambda x: x, top_n_queries=10
    )
    # The work-unit id is "<bucket start ISO timestamp>-<resource>".
    assert wu.id == "2020-01-01T00:00:00-test_db.test_schema.test_table"
    assert isinstance(wu.get_metadata()["metadata"], MetadataChangeProposalWrapper)
    du: DatasetUsageStatisticsClass = wu.get_metadata()["metadata"].aspect
    assert du.totalSqlQueries == 1
    assert du.topSqlQueries
    assert du.topSqlQueries.pop() == test_query
def test_query_trimming():
    """With a tight query budget, stored top queries are trimmed with '...'."""
    test_email: str = "<EMAIL>"
    test_query: str = "select * from test where a > 10 and b > 20 order by a asc"
    top_n_queries: int = 10
    total_budget_for_query_list: int = 200
    event_time = datetime(2020, 1, 1)
    floored_ts = get_time_bucket(event_time, BucketDuration.DAY)
    resource = "test_db.test_schema.test_table"
    ta = _TestAggregatedDataset(bucket_start_time=floored_ts, resource=resource)
    # NOTE(review): total_budget_for_query_list looks like a class-level
    # attribute; assigning it on the instance shadows it for this object
    # only — TODO confirm this is the intended way to configure the budget.
    ta.total_budget_for_query_list = total_budget_for_query_list
    ta.add_read_entry(
        test_email,
        test_query,
        [],
    )
    wu: MetadataWorkUnit = ta.make_usage_workunit(
        bucket_duration=BucketDuration.DAY,
        urn_builder=lambda x: x,
        top_n_queries=top_n_queries,
    )
    assert wu.id == "2020-01-01T00:00:00-test_db.test_schema.test_table"
    assert isinstance(wu.get_metadata()["metadata"], MetadataChangeProposalWrapper)
    du: DatasetUsageStatisticsClass = wu.get_metadata()["metadata"].aspect
    assert du.totalSqlQueries == 1
    assert du.topSqlQueries
    # 200 budget / 10 slots = 20 chars per query -> trimmed with " ...".
    assert du.topSqlQueries.pop() == "select * f ..."
def test_top_n_queries_validator_fails():
    """BaseUsageConfig rejects a top_n_queries that exceeds the query budget."""
    with pytest.raises(ValidationError) as excinfo:
        # NOTE(review): mutating the class attribute is global and is never
        # restored — it may leak into tests run afterwards; confirm/teardown.
        GenericAggregatedDataset.total_budget_for_query_list = 20
        BaseUsageConfig(top_n_queries=2)
    assert "top_n_queries is set to 2 but it can be maximum 1" in str(excinfo.value)
| 2.109375 | 2 |
madgraph/various/banner.py | jlrainbolt/MG5_v2_6_1 | 0 | 12760737 | <filename>madgraph/various/banner.py
################################################################################
#
# Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors
#
# This file is a part of the MadGraph5_aMC@NLO project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph5_aMC@NLO license which should accompany this
# distribution.
#
# For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch
#
################################################################################
from __future__ import division
import collections
import copy
import logging
import numbers
import os
import sys
import re
import math
import StringIO
pjoin = os.path.join
try:
import madgraph
except ImportError:
MADEVENT = True
from internal import MadGraph5Error, InvalidCmd
import internal.file_writers as file_writers
import internal.files as files
import internal.check_param_card as param_card_reader
import internal.misc as misc
MEDIR = os.path.split(os.path.dirname(os.path.realpath( __file__ )))[0]
MEDIR = os.path.split(MEDIR)[0]
else:
MADEVENT = False
import madgraph.various.misc as misc
import madgraph.iolibs.file_writers as file_writers
import madgraph.iolibs.files as files
import models.check_param_card as param_card_reader
from madgraph import MG5DIR, MadGraph5Error, InvalidCmd
logger = logging.getLogger('madevent.cards')
# A placeholder class to store unknown parameters with undecided format
class UnknownType(str):
    """Marker ``str`` subclass for parameters whose format is still undecided."""
    pass
#dict
class Banner(dict):
""" """
ordered_items = ['mgversion', 'mg5proccard', 'mgproccard', 'mgruncard',
'slha','initrwgt','mggenerationinfo', 'mgpythiacard', 'mgpgscard',
'mgdelphescard', 'mgdelphestrigger','mgshowercard',
'ma5card_parton','ma5card_hadron','run_settings']
capitalized_items = {
'mgversion': 'MGVersion',
'mg5proccard': 'MG5ProcCard',
'mgproccard': 'MGProcCard',
'mgruncard': 'MGRunCard',
'ma5card_parton' : 'MA5Card_parton',
'ma5card_hadron' : 'MA5Card_hadron',
'mggenerationinfo': 'MGGenerationInfo',
'mgpythiacard': 'MGPythiaCard',
'mgpgscard': 'MGPGSCard',
'mgdelphescard': 'MGDelphesCard',
'mgdelphestrigger': 'MGDelphesTrigger',
'mgshowercard': 'MGShowerCard' }
forbid_cdata = ['initrwgt']
    def __init__(self, banner_path=None):
        """Create a Banner.

        banner_path: another Banner (shallow-copied, keeping its LHE
        version), a banner file path/text (parsed via read_banner), or
        None for an empty banner.
        """
        if isinstance(banner_path, Banner):
            dict.__init__(self, banner_path)
            self.lhe_version = banner_path.lhe_version
            return
        else:
            dict.__init__(self)
        # Record the MG/ME version: from the version file in standalone
        # MadEvent mode, otherwise from the installed package info.
        if MADEVENT:
            self['mgversion'] = '#%s\n' % open(pjoin(MEDIR, 'MGMEVersion.txt')).read()
        else:
            info = misc.get_pkg_info()
            self['mgversion'] = info['version']+'\n'
        self.lhe_version = None
        if banner_path:
            self.read_banner(banner_path)
############################################################################
# READ BANNER
############################################################################
pat_begin=re.compile('<(?P<name>\w*)>')
pat_end=re.compile('</(?P<name>\w*)>')
tag_to_file={'slha':'param_card.dat',
'mgruncard':'run_card.dat',
'mgpythiacard':'pythia_card.dat',
'mgpgscard' : 'pgs_card.dat',
'mgdelphescard':'delphes_card.dat',
'mgdelphestrigger':'delphes_trigger.dat',
'mg5proccard':'proc_card_mg5.dat',
'mgproccard': 'proc_card.dat',
'init': '',
'mggenerationinfo':'',
'scalesfunctionalform':'',
'montecarlomasses':'',
'initrwgt':'',
'madspin':'madspin_card.dat',
'mgshowercard':'shower_card.dat',
'pythia8':'pythia8_card.dat',
'ma5card_parton':'madanalysis5_parton_card.dat',
'ma5card_hadron':'madanalysis5_hadron_card.dat',
'run_settings':''
}
    def read_banner(self, input_path):
        """Parse a banner and store each recognised <tag>...</tag> section.

        input_path: a file path, an open file object, or the full banner
        text itself (a string containing newlines). Parsing stops at
        </init> or <event> to avoid reading a whole event file.
        """
        if isinstance(input_path, str):
            if input_path.find('\n') ==-1:
                input_path = open(input_path)
            else:
                # Full banner passed as one string: iterate its lines lazily.
                def split_iter(string):
                    return (x.groups(0)[0] for x in re.finditer(r"([^\n]*\n)", string, re.DOTALL))
                input_path = split_iter(input_path)
        text = ''
        store = False
        for line in input_path:
            if self.pat_begin.search(line):
                if self.pat_begin.search(line).group('name').lower() in self.tag_to_file:
                    tag = self.pat_begin.search(line).group('name').lower()
                    store = True
                    continue
            if store and self.pat_end.search(line):
                if tag == self.pat_end.search(line).group('name').lower():
                    self[tag] = text
                    text = ''
                    store = False
            # CDATA wrapper lines are markup, not card content.
            if store and not line.startswith(('<![CDATA[',']]>')):
                if line.endswith('\n'):
                    text += line
                else:
                    text += '%s%s' % (line, '\n')
            #reaching end of the banner in a event file avoid to read full file
            if "</init>" in line:
                break
            elif "<event>" in line:
                break
    def __getattribute__(self, attr):
        """allow auto-build for the run_card/param_card/... """
        try:
            return super(Banner, self).__getattribute__(attr)
        except:
            # Lazily build the card object from the stored text on first
            # access; any other missing attribute re-raises.
            if attr not in ['run_card', 'param_card', 'slha', 'mgruncard', 'mg5proccard', 'mgshowercard', 'foanalyse']:
                raise
            return self.charge_card(attr)
    def change_lhe_version(self, version):
        """change the lhe version associate to the banner"""
        version = float(version)
        if version < 3:
            # Anything below 3 is normalised to the original LHE v1 format.
            version = 1
        elif version > 3:
            raise Exception, "Not Supported version"
        self.lhe_version = version
    def get_cross(self, witherror=False):
        """Return the total cross-section summed from the init block.

        With witherror=True, also return the error (individual errors
        added in quadrature) as a (cross, error) tuple.
        """
        if "init" not in self:
            misc.sprint(self.keys())
            raise Exception
        text = self["init"].split('\n')
        cross = 0
        error = 0
        for line in text:
            s = line.split()
            # Per-process lines have 4 fields: xsec, error, max weight, pid.
            if len(s)==4:
                cross += float(s[0])
                if witherror:
                    error += float(s[1])**2
        if not witherror:
            return cross
        else:
            return cross, math.sqrt(error)
    def scale_init_cross(self, ratio):
        """Rescale every per-process (xsec, err, max-weight) line of the
        init block by the same ratio, keeping the rest untouched."""
        assert "init" in self
        all_lines = self["init"].split('\n')
        new_data = []
        new_data.append(all_lines[0])
        for i in range(1, len(all_lines)):
            line = all_lines[i]
            split = line.split()
            if len(split) == 4:
                xsec, xerr, xmax, pid = split
            else:
                # First non-4-field line ends the per-process table.
                new_data += all_lines[i:]
                break
            pid = int(pid)
            line = "   %+13.7e %+13.7e %+13.7e %i" % \
                (ratio*float(xsec), ratio* float(xerr), ratio*float(xmax), pid)
            new_data.append(line)
        self['init'] = '\n'.join(new_data)
    def get_pdg_beam(self):
        """return the pdg of each beam"""
        assert "init" in self
        all_lines = self["init"].split('\n')
        # The first two fields of the init block's first line are the
        # beam PDG ids.
        pdg1,pdg2,_ = all_lines[0].split(None, 2)
        return int(pdg1), int(pdg2)
    def load_basic(self, medir):
        """ Load the proc_card /param_card and run_card """
        self.add(pjoin(medir,'Cards', 'param_card.dat'))
        self.add(pjoin(medir,'Cards', 'run_card.dat'))
        # Prefer the MG5 process definition when present, else the
        # legacy proc_card.
        if os.path.exists(pjoin(medir, 'SubProcesses', 'procdef_mg5.dat')):
            self.add(pjoin(medir,'SubProcesses', 'procdef_mg5.dat'))
            self.add(pjoin(medir,'Cards', 'proc_card_mg5.dat'))
        else:
            self.add(pjoin(medir,'Cards', 'proc_card.dat'))
    def change_seed(self, seed):
        """Change the seed value in the banner"""
        # Matches a run-card line of the form "  123 = iseed".
        p = re.compile(r'''^\s*\d+\s*=\s*iseed''', re.M)
        new_seed_str = " %s = iseed" % seed
        self['mgruncard'] = p.sub(new_seed_str, self['mgruncard'])
    def add_generation_info(self, cross, nb_event):
        """Store the event count and integrated weight in MGGenerationInfo."""
        text = """
#  Number of Events        :       %s
#  Integrated weight (pb)  :       %s
""" % (nb_event, cross)
        self['MGGenerationInfo'] = text
############################################################################
# SPLIT BANNER
############################################################################
    def split(self, me_dir, proc_card=True):
        """write the banner in the Cards directory.
        proc_card argument is present to avoid the overwrite of proc_card
        information"""
        for tag, text in self.items():
            if tag == 'mgversion':
                continue
            if not proc_card and tag in ['mg5proccard','mgproccard']:
                continue
            # Some tags (init, mggenerationinfo, ...) map to no card file.
            if not self.tag_to_file[tag]:
                continue
            ff = open(pjoin(me_dir, 'Cards', self.tag_to_file[tag]), 'w')
            ff.write(text)
            ff.close()
############################################################################
# WRITE BANNER
############################################################################
    def check_pid(self, pid2label):
        """special routine removing width/mass of particles not present in the model
        This is usefull in case of loop model card, when we want to use the non
        loop model."""
        if not hasattr(self, 'param_card'):
            self.charge_card('slha')
        for tag in ['mass', 'decay']:
            block = self.param_card.get(tag)
            for data in block:
                pid = data.lhacode[0]
                # Drop entries for particles absent from the target model.
                if pid not in pid2label.keys():
                    block.remove((pid,))
    def get_lha_strategy(self):
        """get the lha_strategy: how the weight have to be handle by the shower"""
        if not self["init"]:
            raise Exception, "No init block define"
        data = self["init"].split('\n')[0].split()
        if len(data) != 10:
            misc.sprint(len(data), self['init'])
            raise Exception, "init block has a wrong format"
        # The strategy is the second-to-last field of the first init line.
        return int(float(data[-2]))
    def set_lha_strategy(self, value):
        """set the lha_strategy: how the weight have to be handle by the shower"""
        # Valid LHA weight strategies are -4..4.
        if not (-4 <= int(value) <= 4):
            raise Exception, "wrong value for lha_strategy", value
        if not self["init"]:
            raise Exception, "No init block define"
        all_lines = self["init"].split('\n')
        data = all_lines[0].split()
        if len(data) != 10:
            misc.sprint(len(data), self['init'])
            raise Exception, "init block has a wrong format"
        # Rewrite the second-to-last field of the first init line in place.
        data[-2] = '%s' % value
        all_lines[0] = ' '.join(data)
        self['init'] = '\n'.join(all_lines)
    def modify_init_cross(self, cross):
        """Replace per-process cross-sections in the init block.

        cross: dict mapping process pid -> new cross-section. The error
        and max-weight fields are rescaled by the same ratio.
        (Sibling of scale_init_cross, which applies one global ratio.)
        """
        assert isinstance(cross, dict)
        # assert "all" in cross
        assert "init" in self
        all_lines = self["init"].split('\n')
        new_data = []
        new_data.append(all_lines[0])
        for i in range(1, len(all_lines)):
            line = all_lines[i]
            split = line.split()
            if len(split) == 4:
                xsec, xerr, xmax, pid = split
            else:
                # First non-4-field line ends the per-process table.
                new_data += all_lines[i:]
                break
            if int(pid) not in cross:
                raise Exception
            pid = int(pid)
            ratio = cross[pid]/float(xsec)
            line = "   %+13.7e %+13.7e %+13.7e %i" % \
                (float(cross[pid]), ratio* float(xerr), ratio*float(xmax), pid)
            new_data.append(line)
        self['init'] = '\n'.join(new_data)
############################################################################
# WRITE BANNER
############################################################################
    def write(self, output_path, close_tag=True, exclude=[]):
        """Write the banner to a path or open file object and return it.

        close_tag: also emit the closing </LesHouchesEvents> tag.
        exclude: tags to skip; include '/header' to skip the header close.
        NOTE(review): the mutable default exclude=[] is never mutated here,
        so it is harmless, but exclude=None would be more robust.
        """
        if isinstance(output_path, str):
            ff = open(output_path, 'w')
        else:
            ff = output_path
        if MADEVENT:
            header = open(pjoin(MEDIR, 'Source', 'banner_header.txt')).read()
        else:
            header = open(pjoin(MG5DIR,'Template', 'LO', 'Source', 'banner_header.txt')).read()
        # Default the LHE version from the run card; below 3 means v1 format.
        if not self.lhe_version:
            self.lhe_version = self.get('run_card', 'lhe_version', default=1.0)
            if float(self.lhe_version) < 3:
                self.lhe_version = 1.0
        ff.write(header % { 'version':float(self.lhe_version)})
        # Known tags first (in canonical order), then any remaining ones.
        for tag in [t for t in self.ordered_items if t in self.keys()]+ \
            [t for t in self.keys() if t not in self.ordered_items]:
            if tag in ['init'] or tag in exclude:
                continue
            capitalized_tag = self.capitalized_items[tag] if tag in self.capitalized_items else tag
            start_data, stop_data = '', ''
            # Wrap content containing markup-sensitive chars in CDATA,
            # unless the tag forbids it.
            if capitalized_tag not in self.forbid_cdata and \
                ('<' in self[tag] or '@' in self[tag]):
                start_data = '\n<![CDATA['
                stop_data = ']]>\n'
            ff.write('<%(tag)s>%(start_data)s\n%(text)s\n%(stop_data)s</%(tag)s>\n' % \
                {'tag':capitalized_tag, 'text':self[tag].strip(),
                 'start_data': start_data, 'stop_data':stop_data})
        if not '/header' in exclude:
            ff.write('</header>\n')
        # The init block is written last, outside the header.
        if 'init' in self and not 'init' in exclude:
            text = self['init']
            ff.write('<%(tag)s>\n%(text)s\n</%(tag)s>\n' % \
                {'tag':'init', 'text':text.strip()})
        if close_tag:
            ff.write('</LesHouchesEvents>\n')
        return ff
############################################################################
# BANNER
############################################################################
    def add(self, path, tag=None):
        """Add the content of a card file to the banner.

        When tag is None, it is inferred from the file name; an
        unrecognised name raises.
        """
        if not tag:
            card_name = os.path.basename(path)
            if 'param_card' in card_name:
                tag = 'slha'
            elif 'run_card' in card_name:
                tag = 'MGRunCard'
            elif 'pythia_card' in card_name:
                tag = 'MGPythiaCard'
            elif 'pythia8_card' in card_name or 'pythia8.cmd' in card_name:
                tag = 'MGPythiaCard'
            elif 'pgs_card' in card_name:
                tag = 'MGPGSCard'
            elif 'delphes_card' in card_name:
                tag = 'MGDelphesCard'
            elif 'delphes_trigger' in card_name:
                tag = 'MGDelphesTrigger'
            elif 'proc_card_mg5' in card_name:
                tag = 'MG5ProcCard'
            elif 'proc_card' in card_name:
                tag = 'MGProcCard'
            elif 'procdef_mg5' in card_name:
                tag = 'MGProcCard'
            elif 'shower_card' in card_name:
                tag = 'MGShowerCard'
            elif 'madspin_card' in card_name:
                tag = 'madspin'
            elif 'FO_analyse_card' in card_name:
                tag = 'foanalyse'
            elif 'reweight_card' in card_name:
                tag='reweight_card'
            elif 'madanalysis5_parton_card' in card_name:
                tag='MA5Card_parton'
            elif 'madanalysis5_hadron_card' in card_name:
                tag='MA5Card_hadron'
            else:
                raise Exception, 'Impossible to know the type of the card'
        # add_text lowercases the tag before storing.
        self.add_text(tag.lower(), open(path).read())
def add_text(self, tag, text):
    """Store *text* in the banner under the canonical form of *tag*."""
    # canonical banner tag for a handful of common card-name aliases
    aliases = {'param_card': 'slha',
               'run_card': 'mgruncard',
               'proc_card': 'mg5proccard',
               'shower_card': 'mgshowercard',
               'FO_analyse_card': 'foanalyse'}
    tag = aliases.get(tag, tag)
    self[tag.lower()] = text
def charge_card(self, tag):
    """Build the python object associated to the card.

    *tag* may be a user-facing card name ('param_card', 'run_card', ...)
    or an internal banner tag ('slha', 'mgruncard', ...).  The parsed
    object is cached as an attribute on self (self.param_card,
    self.run_card, self.proc_card, self.shower_card or
    self.FOanalyse_card) and returned.
    """
    # map user-facing card names onto the internal banner tags
    if tag == 'param_card':
        tag = 'slha'
    elif tag == 'run_card':
        tag = 'mgruncard'
    elif tag == 'proc_card':
        tag = 'mg5proccard'
    elif tag == 'shower_card':
        tag = 'mgshowercard'
    elif tag == 'FO_analyse_card':
        tag = 'foanalyse'
    assert tag in ['slha', 'mgruncard', 'mg5proccard', 'mgshowercard', 'foanalyse'], 'invalid card %s' % tag

    if tag == 'slha':
        # param_card parser works on a list of lines
        param_card = self[tag].split('\n')
        self.param_card = param_card_reader.ParamCard(param_card)
        return self.param_card
    elif tag == 'mgruncard':
        self.run_card = RunCard(self[tag])
        return self.run_card
    elif tag == 'mg5proccard':
        proc_card = self[tag].split('\n')
        self.proc_card = ProcCard(proc_card)
        return self.proc_card
    elif tag =='mgshowercard':
        shower_content = self[tag]
        # import path depends on whether we run from a MadEvent output dir
        if MADEVENT:
            import internal.shower_card as shower_card
        else:
            import madgraph.various.shower_card as shower_card
        self.shower_card = shower_card.ShowerCard(shower_content, True)
        # set testing to false (testing = true allow to init using
        # the card content instead of the card path"
        self.shower_card.testing = False
        return self.shower_card
    elif tag =='foanalyse':
        analyse_content = self[tag]
        if MADEVENT:
            import internal.FO_analyse_card as FO_analyse_card
        else:
            import madgraph.various.FO_analyse_card as FO_analyse_card
        # set testing to false (testing = true allow to init using
        # the card content instead of the card path"
        self.FOanalyse_card = FO_analyse_card.FOAnalyseCard(analyse_content, True)
        self.FOanalyse_card.testing = False
        return self.FOanalyse_card
def get_detail(self, tag, *arg, **opt):
    """Return a card object, or a specific entry of a card.

    *tag* selects the card ('param_card'/'param', 'run_card'/'run',
    'proc_card', 'model', 'generate', 'shower_card').  With no extra
    argument the full card object is returned; with one argument the
    corresponding entry; for the param_card ('slha') two arguments
    select block and parameter id.  If the keyword 'default' is given,
    it is returned instead of raising KeyError for a missing entry.
    """
    if tag in ['param_card', 'param']:
        tag = 'slha'
        attr_tag = 'param_card'
    elif tag in ['run_card', 'run']:
        tag = 'mgruncard'
        attr_tag = 'run_card'
    elif tag == 'proc_card':
        tag = 'mg5proccard'
        attr_tag = 'proc_card'
    elif tag == 'model':
        tag = 'mg5proccard'
        attr_tag = 'proc_card'
        arg = ('model',)
    elif tag == 'generate':
        tag = 'mg5proccard'
        attr_tag = 'proc_card'
        arg = ('generate',)
    elif tag == 'shower_card':
        tag = 'mgshowercard'
        attr_tag = 'shower_card'
    # 'mgshowercard' is the value actually assigned above; the original
    # assert listed 'shower_card' instead and therefore rejected every
    # valid shower_card request.
    assert tag in ['slha', 'mgruncard', 'mg5proccard', 'mgshowercard'], '%s not recognized' % tag
    if not hasattr(self, attr_tag):
        self.charge_card(attr_tag)

    card = getattr(self, attr_tag)
    if len(arg) == 0:
        return card
    elif len(arg) == 1:
        if tag == 'mg5proccard':
            try:
                return card.get(arg[0])
            except KeyError:
                if 'default' in opt:
                    return opt['default']
                else:
                    raise
        try:
            return card[arg[0]]
        except KeyError:
            if 'default' in opt:
                return opt['default']
            else:
                raise
    elif len(arg) == 2 and tag == 'slha':
        try:
            return card[arg[0]].get(arg[1:])
        except KeyError:
            if 'default' in opt:
                return opt['default']
            else:
                raise
    else:
        # (the original also had an unreachable duplicate len(arg)==0 branch
        # here, already handled above)
        raise Exception("Unknown command")

#convenient alias
get = get_detail
def set(self, card, *args):
    """Modify one entry of one of the cards.

    set(card, key, value) for the flat cards (run_card, proc_card info,
    shower_card); for the param_card ('slha') all leading arguments
    select the block/parameter and the last one is the new value.
    """
    # The original body tested an undefined name 'tag' (NameError on
    # every call); the selector is the 'card' argument.
    tag = card
    if tag == 'param_card':
        tag = 'slha'
        attr_tag = 'param_card'
    elif tag == 'run_card':
        tag = 'mgruncard'
        attr_tag = 'run_card'
    elif tag == 'proc_card':
        tag = 'mg5proccard'
        attr_tag = 'proc_card'
    elif tag == 'model':
        tag = 'mg5proccard'
        attr_tag = 'proc_card'
    elif tag == 'generate':
        tag = 'mg5proccard'
        attr_tag = 'proc_card'
    elif tag == 'shower_card':
        tag = 'mgshowercard'
        attr_tag = 'shower_card'
    # 'mgshowercard' is the mapped tag value (the original assert listed
    # 'shower_card' and could never accept it).  Dead 'arg = (...)'
    # assignments copied from get_detail were dropped: 'arg' is unused here.
    assert tag in ['slha', 'mgruncard', 'mg5proccard', 'mgshowercard'], 'not recognized'

    if not hasattr(self, attr_tag):
        self.charge_card(attr_tag)

    # do not shadow the 'card' parameter with the card object
    card_obj = getattr(self, attr_tag)
    if len(args) == 2:
        if tag == 'mg5proccard':
            card_obj.info[args[0]] = args[-1]
        else:
            card_obj[args[0]] = args[1]
    else:
        card_obj[args[:-1]] = args[-1]
@misc.multiple_try()  # presumably retries on transient I/O failure -- see misc.multiple_try
def add_to_file(self, path, seed=None, out=None):
    """Add the banner to a file and change the associate seed in the banner"""

    # optionally override the random seed stored in the run_card section
    if seed is not None:
        self.set("run_card", "iseed", seed)

    # write either to an explicit output path or to a temporary sibling file
    if not out:
        path_out = "%s.tmp" % path
    else:
        path_out = out

    # write the banner itself, leaving the LHE document open so the event
    # content of 'path' can be appended below
    ff = self.write(path_out, close_tag=False,
                    exclude=['MGGenerationInfo', '/header', 'init'])
    ff.write("## END BANNER##\n")

    if self.lhe_version >= 3:
        #add the original content
        # rewrite the <generator ...> line with the current MG version;
        # copy every other line verbatim
        [ff.write(line) if not line.startswith("<generator name='MadGraph5_aMC@NLO'")
         else ff.write("<generator name='MadGraph5_aMC@NLO' version='%s'>" % self['mgversion'][:-1])
         for line in open(path)]
    else:
        [ff.write(line) for line in open(path)]
    ff.write("</LesHouchesEvents>\n")
    ff.close()
    # when writing in place, replace the original file with the merged one
    if out:
        os.remove(path)
    else:
        files.mv(path_out, path)
def split_banner(banner_path, me_dir, proc_card=True):
    """Split the banner found at *banner_path* into its individual cards."""
    Banner(banner_path).split(me_dir, proc_card)
def recover_banner(results_object, level, run=None, tag=None):
    """as input we receive a gen_crossxhtml.AllResults object.
    This define the current banner and load it
    """
    # Resolve the run/tag actually used into _run/_tag; the raw 'run'/'tag'
    # arguments are kept so we can detect an explicit user request below.
    if not run:
        try:
            _run = results_object.current['run_name']
            _tag = results_object.current['tag']
        except Exception:
            return Banner()
    else:
        _run = run
        if not tag:
            try:
                _tag = results_object[run].tags[-1]
            except Exception:
                return Banner()
        else:
            _tag = tag

    path = results_object.path
    # The original built this path (and the parton-level paths below) from
    # the raw 'run'/'tag' arguments, which are None when not explicitly
    # passed -> wrong file name.  Use the resolved values instead.
    banner_path = pjoin(path, 'Events', _run, '%s_%s_banner.txt' % (_run, _tag))

    if not os.path.exists(banner_path):
        if level != "parton" and tag != _tag:
            # no banner for the requested tag: fall back on the first tag
            return recover_banner(results_object, level, _run, results_object[_run].tags[0])
        elif level == 'parton':
            paths = [pjoin(path, 'Events', _run, 'unweighted_events.lhe.gz'),
                     pjoin(path, 'Events', _run, 'unweighted_events.lhe'),
                     pjoin(path, 'Events', _run, 'events.lhe.gz'),
                     pjoin(path, 'Events', _run, 'events.lhe')]
            for p in paths:
                if os.path.exists(p):
                    if MADEVENT:
                        import internal.lhe_parser as lhe_parser
                    else:
                        import madgraph.various.lhe_parser as lhe_parser
                    lhe = lhe_parser.EventFile(p)
                    return Banner(lhe.banner)
        # security if the banner was remove (or program canceled before created it)
        return Banner()

    banner = Banner(banner_path)
    # drop the cards that the requested level is going to regenerate
    if level == 'pythia':
        if 'mgpythiacard' in banner:
            del banner['mgpythiacard']
    if level in ['pythia', 'pgs', 'delphes']:
        # renamed loop variable (the original reused and clobbered 'tag')
        for card_tag in ['mgpgscard', 'mgdelphescard', 'mgdelphestrigger']:
            if card_tag in banner:
                del banner[card_tag]
    return banner
class InvalidRunCard(InvalidCmd):
    """Error raised for an invalid run_card (specialisation of InvalidCmd)."""
    pass
class ProcCard(list):
    """Store the command lines of a MG5 proc_card together with a small
    amount of parsed metadata (model name, generate command).

    The object is a list of command-line strings; append() keeps the
    history clean by removing superseded commands (old outputs, old
    generations, ...).
    """

    # header written at the top of every proc_card; '%(info_line)s' is a
    # substitution placeholder (left untouched by write())
    history_header = \
        '#************************************************************\n' + \
        '#* MadGraph5_aMC@NLO *\n' + \
        '#* *\n' + \
        "#* * * *\n" + \
        "#* * * * * *\n" + \
        "#* * * * * 5 * * * * *\n" + \
        "#* * * * * *\n" + \
        "#* * * *\n" + \
        "#* *\n" + \
        "#* *\n" + \
        "%(info_line)s" +\
        "#* *\n" + \
        "#* The MadGraph5_aMC@NLO Development Team - Find us at *\n" + \
        "#* https://server06.fynu.ucl.ac.be/projects/madgraph *\n" + \
        '#* *\n' + \
        '#************************************************************\n' + \
        '#* *\n' + \
        '#* Command File for MadGraph5_aMC@NLO *\n' + \
        '#* *\n' + \
        '#* run as ./bin/mg5_aMC filename *\n' + \
        '#* *\n' + \
        '#************************************************************\n'

    def __init__(self, init=None):
        """ initialize a basic proc_card"""
        self.info = {'model': 'sm', 'generate': None,
                     'full_model_line': 'import model sm'}
        list.__init__(self)
        if init:
            self.read(init)

    def read(self, init):
        """read the proc_card and save the information.
        *init* is a path to a file or any iterable of lines;
        lines ending with a backslash continue on the next line."""
        if isinstance(init, str):  # path to file
            # open() instead of the py2-only file() builtin
            init = open(init, 'r')

        store_line = ''
        for line in init:
            line = line.rstrip()
            if line.endswith('\\'):
                store_line += line[:-1]
            else:
                tmp = store_line + line
                self.append(tmp.strip())
                store_line = ""
        if store_line:
            raise Exception("WRONG CARD FORMAT")

    def move_to_last(self, cmd):
        """move an element to the last history."""
        for line in self[:]:
            if line.startswith(cmd):
                self.remove(line)
                list.append(self, line)

    def append(self, line):
        """add a line in the proc_card and perform automatic cleaning"""
        line = line.strip()
        cmds = line.split()
        if len(cmds) == 0:
            return

        list.append(self, line)

        # command type:
        cmd = cmds[0]

        if cmd == 'output':
            # Remove previous outputs from history
            self.clean(allow_for_removal=['output'], keep_switch=True,
                       remove_bef_last='output')
        elif cmd == 'generate':
            # Remove previous generations from history
            self.clean(remove_bef_last='generate', keep_switch=True,
                       allow_for_removal=['generate', 'add process', 'output'])
            self.info['generate'] = ' '.join(cmds[1:])
        # guard len(cmds) > 1: a bare 'add' line used to raise IndexError
        elif cmd == 'add' and len(cmds) > 1 and cmds[1] == 'process' and not self.info['generate']:
            self.info['generate'] = ' '.join(cmds[2:])
        elif cmd == 'import':
            if len(cmds) < 2:
                return
            if cmds[1].startswith('model'):
                self.info['full_model_line'] = line
                self.clean(remove_bef_last='import', keep_switch=True,
                           allow_for_removal=['generate', 'add process', 'add model', 'output'])
                if cmds[1] == 'model':
                    self.info['model'] = cmds[2]
                else:
                    self.info['model'] = None  # not UFO model
            elif cmds[1] == 'proc_v4':
                # full cleaning
                self[:] = []

    def clean(self, to_keep=['set', 'add', 'load'],
              remove_bef_last=None,
              to_remove=['open', 'display', 'launch', 'check', 'history'],
              allow_for_removal=None,
              keep_switch=False):
        """Remove command in arguments from history.
        All command before the last occurrence of 'remove_bef_last'
        (including it) will be removed (but if another options tells the opposite).
        'to_keep' is a set of line to always keep.
        'to_remove' is a set of line to always remove (don't care about remove_bef_
        status but keep_switch acts.).
        if 'allow_for_removal' is define only the command in that list can be
        remove of the history for older command that remove_bef_lb1. all parameter
        present in to_remove are always remove even if they are not part of this
        list.
        keep_switch force to keep the statement remove_bef_??? which changes starts
        the removal mode.
        """
        # check consistency
        if __debug__ and allow_for_removal:
            for arg in to_keep:
                assert arg not in allow_for_removal

        nline = -1
        removal = False
        # looping backward
        while nline > -len(self):
            switch = False  # set in True when removal pass in True
            # check if we need to pass in removal mode
            if not removal and remove_bef_last:
                if self[nline].startswith(remove_bef_last):
                    removal = True
                    switch = True

            # if this is the switch and is protected pass to the next element
            if switch and keep_switch:
                nline -= 1
                continue

            # remove command in to_remove (whatever the status of removal)
            if any([self[nline].startswith(arg) for arg in to_remove]):
                self.pop(nline)
                continue

            # Only if removal mode is active!
            if removal:
                if allow_for_removal:
                    # Only a subset of command can be removed
                    if any([self[nline].startswith(arg)
                            for arg in allow_for_removal]):
                        self.pop(nline)
                        continue
                elif not any([self[nline].startswith(arg) for arg in to_keep]):
                    # All command have to be remove but protected
                    self.pop(nline)
                    continue

            # update the counter to pass to the next element
            nline -= 1

    def get(self, tag, default=None):
        """Return the tag-th line (integer tag), the list of multiparticle
        definitions ('multiparticles'), or an entry of self.info."""
        if isinstance(tag, int):
            # the original called the non-existent list.__getattr__ and
            # dropped the result; return the requested line instead
            return list.__getitem__(self, tag)
        elif tag == 'info' or tag == "__setstate__":
            return default  # for pickle
        elif tag == "multiparticles":
            out = []
            for line in self:
                if line.startswith('define'):
                    name, content = line[7:].split('=', 1)
                    out.append((name, content))
            return out
        else:
            return self.info[tag]

    def write(self, path):
        """write the proc_card to a given path"""
        fsock = open(path, 'w')
        try:
            fsock.write(self.history_header)
            for line in self:
                # wrap long commands with trailing backslashes
                # (re-assembled by read())
                while len(line) > 70:
                    sub, line = line[:70] + "\\", line[70:]
                    fsock.write(sub + "\n")
                fsock.write(line + "\n")
        finally:
            # the original never closed the handle (leak until GC)
            fsock.close()
class ConfigFile(dict):
    """ a class for storing/dealing with input file.
    """
    # Behaves like a case-insensitive dict: keys are stored lower-case and
    # the original spelling is kept in self.lower_to_case.  Subclasses
    # declare their known parameters in default_setup() via add_param();
    # string values assigned later are coerced to the declared type.

    def __init__(self, finput=None, **opt):
        """initialize a new instance. input can be an instance of MadLoopParam,
        a file, a path to a file, or simply Nothing"""

        if isinstance(finput, self.__class__):
            # copy constructor: duplicate the dict content and every
            # bookkeeping attribute (user_set, list_parameter, ...)
            dict.__init__(self, finput)
            assert finput.__dict__.keys()
            for key in finput.__dict__:
                setattr(self, key, copy.copy(getattr(finput, key)) )
            return
        else:
            dict.__init__(self)

        # Initialize it with all the default value
        self.user_set = set()      # parameters explicitly set by the user
        self.auto_set = set()      # parameters currently on 'auto'
        self.system_only = set()   # parameters the user may not modify
        self.lower_to_case = {}    # lower-case key -> original spelling
        self.list_parameter = {} #key -> type of list (int/float/bool/str/...
        self.dict_parameter = {}
        self.comments = {} # comment associated to parameters. can be display via help message

        self.default_setup()

        # if input is define read that input
        # NOTE(review): 'file' is the py2 builtin; this isinstance breaks on py3
        if isinstance(finput, (file, str, StringIO.StringIO)):
            self.read(finput, **opt)

    def default_setup(self):
        """declare the default parameters; overridden by subclasses"""
        pass

    def __copy__(self):
        # delegate to the copy-constructor branch of __init__
        return self.__class__(self)

    def __add__(self, other):
        """define the sum"""
        assert isinstance(other, dict)
        base = self.__class__(self)
        #base = copy.copy(self)
        base.update((key.lower(),value) for key, value in other.items())
        return base

    def __radd__(self, other):
        """define the sum"""
        new = copy.copy(other)
        new.update((key, value) for key, value in self.items())
        return new

    def __contains__(self, key):
        # case-insensitive membership test
        return dict.__contains__(self, key.lower())

    def __iter__(self):
        # iterate with the original (user-facing) spelling of the keys
        iter = super(ConfigFile, self).__iter__()
        return (self.lower_to_case[name] for name in iter)

    def keys(self):
        return [name for name in self]

    def items(self):
        return [(name,self[name]) for name in self]

    def __setitem__(self, name, value, change_userdefine=False):
        """set the attribute and set correctly the type if the value is a string.
        change_userdefine on True if we have to add the parameter in user_set
        """

        if  not len(self):
            #Should never happen but when deepcopy/pickle
            self.__init__()

        name = name.strip()
        lower_name = name.lower()
        # 0. check if this parameter is a system only one
        if change_userdefine and lower_name in self.system_only:
            logger.critical('%s is a private entry which can not be modify by the user. Keep value at %s' % (name,self[name]))
            return

        #1. check if the parameter is set to auto -> pass it to special
        if lower_name in self:
            targettype = type(dict.__getitem__(self, lower_name))
            if targettype != str and isinstance(value, str) and value.lower() == 'auto':
                self.auto_set.add(lower_name)
                if lower_name in self.user_set:
                    self.user_set.remove(lower_name)
                #keep old value.
                return
            elif lower_name in self.auto_set:
                self.auto_set.remove(lower_name)

        # 2. Find the type of the attribute that we want
        if lower_name in self.list_parameter:
            targettype = self.list_parameter[lower_name]
            if isinstance(value, str):
                # split for each comma/space
                value = value.strip()
                if value.startswith('[') and value.endswith(']'):
                    value = value[1:-1]
                #do not perform split within a " or ' block
                data = re.split(r"((?<![\\])['\"])((?:.(?!(?<![\\])\1))*.?)\1", str(value))
                new_value = []
                i = 0
                while len(data) > i:
                    # NOTE(review): the third positional argument of re.split
                    # is maxsplit, not flags -- re.VERBOSE (=64) here acts as
                    # a split limit; confirm intent.
                    current = filter(None, re.split(r'(?:(?<!\\)\s)|,', data[i], re.VERBOSE))
                    i+=1
                    if len(data) > i+1:
                        # re-attach a quoted segment to the preceding token
                        if current:
                            current[-1] += '{0}{1}{0}'.format(data[i], data[i+1])
                        else:
                            current = ['{0}{1}{0}'.format(data[i], data[i+1])]
                        i+=2
                    new_value += current
                value = new_value
            elif not hasattr(value, '__iter__'):
                value = [value]
            elif isinstance(value, dict):
                raise Exception, "not being able to handle dictionary in card entry"
            #format each entry
            values =[self.format_variable(v, targettype, name=name)
                     for v in value]
            dict.__setitem__(self, lower_name, values)
            if change_userdefine:
                self.user_set.add(lower_name)
            return
        elif lower_name in self.dict_parameter:
            targettype = self.dict_parameter[lower_name]
            full_reset = True #check if we just update the current dict or not

            if isinstance(value, str):
                value = value.strip()
                # allowed entry:
                #   name : value   => just add the entry
                #   name , value   => just add the entry
                #   name value     => just add the entry
                #   {name1:value1, name2:value2}   => full reset
                # split for each comma/space
                if value.startswith('{') and value.endswith('}'):
                    new_value = {}
                    for pair in value[1:-1].split(','):
                        if not pair.strip():
                            break
                        x, y = pair.split(':')
                        x, y = x.strip(), y.strip()
                        if x.startswith(('"',"'")) and x.endswith(x[0]):
                            x = x[1:-1]
                        new_value[x] = y
                    value = new_value
                elif ',' in value:
                    x,y = value.split(',')
                    value = {x.strip():y.strip()}
                    full_reset = False
                elif ':' in value:
                    x,y = value.split(':')
                    value = {x.strip():y.strip()}
                    full_reset = False
                else:
                    x,y = value.split()
                    value = {x:y}
                    full_reset = False

            if isinstance(value, dict):
                # coerce every entry to the declared value type
                for key in value:
                    value[key] = self.format_variable(value[key], targettype, name=name)
                if full_reset:
                    dict.__setitem__(self, lower_name, value)
                else:
                    dict.__getitem__(self, lower_name).update(value)
            else:
                raise Exception, '%s should be of dict type'% lower_name
            if change_userdefine:
                self.user_set.add(lower_name)
            return
        elif name in self:
            targettype = type(self[name])
        else:
            # unknown parameter: register it with an UnknownType placeholder
            logger.debug('Trying to add argument %s in %s. ' % (name, self.__class__.__name__) +\
                         'This argument is not defined by default. Please consider adding it.')
            suggestions = [k for k in self.keys() if k.startswith(name[0].lower())]
            if len(suggestions)>0:
                logger.debug("Did you mean one of the following: %s"%suggestions)
            self.add_param(lower_name, self.format_variable(UnknownType(value),
                                                            UnknownType, name))
            self.lower_to_case[lower_name] = name
            if change_userdefine:
                self.user_set.add(lower_name)
            return

        value = self.format_variable(value, targettype, name=name)
        dict.__setitem__(self, lower_name, value)
        if change_userdefine:
            self.user_set.add(lower_name)

    def add_param(self, name, value, system=False, comment=False, typelist=None):
        """add a default parameter to the class.
        *value* also fixes the declared type; for an empty list default,
        *typelist* must give the element type.  *system*=True forbids user
        modification; *comment* is shown by do_help()."""

        lower_name = name.lower()
        if __debug__:
            if lower_name in self:
                raise Exception("Duplicate case for %s in %s" % (name,self.__class__))

        dict.__setitem__(self, lower_name, value)
        self.lower_to_case[lower_name] = name
        if isinstance(value, list):
            if len(value):
                targettype = type(value[0])
            else:
                targettype=typelist
                assert typelist
            if any([targettype != type(v) for v in value]):
                raise Exception, "All entry should have the same type"
            self.list_parameter[lower_name] = targettype
        elif isinstance(value, dict):
            allvalues = value.values()
            if any([type(allvalues[0]) != type(v) for v in allvalues]):
                raise Exception, "All entry should have the same type"
            self.dict_parameter[lower_name] = type(allvalues[0])
            if '__type__' in value:
                # '__type__' is only a type marker, not a real entry
                del value['__type__']
                dict.__setitem__(self, lower_name, value)

        if system:
            self.system_only.add(lower_name)
        if comment:
            self.comments[lower_name] = comment

    def do_help(self, name):
        """return a minimal help for the parameter"""

        out = "## Information on parameter %s from class %s\n" % (name, self.__class__.__name__)
        if name.lower() in self:
            out += "## current value: %s (parameter should be of type %s)\n" % (self[name], type(self[name]))
            if name.lower() in self.comments:
                out += '## %s\n' % self.comments[name.lower()].replace('\n', '\n## ')
        else:
            out += "## Unknown for this class\n"
        if name.lower() in self.user_set:
            out += "## This value is considered as been set by the user\n"
        else:
            out += "## This value is considered as been set by the system\n"
        logger.info(out)

    @staticmethod
    def format_variable(value, targettype, name="unknown"):
        """assign the value to the attribute for the given format.
        Non-string values are type-checked; string values are parsed
        according to *targettype* (bool/int/float/str/UnknownType)."""

        if not isinstance(value, str):
            # just have to check that we have the correct format
            if isinstance(value, targettype):
                pass # assignement at the end
            elif isinstance(value, numbers.Number) and issubclass(targettype, numbers.Number):
                try:
                    new_value = targettype(value)
                except TypeError:
                    # complex input: accept it if the imaginary part is negligible
                    # NOTE(review): value.real == 0 would raise ZeroDivisionError here
                    if value.imag/value.real<1e-12:
                        new_value = targettype(value.real)
                    else:
                        raise
                if new_value == value:
                    value = new_value
                else:
                    raise Exception, "Wrong input type for %s found %s and expecting %s for value %s" %\
                        (name, type(value), targettype, value)
            else:
                raise Exception, "Wrong input type for %s found %s and expecting %s for value %s" %\
                    (name, type(value), targettype, value)
        else:
            # We have a string we have to format the attribute from the string
            if targettype == UnknownType:
                # No formatting
                pass
            elif targettype == bool:
                value = value.strip()
                if value.lower() in ['0', '.false.', 'f', 'false', 'off']:
                    value = False
                elif value.lower() in ['1', '.true.', 't', 'true', 'on']:
                    value = True
                else:
                    raise Exception, "%s can not be mapped to True/False for %s" % (repr(value),name)
            elif targettype == str:
                value = value.strip()
                # strip one level of matching quotes
                if value.startswith('\'') and value.endswith('\''):
                    value = value[1:-1]
                elif value.startswith('"') and value.endswith('"'):
                    value = value[1:-1]
            elif targettype == int:
                if value.isdigit():
                    value = int(value)
                elif value[1:].isdigit() and value[0] == '-':
                    value = int(value)
                elif value.endswith(('k', 'M')) and value[:-1].isdigit():
                    # allow '10k' / '2M' style suffixes
                    convert = {'k':1000, 'M':1000000}
                    value =int(value[:-1]) * convert[value[-1]]
                else:
                    # last resort: accept a float (Fortran 'd' exponent allowed)
                    # that is exactly integral
                    try:
                        value = float(value.replace('d','e'))
                    except ValueError:
                        raise Exception, "%s can not be mapped to an integer" % value
                    try:
                        new_value = int(value)
                    except ValueError:
                        raise Exception, "%s can not be mapped to an integer" % value
                    else:
                        if value == new_value:
                            value = new_value
                        else:
                            raise Exception, "incorect input: %s need an integer for %s" % (value,name)
            elif targettype == float:
                value = value.replace('d','e') # pass from Fortran formatting
                try:
                    value = float(value)
                except ValueError:
                    # allow simple '*' and '/' arithmetic, e.g. '2*91.188'
                    try:
                        split = re.split('(\*|/)',value)
                        v = float(split[0])
                        for i in range((len(split)//2)):
                            if split[2*i+1] == '*':
                                v *=  float(split[2*i+2])
                            else:
                                v /=  float(split[2*i+2])
                    except:
                        v=0
                        raise Exception, "%s can not be mapped to a float" % value
                    finally:
                        # on success the computed product/quotient is kept;
                        # the exception above still propagates if raised
                        value = v
            else:
                raise Exception, "type %s is not handle by the card" % targettype

        return value

    def __getitem__(self, name):

        lower_name = name.lower()
        if __debug__:
            if lower_name not in self:
                if lower_name in [key.lower() for key in self] :
                    raise Exception, "Some key are not lower case %s. Invalid use of the class!"\
                          % [key for key in self if key.lower() != key]

        if lower_name in self.auto_set:
            return 'auto'

        return dict.__getitem__(self, name.lower())

    def set(self, name, value, changeifuserset=True, user=False):
        """convenient way to change attribute.
        changeifuserset=False means that the value is NOT change is the value is not on default.
        user=True, means that the value will be marked as modified by the user
        (potentially preventing future change to the value)
        """

        # changeifuserset=False -> we need to check if the user force a value.
        if not changeifuserset:
            if name.lower() in self.user_set:
                #value modified by the user -> do nothing
                return
        self.__setitem__(name, value, change_userdefine=user)
class ProcCharacteristic(ConfigFile):
    """A class to handle information which are passed from MadGraph to the madevent
    interface."""

    def default_setup(self):
        """initialize the directory to the default value"""

        self.add_param('loop_induced', False)
        self.add_param('has_isr', False)
        self.add_param('has_fsr', False)
        self.add_param('nb_channel', 0)
        self.add_param('nexternal', 0)
        self.add_param('ninitial', 0)
        self.add_param('grouped_matrix', True)
        self.add_param('has_loops', False)
        self.add_param('bias_module', 'None')
        self.add_param('max_n_matched_jets', 0)
        self.add_param('colored_pdgs', [1, 2, 3, 4, 5])
        self.add_param('complex_mass_scheme', False)

    def read(self, finput):
        """Read the input file, this can be a path to a file,
        a file object, a str with the content of the file."""

        # when we open the file ourselves, remember to close it
        # (the original leaked the handle)
        to_close = None
        if isinstance(finput, str):
            if "\n" in finput:
                finput = finput.split('\n')
            elif os.path.isfile(finput):
                finput = to_close = open(finput)
            else:
                # py3-compatible raise (was the py2-only comma form)
                raise Exception("No such file %s" % finput)

        try:
            for line in finput:
                if '#' in line:
                    line = line.split('#', 1)[0]
                if not line:
                    continue

                if '=' in line:
                    key, value = line.split('=', 1)
                    self[key.strip()] = value
        finally:
            if to_close:
                to_close.close()

    def write(self, outputpath):
        """write the file"""

        template = "#    Information about the process      #\n"
        template += "#########################################\n"

        fsock = open(outputpath, 'w')
        fsock.write(template)

        for key, value in self.items():
            fsock.write(" %s = %s \n" % (key, value))

        fsock.close()
class GridpackCard(ConfigFile):
    """an object for the GridpackCard"""

    def default_setup(self):
        """default value for the GridpackCard"""

        self.add_param("GridRun", True)
        self.add_param("gevents", 2500)
        self.add_param("gseed", 1)
        self.add_param("ngran", -1)

    def read(self, finput):
        """Read the input file, this can be a path to a file,
        a file object, a str with the content of the file."""

        # close the handle ourselves when we opened it (was leaked)
        to_close = None
        if isinstance(finput, str):
            if "\n" in finput:
                finput = finput.split('\n')
            elif os.path.isfile(finput):
                finput = to_close = open(finput)
            else:
                # py3-compatible raise (was the py2-only comma form)
                raise Exception("No such file %s" % finput)

        try:
            for line in finput:
                line = line.split('#')[0]
                line = line.split('!')[0]
                line = line.split('=', 1)
                if len(line) != 2:
                    continue
                # grid_card lines follow the MadGraph "value = name" layout
                # (see write() below), so the right-hand token is the key
                self[line[1].strip()] = line[0].replace('\'', '').strip()
        finally:
            if to_close:
                to_close.close()

    def write(self, output_file, template=None):
        """Write the run_card in output_file according to template
        (a path to a valid run_card)"""

        if not template:
            if not MADEVENT:
                template = pjoin(MG5DIR, 'Template', 'LO', 'Cards',
                                 'grid_card_default.dat')
            else:
                template = pjoin(MEDIR, 'Cards', 'grid_card_default.dat')

        # open() instead of the py2-only file() builtin, and close the
        # template handle (was leaked)
        ftemp = open(template, 'r')
        try:
            template_lines = ftemp.readlines()
        finally:
            ftemp.close()

        text = ""
        for line in template_lines:
            nline = line.split('#')[0]
            nline = nline.split('!')[0]
            comment = line[len(nline):]
            nline = nline.split('=')
            if len(nline) != 2:
                text += line
            elif nline[1].strip() in self:
                # keep the "value = name" layout of the template
                text += ' %s\t= %s %s' % (self[nline[1].strip()], nline[1], comment)
            else:
                logger.info('Adding missing parameter %s to current run_card (with default value)' % nline[1].strip())
                text += line

        if isinstance(output_file, str):
            fsock = open(output_file, 'w')
        else:
            fsock = output_file
        fsock.write(text)
        fsock.close()
class PY8Card(ConfigFile):
""" Implements the Pythia8 card."""
def add_default_subruns(self, type):
    """ Placeholder function to allow overwriting in the PY8SubRun daughter.
    The initialization of the self.subruns attribute should of course not
    be performed in PY8SubRun."""
    # 'type' selects which default to install: the 'LHEFInputs:nSubruns'
    # parameter, or the 'subruns' attribute holding PY8SubRun objects
    if type == 'parameters':
        if "LHEFInputs:nSubruns" not in self:
            self.add_param("LHEFInputs:nSubruns", 1,
                hidden='ALWAYS_WRITTEN',
                comment="""
====================
Subrun definitions
====================
""")
    if type == 'attributes':
        if not(hasattr(self,'subruns')):
            # start with a single subrun with id 0, keyed by 'Main:subrun'
            first_subrun = PY8SubRun(subrun_id=0)
            self.subruns = dict([(first_subrun['Main:subrun'],first_subrun)])
def default_setup(self):
    """ Sets up the list of available PY8 parameters.

    Parameters fall in three groups (see the section banners below):
    visible ones, hidden ones always written to the card, and hidden
    ones written only when user_set or system_set.
    """
    # Visible parameters
    # ==================
    self.add_param("Main:numberOfEvents", -1)
    # for MLM merging
    # -1.0 means that it will be set automatically by MadGraph5_aMC@NLO
    self.add_param("JetMatching:qCut", -1.0, always_write_to_card=False)
    self.add_param("JetMatching:doShowerKt",False,always_write_to_card=False)
    # -1 means that it is automatically set.
    self.add_param("JetMatching:nJetMax", -1, always_write_to_card=False)
    # for CKKWL merging
    self.add_param("Merging:TMS", -1.0, always_write_to_card=False)
    self.add_param("Merging:Process", '<set_by_user>', always_write_to_card=False)
    # -1 means that it is automatically set.
    self.add_param("Merging:nJetMax", -1, always_write_to_card=False)
    # for both merging, chose whether to also consider different merging
    # scale values for the extra weights related to scale and PDF variations.
    self.add_param("SysCalc:fullCutVariation", False)
    # Select the HepMC output. The user can prepend 'fifo:<optional_fifo_path>'
    # to indicate that he wants to pipe the output. Or /dev/null to turn the
    # output off.
    self.add_param("HEPMCoutput:file", 'auto')

    # Hidden parameters always written out
    # ====================================
    self.add_param("Beams:frameType", 4,
        hidden=True,
        comment='Tell Pythia8 that an LHEF input is used.')
    self.add_param("HEPMCoutput:scaling", 1.0e9,
        hidden=True,
        comment='1.0 corresponds to HEPMC weight given in [mb]. We choose here the [pb] normalization.')
    self.add_param("Check:epTolErr", 1e-2,
        hidden=True,
        comment='Be more forgiving with momentum mismatches.')
    # By default it is important to disable any cut on the rapidity of the showered jets
    # during MLML merging and by default it is set to 2.5
    self.add_param("JetMatching:etaJetMax", 1000.0, hidden=True, always_write_to_card=True)

    # Hidden parameters written out only if user_set or system_set
    # ============================================================
    self.add_param("PDF:pSet", 'LHAPDF5:CT10.LHgrid', hidden=True, always_write_to_card=False,
        comment='Reminder: Parameter below is shower tune dependent.')
    self.add_param("SpaceShower:alphaSvalue", 0.118, hidden=True, always_write_to_card=False,
        comment='Reminder: Parameter below is shower tune dependent.')
    self.add_param("TimeShower:alphaSvalue", 0.118, hidden=True, always_write_to_card=False,
        comment='Reminder: Parameter below is shower tune dependent.')
    self.add_param("hadronlevel:all", True, hidden=True, always_write_to_card=False,
        comment='This allows to turn on/off hadronization alltogether.')
    self.add_param("partonlevel:mpi", True, hidden=True, always_write_to_card=False,
        comment='This allows to turn on/off MPI alltogether.')
    self.add_param("Beams:setProductionScalesFromLHEF", False, hidden=True,
        always_write_to_card=False,
        comment='This parameter is automatically set to True by MG5aMC when doing MLM merging with PY8.')
    # for MLM merging
    self.add_param("JetMatching:merge", False, hidden=True, always_write_to_card=False,
        comment='Specifiy if we are merging sample of different multiplicity.')
    self.add_param("SysCalc:qCutList", [10.0,20.0], hidden=True, always_write_to_card=False)
    # immediately put the list parameter on 'auto' (keeps the declared
    # element type but marks the value as automatically determined)
    self['SysCalc:qCutList'] = 'auto'
    self.add_param("SysCalc:qWeed",-1.0,hidden=True, always_write_to_card=False,
        comment='Value of the merging scale below which one does not even write the HepMC event.')
    self.add_param("JetMatching:doVeto", False, hidden=True, always_write_to_card=False,
        comment='Do veto externally (e.g. in SysCalc).')
    self.add_param("JetMatching:scheme", 1, hidden=True, always_write_to_card=False)
    self.add_param("JetMatching:setMad", False, hidden=True, always_write_to_card=False,
        comment='Specify one must read inputs from the MadGraph banner.')
    self.add_param("JetMatching:coneRadius", 1.0, hidden=True, always_write_to_card=False)
    self.add_param("JetMatching:nQmatch",4,hidden=True, always_write_to_card=False)
    # for CKKWL merging (common with UMEPS, UNLOPS)
    self.add_param("TimeShower:pTmaxMatch", 2, hidden=True, always_write_to_card=False)
    self.add_param("SpaceShower:pTmaxMatch", 1, hidden=True, always_write_to_card=False)
    self.add_param("SysCalc:tmsList", [10.0,20.0], hidden=True, always_write_to_card=False)
    # same 'auto' marking as for SysCalc:qCutList above
    self['SysCalc:tmsList'] = 'auto'
    self.add_param("Merging:muFac", 91.188, hidden=True, always_write_to_card=False,
        comment='Set factorisation scales of the 2->2 process.')
    self.add_param("Merging:applyVeto", False, hidden=True, always_write_to_card=False,
        comment='Do veto externally (e.g. in SysCalc).')
    self.add_param("Merging:includeWeightInXsection", True, hidden=True, always_write_to_card=False,
        comment='If turned off, then the option belows forces PY8 to keep the original weight.')
    self.add_param("Merging:muRen", 91.188, hidden=True, always_write_to_card=False,
        comment='Set renormalization scales of the 2->2 process.')
    self.add_param("Merging:muFacInME", 91.188, hidden=True, always_write_to_card=False,
        comment='Set factorisation scales of the 2->2 Matrix Element.')
    self.add_param("Merging:muRenInME", 91.188, hidden=True, always_write_to_card=False,
        comment='Set renormalization scales of the 2->2 Matrix Element.')
    self.add_param("SpaceShower:rapidityOrder", False, hidden=True, always_write_to_card=False)
    self.add_param("Merging:nQuarksMerge",4,hidden=True, always_write_to_card=False)
    # To be added in subruns for CKKWL
    self.add_param("Merging:mayRemoveDecayProducts", False, hidden=True, always_write_to_card=False)
    self.add_param("Merging:doKTMerging", False, hidden=True, always_write_to_card=False)
    self.add_param("Merging:Dparameter", 0.4, hidden=True, always_write_to_card=False)
    self.add_param("Merging:doPTLundMerging", False, hidden=True, always_write_to_card=False)

    # Special Pythia8 paremeters useful to simplify the shower.
    self.add_param("BeamRemnants:primordialKT", True, hidden=True, always_write_to_card=False, comment="see http://home.thep.lu.se/~torbjorn/pythia82html/BeamRemnants.html")
    self.add_param("PartonLevel:Remnants", True, hidden=True, always_write_to_card=False, comment="Master switch for addition of beam remnants. Cannot be used to generate complete events")
    self.add_param("Check:event", True, hidden=True, always_write_to_card=False, comment="check physical sanity of the events")
    self.add_param("TimeShower:QEDshowerByQ", True, hidden=True, always_write_to_card=False, comment="Allow quarks to radiate photons for FSR, i.e. branchings q -> q gamma")
    self.add_param("TimeShower:QEDshowerByL", True, hidden=True, always_write_to_card=False, comment="Allow leptons to radiate photons for FSR, i.e. branchings l -> l gamma")
    self.add_param("SpaceShower:QEDshowerByQ", True, hidden=True, always_write_to_card=False, comment="Allow quarks to radiate photons for ISR, i.e. branchings q -> q gamma")
    self.add_param("SpaceShower:QEDshowerByL", True, hidden=True, always_write_to_card=False, comment="Allow leptons to radiate photonsfor ISR, i.e. branchings l -> l gamma")
    self.add_param("PartonLevel:FSRinResonances", True, hidden=True, always_write_to_card=False, comment="Do not allow shower to run from decay product of unstable particle")
    self.add_param("ProcessLevel:resonanceDecays", True, hidden=True, always_write_to_card=False, comment="Do not allow unstable particle to decay.")

    # Add parameters controlling the subruns execution flow.
    # These parameters should not be part of PY8SubRun daughter.
    self.add_default_subruns('parameters')
def __init__(self, *args, **opts):
    """Set up the card's book-keeping containers, then delegate to the
    ConfigFile constructor (which triggers default_setup and processes
    *args/**opts)."""
    # Parameters which are not printed in the card unless they are
    # 'user_set' or 'system_set' or part of the
    # self.hidden_params_to_always_print set.
    self.hidden_param = []
    self.hidden_params_to_always_write = set()
    self.visible_params_to_always_write = set()
    # List of parameters that should never be written out given the current context.
    self.params_to_never_write = set()
    # Parameters which have been set by the system (i.e. MG5 itself during
    # the regular course of the shower interface)
    self.system_set = set()
    # Add attributes controlling the subruns execution flow.
    # These attributes should not be part of PY8SubRun daughter.
    self.add_default_subruns('attributes')
    # Finally initialize the underlying ConfigFile (fills in the defaults).
    super(PY8Card, self).__init__(*args, **opts)
def add_param(self, name, value, hidden=False, always_write_to_card=True,
              comment=None):
    """Declare a parameter of the card.

    'value' provides both the default and the type (int/float/bool/str)
    of the input. 'hidden' hides the parameter from the user;
    'always_write_to_card' forces it into every write-out (as opposed to
    only when user_set or system_set); 'comment' is a text placed above
    hidden parameters when the card is written.
    """
    super(PY8Card, self).add_param(name, value, comment=comment)
    lowered = name.lower()
    if hidden:
        self.hidden_param.append(lowered)
        if always_write_to_card:
            self.hidden_params_to_always_write.add(lowered)
    elif always_write_to_card:
        self.visible_params_to_always_write.add(lowered)
    if comment is not None:
        if not isinstance(comment, str):
            raise MadGraph5Error("Option 'comment' must be a string, not"+\
                " '%s'."%str(comment))
def add_subrun(self, py8_subrun):
    """Add a subrun to this PY8 Card.

    The subrun must already carry a valid 'Main:subrun' ID and that ID
    must not clash with an existing subrun. Also bumps
    'LHEFInputs:nSubruns' (unless user-set) so it covers all subruns.
    """
    assert(isinstance(py8_subrun,PY8SubRun))
    # py3-compatible raise syntax (was the py2-only "raise X, msg" form),
    # consistent with the call-syntax raises used elsewhere in this file.
    if py8_subrun['Main:subrun']==-1:
        raise MadGraph5Error("Make sure to correctly set the subrun ID"+\
            " 'Main:subrun' *before* adding it to the PY8 Card.")
    if py8_subrun['Main:subrun'] in self.subruns:
        raise MadGraph5Error("A subrun with ID '%s'"%py8_subrun['Main:subrun']+\
            " is already present in this PY8 card. Remove it first, or "+\
            " access it directly.")
    self.subruns[py8_subrun['Main:subrun']] = py8_subrun
    if not 'LHEFInputs:nSubruns' in self.user_set:
        self['LHEFInputs:nSubruns'] = max(self.subruns.keys())
def userSet(self, name, value, **opts):
    """Set a card entry on behalf of the user; the entry loses any
    previous system_set mark."""
    self.__setitem__(name, value, change_userdefine=True, **opts)
    # a user-level assignment supersedes a system-level one
    self.system_set.discard(name.lower())
def vetoParamWriteOut(self, name):
    """Exclude parameter 'name' from any future invocation of the
    'write' function of this card."""
    lowered = name.lower()
    self.params_to_never_write.add(lowered)
def systemSet(self, name, value, **opts):
    """Set a card entry with system authority: applied only when the
    user has not already fixed it, unless force=True is given."""
    force = opts.pop('force', False)
    if not force and name.lower() in self.user_set:
        return
    self.__setitem__(name, value, change_userdefine=False, **opts)
    self.system_set.add(name.lower())
def MadGraphSet(self, name, value, **opts):
    """Set a card entry, but only if the entry is absent or not already
    user_set (force=True overrides the user_set protection)."""
    force = opts.pop('force', False)
    key = name.lower()
    if key not in self or force or key not in self.user_set:
        self.__setitem__(name, value, change_userdefine=False, **opts)
        self.system_set.add(key)
def defaultSet(self, name, value, **opts):
    """Set a card entry with default authority: the value is recorded
    without being flagged as user_set or system_set."""
    self.__setitem__(name, value, change_userdefine=False, **opts)
@staticmethod
def pythia8_formatting(value, formatv=None):
"""format the variable into pythia8 card convention.
The type is detected by default"""
if not formatv:
if isinstance(value,UnknownType):
formatv = 'unknown'
elif isinstance(value, bool):
formatv = 'bool'
elif isinstance(value, int):
formatv = 'int'
elif isinstance(value, float):
formatv = 'float'
elif isinstance(value, str):
formatv = 'str'
elif isinstance(value, list):
formatv = 'list'
else:
logger.debug("unknow format for pythia8_formatting: %s" , value)
formatv = 'str'
else:
assert formatv
if formatv == 'unknown':
# No formatting then
return str(value)
if formatv == 'bool':
if str(value) in ['1','T','.true.','True','on']:
return 'on'
else:
return 'off'
elif formatv == 'int':
try:
return str(int(value))
except ValueError:
fl = float(value)
if int(fl) == fl:
return str(int(fl))
else:
raise
elif formatv == 'float':
return '%.10e' % float(value)
elif formatv == 'shortfloat':
return '%.3f' % float(value)
elif formatv == 'str':
return "%s" % value
elif formatv == 'list':
if len(value) and isinstance(value[0],float):
return ','.join([PY8Card.pythia8_formatting(arg, 'shortfloat') for arg in value])
else:
return ','.join([PY8Card.pythia8_formatting(arg) for arg in value])
def write(self, output_file, template, read_subrun=False,
          print_only_visible=False, direct_pythia_input=False, add_missing=True):
    """ Write the card to output_file using a specific template.
    > 'print_only_visible' specifies whether or not the hidden parameters
      should be written out if they are in the hidden_params_to_always_write
      list and system_set.
    > If 'direct_pythia_input' is true, then visible parameters which are not
      in the self.visible_params_to_always_write list and are not user_set
      or system_set are commented.
    > If 'add_missing' is False then parameters that should be written_out but are absent
      from the template will not be written out."""
    # First list the visible parameters
    visible_param = [p for p in self if p.lower() not in self.hidden_param
                     or p.lower() in self.user_set]
    # Filter against list of parameters vetoed for write-out
    visible_param = [p for p in visible_param
                     if p.lower() not in self.params_to_never_write]
    # Now the hidden param which must be written out
    if print_only_visible:
        hidden_output_param = []
    else:
        hidden_output_param = [p for p in self if p.lower() in self.hidden_param and
                               not p.lower() in self.user_set and
                               (p.lower() in self.hidden_params_to_always_write or
                                p.lower() in self.system_set)]
    # Filter against list of parameters vetoed for write-out.
    # BUGFIX: compare the lower-cased name; params_to_never_write only stores
    # lower-cased names (see vetoParamWriteOut), so the previous comparison
    # without .lower() could never match mixed-case parameters.
    hidden_output_param = [p for p in hidden_output_param
                           if p.lower() not in self.params_to_never_write]
    if print_only_visible:
        subruns = []
    else:
        if not read_subrun:
            subruns = sorted(self.subruns.keys())

    # Store the subruns to write in a dictionary, with its ID in key
    # and the corresponding stringstream in value
    subruns_to_write = {}

    # Sort these parameters nicely so as to put together parameters
    # belonging to the same group (i.e. prefix before the ':' in their name).
    def group_params(params):
        if len(params) == 0:
            return []
        groups = {}
        for p in params:
            try:
                groups[':'.join(p.split(':')[:-1])].append(p)
            except KeyError:
                groups[':'.join(p.split(':')[:-1])] = [p, ]
        res = sum(groups.values(), [])
        # Make sure 'Main:subrun' appears first
        if 'Main:subrun' in res:
            res.insert(0, res.pop(res.index('Main:subrun')))
        # Make sure 'LHEFInputs:nSubruns' appears last
        if 'LHEFInputs:nSubruns' in res:
            res.append(res.pop(res.index('LHEFInputs:nSubruns')))
        return res

    visible_param = group_params(visible_param)
    hidden_output_param = group_params(hidden_output_param)

    # First dump in a temporary_output (might need to have a second pass
    # at the very end to update 'LHEFInputs:nSubruns')
    output = StringIO.StringIO()

    # Setup template from which to read
    if isinstance(template, str):
        if os.path.isfile(template):
            tmpl = open(template, 'r')
        elif '\n' in template:
            tmpl = StringIO.StringIO(template)
        else:
            # BUGFIX: the message previously interpolated the undefined name
            # 'file_input' (NameError); also use py3-compatible raise syntax.
            raise Exception("File input '%s' not found." % template)
    elif template is None:
        # Then use a dummy empty StringIO, hence skipping the reading
        tmpl = StringIO.StringIO()
    elif isinstance(template, (StringIO.StringIO, file)):
        tmpl = template
    else:
        raise MadGraph5Error("Incorrect type for argument 'template': %s"%
                                                template.__class__.__name__)

    # Read the template
    last_pos = tmpl.tell()
    line = tmpl.readline()
    started_subrun_reading = False
    while line != '':
        # Skip comments
        if line.strip().startswith('!') or line.strip().startswith('\n'):
            output.write(line)
            # Proceed to next line
            last_pos = tmpl.tell()
            line = tmpl.readline()
            continue
        # Read parameter
        try:
            param_entry, value_entry = line.split('=')
            param = param_entry.strip()
            value = value_entry.strip()
        except ValueError:
            line = line.replace('\n', '')
            # py3-compatible raise syntax
            raise MadGraph5Error("Could not read line '%s' of Pythia8 card."%
                                                                        line)
        # Read a subrun if detected:
        if param == 'Main:subrun':
            if read_subrun:
                if not started_subrun_reading:
                    # Record that the subrun reading has started and proceed
                    started_subrun_reading = True
                else:
                    # We encountered the next subrun. rewind last line and exit
                    tmpl.seek(last_pos)
                    break
            else:
                # Start the reading of this subrun
                tmpl.seek(last_pos)
                subruns_to_write[int(value)] = StringIO.StringIO()
                if int(value) in subruns:
                    self.subruns[int(value)].write(subruns_to_write[int(value)],
                                                   tmpl, read_subrun=True)
                    # Remove this subrun ID from the list
                    subruns.pop(subruns.index(int(value)))
                else:
                    # Unknow subrun, create a dummy one
                    DummySubrun = PY8SubRun()
                    # Remove all of its variables (so that nothing is overwritten)
                    DummySubrun.clear()
                    DummySubrun.write(subruns_to_write[int(value)],
                                      tmpl, read_subrun=True,
                                      print_only_visible=print_only_visible,
                                      direct_pythia_input=direct_pythia_input)
                    logger.info('Adding new unknown subrun with ID %d.'%
                                int(value))
                # Proceed to next line
                last_pos = tmpl.tell()
                line = tmpl.readline()
                continue
        # Change parameters which must be output
        if param in visible_param:
            new_value = PY8Card.pythia8_formatting(self[param])
            visible_param.pop(visible_param.index(param))
        elif param in hidden_output_param:
            new_value = PY8Card.pythia8_formatting(self[param])
            hidden_output_param.pop(hidden_output_param.index(param))
        else:
            # Just copy parameters which don't need to be specified
            if param.lower() not in self.params_to_never_write:
                output.write(line)
            else:
                output.write('! The following parameter was forced to be commented out by MG5aMC.\n')
                output.write('! %s'%line)
            # Proceed to next line
            last_pos = tmpl.tell()
            line = tmpl.readline()
            continue
        # Substitute the value.
        # If it is directly the pythia input, then don't write the param if it
        # is not in the list of visible_params_to_always_write and was
        # not user_set or system_set.
        # BUGFIX: use a dedicated local 'line_format' instead of rebinding
        # 'template'; the original shadowed the 'template' argument, which
        # corrupted the recursive self.write(output, template, ...) call below.
        if ((not direct_pythia_input) or
                (param.lower() in self.visible_params_to_always_write) or
                (param.lower() in self.user_set) or
                (param.lower() in self.system_set)):
            line_format = '%s=%s'
        else:
            # These are parameters that the user can edit in AskEditCards
            # but if neither the user nor the system edited them,
            # then they shouldn't be passed to Pythia
            line_format = '!%s=%s'
        output.write(line_format % (param_entry,
                                    value_entry.replace(value, new_value)))
        # Proceed to next line
        last_pos = tmpl.tell()
        line = tmpl.readline()

    # If add_missing is False, make sure to empty the list of remaining parameters
    if not add_missing:
        visible_param = []
        hidden_output_param = []

    # Now output the missing parameters. Warn about visible ones.
    if len(visible_param) > 0 and not template is None:
        output.write(
"""!
! Additional general parameters%s.
!
"""%(' for subrun %d'%self['Main:subrun'] if 'Main:subrun' in self else ''))
    for param in visible_param:
        value = PY8Card.pythia8_formatting(self[param])
        output.write('%s=%s\n'%(param,value))
        if template is None:
            if param == 'Main:subrun':
                output.write(
"""!
! Definition of subrun %d
!
"""%self['Main:subrun'])
        elif param.lower() not in self.hidden_param:
            logger.debug('Adding parameter %s (missing in the template) to current '+\
                         'pythia8 card (with value %s)',param, value)
    if len(hidden_output_param) > 0 and not template is None:
        output.write(
"""!
! Additional technical parameters%s set by MG5_aMC.
!
"""%(' for subrun %d'%self['Main:subrun'] if 'Main:subrun' in self else ''))
    for param in hidden_output_param:
        # Write the stored comment (if any) above the hidden parameter
        if param.lower() in self.comments:
            comment = '\n'.join('! %s'%c for c in
                                self.comments[param.lower()].split('\n'))
            output.write(comment+'\n')
        output.write('%s=%s\n'%(param,PY8Card.pythia8_formatting(self[param])))

    # Don't close the file if we were reading a subrun, but simply write
    # output and return now
    if read_subrun:
        output_file.write(output.getvalue())
        return

    # Now add subruns not present in the template
    for subrunID in subruns:
        new_subrun = StringIO.StringIO()
        self.subruns[subrunID].write(new_subrun, None, read_subrun=True)
        subruns_to_write[subrunID] = new_subrun

    # Add all subruns to the output, in the right order
    for subrunID in sorted(subruns_to_write):
        output.write(subruns_to_write[subrunID].getvalue())

    # If 'LHEFInputs:nSubruns' is not user_set, then make sure it is
    # updated at least larger or equal to the maximum SubRunID
    if 'LHEFInputs:nSubruns'.lower() not in self.user_set and \
            len(subruns_to_write) > 0 and self['LHEFInputs:nSubruns'] < \
            max(subruns_to_write.keys()):
        logger.info("Updating PY8 parameter 'LHEFInputs:nSubruns' to "+
                    "%d so as to cover all defined subruns."%max(subruns_to_write.keys()))
        self['LHEFInputs:nSubruns'] = max(subruns_to_write.keys())
        # Second pass with the updated value (the 'template' argument is
        # intact thanks to the 'line_format' fix above).
        output = StringIO.StringIO()
        self.write(output, template, print_only_visible=print_only_visible)

    # Write output
    if isinstance(output_file, str):
        out = open(output_file, 'w')
        out.write(output.getvalue())
        out.close()
    else:
        output_file.write(output.getvalue())
def read(self, file_input, read_subrun=False, setter='default'):
    """Read the input file, this can be a path to a file,
    a file object, a str with the content of the file.
    The setter option choses the authority that sets potential
    modified/new parameters. It can be either:
    'default' or 'user' or 'system'"""
    if isinstance(file_input, str):
        if "\n" in file_input:
            finput = StringIO.StringIO(file_input)
        elif os.path.isfile(file_input):
            finput = open(file_input)
        else:
            # py3-compatible raise syntax (was the py2-only "raise X, msg" form)
            raise Exception("File input '%s' not found." % file_input)
    elif isinstance(file_input, (StringIO.StringIO, file)):
        finput = file_input
    else:
        raise MadGraph5Error("Incorrect type for argument 'file_input': %s"%
                                                file_input.__class__.__name__)
    # Read the card line by line
    last_pos = finput.tell()
    line = finput.readline()
    started_subrun_reading = False
    while line != '':
        # Skip comments
        if line.strip().startswith('!') or line.strip() == '':
            # proceed to next line
            last_pos = finput.tell()
            line = finput.readline()
            continue
        # Read parameter
        try:
            param, value = line.split('=', 1)
            param = param.strip()
            value = value.strip()
        except ValueError:
            line = line.replace('\n', '')
            # py3-compatible raise syntax
            raise MadGraph5Error("Could not read line '%s' of Pythia8 card."%
                                                                        line)
        # Strip trailing inline comments from the value
        if '!' in value:
            value, _ = value.split('!', 1)
        # Read a subrun if detected:
        if param == 'Main:subrun':
            if read_subrun:
                if not started_subrun_reading:
                    # Record that the subrun reading has started and proceed
                    started_subrun_reading = True
                else:
                    # We encountered the next subrun. rewind last line and exit
                    finput.seek(last_pos)
                    return
            else:
                # Start the reading of this subrun
                finput.seek(last_pos)
                if int(value) in self.subruns:
                    self.subruns[int(value)].read(finput, read_subrun=True,
                                                  setter=setter)
                else:
                    # Unknow subrun, create a dummy one
                    NewSubrun = PY8SubRun()
                    NewSubrun.read(finput, read_subrun=True, setter=setter)
                    self.add_subrun(NewSubrun)
                # proceed to next line
                last_pos = finput.tell()
                line = finput.readline()
                continue
        # Read parameter. The case of a parameter not defined in the card is
        # handled directly in ConfigFile.
        # Use the appropriate authority to set the new/changed variable
        if setter == 'user':
            self.userSet(param, value)
        elif setter == 'system':
            self.systemSet(param, value)
        else:
            self.defaultSet(param, value)
        # proceed to next line
        last_pos = finput.tell()
        line = finput.readline()
class PY8SubRun(PY8Card):
    """A single subrun section of a Pythia8 card."""

    def add_default_subruns(self, type):
        """ Overloading of the homonym function called in the __init__ of PY8Card.
        A subrun must not itself create default subruns, so this is a no-op."""
        pass

    def __init__(self, *args, **opts):
        """ Initialize a subrun; the ID may be supplied through the
        'subrun_id' keyword (defaults to -1, to be set manually later)."""
        subrun_id = opts.pop('subrun_id', -1)
        super(PY8SubRun, self).__init__(*args, **opts)
        self['Main:subrun'] = subrun_id

    def default_setup(self):
        """Sets up the list of available PY8SubRun parameters."""
        # Start from all default PY8Card parameters...
        super(PY8SubRun, self).default_setup()
        # ...and hide every one of them.
        self.hidden_param = [key.lower() for key in self.keys()]
        self.hidden_params_to_always_write = set()
        self.visible_params_to_always_write = set()
        # Finally expose the two subrun-specific parameters.
        self.add_param("Main:subrun", -1)
        self.add_param("Beams:LHEF", "events.lhe.gz")
class RunCard(ConfigFile):
filename = 'run_card'
def __new__(cls, finput=None, **opt):
    """Dispatch instantiation to RunCardLO or RunCardNLO depending on the
    content of 'finput' (path, card text, or an existing RunCard)."""
    if cls is RunCard:
        if not finput:
            target_class = RunCardLO
        elif isinstance(finput, cls):
            target_class = finput.__class__
        elif isinstance(finput, str):
            if '\n' not in finput:
                # 'finput' is a path: read it. BUGFIX: use a context manager
                # so the file handle is closed (open(...).read() leaked it).
                with open(finput) as fsock:
                    finput = fsock.read()
            # NLO run cards are recognised by this NLO-only parameter
            if 'req_acc_FO' in finput:
                target_class = RunCardNLO
            else:
                target_class = RunCardLO
        else:
            # unsupported input type: keep the historical behaviour of
            # returning None (callers rely on it) rather than raising
            return None
        return super(RunCard, cls).__new__(target_class, finput, **opt)
    else:
        return super(RunCard, cls).__new__(cls, finput, **opt)
def __init__(self, *args, **opts):
    """Initialize the card's book-keeping containers, then delegate to
    ConfigFile.__init__ (which runs default_setup)."""
    # The following parameter are updated in the defaultsetup stage.
    # parameters for which no warning should be raised if not defined
    self.hidden_param = []
    # in which include file the parameter should be written
    self.includepath = collections.defaultdict(list)
    # some parameters have a different name in the fortran code
    self.fortran_name = {}
    # parameters which are not supported anymore. (no action on the code)
    self.legacy_parameter = {}
    # a list with all the cut variables
    self.cuts_parameter = []
    # parameters added where legacy requires an older value.
    self.system_default = {}
    super(RunCard, self).__init__(*args, **opts)
def add_param(self, name, value, fortran_name=None, include=True,
              hidden=False, legacy=False, cut=False, system=False, sys_default=None,
              **opts):
    """Declare a run_card parameter.

    value fixes both the default and the type (int/float/bool/str);
    fortran_name is the name associated to it in the f77 code;
    include selects whether/where the value goes in an include file;
    hidden/system parameters are not expected to be defined by the user;
    legacy parameters are not used anymore (no action on the code);
    cut registers the parameter as a cut (so all cuts can be switched off);
    sys_default is the default used when the parameter is absent from the card.
    """
    super(RunCard, self).add_param(name, value, system=system, **opts)
    lowered = name.lower()
    if fortran_name:
        self.fortran_name[lowered] = fortran_name
    if legacy:
        # legacy entries are recorded but never written to an include file
        self.legacy_parameter[lowered] = value
        include = False
    if include is True:
        self.includepath[True].append(lowered)
    elif include:
        self.includepath[include].append(lowered)
    if hidden or system:
        self.hidden_param.append(lowered)
    if cut:
        self.cuts_parameter.append(lowered)
    if sys_default is not None:
        self.system_default[lowered] = sys_default
def read(self, finput, consistency=True):
    """Read the input file, this can be a path to a file,
    a file object, a str with the content of the file.
    If 'consistency' is truthy, check_validity() is run afterwards; with
    consistency='warning' an InvalidRunCard only logs a warning."""
    if isinstance(finput, str):
        if "\n" in finput:
            finput = finput.split('\n')
        elif os.path.isfile(finput):
            finput = open(finput)
        else:
            # py3-compatible raise syntax (was the py2-only "raise X, msg" form)
            raise Exception("No such file %s" % finput)

    for line in finput:
        # strip '#' and '!' comments; card lines are of the form "value = name"
        line = line.split('#')[0]
        line = line.split('!')[0]
        line = line.rsplit('=',1)
        if len(line) != 2:
            continue
        value, name = line
        name = name.lower().strip()
        if name not in self and ('min' in name or 'max' in name):
            #looks like an entry added by one user -> add it nicely
            self.add_param(name, float(value), hidden=True, cut=True)
        else:
            self.set( name, value, user=True)
    # parameter not set in the run_card can be set to compatiblity value
    if consistency:
        try:
            self.check_validity()
        except InvalidRunCard as error:
            # py3-compatible except syntax (was "except InvalidRunCard, error")
            if consistency == 'warning':
                logger.warning(str(error))
            else:
                raise
def write(self, output_file, template=None, python_template=False):
    """Write the run_card in output_file according to template
    (a path to a valid run_card)"""
    to_write = set(self.user_set)
    if not template:
        raise Exception
    if python_template and not to_write:
        # python-formatted template: a single %-substitution does the job
        # (open() replaces the py2-only file() builtin, consistently with
        # the rest of this file, and the handle is properly closed)
        with open(template, 'r') as tmpl:
            raw_template = tmpl.read()
        if not self.list_parameter:
            text = raw_template % self
        else:
            data = dict(self)
            for name in self.list_parameter:
                data[name] = ', '.join(str(v) for v in data[name])
            text = raw_template % data
    else:
        text = ""
        with open(template, 'r') as tmpl:
            for line in tmpl:
                nline = line.split('#')[0]
                nline = nline.split('!')[0]
                comment = line[len(nline):]
                nline = nline.split('=')
                if len(nline) != 2:
                    text += line
                elif nline[1].strip() in self:
                    name = nline[1].strip().lower()
                    value = self[name]
                    if name in self.list_parameter:
                        value = ', '.join([str(v) for v in value])
                    if python_template:
                        text += line % {nline[1].strip(): value, name: value}
                    else:
                        if not comment or comment[-1] != '\n':
                            endline = '\n'
                        else:
                            endline = ''
                        text += ' %s\t= %s %s%s' % (value, name, comment, endline)
                    if name.lower() in to_write:
                        to_write.remove(nline[1].strip().lower())
                else:
                    # BUGFIX: the arguments were passed as one tuple, which
                    # breaks the lazy %-formatting of the logging call, and
                    # 'name' was stale/undefined in this branch -- log the
                    # parameter actually found on this template line.
                    logger.info('Adding missing parameter %s to current %s (with default value)',
                                nline[1].strip(), self.filename)
                    text += line
    if to_write:
        text+="""#*********************************************************************
# Additional parameter
#*********************************************************************
"""
        for key in to_write:
            text += ' %s\t= %s # %s\n' % (self[key], key, 'hidden parameter')
    if isinstance(output_file, str):
        fsock = open(output_file,'w')
        fsock.write(text)
        fsock.close()
    else:
        output_file.write(text)
def get_default(self, name, default=None, log_level=None):
    """return self[name] if exist otherwise default. log control if we
    put a warning or not if we use the default value"""
    lower_name = name.lower()
    if lower_name not in self.user_set:
        # choose a log level: quieter for parameters the user is not
        # expected to set, a visible INFO (20) otherwise
        if log_level is None:
            if lower_name in self.system_only:
                log_level = 5
            elif lower_name in self.auto_set:
                log_level = 5
            elif lower_name in self.hidden_param:
                log_level = 10
            else:
                log_level = 20
        if not default:
            # NOTE(review): any falsy 'default' (None, 0, False, '') triggers
            # this fallback to the value stored in the card -- presumably
            # intended, but confirm before passing 0/False as 'default'.
            default = dict.__getitem__(self, name.lower())
        logger.log(log_level, '%s missed argument %s. Takes default: %s'
                   % (self.filename, name, default))
        # record the default so later accesses are consistent
        self[name] = default
        return default
    else:
        return self[name]
@staticmethod
def format(formatv, value):
    """for retro compatibility"""
    logger.debug("please use f77_formatting instead of format")
    # BUGFIX: 'self' is not defined inside a @staticmethod; delegate through
    # the class instead (previously a NameError on every call).
    return RunCard.f77_formatting(value, formatv=formatv)
@staticmethod
def f77_formatting(value, formatv=None):
"""format the variable into fortran. The type is detected by default"""
if not formatv:
if isinstance(value, bool):
formatv = 'bool'
elif isinstance(value, int):
formatv = 'int'
elif isinstance(value, float):
formatv = 'float'
elif isinstance(value, str):
formatv = 'str'
else:
logger.debug("unknow format for f77_formatting: %s" , str(value))
formatv = 'str'
else:
assert formatv
if formatv == 'bool':
if str(value) in ['1','T','.true.','True']:
return '.true.'
else:
return '.false.'
elif formatv == 'int':
try:
return str(int(value))
except ValueError:
fl = float(value)
if int(fl) == fl:
return str(int(fl))
else:
raise
elif formatv == 'float':
if isinstance(value, str):
value = value.replace('d','e')
return ('%.10e' % float(value)).replace('e','d')
elif formatv == 'str':
# Check if it is a list
if value.strip().startswith('[') and value.strip().endswith(']'):
elements = (value.strip()[1:-1]).split()
return ['_length = %d'%len(elements)]+\
['(%d) = %s'%(i+1, elem.strip()) for i, elem in \
enumerate(elements)]
else:
return "'%s'" % value
def check_validity(self):
    """check that parameter missing in the card are set to the expected value"""
    # enforce compatibility defaults when the user did not set the parameter
    for name, value in self.system_default.items():
        self.set(name, value, changeifuserset=False)
    # warn when a deprecated parameter was changed from its frozen value
    for name in self.legacy_parameter:
        if self[name] != self.legacy_parameter[name]:
            logger.warning("The parameter %s is not supported anymore this parameter will be ignored." % name)
default_include_file = 'run_card.inc'
def update_system_parameter_for_include(self):
    """Hook updating hidden system-only parameters just before the include
    files are written. The base implementation does nothing; subclasses
    override it when needed."""
    return
def write_include_file(self, output_dir):
    """Write the various include file in output_dir.
    The entry True of self.includepath will be written in run_card.inc
    The entry False will not be written anywhere"""
    # ensure that all parameter are coherent and fix those if needed
    self.check_validity()
    # ensure that system only parameter are correctly set
    self.update_system_parameter_for_include()
    for incname in self.includepath:
        # True maps to the default include file; any other key is
        # itself the include-file name.
        # NOTE(review): an incname of False would produce a bogus path here;
        # presumably add_param never registers one -- confirm.
        if incname is True:
            pathinc = self.default_include_file
        else:
            pathinc = incname
        fsock = file_writers.FortranWriter(pjoin(output_dir,pathinc))
        for key in self.includepath[incname]:
            #define the fortran name
            if key in self.fortran_name:
                fortran_name = self.fortran_name[key]
            else:
                fortran_name = key
            #get the value with warning if the user didn't set it
            value = self.get_default(key)
            # Special treatment for strings containing a list of
            # strings. Convert it to a list of strings
            if isinstance(value, list):
                # in case of a list, add the length of the list as 0th
                # element in fortran. Only in case of integer or float
                # list (not for bool nor string)
                targettype = self.list_parameter[key]
                if targettype is bool:
                    pass
                elif targettype is int:
                    line = '%s(%s) = %s \n' % (fortran_name, 0, self.f77_formatting(len(value)))
                    fsock.writelines(line)
                elif targettype is float:
                    line = '%s(%s) = %s \n' % (fortran_name, 0, self.f77_formatting(float(len(value))))
                    fsock.writelines(line)
                # output the rest of the list in fortran (1-based indexing)
                for i,v in enumerate(value):
                    line = '%s(%s) = %s \n' % (fortran_name, i+1, self.f77_formatting(v))
                    fsock.writelines(line)
            elif isinstance(value, dict):
                # dict entries carry their own fortran names as keys
                for fortran_name, onevalue in value.items():
                    line = '%s = %s \n' % (fortran_name, self.f77_formatting(onevalue))
                    fsock.writelines(line)
            else:
                line = '%s = %s \n' % (fortran_name, self.f77_formatting(value))
                fsock.writelines(line)
        fsock.close()
def get_banner_init_information(self):
    """return a dictionary with the information needed to write
    the first line of the <init> block of the lhe file."""
    output = {}
    def get_idbmup(lpp):
        """return the particle colliding pdg code"""
        # NOTE(review): math.copysign returns a float (e.g. 2212.0); the
        # consumer presumably formats it as an integer -- confirm.
        if lpp in (1,2, -1,-2):
            return math.copysign(2212, lpp)
        elif lpp in (3,-3):
            return math.copysign(11, lpp)
        elif lpp == 0:
            #logger.critical("Fail to write correct idbmup in the lhe file. Please correct those by hand")
            return 0
        else:
            return lpp
    # beam identities and energies
    output["idbmup1"] = get_idbmup(self['lpp1'])
    output["idbmup2"] = get_idbmup(self['lpp2'])
    output["ebmup1"] = self["ebeam1"]
    output["ebmup2"] = self["ebeam2"]
    # pdf author group (always 0 here) and pdf set ids
    output["pdfgup1"] = 0
    output["pdfgup2"] = 0
    output["pdfsup1"] = self.get_pdf_id(self["pdlabel"])
    output["pdfsup2"] = self.get_pdf_id(self["pdlabel"])
    return output
def get_pdf_id(self, pdf):
    """Translate a pdlabel into the corresponding LHAPDF set id."""
    if pdf == "lhapdf":
        # for lhapdf the id is taken from the card's 'lhaid' entry
        lhaid = self["lhaid"]
        return lhaid[0] if isinstance(lhaid, list) else lhaid
    # hard-coded ids of the internally supported pdf sets
    known_ids = {'none': 0, 'mrs02nl':20250, 'mrs02nn':20270, 'cteq4_m': 19150,
                 'cteq4_l':19170, 'cteq4_d':19160, 'cteq5_m':19050,
                 'cteq5_d':19060,'cteq5_l':19070,'cteq5m1':19051,
                 'cteq6_m':10000,'cteq6_l':10041,'cteq6l1':10042,
                 'nn23lo':246800,'nn23lo1':247000,'nn23nlo':244800
                 }
    return known_ids[pdf]
def get_lhapdf_id(self):
    """Return the LHAPDF set id matching the card's current pdlabel."""
    return self.get_pdf_id(self['pdlabel'])
def remove_all_cut(self):
    """Reset every registered cut parameter to its pass-through value
    (False for switches, 0 for minima, -1 for maxima and eta cuts)."""
    for name in self.cuts_parameter:
        current = self[name]
        if isinstance(current, bool):
            self[name] = False
        elif 'min' in name:
            self[name] = 0
        elif 'max' in name:
            self[name] = -1
        elif 'eta' in name:
            self[name] = -1
        else:
            self[name] = 0
class RunCardLO(RunCard):
"""an object to handle in a nice way the run_card information"""
def default_setup(self):
"""default value for the run_card.dat"""
self.add_param("run_tag", "tag_1", include=False)
self.add_param("gridpack", False)
self.add_param("time_of_flight", -1.0, include=False, hidden=True)
self.add_param("nevents", 10000)
self.add_param("iseed", 0)
self.add_param("lpp1", 1, fortran_name="lpp(1)")
self.add_param("lpp2", 1, fortran_name="lpp(2)")
self.add_param("ebeam1", 6500.0, fortran_name="ebeam(1)")
self.add_param("ebeam2", 6500.0, fortran_name="ebeam(2)")
self.add_param("polbeam1", 0.0, fortran_name="pb1")
self.add_param("polbeam2", 0.0, fortran_name="pb2")
self.add_param("pdlabel", "nn23lo1")
self.add_param("lhaid", 230000, hidden=True)
self.add_param("fixed_ren_scale", False)
self.add_param("fixed_fac_scale", False)
self.add_param("scale", 91.1880)
self.add_param("dsqrt_q2fact1", 91.1880, fortran_name="sf1")
self.add_param("dsqrt_q2fact2", 91.1880, fortran_name="sf2")
self.add_param("dynamical_scale_choice", -1, comment="\'-1\' is based on CKKW back clustering (following feynman diagram).\n \'1\' is the sum of transverse energy.\n '2' is HT (sum of the transverse mass)\n '3' is HT/2\n '4' is the center of mass energy")
# Bias module options
self.add_param("bias_module", 'None', include=False)
self.add_param('bias_parameters', {'__type__':1.0}, include='BIAS/bias.inc')
#matching
self.add_param("scalefact", 1.0)
self.add_param("ickkw", 0, comment="\'0\' for standard fixed order computation.\n\'1\' for MLM merging activates alphas and pdf re-weighting according to a kt clustering of the QCD radiation.")
self.add_param("highestmult", 1, fortran_name="nhmult", hidden=True)
self.add_param("ktscheme", 1, hidden=True)
self.add_param("alpsfact", 1.0)
self.add_param("chcluster", False, hidden=True)
self.add_param("pdfwgt", True, hidden=True)
self.add_param("asrwgtflavor", 5, comment = 'highest quark flavor for a_s reweighting in MLM')
self.add_param("clusinfo", True)
self.add_param("lhe_version", 3.0)
self.add_param("event_norm", "average", include=False, sys_default='sum')
#cut
self.add_param("auto_ptj_mjj", False)
self.add_param("bwcutoff", 15.0)
self.add_param("cut_decays", False)
self.add_param("nhel", 0, include=False)
#pt cut
self.add_param("ptj", 20.0, cut=True)
self.add_param("ptb", 0.0, cut=True)
self.add_param("pta", 10.0, cut=True)
self.add_param("ptl", 10.0, cut=True)
self.add_param("misset", 0.0, cut=True)
self.add_param("ptheavy", 0.0, cut=True, comment='this cut apply on particle heavier than 10 GeV')
self.add_param("ptonium", 1.0, legacy=True)
self.add_param("ptjmax", -1.0, cut=True)
self.add_param("ptbmax", -1.0, cut=True)
self.add_param("ptamax", -1.0, cut=True)
self.add_param("ptlmax", -1.0, cut=True)
self.add_param("missetmax", -1.0, cut=True)
# E cut
self.add_param("ej", 0.0, cut=True)
self.add_param("eb", 0.0, cut=True)
self.add_param("ea", 0.0, cut=True)
self.add_param("el", 0.0, cut=True)
self.add_param("ejmax", -1.0, cut=True)
self.add_param("ebmax", -1.0, cut=True)
self.add_param("eamax", -1.0, cut=True)
self.add_param("elmax", -1.0, cut=True)
# Eta cut
self.add_param("etaj", 5.0, cut=True)
self.add_param("etab", -1.0, cut=True)
self.add_param("etaa", 2.5, cut=True)
self.add_param("etal", 2.5, cut=True)
self.add_param("etaonium", 0.6, legacy=True)
self.add_param("etajmin", 0.0, cut=True)
self.add_param("etabmin", 0.0, cut=True)
self.add_param("etaamin", 0.0, cut=True)
self.add_param("etalmin", 0.0, cut=True)
# DRJJ
self.add_param("drjj", 0.4, cut=True)
self.add_param("drbb", 0.0, cut=True)
self.add_param("drll", 0.4, cut=True)
self.add_param("draa", 0.4, cut=True)
self.add_param("drbj", 0.0, cut=True)
self.add_param("draj", 0.4, cut=True)
self.add_param("drjl", 0.4, cut=True)
self.add_param("drab", 0.0, cut=True)
self.add_param("drbl", 0.0, cut=True)
self.add_param("dral", 0.4, cut=True)
self.add_param("drjjmax", -1.0, cut=True)
self.add_param("drbbmax", -1.0, cut=True)
self.add_param("drllmax", -1.0, cut=True)
self.add_param("draamax", -1.0, cut=True)
self.add_param("drbjmax", -1.0, cut=True)
self.add_param("drajmax", -1.0, cut=True)
self.add_param("drjlmax", -1.0, cut=True)
self.add_param("drabmax", -1.0, cut=True)
self.add_param("drblmax", -1.0, cut=True)
self.add_param("dralmax", -1.0, cut=True)
# invariant mass
self.add_param("mmjj", 0.0, cut=True)
self.add_param("mmbb", 0.0, cut=True)
self.add_param("mmaa", 0.0, cut=True)
self.add_param("mmll", 0.0, cut=True)
self.add_param("mmjjmax", -1.0, cut=True)
self.add_param("mmbbmax", -1.0, cut=True)
self.add_param("mmaamax", -1.0, cut=True)
self.add_param("mmllmax", -1.0, cut=True)
self.add_param("mmnl", 0.0, cut=True)
self.add_param("mmnlmax", -1.0, cut=True)
#minimum/max pt for sum of leptons
self.add_param("ptllmin", 0.0, cut=True)
self.add_param("ptllmax", -1.0, cut=True)
self.add_param("xptj", 0.0, cut=True)
self.add_param("xptb", 0.0, cut=True)
self.add_param("xpta", 0.0, cut=True)
self.add_param("xptl", 0.0, cut=True)
# ordered pt jet
self.add_param("ptj1min", 0.0, cut=True)
self.add_param("ptj1max", -1.0, cut=True)
self.add_param("ptj2min", 0.0, cut=True)
self.add_param("ptj2max", -1.0, cut=True)
self.add_param("ptj3min", 0.0, cut=True)
self.add_param("ptj3max", -1.0, cut=True)
self.add_param("ptj4min", 0.0, cut=True)
self.add_param("ptj4max", -1.0, cut=True)
self.add_param("cutuse", 0, cut=True)
# ordered pt lepton
self.add_param("ptl1min", 0.0, cut=True)
self.add_param("ptl1max", -1.0, cut=True)
self.add_param("ptl2min", 0.0, cut=True)
self.add_param("ptl2max", -1.0, cut=True)
self.add_param("ptl3min", 0.0, cut=True)
self.add_param("ptl3max", -1.0, cut=True)
self.add_param("ptl4min", 0.0, cut=True)
self.add_param("ptl4max", -1.0, cut=True)
# Ht sum of jets
self.add_param("htjmin", 0.0, cut=True)
self.add_param("htjmax", -1.0, cut=True)
self.add_param("ihtmin", 0.0, cut=True)
self.add_param("ihtmax", -1.0, cut=True)
self.add_param("ht2min", 0.0, cut=True)
self.add_param("ht3min", 0.0, cut=True)
self.add_param("ht4min", 0.0, cut=True)
self.add_param("ht2max", -1.0, cut=True)
self.add_param("ht3max", -1.0, cut=True)
self.add_param("ht4max", -1.0, cut=True)
# photon isolation
self.add_param("ptgmin", 0.0, cut=True)
self.add_param("r0gamma", 0.4)
self.add_param("xn", 1.0)
self.add_param("epsgamma", 1.0)
self.add_param("isoem", True)
self.add_param("xetamin", 0.0, cut=True)
self.add_param("deltaeta", 0.0, cut=True)
self.add_param("ktdurham", -1.0, fortran_name="kt_durham", cut=True)
self.add_param("dparameter", 0.4, fortran_name="d_parameter", cut=True)
self.add_param("ptlund", -1.0, fortran_name="pt_lund", cut=True)
self.add_param("pdgs_for_merging_cut", [21, 1, 2, 3, 4, 5, 6])
self.add_param("maxjetflavor", 4)
self.add_param("xqcut", 0.0, cut=True)
self.add_param("use_syst", True)
self.add_param('systematics_program', 'auto', include=False, hidden=True, comment='Choose which program to use for systematics computation: none, systematics, syscalc')
self.add_param('systematics_arguments', [''], include=False, hidden=True, comment='Choose the argment to pass to the systematics command. like --mur=0.25,1,4. Look at the help of the systematics function for more details.')
self.add_param("sys_scalefact", "0.5 1 2", include=False)
self.add_param("sys_alpsfact", "None", include=False)
self.add_param("sys_matchscale", "auto", include=False)
self.add_param("sys_pdf", "NNPDF23_lo_as_0130_qed", include=False)
self.add_param("sys_scalecorrelation", -1, include=False)
#parameter not in the run_card by default
self.add_param('gridrun', False, hidden=True)
self.add_param('fixed_couplings', True, hidden=True)
self.add_param('mc_grouped_subproc', True, hidden=True)
self.add_param('xmtcentral', 0.0, hidden=True, fortran_name="xmtc")
self.add_param('d', 1.0, hidden=True)
self.add_param('gseed', 0, hidden=True, include=False)
self.add_param('issgridfile', '', hidden=True)
#job handling of the survey/ refine
self.add_param('job_strategy', 0, hidden=True, include=False)
self.add_param('survey_splitting', -1, hidden=True, include=False, comment="for loop-induced control how many core are used at survey for the computation of a single iteration.")
self.add_param('survey_nchannel_per_job', 2, hidden=True, include=False, comment="control how many Channel are integrated inside a single job on cluster/multicore")
self.add_param('refine_evt_by_job', -1, hidden=True, include=False, comment="control the maximal number of events for the first iteration of the refine (larger means less jobs)")
# parameter allowing to define simple cut via the pdg
# Special syntax are related to those. (can not be edit directly)
self.add_param('pt_min_pdg',{'__type__':0.}, include=False)
self.add_param('pt_max_pdg',{'__type__':0.}, include=False)
self.add_param('E_min_pdg',{'__type__':0.}, include=False)
self.add_param('E_max_pdg',{'__type__':0.}, include=False)
self.add_param('eta_min_pdg',{'__type__':0.}, include=False)
self.add_param('eta_max_pdg',{'__type__':0.}, include=False)
self.add_param('mxx_min_pdg',{'__type__':0.}, include=False)
self.add_param('mxx_only_part_antipart', {'default':False}, include=False, hidden=True)
self.add_param('pdg_cut',[0], hidden=True, system=True) # store which PDG are tracked
self.add_param('ptmin4pdg',[0.], hidden=True, system=True) # store pt min
self.add_param('ptmax4pdg',[-1.], hidden=True, system=True)
self.add_param('Emin4pdg',[0.], hidden=True, system=True) # store pt min
self.add_param('Emax4pdg',[-1.], hidden=True, system=True)
self.add_param('etamin4pdg',[0.], hidden=True, system=True) # store pt min
self.add_param('etamax4pdg',[-1.], hidden=True, system=True)
self.add_param('mxxmin4pdg',[-1.], hidden=True, system=True)
self.add_param('mxxpart_antipart', [False], hidden=True, system=True)
# Not implemetented right now (double particle cut)
#self.add_param('pdg_cut_2',[0], hidden=True, system=True)
# self.add_param('M_min_pdg',[0.], hidden=True, system=True) # store pt min
#self.add_param('M_max_pdg',[0.], hidden=True, system=True)
# self.add_param('DR_min_pdg',[0.], hidden=True, system=True) # store pt min
#self.add_param('DR_max_pdg',[0.], hidden=True, system=True)
def check_validity(self):
    """Validate the LO run_card: raise InvalidRunCard for illegal values and
    silently fix (with a logged warning) settings that conflict with each
    other (photon isolation vs pta/draj, use_syst vs scalefact/alpsfact,
    matching vs DR cuts, pdlabel sanity)."""

    super(RunCardLO, self).check_validity()

    #Make sure that nhel is only either 0 (i.e. no MC over hel) or
    #1 (MC over hel with importance sampling). In particular, it can
    #no longer be > 1.
    if 'nhel' not in self.user_set:
        raise InvalidRunCard, "Parameter nhel is not defined in the run_card."
    if self['nhel'] not in [1,0]:
        raise InvalidRunCard, "Parameter nhel can only be '0' or '1', "+\
                              "not %s." % self['nhel']
    # NOTE(review): the check is '> 6' but the message claims 'lower than 5'
    # (with 6 partly supported) -- confirm which bound is really intended.
    if int(self['maxjetflavor']) > 6:
        raise InvalidRunCard, 'maxjetflavor should be lower than 5! (6 is partly supported)'

    if len(self['pdgs_for_merging_cut']) > 1000:
        raise InvalidRunCard, "The number of elements in "+\
                              "'pdgs_for_merging_cut' should not exceed 1000."

    # some cut need to be deactivated in presence of isolation
    if self['ptgmin'] > 0:
        if self['pta'] > 0:
            logger.warning('pta cut discarded since photon isolation is used')
            self['pta'] = 0.0
        if self['draj'] > 0:
            logger.warning('draj cut discarded since photon isolation is used')
            self['draj'] = 0.0

    # special treatment for gridpack use the gseed instead of the iseed
    if self['gridrun']:
        self['iseed'] = self['gseed']

    #Some parameter need to be fixed when using syscalc
    if self['use_syst']:
        if self['scalefact'] != 1.0:
            logger.warning('Since use_syst=T, We change the value of \'scalefact\' to 1')
            self['scalefact'] = 1.0

    # CKKW Treatment
    if self['ickkw'] > 0:
        if self['ickkw'] != 1:
            # ickkw=2 (and above) is experimental: require an explicit
            # interactive confirmation before proceeding.
            logger.critical('ickkw >1 is pure alpha and only partly implemented.')
            import madgraph.interface.extended_cmd as basic_cmd
            answer = basic_cmd.smart_input('Do you really want to continue', allow_arg=['y','n'], default='n')
            if answer !='y':
                raise InvalidRunCard, 'ickkw>1 is still in alpha'
        if self['use_syst']:
            # some additional parameter need to be fixed for Syscalc + matching
            if self['alpsfact'] != 1.0:
                logger.warning('Since use_syst=T, We change the value of \'alpsfact\' to 1')
                self['alpsfact'] =1.0
        if self['maxjetflavor'] == 6:
            raise InvalidRunCard, 'maxjetflavor at 6 is NOT supported for matching!'
        if self['ickkw'] == 2:
            # add warning if ckkw selected but the associate parameter are empty
            self.get_default('highestmult', log_level=20)
            self.get_default('issgridfile', 'issudgrid.dat', log_level=20)

    if self['xqcut'] > 0:
        # MLM matching cut is active: cross-check against ickkw and the
        # jet DR cuts (which must be off for matching).
        if self['ickkw'] == 0:
            logger.error('xqcut>0 but ickkw=0. Potentially not fully consistent setup. Be carefull')
            import time
            time.sleep(5)
        if self['drjj'] != 0:
            logger.warning('Since icckw>0, We change the value of \'drjj\' to 0')
            self['drjj'] = 0
        if self['drjl'] != 0:
            logger.warning('Since icckw>0, We change the value of \'drjl\' to 0')
            self['drjl'] = 0
        if not self['auto_ptj_mjj']:
            if self['mmjj'] > self['xqcut']:
                logger.warning('mmjj > xqcut (and auto_ptj_mjj = F). MMJJ set to 0')
                self['mmjj'] = 0.0

    # check validity of the pdf set
    possible_set = ['lhapdf', 'mrs02nl','mrs02nn',
                    'cteq4_m', 'cteq4_l','cteq4_d',
                    'cteq5_m','cteq5_d','cteq5_l','cteq5m1',
                    'cteq6_m','cteq6_l', 'cteq6l1',
                    'nn23lo', 'nn23lo1', 'nn23nlo']
    if self['pdlabel'] not in possible_set:
        raise InvalidRunCard, 'Invalid PDF set (argument of pdlabel): %s. Possible choice are:\n %s' % (self['pdlabel'], ', '.join(possible_set))
    if self['pdlabel'] == 'lhapdf':
        #add warning if lhaid not define
        self.get_default('lhaid', log_level=20)
def update_system_parameter_for_include(self):
    """Translate the user-facing per-PDG cut dictionaries (pt_min_pdg, ...)
    into the flat, index-aligned lists (pdg_cut, ptmin4pdg, ...) that are
    written to the Fortran include files."""

    # set the pdg_for_cut fortran parameter
    # union of every pdg code mentioned in any of the per-pdg cut dicts
    pdg_to_cut = set(self['pt_min_pdg'].keys() +self['pt_max_pdg'].keys() +
                     self['e_min_pdg'].keys() +self['e_max_pdg'].keys() +
                     self['eta_min_pdg'].keys() +self['eta_max_pdg'].keys()+
                     self['mxx_min_pdg'].keys() + self['mxx_only_part_antipart'].keys())
    # drop the bookkeeping keys used to type/default these dicts
    pdg_to_cut.discard('__type__')
    pdg_to_cut.discard('default')

    # the Fortran arrays are statically sized for at most 25 tracked pdgs
    if len(pdg_to_cut)>25:
        raise Exception, "Maximum 25 different pdgs are allowed for pdg specific cut"

    if any(int(pdg)<0 for pdg in pdg_to_cut):
        logger.warning('PDG specific cuts are always applied symmetrically on particle/anti-particle. Always use positve PDG codes')
        raise MadGraph5Error, 'Some PDG specific cuts are defined with negative pdg code'

    # light quarks / b / leptons / gluon / photon already have dedicated cuts
    if any(pdg in pdg_to_cut for pdg in [1,2,3,4,5,21,22,11,13,15]):
        raise Exception, "Can not use PDG related cut for light quark/b quark/lepton/gluon/photon"

    if pdg_to_cut:
        self['pdg_cut'] = list(pdg_to_cut)
        self['ptmin4pdg'] = []
        self['Emin4pdg'] = []
        self['etamin4pdg'] =[]
        self['ptmax4pdg'] = []
        self['Emax4pdg'] = []
        self['etamax4pdg'] =[]
        self['mxxmin4pdg'] =[]
        self['mxxpart_antipart'] = []
        for pdg in self['pdg_cut']:
            # fill pt/e/eta min-max and Mxx min, one slot per tracked pdg
            for var in ['pt','e','eta', 'Mxx']:
                for minmax in ['min', 'max']:
                    if var in ['Mxx'] and minmax =='max':
                        continue
                    new_var = '%s%s4pdg' % (var, minmax)
                    old_var = '%s_%s_pdg' % (var, minmax)
                    # min cuts default to 0 (inactive), max cuts to -1 (inactive)
                    default = 0. if minmax=='min' else -1.
                    self[new_var].append(self[old_var][str(pdg)] if str(pdg) in self[old_var] else default)
            #special for mxx_part_antipart
            old_var = 'mxx_only_part_antipart'
            new_var = 'mxxpart_antipart'
            if 'default' in self[old_var]:
                default = self[old_var]['default']
                self[new_var].append(self[old_var][str(pdg)] if str(pdg) in self[old_var] else default)
            else:
                if str(pdg) not in self[old_var]:
                    raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg))
                self[new_var].append(self[old_var][str(pdg)])
    else:
        # no pdg-specific cut requested: write inert placeholder slots
        self['pdg_cut'] = [0]
        self['ptmin4pdg'] = [0.]
        self['Emin4pdg'] = [0.]
        self['etamin4pdg'] =[0.]
        self['ptmax4pdg'] = [-1.]
        self['Emax4pdg'] = [-1.]
        self['etamax4pdg'] =[-1.]
        self['mxxmin4pdg'] =[0.]
        self['mxxpart_antipart'] = [False]
def create_default_for_process(self, proc_characteristic, history, proc_def):
    """Tune the default run_card for the generated process.

    Rules applied:
      - loop-induced process        -> MC over helicities (nhel=1)
      - 1 -> N (decay) process      -> all cuts and systematics switched off
      - e+ e- beams                 -> lpp=0 with 500 GeV beams
      - hadronic beams              -> maxjetflavor set automatically
      - several jet multiplicities  -> MLM matching on (ickkw=1, xqcut=30)
      - interference ('^2' orders)  -> systematics disabled
    """
    if proc_characteristic['loop_induced']:
        self['nhel'] = 1
    self['pdgs_for_merging_cut'] = proc_characteristic['colored_pdgs']

    if proc_characteristic['ninitial'] == 1:
        # Pure decay: cuts and systematics are meaningless here.
        self.remove_all_cut()
        self['use_syst'] = False
    else:
        # Collect the pdg codes appearing in the initial state.
        initial_ids = set()
        for procs in proc_def:
            for one_proc in procs:
                initial_ids.update(leg['id'] for leg in one_proc['legs']
                                   if not leg['state'])

        if initial_ids.intersection([1,-1,2,-2,3,-3,4,-4,5,-5,21,22]):
            # Hadronic beams: track every light flavour seen (at least 4).
            nflav = max([4] + [abs(code) for code in initial_ids if -7 < code < 7])
            self['maxjetflavor'] = nflav
            self['asrwgtflavor'] = nflav
        elif initial_ids.intersection([11, -11]):
            # e+ e- collider defaults.
            self['lpp1'] = 0
            self['lpp2'] = 0
            self['ebeam1'] = 500
            self['ebeam2'] = 500
            self['use_syst'] = False
        else:
            # Unknown beams: no PDF, no systematics.
            self['lpp1'] = 0
            self['lpp2'] = 0
            self['use_syst'] = False

        # Detect whether several final-state multiplicities are generated.
        lowest, highest = 99, 0
        for procs in proc_def:
            nleg = len(procs[0]['legs'])
            lowest = min(lowest, nleg)
            highest = max(highest, nleg)

        if lowest != highest:
            # Particle content of (one of) the lowest-multiplicity process.
            for procs in proc_def:
                if len(procs[0]['legs']) == lowest:
                    base_ids = [leg['id'] for leg in procs[0]['legs']]
                    break

            matching = False
            for procs in proc_def:
                if len(procs[0]['legs']) != highest:
                    continue
                extra_ids = [leg['id'] for leg in procs[0]['legs']]
                # Strip the base process content (first occurrence each).
                for code in base_ids:
                    if code in extra_ids:
                        extra_ids.remove(code)
                # If everything left over is a jet, this is a matched sample.
                if all(code in [1,-1,2,-2,3,-3,4,-4,5,-5,21]
                       for code in extra_ids):
                    matching = True
                    break

            if matching:
                self['ickkw'] = 1
                self['xqcut'] = 30
                self['drjj'] = 0
                self['drjl'] = 0
                self['sys_alpsfact'] = "0.5 1 2"

    # Interference (squared-order) processes have ill-defined systematics:
    # switch them off entirely.
    no_systematics = any('^2' in one_proc.nice_string()
                         for procs in proc_def for one_proc in procs)
    if no_systematics:
        self['use_syst'] = False
        self['systematics_program'] = 'none'
def write(self, output_file, template=None, python_template=False):
    """Dump this run_card into output_file, laid out according to
    template (a path to a valid run_card); fall back to the shipped
    default template when none is supplied."""
    if not template:
        # No template given: pick the one shipped with the installation.
        if MADEVENT:
            template = pjoin(MEDIR, 'Cards', 'run_card_default.dat')
            python_template = False
        else:
            template = pjoin(MG5DIR, 'Template', 'LO', 'Cards',
                             'run_card.dat')
            python_template = True
    super(RunCardLO, self).write(output_file, template=template,
                                 python_template=python_template)
class InvalidMadAnalysis5Card(InvalidCmd):
    """Raised when a MadAnalysis5 card is malformed or internally inconsistent."""
    pass
class MadAnalysis5Card(dict):
    """ A class to store a MadAnalysis5 card. Very basic since it is basically
    free format. Keys: 'mode' ('parton'/'hadron'), 'inputs', 'stdout_lvl',
    'analyses', 'reconstruction', 'recasting' and 'order' (declaration order
    of analyses/reconstructions/recasting sections)."""

    # prefix marking MG5aMC meta-instructions inside an otherwise free-format card
    _MG5aMC_escape_tag = '@MG5aMC'

    # default input-file patterns per mode
    _default_hadron_inputs = ['*.hepmc', '*.hep', '*.stdhep', '*.lhco','*.root']
    _default_parton_inputs = ['*.lhe']
    # when True, the analyses themselves are skipped at run time
    _skip_analysis = False

    @classmethod
    def events_can_be_reconstructed(cls, file_path):
        """ Checks from the type of an event file whether it can be reconstructed or not."""
        # lhco/root files are already reconstructed objects
        return not (file_path.endswith('.lhco') or file_path.endswith('.lhco.gz') or \
                    file_path.endswith('.root') or file_path.endswith('.root.gz'))

    @classmethod
    def empty_analysis(cls):
        """ A method returning the structure of an empty analysis """
        return {'commands':[],
                'reconstructions':[]}

    @classmethod
    def empty_reconstruction(cls):
        """ A method returning the structure of an empty reconstruction """
        return {'commands':[],
                'reco_output':'lhe'}

    def default_setup(self):
        """define the default value"""
        self['mode'] = 'parton'
        self['inputs'] = []
        # None is the default stdout level, it will be set automatically by MG5aMC
        self['stdout_lvl'] = None
        # These two dictionaries are formated as follows:
        #     {'analysis_name':
        #          {'reconstructions' : ['associated_reconstructions_name']}
        #          {'commands':['analysis command lines here']} }
        # with values being of the form of the empty_analysis() attribute
        # of this class and some other property could be added to this dictionary
        # in the future.
        self['analyses'] = {}
        # The recasting structure contains on set of commands and one set of
        # card lines.
        self['recasting'] = {'commands':[],'card':[]}
        # Add the default trivial reconstruction to use an lhco input
        # This is just for the structure
        self['reconstruction'] = {'lhco_input':
                                    MadAnalysis5Card.empty_reconstruction(),
                                  'root_input':
                                    MadAnalysis5Card.empty_reconstruction()}
        self['reconstruction']['lhco_input']['reco_output']='lhco'
        self['reconstruction']['root_input']['reco_output']='root'
        # Specify in which order the analysis/recasting were specified
        self['order'] = []

    def __init__(self, finput=None,mode=None):
        # copy-constructor: duplicate both dict content and instance attributes
        if isinstance(finput, self.__class__):
            dict.__init__(self, finput)
            assert finput.__dict__.keys()
            for key in finput.__dict__:
                setattr(self, key, copy.copy(getattr(finput, key)) )
            return
        else:
            dict.__init__(self)

        # Initialize it with all the default value
        self.default_setup()
        if not mode is None:
            self['mode']=mode

        # if input is define read that input
        if isinstance(finput, (file, str, StringIO.StringIO)):
            self.read(finput, mode=mode)

    def read(self, input, mode=None):
        """ Read an MA5 card"""

        if mode not in [None,'parton','hadron']:
            raise MadGraph5Error('A MadAnalysis5Card can be read online the modes'+
                                 "'parton' or 'hadron'")
        card_mode = mode

        if isinstance(input, (file, StringIO.StringIO)):
            input_stream = input
        elif isinstance(input, str):
            if not os.path.isfile(input):
                raise InvalidMadAnalysis5Card("Cannot read the MadAnalysis5 card."+\
                                              "File '%s' not found."%input)
            # heuristic: a 'hadron' substring in the filename selects hadron mode
            if mode is None and 'hadron' in input:
                card_mode = 'hadron'
            input_stream = open(input,'r')
        else:
            raise MadGraph5Error('Incorrect input for the read function of'+\
              ' the MadAnalysis5Card card. Received argument type is: %s'%str(type(input)))

        # Reinstate default values
        self.__init__()
        current_name = 'default'
        current_type = 'analyses'
        for line in input_stream:
            # Skip comments for now
            if line.startswith('#'):
                continue
            if line.endswith('\n'):
                line = line[:-1]
            if line.strip()=='':
                continue
            if line.startswith(self._MG5aMC_escape_tag):
                # meta-instruction line: split into 'option = value' when possible
                try:
                    option,value = line[len(self._MG5aMC_escape_tag):].split('=')
                    value = value.strip()
                except ValueError:
                    option = line[len(self._MG5aMC_escape_tag):]
                option = option.strip()

                if option=='inputs':
                    self['inputs'].extend([v.strip() for v in value.split(',')])

                elif option == 'skip_analysis':
                    self._skip_analysis = True

                elif option=='stdout_lvl':
                    try: # It is likely an int
                        self['stdout_lvl']=int(value)
                    except ValueError:
                        try: # Maybe the user used something like 'logging.INFO'
                            self['stdout_lvl']=eval(value)
                        except:
                            try:
                                self['stdout_lvl']=eval('logging.%s'%value)
                            except:
                                raise InvalidMadAnalysis5Card(
                    "MA5 output level specification '%s' is incorrect."%str(value))

                elif option=='analysis_name':
                    current_type = 'analyses'
                    current_name = value
                    if current_name in self[current_type]:
                        raise InvalidMadAnalysis5Card(
                    "Analysis '%s' already defined in MadAnalysis5 card"%current_name)
                    else:
                        self[current_type][current_name] = MadAnalysis5Card.empty_analysis()

                elif option=='set_reconstructions':
                    try:
                        reconstructions = eval(value)
                        if not isinstance(reconstructions, list):
                            raise
                    except:
                        raise InvalidMadAnalysis5Card("List of reconstructions"+\
                          " '%s' could not be parsed in MadAnalysis5 card."%value)
                    if current_type!='analyses' and current_name not in self[current_type]:
                        raise InvalidMadAnalysis5Card("A list of reconstructions"+\
                                   "can only be defined in the context of an "+\
                                             "analysis in a MadAnalysis5 card.")
                    self[current_type][current_name]['reconstructions']=reconstructions
                    continue

                elif option=='reconstruction_name':
                    current_type = 'reconstruction'
                    current_name = value
                    if current_name in self[current_type]:
                        raise InvalidMadAnalysis5Card(
                  "Reconstruction '%s' already defined in MadAnalysis5 hadron card"%current_name)
                    else:
                        self[current_type][current_name] = MadAnalysis5Card.empty_reconstruction()

                elif option=='reco_output':
                    if current_type!='reconstruction' or current_name not in \
                                                         self['reconstruction']:
                        raise InvalidMadAnalysis5Card(
                "Option '%s' is only available within the definition of a reconstruction"%option)
                    if not value.lower() in ['lhe','root']:
                        raise InvalidMadAnalysis5Card(
                      "Option '%s' can only take the values 'lhe' or 'root'"%option)
                    self['reconstruction'][current_name]['reco_output'] = value.lower()

                elif option.startswith('recasting'):
                    # 'recasting_card' or 'recasting_commands'
                    current_type = 'recasting'
                    try:
                        current_name = option.split('_')[1]
                    except:
                        raise InvalidMadAnalysis5Card('Malformed MA5 recasting option %s.'%option)
                    if len(self['recasting'][current_name])>0:
                        raise InvalidMadAnalysis5Card(
                  "Only one recasting can be defined in MadAnalysis5 hadron card")

                else:
                    raise InvalidMadAnalysis5Card(
          "Unreckognized MG5aMC instruction in MadAnalysis5 card: '%s'"%option)

                # section-opening instructions are remembered in declaration order
                if option in ['analysis_name','reconstruction_name'] or \
                                                 option.startswith('recasting'):
                    self['order'].append((current_type,current_name))
                continue

            # Add the default analysis if needed since the user does not need
            # to specify it.
            if current_name == 'default' and current_type == 'analyses' and\
                                             'default' not in self['analyses']:
                self['analyses']['default'] = MadAnalysis5Card.empty_analysis()
                self['order'].append(('analyses','default'))

            # free-format line: append to whatever section is currently open
            if current_type in ['recasting']:
                self[current_type][current_name].append(line)
            elif current_type in ['reconstruction']:
                self[current_type][current_name]['commands'].append(line)
            elif current_type in ['analyses']:
                self[current_type][current_name]['commands'].append(line)

        # NOTE(review): this tests for an analysis literally named
        # 'reconstruction' -- confirm this is the intended hadron-mode
        # detection (vs. checking user-defined reconstructions).
        if 'reconstruction' in self['analyses'] or len(self['recasting']['card'])>0:
            if mode=='parton':
                raise InvalidMadAnalysis5Card(
          "A parton MadAnalysis5 card cannot specify a recombination or recasting.")
            card_mode = 'hadron'
        elif mode is None:
            card_mode = 'parton'

        self['mode'] = card_mode
        if self['inputs'] == []:
            if self['mode']=='hadron':
                self['inputs'] = self._default_hadron_inputs
            else:
                self['inputs'] = self._default_parton_inputs

        # Make sure at least one reconstruction is specified for each hadron
        # level analysis and that it exists.
        if self['mode']=='hadron':
            for analysis_name, analysis in self['analyses'].items():
                if len(analysis['reconstructions'])==0:
                    raise InvalidMadAnalysis5Card('Hadron-level analysis '+\
                  "'%s' is not specified any reconstruction(s)."%analysis_name)
                if any(reco not in self['reconstruction'] for reco in \
                                                 analysis['reconstructions']):
                    raise InvalidMadAnalysis5Card('A reconstructions specified in'+\
                                " analysis '%s' is not defined."%analysis_name)

    def write(self, output):
        """ Write an MA5 card."""
        if isinstance(output, (file, StringIO.StringIO)):
            output_stream = output
        elif isinstance(output, str):
            output_stream = open(output,'w')
        else:
            raise MadGraph5Error('Incorrect input for the write function of'+\
              ' the MadAnalysis5Card card. Received argument type is: %s'%str(type(output)))

        output_lines = []
        if self._skip_analysis:
            output_lines.append('%s skip_analysis'%self._MG5aMC_escape_tag)
        output_lines.append('%s inputs = %s'%(self._MG5aMC_escape_tag,','.join(self['inputs'])))
        if not self['stdout_lvl'] is None:
            output_lines.append('%s stdout_lvl=%s'%(self._MG5aMC_escape_tag,self['stdout_lvl']))
        # write sections back in their original declaration order
        for definition_type, name in self['order']:
            if definition_type=='analyses':
                output_lines.append('%s analysis_name = %s'%(self._MG5aMC_escape_tag,name))
                output_lines.append('%s set_reconstructions = %s'%(self._MG5aMC_escape_tag,
                                       str(self['analyses'][name]['reconstructions'])))
            elif definition_type=='reconstruction':
                output_lines.append('%s reconstruction_name = %s'%(self._MG5aMC_escape_tag,name))
            elif definition_type=='recasting':
                output_lines.append('%s recasting_%s'%(self._MG5aMC_escape_tag,name))

            if definition_type in ['recasting']:
                output_lines.extend(self[definition_type][name])
            elif definition_type in ['reconstruction']:
                output_lines.append('%s reco_output = %s'%(self._MG5aMC_escape_tag,
                                     self[definition_type][name]['reco_output']))
                output_lines.extend(self[definition_type][name]['commands'])
            elif definition_type in ['analyses']:
                output_lines.extend(self[definition_type][name]['commands'])

        output_stream.write('\n'.join(output_lines))
        return

    def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None,
                     UFO_model_path=None, run_tag=''):
        """ Returns a list of tuples ('AnalysisTag',['commands']) specifying
        the commands of the MadAnalysis runs required from this card.
        At parton-level, the number of such commands is the number of analysis
        asked for. In the future, the idea is that the entire card can be
        processed in one go from MA5 directly."""

        if isinstance(inputs_arg, list):
            inputs = inputs_arg
        elif isinstance(inputs_arg, str):
            inputs = [inputs_arg]
        else:
            raise MadGraph5Error("The function 'get_MA5_cmds' can only take "+\
              " a string or a list for the argument 'inputs_arg'")

        if len(inputs)==0:
            raise MadGraph5Error("The function 'get_MA5_cmds' must have "+\
              " at least one input specified'")

        if run_dir_path is None:
            run_dir_path = os.path.dirname(inputs_arg)

        cmds_list = []

        UFO_load = []
        # first import the UFO if provided
        if UFO_model_path:
            UFO_load.append('import %s'%UFO_model_path)

        def get_import(input, type=None):
            """ Generates the MA5 import commands for that event file. """
            dataset_name = os.path.basename(input).split('.')[0]
            res = ['import %s as %s'%(input, dataset_name)]
            if not type is None:
                res.append('set %s.type = %s'%(dataset_name, type))
            return res

        # state shared by warn_fifo: a fifo can be consumed only once, so only
        # the first analysis/reconstruction may read it
        fifo_status = {'warned_fifo':False,'fifo_used_up':False}
        def warn_fifo(input):
            # returns True when this fifo input must be skipped
            if not input.endswith('.fifo'):
                return False
            if not fifo_status['fifo_used_up']:
                fifo_status['fifo_used_up'] = True
                return False
            else:
                if not fifo_status['warned_fifo']:
                    logger.warning('Only the first MA5 analysis/reconstructions can be run on a fifo. Subsequent runs will skip fifo inputs.')
                    fifo_status['warned_fifo'] = True
                return True

        # Then the event file(s) input(s)
        inputs_load = []
        for input in inputs:
            inputs_load.extend(get_import(input))

        submit_command = 'submit %s'%submit_folder+'_%s'

        # Keep track of the reconstruction outpus in the MA5 workflow
        # Keys are reconstruction names and values are .lhe.gz reco file paths.
        # We put by default already the lhco/root ones present
        reconstruction_outputs = {
                'lhco_input':[f for f in inputs if
                                  f.endswith('.lhco') or f.endswith('.lhco.gz')],
                'root_input':[f for f in inputs if
                                  f.endswith('.root') or f.endswith('.root.gz')]}

        # If a recasting card has to be written out, chose here its path
        recasting_card_path = pjoin(run_dir_path,
           '_'.join([run_tag,os.path.basename(submit_folder),'recasting_card.dat']))

        # Make sure to only run over one analysis over each fifo.
        for definition_type, name in self['order']:
            if definition_type == 'reconstruction':
                analysis_cmds = list(self['reconstruction'][name]['commands'])
                reco_outputs = []
                for i_input, input in enumerate(inputs):
                    # Skip lhco/root as they must not be reconstructed
                    if not MadAnalysis5Card.events_can_be_reconstructed(input):
                        continue
                    # Make sure the input is not a used up fifo.
                    if warn_fifo(input):
                        continue
                    analysis_cmds.append('import %s as reco_events'%input)
                    if self['reconstruction'][name]['reco_output']=='lhe':
                        reco_outputs.append('%s_%s.lhe.gz'%(os.path.basename(
                          input).replace('_events','').split('.')[0],name))
                        analysis_cmds.append('set main.outputfile=%s'%reco_outputs[-1])
                    elif self['reconstruction'][name]['reco_output']=='root':
                        reco_outputs.append('%s_%s.root'%(os.path.basename(
                          input).replace('_events','').split('.')[0],name))
                        analysis_cmds.append('set main.fastsim.rootfile=%s'%reco_outputs[-1])
                    analysis_cmds.append(
                            submit_command%('reco_%s_%d'%(name,i_input+1)))
                    analysis_cmds.append('remove reco_events')

                reconstruction_outputs[name]= [pjoin(run_dir_path,rec_out)
                                                  for rec_out in reco_outputs]
                if len(reco_outputs)>0:
                    cmds_list.append(('_reco_%s'%name,analysis_cmds))

            elif definition_type == 'analyses':
                if self['mode']=='parton':
                    cmds_list.append( (name, UFO_load+inputs_load+
                       self['analyses'][name]['commands']+[submit_command%name]) )
                elif self['mode']=='hadron':
                    # Also run on the already reconstructed root/lhco files if found.
                    for reco in self['analyses'][name]['reconstructions']+\
                                                     ['lhco_input','root_input']:
                        if len(reconstruction_outputs[reco])==0:
                            continue
                        if self['reconstruction'][reco]['reco_output']=='lhe':
                            # For the reconstructed lhe output we must be in parton mode
                            analysis_cmds = ['set main.mode = parton']
                        else:
                            analysis_cmds = []
                        analysis_cmds.extend(sum([get_import(rec_out) for
                                 rec_out in reconstruction_outputs[reco]],[]))
                        analysis_cmds.extend(self['analyses'][name]['commands'])
                        analysis_cmds.append(submit_command%('%s_%s'%(name,reco)))
                        cmds_list.append( ('%s_%s'%(name,reco),analysis_cmds) )

            elif definition_type == 'recasting':
                if len(self['recasting']['card'])==0:
                    continue
                if name == 'card':
                    # Create the card here
                    open(recasting_card_path,'w').write('\n'.join(self['recasting']['card']))
                if name == 'commands':
                    recasting_cmds = list(self['recasting']['commands'])
                    # Exclude LHCO files here of course
                    n_inputs = 0
                    for input in inputs:
                        if not MadAnalysis5Card.events_can_be_reconstructed(input):
                            continue
                        # Make sure the input is not a used up fifo.
                        if warn_fifo(input):
                            continue
                        recasting_cmds.extend(get_import(input,'signal'))
                        n_inputs += 1
                    recasting_cmds.append('set main.recast.card_path=%s'%recasting_card_path)
                    recasting_cmds.append(submit_command%'Recasting')
                    if n_inputs>0:
                        cmds_list.append( ('Recasting',recasting_cmds))

        return cmds_list
class RunCardNLO(RunCard):
"""A class object for the run_card for a (aMC@)NLO pocess"""
def default_setup(self):
    """Declare every parameter of the (aMC@)NLO run_card with its default
    value (include=False keeps a parameter out of the Fortran include files;
    hidden/system flags control visibility and user editability)."""
    self.add_param('run_tag', 'tag_1', include=False)
    self.add_param('nevents', 10000)
    self.add_param('req_acc', -1.0, include=False)
    self.add_param('nevt_job', -1, include=False)
    self.add_param('event_norm', 'average')
    #FO parameter
    self.add_param('req_acc_fo', 0.01, include=False)
    self.add_param('npoints_fo_grid', 5000, include=False)
    self.add_param('niters_fo_grid', 4, include=False)
    self.add_param('npoints_fo', 10000, include=False)
    self.add_param('niters_fo', 6, include=False)
    #seed and collider
    self.add_param('iseed', 0)
    self.add_param('lpp1', 1, fortran_name='lpp(1)')
    self.add_param('lpp2', 1, fortran_name='lpp(2)')
    self.add_param('ebeam1', 6500.0, fortran_name='ebeam(1)')
    self.add_param('ebeam2', 6500.0, fortran_name='ebeam(2)')
    self.add_param('pdlabel', 'nn23nlo')
    self.add_param('lhaid', [244600],fortran_name='lhaPDFid')
    self.add_param('lhapdfsetname', ['internal_use_only'], system=True)
    #shower and scale
    self.add_param('parton_shower', 'HERWIG6', fortran_name='shower_mc')
    self.add_param('shower_scale_factor',1.0)
    self.add_param('fixed_ren_scale', False)
    self.add_param('fixed_fac_scale', False)
    self.add_param('mur_ref_fixed', 91.118)
    self.add_param('muf1_ref_fixed', -1.0, hidden=True)
    self.add_param('muf_ref_fixed', 91.118)
    self.add_param('muf2_ref_fixed', -1.0, hidden=True)
    self.add_param("dynamical_scale_choice", [-1],fortran_name='dyn_scale', comment="\'-1\' is based on CKKW back clustering (following feynman diagram).\n \'1\' is the sum of transverse energy.\n '2' is HT (sum of the transverse mass)\n '3' is HT/2")
    self.add_param('fixed_qes_scale', False, hidden=True)
    self.add_param('qes_ref_fixed', -1.0, hidden=True)
    self.add_param('mur_over_ref', 1.0)
    self.add_param('muf_over_ref', 1.0)
    self.add_param('muf1_over_ref', -1.0, hidden=True)
    self.add_param('muf2_over_ref', -1.0, hidden=True)
    self.add_param('qes_over_ref', -1.0, hidden=True)
    self.add_param('reweight_scale', [True], fortran_name='lscalevar')
    self.add_param('rw_rscale_down', -1.0, hidden=True)
    self.add_param('rw_rscale_up', -1.0, hidden=True)
    self.add_param('rw_fscale_down', -1.0, hidden=True)
    self.add_param('rw_fscale_up', -1.0, hidden=True)
    self.add_param('rw_rscale', [1.0,2.0,0.5], fortran_name='scalevarR')
    self.add_param('rw_fscale', [1.0,2.0,0.5], fortran_name='scalevarF')
    self.add_param('reweight_pdf', [False], fortran_name='lpdfvar')
    self.add_param('pdf_set_min', 244601, hidden=True)
    self.add_param('pdf_set_max', 244700, hidden=True)
    self.add_param('store_rwgt_info', False)
    self.add_param('systematics_program', 'none', include=False, hidden=True, comment='Choose which program to use for systematics computation: none, systematics')
    self.add_param('systematics_arguments', [''], include=False, hidden=True, comment='Choose the argment to pass to the systematics command. like --mur=0.25,1,4. Look at the help of the systematics function for more details.')
    #merging
    self.add_param('ickkw', 0)
    self.add_param('bwcutoff', 15.0)
    #cuts
    self.add_param('jetalgo', 1.0)
    self.add_param('jetradius', 0.7)
    self.add_param('ptj', 10.0 , cut=True)
    self.add_param('etaj', -1.0, cut=True)
    self.add_param('ptl', 0.0, cut=True)
    self.add_param('etal', -1.0, cut=True)
    self.add_param('drll', 0.0, cut=True)
    self.add_param('drll_sf', 0.0, cut=True)
    self.add_param('mll', 0.0, cut=True)
    self.add_param('mll_sf', 30.0, cut=True)
    # photon isolation (Frixione-style)
    self.add_param('ptgmin', 20.0, cut=True)
    self.add_param('etagamma', -1.0)
    self.add_param('r0gamma', 0.4)
    self.add_param('xn', 1.0)
    self.add_param('epsgamma', 1.0)
    self.add_param('isoem', True)
    self.add_param('maxjetflavor', 4, hidden=True)
    self.add_param('iappl', 0)
    self.add_param('lhe_version', 3, hidden=True, include=False)
    #internal variable related to FO_analyse_card
    self.add_param('FO_LHE_weight_ratio',1e-3, hidden=True, system=True)
    self.add_param('FO_LHE_postprocessing',['grouping','random'],
                   hidden=True, system=True, include=False)
    # parameter allowing to define simple cut via the pdg
    # NOTE(review): parameter name 'g' looks like a leftover/typo (no other
    # run_card parameter is single-letter) -- confirm it is intentional.
    self.add_param('g',{'__type__':0.}, include=False)
    self.add_param('pt_min_pdg',{'__type__':0.}, include=False)
    self.add_param('pt_max_pdg',{'__type__':0.}, include=False)
    self.add_param('mxx_min_pdg',{'__type__':0.}, include=False)
    self.add_param('mxx_only_part_antipart', {'default':False}, include=False, hidden=True)
    #hidden parameter that are transfer to the fortran code
    self.add_param('pdg_cut',[0], hidden=True, system=True) # store which PDG are tracked
    self.add_param('ptmin4pdg',[0.], hidden=True, system=True) # store pt min
    self.add_param('ptmax4pdg',[-1.], hidden=True, system=True)
    self.add_param('mxxmin4pdg',[0.], hidden=True, system=True)
    self.add_param('mxxpart_antipart', [False], hidden=True, system=True)
def check_validity(self):
"""check the validity of the various input"""
super(RunCardNLO, self).check_validity()
# for lepton-lepton collisions, ignore 'pdlabel' and 'lhaid'
if self['lpp1']==0 and self['lpp2']==0:
if self['pdlabel']!='nn23nlo' or self['reweight_pdf']:
self['pdlabel']='nn23nlo'
self['reweight_pdf']=[False]
logger.info('''Lepton-lepton collisions: ignoring PDF related parameters in the run_card.dat (pdlabel, lhaid, reweight_pdf, ...)''')
# For FxFx merging, make sure that the following parameters are set correctly:
if self['ickkw'] == 3:
# 1. Renormalization and factorization (and ellis-sexton scales) are not fixed
scales=['fixed_ren_scale','fixed_fac_scale','fixed_QES_scale']
for scale in scales:
if self[scale]:
logger.warning('''For consistency in the FxFx merging, \'%s\' has been set to false'''
% scale,'$MG:color:BLACK')
self[scale]= False
#and left to default dynamical scale
if len(self["dynamical_scale_choice"]) > 1 or self["dynamical_scale_choice"][0] != -1:
self["dynamical_scale_choice"] = [-1]
self["reweight_scale"]=[self["reweight_scale"][0]]
logger.warning('''For consistency in the FxFx merging, dynamical_scale_choice has been set to -1 (default)'''
,'$MG:color:BLACK')
# 2. Use kT algorithm for jets with pseudo-code size R=1.0
jetparams=['jetradius','jetalgo']
for jetparam in jetparams:
if float(self[jetparam]) != 1.0:
logger.info('''For consistency in the FxFx merging, \'%s\' has been set to 1.0'''
% jetparam ,'$MG:color:BLACK')
self[jetparam] = 1.0
elif self['ickkw'] == -1 and (self["dynamical_scale_choice"][0] != -1 or
len(self["dynamical_scale_choice"]) > 1):
self["dynamical_scale_choice"] = [-1]
self["reweight_scale"]=[self["reweight_scale"][0]]
logger.warning('''For consistency with the jet veto, the scale which will be used is ptj. dynamical_scale_choice will be set at -1.'''
,'$MG:color:BLACK')
# For interface to APPLGRID, need to use LHAPDF and reweighting to get scale uncertainties
if self['iappl'] != 0 and self['pdlabel'].lower() != 'lhapdf':
raise InvalidRunCard('APPLgrid generation only possible with the use of LHAPDF')
if self['iappl'] != 0 and not self['reweight_scale']:
raise InvalidRunCard('APPLgrid generation only possible with including' +\
' the reweighting to get scale dependence')
# check that the pdf is set correctly
possible_set = ['lhapdf','mrs02nl','mrs02nn', 'mrs0119','mrs0117','mrs0121','mrs01_j', 'mrs99_1','mrs99_2','mrs99_3','mrs99_4','mrs99_5','mrs99_6', 'mrs99_7','mrs99_8','mrs99_9','mrs9910','mrs9911','mrs9912', 'mrs98z1','mrs98z2','mrs98z3','mrs98z4','mrs98z5','mrs98ht', 'mrs98l1','mrs98l2','mrs98l3','mrs98l4','mrs98l5', 'cteq3_m','cteq3_l','cteq3_d', 'cteq4_m','cteq4_d','cteq4_l','cteq4a1','cteq4a2', 'cteq4a3','cteq4a4','cteq4a5','cteq4hj','cteq4lq', 'cteq5_m','cteq5_d','cteq5_l','cteq5hj','cteq5hq', 'cteq5f3','cteq5f4','cteq5m1','ctq5hq1','cteq5l1', 'cteq6_m','cteq6_d','cteq6_l','cteq6l1', 'nn23lo','nn23lo1','nn23nlo']
if self['pdlabel'] not in possible_set:
raise InvalidRunCard, 'Invalid PDF set (argument of pdlabel) possible choice are:\n %s' % ','.join(possible_set)
# Hidden values check
if self['qes_ref_fixed'] == -1.0:
self['qes_ref_fixed']=self['mur_ref_fixed']
if self['qes_over_ref'] == -1.0:
self['qes_over_ref']=self['mur_over_ref']
if self['muf1_over_ref'] != -1.0 and self['muf1_over_ref'] == self['muf2_over_ref']:
self['muf_over_ref']=self['muf1_over_ref']
if self['muf1_over_ref'] == -1.0:
self['muf1_over_ref']=self['muf_over_ref']
if self['muf2_over_ref'] == -1.0:
self['muf2_over_ref']=self['muf_over_ref']
if self['muf1_ref_fixed'] != -1.0 and self['muf1_ref_fixed'] == self['muf2_ref_fixed']:
self['muf_ref_fixed']=self['muf1_ref_fixed']
if self['muf1_ref_fixed'] == -1.0:
self['muf1_ref_fixed']=self['muf_ref_fixed']
if self['muf2_ref_fixed'] == -1.0:
self['muf2_ref_fixed']=self['muf_ref_fixed']
# overwrite rw_rscale and rw_fscale when rw_(r/f)scale_(down/up) are explicitly given in the run_card for backward compatibility.
if (self['rw_rscale_down'] != -1.0 and ['rw_rscale_down'] not in self['rw_rscale']) or\
(self['rw_rscale_up'] != -1.0 and ['rw_rscale_up'] not in self['rw_rscale']):
self['rw_rscale']=[1.0,self['rw_rscale_up'],self['rw_rscale_down']]
if (self['rw_fscale_down'] != -1.0 and ['rw_fscale_down'] not in self['rw_fscale']) or\
(self['rw_fscale_up'] != -1.0 and ['rw_fscale_up'] not in self['rw_fscale']):
self['rw_fscale']=[1.0,self['rw_fscale_up'],self['rw_fscale_down']]
# PDF reweighting check
if any(self['reweight_pdf']):
# check that we use lhapdf if reweighting is ON
if self['pdlabel'] != "lhapdf":
raise InvalidRunCard, 'Reweight PDF option requires to use pdf sets associated to lhapdf. Please either change the pdlabel to use LHAPDF or set reweight_pdf to False.'
# make sure set have reweight_pdf and lhaid of length 1 when not including lhapdf
if self['pdlabel'] != "lhapdf":
self['reweight_pdf']=[self['reweight_pdf'][0]]
self['lhaid']=[self['lhaid'][0]]
# make sure set have reweight_scale and dyn_scale_choice of length 1 when fixed scales:
if self['fixed_ren_scale'] and self['fixed_fac_scale']:
self['reweight_scale']=[self['reweight_scale'][0]]
self['dynamical_scale_choice']=[0]
# If there is only one reweight_pdf/reweight_scale, but
# lhaid/dynamical_scale_choice are longer, expand the
# reweight_pdf/reweight_scale list to have the same length
if len(self['reweight_pdf']) == 1 and len(self['lhaid']) != 1:
self['reweight_pdf']=self['reweight_pdf']*len(self['lhaid'])
logger.warning("Setting 'reweight_pdf' for all 'lhaid' to %s" % self['reweight_pdf'][0])
if len(self['reweight_scale']) == 1 and len(self['dynamical_scale_choice']) != 1:
self['reweight_scale']=self['reweight_scale']*len(self['dynamical_scale_choice'])
logger.warning("Setting 'reweight_scale' for all 'dynamical_scale_choice' to %s" % self['reweight_pdf'][0])
# Check that there are no identical elements in lhaid or dynamical_scale_choice
if len(self['lhaid']) != len(set(self['lhaid'])):
raise InvalidRunCard, "'lhaid' has two or more identical entries. They have to be all different for the code to work correctly."
if len(self['dynamical_scale_choice']) != len(set(self['dynamical_scale_choice'])):
raise InvalidRunCard, "'dynamical_scale_choice' has two or more identical entries. They have to be all different for the code to work correctly."
# Check that lenght of lists are consistent
if len(self['reweight_pdf']) != len(self['lhaid']):
raise InvalidRunCard, "'reweight_pdf' and 'lhaid' lists should have the same length"
if len(self['reweight_scale']) != len(self['dynamical_scale_choice']):
raise InvalidRunCard, "'reweight_scale' and 'dynamical_scale_choice' lists should have the same length"
if len(self['dynamical_scale_choice']) > 10 :
raise InvalidRunCard, "Length of list for 'dynamical_scale_choice' too long: max is 10."
if len(self['lhaid']) > 25 :
raise InvalidRunCard, "Length of list for 'lhaid' too long: max is 25."
if len(self['rw_rscale']) > 9 :
raise InvalidRunCard, "Length of list for 'rw_rscale' too long: max is 9."
if len(self['rw_fscale']) > 9 :
raise InvalidRunCard, "Length of list for 'rw_fscale' too long: max is 9."
# make sure that the first element of rw_rscale and rw_fscale is the 1.0
if 1.0 not in self['rw_rscale']:
logger.warning("'1.0' has to be part of 'rw_rscale', adding it")
self['rw_rscale'].insert(0,1.0)
if 1.0 not in self['rw_fscale']:
logger.warning("'1.0' has to be part of 'rw_fscale', adding it")
self['rw_fscale'].insert(0,1.0)
if self['rw_rscale'][0] != 1.0 and 1.0 in self['rw_rscale']:
a=self['rw_rscale'].index(1.0)
self['rw_rscale'][0],self['rw_rscale'][a]=self['rw_rscale'][a],self['rw_rscale'][0]
if self['rw_fscale'][0] != 1.0 and 1.0 in self['rw_fscale']:
a=self['rw_fscale'].index(1.0)
self['rw_fscale'][0],self['rw_fscale'][a]=self['rw_fscale'][a],self['rw_fscale'][0]
# check that all elements of rw_rscale and rw_fscale are diffent.
if len(self['rw_rscale']) != len(set(self['rw_rscale'])):
raise InvalidRunCard, "'rw_rscale' has two or more identical entries. They have to be all different for the code to work correctly."
if len(self['rw_fscale']) != len(set(self['rw_fscale'])):
raise InvalidRunCard, "'rw_fscale' has two or more identical entries. They have to be all different for the code to work correctly."
def update_system_parameter_for_include(self):
# set the pdg_for_cut fortran parameter
pdg_to_cut = set(self['pt_min_pdg'].keys() +self['pt_max_pdg'].keys()+
self['mxx_min_pdg'].keys()+ self['mxx_only_part_antipart'].keys())
pdg_to_cut.discard('__type__')
pdg_to_cut.discard('default')
if len(pdg_to_cut)>25:
raise Exception, "Maximum 25 different PDGs are allowed for PDG specific cut"
if any(int(pdg)<0 for pdg in pdg_to_cut):
logger.warning('PDG specific cuts are always applied symmetrically on particle/anti-particle. Always use positve PDG codes')
raise MadGraph5Error, 'Some PDG specific cuts are defined with negative PDG codes'
if any(pdg in pdg_to_cut for pdg in [21,22,11,13,15]+ range(self['maxjetflavor']+1)):
# Note that this will double check in the fortran code
raise Exception, "Can not use PDG related cuts for massless SM particles/leptons"
if pdg_to_cut:
self['pdg_cut'] = list(pdg_to_cut)
self['ptmin4pdg'] = []
self['ptmax4pdg'] = []
self['mxxmin4pdg'] = []
self['mxxpart_antipart'] = []
for pdg in self['pdg_cut']:
for var in ['pt','mxx']:
for minmax in ['min', 'max']:
if var == 'mxx' and minmax == 'max':
continue
new_var = '%s%s4pdg' % (var, minmax)
old_var = '%s_%s_pdg' % (var, minmax)
default = 0. if minmax=='min' else -1.
self[new_var].append(self[old_var][str(pdg)] if str(pdg) in self[old_var] else default)
#special for mxx_part_antipart
old_var = 'mxx_only_part_antipart'
new_var = 'mxxpart_antipart'
if 'default' in self[old_var]:
default = self[old_var]['default']
self[new_var].append(self[old_var][str(pdg)] if str(pdg) in self[old_var] else default)
else:
if str(pdg) not in self[old_var]:
raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg))
self[new_var].append(self[old_var][str(pdg)])
else:
self['pdg_cut'] = [0]
self['ptmin4pdg'] = [0.]
self['ptmax4pdg'] = [-1.]
self['mxxmin4pdg'] = [0.]
self['mxxpart_antipart'] = [False]
def write(self, output_file, template=None, python_template=False):
"""Write the run_card in output_file according to template
(a path to a valid run_card)"""
if not template:
if not MADEVENT:
template = pjoin(MG5DIR, 'Template', 'NLO', 'Cards',
'run_card.dat')
python_template = True
else:
template = pjoin(MEDIR, 'Cards', 'run_card_default.dat')
python_template = False
super(RunCardNLO, self).write(output_file, template=template,
python_template=python_template)
def create_default_for_process(self, proc_characteristic, history, proc_def):
"""Rules
e+ e- beam -> lpp:0 ebeam:500
p p beam -> set maxjetflavor automatically
"""
# check for beam_id
beam_id = set()
for proc in proc_def:
for leg in proc['legs']:
if not leg['state']:
beam_id.add(leg['id'])
if any(i in beam_id for i in [1,-1,2,-2,3,-3,4,-4,5,-5,21,22]):
maxjetflavor = max([4]+[abs(i) for i in beam_id if -7< i < 7])
self['maxjetflavor'] = maxjetflavor
pass
elif 11 in beam_id or -11 in beam_id:
self['lpp1'] = 0
self['lpp2'] = 0
self['ebeam1'] = 500
self['ebeam2'] = 500
else:
self['lpp1'] = 0
self['lpp2'] = 0
if proc_characteristic['ninitial'] == 1:
#remove all cut
self.remove_all_cut()
class MadLoopParam(ConfigFile):
    """ a class for storing/dealing with the file MadLoopParam.dat
    contains a parser to read it, facilities to write a new file,...
    """
    # Maps the integer codes used in MadLoopParams.dat (e.g. in
    # 'MLReductionLib') to the name of the one-loop reduction tool.
    _ID_reduction_tool_map = {1:'CutTools',
                              2:'PJFry++',
                              3:'IREGI',
                              4:'Golem95',
                              5:'Samurai',
                              6:'Ninja',
                              7:'COLLIER'}
def default_setup(self):
"""initialize the directory to the default value"""
self.add_param("MLReductionLib", "6|7|1")
self.add_param("IREGIMODE", 2)
self.add_param("IREGIRECY", True)
self.add_param("CTModeRun", -1)
self.add_param("MLStabThres", 1e-3)
self.add_param("NRotations_DP", 0)
self.add_param("NRotations_QP", 0)
self.add_param("ImprovePSPoint", 2)
self.add_param("CTLoopLibrary", 2)
self.add_param("CTStabThres", 1e-2)
self.add_param("CTModeInit", 1)
self.add_param("CheckCycle", 3)
self.add_param("MaxAttempts", 10)
self.add_param("ZeroThres", 1e-9)
self.add_param("OSThres", 1.0e-8)
self.add_param("DoubleCheckHelicityFilter", True)
self.add_param("WriteOutFilters", True)
self.add_param("UseLoopFilter", False)
self.add_param("HelicityFilterLevel", 2)
self.add_param("LoopInitStartOver", False)
self.add_param("HelInitStartOver", False)
self.add_param("UseQPIntegrandForNinja", True)
self.add_param("UseQPIntegrandForCutTools", True)
self.add_param("COLLIERMode", 1)
self.add_param("COLLIERComputeUVpoles", True)
self.add_param("COLLIERComputeIRpoles", True)
self.add_param("COLLIERRequiredAccuracy", 1.0e-8)
self.add_param("COLLIERCanOutput",False)
self.add_param("COLLIERGlobalCache",-1)
self.add_param("COLLIERUseCacheForPoles",False)
self.add_param("COLLIERUseInternalStabilityTest",True)
def read(self, finput):
"""Read the input file, this can be a path to a file,
a file object, a str with the content of the file."""
if isinstance(finput, str):
if "\n" in finput:
finput = finput.split('\n')
elif os.path.isfile(finput):
finput = open(finput)
else:
raise Exception, "No such file %s" % input
previous_line= ''
for line in finput:
if previous_line.startswith('#'):
name = previous_line[1:].split()[0]
value = line.strip()
if len(value) and value[0] not in ['#', '!']:
self.__setitem__(name, value, change_userdefine=True)
previous_line = line
def write(self, outputpath, template=None,commentdefault=False):
if not template:
if not MADEVENT:
template = pjoin(MG5DIR, 'Template', 'loop_material', 'StandAlone',
'Cards', 'MadLoopParams.dat')
else:
template = pjoin(MEDIR, 'Cards', 'MadLoopParams_default.dat')
fsock = open(template, 'r')
template = fsock.readlines()
fsock.close()
if isinstance(outputpath, str):
output = open(outputpath, 'w')
else:
output = outputpath
def f77format(value):
if isinstance(value, bool):
if value:
return '.true.'
else:
return '.false.'
elif isinstance(value, int):
return value
elif isinstance(value, float):
tmp ='%e' % value
return tmp.replace('e','d')
elif isinstance(value, str):
return value
else:
raise Exception, "Can not format input %s" % type(value)
name = ''
done = set()
for line in template:
if name:
done.add(name)
if commentdefault and name.lower() not in self.user_set :
output.write('!%s\n' % f77format(self[name]))
else:
output.write('%s\n' % f77format(self[name]))
name=''
continue
elif line.startswith('#'):
name = line[1:].split()[0]
output.write(line)
| 1.828125 | 2 |
buttons.py | htpc-helper/Home-Theatre-Control | 0 | 12760738 | #!/usr/bin/python
import paho.mqtt.publish as publish
import RPi.GPIO as GPIO
import subprocess
import time
import os
#Define list of button parameters
# Each entry binds a BCM GPIO pin (physical push button) to the MQTT topic
# published when that button is pressed.
PIN_CONFIG = [
    {'pin':20, 'mqttTopic':'home/htpc/power/restart'},
    {'pin':21, 'mqttTopic':'home/htpc/power/off'},
    {'pin':13, 'mqttTopic':'home/htpc/kodi/on'},
    {'pin':19, 'mqttTopic':'home/htpc/plex/on'},
    {'pin':26, 'mqttTopic':'home/htpc/steam/on'}
]
#Define other inputs
# HOST/MQTT_BROKER IPs and the WOL MAC address are placeholders ('xxx') that
# must be filled in for a real deployment.
HOST = '10.0.1.xxx'
MQTT_BROKER = '10.0.1.xxx'
IR_CMD = 'irsend SEND_ONCE samsung KEY_POWER2'
WOL_CMD = 'sudo etherwake xx:xx:xx:xx:xx:xx'
#Define functions
def MqttMessage(mqtt_topic, mqtt_broker):
    """Publish an empty payload on *mqtt_topic* via *mqtt_broker*."""
    publish.single(mqtt_topic, '', hostname=mqtt_broker)
    print('MQTT message "%s" sent via broker "%s"' % (mqtt_topic, mqtt_broker))
def CmdBash(command, message):
    """Run *command* through bash, then print *message* unless it is ''."""
    subprocess.call(['bash', '-c', command])
    if message != '':
        print(message)
def CheckStatus(host):
    """Ping *host* once; return True when it answers, False otherwise."""
    print('Checking status of: ' + host)
    unreachable = os.system("ping -c 1 " + host)
    if unreachable:
        print('Fail, host "' + host + '" is unreachable')
        return False
    print('Success')
    return True
def Startup(host, mqtt_topic):
    """Return True when *host* is already up; otherwise kick off the
    TV-on + wake-on-LAN startup sequence (and return None)."""
    if CheckStatus(host):
        return True
    print('Initiating startup sequence instead')
    # First power the TV via infrared, then wake the HTPC with a magic packet.
    for command, message in ((IR_CMD, 'IR power signal sent to TV'),
                             (WOL_CMD, 'Magic packet sent to wake HTPC')):
        CmdBash(command, message)
def Callback(channel):
    """GPIO edge callback: map the pressed *channel* to its MQTT command
    and dispatch it.

    Fixes applied:
      * ``mqtt_topic == 'a' or 'b'`` was always truthy, so every button was
        treated as a power command; replaced with a proper membership test;
      * the startup helper was called as lowercase ``startup`` (NameError at
        runtime) instead of ``Startup``;
      * an unknown channel previously raised NameError on the unbound
        ``mqtt_topic``; it is now reported and ignored.
    """
    #Find mqttTopic corresponding to PIN
    mqtt_topic = None
    for item in PIN_CONFIG:
        if item['pin'] == channel:
            mqtt_topic = item['mqttTopic']
    if mqtt_topic is None:
        print('Button ' + str(channel) + ' is not configured, ignoring')
        return
    #Print details of instruction being processed
    print('Button ' + str(channel) + ' was pressed which corresponds to the command: ' + mqtt_topic)
    #Process topic
    if mqtt_topic in ('home/htpc/power/restart', 'home/htpc/power/off'):
        if CheckStatus(HOST): #check whether htpc already on
            MqttMessage(mqtt_topic, MQTT_BROKER) #trigger shutdown directly
            if mqtt_topic == 'home/htpc/power/off':
                time.sleep(5) #wait 5s, then turn off TV
                CmdBash(IR_CMD, 'IR power signal sent to TV')
    elif Startup(HOST, mqtt_topic): #send MQTT message if PC on, else initiate startup sequence
        MqttMessage(mqtt_topic, MQTT_BROKER)
def GPIOsetup(pin):
    """Configure *pin* as a pulled-up input firing Callback on a press.

    Buttons pull the line to ground, so a press shows up as a falling edge;
    the 10 s bouncetime also guards against accidental double presses.
    """
    GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    GPIO.add_event_detect(
        pin, GPIO.FALLING, callback=Callback, bouncetime=10000)
#@@@ MAIN PROGRAM @@@
#Setup buttons according to parameters defined in list
GPIO.setmode(GPIO.BCM)
for item in PIN_CONFIG:
    GPIOsetup(item['pin'])
# Keep the process alive so the GPIO event callbacks (which run on their own
# thread) can fire. Fix applied: the original `while True: continue` loop
# busy-waited and pinned a CPU core; sleeping keeps the process idle instead.
while True:
    time.sleep(1)
executables/experiments/single_execution.py | tandriamil/BrFAST | 6 | 12760739 | <filename>executables/experiments/single_execution.py
#!/usr/bin/python3
"""Execute the three exploration algorithms on the a given dataset.
This script requires:
- The dataset preprocessed into a 'fingerprints.csv' file.
-> See executables.dataset.preprocess_*
- The measures of the usability cost of the attributes of the dataset
respectively stored in 'memory.csv' and 'instability.csv'.
-> See executables.measures.memory/instability
"""
import argparse
import importlib
from os import path
from pathlib import PurePath
from loguru import logger
from brfast.data.dataset import FingerprintDatasetFromCSVFile
from brfast.exploration.conditional_entropy import ConditionalEntropy
from brfast.exploration.entropy import Entropy
from brfast.exploration.fpselect import FPSelect
from brfast.measures.sensitivity.fpselect import TopKFingerprints
from brfast.measures.usability_cost.fpselect import (
CostDimension, MemoryInstability)
# Import the engine of the analysis module (pandas or modin)
from brfast.config import params
from executables.experiments import read_csv_analysis_as_dict
pd = importlib.import_module(params['DataAnalysis']['engine'])
# The input files for the execution
# (file names expected inside the directory passed as input_data_dir)
FINGERPRINT_DATASET_CSV = 'fingerprints.csv'
MEMORY_COST_CSV = 'memory.csv'
INSTABILITY_COST_CSV = 'instability.csv'
# The weights of the dimensions of the usability cost dimensions
# (instability is weighted much more heavily than memory)
USABILITY_COST_WEIGHTS = {
    CostDimension.MEMORY: 1, CostDimension.INSTABILITY: 10000}
def main():
    """Run the selected exploration method on the dataset given on the
    command line.

    Loads the fingerprint dataset and the precomputed memory/instability
    measures, builds the usability-cost and sensitivity measures, executes
    the chosen exploration algorithm, and optionally saves its trace.
    """
    args = handle_arguments()
    data_path = PurePath(args.input_data_dir[0])
    logger.debug(f'Considering the data path {data_path}.')
    # Generate the fingerprint dataset
    dataset_path = data_path.joinpath(FINGERPRINT_DATASET_CSV)
    if not path.isfile(dataset_path):
        raise ValueError(f'No fingerprint dataset is at {dataset_path}.')
    dataset = FingerprintDatasetFromCSVFile(dataset_path)
    logger.info(f'Considering the dataset {dataset}.')
    # Read the average fingerprint size and instability of the attributes
    # (precomputed by executables.measures.memory/instability)
    memory_result_path = data_path.joinpath(MEMORY_COST_CSV)
    memory_results = read_csv_analysis_as_dict(
        memory_result_path, dataset.candidate_attributes)
    instability_result_path = data_path.joinpath(INSTABILITY_COST_CSV)
    instability_results = read_csv_analysis_as_dict(
        instability_result_path, dataset.candidate_attributes)
    # Generate the usability cost measure
    usability_cost_measure = MemoryInstability(
        memory_results, instability_results, USABILITY_COST_WEIGHTS)
    logger.info('Considering the usability cost measure '
                f'{usability_cost_measure}.')
    # Generate the sensitivity measure (top-k fingerprints attacker model)
    attacker_submissions = args.attacker_submissions
    sensitivity_measure = TopKFingerprints(dataset, attacker_submissions)
    logger.info(f'Considering the sensitivity measure {sensitivity_measure}.')
    # Generate the exploration method
    logger.info(f'Considering the exploration method {args.method}.')
    logger.info('Considering the sensitivity threshold '
                f'{args.sensitivity_threshold}.')
    # Entropy baseline
    if args.method == 'entropy':
        exploration = Entropy(sensitivity_measure, usability_cost_measure,
                              dataset, args.sensitivity_threshold)
    # Conditional entropy baseline
    elif args.method == 'conditional_entropy':
        exploration = ConditionalEntropy(
            sensitivity_measure, usability_cost_measure, dataset,
            args.sensitivity_threshold)
    # FPSelect
    elif args.method == 'fpselect':
        logger.info(f'Considering {args.explored_paths} explored paths.')
        if args.no_pruning:
            logger.info('Do not use the pruning methods.')
        exploration = FPSelect(
            sensitivity_measure, usability_cost_measure, dataset,
            args.sensitivity_threshold, args.explored_paths,
            not args.no_pruning)
    # Unknown method (should be unreachable: argparse restricts the choices)
    else:
        raise ValueError(f'Unknown exploration method {args.method}.')
    # The exploration itself
    logger.info(f'Considering the exploration method {exploration}.')
    logger.info('Beginning of the exploration...')
    exploration.run()
    solution = exploration.get_solution()
    explored_attribute_sets = len(exploration.get_explored_attribute_sets())
    logger.info(f'The solution found by {args.method} is {solution} '
                f'after exploring {explored_attribute_sets} attribute sets.')
    # Save the trace file
    if args.trace_file:
        exploration.save_exploration_trace(args.trace_file)
def handle_arguments() -> argparse.Namespace:
    """Collect, check and give back the arguments as a Namespace object.

    Returns:
        The arguments as a Namespace object which are accessible as properties.

    Raises:
        ValueError: If input_data_dir does not point to an existing directory.
    """
    # Handle the arguments
    parser = argparse.ArgumentParser(
        description=('Process the attribute selection on a dataset.'))
    parser.add_argument('input_data_dir', type=str, nargs=1,
                        help='The path to the directory containing the data.')
    parser.add_argument('-m', '--method', metavar='selection_method',
                        type=str, nargs='?', default='fpselect',
                        choices=['fpselect', 'entropy', 'conditional_entropy'],
                        help=('The attribute selection method (default is '
                              'fpselect)'))
    parser.add_argument('-t', '--sensitivity-threshold', metavar='threshold',
                        type=float, nargs='?', default=0.10,
                        help='The sensitivity threshold (default is 0.10).')
    parser.add_argument('-k', '--attacker-submissions', metavar='submissions',
                        type=int, nargs='?', default=4,
                        help=('The number of submissions by the attacker '
                              '(default is 4).'))
    parser.add_argument('-o', '--trace-file', metavar='trace_file',
                        type=str, nargs='?', default=None,
                        help='If set, save the trace to this file.')
    parser.add_argument('-p', '--explored-paths', metavar='paths',
                        type=int, nargs='?', default=3,
                        help=('The number of paths explored by FPSelect '
                              '(default is 3).'))
    # Bug fix: the original used action='store_false', making no_pruning True
    # by default (pruning silently disabled) and False when --no-pruning was
    # given — the exact inverse of the flag's meaning. store_true restores the
    # intended semantics: passing --no-pruning disables the pruning methods.
    parser.add_argument('--no-pruning', action='store_true',
                        help='Do not use the pruning methods of FPSelect.')
    args = parser.parse_args()

    # Check the path to the dataset
    input_data_dir_path = args.input_data_dir[0]
    if not path.isdir(input_data_dir_path):
        raise ValueError('The input_data_dir_path should point to a valid '
                         'directory.')
    return args
# Script entry point: run the attribute-selection experiment.
if __name__ == '__main__':
    main()
| 2.59375 | 3 |
src/translators/Translator.py | StrandHQ/strand-slack | 0 | 12760740 | <gh_stars>0
from src.utilities.logging import get_logger
class Translator:
    """Base class for objects translating data between the Slack client and
    the Strand API client. Concrete subclasses implement translate()."""

    def __init__(self, slack_client_wrapper=None, strand_api_client_wrapper=None):
        self.logger = get_logger(self.__class__.__name__)
        self.slack_client_wrapper = slack_client_wrapper
        self.strand_api_client_wrapper = strand_api_client_wrapper

    def translate(self):
        """Perform the translation; must be overridden by subclasses."""
        raise NotImplementedError
| 1.851563 | 2 |
Programming-101-v3/week7/1-Scan-Bg-Web/test_histogram_class.py | pepincho/Python101-and-Algo1-Courses | 2 | 12760741 | import unittest
from histogram_class import Histogram
class Test_Histogram(unittest.TestCase):
    """Unit tests for the Histogram counter.

    Improvements: assertTrue(x == y) replaced by assertEqual (which reports
    both values on failure), and the duplicated fixture setup extracted into
    a helper.
    """

    def _make_histogram(self):
        # Shared fixture: two 'Apache' hits and one 'IIS' hit.
        h = Histogram()
        h.add("Apache")
        h.add("Apache")
        h.add("IIS")
        return h

    def test_add_to_histogram(self):
        h = self._make_histogram()
        self.assertEqual(h.count("Apache"), 2)
        self.assertEqual(h.count("IIS"), 1)

    def test_get_dict(self):
        h = self._make_histogram()
        wanted_result = {"Apache": 2, "IIS": 1}
        self.assertEqual(h.get_dict(), wanted_result)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| 3.359375 | 3 |
src/PartA.3_BroniewskiTirmizi.py | abroniewski/DBLP-Research-Paper-Graph-Modeling | 0 | 12760742 | <reponame>abroniewski/DBLP-Research-Paper-Graph-Modeling
import pandas as pd
from os.path import join
import configparser
from utils import Neo4jConnection
import time
from tqdm import tqdm
##################################
# Global Variable
##################################
# Here we add a local project import directory specific to each local neo4j instance
config = configparser.RawConfigParser()
config.read('local.config')
details_dict = dict(config.items('PROJECT_DIR'))
# Directory neo4j is allowed to import CSV files from (machine specific).
PROJECT_IMPORT_PATH = details_dict["dir_path"]
PROCESSED_DIR = "../data/processed/"
# Shared connection to the local neo4j instance; the password is a
# placeholder and must be supplied locally.
conn = Neo4jConnection(uri="bolt://localhost:7687", user="neo4j", pwd="<PASSWORD>")
##################################
# Create publication reviews
##################################
def generate_publication_review_data():
    """Create dummy review data for every publication and persist it.

    Reads the processed publications CSV, attaches three canned reviews and
    three canned approval decisions to each publication, then writes the
    augmented table to the processed-data folder and to the local neo4j
    import directory.

    Because it is dummy data, it is not semantically correct: the reviewers
    are not chosen from the same publication as that of the submitted
    journal, and a reviewer could be the author that wrote the paper.

    Fixes applied: the docstring documented a ``df`` parameter that does not
    exist; the input path now reuses the PROCESSED_DIR constant (same value);
    the unused row index is named ``_``.

    :return: pd.DataFrame
    """
    df = pd.read_csv(join(PROCESSED_DIR, "publications_processed.csv"))
    list_of_reviews = []
    list_of_decisions = []
    for _ in range(len(df)):
        reviews = []
        decision = []
        for h in range(3):
            reviews.append(f"This is review #{h+1} of the paper. It was a great read!")
            decision.append(f"reviewer #{h+1} has approved")
        list_of_reviews.append(reviews)
        list_of_decisions.append(decision)
    # Store the per-row lists as comma-separated strings in the dataframe.
    df["reviews"] = [','.join(map(str, l)) for l in list_of_reviews]
    df["decisions"] = [','.join(map(str, l)) for l in list_of_decisions]
    df.to_csv(join(PROCESSED_DIR, "publications_processed_evolution.csv"), index=False)
    df.to_csv(join(PROJECT_IMPORT_PATH, "publications_processed_evolution.csv"), index=False)
    return df
def create_review_group_unique_constraint():
    """Ensure ReviewGroup.group_id is unique across the graph."""
    statement = '''
    CREATE CONSTRAINT ON (n:ReviewGroup) ASSERT n.group_id IS UNIQUE
    '''
    conn.query(statement, db='neo4j')
def query_add_peer_review_group_details(df):
    """Attach the dummy review text and decision to every
    (Author)-[IN_REVIEW_GROUP]->(ReviewGroup) relationship described in *df*.

    Expects the columns produced by generate_publication_review_data:
    'review_group', 'reviewers', 'reviews' and 'decisions' (the last three
    being comma-separated strings of equal length).
    """
    print(f"Starting function to load peer review group details.")
    tic = time.perf_counter()
    df = df.reset_index()
    for df_index, row in tqdm(df.iterrows(), total=len(df)):
        group = row['review_group']
        # The i-th reviewer wrote the i-th review and the i-th decision.
        for review_index, person in enumerate(row['reviewers'].split(",")):
            author = person
            review = row['reviews'].split(",")[review_index]
            decision = row['decisions'].split(",")[review_index]
            # NOTE(review): values are interpolated unescaped into the Cypher
            # text; fine for this generated dummy data, but driver parameters
            # would be safer for arbitrary names.
            query = f'''
            MATCH (group:ReviewGroup {{group_id: toInteger({group})}})<-[r:IN_REVIEW_GROUP]-(reviewer:Author
            {{name: "{author}"}})
            SET r.ReviewText="{review}"
            SET r.Decision="{decision}"
            '''
            conn.query(query, db='neo4j')
    toc = time.perf_counter()
    print(f"Total time for query was: {toc-tic:0.4f} seconds\n")
def query_add_author_affiliation(df):
    """Attach an affiliation to every Author node referenced in *df*.

    When a row has as many affiliations as authors, each author receives
    their own affiliation; otherwise the mapping is ambiguous and every
    author of that row gets a placeholder value.

    Fix applied: the fallback branch iterated ``enumerate(author_list)``, so
    ``author`` was an (index, name) tuple whose repr was interpolated into
    the Cypher query; it now iterates the names directly.
    """
    print(f"\nStarting query to add affiliation to each author.")
    tic = time.perf_counter()
    df = df.reset_index()
    counter = 0
    for df_index, row in tqdm(df.iterrows(), total=len(df)):
        author_list = row['authors'].split(",")
        affiliation_list = row['authors_with_affiliations'].split(";")
        if len(author_list) == len(affiliation_list):
            for aff_index, affiliation in enumerate(affiliation_list):
                author = author_list[aff_index]
                # NOTE(review): names are interpolated unescaped into the
                # Cypher text; driver parameters would be safer.
                query = f'''
                MATCH (a:Author {{name: "{author}"}})
                SET a.Affiliation="{affiliation}"
                '''
                conn.query(query, db='neo4j')
        else:
            for author in author_list:
                counter += 1
                query = f'''
                MATCH (a:Author {{name: "{author}"}})
                SET a.Affiliation= "Affiliation information not available"
                '''
                conn.query(query, db='neo4j')
    toc = time.perf_counter()
    print(f"There were {counter} authors that did not have an affiliation added.")
    print(f"Total time for query was: {toc-tic:0.4f} seconds\n")
##################################
# Main Program Run
##################################
# Pipeline: generate the dummy review data, enforce the uniqueness
# constraint, then load the review details and author affiliations.
if __name__ == '__main__':
    df = generate_publication_review_data()
    create_review_group_unique_constraint()
    query_add_peer_review_group_details(df)
    query_add_author_affiliation(df)
scrapy/apsnypress/scripts/Remove_json_duplicates.py | Plkmoi/Multilingual-Parallel-Corpus | 11 | 12760743 | <reponame>Plkmoi/Multilingual-Parallel-Corpus<filename>scrapy/apsnypress/scripts/Remove_json_duplicates.py
import re
import io
import json
# Deduplicate the scraped Russian articles by their 'name' field (later
# entries win) and write the result out as a name-keyed JSON object.
with open('../data_ru.json', 'r') as source:
    articles = json.load(source)

deduplicated = {entry['name']: entry for entry in articles}

with open('../data_ru_see.json', 'w') as sink:
    json.dump(deduplicated, sink, indent=1)
| 2.8125 | 3 |
skelm/elm_large.py | GrumpySapiens/scikit-elm | 13 | 12760744 | <gh_stars>10-100
import numpy as np
from sklearn.base import RegressorMixin
from sklearn.metrics import pairwise_distances
from sklearn.utils.validation import check_is_fitted, check_array
import dask.array as da
import dask.dataframe as dd
from .elm import BasicELM
from dask.distributed import Client, LocalCluster, wait
from .utils import _is_list_of_strings, _dense, HiddenLayerType, dummy
def _read_numeric_file(fname):
try:
return dd.read_parquet(fname)
except:
pass
try:
return dd.read_csv(fname)
except:
pass
try:
return np.load(fname)
except:
pass
class LargeELMRegressor(BasicELM, RegressorMixin):
    """ELM Regressor for larger-than-memory problems.
    Uses `Dask <https://dask.org>`_ for batch analysis of data in Parquet files.
    .. attention:: Why do I need Parquet files?
        Parquet files provide necessary information about the data without loading whole file content from
        disk. It makes a tremendous runtime difference compared to simpler `.csv` or `.json` file formats.
        Reading from files saves memory by loading data in small chunks, supporting arbitrary large input files.
        It also solves current memory leaks with Numpy matrix inputs in Dask.
        Any data format can be easily converted to Parquet, see `Analytical methods <techniques.html>`_ section.
        HDF5 is almost as good as Parquet, but performs worse with Dask due to internal data layout.
    .. todo: Write converters.
    .. todo: Memo about number of workers: one is good, several cover disk read latency but need more memory.
        On one machine matrix operators always run in parallel, do not benefit from Dask.
    .. todo: Memory consumption with large number of neurons - 100,000 neurons require 200GB or swap space, with
        read+write reaching 1GB/s. Suggested a fast SSD, or HDD + extra workers to hide swap latency.
        Mention that Dask is not the perfect solution, kept here for future updates. And it actually solves
        stuff larger than memory, albeit at a very high time+swap cost.
    .. todo: Avoid large batch sizes as workers can fail, safe bet is 2000-5000 range.
    .. todo: Fast HtH and in-place Cholesky solver.
    .. todo: Pro tip in documentation: run ELM with dummy 1000 data samples and 1e+9 regularization,
        This will test possible memory issues for workers without wasting your time on computing full HH.
    .. todo: Option to keep full HH permanently somewhere at disk. Saves before the final step,
        avoids failures from memory issues during Cholesky solver.
    .. todo: GPU + batch Cholesky solver, for both ELM and LargeELM.
    Requirements
    ------------
        * Pandas
        * pyarrow
        * python-snappy
    Parameters
    ----------
    batch_size : int
        Batch size used for both data samples and hidden neurons. With batch Cholesky solver, allows for very large
        numbers of hidden neurons of over 100,000; limited only by the computation time and disk swap space.
        .. hint:: Include bias and original features for best performance.
        ELM will include a bias term (1 extra feature), and the original features with `include_original_features=True`.
        For optimal performance, choose `batch_size` to be equal or evenly divide the
        `n_neurons + 1 (bias) + n_inputs (if include_original_features=True)`.
        .. todo:: Exact batch_size vs. GPU performance
    """
    def __del__(self):
        # Release the Dask resources started in ``_setup_dask_client`` (only if fit was ever called).
        if hasattr(self, 'client_'):
            self.client_.close()
            self.cluster_.close()
    def _setup_dask_client(self):
        """Start a local Dask cluster/client and persist hidden-layer weights on it."""
        # NOTE(review): worker count, memory limit and especially ``local_dir``
        # (a machine-specific path) are hard-coded — should come from configuration.
        self.cluster_ = LocalCluster(
            n_workers=4, threads_per_worker=1,
            local_dir="/Users/akusok/wrkdir/dask-temp",
            memory_limit="8GB"
        )
        self.client_ = Client(self.cluster_)
        # Push hidden-layer projection weights to the cluster once, as dense Dask arrays.
        W_list = [hl.projection_.components_ for hl in self.hidden_layers_]
        W_dask = [da.from_array(_dense(W), chunks=self.bsize_) for W in W_list]
        self.W_ = self.client_.persist(W_dask)
        def foo():
            import os
            os.environ['OMP_NUM_THREADS'] = '1'
        # Limit every worker to one OpenMP thread; parallelism comes from Dask workers instead.
        self.client_.run(foo)
        print("Running on:", self.client_)
        try:
            dashboard = self.client_.scheduler_info()['address'].split(":")
            dashboard[0] = "http"
            dashboard[-1] = str(self.client_.scheduler_info()['services']['dashboard'])
            print("Dashboard at", ":".join(dashboard))
        except Exception:
            # Dashboard address is a best-effort convenience printout; a bare
            # ``except:`` here would also swallow KeyboardInterrupt/SystemExit.
            pass
    def _project(self, X_dask):
        """Compute hidden layer output with Dask functionality.

        Column order of the result: hidden layer outputs (in layer order),
        then the original features (if requested), then the bias column.
        """
        H_list = []
        for hl, W in zip(self.hidden_layers_, self.W_):
            if hl.hidden_layer_ == HiddenLayerType.PAIRWISE:
                # Pairwise-distance neurons: distance from each sample to each row of W.
                H0 = X_dask.map_blocks(
                    pairwise_distances,
                    W,
                    dtype=X_dask.dtype,
                    chunks=(X_dask.chunks[0], (W.shape[0],)),
                    metric=hl.pairwise_metric
                )
            else:
                XW_dask = da.dot(X_dask, W.transpose())
                if hl.ufunc_ is dummy:
                    H0 = XW_dask
                elif hl.ufunc_ is np.tanh:
                    # Use Dask's native tanh for a lazy, chunk-wise transform.
                    H0 = da.tanh(XW_dask)
                else:
                    H0 = XW_dask.map_blocks(hl.ufunc_)
            H_list.append(H0)
        if self.include_original_features:
            H_list.append(X_dask)
        # Bias column goes last for easier rechunk-merge (see ``fit`` docstring).
        H_list.append(da.ones((X_dask.shape[0], 1)))
        H_dask = da.concatenate(H_list, axis=1).rechunk(self.bsize_)
        return H_dask
    def _compute(self, X, y, sync_every, HH=None, HY=None):
        """Computing matrices HH and HY, the actually long part.
        .. todo: actually distributed computations that scatter batches of data file names,
            and reduce-sum the HH,HY matrices.
        """
        # processing files
        for i, X_file, y_file in zip(range(len(X)), X, y):
            X_dask = dd.read_parquet(X_file).to_dask_array(lengths=True)
            Y_dask = dd.read_parquet(y_file).to_dask_array(lengths=True)
            H_dask = self._project(X_dask)
            if HH is None:  # first iteration
                HH = da.dot(H_dask.transpose(), H_dask)
                HY = da.dot(H_dask.transpose(), Y_dask)
            else:
                HH += da.dot(H_dask.transpose(), H_dask)
                HY += da.dot(H_dask.transpose(), Y_dask)
                if sync_every is not None and i % sync_every == 0:
                    wait([HH, HY])
            # synchronization: materialize partial sums every ``sync_every`` files
            # to bound the size of the lazy task graph.
            if sync_every is not None and i % sync_every == 0:
                HH, HY = self.client_.persist([HH, HY])
        # finishing solution
        if sync_every is not None:
            wait([HH, HY])
        return HH, HY
    def _solve(self, HH, HY):
        """Compute output weights from HH and HY using Dask functionality.
        """
        # make HH/HY divisible by chunk size
        n_features, _ = HH.shape
        padding = 0
        if n_features > self.bsize_ and n_features % self.bsize_ > 0:
            print("Adjusting batch size {} to n_features {}".format(self.bsize_, n_features))
            # Zero-pad up to the next multiple of the batch size; the padding
            # rows/columns do not change the solution and are cut off below.
            padding = self.bsize_ - (n_features % self.bsize_)
            P01 = da.zeros((n_features, padding))
            P10 = da.zeros((padding, n_features))
            P11 = da.zeros((padding, padding))
            HH = da.block([[HH, P01],
                           [P10, P11]])
            P1 = da.zeros((padding, HY.shape[1]))
            HY = da.block([[HY],
                           [P1]])
        # rechunk, add bias, and solve
        HH = HH.rechunk(self.bsize_) + self.alpha * da.eye(HH.shape[1], chunks=self.bsize_)
        HY = HY.rechunk(self.bsize_)
        B = da.linalg.solve(HH, HY, sym_pos=True)
        if padding > 0:
            # Drop the rows that correspond to the zero padding added above.
            B = B[:n_features]
        return B
    def fit(self, X, y=None, sync_every=10):
        """Fits an ELM with data in a bunch of files.
        Model will use the set of features from the first file.
        Same features must have same names across the whole dataset.
        .. todo: Check what happens if features are in different order or missing.
        Does **not** support sparse data.
        .. todo: Check if some sparse data would work.
        .. todo: Check that sync_every does not affect results
        .. todo: Add single precision
        .. todo: Parquet file format examples in documentation
        Original features and bias are added to the end of data, for easier rechunk-merge. This way full chunks
        of hidden neuron outputs stay intact.
        Parameters
        ----------
        X : [str]
            List of input data files in Parquet format.
        y : [str]
            List of target data files in Parquet format.
        sync_every : int or None
            Synchronize computations after this many files are processed. None for running without synchronization.
            Less synchronization improves run speed with smaller data files, but may result in large swap space usage
            for large data problems. Use smaller number for more frequent synchronization if swap space
            becomes a problem.
        """
        if not _is_list_of_strings(X) or not _is_list_of_strings(y):
            raise ValueError("Expected X and y as lists of file names.")
        if len(X) != len(y):
            raise ValueError("Expected X and y as lists of files with the same length. "
                             "Got len(X)={} and len(y)={}".format(len(X), len(y)))
        # read first file and get parameters
        X_dask = dd.read_parquet(X[0]).to_dask_array(lengths=True)
        Y_dask = dd.read_parquet(y[0]).to_dask_array(lengths=True)
        n_samples, n_features = X_dask.shape
        if hasattr(self, 'n_features_') and self.n_features_ != n_features:
            raise ValueError('Shape of input is different from what was seen in `fit`')
        _, n_outputs = Y_dask.shape
        if hasattr(self, 'n_outputs_') and self.n_outputs_ != n_outputs:
            raise ValueError('Shape of outputs is different from what was seen in `fit`')
        # set batch size, default is bsize=2000 or all-at-once with less than 10_000 samples
        self.bsize_ = self.batch_size
        if self.bsize_ is None:
            self.bsize_ = n_samples if n_samples < 10 * 1000 else 2000
        # init model if not fit yet
        if not hasattr(self, 'hidden_layers_'):
            self.n_features_ = n_features
            self.n_outputs_ = n_outputs
            # A tiny in-memory sample is enough to initialize hidden-layer parameters.
            X_sample = X_dask[:10].compute()
            self._init_hidden_layers(X_sample)
        self._setup_dask_client()
        HH, HY = self._compute(X, y, sync_every=sync_every)
        self.B = self._solve(HH, HY)
        self.is_fitted_ = True
        return self
    def predict(self, X):
        """Prediction works with both lists of Parquet files and numeric arrays.
        Parameters
        ----------
        X : array-like, [str]
            Input data as list of Parquet files, or as a numeric array.
        Returns
        -------
        Yh : array, shape (n_samples, n_outputs)
            Predicted values for all input samples.
        .. attention:: Returns all outputs as a single in-memory array!
            Danger of running out out memory for high-dimensional outputs, if a large set of input
            files is provided. Feed data in smaller batches in such case.
        """
        check_is_fitted(self, 'is_fitted_')
        if _is_list_of_strings(X):
            Yh_list = []
            # processing files
            for X_file in X:
                X_dask = dd.read_parquet(X_file).to_dask_array(lengths=True)
                H_dask = self._project(X_dask)
                Yh_list.append(da.dot(H_dask, self.B))
            Yh_dask = da.concatenate(Yh_list, axis=0)
            return Yh_dask.compute()
        else:
            X = check_array(X, accept_sparse=True)
            # BUG FIX: the in-memory path previously stacked columns as
            # [bias, original features, hidden layers], while ``_project`` —
            # and therefore the trained ``self.B`` — uses
            # [hidden layers, original features, bias]. The orders must match.
            H = [hl.transform(X) for hl in self.hidden_layers_]
            if self.include_original_features:
                H.append(_dense(X))
            H.append(np.ones((X.shape[0], 1)))
            return np.hstack(H) @ self.B.compute()
| 2.3125 | 2 |
server/player/admin.py | Xelia/mahjong-portal | 0 | 12760745 | <filename>server/player/admin.py
from django import forms
from django.contrib import admin
from player.models import Player
from player.tenhou.models import TenhouNickname
class PlayerForm(forms.ModelForm):
    # Admin edit form for Player. The plain ``first_name``/``last_name`` fields
    # are excluded — presumably the localized *_ru/*_en variants are edited
    # instead; confirm against the Player model definition.
    class Meta:
        model = Player
        exclude = ['first_name', 'last_name']
class TenhouNicknameInline(admin.TabularInline):
    # Lets a player's Tenhou nicknames be edited inline on the Player admin page.
    model = TenhouNickname
    extra = 1  # show one blank row for adding a new nickname
class PlayerAdmin(admin.ModelAdmin):
    # Admin configuration for Player: custom form, inline Tenhou nicknames,
    # and search across both Russian and English name variants.
    form = PlayerForm
    inlines = [
        TenhouNicknameInline
    ]
    # Auto-fill the slug from the English name parts as the admin user types.
    prepopulated_fields = {'slug': ['last_name_en', 'first_name_en']}
    list_display = ['last_name', 'first_name', 'city', 'pantheon_id']
    list_filter = ['is_hide']
    search_fields = ['first_name_ru', 'first_name_en', 'last_name_ru', 'last_name_en', 'ema_id']
    def get_queryset(self, request):
        # Show every player in the admin, including hidden ones: uses the
        # ``all_objects`` manager rather than the default manager (which
        # presumably filters on ``is_hide`` — confirm against the model).
        return Player.all_objects.all()
admin.site.register(Player, PlayerAdmin)
| 2.25 | 2 |
src/pipeline/pipelinetree.py | akolonin/language-learning | 0 | 12760746 | import os
import logging
from typing import Dict, List, Any, Union, Callable, NewType
from ..common.absclient import AbstractPipelineComponent
from ..grammar_tester.grammartester import GrammarTesterComponent
from ..grammar_learner import GrammarLearnerComponent
from ..text_parser import TextParserComponent
from ..dash_board.textdashboard import TextFileDashboardComponent
from .varhelper import get_path_from_dict, subst_variables_in_str, subst_variables_in_dict, subst_variables_in_dict2
from .pipelinetreenode import PipelineTreeNode2
# Public API of this module.
__all__ = ['build_tree', 'run_tree']
logger = logging.getLogger(__name__)
class PathCreatorComponent(AbstractPipelineComponent):
    """Pipeline component whose only job is to make sure a directory exists."""
    def __init__(self):
        pass
    def validate_parameters(self, **kwargs):
        # Any parameter set is acceptable; nothing to check.
        return True
    def run(self, **kwargs):
        # Running the component itself is a no-op; the work happens in create().
        return {}
    @staticmethod
    def create(**kwargs):
        # Create the requested directory (if any) and echo the path back.
        target = kwargs.get("path")
        if target is not None and not os.path.isdir(target):
            os.makedirs(target)
        return {"path": target}
# Registry mapping pipeline-configuration component names to their classes;
# ``get_component`` instantiates entries from this table.
PIPELINE_COMPONENTS = {
    "path-creator": PathCreatorComponent,
    "grammar-tester": GrammarTesterComponent,
    "grammar-learner": GrammarLearnerComponent,
    "text-parser": TextParserComponent,
    "dash-board": TextFileDashboardComponent
}
def get_component(name: str, params: dict) -> AbstractPipelineComponent:
    """
    Create an instance of the pipeline component.

    :param name: Pipeline component name (a key of PIPELINE_COMPONENTS).
    :param params: Keyword arguments passed to the component constructor.
    :return: AbstractPipelineComponent instance pointer.
    :raises Exception: when the name is unknown, the instance is not a proper
                       pipeline component, or the constructor itself fails.
    """
    # Resolve the class first, so a KeyError raised *inside* a component's
    # constructor is not mistaken for an unknown component name (the old
    # version wrapped both steps in one try/except KeyError).
    component_class = PIPELINE_COMPONENTS.get(name)
    if component_class is None:
        raise Exception("Error: '{}' is not a valid pipeline component name.".format(name))
    try:
        # Create an instance of specified pipeline component
        component = component_class(**params)
        # Check the instance to be proper pipeline component
        if not isinstance(component, AbstractPipelineComponent):
            raise Exception("Error: " + str(type(component)) + " is not an instance of AbstractPipelineComponent")
        return component
    except Exception as err:
        logger.error(str(type(err)) + ": " + str(err))
        # Bare ``raise`` preserves the original traceback exactly.
        raise
def single_proc_exec(node: PipelineTreeNode2) -> None:
    """Execute one pipeline tree node in the current process.

    Creates the node's output directory when requested, runs optional
    pre-exec requests, instantiates and runs the node's component, then
    runs optional post-exec requests (which also receive the run results).
    """
    if node is None:
        return
    leaf = node._environment["LEAF"]
    create = node._environment.get("CREATE_LEAF", False)
    # Create path if it does not exist
    if create and not os.path.isdir(leaf):
        os.makedirs(leaf)
    parameters = node._parameters
    result = {}
    pre_exec = parameters.get("pre-exec-req", None)
    if pre_exec:
        for req in pre_exec:
            # NOTE(review): only the *last* pre-exec result survives the loop
            # and is merged into the component parameters — confirm intentional.
            result = handle_request(node, req)
    # Create component instance
    component = get_component(node._component_name, parameters)
    # Execute component
    result = component.run(**{**parameters, **result})
    post_exec = parameters.get("post-exec-req", None)
    if post_exec:
        for req in post_exec:
            # Post-exec requests see the component's results merged into them.
            handle_request(node, {**req, **result})
    # Just for debug purposes
    logger.info(node._component_name + ": successfull execution")
def handle_request(node: PipelineTreeNode2, req: dict):
    """
    Handle a pre/post-execute request by dispatching it to a method of one
    of the pipeline's static components.

    :param node: Pipeline tree node reference.
    :param req: Request parameter dictionary. Must contain 'obj' in the form
                "<component-name>.<method-name>"; the remaining items are
                passed to the method as keyword arguments. Note: 'obj' is
                popped, i.e. ``req`` is mutated.
    :return: Whatever the dispatched method returns.
    """
    obj = req.pop("obj", None)
    if obj is None:
        raise Exception("Error: Required parameter 'obj' does not exist.")
    # Split "<name>.<method>" on the first dot only.
    name, sep, meth = str(obj).partition(".")
    if not sep:
        # BUG FIX: the old message said "comma" although the separator is a dot.
        raise Exception("Error: Object name and method should be separated by a dot.")
    inst = node.static_components.get(name, None)
    if inst is None:
        raise Exception("Error: static component '{}' does not exist.".format(name))
    return getattr(inst, meth)(**req)
def prepare_parameters(parent: PipelineTreeNode2, common: dict, specific: dict, environment: dict, first_char="%",
                       create_sub_dir: bool=True) -> (dict, dict):
    """
    Create built-in variables (PREV, RPREV, LEAF, RLEAF), substitute variables, starting with '%'
    with their real values.
    :param parent: Parent node of the execution tree.
    :param common: Common parameters dictionary.
    :param specific: Specific parameters dictionary.
    :param environment: Environment dictionary.
    :param first_char: Character that delimits variables ('%' is default).
    :param create_sub_dir Boolean value forces the program to create subdirectory path based on specific dictionary.
    :return: Tuple of two dictionaries: one for parameters, another for environment.
    """
    # NOTE(review): ``create_sub_dir`` is currently unused — the commented-out
    # line below suggests it was once meant to gate ``rleaf``; confirm.
    # Merge two dictionaries 'common-parameters' and 'specific-parameters'
    all_parameters = {**common, **specific} if common is not None else specific
    create_leaf = False
    # Check if 'LEAF' path should be created: only when some parameter value
    # actually references the LEAF variable.
    for v in all_parameters.values():
        if type(v) == str and v.find("LEAF") >= 0:
            create_leaf = True
    # Path parameters should not appear in other paths: keep only scalar,
    # non-path, non-variable values for building the subdirectory name.
    non_path = {k: v for k, v in zip(specific.keys(), specific.values())
                if (not (isinstance(v, list) or isinstance(v, dict) or isinstance(v, str)))
                or (isinstance(v, str) and v.find("/") < 0 and v.find("%") < 0)}
    # Get subdir path based on specific parameters if requested
    rleaf = get_path_from_dict(non_path, "_") if create_leaf else ""
    # rleaf = get_path_from_dict(non_path, "_") if create_sub_dir else ""
    logger.debug("RLEAF: " + rleaf)
    inherit_prev = all_parameters.get("inherit_prev_path", False)
    # LEAF is anchored either at the parent's output path or at the pipeline root.
    leaf = environment["PREV"] + "/" + rleaf if inherit_prev else environment["ROOT"] + "/" + rleaf
    logger.debug("LEAF: " + leaf)
    new_environment = {**environment, **{"RLEAF": rleaf, "LEAF": leaf, "CREATE_LEAF": create_leaf}}
    # Variable scopes: THIS = this node's environment+parameters,
    # PREV = the parent node's (empty for root nodes).
    scopes = {"THIS": {**new_environment, **all_parameters}, "PREV": {}} if parent is None else \
        {"THIS": {**new_environment, **all_parameters}, "PREV": {**parent._environment, **parent._parameters}}
    # Substitute derived path for LEAF, PREV and other variables
    all_parameters = subst_variables_in_dict2(all_parameters, scopes, True, first_char)
    logger.debug("all_parameters: {}".format(all_parameters))
    return all_parameters, new_environment
def build_tree(config: List, globals: dict, first_char="%") -> List[PipelineTreeNode2]:
    """Build the pipeline execution tree from the parsed configuration.

    Each configuration entry either registers a *static* component (a shared
    instance stored in ``PipelineTreeNode2.static_components``) or fans out a
    *dynamic* component: one child node per 'specific-parameters' entry under
    every parent of the previous level.

    :param config: List of component configuration dictionaries.
    :param globals: Global variable dictionary (shadows the builtin ``globals``).
    :param first_char: Variable-prefix character for substitution.
    :return: List of root nodes of the built tree.
    """
    # NOTE(review): locals ``type`` and parameter ``globals`` shadow Python
    # builtins of the same names — harmless here, but worth renaming.
    parents = list()
    for level, component_config in enumerate(config):
        name = component_config.get("component", None)
        type = component_config.get("type", "dynamic")
        comm = component_config.get("common-parameters", None)
        spec = component_config.get("specific-parameters", None)
        if name is None:
            raise Exception("No 'component' parameter found in configuration.")
        if type == "dynamic" and spec is None:
            raise Exception("No 'specific-parameters' section found in configuration.")
        if type == "static":
            # Static components are instantiated once and shared; they are not
            # nodes of the execution tree.
            params = subst_variables_in_dict(component_config.get("parameters", {}), globals, first_char)
            inst_name = component_config.get("instance-name", None)
            if inst_name is not None:
                PipelineTreeNode2.static_components[inst_name] = get_component(name, params)
            continue
        children = list()
        if len(parents):
            for parent in parents:
                # Only if the previous component path should be followed
                if parent._parameters.get("follow_exec_path", True):
                    for specific in spec:
                        # Create parameter and environment dictionaries
                        parameters, environment = prepare_parameters(
                            parent, comm, specific,
                            {**globals, **{"RPREV": parent._environment["RLEAF"], "PREV": parent._environment["LEAF"]}},
                            first_char, len(spec) > 1)
                        children.append(PipelineTreeNode2(level, name, parameters, environment, parent))
        else:
            # First dynamic level: these nodes become the roots of the tree.
            for specific in spec:
                # Create parameter and environment dictionaries
                parameters, environment = prepare_parameters(None, comm, specific, globals, first_char, len(spec) > 1)
                children.append(PipelineTreeNode2(level, name, parameters, environment, None))
        parents = None
        parents = children
        children = None
    return PipelineTreeNode2.roots
def run_tree() -> None:
    """Execute every node of the previously built pipeline tree in this process."""
    PipelineTreeNode2.traverse_all(single_proc_exec)
lib/modeling/contrasted_context_net.py | xixiobba/MVP-Net | 18 | 12760747 | <filename>lib/modeling/contrasted_context_net.py
# Author zhangshu
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from core.config import cfg
import nn as mynn
import utils.net as net_utils
def get_cc_net(input_channels, output_channels):
    """
    Get an instance of the contrasted-context network (CCNet).

    :param input_channels: number of channels of the input feature map
    :param output_channels: number of output channels of both convolutions
    :return: a new, untrained CCNet module
    """
    return CCNet(input_channels, output_channels)
class _CCBaseNet(nn.Module):
def __init__(self):
super(_CCBaseNet, self).__init__()
def detectron_weight_mapping(self):
return {}, []
class CCNet(_CCBaseNet):
    """Contrasted Context layer from the CVPR 2018 paper
    "Context contrasted feature and gated multi-scale aggregation ...".

    Subtracts a dilated ("context") 3x3 convolution response from a plain
    ("local") 3x3 convolution response and rectifies the difference.

    Params: input_channels, output_channels
    """
    def __init__(self, input_channels, output_channels):
        super(CCNet, self).__init__()
        dilation = cfg.CONTRASTED_CONTEXT.DILATION_SIZE
        # Plain 3x3 convolution capturing the local neighbourhood.
        self.conv_local = nn.Conv2d(input_channels, output_channels,
                                    kernel_size=(3, 3), stride=1, padding=1, bias=False)
        # Dilated 3x3 convolution with matching padding, capturing wider context.
        self.conv_context = nn.Conv2d(input_channels, output_channels,
                                      kernel_size=(3, 3), stride=1,
                                      padding=dilation, dilation=dilation, bias=False)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # Contrast: local response minus context response, then ReLU.
        contrasted = self.conv_local(x) - self.conv_context(x)
        return self.relu(contrasted)
| 2.296875 | 2 |
bots/dummies/bot.py | justletterh/botsv2 | 1 | 12760748 | import discord,platform,os
from discord.ext import commands
t=os.environ['DISCORD_TOKEN']
pfx=os.environ['DISCORD_PFX']
hid=int(os.environ['HID'])
async def is_owner(ctx):
return ctx.author.id==hid
client=commands.Bot(command_prefix=pfx,case_insensitive=True)
@client.command()
async def tst(ctx):
await ctx.send(content="I'm up!")
@client.command()
@commands.check(is_owner)
async def stop(ctx):
await ctx.send(content="Goodbye!")
await client.close()
@client.event
async def on_message(message):
op=await is_owner(message)
if message.author.id==client.user.id:
return
if message.content.lower().startswith("all.stop") and op:
await message.channel.send(content="Goodbye!")
await client.close()
if message.content.lower().startswith("all.tst"):
await message.channel.send(content="I'm up!")
if message.content.lower().startswith(f"{pfx}token") or message.content.lower().startswith("all.token"):
if op:
print(f"{message.author}<{message.author.id}> requested this bot's token and it was sent to them")
await message.author.send(content=f"Here is the token you requested!\n```\n{t}\n```")
await message.channel.send(content=":white_check_mark: Check your DMs! :white_check_mark:")
elif not op:
print(f"{message.author}<{message.author.id}> requested this bot's token and it was not sent to them because they did not have the required permission")
await message.channel.send(content=":x: You don't have the required permission. This incident has been logged. :x:")
await client.process_commands(message)
@client.event
async def on_ready():
startlat=int(client.latency*1000)
pyver=platform.python_version()
discver=discord.__version__
print(f"We have logged in as {client.user}<@!{client.user.id}>")
await client.change_presence(status="dnd",activity=discord.Game(f"Python {pyver} Discord.py {discver}"))
@client.event
async def on_command_error(ctx, error):
if isinstance(error,commands.errors.CommandNotFound):
pass
client.run(t) | 2.71875 | 3 |
tests/test_boogie_output_parser.py | sreeja/soteria_tool | 2 | 12760749 | from soteria.debug_support.boogie_output_parser import BoogieOutputParser
class TestBoogieOutputParser:
    """Unit tests for BoogieOutputParser against captured Boogie verifier output."""
    # Verifier output containing two BP5002 precondition-violation errors.
    sample1 = '''input(168,1): Error BP5002: A precondition for this call might not hold.
input(146,1): Related location: This is the precondition that might not hold.
Execution trace:
    input(164,2): anon0
input(168,1): Error BP5002: A precondition for this call might not hold.
input(145,1): Related location: This is the precondition that might not hold.
Execution trace:
    input(164,2): anon0

Boogie program verifier finished with 2 verified, 2 errors'''
    # Verifier output with a banner line and a single error (currently unused
    # by the tests below — presumably kept for future coverage).
    sample2 = '''Boogie program verifier version 2.3.0.61016, Copyright (c) 2003-2014, Microsoft.
test_models/stability_decrement_transfer.bpl(167,1): Error BP5002: A precondition for this call might not hold.
test_models/stability_decrement_transfer.bpl(145,1): Related location: This is the precondition that might not hold.
Execution trace:
    test_models/stability_decrement_transfer.bpl(163,2): anon0

Boogie program verifier finished with 2 verified, 1 error'''
    def test_parse(self):
        # Both errors from sample1 must be extracted, each with its line,
        # error code, message, and a single related (precondition) location.
        parser = BoogieOutputParser()
        errors = parser.parse(self.sample1)
        assert len(errors) == 2
        assert errors[0].line == 168
        assert errors[0].code == 'BP5002'
        assert errors[0].message == 'A precondition for this call might not hold.'
        assert len(errors[0].related_locations) == 1
        assert errors[0].related_locations[0].line == 146
        assert errors[1].related_locations[0].line == 145
passenger_wsgi.py | SeeDCharlie/seedcorp | 0 | 12760750 | <reponame>SeeDCharlie/seedcorp
import sys, os

# Re-exec this script under the virtualenv's Python interpreter.
INTERP = "/home/seedch/venvSeedCorp/bin/python3.9"
# INTERP is present twice so that the new python interpreter
# knows the actual executable path
if sys.executable != INTERP: os.execl(INTERP, INTERP, *sys.argv)

cwd = os.getcwd()
sys.path.append(cwd)
sys.path.append(cwd + '/SeeDCorp')  # You must add your project here

sys.path.insert(0, '/home/seedch/venvSeedCorp/bin')
# BUG FIX: this path was previously prefixed with ``cwd``, producing a
# non-existent entry like "<cwd>/home/seedch/...". The virtualenv's
# site-packages directory is an absolute path.
sys.path.insert(0, '/home/seedch/venvSeedCorp/lib/python3.9/site-packages')

os.environ['DJANGO_SETTINGS_MODULE'] = "SeeDCorp.settings"

from django.core.wsgi import get_wsgi_application
# WSGI entry point that Passenger looks for.
application = get_wsgi_application()