hexsha (string, 40) | size (int64, 4–1.02M) | ext (string, 8 classes) | lang (string, 1 class) | max_stars_repo_path (string, 4–209) | max_stars_repo_name (string, 5–121) | max_stars_repo_head_hexsha (string, 40) | max_stars_repo_licenses (list, 1–10) | max_stars_count (int64, 1–191k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24, nullable) | max_stars_repo_stars_event_max_datetime (string, 24, nullable) | max_issues_repo_path (string, 4–209) | max_issues_repo_name (string, 5–121) | max_issues_repo_head_hexsha (string, 40) | max_issues_repo_licenses (list, 1–10) | max_issues_count (int64, 1–67k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24, nullable) | max_issues_repo_issues_event_max_datetime (string, 24, nullable) | max_forks_repo_path (string, 4–209) | max_forks_repo_name (string, 5–121) | max_forks_repo_head_hexsha (string, 40) | max_forks_repo_licenses (list, 1–10) | max_forks_count (int64, 1–105k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24, nullable) | max_forks_repo_forks_event_max_datetime (string, 24, nullable) | content (string, 4–1.02M) | avg_line_length (float64, 1.07–66.1k) | max_line_length (int64, 4–266k) | alphanum_fraction (float64, 0.01–1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
22e9a9f563dd19050f2b5c338c0fc75d7af0c466 | 3,452 | py | Python | upcloud_api/cloud_manager/base.py | akx/upcloud-python-api | c18226ab5f991a495d3461f2cb534da30d147a2d | ["MIT"] | null | null | null | upcloud_api/cloud_manager/base.py | akx/upcloud-python-api | c18226ab5f991a495d3461f2cb534da30d147a2d | ["MIT"] | null | null | null | upcloud_api/cloud_manager/base.py | akx/upcloud-python-api | c18226ab5f991a495d3461f2cb534da30d147a2d | ["MIT"] | null | null | null |
from __future__ import unicode_literals
import json
import requests
from upcloud_api import UpCloudAPIError, __version__
class BaseAPI(object):
"""
CloudManager base that handles basic HTTP communication with API.
"""
api = 'api.upcloud.com'
api_v = '1.3'
def __init__(self, token, timeout=None): # noqa
self.token = token
self.timeout = timeout
def request(self, method, endpoint, body=None, params=None, timeout=-1, request_to_api=True):
"""
Perform a request with a given body to a given endpoint in UpCloud's API or UpCloud's uploader session.
Handles errors with __error_middleware.
"""
if method not in set(['GET', 'POST', 'PUT', 'PATCH', 'DELETE']):
raise Exception('Invalid/Forbidden HTTP method')
url = 'https://api.upcloud.com/' + self.api_v + endpoint if request_to_api else endpoint
headers = {
'Authorization': self.token,
'User-Agent': 'upcloud-python-api/{}'.format(__version__)
}
headers['Content-Type'] = 'application/json' if request_to_api else 'application/octet-stream'
if body and request_to_api:
data = json.dumps(body)
elif body and not request_to_api:
data = body
else:
data = None
call_timeout = timeout if timeout != -1 else self.timeout
APIcall = getattr(requests, method.lower())
res = APIcall(url,
data=data,
params=params,
headers=headers,
timeout=call_timeout)
if res.text:
res_json = res.json()
else:
res_json = {}
return self.__error_middleware(res, res_json)
def get_request(self, endpoint, params=None, timeout=-1):
"""
Perform a GET request to a given endpoint in UpCloud's API.
"""
return self.request('GET', endpoint, params=params, timeout=timeout)
def post_request(self, endpoint, body=None, timeout=-1):
"""
Perform a POST request to a given endpoint in UpCloud's API.
"""
return self.request('POST', endpoint, body=body, timeout=timeout)
def put_request(self, endpoint, body=None, timeout=-1, request_to_api=True):
"""
Perform a PUT request to a given endpoint in UpCloud's API or UpCloud's uploader session.
"""
return self.request('PUT', endpoint, body=body, timeout=timeout, request_to_api=request_to_api)
def patch_request(self, endpoint, body=None, timeout=-1):
"""
Perform a PATCH request to a given endpoint in UpCloud's API.
"""
return self.request('PATCH', endpoint, body=body, timeout=timeout)
def delete_request(self, endpoint, timeout=-1):
"""
Perform a DELETE request to a given endpoint in UpCloud's API.
"""
return self.request('DELETE', endpoint, timeout=timeout)
def __error_middleware(self, res, res_json):
"""
Middleware that raises an exception when HTTP statuscode is an error code.
"""
if(res.status_code in [400, 401, 402, 403, 404, 405, 406, 409]):
err_dict = res_json.get('error', {})
raise UpCloudAPIError(error_code=err_dict.get('error_code'),
error_message=err_dict.get('error_message'))
return res_json
| 34.178218 | 111 | 0.607184 |
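A minimal usage sketch for the `BaseAPI` wrapper in the row above. It assumes the package is importable under the path shown (`upcloud_api.cloud_manager.base`), that UpCloud authenticates with HTTP Basic auth where the token is the full `Authorization` header value (which `request()` forwards verbatim), and that `/account` is a valid endpoint; treat it as illustrative, not canonical.

```python
import base64

from upcloud_api.cloud_manager.base import BaseAPI

# Assumption: the token is the complete Authorization header value.
credentials = base64.b64encode(b"username:password").decode("ascii")
api = BaseAPI(token="Basic " + credentials, timeout=10)

# GET https://api.upcloud.com/1.3/account; a 4xx status raises UpCloudAPIError
# via the __error_middleware shown above.
account = api.get_request("/account")
print(account)
```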
de0c315a09299e386335a9dbaa3cb7a35680d8f6 | 3,495 | py | Python | src/azure-cli-core/azure/cli/core/file_util.py | cliffwoodave14/azure-cli | a413f216c0a7e792b4e5c78d0a1acb65753b5d29 | ["MIT"] | null | null | null | src/azure-cli-core/azure/cli/core/file_util.py | cliffwoodave14/azure-cli | a413f216c0a7e792b4e5c78d0a1acb65753b5d29 | ["MIT"] | null | null | null | src/azure-cli-core/azure/cli/core/file_util.py | cliffwoodave14/azure-cli | a413f216c0a7e792b4e5c78d0a1acb65753b5d29 | ["MIT"] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
from knack.util import CLIError
from azure.cli.core._help import CliCommandHelpFile, CliGroupHelpFile
def get_all_help(cli_ctx):
invoker = cli_ctx.invocation
help_ctx = cli_ctx.help_cls(cli_ctx)
if not invoker:
raise CLIError('CLI context does not contain invocation.')
parser_keys = []
parser_values = []
sub_parser_keys = []
sub_parser_values = []
_store_parsers(invoker.parser, parser_keys, parser_values, sub_parser_keys, sub_parser_values)
for cmd, parser in zip(parser_keys, parser_values):
if cmd not in sub_parser_keys:
sub_parser_keys.append(cmd)
sub_parser_values.append(parser)
help_files = []
for cmd, parser in zip(sub_parser_keys, sub_parser_values):
try:
help_file = CliGroupHelpFile(help_ctx, cmd, parser) if _is_group(parser) \
else CliCommandHelpFile(help_ctx, cmd, parser)
help_file.load(parser)
help_files.append(help_file)
except Exception as ex: # pylint: disable=broad-except
print("Skipped '{}' due to '{}'".format(cmd, ex))
help_files = sorted(help_files, key=lambda x: x.command)
return help_files
def create_invoker_and_load_cmds_and_args(cli_ctx):
from knack import events
from azure.cli.core.commands.arm import register_global_subscription_argument, register_ids_argument
invoker = cli_ctx.invocation_cls(cli_ctx=cli_ctx, commands_loader_cls=cli_ctx.commands_loader_cls,
parser_cls=cli_ctx.parser_cls, help_cls=cli_ctx.help_cls)
cli_ctx.invocation = invoker
invoker.commands_loader.skip_applicability = True
invoker.commands_loader.load_command_table(None)
# turn off applicability check for all loaders
for loaders in invoker.commands_loader.cmd_to_loader_map.values():
for loader in loaders:
loader.skip_applicability = True
for command in invoker.commands_loader.command_table:
invoker.commands_loader.load_arguments(command)
register_global_subscription_argument(cli_ctx)
register_ids_argument(cli_ctx) # global subscription must be registered first!
cli_ctx.raise_event(events.EVENT_INVOKER_POST_CMD_TBL_CREATE, commands_loader=invoker.commands_loader)
invoker.parser.load_command_table(invoker.commands_loader)
def _store_parsers(parser, parser_keys, parser_values, sub_parser_keys, sub_parser_values):
for s in parser.subparsers.values():
parser_keys.append(_get_parser_name(s))
parser_values.append(s)
if _is_group(s):
for c in s.choices.values():
sub_parser_keys.append(_get_parser_name(c))
sub_parser_values.append(c)
_store_parsers(c, parser_keys, parser_values, sub_parser_keys, sub_parser_values)
def _get_parser_name(s):
return (s._prog_prefix if hasattr(s, '_prog_prefix') else s.prog)[3:] # pylint: disable=protected-access
def _is_group(parser):
return getattr(parser, '_subparsers', None) is not None \
or getattr(parser, 'choices', None) is not None
| 42.621951 | 109 | 0.68412 |
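A hedged sketch of how the two helpers above are typically driven together by help and documentation tooling; `get_default_cli()` is assumed to be available from `azure.cli.core`, and the import path follows the repository layout shown in the row.

```python
from azure.cli.core import get_default_cli
from azure.cli.core.file_util import (
    create_invoker_and_load_cmds_and_args,
    get_all_help,
)

cli_ctx = get_default_cli()

# Build the invoker and load every command and its arguments first...
create_invoker_and_load_cmds_and_args(cli_ctx)

# ...then collect a CliGroupHelpFile / CliCommandHelpFile per group and command.
help_files = get_all_help(cli_ctx)
print("loaded help for", len(help_files), "commands and groups")
```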
c023b07a74ce23ff5f802df1c30d4b622ee4e78a | 2,139 | py | Python | app/core/profiles.py | 9b/split-key-roast | 2becb2589bc7031f6fcc5e3527ffc56107c5be08 | ["MIT"] | 3 | 2017-11-30T06:26:13.000Z | 2020-08-06T21:06:42.000Z | app/core/profiles.py | 9b/split-key-roast | 2becb2589bc7031f6fcc5e3527ffc56107c5be08 | ["MIT"] | 8 | 2017-11-27T00:49:42.000Z | 2022-03-12T00:46:44.000Z | app/core/profiles.py | 9b/split-key-roast | 2becb2589bc7031f6fcc5e3527ffc56107c5be08 | ["MIT"] | 2 | 2020-05-03T14:10:56.000Z | 2020-08-06T21:06:37.000Z |
"""Calls related to roast profiles."""
from . import core
from .. import mongo
from ..libs.utils import paranoid_clean
from .forms import ProfileForm
from bson.objectid import ObjectId
from flask import (
render_template, redirect, url_for, jsonify, request
)
from flask import current_app as app
from flask_login import login_required, current_user
@core.route('/profiles')
@login_required
def profiles():
"""Render the profiles page."""
c = mongo.db[app.config['PROFILE_COLLECTION']]
items = c.find({'user': current_user.get_id()})
output = list()
for x in items:
x['id'] = str(x['_id'])
output.append(x)
output.sort(key=lambda x: x['datetime'], reverse=True)
return render_template('profiles.html', profiles=output)
@core.route('/profiles/edit-profile', methods=['POST'])
@login_required
def edit_profile():
"""Render the index page."""
form = ProfileForm(request.form)
if form.validate():
if 'profile_id' not in request.form:
return jsonify({'success': False,
'error': 'ID not found in edit!'})
edit_id = paranoid_clean(request.form.get('profile_id'))
c = mongo.db[app.config['PROFILE_COLLECTION']]
item = {'coffee': form.coffee.data, 'roast': form.roast.data,
'drop_temp': form.drop_temp.data, 'notes': form.notes.data,
'brew_methods': form.brew_methods.data, 'tags': list()}
c.update({'_id': ObjectId(edit_id)}, {'$set': item})
return redirect(url_for('core.profiles'))
errors = ','.join([value[0] for value in list(form.errors.values())])
return jsonify({'errors': errors})
@core.route('/profiles/remove-item', methods=['POST'])
@login_required
def remove_profile():
"""Render the index page."""
args = request.get_json()
if 'id' not in args:
return jsonify({'success': False,
'error': 'ID not found in request!'})
c = mongo.db[app.config['PROFILE_COLLECTION']]
remove_id = paranoid_clean(args.get('id'))
c.remove({'_id': ObjectId(remove_id)})
return jsonify({'success': True})
| 35.65 | 75 | 0.640954 |
2bca33cdbc6eb32e4015628d8fd825b2fdcae13a | 3,587 | py | Python | cities_light/contrib/restframework3.py | st4lk/django-cities-light | ad303f500f506d44d287ec3d531ff9fd8bc33e34 | ["MIT"] | 1 | 2021-02-17T13:11:35.000Z | 2021-02-17T13:11:35.000Z | cities_light/contrib/restframework3.py | st4lk/django-cities-light | ad303f500f506d44d287ec3d531ff9fd8bc33e34 | ["MIT"] | null | null | null | cities_light/contrib/restframework3.py | st4lk/django-cities-light | ad303f500f506d44d287ec3d531ff9fd8bc33e34 | ["MIT"] | null | null | null |
"""
Couple djangorestframework and cities_light.
It defines a urlpatterns variables, with the following urls:
- cities-light-api-city-list
- cities-light-api-city-detail
- cities-light-api-region-list
- cities-light-api-region-detail
- cities-light-api-country-list
- cities-light-api-country-detail
If rest_framework (v3) is installed, all you have to do is add this url
include::
url(r'^cities_light/api/', include('cities_light.contrib.restframework3')),
And that's all !
"""
from rest_framework import viewsets, relations
from rest_framework.serializers import HyperlinkedModelSerializer
from rest_framework import routers
try:
from django.conf.urls.defaults import patterns, url, include
except ImportError:
from django.conf.urls import patterns, url, include
from ..models import Country, Region, City
class CitySerializer(HyperlinkedModelSerializer):
"""
HyperlinkedModelSerializer for City.
"""
url = relations.HyperlinkedIdentityField(
view_name='cities-light-api-city-detail')
country = relations.HyperlinkedRelatedField(
view_name='cities-light-api-country-detail', read_only=True)
region = relations.HyperlinkedRelatedField(
view_name='cities-light-api-region-detail', read_only=True)
class Meta:
model = City
exclude = ('slug',)
class RegionSerializer(HyperlinkedModelSerializer):
"""
HyperlinkedModelSerializer for Region.
"""
url = relations.HyperlinkedIdentityField(
view_name='cities-light-api-region-detail')
country = relations.HyperlinkedRelatedField(
view_name='cities-light-api-country-detail', read_only=True)
class Meta:
model = Region
exclude = ('slug',)
class CountrySerializer(HyperlinkedModelSerializer):
"""
HyperlinkedModelSerializer for Country.
"""
url = relations.HyperlinkedIdentityField(
view_name='cities-light-api-country-detail')
class Meta:
model = Country
class CitiesLightListModelViewSet(viewsets.ReadOnlyModelViewSet):
def get_queryset(self):
"""
Allows a GET param, 'q', to be used against name_ascii.
"""
queryset = super(CitiesLightListModelViewSet, self).get_queryset()
if self.request.GET.get('q', None):
return queryset.filter(name_ascii__icontains=self.request.GET['q'])
return queryset
class CountryModelViewSet(CitiesLightListModelViewSet):
serializer_class = CountrySerializer
queryset = Country.objects.all()
class RegionModelViewSet(CitiesLightListModelViewSet):
serializer_class = RegionSerializer
queryset = Region.objects.all()
class CityModelViewSet(CitiesLightListModelViewSet):
"""
ListRetrieveView for City.
"""
serializer_class = CitySerializer
queryset = City.objects.all()
def get_queryset(self):
"""
Allows a GET param, 'q', to be used against search_names.
"""
queryset = super(CitiesLightListModelViewSet, self).get_queryset()
if self.request.GET.get('q', None):
return queryset.filter(
search_names__icontains=self.request.GET['q'])
return queryset
router = routers.SimpleRouter()
router.register(r'cities', CityModelViewSet, base_name='cities-light-api-city')
router.register(r'countries', CountryModelViewSet,
base_name='cities-light-api-country')
router.register(r'regions', RegionModelViewSet,
base_name='cities-light-api-region')
urlpatterns = patterns('',
url(r'^', include(router.urls)),
)
| 28.244094 | 79 | 0.708113 |
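A hedged example of the `?q=` name filter implemented by `CitiesLightListModelViewSet` above, assuming the contrib urlpatterns are mounted at `/cities_light/api/` as in the module docstring and that city fixtures are loaded; "paris" is an arbitrary query.

```python
from django.test import Client

client = Client()
response = client.get("/cities_light/api/cities/", {"q": "paris"})

print(response.status_code)  # 200
print(response.content)      # JSON with only cities whose search_names match
```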
078748f7515aaa7ebb738471f523ec2987b26d76 | 5,540 | py | Python | bundle/sagemaker_rl_agent/lib/python3.5/site-packages/markov/agent_ctrl/constants.py | Asdafers/deepracer-simapp | 539ee72942c18c453c65fb7300beb586dd440690 | ["MIT"] | null | null | null | bundle/sagemaker_rl_agent/lib/python3.5/site-packages/markov/agent_ctrl/constants.py | Asdafers/deepracer-simapp | 539ee72942c18c453c65fb7300beb586dd440690 | ["MIT"] | null | null | null | bundle/sagemaker_rl_agent/lib/python3.5/site-packages/markov/agent_ctrl/constants.py | Asdafers/deepracer-simapp | 539ee72942c18c453c65fb7300beb586dd440690 | ["MIT"] | null | null | null |
'''This module houses the constants for the agent ctlr package'''
from enum import Enum
# Default max number of steps to allow per episode
MAX_STEPS = 10000
# Local offset of the front of the car
RELATIVE_POSITION_OF_FRONT_OF_CAR = [0.14, 0, 0]
# Normalized track distance to move with each reset
# now config ROUND_ROBIN_ADVANCE_DIST = 0.05
# Reward to give the car when it paused and parked
ZERO_REWARD = 0.0
# Reward to give the car when it "crashes"
CRASHED = 1e-8
# The number of steps to wait before checking if the car is stuck
# This number should correspond to the camera FPS, since it is pacing the
# step rate.
NUM_STEPS_TO_CHECK_STUCK = 15
# Radius of the wheels of the car in meters
WHEEL_RADIUS = 0.1
# Allowed closest object distance
CLOSEST_OBJ_GAP = 1.00
# Reset behind object distance
RESET_BEHIND_DIST = 1.00
# Bot car z
BOT_CAR_Z = 0.0
# Obstacle z
OBSTACLE_Z = 0.1
# Obstacle
OBSTACLE_NAME_PREFIX = "obstacle"
BLINK_MIN_ALPHA = 0.3
# Single Blink interval in sec
BLINK_INTERVAL = 0.5
class ConfigParams(Enum):
'''This enum defines the keys for the input keys for the rollout
ctr config dict
'''
AGENT_NAME = 'agent_name'
LINK_NAME_LIST = 'agent_link_name_list'
STEERING_LIST = 'steering_list'
VELOCITY_LIST = 'velocity_list'
REWARD = 'reward'
ACTION_SPACE_PATH = 'path_to_json'
CHANGE_START = 'change_start'
ALT_DIR = 'alternate_dir'
VERSION = 'version'
CAR_CTRL_CONFIG = 'car_ctrl_config'
NUMBER_OF_RESETS = 'number_of_resets'
PENALTY_SECONDS = 'penalty_seconds'
IS_CONTINUOUS = 'is_continuous'
NUMBER_OF_TRIALS = 'number_of_trials'
RACE_TYPE = 'race_type'
COLLISION_PENALTY = 'collision_penalty'
OFF_TRACK_PENALTY = 'off_track_penalty'
START_POSITION = 'start_position'
PARK_POSITIONS = 'park_positions'
DONE_CONDITION = 'done_condition'
ROUND_ROBIN_ADVANCE_DIST = 'round_robin_advance_dist'
START_POSITION_OFFSET = 'start_position_offset'
class RewardParam(Enum):
'''This enum contains the keys and default values for the parameters to be
feed into the reward function.
'''
# boolean: all wheel on track
WHEELS_ON_TRACK = ['all_wheels_on_track', True]
X = ['x', 0.0] # float: race car x position
Y = ['y', 0.0] # float: race car y position
HEADING = ['heading', 0.0] # float: race car heading angle
CENTER_DIST = ['distance_from_center', 0.0] # float: race car distance from centerline
PROJECTION_DISTANCE = ['projection_distance', 0.0] # float: race car distance projected on the centerline
PROG = ['progress', 0.0] # float: race car track progress [0,1]
STEPS = ['steps', 0] # int: number of steps race car have taken
SPEED = ['speed', 0.0] # float: race car speed
STEER = ['steering_angle', 0.0] # float: race car steering angle
TRACK_WIDTH = ['track_width', 0.0] # float: track width
TRACK_LEN = ['track_length', 0.0] # float: track length
WAYPNTS = ['waypoints', 0] # list of tuple: list of waypoints (x, y) tuple
CLS_WAYPNY = ['closest_waypoints', [0, 0]] # list of int: list of int with size 2 containing closest prev and next waypoint indexes
LEFT_CENT = ['is_left_of_center', False] # boolean: race car left of centerline
REVERSE = ['is_reversed', False] # boolean: race car direction
CLOSEST_OBJECTS = ['closest_objects', [0, 0]] # list of int: list of int with size 2 containing closest prev and next object indexes
OBJECT_LOCATIONS = ['objects_location', []] # list of tuple: list of all object (x, y) locations
OBJECTS_LEFT_OF_CENTER = ['objects_left_of_center', []] # list of boolean: list of all object to the left of centerline or not
OBJECT_IN_CAMERA = ['object_in_camera', False] # boolean: any object in camera
OBJECT_SPEEDS = ['objects_speed', []] # list of float: list of objects speed
OBJECT_HEADINGS = ['objects_heading', []] # list of float: list of objects heading
OBJECT_CENTER_DISTS = ['objects_distance_from_center', []] # list of float: list of object distance from centerline
OBJECT_CENTERLINE_PROJECTION_DISTANCES = ['objects_distance', []] # list of float: list of object distance projected on the centerline
CRASHED = ['is_crashed', False] # boolean: crashed into an object or bot car
OFFTRACK = ['is_offtrack', False] # boolean: all four wheels went off-track
@classmethod
def make_default_param(cls):
'''Returns a dictionary with the default values for the reward function'''
return {key.value[0] : key.value[-1] for key in cls}
@classmethod
def validate_dict(cls, input_dict):
'''Will raise an exception if input dict does not contain all the keys in the enum'''
for key in cls:
_ = input_dict[key.value[0]]
| 48.596491 | 160 | 0.611913 |
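A short illustration of the two `RewardParam` helpers defined above, assuming the bundled `markov` package is on the Python path (it lives under `bundle/sagemaker_rl_agent/...` in this repository).

```python
from markov.agent_ctrl.constants import ConfigParams, RewardParam

# Build the default reward-function parameter dict and confirm it is complete.
params = RewardParam.make_default_param()
print(params["all_wheels_on_track"])  # True, the default of WHEELS_ON_TRACK
print(params["progress"])             # 0.0

RewardParam.validate_dict(params)     # raises KeyError if any key is missing

# ConfigParams values are the string keys expected in the rollout config dict.
print(ConfigParams.AGENT_NAME.value)  # 'agent_name'
```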
f389a5c60527c83e20cc357286b37fc44a02d662 | 4,385 | py | Python | main.py | kmhmubin/Flash-Card | 3ab4f179d775949f65190e5ac8c11b4d84d0c6a8 | ["MIT"] | null | null | null | main.py | kmhmubin/Flash-Card | 3ab4f179d775949f65190e5ac8c11b4d84d0c6a8 | ["MIT"] | null | null | null | main.py | kmhmubin/Flash-Card | 3ab4f179d775949f65190e5ac8c11b4d84d0c6a8 | ["MIT"] | null | null | null |
import random
import pandas
from tkinter import *
# ------------------------- CONSTANT ---------------------------------- #
BLACK = "#2C333D"
YELLOW = "#FCD836"
WHITE = "#FFFFFF"
GRAY_WHITE = "#F4F4F4"
BAHNSCHRIFT = "Bahnschrift"
CALIBRI = "Calibri"
# -------------------------- WORD DICT ------------------------------- #
# default card is empty
current_card = {}
# know word dictionary is empty
known_words = {}
# reading the data from know data
try:
know_data = pandas.read_csv("data/know_word.csv")
except FileNotFoundError:
# if know data not found then go to original data
original_data = pandas.read_csv("data/Bangla_word_list.csv")
learning = original_data.to_dict(orient="records")
else:
# creating dictionary using pandas
learning = know_data.to_dict(orient="records")
# -------------------------- NEXT CARD ------------------------------- #
# TODO: when cross button pressed show the next word in english and flip the image
def next_card():
"""Return next value randomly from the dictionary"""
# global current cart
global current_card, flip_timer
# cancel the timer
window.after_cancel(flip_timer)
# randomly choose word from the dictionary
current_card = random.choice(learning)
# replace the title text in the UI
canvas.itemconfig(card_title, text="English", fill=BLACK)
# replace the word text in the UI
canvas.itemconfig(card_word, text=current_card["English"], fill=BLACK)
# change the background images if button pressed
canvas.itemconfig(card_background, image=front_card_image)
# flip timer
flip_timer = window.after(3000, func=flip_card)
# ------------------------- FLIP CARD -------------------------------- #
# TODO: Flip card after 3 seconds and show the bangla value
def flip_card():
"""Flip the card after 3 seconds """
canvas.itemconfig(card_title, text="Bangla", fill=WHITE)
# show the equivalent meaning of the current word
canvas.itemconfig(card_word, text=current_card["Bangla"], fill=WHITE)
# changing the background images
canvas.itemconfig(card_background, image=back_card_image)
# --------------------------- KNOWN WORD ------------------------------ #
# TODO: When know button pressed it save in the know dictionary
def know_word():
"""Save Know word into new file"""
learning.remove(current_card)
# remove data from current card
new_data = pandas.DataFrame(learning)
# create a new csv file using pandas without index
new_data.to_csv("data/know_word.csv", index=False)
# show the next word
next_card()
# --------------------------- UI SETUP -------------------------------- #
# TODO: Creating Program window
# creating window object
window = Tk()
# add title to the program
window.title("Learn English to Bangla Vocabulary")
# window size
window.config(padx=50, pady=50, bg=GRAY_WHITE)
# add custom favicon
window.iconbitmap(r'images/favicon.ico')
# flip the card after 3 seconds
flip_timer = window.after(3000, func=flip_card)
# TODO: Creating canvas
# creating a canvas
canvas = Canvas(width=800, height=526)
# front card image
front_card_image = PhotoImage(file="images/card_front.png")
# back card image
back_card_image = PhotoImage(file="images/card_back.png")
# assigning the position for front card
card_background = canvas.create_image(400, 263, image=front_card_image)
# Canvas card title
card_title = canvas.create_text(400, 150, text="Title", font=(BAHNSCHRIFT, 40, "normal"))
# canvas card word
card_word = canvas.create_text(400, 263, text="Word", font=(CALIBRI, 60, "bold"))
# canvas config
canvas.config(bg=GRAY_WHITE, highlightthicknes=0)
# canvas grid
canvas.grid(row=0, column=0, columnspan=2)
# TODO: Buttons
# cross icon
cross_icon = PhotoImage(file="images/cancel.png")
# assign icon to the button without border or background thickness
cross_button = Button(image=cross_icon, highlightthicknes=0, borderwidth=0, command=next_card)
# cross button grid
cross_button.grid(row=1, column=0)
# check icon
check_icon = PhotoImage(file="images/checked.png")
# assign icon to the button without border or background thickness
cross_button = Button(image=check_icon, highlightthicknes=0, borderwidth=0, command=know_word)
# check button grid
cross_button.grid(row=1, column=1)
# calling the next card function
next_card()
# run the window
window.mainloop()
| 29.42953 | 94 | 0.680274 |
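A stand-alone sketch of the pandas round trip that `know_word()` above relies on: load records with `to_dict(orient="records")`, drop the card the user knows, and write the remainder back. The two-word dataset and the output filename are hypothetical.

```python
import pandas as pd

# Hypothetical stand-in for data/Bangla_word_list.csv.
words = pd.DataFrame({"English": ["cat", "dog"], "Bangla": ["biral", "kukur"]})
learning = words.to_dict(orient="records")  # [{'English': 'cat', ...}, ...]

# Simulate pressing the "known" button on the current card.
current_card = learning[0]
learning.remove(current_card)               # drop the known word
pd.DataFrame(learning).to_csv("know_word.csv", index=False)
```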
77cbfac7e852bc334c2209e8c4ddfb7bf76e840b | 2,269 | py | Python | src/database.py | vtainio/Nordea-to-YNAB | f8b336f08fbcbab518d58a07b3590fac883ec4b6 | ["MIT"] | 5 | 2018-10-21T15:17:39.000Z | 2020-04-25T15:32:39.000Z | src/database.py | vtainio/Nordea-to-YNAB | f8b336f08fbcbab518d58a07b3590fac883ec4b6 | ["MIT"] | 6 | 2017-06-05T19:44:45.000Z | 2017-07-11T07:39:32.000Z | src/database.py | vtainio/Nordea-to-YNAB | f8b336f08fbcbab518d58a07b3590fac883ec4b6 | ["MIT"] | 1 | 2017-06-29T20:51:17.000Z | 2017-06-29T20:51:17.000Z |
from __future__ import print_function
import sqlite3
from tabulate import tabulate
DATABASE_NAME = 'nordea_to_ynab.db'
def prepare_tables(cursor):
cursor.execute('CREATE TABLE IF NOT EXISTS category (category_id text primary key not null, name text)')
cursor.execute('CREATE TABLE IF NOT EXISTS payment (name text primary key not null, category_id text, FOREIGN KEY(category_id) REFERENCES category(category_id))')
def get_db_connection():
conn = get_sqlite_connection()
conn.text_factory = str
c = conn.cursor()
prepare_tables(c)
return conn, c
def get_sqlite_connection():
return sqlite3.connect(DATABASE_NAME)
def store_categories(categories):
conn, c = get_db_connection()
for category in categories:
c.execute("INSERT OR REPLACE INTO category VALUES (?, ?)", (category.id, category.name))
conn.commit()
conn.close()
def get_subcategory_for_transaction(transaction):
conn, c = get_db_connection()
c.execute("SELECT category_id FROM payment WHERE name=:name", {"name": transaction.target})
category_id = c.fetchone()
if not category_id:
category_id = get_subcategory_from_user(c, transaction.target)
c.execute("INSERT INTO payment VALUES (?, ?)", (transaction.target, category_id))
else:
category_id = category_id[0] # Get the value from a single element tuple
conn.commit()
conn.close()
return category_id
def get_subcategory_from_user(cursor, target):
cursor.execute("SELECT * FROM category")
categories = cursor.fetchall()
options = []
categories_by_name = {}
for index, category in enumerate(categories):
category_id, name = category
categories_by_name[name] = category_id
options.append([index, name])
id = prompt_user_for_id(target, options)
return categories_by_name[options[id][1]]
def prompt_user_for_id(target, options):
print("No category found for %s. Please select one from below:\n\n" % target)
    print(tabulate(options, headers=["ID", "Name"]))
while True:
selection = raw_input("Enter the ID for %s: " % target)
if selection.isdigit() and int(selection) >= 0 and int(selection) < len(options):
break
return int(selection)
| 29.855263 | 166 | 0.70119 |
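A hedged usage sketch of the module above. It targets Python 2 (it prompts with `raw_input`), assumes `src/` is on the Python path, and the `Category`/`Transaction` shapes below are assumptions inferred from the attribute access in the code (`category.id`/`category.name`, `transaction.target`).

```python
from collections import namedtuple

from database import get_subcategory_for_transaction, store_categories

Category = namedtuple("Category", "id name")
Transaction = namedtuple("Transaction", "target")

store_categories([Category("cat-1", "Groceries"), Category("cat-2", "Rent")])

# Prompts on the terminal the first time a payee is seen, then reuses the
# mapping persisted in nordea_to_ynab.db on later calls.
print(get_subcategory_for_transaction(Transaction("K-Market Helsinki")))
```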
94afc5e553115de04643f3387991b359a9ec4cfa | 471 | py | Python | Pygame/Pygame18.py | liyuanyuan11/Python | d94cc7ab39e56c6e24bfc741a30da77590d1d220 | ["MIT"] | null | null | null | Pygame/Pygame18.py | liyuanyuan11/Python | d94cc7ab39e56c6e24bfc741a30da77590d1d220 | ["MIT"] | null | null | null | Pygame/Pygame18.py | liyuanyuan11/Python | d94cc7ab39e56c6e24bfc741a30da77590d1d220 | ["MIT"] | null | null | null |
import pygame
pygame.init()
windowSurface = pygame.display.set_mode([500,400])
music = pygame.mixer.Sound("/Users/chenchaoyang/Desktop/python/Python/Music/Music2.wav")
music.play()
Running = True
while Running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
Running = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
music.stop()
pygame.display.update()
pygame.quit()
| 31.4 | 88 | 0.653928 |
7872dadb269a553e1f6d144943d1572209d7ef65 | 648 | py | Python | algorithms/QuickSort.py | zhaoxinlu/leetcode-algorithms | f5e1c94c99628e7fb04ba158f686a55a8093e933 | ["MIT"] | null | null | null | algorithms/QuickSort.py | zhaoxinlu/leetcode-algorithms | f5e1c94c99628e7fb04ba158f686a55a8093e933 | ["MIT"] | null | null | null | algorithms/QuickSort.py | zhaoxinlu/leetcode-algorithms | f5e1c94c99628e7fb04ba158f686a55a8093e933 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
def partition(arr, left, right):
low = left
high = right
key = arr[low]
while low < high:
while low < high and arr[high] >= key:
high -= 1
arr[low] = arr[high]
while low < high and arr[low] <= key:
low += 1
arr[high] = arr[low]
arr[low] = key
return low
def QuickSort(arr, left, right):
if left < right:
low = partition(arr, left, right)
QuickSort(arr, left, low-1)
QuickSort(arr, low+1, right)
return arr
if __name__ == '__main__':
arr = [6, 8, 1, 4, 3, 9]
    print(QuickSort(arr, 0, len(arr)-1))
| 21.6 | 46 | 0.515432 |
c29f595de702c2143dad579ff5bd308fad528c4f | 16,342 | py | Python | sdk/python/pulumi_azure_native/datashare/v20200901/share_subscription.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/datashare/v20200901/share_subscription.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/datashare/v20200901/share_subscription.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = ['ShareSubscriptionArgs', 'ShareSubscription']
@pulumi.input_type
class ShareSubscriptionArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
invitation_id: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
source_share_location: pulumi.Input[str],
expiration_date: Optional[pulumi.Input[str]] = None,
share_subscription_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ShareSubscription resource.
:param pulumi.Input[str] account_name: The name of the share account.
:param pulumi.Input[str] invitation_id: The invitation id.
:param pulumi.Input[str] resource_group_name: The resource group name.
:param pulumi.Input[str] source_share_location: Source share location.
:param pulumi.Input[str] expiration_date: The expiration date of the share subscription.
:param pulumi.Input[str] share_subscription_name: The name of the shareSubscription.
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "invitation_id", invitation_id)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "source_share_location", source_share_location)
if expiration_date is not None:
pulumi.set(__self__, "expiration_date", expiration_date)
if share_subscription_name is not None:
pulumi.set(__self__, "share_subscription_name", share_subscription_name)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
The name of the share account.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="invitationId")
def invitation_id(self) -> pulumi.Input[str]:
"""
The invitation id.
"""
return pulumi.get(self, "invitation_id")
@invitation_id.setter
def invitation_id(self, value: pulumi.Input[str]):
pulumi.set(self, "invitation_id", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The resource group name.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="sourceShareLocation")
def source_share_location(self) -> pulumi.Input[str]:
"""
Source share location.
"""
return pulumi.get(self, "source_share_location")
@source_share_location.setter
def source_share_location(self, value: pulumi.Input[str]):
pulumi.set(self, "source_share_location", value)
@property
@pulumi.getter(name="expirationDate")
def expiration_date(self) -> Optional[pulumi.Input[str]]:
"""
The expiration date of the share subscription.
"""
return pulumi.get(self, "expiration_date")
@expiration_date.setter
def expiration_date(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "expiration_date", value)
@property
@pulumi.getter(name="shareSubscriptionName")
def share_subscription_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the shareSubscription.
"""
return pulumi.get(self, "share_subscription_name")
@share_subscription_name.setter
def share_subscription_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "share_subscription_name", value)
class ShareSubscription(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
expiration_date: Optional[pulumi.Input[str]] = None,
invitation_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
share_subscription_name: Optional[pulumi.Input[str]] = None,
source_share_location: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
A share subscription data transfer object.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The name of the share account.
:param pulumi.Input[str] expiration_date: The expiration date of the share subscription.
:param pulumi.Input[str] invitation_id: The invitation id.
:param pulumi.Input[str] resource_group_name: The resource group name.
:param pulumi.Input[str] share_subscription_name: The name of the shareSubscription.
:param pulumi.Input[str] source_share_location: Source share location.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ShareSubscriptionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A share subscription data transfer object.
:param str resource_name: The name of the resource.
:param ShareSubscriptionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ShareSubscriptionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
expiration_date: Optional[pulumi.Input[str]] = None,
invitation_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
share_subscription_name: Optional[pulumi.Input[str]] = None,
source_share_location: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ShareSubscriptionArgs.__new__(ShareSubscriptionArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
__props__.__dict__["expiration_date"] = expiration_date
if invitation_id is None and not opts.urn:
raise TypeError("Missing required property 'invitation_id'")
__props__.__dict__["invitation_id"] = invitation_id
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["share_subscription_name"] = share_subscription_name
if source_share_location is None and not opts.urn:
raise TypeError("Missing required property 'source_share_location'")
__props__.__dict__["source_share_location"] = source_share_location
__props__.__dict__["created_at"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provider_email"] = None
__props__.__dict__["provider_name"] = None
__props__.__dict__["provider_tenant_name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["share_description"] = None
__props__.__dict__["share_kind"] = None
__props__.__dict__["share_name"] = None
__props__.__dict__["share_subscription_status"] = None
__props__.__dict__["share_terms"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
__props__.__dict__["user_email"] = None
__props__.__dict__["user_name"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:datashare/v20200901:ShareSubscription"), pulumi.Alias(type_="azure-native:datashare:ShareSubscription"), pulumi.Alias(type_="azure-nextgen:datashare:ShareSubscription"), pulumi.Alias(type_="azure-native:datashare/v20181101preview:ShareSubscription"), pulumi.Alias(type_="azure-nextgen:datashare/v20181101preview:ShareSubscription"), pulumi.Alias(type_="azure-native:datashare/v20191101:ShareSubscription"), pulumi.Alias(type_="azure-nextgen:datashare/v20191101:ShareSubscription"), pulumi.Alias(type_="azure-native:datashare/v20201001preview:ShareSubscription"), pulumi.Alias(type_="azure-nextgen:datashare/v20201001preview:ShareSubscription"), pulumi.Alias(type_="azure-native:datashare/v20210801:ShareSubscription"), pulumi.Alias(type_="azure-nextgen:datashare/v20210801:ShareSubscription")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ShareSubscription, __self__).__init__(
'azure-native:datashare/v20200901:ShareSubscription',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ShareSubscription':
"""
Get an existing ShareSubscription resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ShareSubscriptionArgs.__new__(ShareSubscriptionArgs)
__props__.__dict__["created_at"] = None
__props__.__dict__["expiration_date"] = None
__props__.__dict__["invitation_id"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provider_email"] = None
__props__.__dict__["provider_name"] = None
__props__.__dict__["provider_tenant_name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["share_description"] = None
__props__.__dict__["share_kind"] = None
__props__.__dict__["share_name"] = None
__props__.__dict__["share_subscription_status"] = None
__props__.__dict__["share_terms"] = None
__props__.__dict__["source_share_location"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
__props__.__dict__["user_email"] = None
__props__.__dict__["user_name"] = None
return ShareSubscription(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> pulumi.Output[str]:
"""
Time at which the share subscription was created.
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="expirationDate")
def expiration_date(self) -> pulumi.Output[Optional[str]]:
"""
The expiration date of the share subscription.
"""
return pulumi.get(self, "expiration_date")
@property
@pulumi.getter(name="invitationId")
def invitation_id(self) -> pulumi.Output[str]:
"""
The invitation id.
"""
return pulumi.get(self, "invitation_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the azure resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="providerEmail")
def provider_email(self) -> pulumi.Output[str]:
"""
Email of the provider who created the resource
"""
return pulumi.get(self, "provider_email")
@property
@pulumi.getter(name="providerName")
def provider_name(self) -> pulumi.Output[str]:
"""
Name of the provider who created the resource
"""
return pulumi.get(self, "provider_name")
@property
@pulumi.getter(name="providerTenantName")
def provider_tenant_name(self) -> pulumi.Output[str]:
"""
Tenant name of the provider who created the resource
"""
return pulumi.get(self, "provider_tenant_name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Provisioning state of the share subscription
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="shareDescription")
def share_description(self) -> pulumi.Output[str]:
"""
Description of share
"""
return pulumi.get(self, "share_description")
@property
@pulumi.getter(name="shareKind")
def share_kind(self) -> pulumi.Output[str]:
"""
Kind of share
"""
return pulumi.get(self, "share_kind")
@property
@pulumi.getter(name="shareName")
def share_name(self) -> pulumi.Output[str]:
"""
Name of the share
"""
return pulumi.get(self, "share_name")
@property
@pulumi.getter(name="shareSubscriptionStatus")
def share_subscription_status(self) -> pulumi.Output[str]:
"""
Gets the current status of share subscription.
"""
return pulumi.get(self, "share_subscription_status")
@property
@pulumi.getter(name="shareTerms")
def share_terms(self) -> pulumi.Output[str]:
"""
Terms of a share
"""
return pulumi.get(self, "share_terms")
@property
@pulumi.getter(name="sourceShareLocation")
def source_share_location(self) -> pulumi.Output[str]:
"""
Source share location.
"""
return pulumi.get(self, "source_share_location")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
System Data of the Azure resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Type of the azure resource
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userEmail")
def user_email(self) -> pulumi.Output[str]:
"""
Email of the user who created the resource
"""
return pulumi.get(self, "user_email")
@property
@pulumi.getter(name="userName")
def user_name(self) -> pulumi.Output[str]:
"""
Name of the user who created the resource
"""
return pulumi.get(self, "user_name")
| 41.163728 | 881 | 0.651817 |
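A hedged example of declaring the resource above in a Pulumi program, assuming `ShareSubscription` is re-exported from the `pulumi_azure_native.datashare.v20200901` package as in the published SDK; every name and the invitation id below are placeholders.

```python
import pulumi
from pulumi_azure_native.datashare.v20200901 import ShareSubscription

subscription = ShareSubscription(
    "exampleShareSubscription",
    account_name="my-datashare-account",                    # placeholder
    resource_group_name="my-resource-group",                # placeholder
    invitation_id="00000000-0000-0000-0000-000000000000",   # from a real invitation
    source_share_location="eastus",
)

# Outputs resolve once the share subscription has been provisioned.
pulumi.export("provisioning_state", subscription.provisioning_state)
```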
9ea2cffda4c7fc49158cc6da3f2e0056635f1192 | 2,109 | py | Python | neural_compressor/data/dataloaders/dataloader.py | intel/neural-compressor | 16a4a12045fcb468da4d33769aff2c1a5e2ba6ba | ["Apache-2.0"] | 172 | 2021-09-14T18:34:17.000Z | 2022-03-30T06:49:53.000Z | neural_compressor/data/dataloaders/dataloader.py | intel/neural-compressor | 16a4a12045fcb468da4d33769aff2c1a5e2ba6ba | ["Apache-2.0"] | 40 | 2021-09-14T02:26:12.000Z | 2022-03-29T08:34:04.000Z | neural_compressor/data/dataloaders/dataloader.py | intel/neural-compressor | 16a4a12045fcb468da4d33769aff2c1a5e2ba6ba | ["Apache-2.0"] | 33 | 2021-09-15T07:27:25.000Z | 2022-03-25T08:30:57.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neural_compressor.experimental.data.dataloaders import DATALOADERS
# THIS API IS TO BE DEPRECATED!
class DataLoader(object):
"""Entrance of all configured DataLoaders. Will dispatch the DataLoaders to framework
specific one. Users will be not aware of the dispatching, and the Interface is unified.
"""
def __new__(cls, framework, dataset, batch_size=1, collate_fn=None,
last_batch='rollover', sampler=None, batch_sampler=None,
num_workers=0, pin_memory=False, shuffle=False, distributed=False):
assert framework in ('tensorflow', 'tensorflow_itex',
'pytorch', 'pytorch_ipex', 'pytorch_fx', 'onnxrt_qdq', \
'onnxrt_qlinearops', 'onnxrt_integerops', 'mxnet'), \
"framework support tensorflow pytorch mxnet onnxruntime"
return DATALOADERS[framework](dataset=dataset,
batch_size=batch_size,
last_batch=last_batch,
collate_fn=collate_fn,
sampler=sampler,
batch_sampler=batch_sampler,
num_workers=num_workers,
pin_memory=pin_memory,
shuffle=shuffle,
distributed=distributed)
| 46.866667 | 94 | 0.598862 |
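A hedged sketch of the deprecated `DataLoader` facade above dispatching to a framework-specific loader. The in-memory list of `(image, label)` pairs is a toy dataset, and whether the default TensorFlow collate accepts this exact shape is an assumption; treat the snippet as illustrative only.

```python
import numpy as np

from neural_compressor.data.dataloaders.dataloader import DataLoader

# Toy dataset: eight (image, label) pairs.
dataset = [(np.random.rand(32, 32, 3).astype("float32"), 0) for _ in range(8)]

loader = DataLoader(framework="tensorflow", dataset=dataset, batch_size=4)
for batch in loader:
    print(type(batch))  # one batch of four samples per iteration
    break
```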
6b04f08f5cb5b74b115b84fe92c354a9bea448ee | 608 | py | Python | lib/webui/tool.py | pcn/resgate | 3aa6cda0f31d2b1bc5a74dbac3fa22a5fb3043ed | ["Apache-2.0"] | 1 | 2021-03-22T13:40:15.000Z | 2021-03-22T13:40:15.000Z | lib/webui/tool.py | pcn/resgate | 3aa6cda0f31d2b1bc5a74dbac3fa22a5fb3043ed | ["Apache-2.0"] | null | null | null | lib/webui/tool.py | pcn/resgate | 3aa6cda0f31d2b1bc5a74dbac3fa22a5fb3043ed | ["Apache-2.0"] | null | null | null |
import json.decoder
import logging
import json
import pprint
from aiohttp import web
from pywebio.platform.aiohttp import webio_handler
from jmespath import exceptions as jmesex
import webui.ui_webhook as webhook
import webui.ui_rules as webrules
import extractions
import rules
routes = web.RouteTableDef()
app = web.Application()
app.add_routes(
[
web.get(
"/tool",
webio_handler(
{
"edit_rules": webrules.edit_rules,
"edit_webhook": webhook.edit_webhook,
}
),
)
]
)
| 17.882353 | 57 | 0.615132 |
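A small sketch of serving the aiohttp application assembled above; `web.run_app` is the standard aiohttp entry point, the port is arbitrary, and the import path assumes `lib/` is on the Python path.

```python
from aiohttp import web

from webui.tool import app

if __name__ == "__main__":
    # PyWebIO's dict-based webio_handler dispatches on the ?app= query string,
    # e.g. /tool?app=edit_rules or /tool?app=edit_webhook.
    web.run_app(app, host="0.0.0.0", port=8080)
```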
0db8647925b61fff94e12a33af34dd3c5c5ca390 | 800 | py | Python | tests/conftest.py | santegoeds/bfair | 36c80fe60a570b6aa4ac030fc202648acb8d08c9 | ["Apache-2.0"] | null | null | null | tests/conftest.py | santegoeds/bfair | 36c80fe60a570b6aa4ac030fc202648acb8d08c9 | ["Apache-2.0"] | 36 | 2015-07-06T15:10:33.000Z | 2015-07-06T15:10:39.000Z | tests/conftest.py | santegoeds/bfair | 36c80fe60a570b6aa4ac030fc202648acb8d08c9 | ["Apache-2.0"] | null | null | null |
import pytest
from bfair.session import Session
def pytest_addoption(parser):
parser.addoption("--user", action="store")
parser.addoption("--password", action="store")
def setup_session(request):
if not request.config.option.user or not request.config.option.password:
pytest.skip("needs --user and --password")
user = request.config.option.user
password = request.config.option.password
session = Session(user, password)
session.login()
return session
def teardown_session(session):
session.logout()
def pytest_funcarg__session(request):
return request.cached_setup(setup = lambda: setup_session(request),
teardown = teardown_session,
scope = "session")
| 25.806452 | 76 | 0.65 |
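A minimal, hypothetical test consuming the `session` funcarg defined in the conftest above; run it with `pytest --user YOUR_USER --password YOUR_PASSWORD`, otherwise the fixture skips the test.

```python
# tests/test_session.py (hypothetical)


def test_session_is_logged_in(session):
    # conftest.py skips automatically when --user/--password are not supplied.
    assert session is not None
```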
bbe2858b8159a919f47241a208480827f11d4ca5 | 7,270 | py | Python | grr/server/grr_response_server/hunts/process_results.py | ahmednofal/grr | 08a57f6873ee13f425d0106e4143663bc6dbdd60 | ["Apache-2.0"] | null | null | null | grr/server/grr_response_server/hunts/process_results.py | ahmednofal/grr | 08a57f6873ee13f425d0106e4143663bc6dbdd60 | ["Apache-2.0"] | null | null | null | grr/server/grr_response_server/hunts/process_results.py | ahmednofal/grr | 08a57f6873ee13f425d0106e4143663bc6dbdd60 | ["Apache-2.0"] | 2 | 2020-08-24T00:22:03.000Z | 2020-11-14T08:34:43.000Z |
#!/usr/bin/env python
"""Cron job to process hunt results.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
from future.utils import iteritems
from future.utils import itervalues
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.util import collection
from grr_response_core.stats import stats_collector_instance
from grr_response_server import aff4
from grr_response_server import data_store
from grr_response_server import output_plugin
from grr_response_server.aff4_objects import cronjobs
from grr_response_server.hunts import implementation
from grr_response_server.hunts import results as hunts_results
class ResultsProcessingError(Exception):
"""This exception is raised when errors happen during results processing."""
def __init__(self):
self.exceptions_by_hunt = {}
super(ResultsProcessingError, self).__init__()
def RegisterSubException(self, hunt_urn, plugin_name, exception):
self.exceptions_by_hunt.setdefault(hunt_urn, {}).setdefault(
plugin_name, []).append(exception)
def __repr__(self):
messages = []
for hunt_urn, exceptions_by_plugin in iteritems(self.exceptions_by_hunt):
for plugin_name, exception in iteritems(exceptions_by_plugin):
messages.append("Exception for hunt %s (plugin %s): %s" %
(hunt_urn, plugin_name, exception))
return "\n".join(messages)
class ProcessHuntResultCollectionsCronFlow(cronjobs.SystemCronFlow):
"""Periodic cron flow that processes hunt results.
The ProcessHuntResultCollectionsCronFlow reads hunt results stored in
HuntResultCollections and feeds runs output plugins on them.
"""
frequency = rdfvalue.Duration("5m")
lifetime = rdfvalue.Duration("40m")
allow_overruns = True
BATCH_SIZE = 5000
def CheckIfRunningTooLong(self):
if self.max_running_time:
elapsed = rdfvalue.RDFDatetime.Now() - self.start_time
if elapsed > self.max_running_time:
return True
return False
def LoadPlugins(self, metadata_obj):
output_plugins = metadata_obj.Get(metadata_obj.Schema.OUTPUT_PLUGINS)
if not output_plugins:
return output_plugins, []
output_plugins = output_plugins.ToDict()
used_plugins = []
unused_plugins = []
for plugin_def, state in itervalues(output_plugins):
if not hasattr(plugin_def, "GetPluginForState"):
logging.error("Invalid plugin_def: %s", plugin_def)
continue
used_plugins.append((plugin_def, plugin_def.GetPluginForState(state)))
return output_plugins, used_plugins
def RunPlugins(self, hunt_urn, plugins, results, exceptions_by_plugin):
for plugin_def, plugin in plugins:
try:
plugin.ProcessResponses(results)
plugin.Flush()
plugin_status = output_plugin.OutputPluginBatchProcessingStatus(
plugin_descriptor=plugin_def,
status="SUCCESS",
batch_size=len(results))
stats_collector_instance.Get().IncrementCounter(
"hunt_results_ran_through_plugin",
delta=len(results),
fields=[plugin_def.plugin_name])
except Exception as e: # pylint: disable=broad-except
logging.exception(
"Error processing hunt results: hunt %s, "
"plugin %s", hunt_urn, utils.SmartStr(plugin))
self.Log("Error processing hunt results (hunt %s, "
"plugin %s): %s" % (hunt_urn, utils.SmartStr(plugin), e))
stats_collector_instance.Get().IncrementCounter(
"hunt_output_plugin_errors", fields=[plugin_def.plugin_name])
plugin_status = output_plugin.OutputPluginBatchProcessingStatus(
plugin_descriptor=plugin_def,
status="ERROR",
summary=utils.SmartStr(e),
batch_size=len(results))
exceptions_by_plugin.setdefault(plugin_def, []).append(e)
with data_store.DB.GetMutationPool() as pool:
implementation.GRRHunt.PluginStatusCollectionForHID(hunt_urn).Add(
plugin_status, mutation_pool=pool)
if plugin_status.status == plugin_status.Status.ERROR:
implementation.GRRHunt.PluginErrorCollectionForHID(hunt_urn).Add(
plugin_status, mutation_pool=pool)
def ProcessOneHunt(self, exceptions_by_hunt):
"""Reads results for one hunt and process them."""
hunt_results_urn, results = (
hunts_results.HuntResultQueue.ClaimNotificationsForCollection(
token=self.token, lease_time=self.lifetime))
logging.debug("Found %d results for hunt %s", len(results),
hunt_results_urn)
if not results:
return 0
hunt_urn = rdfvalue.RDFURN(hunt_results_urn.Dirname())
batch_size = self.BATCH_SIZE
metadata_urn = hunt_urn.Add("ResultsMetadata")
exceptions_by_plugin = {}
num_processed_for_hunt = 0
collection_obj = implementation.GRRHunt.ResultCollectionForHID(hunt_urn)
try:
with aff4.FACTORY.OpenWithLock(
metadata_urn, lease_time=600, token=self.token) as metadata_obj:
all_plugins, used_plugins = self.LoadPlugins(metadata_obj)
num_processed = int(
metadata_obj.Get(metadata_obj.Schema.NUM_PROCESSED_RESULTS))
for batch in collection.Batch(results, batch_size):
results = list(
collection_obj.MultiResolve(
[r.value.ResultRecord() for r in batch]))
self.RunPlugins(hunt_urn, used_plugins, results, exceptions_by_plugin)
hunts_results.HuntResultQueue.DeleteNotifications(
batch, token=self.token)
num_processed += len(batch)
num_processed_for_hunt += len(batch)
self.HeartBeat()
metadata_obj.Set(
metadata_obj.Schema.NUM_PROCESSED_RESULTS(num_processed))
metadata_obj.UpdateLease(600)
if self.CheckIfRunningTooLong():
logging.warning("Run too long, stopping.")
break
metadata_obj.Set(metadata_obj.Schema.OUTPUT_PLUGINS(all_plugins))
metadata_obj.Set(
metadata_obj.Schema.NUM_PROCESSED_RESULTS(num_processed))
except aff4.LockError:
logging.warn(
"ProcessHuntResultCollectionsCronFlow: "
"Could not get lock on hunt metadata %s.", metadata_urn)
return 0
if exceptions_by_plugin:
for plugin, exceptions in iteritems(exceptions_by_plugin):
exceptions_by_hunt.setdefault(hunt_urn, {}).setdefault(
plugin, []).extend(exceptions)
logging.debug("Processed %d results.", num_processed_for_hunt)
return len(results)
def Start(self):
self.start_time = rdfvalue.RDFDatetime.Now()
exceptions_by_hunt = {}
self.max_running_time = self.lifetime * 0.6
while not self.CheckIfRunningTooLong():
count = self.ProcessOneHunt(exceptions_by_hunt)
if not count:
break
if exceptions_by_hunt:
e = ResultsProcessingError()
for hunt_urn, exceptions_by_plugin in iteritems(exceptions_by_hunt):
for plugin, exceptions in iteritems(exceptions_by_plugin):
for exception in exceptions:
e.RegisterSubException(hunt_urn, plugin, exception)
raise e
| 37.282051 | 80 | 0.705365 |
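A small illustration of how `ResultsProcessingError` above aggregates per-hunt, per-plugin exceptions; it assumes a configured GRR server environment so the module imports cleanly, and the hunt URN and plugin name are placeholders.

```python
from grr_response_server.hunts.process_results import ResultsProcessingError

err = ResultsProcessingError()
err.RegisterSubException("aff4:/hunts/H:12345678", "CSVOutputPlugin",
                         ValueError("bad row"))
err.RegisterSubException("aff4:/hunts/H:12345678", "CSVOutputPlugin",
                         ValueError("another bad row"))

# repr() lists each hunt/plugin with the exceptions accumulated for it.
print(repr(err))
```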
a562f6d8ca95b18ef2b75191aad086604d769616 | 5,524 | py | Python | venv/Lib/site-packages/sklearn/feature_extraction/tests/test_feature_hasher.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | ["MIT"] | null | null | null | venv/Lib/site-packages/sklearn/feature_extraction/tests/test_feature_hasher.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | ["MIT"] | null | null | null | venv/Lib/site-packages/sklearn/feature_extraction/tests/test_feature_hasher.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | ["MIT"] | null | null | null |
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from sklearn.feature_extraction import FeatureHasher
from sklearn.utils._testing import ignore_warnings, fails_if_pypy
pytestmark = fails_if_pypy
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert "dict" == h.input_type
raw_X = [{"foo": "bar", "dada": 42, "tzara": 37}, {"foo": "baz", "gaga": "string1"}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [
["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"],
]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(
n_features=n_features, input_type="string", alternate_sign=False
)
X = h.transform(it)
assert X.shape[0] == len(raw_X)
assert X.shape[1] == n_features
assert X[0].sum() == 4
assert X[1].sum() == 3
assert X.nnz == 6
def test_hashing_transform_seed():
# check the influence of the seed when computing the hashes
# import is here to avoid importing on pypy
from sklearn.feature_extraction._hashing_fast import transform as _hashing_transform
raw_X = [
["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"],
]
raw_X_ = (((f, 1) for f in x) for x in raw_X)
indices, indptr, _ = _hashing_transform(raw_X_, 2 ** 7, str, False)
raw_X_ = (((f, 1) for f in x) for x in raw_X)
indices_0, indptr_0, _ = _hashing_transform(raw_X_, 2 ** 7, str, False, seed=0)
assert_array_equal(indices, indices_0)
assert_array_equal(indptr, indptr_0)
raw_X_ = (((f, 1) for f in x) for x in raw_X)
indices_1, _, _ = _hashing_transform(raw_X_, 2 ** 7, str, False, seed=1)
with pytest.raises(AssertionError):
assert_array_equal(indices, indices_1)
def test_feature_hasher_pairs():
raw_X = (
iter(d.items())
for d in [{"foo": 1, "bar": 2}, {"baz": 3, "quux": 4, "foo": -1}]
)
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert [1, 2] == x1_nz
assert [1, 3, 4] == x2_nz
def test_feature_hasher_pairs_with_string_values():
raw_X = (
iter(d.items())
for d in [{"foo": 1, "bar": "a"}, {"baz": "abc", "quux": 4, "foo": -1}]
)
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert [1, 1] == x1_nz
assert [1, 1, 4] == x2_nz
raw_X = (iter(d.items()) for d in [{"bax": "abc"}, {"bax": "abc"}])
x1, x2 = h.transform(raw_X).toarray()
x1_nz = np.abs(x1[x1 != 0])
x2_nz = np.abs(x2[x2 != 0])
assert [1] == x1_nz
assert [1] == x2_nz
assert_array_equal(x1, x2)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
with pytest.raises(ValueError):
FeatureHasher(input_type="gobbledygook")
with pytest.raises(ValueError):
FeatureHasher(n_features=-1)
with pytest.raises(ValueError):
FeatureHasher(n_features=0)
with pytest.raises(TypeError):
FeatureHasher(n_features="ham")
h = FeatureHasher(n_features=np.uint16(2 ** 6))
with pytest.raises(ValueError):
h.transform([])
with pytest.raises(Exception):
h.transform([[5.5]])
with pytest.raises(Exception):
h.transform([[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
with pytest.raises(TypeError):
hasher.fit()
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{"foo": 0}])
assert X.data.shape == (0,)
@ignore_warnings(category=FutureWarning)
def test_hasher_alternate_sign():
X = [list("Thequickbrownfoxjumped")]
Xt = FeatureHasher(alternate_sign=True, input_type="string").fit_transform(X)
assert Xt.data.min() < 0 and Xt.data.max() > 0
Xt = FeatureHasher(alternate_sign=False, input_type="string").fit_transform(X)
assert Xt.data.min() > 0
def test_hash_collisions():
X = [list("Thequickbrownfoxjumped")]
Xt = FeatureHasher(
alternate_sign=True, n_features=1, input_type="string"
).fit_transform(X)
# check that some of the hashed tokens are added
# with an opposite sign and cancel out
assert abs(Xt.data[0]) < len(X[0])
Xt = FeatureHasher(
alternate_sign=False, n_features=1, input_type="string"
).fit_transform(X)
assert Xt.data[0] == len(X[0])
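# Quick illustration of the FeatureHasher API exercised by the tests above
# (a usage sketch, not part of the original test module).
from sklearn.feature_extraction import FeatureHasher

hasher = FeatureHasher(n_features=16)                      # input_type defaults to "dict"
X = hasher.transform([{"foo": 1, "bar": 2}, {"baz": 3}])   # returns a scipy sparse matrix
print(X.shape)                                             # (2, 16)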
| 31.747126 | 89 | 0.612238 |
d99ffa7a76dbc0efee5284edd8b729835de299f7 | 5,197 | py | Python |
src/oci/core/models/update_vcn_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | ["Apache-2.0", "BSD-3-Clause"] | null | null | null |
src/oci/core/models/update_vcn_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | ["Apache-2.0", "BSD-3-Clause"] | null | null | null |
src/oci/core/models/update_vcn_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | ["Apache-2.0", "BSD-3-Clause"] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateVcnDetails(object):
"""
UpdateVcnDetails model.
"""
def __init__(self, **kwargs):
"""
Initializes a new UpdateVcnDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param defined_tags:
The value to assign to the defined_tags property of this UpdateVcnDetails.
:type defined_tags: dict(str, dict(str, object))
:param display_name:
The value to assign to the display_name property of this UpdateVcnDetails.
:type display_name: str
:param freeform_tags:
The value to assign to the freeform_tags property of this UpdateVcnDetails.
:type freeform_tags: dict(str, str)
"""
self.swagger_types = {
'defined_tags': 'dict(str, dict(str, object))',
'display_name': 'str',
'freeform_tags': 'dict(str, str)'
}
self.attribute_map = {
'defined_tags': 'definedTags',
'display_name': 'displayName',
'freeform_tags': 'freeformTags'
}
self._defined_tags = None
self._display_name = None
self._freeform_tags = None
@property
def defined_tags(self):
"""
Gets the defined_tags of this UpdateVcnDetails.
Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:return: The defined_tags of this UpdateVcnDetails.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this UpdateVcnDetails.
Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:param defined_tags: The defined_tags of this UpdateVcnDetails.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
@property
def display_name(self):
"""
Gets the display_name of this UpdateVcnDetails.
A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
:return: The display_name of this UpdateVcnDetails.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this UpdateVcnDetails.
A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
:param display_name: The display_name of this UpdateVcnDetails.
:type: str
"""
self._display_name = display_name
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this UpdateVcnDetails.
Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:return: The freeform_tags of this UpdateVcnDetails.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this UpdateVcnDetails.
Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:param freeform_tags: The freeform_tags of this UpdateVcnDetails.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
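# Illustrative use of the model defined above (a sketch only; passing the object
# on to a service update call is assumed and not shown here).
details = UpdateVcnDetails()
details.display_name = "renamed-vcn"
details.freeform_tags = {"Department": "Finance"}
print(details)  # __repr__ renders the populated fields via formatted_flat_dict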
| 33.529032 | 245 | 0.653069 |
f1e67489bbc1cf9c0aa668e037509418887d6c49 | 2,329 | py | Python |
authapi/tests/test_pagination.py | praekeltfoundation/seed-auth-api | 2238f7ecde2f75143bea0ac36f875793a19dde9b | ["BSD-3-Clause"] | null | null | null |
authapi/tests/test_pagination.py | praekeltfoundation/seed-auth-api | 2238f7ecde2f75143bea0ac36f875793a19dde9b | ["BSD-3-Clause"] | 2 | 2019-08-06T08:30:42.000Z | 2020-02-12T06:32:54.000Z |
authapi/tests/test_pagination.py | praekeltfoundation/seed-auth-api | 2238f7ecde2f75143bea0ac36f875793a19dde9b | ["BSD-3-Clause"] | null | null | null |
from rest_framework.generics import ListAPIView
from rest_framework.test import APITestCase
from rest_framework.test import APIRequestFactory
from authapi.serializers import OrganizationSummarySerializer
from authapi.models import SeedOrganization
from authapi.pagination import LinkHeaderPagination
class DummyView(ListAPIView):
queryset = SeedOrganization.objects.all()
serializer_class = OrganizationSummarySerializer
pagination_class = LinkHeaderPagination
class LinkHeaderPaginationTests(APITestCase):
def setUp(self):
self.requests = APIRequestFactory()
def handle(self, req):
resp = DummyView.as_view()(req)
resp.render()
return resp
def test_next(self):
        '''The paginator should set the Link header to a next link if there is
a next page'''
for _ in range(3):
SeedOrganization.objects.create()
resp = self.handle(self.requests.get('/?page=1&page_size=2'))
self.assertEqual(
resp['Link'],
'<http://testserver/?page=2&page_size=2>; rel="next"')
def test_prev(self):
        '''The paginator should set the Link header to a previous link if
there is a previous page'''
for _ in range(3):
SeedOrganization.objects.create()
resp = self.handle(self.requests.get('/?page=2&page_size=2'))
self.assertEqual(
resp['Link'],
'<http://testserver/?page_size=2>; rel="prev"')
def test_next_and_prev(self):
        '''The paginator should set the Link header to a next and previous
        link if there are both a next and a previous page'''
for _ in range(5):
SeedOrganization.objects.create()
resp = self.handle(self.requests.get('/?page=2&page_size=2'))
self.assertEqual(
resp['Link'],
'<http://testserver/?page=3&page_size=2>; rel="next", '
'<http://testserver/?page_size=2>; rel="prev"')
def test_no_next_no_prev(self):
'''The paginator should not set the Link header if there is not a next
or previous page'''
for _ in range(2):
SeedOrganization.objects.create()
resp = self.handle(self.requests.get('/?page=1&page_size=2'))
self.assertTrue('Link' not in resp)
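# Sketch of how an API client could walk pages by following the Link headers this
# paginator emits. The use of the `requests` library here is an illustrative
# assumption and is not part of the test suite.
import requests

def iter_pages(url, **kwargs):
    while url:
        resp = requests.get(url, **kwargs)
        resp.raise_for_status()
        yield resp.json()
        # `requests` exposes parsed Link headers as resp.links
        url = resp.links.get("next", {}).get("url")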
| 32.347222 | 79 | 0.644912 |
71fb3f01c863748cab11ec79b0488c344479af57 | 269 | py | Python |
scripts/valid_features.py | SaraLatif99/udacity-mlnd-deeplearning-capstone | b781a98bad40032803a4270457e5b27e2b4e4ed7 | ["MIT"] | 27 | 2017-03-01T11:06:40.000Z | 2021-02-01T07:32:39.000Z |
scripts/valid_features.py | SaraLatif99/udacity-mlnd-deeplearning-capstone | b781a98bad40032803a4270457e5b27e2b4e4ed7 | ["MIT"] | null | null | null |
scripts/valid_features.py | SaraLatif99/udacity-mlnd-deeplearning-capstone | b781a98bad40032803a4270457e5b27e2b4e4ed7 | ["MIT"] | 24 | 2017-05-20T19:49:29.000Z | 2021-06-11T00:25:06.000Z |
import numpy as np
from keras.applications import VGG16
model = VGG16(weights="imagenet",include_top=False)
valid_images = np.load('validation_images.npy')
valid_features = model.predict(valid_images,batch_size=1,verbose=1)
np.save("valid_features.npy",valid_features)
| 38.428571 | 67 | 0.817844 |
dde10098eb08fc43c5cda5d430d6f7d5f5dd14ac | 586 | py | Python |
face_detection.py | VinayDagar/movement-detection | 1c3ce2874f4903c167f065e928d4b77d6edcf05a | ["MIT"] | null | null | null |
face_detection.py | VinayDagar/movement-detection | 1c3ce2874f4903c167f065e928d4b77d6edcf05a | ["MIT"] | null | null | null |
face_detection.py | VinayDagar/movement-detection | 1c3ce2874f4903c167f065e928d4b77d6edcf05a | ["MIT"] | null | null | null |
import cv2
# using haar cascader classifier for face detection
face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        # stop when the camera stops returning frames
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.1, 4)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (118, 223, 111), 3)
    cv2.imshow('Face Detection', frame)
    # waitKey(0) would freeze on every frame; poll briefly and quit on 'q'
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
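# Variant of the same detection logic for a single image file instead of a webcam
# stream (illustrative sketch; the image and cascade paths are placeholders, and
# the cv2 import from the script above is reused).
def detect_faces_in_image(image_path, cascade_path='haarcascade_frontalface_default.xml'):
    classifier = cv2.CascadeClassifier(cascade_path)
    image = cv2.imread(image_path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = classifier.detectMultiScale(gray, 1.1, 4)
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x + w, y + h), (118, 223, 111), 3)
    return image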
| 24.416667 | 79 | 0.636519 |
08d36a940b0b267910cc2f29bc0846b645210cea | 639 | py | Python |
djgumroad/products/migrations/0002_product_user.py | Maharshi-Pathak/gumroad-clone | 97ab1bd71585ee7a4279ad0189980e1b69c31948 | ["MIT"] | 11 | 2021-04-22T06:26:42.000Z | 2022-03-27T21:19:57.000Z |
djgumroad/products/migrations/0002_product_user.py | Maharshi-Pathak/gumroad-clone | 97ab1bd71585ee7a4279ad0189980e1b69c31948 | ["MIT"] | null | null | null |
djgumroad/products/migrations/0002_product_user.py | Maharshi-Pathak/gumroad-clone | 97ab1bd71585ee7a4279ad0189980e1b69c31948 | ["MIT"] | 6 | 2021-02-10T18:12:27.000Z | 2022-03-14T02:17:38.000Z |
# Generated by Django 3.0.11 on 2021-01-29 12:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('products', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='product',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='products', to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
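# For context, a Product model consistent with the migration above would declare the
# foreign key roughly as below. This is a hypothetical reconstruction (it belongs in
# the app's models module inside the Django project), not code taken from the repo.
from django.conf import settings
from django.db import models

class Product(models.Model):
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        related_name="products",
    )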
| 27.782609 | 146 | 0.671362 |
79c5c30a8f8726e97c45f0d8cd8ce92450bddabc | 1,483 | py | Python |
tests/counterfit/core/test_state.py | Mandroide/counterfit | 3252588d45514192edd4444b3bff0bf006f92bf0 | ["MIT"] | null | null | null |
tests/counterfit/core/test_state.py | Mandroide/counterfit | 3252588d45514192edd4444b3bff0bf006f92bf0 | ["MIT"] | null | null | null |
tests/counterfit/core/test_state.py | Mandroide/counterfit | 3252588d45514192edd4444b3bff0bf006f92bf0 | ["MIT"] | null | null | null |
from collections import defaultdict
from unittest.mock import Mock
import pytest
from counterfit.core.state import CFState
class TestCFState:
@pytest.fixture(scope='function')
def target_singleton_handler(self):
target_singleton_obj = CFState.get_instance()
return target_singleton_obj
def test_singleton_obj(self, target_singleton_handler):
a = target_singleton_handler
b = CFState.get_instance()
assert a == b
def test_set_active_target(self, target_singleton_handler):
target_singleton_handler.loaded_targets['TEMP_MODEL_NAME'] = Mock()
target_singleton_handler.set_active_target('TEMP_MODEL_NAME')
        assert target_singleton_handler.active_target._mock_parent is None
def test_set_active_attack(self, target_singleton_handler):
target_singleton_handler.active_target = Mock()
target_singleton_handler.active_target.attacks = defaultdict()
target_singleton_handler.active_target.attacks['TEMP_ATTACK_ID'] = Mock()
target_singleton_handler.set_active_attack('TEMP_ATTACK_ID')
assert target_singleton_handler.active_target.active_attack._mock_new_name == 'active_attack'
def test_load_attack(self, target_singleton_handler):
attack_obj = Mock()
attack_obj.attack_name = 'TEMP_ATTACK'
target_singleton_handler.load_attack(attack_obj)
assert 'TEMP_ATTACK' in target_singleton_handler.loaded_attacks
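# The get_instance() behaviour exercised above is a classic lazy singleton; a
# minimal stand-alone equivalent looks like this (illustrative only, not the real
# CFState implementation).
class LazySingleton:
    _instance = None

    @classmethod
    def get_instance(cls):
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

assert LazySingleton.get_instance() is LazySingleton.get_instance()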
| 40.081081 | 101 | 0.751854 |
c30c7aa9e8c8e507f6e64b82e05f179c20efd08f | 138,828 | py | Python |
lib/galaxy/model/mapping.py | ClayBirkett/galaxy | b5afa3c1a90d269f1d438ffde481ff2e4178a72b | ["CC-BY-3.0"] | 1 | 2019-11-15T01:50:38.000Z | 2019-11-15T01:50:38.000Z |
lib/galaxy/model/mapping.py | userssss/galaxy | 9662164ad68b39adf5a5606a7aa8e388f6a79f1e | ["CC-BY-3.0"] | null | null | null |
lib/galaxy/model/mapping.py | userssss/galaxy | 9662164ad68b39adf5a5606a7aa8e388f6a79f1e | ["CC-BY-3.0"] | null | null | null |
"""
Details of how the data model objects are mapped onto the relational database
are encapsulated here.
"""
import logging
from sqlalchemy import (
and_,
asc,
Boolean,
Column,
DateTime,
desc,
false,
ForeignKey,
func,
Index,
Integer,
MetaData,
not_,
Numeric,
select,
String, Table,
TEXT,
Text,
true,
Unicode,
UniqueConstraint,
VARCHAR
)
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy.orm import backref, class_mapper, column_property, deferred, mapper, object_session, relation
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy.sql import exists
from sqlalchemy.types import BigInteger
from galaxy import model
from galaxy.model.base import ModelMapping
from galaxy.model.custom_types import JSONType, MetadataType, TrimmedString, UUIDType
from galaxy.model.orm.engine_factory import build_engine
from galaxy.model.orm.now import now
from galaxy.model.security import GalaxyRBACAgent
log = logging.getLogger(__name__)
metadata = MetaData()
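# (Illustrative aside, not part of mapping.py: tables declared against `metadata`
# below are materialized with SQLAlchemy's create_all, e.g.
#
#     from sqlalchemy import create_engine
#     engine = create_engine("sqlite://")
#     metadata.create_all(engine)
#
# Galaxy itself constructs its engine through
# galaxy.model.orm.engine_factory.build_engine, imported above.)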
model.WorkerProcess.table = Table(
'worker_process',
metadata,
Column("id", Integer, primary_key=True),
Column("server_name", String(255), index=True),
Column("hostname", String(255)),
Column("update_time", DateTime, default=now, onupdate=now),
UniqueConstraint('server_name', 'hostname'),
)
model.User.table = Table(
"galaxy_user", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("email", TrimmedString(255), index=True, nullable=False),
Column("username", TrimmedString(255), index=True, unique=True),
Column("password", TrimmedString(255), nullable=False),
Column("last_password_change", DateTime, default=now),
Column("external", Boolean, default=False),
Column("form_values_id", Integer, ForeignKey("form_values.id"), index=True),
Column("deleted", Boolean, index=True, default=False),
Column("purged", Boolean, index=True, default=False),
Column("disk_usage", Numeric(15, 0), index=True),
Column("active", Boolean, index=True, default=True, nullable=False),
Column("activation_token", TrimmedString(64), nullable=True, index=True))
model.UserAddress.table = Table(
"user_address", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("desc", TrimmedString(255)),
Column("name", TrimmedString(255), nullable=False),
Column("institution", TrimmedString(255)),
Column("address", TrimmedString(255), nullable=False),
Column("city", TrimmedString(255), nullable=False),
Column("state", TrimmedString(255), nullable=False),
Column("postal_code", TrimmedString(255), nullable=False),
Column("country", TrimmedString(255), nullable=False),
Column("phone", TrimmedString(255)),
Column("deleted", Boolean, index=True, default=False),
Column("purged", Boolean, index=True, default=False))
model.PSAAssociation.table = Table(
"psa_association", metadata,
Column('id', Integer, primary_key=True),
Column('server_url', VARCHAR(255)),
Column('handle', VARCHAR(255)),
Column('secret', VARCHAR(255)),
Column('issued', Integer),
Column('lifetime', Integer),
Column('assoc_type', VARCHAR(64)))
model.PSACode.table = Table(
"psa_code", metadata,
Column('id', Integer, primary_key=True),
Column('email', VARCHAR(200)),
Column('code', VARCHAR(32)))
model.PSANonce.table = Table(
"psa_nonce", metadata,
Column('id', Integer, primary_key=True),
Column('server_url', VARCHAR(255)),
Column('timestamp', Integer),
Column('salt', VARCHAR(40)))
model.PSAPartial.table = Table(
"psa_partial", metadata,
Column('id', Integer, primary_key=True),
Column('token', VARCHAR(32)),
Column('data', TEXT),
Column('next_step', Integer),
Column('backend', VARCHAR(32)))
model.UserAuthnzToken.table = Table(
"oidc_user_authnz_tokens", metadata,
Column('id', Integer, primary_key=True),
Column('user_id', Integer, ForeignKey("galaxy_user.id"), index=True),
Column('uid', VARCHAR(255)),
Column('provider', VARCHAR(32)),
Column('extra_data', JSONType, nullable=True),
Column('lifetime', Integer),
Column('assoc_type', VARCHAR(64)))
model.CustosAuthnzToken.table = Table(
"custos_authnz_token", metadata,
Column('id', Integer, primary_key=True),
Column('user_id', Integer, ForeignKey("galaxy_user.id")),
Column('external_user_id', String(64)),
Column('provider', String(255)),
Column('access_token', Text),
Column('id_token', Text),
Column('refresh_token', Text),
Column("expiration_time", DateTime),
Column("refresh_expiration_time", DateTime),
UniqueConstraint("user_id", "external_user_id", "provider"),
UniqueConstraint("external_user_id", "provider"),
)
model.CloudAuthz.table = Table(
"cloudauthz", metadata,
Column('id', Integer, primary_key=True),
Column('user_id', Integer, ForeignKey("galaxy_user.id"), index=True),
Column('provider', String(255)),
Column('config', JSONType),
Column('authn_id', Integer, ForeignKey("oidc_user_authnz_tokens.id"), index=True),
Column('tokens', JSONType),
Column('last_update', DateTime),
Column('last_activity', DateTime),
Column('description', TEXT),
Column('create_time', DateTime, default=now))
model.PasswordResetToken.table = Table(
"password_reset_token", metadata,
Column("token", String(32), primary_key=True, unique=True, index=True),
Column("expiration_time", DateTime),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True))
model.DynamicTool.table = Table(
"dynamic_tool", metadata,
Column("id", Integer, primary_key=True),
Column("uuid", UUIDType()),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, index=True, default=now, onupdate=now),
Column("tool_id", Unicode(255)),
Column("tool_version", Unicode(255)),
Column("tool_format", Unicode(255)),
Column("tool_path", Unicode(255)),
Column("tool_directory", Unicode(255)),
Column("hidden", Boolean, default=True),
Column("active", Boolean, default=True),
Column("value", JSONType()),
)
model.History.table = Table(
"history", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, index=True, default=now, onupdate=now),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("name", TrimmedString(255)),
Column("hid_counter", Integer, default=1),
Column("deleted", Boolean, index=True, default=False),
Column("purged", Boolean, index=True, default=False),
Column("importing", Boolean, index=True, default=False),
Column("genome_build", TrimmedString(40)),
Column("importable", Boolean, default=False),
Column("slug", TEXT),
Column("published", Boolean, index=True, default=False),
Index('ix_history_slug', 'slug', mysql_length=200),
)
model.HistoryUserShareAssociation.table = Table(
"history_user_share_association", metadata,
Column("id", Integer, primary_key=True),
Column("history_id", Integer, ForeignKey("history.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True))
model.HistoryDatasetAssociation.table = Table(
"history_dataset_association", metadata,
Column("id", Integer, primary_key=True),
Column("history_id", Integer, ForeignKey("history.id"), index=True),
Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("state", TrimmedString(64), index=True, key="_state"),
Column("copied_from_history_dataset_association_id", Integer,
ForeignKey("history_dataset_association.id"), nullable=True),
Column("copied_from_library_dataset_dataset_association_id", Integer,
ForeignKey("library_dataset_dataset_association.id"), nullable=True),
Column("name", TrimmedString(255)),
Column("info", TrimmedString(255)),
Column("blurb", TrimmedString(255)),
Column("peek", TEXT, key="_peek"),
Column("tool_version", TEXT),
Column("extension", TrimmedString(64)),
Column("metadata", MetadataType(), key="_metadata"),
Column("parent_id", Integer, ForeignKey("history_dataset_association.id"), nullable=True),
Column("designation", TrimmedString(255)),
Column("deleted", Boolean, index=True, default=False),
Column("visible", Boolean),
Column("extended_metadata_id", Integer, ForeignKey("extended_metadata.id"), index=True),
Column("version", Integer, default=1, nullable=True, index=True),
Column("hid", Integer),
Column("purged", Boolean, index=True, default=False),
Column("validated_state", TrimmedString(64), default='unvalidated', nullable=False),
Column("validated_state_message", TEXT),
Column("hidden_beneath_collection_instance_id",
ForeignKey("history_dataset_collection_association.id"), nullable=True))
model.HistoryDatasetAssociationHistory.table = Table(
"history_dataset_association_history", metadata,
Column("id", Integer, primary_key=True),
Column("history_dataset_association_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
Column("update_time", DateTime, default=now),
Column("version", Integer),
Column("name", TrimmedString(255)),
Column("extension", TrimmedString(64)),
Column("metadata", MetadataType(), key="_metadata"),
Column("extended_metadata_id", Integer, ForeignKey("extended_metadata.id"), index=True),
)
model.Dataset.table = Table(
"dataset", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, index=True, default=now, onupdate=now),
Column("state", TrimmedString(64), index=True),
Column("deleted", Boolean, index=True, default=False),
Column("purged", Boolean, index=True, default=False),
Column("purgable", Boolean, default=True),
Column("object_store_id", TrimmedString(255), index=True),
Column("external_filename", TEXT),
Column("_extra_files_path", TEXT),
Column("created_from_basename", TEXT),
Column('file_size', Numeric(15, 0)),
Column('total_size', Numeric(15, 0)),
Column('uuid', UUIDType()))
model.DatasetSource.table = Table(
"dataset_source", metadata,
Column("id", Integer, primary_key=True),
Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
Column("source_uri", TEXT),
Column("extra_files_path", TEXT),
Column("transform", JSONType)
)
model.DatasetHash.table = Table(
"dataset_hash", metadata,
Column("id", Integer, primary_key=True),
Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
Column("hash_function", TEXT),
Column("hash_value", TEXT),
Column("extra_files_path", TEXT),
)
model.DatasetSourceHash.table = Table(
"dataset_source_hash", metadata,
Column("id", Integer, primary_key=True),
Column("dataset_source_id", Integer, ForeignKey("dataset_source.id"), index=True),
Column("hash_function", TEXT),
Column("hash_value", TEXT)
)
# hda read access permission given by a user to a specific site (gen. for external display applications)
model.HistoryDatasetAssociationDisplayAtAuthorization.table = Table(
"history_dataset_association_display_at_authorization", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, index=True, default=now, onupdate=now),
Column("history_dataset_association_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("site", TrimmedString(255)))
model.HistoryDatasetAssociationSubset.table = Table(
"history_dataset_association_subset", metadata,
Column("id", Integer, primary_key=True),
Column("history_dataset_association_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
Column("history_dataset_association_subset_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
Column("location", Unicode(255), index=True))
model.ImplicitlyConvertedDatasetAssociation.table = Table(
"implicitly_converted_dataset_association", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("hda_id", Integer, ForeignKey("history_dataset_association.id"), index=True, nullable=True),
Column("ldda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True, nullable=True),
Column("hda_parent_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
Column("ldda_parent_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True),
Column("deleted", Boolean, index=True, default=False),
Column("metadata_safe", Boolean, index=True, default=True),
Column("type", TrimmedString(255)))
model.ValidationError.table = Table(
"validation_error", metadata,
Column("id", Integer, primary_key=True),
Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
Column("message", TrimmedString(255)),
Column("err_type", TrimmedString(64)),
Column("attributes", TEXT))
model.Group.table = Table(
"galaxy_group", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("name", String(255), index=True, unique=True),
Column("deleted", Boolean, index=True, default=False))
model.UserGroupAssociation.table = Table(
"user_group_association", metadata,
Column("id", Integer, primary_key=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("group_id", Integer, ForeignKey("galaxy_group.id"), index=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now))
model.UserRoleAssociation.table = Table(
"user_role_association", metadata,
Column("id", Integer, primary_key=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("role_id", Integer, ForeignKey("role.id"), index=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now))
model.GroupRoleAssociation.table = Table(
"group_role_association", metadata,
Column("id", Integer, primary_key=True),
Column("group_id", Integer, ForeignKey("galaxy_group.id"), index=True),
Column("role_id", Integer, ForeignKey("role.id"), index=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now))
model.Role.table = Table(
"role", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("name", String(255), index=True, unique=True),
Column("description", TEXT),
Column("type", String(40), index=True),
Column("deleted", Boolean, index=True, default=False))
model.UserQuotaAssociation.table = Table(
"user_quota_association", metadata,
Column("id", Integer, primary_key=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("quota_id", Integer, ForeignKey("quota.id"), index=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now))
model.GroupQuotaAssociation.table = Table(
"group_quota_association", metadata,
Column("id", Integer, primary_key=True),
Column("group_id", Integer, ForeignKey("galaxy_group.id"), index=True),
Column("quota_id", Integer, ForeignKey("quota.id"), index=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now))
model.Quota.table = Table(
"quota", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("name", String(255), index=True, unique=True),
Column("description", TEXT),
Column("bytes", BigInteger),
Column("operation", String(8)),
Column("deleted", Boolean, index=True, default=False))
model.DefaultQuotaAssociation.table = Table(
"default_quota_association", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("type", String(32), index=True, unique=True),
Column("quota_id", Integer, ForeignKey("quota.id"), index=True))
model.DatasetPermissions.table = Table(
"dataset_permissions", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("action", TEXT),
Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
Column("role_id", Integer, ForeignKey("role.id"), index=True))
model.LibraryPermissions.table = Table(
"library_permissions", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("action", TEXT),
Column("library_id", Integer, ForeignKey("library.id"), nullable=True, index=True),
Column("role_id", Integer, ForeignKey("role.id"), index=True))
model.LibraryFolderPermissions.table = Table(
"library_folder_permissions", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("action", TEXT),
Column("library_folder_id", Integer, ForeignKey("library_folder.id"), nullable=True, index=True),
Column("role_id", Integer, ForeignKey("role.id"), index=True))
model.LibraryDatasetPermissions.table = Table(
"library_dataset_permissions", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("action", TEXT),
Column("library_dataset_id", Integer, ForeignKey("library_dataset.id"), nullable=True, index=True),
Column("role_id", Integer, ForeignKey("role.id"), index=True))
model.LibraryDatasetDatasetAssociationPermissions.table = Table(
"library_dataset_dataset_association_permissions", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("action", TEXT),
Column("library_dataset_dataset_association_id", Integer,
ForeignKey("library_dataset_dataset_association.id"),
nullable=True, index=True),
Column("role_id", Integer, ForeignKey("role.id"), index=True))
model.DefaultUserPermissions.table = Table(
"default_user_permissions", metadata,
Column("id", Integer, primary_key=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("action", TEXT),
Column("role_id", Integer, ForeignKey("role.id"), index=True))
model.DefaultHistoryPermissions.table = Table(
"default_history_permissions", metadata,
Column("id", Integer, primary_key=True),
Column("history_id", Integer, ForeignKey("history.id"), index=True),
Column("action", TEXT),
Column("role_id", Integer, ForeignKey("role.id"), index=True))
model.LibraryDataset.table = Table(
"library_dataset", metadata,
Column("id", Integer, primary_key=True),
# current version of dataset, if null, there is not a current version selected
Column("library_dataset_dataset_association_id", Integer,
ForeignKey("library_dataset_dataset_association.id", use_alter=True, name="library_dataset_dataset_association_id_fk"),
nullable=True, index=True),
Column("folder_id", Integer, ForeignKey("library_folder.id"), index=True),
# not currently being used, but for possible future use
Column("order_id", Integer),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
# when not None/null this will supercede display in library (but not when imported into user's history?)
Column("name", TrimmedString(255), key="_name", index=True),
# when not None/null this will supercede display in library (but not when imported into user's history?)
Column("info", TrimmedString(255), key="_info"),
Column("deleted", Boolean, index=True, default=False),
Column("purged", Boolean, index=True, default=False))
model.LibraryDatasetDatasetAssociation.table = Table(
"library_dataset_dataset_association", metadata,
Column("id", Integer, primary_key=True),
Column("library_dataset_id", Integer, ForeignKey("library_dataset.id"), index=True),
Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("state", TrimmedString(64), index=True, key="_state"),
Column("copied_from_history_dataset_association_id", Integer,
ForeignKey("history_dataset_association.id", use_alter=True, name='history_dataset_association_dataset_id_fkey'),
nullable=True),
Column("copied_from_library_dataset_dataset_association_id", Integer,
ForeignKey("library_dataset_dataset_association.id", use_alter=True, name='library_dataset_dataset_association_id_fkey'),
nullable=True),
Column("name", TrimmedString(255), index=True),
Column("info", TrimmedString(255)),
Column("blurb", TrimmedString(255)),
Column("peek", TEXT, key="_peek"),
Column("tool_version", TEXT),
Column("extension", TrimmedString(64)),
Column("metadata", MetadataType(), key="_metadata"),
Column("parent_id", Integer, ForeignKey("library_dataset_dataset_association.id"), nullable=True),
Column("designation", TrimmedString(255)),
Column("deleted", Boolean, index=True, default=False),
Column("validated_state", TrimmedString(64), default='unvalidated', nullable=False),
Column("validated_state_message", TEXT),
Column("visible", Boolean),
Column("extended_metadata_id", Integer, ForeignKey("extended_metadata.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("message", TrimmedString(255)))
model.ExtendedMetadata.table = Table(
"extended_metadata", metadata,
Column("id", Integer, primary_key=True),
Column("data", JSONType))
model.ExtendedMetadataIndex.table = Table(
"extended_metadata_index", metadata,
Column("id", Integer, primary_key=True),
Column("extended_metadata_id", Integer,
ForeignKey("extended_metadata.id", onupdate="CASCADE", ondelete="CASCADE"), index=True),
Column("path", String(255)),
Column("value", TEXT))
model.Library.table = Table(
"library", metadata,
Column("id", Integer, primary_key=True),
Column("root_folder_id", Integer, ForeignKey("library_folder.id"), index=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("name", String(255), index=True),
Column("deleted", Boolean, index=True, default=False),
Column("purged", Boolean, index=True, default=False),
Column("description", TEXT),
Column("synopsis", TEXT))
model.LibraryFolder.table = Table(
"library_folder", metadata,
Column("id", Integer, primary_key=True),
Column("parent_id", Integer, ForeignKey("library_folder.id"), nullable=True, index=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("name", TEXT),
Column("description", TEXT),
Column("order_id", Integer), # not currently being used, but for possible future use
Column("item_count", Integer),
Column("deleted", Boolean, index=True, default=False),
Column("purged", Boolean, index=True, default=False),
Column("genome_build", TrimmedString(40)),
Index('ix_library_folder_name', 'name', mysql_length=200),
)
model.LibraryInfoAssociation.table = Table(
"library_info_association", metadata,
Column("id", Integer, primary_key=True),
Column("library_id", Integer, ForeignKey("library.id"), index=True),
Column("form_definition_id", Integer, ForeignKey("form_definition.id"), index=True),
Column("form_values_id", Integer, ForeignKey("form_values.id"), index=True),
Column("inheritable", Boolean, index=True, default=False),
Column("deleted", Boolean, index=True, default=False))
model.LibraryFolderInfoAssociation.table = Table(
"library_folder_info_association", metadata,
Column("id", Integer, primary_key=True),
Column("library_folder_id", Integer, ForeignKey("library_folder.id"), nullable=True, index=True),
Column("form_definition_id", Integer, ForeignKey("form_definition.id"), index=True),
Column("form_values_id", Integer, ForeignKey("form_values.id"), index=True),
Column("inheritable", Boolean, index=True, default=False),
Column("deleted", Boolean, index=True, default=False))
model.LibraryDatasetDatasetInfoAssociation.table = Table(
"library_dataset_dataset_info_association", metadata,
Column("id", Integer, primary_key=True),
Column("library_dataset_dataset_association_id", Integer,
ForeignKey("library_dataset_dataset_association.id"), nullable=True, index=True),
Column("form_definition_id", Integer, ForeignKey("form_definition.id"), index=True),
Column("form_values_id", Integer, ForeignKey("form_values.id"), index=True),
Column("deleted", Boolean, index=True, default=False))
model.Job.table = Table(
"job", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("history_id", Integer, ForeignKey("history.id"), index=True),
Column("library_folder_id", Integer, ForeignKey("library_folder.id"), index=True),
Column("tool_id", String(255)),
Column("tool_version", TEXT, default="1.0.0"),
Column("galaxy_version", String(64), default=None),
Column("dynamic_tool_id", Integer, ForeignKey("dynamic_tool.id"), index=True, nullable=True),
Column("state", String(64), index=True),
Column("info", TrimmedString(255)),
Column("copied_from_job_id", Integer, nullable=True),
Column("command_line", TEXT),
Column("dependencies", JSONType, nullable=True),
Column("job_messages", JSONType, nullable=True),
Column("param_filename", String(1024)),
Column("runner_name", String(255)),
Column("job_stdout", TEXT),
Column("job_stderr", TEXT),
Column("tool_stdout", TEXT),
Column("tool_stderr", TEXT),
Column("exit_code", Integer, nullable=True),
Column("traceback", TEXT),
Column("session_id", Integer, ForeignKey("galaxy_session.id"), index=True, nullable=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=True),
Column("job_runner_name", String(255)),
Column("job_runner_external_id", String(255), index=True),
Column("destination_id", String(255), nullable=True),
Column("destination_params", JSONType, nullable=True),
Column("object_store_id", TrimmedString(255), index=True),
Column("imported", Boolean, default=False, index=True),
Column("params", TrimmedString(255), index=True),
Column("handler", TrimmedString(255), index=True))
model.JobStateHistory.table = Table(
"job_state_history", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("state", String(64), index=True),
Column("info", TrimmedString(255)))
model.JobParameter.table = Table(
"job_parameter", metadata,
Column("id", Integer, primary_key=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("name", String(255)),
Column("value", TEXT))
model.JobToInputDatasetAssociation.table = Table(
"job_to_input_dataset", metadata,
Column("id", Integer, primary_key=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
Column("dataset_version", Integer),
Column("name", String(255)))
model.JobToOutputDatasetAssociation.table = Table(
"job_to_output_dataset", metadata,
Column("id", Integer, primary_key=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
Column("name", String(255)))
model.JobToInputDatasetCollectionAssociation.table = Table(
"job_to_input_dataset_collection", metadata,
Column("id", Integer, primary_key=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("dataset_collection_id", Integer, ForeignKey("history_dataset_collection_association.id"), index=True),
Column("name", Unicode(255)))
model.JobToImplicitOutputDatasetCollectionAssociation.table = Table(
"job_to_implicit_output_dataset_collection", metadata,
Column("id", Integer, primary_key=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("dataset_collection_id", Integer, ForeignKey("dataset_collection.id"), index=True),
Column("name", Unicode(255)))
model.JobToOutputDatasetCollectionAssociation.table = Table(
"job_to_output_dataset_collection", metadata,
Column("id", Integer, primary_key=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("dataset_collection_id", Integer, ForeignKey("history_dataset_collection_association.id"), index=True),
Column("name", Unicode(255)))
model.JobToInputLibraryDatasetAssociation.table = Table(
"job_to_input_library_dataset", metadata,
Column("id", Integer, primary_key=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("ldda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True),
Column("name", String(255)))
model.JobToOutputLibraryDatasetAssociation.table = Table(
"job_to_output_library_dataset", metadata,
Column("id", Integer, primary_key=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("ldda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True),
Column("name", String(255)))
model.ImplicitlyCreatedDatasetCollectionInput.table = Table(
"implicitly_created_dataset_collection_inputs", metadata,
Column("id", Integer, primary_key=True),
Column("dataset_collection_id", Integer,
ForeignKey("history_dataset_collection_association.id"), index=True),
Column("input_dataset_collection_id", Integer,
ForeignKey("history_dataset_collection_association.id"), index=True),
Column("name", Unicode(255)))
model.ImplicitCollectionJobs.table = Table(
"implicit_collection_jobs", metadata,
Column("id", Integer, primary_key=True),
Column("populated_state", TrimmedString(64), default='new', nullable=False),
)
model.ImplicitCollectionJobsJobAssociation.table = Table(
"implicit_collection_jobs_job_association", metadata,
Column("id", Integer, primary_key=True),
Column("implicit_collection_jobs_id", Integer, ForeignKey("implicit_collection_jobs.id"), index=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True), # Consider making this nullable...
Column("order_index", Integer, nullable=False),
)
model.JobExternalOutputMetadata.table = Table(
"job_external_output_metadata", metadata,
Column("id", Integer, primary_key=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("history_dataset_association_id", Integer,
ForeignKey("history_dataset_association.id"), index=True, nullable=True),
Column("library_dataset_dataset_association_id", Integer,
ForeignKey("library_dataset_dataset_association.id"), index=True, nullable=True),
Column("is_valid", Boolean, default=True),
Column("filename_in", String(255)),
Column("filename_out", String(255)),
Column("filename_results_code", String(255)),
Column("filename_kwds", String(255)),
Column("filename_override_metadata", String(255)),
Column("job_runner_external_pid", String(255)))
model.JobExportHistoryArchive.table = Table(
"job_export_history_archive", metadata,
Column("id", Integer, primary_key=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("history_id", Integer, ForeignKey("history.id"), index=True),
Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
Column("compressed", Boolean, index=True, default=False),
Column("history_attrs_filename", TEXT))
model.JobImportHistoryArchive.table = Table(
"job_import_history_archive", metadata,
Column("id", Integer, primary_key=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("history_id", Integer, ForeignKey("history.id"), index=True),
Column("archive_dir", TEXT))
model.JobMetricText.table = Table(
"job_metric_text", metadata,
Column("id", Integer, primary_key=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("plugin", Unicode(255)),
Column("metric_name", Unicode(255)),
Column("metric_value", Unicode(model.JOB_METRIC_MAX_LENGTH)))
model.TaskMetricText.table = Table(
"task_metric_text", metadata,
Column("id", Integer, primary_key=True),
Column("task_id", Integer, ForeignKey("task.id"), index=True),
Column("plugin", Unicode(255)),
Column("metric_name", Unicode(255)),
Column("metric_value", Unicode(model.JOB_METRIC_MAX_LENGTH)))
model.JobMetricNumeric.table = Table(
"job_metric_numeric", metadata,
Column("id", Integer, primary_key=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("plugin", Unicode(255)),
Column("metric_name", Unicode(255)),
Column("metric_value", Numeric(model.JOB_METRIC_PRECISION, model.JOB_METRIC_SCALE)))
model.TaskMetricNumeric.table = Table(
"task_metric_numeric", metadata,
Column("id", Integer, primary_key=True),
Column("task_id", Integer, ForeignKey("task.id"), index=True),
Column("plugin", Unicode(255)),
Column("metric_name", Unicode(255)),
Column("metric_value", Numeric(model.JOB_METRIC_PRECISION, model.JOB_METRIC_SCALE)))
model.GenomeIndexToolData.table = Table(
"genome_index_tool_data", metadata,
Column("id", Integer, primary_key=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("deferred_job_id", Integer, ForeignKey("deferred_job.id"), index=True),
Column("transfer_job_id", Integer, ForeignKey("transfer_job.id"), index=True),
Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
Column("fasta_path", String(255)),
Column("created_time", DateTime, default=now),
Column("modified_time", DateTime, default=now, onupdate=now),
Column("indexer", String(64)),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True))
model.InteractiveToolEntryPoint.table = Table(
"interactivetool_entry_point", metadata,
Column("id", Integer, primary_key=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("name", TEXT),
Column("token", TEXT),
Column("tool_port", Integer),
Column("host", TEXT),
Column("port", Integer),
Column("protocol", TEXT),
Column("entry_url", TEXT),
Column("info", JSONType, nullable=True),
Column("configured", Boolean, default=False),
Column("deleted", Boolean, default=False),
Column("created_time", DateTime, default=now),
Column("modified_time", DateTime, default=now, onupdate=now))
model.JobContainerAssociation.table = Table(
"job_container_association", metadata,
Column("id", Integer, primary_key=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("container_type", TEXT),
Column("container_name", TEXT),
Column("container_info", JSONType, nullable=True),
Column("created_time", DateTime, default=now),
Column("modified_time", DateTime, default=now, onupdate=now))
model.Task.table = Table(
"task", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("execution_time", DateTime),
Column("update_time", DateTime, default=now, onupdate=now),
Column("state", String(64), index=True),
Column("command_line", TEXT),
Column("param_filename", String(1024)),
Column("runner_name", String(255)),
Column("job_stdout", TEXT), # job_stdout makes sense here because it is short for job script standard out.
Column("job_stderr", TEXT),
Column("tool_stdout", TEXT),
Column("tool_stderr", TEXT),
Column("exit_code", Integer, nullable=True),
Column("job_messages", JSONType, nullable=True),
Column("info", TrimmedString(255)),
Column("traceback", TEXT),
Column("job_id", Integer, ForeignKey("job.id"), index=True, nullable=False),
Column("working_directory", String(1024)),
Column("task_runner_name", String(255)),
Column("task_runner_external_id", String(255)),
Column("prepare_input_files_cmd", TEXT))
model.PostJobAction.table = Table(
"post_job_action", metadata,
Column("id", Integer, primary_key=True),
Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True, nullable=False),
Column("action_type", String(255), nullable=False),
Column("output_name", String(255), nullable=True),
Column("action_arguments", JSONType, nullable=True))
model.PostJobActionAssociation.table = Table(
"post_job_action_association", metadata,
Column("id", Integer, primary_key=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True, nullable=False),
Column("post_job_action_id", Integer, ForeignKey("post_job_action.id"), index=True, nullable=False))
model.DeferredJob.table = Table(
"deferred_job", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("state", String(64), index=True),
Column("plugin", String(128), index=True),
Column("params", JSONType))
model.TransferJob.table = Table(
"transfer_job", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("state", String(64), index=True),
Column("path", String(1024)),
Column("info", TEXT),
Column("pid", Integer),
Column("socket", Integer),
Column("params", JSONType))
model.DatasetCollection.table = Table(
"dataset_collection", metadata,
Column("id", Integer, primary_key=True),
Column("collection_type", Unicode(255), nullable=False),
Column("populated_state", TrimmedString(64), default='ok', nullable=False),
Column("populated_state_message", TEXT),
Column("element_count", Integer, nullable=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now))
model.HistoryDatasetCollectionAssociation.table = Table(
"history_dataset_collection_association", metadata,
Column("id", Integer, primary_key=True),
Column("collection_id", Integer, ForeignKey("dataset_collection.id"), index=True),
Column("history_id", Integer, ForeignKey("history.id"), index=True),
Column("name", TrimmedString(255)),
Column("hid", Integer),
Column("visible", Boolean),
Column("deleted", Boolean, default=False),
Column("copied_from_history_dataset_collection_association_id", Integer,
ForeignKey("history_dataset_collection_association.id"), nullable=True),
Column("implicit_output_name", Unicode(255), nullable=True),
Column("job_id", ForeignKey("job.id"), index=True, nullable=True),
Column("implicit_collection_jobs_id", ForeignKey("implicit_collection_jobs.id"), index=True, nullable=True),
)
model.LibraryDatasetCollectionAssociation.table = Table(
"library_dataset_collection_association", metadata,
Column("id", Integer, primary_key=True),
Column("collection_id", Integer, ForeignKey("dataset_collection.id"), index=True),
Column("folder_id", Integer, ForeignKey("library_folder.id"), index=True),
Column("name", TrimmedString(255)),
Column("deleted", Boolean, default=False))
model.DatasetCollectionElement.table = Table(
"dataset_collection_element", metadata,
Column("id", Integer, primary_key=True),
# Parent collection id describing what collection this element belongs to.
Column("dataset_collection_id", Integer, ForeignKey("dataset_collection.id"), index=True, nullable=False),
# Child defined by this association - HDA, LDDA, or another dataset association...
Column("hda_id", Integer, ForeignKey("history_dataset_association.id"), index=True, nullable=True),
Column("ldda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True, nullable=True),
Column("child_collection_id", Integer, ForeignKey("dataset_collection.id"), index=True, nullable=True),
# Element index and identifier to define this parent-child relationship.
Column("element_index", Integer),
Column("element_identifier", Unicode(255), ))
model.Event.table = Table(
"event", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("history_id", Integer, ForeignKey("history.id"), index=True, nullable=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=True),
Column("message", TrimmedString(1024)),
Column("session_id", Integer, ForeignKey("galaxy_session.id"), index=True, nullable=True),
Column("tool_id", String(255)))
model.GalaxySession.table = Table(
"galaxy_session", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=True),
Column("remote_host", String(255)),
Column("remote_addr", String(255)),
Column("referer", TEXT),
Column("current_history_id", Integer, ForeignKey("history.id"), nullable=True),
# unique 128 bit random number coerced to a string
Column("session_key", TrimmedString(255), index=True, unique=True),
Column("is_valid", Boolean, default=False),
# saves a reference to the previous session so we have a way to chain them together
Column("prev_session_id", Integer),
Column("disk_usage", Numeric(15, 0), index=True),
Column("last_action", DateTime))
model.GalaxySessionToHistoryAssociation.table = Table(
"galaxy_session_to_history", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("session_id", Integer, ForeignKey("galaxy_session.id"), index=True),
Column("history_id", Integer, ForeignKey("history.id"), index=True))
model.StoredWorkflow.table = Table(
"stored_workflow", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=False),
Column("latest_workflow_id", Integer,
ForeignKey("workflow.id", use_alter=True, name='stored_workflow_latest_workflow_id_fk'), index=True),
Column("name", TEXT),
Column("deleted", Boolean, default=False),
Column("importable", Boolean, default=False),
Column("slug", TEXT),
Column("from_path", TEXT),
Column("published", Boolean, index=True, default=False),
Index('ix_stored_workflow_slug', 'slug', mysql_length=200),
)
model.Workflow.table = Table(
"workflow", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
# workflows will belong to either a stored workflow or a parent/nesting workflow.
Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True, nullable=True),
Column("parent_workflow_id", Integer, ForeignKey("workflow.id"), index=True, nullable=True),
Column("name", TEXT),
Column("has_cycles", Boolean),
Column("has_errors", Boolean),
Column("reports_config", JSONType),
Column("uuid", UUIDType, nullable=True))
model.WorkflowStep.table = Table(
"workflow_step", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("workflow_id", Integer, ForeignKey("workflow.id"), index=True, nullable=False),
Column("subworkflow_id", Integer, ForeignKey("workflow.id"), index=True, nullable=True),
Column("dynamic_tool_id", Integer, ForeignKey("dynamic_tool.id"), index=True, nullable=True),
Column("type", String(64)),
Column("tool_id", TEXT),
Column("tool_version", TEXT),
Column("tool_inputs", JSONType),
Column("tool_errors", JSONType),
Column("position", JSONType),
Column("config", JSONType),
Column("order_index", Integer),
Column("uuid", UUIDType),
# Column( "input_connections", JSONType ),
Column("label", Unicode(255)))
model.WorkflowStepInput.table = Table(
"workflow_step_input", metadata,
Column("id", Integer, primary_key=True),
Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True),
Column("name", TEXT),
Column("merge_type", TEXT),
Column("scatter_type", TEXT),
Column("value_from", JSONType),
Column("value_from_type", TEXT),
Column("default_value", JSONType),
Column("default_value_set", Boolean, default=False),
Column("runtime_value", Boolean, default=False),
Index('ix_workflow_step_input_workflow_step_id_name_unique', "workflow_step_id", "name", unique=True, mysql_length={'name': 200}),
)
model.WorkflowRequestStepState.table = Table(
"workflow_request_step_states", metadata,
Column("id", Integer, primary_key=True),
Column("workflow_invocation_id", Integer,
ForeignKey("workflow_invocation.id", onupdate="CASCADE", ondelete="CASCADE")),
Column("workflow_step_id", Integer, ForeignKey("workflow_step.id")),
Column("value", JSONType))
model.WorkflowRequestInputParameter.table = Table(
"workflow_request_input_parameters", metadata,
Column("id", Integer, primary_key=True),
Column("workflow_invocation_id", Integer,
ForeignKey("workflow_invocation.id", onupdate="CASCADE", ondelete="CASCADE")),
Column("name", Unicode(255)),
Column("value", TEXT),
Column("type", Unicode(255)))
model.WorkflowRequestInputStepParameter.table = Table(
"workflow_request_input_step_parameter", metadata,
Column("id", Integer, primary_key=True),
Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True),
Column("workflow_step_id", Integer, ForeignKey("workflow_step.id")),
Column("parameter_value", JSONType),
)
model.WorkflowRequestToInputDatasetAssociation.table = Table(
"workflow_request_to_input_dataset", metadata,
Column("id", Integer, primary_key=True),
Column("name", String(255)),
Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True),
Column("workflow_step_id", Integer, ForeignKey("workflow_step.id")),
Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True))
model.WorkflowRequestToInputDatasetCollectionAssociation.table = Table(
"workflow_request_to_input_collection_dataset", metadata,
Column("id", Integer, primary_key=True),
Column("name", String(255)),
Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True),
Column("workflow_step_id", Integer, ForeignKey("workflow_step.id")),
Column("dataset_collection_id", Integer, ForeignKey("history_dataset_collection_association.id"), index=True))
model.WorkflowStepConnection.table = Table(
"workflow_step_connection", metadata,
Column("id", Integer, primary_key=True),
Column("output_step_id", Integer, ForeignKey("workflow_step.id"), index=True),
Column("input_step_input_id", Integer, ForeignKey("workflow_step_input.id"), index=True),
Column("output_name", TEXT),
Column("input_subworkflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True),
)
model.WorkflowOutput.table = Table(
"workflow_output", metadata,
Column("id", Integer, primary_key=True),
Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True, nullable=False),
Column("output_name", String(255), nullable=True),
Column("label", Unicode(255)),
Column("uuid", UUIDType),
)
model.WorkflowInvocation.table = Table(
"workflow_invocation", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("workflow_id", Integer, ForeignKey("workflow.id"), index=True, nullable=False),
Column("state", TrimmedString(64), index=True),
Column("scheduler", TrimmedString(255), index=True),
Column("handler", TrimmedString(255), index=True),
Column("uuid", UUIDType()),
Column("history_id", Integer, ForeignKey("history.id"), index=True))
model.WorkflowInvocationStep.table = Table(
"workflow_invocation_step", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True, nullable=False),
Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True, nullable=False),
Column("state", TrimmedString(64), index=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True, nullable=True),
Column("implicit_collection_jobs_id", Integer, ForeignKey("implicit_collection_jobs.id"), index=True, nullable=True),
Column("action", JSONType, nullable=True))
model.WorkflowInvocationOutputDatasetAssociation.table = Table(
"workflow_invocation_output_dataset_association", metadata,
Column("id", Integer, primary_key=True),
Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True),
Column("workflow_step_id", Integer, ForeignKey("workflow_step.id")),
Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
Column("workflow_output_id", Integer, ForeignKey("workflow_output.id")),
)
model.WorkflowInvocationOutputDatasetCollectionAssociation.table = Table(
"workflow_invocation_output_dataset_collection_association", metadata,
Column("id", Integer, primary_key=True),
Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id", name='fk_wiodca_wii'), index=True),
Column("workflow_step_id", Integer, ForeignKey("workflow_step.id", name='fk_wiodca_wsi')),
Column("dataset_collection_id", Integer, ForeignKey("history_dataset_collection_association.id", name='fk_wiodca_dci'), index=True),
Column("workflow_output_id", Integer, ForeignKey("workflow_output.id", name='fk_wiodca_woi')),
)
model.WorkflowInvocationStepOutputDatasetAssociation.table = Table(
"workflow_invocation_step_output_dataset_association", metadata,
Column("id", Integer, primary_key=True),
Column("workflow_invocation_step_id", Integer, ForeignKey("workflow_invocation_step.id"), index=True),
Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
Column("output_name", String(255), nullable=True),
)
model.WorkflowInvocationStepOutputDatasetCollectionAssociation.table = Table(
"workflow_invocation_step_output_dataset_collection_association", metadata,
Column("id", Integer, primary_key=True),
Column("workflow_invocation_step_id", Integer, ForeignKey("workflow_invocation_step.id", name='fk_wisodca_wisi'), index=True),
Column("workflow_step_id", Integer, ForeignKey("workflow_step.id", name='fk_wisodca_wsi')),
Column("dataset_collection_id", Integer, ForeignKey("history_dataset_collection_association.id", name='fk_wisodca_dci'), index=True),
Column("output_name", String(255), nullable=True),
)
model.WorkflowInvocationToSubworkflowInvocationAssociation.table = Table(
"workflow_invocation_to_subworkflow_invocation_association", metadata,
Column("id", Integer, primary_key=True),
Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id", name='fk_wfi_swi_wfi'), index=True),
Column("subworkflow_invocation_id", Integer, ForeignKey("workflow_invocation.id", name='fk_wfi_swi_swi'), index=True),
Column("workflow_step_id", Integer, ForeignKey("workflow_step.id", name='fk_wfi_swi_ws')),
)
model.StoredWorkflowUserShareAssociation.table = Table(
"stored_workflow_user_share_connection", metadata,
Column("id", Integer, primary_key=True),
Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True))
model.StoredWorkflowMenuEntry.table = Table(
"stored_workflow_menu_entry", metadata,
Column("id", Integer, primary_key=True),
Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("order_index", Integer))
model.MetadataFile.table = Table(
"metadata_file", metadata,
Column("id", Integer, primary_key=True),
Column("name", TEXT),
Column("hda_id", Integer, ForeignKey("history_dataset_association.id"), index=True, nullable=True),
Column("lda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True, nullable=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, index=True, default=now, onupdate=now),
Column("object_store_id", TrimmedString(255), index=True),
Column("uuid", UUIDType(), index=True),
Column("deleted", Boolean, index=True, default=False),
Column("purged", Boolean, index=True, default=False))
model.FormDefinitionCurrent.table = Table(
"form_definition_current", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("latest_form_id", Integer, ForeignKey("form_definition.id"), index=True),
Column("deleted", Boolean, index=True, default=False))
model.FormDefinition.table = Table(
"form_definition", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("name", TrimmedString(255), nullable=False),
Column("desc", TEXT),
Column("form_definition_current_id", Integer, ForeignKey("form_definition_current.id", use_alter=True), index=True, nullable=False),
Column("fields", JSONType()),
Column("type", TrimmedString(255), index=True),
Column("layout", JSONType()))
model.FormValues.table = Table(
"form_values", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("form_definition_id", Integer, ForeignKey("form_definition.id"), index=True),
Column("content", JSONType()))
model.Page.table = Table(
"page", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=False),
Column("latest_revision_id", Integer,
ForeignKey("page_revision.id", use_alter=True, name='page_latest_revision_id_fk'), index=True),
Column("title", TEXT),
Column("deleted", Boolean, index=True, default=False),
Column("importable", Boolean, index=True, default=False),
Column("slug", TEXT),
Column("published", Boolean, index=True, default=False),
Index('ix_page_slug', 'slug', mysql_length=200),
)
model.PageRevision.table = Table(
"page_revision", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("page_id", Integer, ForeignKey("page.id"), index=True, nullable=False),
Column("title", TEXT),
Column("content", TEXT))
model.PageUserShareAssociation.table = Table(
"page_user_share_association", metadata,
Column("id", Integer, primary_key=True),
Column("page_id", Integer, ForeignKey("page.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True))
model.Visualization.table = Table(
"visualization", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=False),
Column("latest_revision_id", Integer,
ForeignKey("visualization_revision.id", use_alter=True, name='visualization_latest_revision_id_fk'), index=True),
Column("title", TEXT),
Column("type", TEXT),
Column("dbkey", TEXT),
Column("deleted", Boolean, default=False, index=True),
Column("importable", Boolean, default=False, index=True),
Column("slug", TEXT),
Column("published", Boolean, default=False, index=True),
Index('ix_visualization_dbkey', 'dbkey', mysql_length=200),
Index('ix_visualization_slug', 'slug', mysql_length=200),
)
model.VisualizationRevision.table = Table(
"visualization_revision", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("visualization_id", Integer, ForeignKey("visualization.id"), index=True, nullable=False),
Column("title", TEXT),
Column("dbkey", TEXT),
Column("config", JSONType),
Index('ix_visualization_revision_dbkey', 'dbkey', mysql_length=200),
)
model.VisualizationUserShareAssociation.table = Table(
"visualization_user_share_association", metadata,
Column("id", Integer, primary_key=True),
Column("visualization_id", Integer, ForeignKey("visualization.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True))
# Data Manager tables
model.DataManagerHistoryAssociation.table = Table(
"data_manager_history_association", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, index=True, default=now, onupdate=now),
Column("history_id", Integer, ForeignKey("history.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True))
model.DataManagerJobAssociation.table = Table(
"data_manager_job_association", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, index=True, default=now, onupdate=now),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("data_manager_id", TEXT),
Index('ix_data_manager_job_association_data_manager_id', 'data_manager_id', mysql_length=200),
)
# Tagging tables.
model.Tag.table = Table(
"tag", metadata,
Column("id", Integer, primary_key=True),
Column("type", Integer),
Column("parent_id", Integer, ForeignKey("tag.id")),
Column("name", TrimmedString(255)),
UniqueConstraint("name"))
model.HistoryTagAssociation.table = Table(
"history_tag_association", metadata,
Column("id", Integer, primary_key=True),
Column("history_id", Integer, ForeignKey("history.id"), index=True),
Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("user_tname", TrimmedString(255), index=True),
Column("value", TrimmedString(255), index=True),
Column("user_value", TrimmedString(255), index=True))
model.DatasetTagAssociation.table = Table(
"dataset_tag_association", metadata,
Column("id", Integer, primary_key=True),
Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("user_tname", TrimmedString(255), index=True),
Column("value", TrimmedString(255), index=True),
Column("user_value", TrimmedString(255), index=True))
model.HistoryDatasetAssociationTagAssociation.table = Table(
"history_dataset_association_tag_association", metadata,
Column("id", Integer, primary_key=True),
Column("history_dataset_association_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("user_tname", TrimmedString(255), index=True),
Column("value", TrimmedString(255), index=True),
Column("user_value", TrimmedString(255), index=True))
model.LibraryDatasetDatasetAssociationTagAssociation.table = Table(
"library_dataset_dataset_association_tag_association", metadata,
Column("id", Integer, primary_key=True),
Column("library_dataset_dataset_association_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True),
Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("user_tname", TrimmedString(255), index=True),
Column("value", TrimmedString(255), index=True),
Column("user_value", TrimmedString(255), index=True))
model.StoredWorkflowTagAssociation.table = Table(
"stored_workflow_tag_association", metadata,
Column("id", Integer, primary_key=True),
Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True),
Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("user_tname", Unicode(255), index=True),
Column("value", Unicode(255), index=True),
Column("user_value", Unicode(255), index=True))
model.PageTagAssociation.table = Table(
"page_tag_association", metadata,
Column("id", Integer, primary_key=True),
Column("page_id", Integer, ForeignKey("page.id"), index=True),
Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("user_tname", TrimmedString(255), index=True),
Column("value", TrimmedString(255), index=True),
Column("user_value", TrimmedString(255), index=True))
model.WorkflowStepTagAssociation.table = Table(
"workflow_step_tag_association", metadata,
Column("id", Integer, primary_key=True),
Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True),
Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("user_tname", Unicode(255), index=True),
Column("value", Unicode(255), index=True),
Column("user_value", Unicode(255), index=True))
model.VisualizationTagAssociation.table = Table(
"visualization_tag_association", metadata,
Column("id", Integer, primary_key=True),
Column("visualization_id", Integer, ForeignKey("visualization.id"), index=True),
Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("user_tname", TrimmedString(255), index=True),
Column("value", TrimmedString(255), index=True),
Column("user_value", TrimmedString(255), index=True))
model.HistoryDatasetCollectionTagAssociation.table = Table(
"history_dataset_collection_tag_association", metadata,
Column("id", Integer, primary_key=True),
Column("history_dataset_collection_id", Integer,
ForeignKey("history_dataset_collection_association.id"), index=True),
Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("user_tname", TrimmedString(255), index=True),
Column("value", TrimmedString(255), index=True),
Column("user_value", TrimmedString(255), index=True))
model.LibraryDatasetCollectionTagAssociation.table = Table(
"library_dataset_collection_tag_association", metadata,
Column("id", Integer, primary_key=True),
Column("library_dataset_collection_id", Integer,
ForeignKey("library_dataset_collection_association.id"), index=True),
Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("user_tname", TrimmedString(255), index=True),
Column("value", TrimmedString(255), index=True),
Column("user_value", TrimmedString(255), index=True))
model.ToolTagAssociation.table = Table(
"tool_tag_association", metadata,
Column("id", Integer, primary_key=True),
Column("tool_id", TrimmedString(255), index=True),
Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("user_tname", TrimmedString(255), index=True),
Column("value", TrimmedString(255), index=True),
Column("user_value", TrimmedString(255), index=True))
# Annotation tables.
model.HistoryAnnotationAssociation.table = Table(
"history_annotation_association", metadata,
Column("id", Integer, primary_key=True),
Column("history_id", Integer, ForeignKey("history.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("annotation", TEXT),
Index('ix_history_anno_assoc_annotation', 'annotation', mysql_length=200),
)
model.HistoryDatasetAssociationAnnotationAssociation.table = Table(
"history_dataset_association_annotation_association", metadata,
Column("id", Integer, primary_key=True),
Column("history_dataset_association_id", Integer,
ForeignKey("history_dataset_association.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("annotation", TEXT),
Index('ix_history_dataset_anno_assoc_annotation', 'annotation', mysql_length=200),
)
model.StoredWorkflowAnnotationAssociation.table = Table(
"stored_workflow_annotation_association", metadata,
Column("id", Integer, primary_key=True),
Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("annotation", TEXT),
Index('ix_stored_workflow_ann_assoc_annotation', 'annotation', mysql_length=200),
)
model.WorkflowStepAnnotationAssociation.table = Table(
"workflow_step_annotation_association", metadata,
Column("id", Integer, primary_key=True),
Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("annotation", TEXT),
Index('ix_workflow_step_ann_assoc_annotation', 'annotation', mysql_length=200),
)
model.PageAnnotationAssociation.table = Table(
"page_annotation_association", metadata,
Column("id", Integer, primary_key=True),
Column("page_id", Integer, ForeignKey("page.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("annotation", TEXT),
Index('ix_page_annotation_association_annotation', 'annotation', mysql_length=200),
)
model.VisualizationAnnotationAssociation.table = Table(
"visualization_annotation_association", metadata,
Column("id", Integer, primary_key=True),
Column("visualization_id", Integer, ForeignKey("visualization.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("annotation", TEXT),
Index('ix_visualization_annotation_association_annotation', 'annotation', mysql_length=200),
)
model.HistoryDatasetCollectionAssociationAnnotationAssociation.table = Table(
"history_dataset_collection_annotation_association", metadata,
Column("id", Integer, primary_key=True),
Column("history_dataset_collection_id", Integer,
ForeignKey("history_dataset_collection_association.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("annotation", TEXT),
)
model.LibraryDatasetCollectionAnnotationAssociation.table = Table(
"library_dataset_collection_annotation_association", metadata,
Column("id", Integer, primary_key=True),
Column("library_dataset_collection_id", Integer,
ForeignKey("library_dataset_collection_association.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("annotation", TEXT),
)
# Ratings tables.
model.HistoryRatingAssociation.table = Table(
"history_rating_association", metadata,
Column("id", Integer, primary_key=True),
Column("history_id", Integer, ForeignKey("history.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("rating", Integer, index=True))
model.HistoryDatasetAssociationRatingAssociation.table = Table(
"history_dataset_association_rating_association", metadata,
Column("id", Integer, primary_key=True),
Column("history_dataset_association_id", Integer,
ForeignKey("history_dataset_association.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("rating", Integer, index=True))
model.StoredWorkflowRatingAssociation.table = Table(
"stored_workflow_rating_association", metadata,
Column("id", Integer, primary_key=True),
Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("rating", Integer, index=True))
model.PageRatingAssociation.table = Table(
"page_rating_association", metadata,
Column("id", Integer, primary_key=True),
Column("page_id", Integer, ForeignKey("page.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("rating", Integer, index=True))
model.VisualizationRatingAssociation.table = Table(
"visualization_rating_association", metadata,
Column("id", Integer, primary_key=True),
Column("visualization_id", Integer, ForeignKey("visualization.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("rating", Integer, index=True))
model.HistoryDatasetCollectionRatingAssociation.table = Table(
"history_dataset_collection_rating_association", metadata,
Column("id", Integer, primary_key=True),
Column("history_dataset_collection_id", Integer,
ForeignKey("history_dataset_collection_association.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("rating", Integer, index=True))
model.LibraryDatasetCollectionRatingAssociation.table = Table(
"library_dataset_collection_rating_association", metadata,
Column("id", Integer, primary_key=True),
Column("library_dataset_collection_id", Integer,
ForeignKey("library_dataset_collection_association.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("rating", Integer, index=True))
# User tables.
model.UserPreference.table = Table(
"user_preference", metadata,
Column("id", Integer, primary_key=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("name", Unicode(255), index=True),
Column("value", Text))
model.UserAction.table = Table(
"user_action", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("session_id", Integer, ForeignKey("galaxy_session.id"), index=True),
Column("action", Unicode(255)),
Column("context", Unicode(512)),
Column("params", Unicode(1024)))
model.APIKeys.table = Table(
"api_keys", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("key", TrimmedString(32), index=True, unique=True))
CleanupEvent_table = Table("cleanup_event", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("message", TrimmedString(1024)))
CleanupEventDatasetAssociation_table = Table("cleanup_event_dataset_association", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True))
CleanupEventMetadataFileAssociation_table = Table("cleanup_event_metadata_file_association", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
Column("metadata_file_id", Integer, ForeignKey("metadata_file.id"), index=True))
CleanupEventHistoryAssociation_table = Table("cleanup_event_history_association", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
Column("history_id", Integer, ForeignKey("history.id"), index=True))
CleanupEventHistoryDatasetAssociationAssociation_table = Table("cleanup_event_hda_association", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
Column("hda_id", Integer, ForeignKey("history_dataset_association.id"), index=True))
CleanupEventLibraryAssociation_table = Table("cleanup_event_library_association", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
Column("library_id", Integer, ForeignKey("library.id"), index=True))
CleanupEventLibraryFolderAssociation_table = Table("cleanup_event_library_folder_association", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
Column("library_folder_id", Integer, ForeignKey("library_folder.id"), index=True))
CleanupEventLibraryDatasetAssociation_table = Table("cleanup_event_library_dataset_association", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
Column("library_dataset_id", Integer, ForeignKey("library_dataset.id"), index=True))
CleanupEventLibraryDatasetDatasetAssociationAssociation_table = Table("cleanup_event_ldda_association", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
Column("ldda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True))
CleanupEventImplicitlyConvertedDatasetAssociationAssociation_table = Table("cleanup_event_icda_association", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
Column("icda_id", Integer, ForeignKey("implicitly_converted_dataset_association.id"), index=True))
# With the tables defined we can define the mappers and set up the
# relationships between the model objects.
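# simple_mapping below is a small convenience wrapper: it maps a model class
# onto its .table attribute, passing any keyword arguments straight through as
# relationship properties.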
def simple_mapping(model, **kwds):
mapper(model, model.table, properties=kwds)
simple_mapping(model.WorkerProcess)
mapper(model.FormValues, model.FormValues.table, properties=dict(
form_definition=relation(model.FormDefinition,
primaryjoin=(model.FormValues.table.c.form_definition_id == model.FormDefinition.table.c.id))
))
mapper(model.FormDefinition, model.FormDefinition.table, properties=dict(
current=relation(model.FormDefinitionCurrent,
primaryjoin=(model.FormDefinition.table.c.form_definition_current_id == model.FormDefinitionCurrent.table.c.id))
))
mapper(model.FormDefinitionCurrent, model.FormDefinitionCurrent.table, properties=dict(
forms=relation(model.FormDefinition,
backref='form_definition_current',
cascade="all, delete-orphan",
primaryjoin=(model.FormDefinitionCurrent.table.c.id == model.FormDefinition.table.c.form_definition_current_id)),
latest_form=relation(model.FormDefinition,
post_update=True,
primaryjoin=(model.FormDefinitionCurrent.table.c.latest_form_id == model.FormDefinition.table.c.id))
))
mapper(model.UserAddress, model.UserAddress.table, properties=dict(
user=relation(model.User,
primaryjoin=(model.UserAddress.table.c.user_id == model.User.table.c.id),
backref='addresses',
order_by=desc(model.UserAddress.table.c.update_time)),
))
mapper(model.PSAAssociation, model.PSAAssociation.table, properties=None)
mapper(model.PSACode, model.PSACode.table, properties=None)
mapper(model.PSANonce, model.PSANonce.table, properties=None)
mapper(model.PSAPartial, model.PSAPartial.table, properties=None)
mapper(model.UserAuthnzToken, model.UserAuthnzToken.table, properties=dict(
user=relation(model.User,
primaryjoin=(model.UserAuthnzToken.table.c.user_id == model.User.table.c.id),
backref='social_auth')
))
mapper(model.CustosAuthnzToken, model.CustosAuthnzToken.table, properties=dict(
user=relation(model.User,
primaryjoin=(model.CustosAuthnzToken.table.c.user_id == model.User.table.c.id),
backref='custos_auth')
))
mapper(model.CloudAuthz, model.CloudAuthz.table, properties=dict(
user=relation(model.User,
primaryjoin=(model.CloudAuthz.table.c.user_id == model.User.table.c.id),
backref='cloudauthz'),
authn=relation(model.UserAuthnzToken,
primaryjoin=(model.CloudAuthz.table.c.authn_id == model.UserAuthnzToken.table.c.id),
backref='cloudauthz')
))
mapper(model.ValidationError, model.ValidationError.table)
simple_mapping(model.DynamicTool)
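# The HistoryDatasetAssociation mapping below includes a self-referential
# copied_from/copied_to pair; remote_side on copied_from_history_dataset_association
# marks the referenced (original) row so SQLAlchemy treats that side as many-to-one.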
simple_mapping(model.HistoryDatasetAssociation,
dataset=relation(model.Dataset,
primaryjoin=(model.Dataset.table.c.id == model.HistoryDatasetAssociation.table.c.dataset_id), lazy=False),
# .history defined in History mapper
copied_from_history_dataset_association=relation(model.HistoryDatasetAssociation,
primaryjoin=(model.HistoryDatasetAssociation.table.c.copied_from_history_dataset_association_id ==
model.HistoryDatasetAssociation.table.c.id),
remote_side=[model.HistoryDatasetAssociation.table.c.id],
uselist=False),
copied_to_history_dataset_associations=relation(model.HistoryDatasetAssociation,
primaryjoin=(model.HistoryDatasetAssociation.table.c.copied_from_history_dataset_association_id ==
model.HistoryDatasetAssociation.table.c.id)),
copied_from_library_dataset_dataset_association=relation(
model.LibraryDatasetDatasetAssociation,
primaryjoin=(model.HistoryDatasetAssociation.table.c.copied_from_library_dataset_dataset_association_id ==
model.LibraryDatasetDatasetAssociation.table.c.id),
uselist=False),
copied_to_library_dataset_dataset_associations=relation(model.LibraryDatasetDatasetAssociation,
primaryjoin=(model.HistoryDatasetAssociation.table.c.copied_from_library_dataset_dataset_association_id ==
model.LibraryDatasetDatasetAssociation.table.c.id)),
implicitly_converted_datasets=relation(model.ImplicitlyConvertedDatasetAssociation,
primaryjoin=(model.ImplicitlyConvertedDatasetAssociation.table.c.hda_parent_id ==
model.HistoryDatasetAssociation.table.c.id)),
tags=relation(model.HistoryDatasetAssociationTagAssociation,
order_by=model.HistoryDatasetAssociationTagAssociation.table.c.id,
backref='history_tag_associations'),
annotations=relation(model.HistoryDatasetAssociationAnnotationAssociation,
order_by=model.HistoryDatasetAssociationAnnotationAssociation.table.c.id,
backref="hdas"),
ratings=relation(model.HistoryDatasetAssociationRatingAssociation,
order_by=model.HistoryDatasetAssociationRatingAssociation.table.c.id,
backref="hdas"),
extended_metadata=relation(model.ExtendedMetadata,
primaryjoin=((model.HistoryDatasetAssociation.table.c.extended_metadata_id ==
model.ExtendedMetadata.table.c.id))),
hidden_beneath_collection_instance=relation(model.HistoryDatasetCollectionAssociation,
primaryjoin=((model.HistoryDatasetAssociation.table.c.hidden_beneath_collection_instance_id ==
model.HistoryDatasetCollectionAssociation.table.c.id)),
uselist=False,
backref="hidden_dataset_instances"),
_metadata=deferred(model.HistoryDatasetAssociation.table.c._metadata)
)
simple_mapping(model.Dataset,
history_associations=relation(model.HistoryDatasetAssociation,
primaryjoin=(model.Dataset.table.c.id == model.HistoryDatasetAssociation.table.c.dataset_id)),
active_history_associations=relation(model.HistoryDatasetAssociation,
primaryjoin=(
(model.Dataset.table.c.id == model.HistoryDatasetAssociation.table.c.dataset_id) &
(model.HistoryDatasetAssociation.table.c.deleted == false()) &
(model.HistoryDatasetAssociation.table.c.purged == false()))),
purged_history_associations=relation(model.HistoryDatasetAssociation,
primaryjoin=(
(model.Dataset.table.c.id == model.HistoryDatasetAssociation.table.c.dataset_id) &
(model.HistoryDatasetAssociation.table.c.purged == true()))),
library_associations=relation(model.LibraryDatasetDatasetAssociation,
primaryjoin=(model.Dataset.table.c.id == model.LibraryDatasetDatasetAssociation.table.c.dataset_id)),
active_library_associations=relation(model.LibraryDatasetDatasetAssociation,
primaryjoin=(
(model.Dataset.table.c.id == model.LibraryDatasetDatasetAssociation.table.c.dataset_id) &
(model.LibraryDatasetDatasetAssociation.table.c.deleted == false()))),
tags=relation(model.DatasetTagAssociation,
order_by=model.DatasetTagAssociation.table.c.id,
backref='datasets')
)
mapper(model.DatasetHash, model.DatasetHash.table, properties=dict(
dataset=relation(model.Dataset, backref='hashes')
))
mapper(model.DatasetSource, model.DatasetSource.table, properties=dict(
dataset=relation(model.Dataset, backref='sources')
))
mapper(model.DatasetSourceHash, model.DatasetSourceHash.table, properties=dict(
source=relation(model.DatasetSource, backref='hashes')
))
mapper(model.HistoryDatasetAssociationHistory, model.HistoryDatasetAssociationHistory.table)
mapper(model.HistoryDatasetAssociationDisplayAtAuthorization, model.HistoryDatasetAssociationDisplayAtAuthorization.table, properties=dict(
history_dataset_association=relation(model.HistoryDatasetAssociation),
user=relation(model.User)
))
mapper(model.HistoryDatasetAssociationSubset, model.HistoryDatasetAssociationSubset.table, properties=dict(
hda=relation(model.HistoryDatasetAssociation,
primaryjoin=(model.HistoryDatasetAssociationSubset.table.c.history_dataset_association_id ==
model.HistoryDatasetAssociation.table.c.id)),
subset=relation(model.HistoryDatasetAssociation,
primaryjoin=(model.HistoryDatasetAssociationSubset.table.c.history_dataset_association_subset_id ==
model.HistoryDatasetAssociation.table.c.id))
))
mapper(model.ImplicitlyConvertedDatasetAssociation, model.ImplicitlyConvertedDatasetAssociation.table, properties=dict(
parent_hda=relation(model.HistoryDatasetAssociation,
primaryjoin=(model.ImplicitlyConvertedDatasetAssociation.table.c.hda_parent_id ==
model.HistoryDatasetAssociation.table.c.id)),
parent_ldda=relation(model.LibraryDatasetDatasetAssociation,
primaryjoin=(model.ImplicitlyConvertedDatasetAssociation.table.c.ldda_parent_id ==
model.LibraryDatasetDatasetAssociation.table.c.id)),
dataset_ldda=relation(model.LibraryDatasetDatasetAssociation,
primaryjoin=(model.ImplicitlyConvertedDatasetAssociation.table.c.ldda_id ==
model.LibraryDatasetDatasetAssociation.table.c.id),
backref="implicitly_converted_parent_datasets"),
dataset=relation(model.HistoryDatasetAssociation,
primaryjoin=(model.ImplicitlyConvertedDatasetAssociation.table.c.hda_id ==
model.HistoryDatasetAssociation.table.c.id),
backref="implicitly_converted_parent_datasets")
))
mapper(model.History, model.History.table, properties=dict(
galaxy_sessions=relation(model.GalaxySessionToHistoryAssociation),
datasets=relation(model.HistoryDatasetAssociation,
backref="history",
order_by=asc(model.HistoryDatasetAssociation.table.c.hid)),
exports=relation(model.JobExportHistoryArchive,
primaryjoin=(model.JobExportHistoryArchive.table.c.history_id == model.History.table.c.id),
order_by=desc(model.JobExportHistoryArchive.table.c.id)),
active_datasets=relation(model.HistoryDatasetAssociation,
primaryjoin=(
(model.HistoryDatasetAssociation.table.c.history_id == model.History.table.c.id) &
not_(model.HistoryDatasetAssociation.table.c.deleted)
),
order_by=asc(model.HistoryDatasetAssociation.table.c.hid),
viewonly=True),
active_dataset_collections=relation(model.HistoryDatasetCollectionAssociation,
primaryjoin=(
(model.HistoryDatasetCollectionAssociation.table.c.history_id == model.History.table.c.id) &
not_(model.HistoryDatasetCollectionAssociation.table.c.deleted)
),
order_by=asc(model.HistoryDatasetCollectionAssociation.table.c.hid),
viewonly=True),
visible_datasets=relation(model.HistoryDatasetAssociation,
primaryjoin=(
(model.HistoryDatasetAssociation.table.c.history_id == model.History.table.c.id) &
not_(model.HistoryDatasetAssociation.table.c.deleted) &
model.HistoryDatasetAssociation.table.c.visible
),
order_by=asc(model.HistoryDatasetAssociation.table.c.hid),
viewonly=True),
visible_dataset_collections=relation(model.HistoryDatasetCollectionAssociation,
primaryjoin=(
(model.HistoryDatasetCollectionAssociation.table.c.history_id == model.History.table.c.id) &
not_(model.HistoryDatasetCollectionAssociation.table.c.deleted) &
model.HistoryDatasetCollectionAssociation.table.c.visible
),
order_by=asc(model.HistoryDatasetCollectionAssociation.table.c.hid),
viewonly=True),
tags=relation(model.HistoryTagAssociation,
order_by=model.HistoryTagAssociation.table.c.id,
backref="histories"),
annotations=relation(model.HistoryAnnotationAssociation,
order_by=model.HistoryAnnotationAssociation.table.c.id,
backref="histories"),
ratings=relation(model.HistoryRatingAssociation,
order_by=model.HistoryRatingAssociation.table.c.id,
backref="histories"),
average_rating=column_property(
select([func.avg(model.HistoryRatingAssociation.table.c.rating)]).where(model.HistoryRatingAssociation.table.c.history_id == model.History.table.c.id),
deferred=True
),
users_shared_with_count=column_property(
select([func.count(model.HistoryUserShareAssociation.table.c.id)]).where(model.History.table.c.id == model.HistoryUserShareAssociation.table.c.history_id),
deferred=True
)
))
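# average_rating and users_shared_with_count above are deferred column_property
# attributes: correlated scalar subqueries that are only emitted when the
# attribute is first accessed on a History instance.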
# Set up proxy so that
#   History.users_shared_with_dot_users
# returns a list of users that the history is shared with.
model.History.users_shared_with_dot_users = association_proxy('users_shared_with', 'user')
mapper(model.HistoryUserShareAssociation, model.HistoryUserShareAssociation.table, properties=dict(
user=relation(model.User, backref='histories_shared_by_others'),
history=relation(model.History, backref='users_shared_with')
))
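# Illustrative note: the association proxy above reads the 'user' attribute of
# each HistoryUserShareAssociation reached through the 'users_shared_with'
# backref defined in the mapper just above, e.g. (sketch, not executed here)
#     shared_users = some_history.users_shared_with_dot_users
# where some_history is a hypothetical mapped History instance.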
mapper(model.User, model.User.table, properties=dict(
histories=relation(model.History,
backref="user",
order_by=desc(model.History.table.c.update_time)),
active_histories=relation(model.History,
primaryjoin=(
(model.History.table.c.user_id == model.User.table.c.id) &
(not_(model.History.table.c.deleted))
),
order_by=desc(model.History.table.c.update_time)),
galaxy_sessions=relation(model.GalaxySession,
order_by=desc(model.GalaxySession.table.c.update_time)),
stored_workflow_menu_entries=relation(model.StoredWorkflowMenuEntry,
primaryjoin=(
(model.StoredWorkflowMenuEntry.table.c.user_id == model.User.table.c.id) &
(model.StoredWorkflowMenuEntry.table.c.stored_workflow_id == model.StoredWorkflow.table.c.id) &
not_(model.StoredWorkflow.table.c.deleted)
),
backref="user",
cascade="all, delete-orphan",
collection_class=ordering_list('order_index')),
_preferences=relation(model.UserPreference,
backref="user",
collection_class=attribute_mapped_collection('name')),
# addresses=relation( UserAddress,
# primaryjoin=( User.table.c.id == UserAddress.table.c.user_id ) ),
values=relation(model.FormValues,
primaryjoin=(model.User.table.c.form_values_id == model.FormValues.table.c.id)),
api_keys=relation(model.APIKeys,
backref="user",
order_by=desc(model.APIKeys.table.c.create_time)),
cloudauthzs=relation(model.CloudAuthz,
primaryjoin=model.CloudAuthz.table.c.user_id == model.User.table.c.id),
))
mapper(model.PasswordResetToken, model.PasswordResetToken.table,
properties=dict(user=relation(model.User, backref="reset_tokens")))
# Set up proxy so that this syntax is possible:
# <user_obj>.preferences[pref_name] = pref_value
model.User.preferences = association_proxy('_preferences', 'value', creator=model.UserPreference)
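# Illustrative sketch (not executed here): assignments such as
#     some_user.preferences[pref_name] = pref_value
# create UserPreference rows keyed by name through the attribute_mapped_collection
# on _preferences above, and reads like some_user.preferences[pref_name] return
# the stored value; some_user, pref_name and pref_value are hypothetical placeholders.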
mapper(model.Group, model.Group.table, properties=dict(
users=relation(model.UserGroupAssociation)
))
mapper(model.UserGroupAssociation, model.UserGroupAssociation.table, properties=dict(
user=relation(model.User, backref="groups"),
group=relation(model.Group, backref="members")
))
mapper(model.DefaultUserPermissions, model.DefaultUserPermissions.table, properties=dict(
user=relation(model.User, backref="default_permissions"),
role=relation(model.Role)
))
mapper(model.DefaultHistoryPermissions, model.DefaultHistoryPermissions.table, properties=dict(
history=relation(model.History, backref="default_permissions"),
role=relation(model.Role)
))
mapper(model.Role, model.Role.table, properties=dict(
users=relation(model.UserRoleAssociation),
groups=relation(model.GroupRoleAssociation)
))
mapper(model.UserRoleAssociation, model.UserRoleAssociation.table, properties=dict(
user=relation(model.User, backref="roles"),
non_private_roles=relation(
model.User,
backref="non_private_roles",
primaryjoin=(
(model.User.table.c.id == model.UserRoleAssociation.table.c.user_id) &
(model.UserRoleAssociation.table.c.role_id == model.Role.table.c.id) &
not_(model.Role.table.c.name == model.User.table.c.email))
),
role=relation(model.Role)
))
mapper(model.GroupRoleAssociation, model.GroupRoleAssociation.table, properties=dict(
group=relation(model.Group, backref="roles"),
role=relation(model.Role)
))
mapper(model.Quota, model.Quota.table, properties=dict(
users=relation(model.UserQuotaAssociation),
groups=relation(model.GroupQuotaAssociation)
))
mapper(model.UserQuotaAssociation, model.UserQuotaAssociation.table, properties=dict(
user=relation(model.User, backref="quotas"),
quota=relation(model.Quota)
))
mapper(model.GroupQuotaAssociation, model.GroupQuotaAssociation.table, properties=dict(
group=relation(model.Group, backref="quotas"),
quota=relation(model.Quota)
))
mapper(model.DefaultQuotaAssociation, model.DefaultQuotaAssociation.table, properties=dict(
quota=relation(model.Quota, backref="default")
))
mapper(model.DatasetPermissions, model.DatasetPermissions.table, properties=dict(
dataset=relation(model.Dataset, backref="actions"),
role=relation(model.Role, backref="dataset_actions")
))
mapper(model.LibraryPermissions, model.LibraryPermissions.table, properties=dict(
library=relation(model.Library, backref="actions"),
role=relation(model.Role, backref="library_actions")
))
mapper(model.LibraryFolderPermissions, model.LibraryFolderPermissions.table, properties=dict(
folder=relation(model.LibraryFolder, backref="actions"),
role=relation(model.Role, backref="library_folder_actions")
))
mapper(model.LibraryDatasetPermissions, model.LibraryDatasetPermissions.table, properties=dict(
library_dataset=relation(model.LibraryDataset, backref="actions"),
role=relation(model.Role, backref="library_dataset_actions")
))
mapper(model.LibraryDatasetDatasetAssociationPermissions, model.LibraryDatasetDatasetAssociationPermissions.table, properties=dict(
library_dataset_dataset_association=relation(model.LibraryDatasetDatasetAssociation, backref="actions"),
role=relation(model.Role, backref="library_dataset_dataset_actions")
))
mapper(model.Library, model.Library.table, properties=dict(
root_folder=relation(model.LibraryFolder, backref=backref("library_root"))
))
mapper(model.ExtendedMetadata, model.ExtendedMetadata.table, properties=dict(
children=relation(model.ExtendedMetadataIndex,
primaryjoin=(model.ExtendedMetadataIndex.table.c.extended_metadata_id == model.ExtendedMetadata.table.c.id),
backref=backref("parent",
primaryjoin=(model.ExtendedMetadataIndex.table.c.extended_metadata_id == model.ExtendedMetadata.table.c.id)))
))
mapper(model.ExtendedMetadataIndex, model.ExtendedMetadataIndex.table, properties=dict(
extended_metadata=relation(model.ExtendedMetadata,
primaryjoin=((model.ExtendedMetadataIndex.table.c.extended_metadata_id == model.ExtendedMetadata.table.c.id)))
))
mapper(model.LibraryInfoAssociation, model.LibraryInfoAssociation.table, properties=dict(
library=relation(model.Library,
primaryjoin=(
(model.LibraryInfoAssociation.table.c.library_id == model.Library.table.c.id) &
(not_(model.LibraryInfoAssociation.table.c.deleted))
),
backref="info_association"),
template=relation(model.FormDefinition,
primaryjoin=(model.LibraryInfoAssociation.table.c.form_definition_id == model.FormDefinition.table.c.id)),
info=relation(model.FormValues,
primaryjoin=(model.LibraryInfoAssociation.table.c.form_values_id == model.FormValues.table.c.id))
))
mapper(model.LibraryFolder, model.LibraryFolder.table, properties=dict(
folders=relation(model.LibraryFolder,
primaryjoin=(model.LibraryFolder.table.c.parent_id == model.LibraryFolder.table.c.id),
order_by=asc(model.LibraryFolder.table.c.name),
backref=backref("parent",
primaryjoin=(model.LibraryFolder.table.c.parent_id == model.LibraryFolder.table.c.id),
remote_side=[model.LibraryFolder.table.c.id])),
active_folders=relation(model.LibraryFolder,
primaryjoin=(
(model.LibraryFolder.table.c.parent_id == model.LibraryFolder.table.c.id) &
(not_(model.LibraryFolder.table.c.deleted))
),
order_by=asc(model.LibraryFolder.table.c.name),
# """sqlalchemy.exc.ArgumentError: Error creating eager relationship 'active_folders'
# on parent class '<class 'galaxy.model.LibraryFolder'>' to child class '<class 'galaxy.model.LibraryFolder'>':
# Can't use eager loading on a self referential relationship."""
lazy=True,
viewonly=True),
datasets=relation(model.LibraryDataset,
primaryjoin=((model.LibraryDataset.table.c.folder_id == model.LibraryFolder.table.c.id)),
order_by=asc(model.LibraryDataset.table.c._name),
lazy=True,
viewonly=True),
active_datasets=relation(model.LibraryDataset,
primaryjoin=(
(model.LibraryDataset.table.c.folder_id == model.LibraryFolder.table.c.id) &
(not_(model.LibraryDataset.table.c.deleted))
),
order_by=asc(model.LibraryDataset.table.c._name),
lazy=True,
viewonly=True)
))
mapper(model.LibraryFolderInfoAssociation, model.LibraryFolderInfoAssociation.table, properties=dict(
folder=relation(model.LibraryFolder,
primaryjoin=(
(model.LibraryFolderInfoAssociation.table.c.library_folder_id == model.LibraryFolder.table.c.id) &
(not_(model.LibraryFolderInfoAssociation.table.c.deleted))
),
backref="info_association"),
template=relation(model.FormDefinition,
primaryjoin=(model.LibraryFolderInfoAssociation.table.c.form_definition_id == model.FormDefinition.table.c.id)),
info=relation(model.FormValues,
primaryjoin=(model.LibraryFolderInfoAssociation.table.c.form_values_id == model.FormValues.table.c.id))
))
mapper(model.LibraryDataset, model.LibraryDataset.table, properties=dict(
folder=relation(model.LibraryFolder),
library_dataset_dataset_association=relation(model.LibraryDatasetDatasetAssociation,
primaryjoin=(model.LibraryDataset.table.c.library_dataset_dataset_association_id ==
model.LibraryDatasetDatasetAssociation.table.c.id)),
expired_datasets=relation(model.LibraryDatasetDatasetAssociation,
foreign_keys=[model.LibraryDataset.table.c.id, model.LibraryDataset.table.c.library_dataset_dataset_association_id],
primaryjoin=(
(model.LibraryDataset.table.c.id == model.LibraryDatasetDatasetAssociation.table.c.library_dataset_id) &
(not_(model.LibraryDataset.table.c.library_dataset_dataset_association_id ==
model.LibraryDatasetDatasetAssociation.table.c.id))
),
viewonly=True,
uselist=True)
))
mapper(model.LibraryDatasetDatasetAssociation, model.LibraryDatasetDatasetAssociation.table, properties=dict(
dataset=relation(model.Dataset),
library_dataset=relation(model.LibraryDataset,
primaryjoin=(model.LibraryDatasetDatasetAssociation.table.c.library_dataset_id == model.LibraryDataset.table.c.id)),
# user=relation( model.User.mapper ),
user=relation(model.User),
copied_from_library_dataset_dataset_association=relation(model.LibraryDatasetDatasetAssociation,
primaryjoin=(model.LibraryDatasetDatasetAssociation.table.c.copied_from_library_dataset_dataset_association_id ==
model.LibraryDatasetDatasetAssociation.table.c.id),
remote_side=[model.LibraryDatasetDatasetAssociation.table.c.id],
uselist=False),
copied_to_library_dataset_dataset_associations=relation(model.LibraryDatasetDatasetAssociation,
primaryjoin=(model.LibraryDatasetDatasetAssociation.table.c.copied_from_library_dataset_dataset_association_id ==
model.LibraryDatasetDatasetAssociation.table.c.id)),
copied_from_history_dataset_association=relation(model.HistoryDatasetAssociation,
primaryjoin=(model.LibraryDatasetDatasetAssociation.table.c.copied_from_history_dataset_association_id ==
model.HistoryDatasetAssociation.table.c.id),
uselist=False),
copied_to_history_dataset_associations=relation(model.HistoryDatasetAssociation,
primaryjoin=(model.HistoryDatasetAssociation.table.c.copied_from_library_dataset_dataset_association_id ==
model.LibraryDatasetDatasetAssociation.table.c.id)),
implicitly_converted_datasets=relation(model.ImplicitlyConvertedDatasetAssociation,
primaryjoin=(model.ImplicitlyConvertedDatasetAssociation.table.c.ldda_parent_id ==
model.LibraryDatasetDatasetAssociation.table.c.id)),
tags=relation(model.LibraryDatasetDatasetAssociationTagAssociation,
order_by=model.LibraryDatasetDatasetAssociationTagAssociation.table.c.id,
backref='history_tag_associations'),
extended_metadata=relation(model.ExtendedMetadata,
primaryjoin=((model.LibraryDatasetDatasetAssociation.table.c.extended_metadata_id == model.ExtendedMetadata.table.c.id))
),
_metadata=deferred(model.LibraryDatasetDatasetAssociation.table.c._metadata)
))
mapper(model.LibraryDatasetDatasetInfoAssociation, model.LibraryDatasetDatasetInfoAssociation.table, properties=dict(
library_dataset_dataset_association=relation(model.LibraryDatasetDatasetAssociation,
primaryjoin=(
(model.LibraryDatasetDatasetInfoAssociation.table.c.library_dataset_dataset_association_id ==
model.LibraryDatasetDatasetAssociation.table.c.id) &
(not_(model.LibraryDatasetDatasetInfoAssociation.table.c.deleted))
),
backref="info_association"),
template=relation(model.FormDefinition,
primaryjoin=(model.LibraryDatasetDatasetInfoAssociation.table.c.form_definition_id == model.FormDefinition.table.c.id)),
info=relation(model.FormValues,
primaryjoin=(model.LibraryDatasetDatasetInfoAssociation.table.c.form_values_id == model.FormValues.table.c.id))
))
mapper(model.JobToInputDatasetAssociation, model.JobToInputDatasetAssociation.table, properties=dict(
job=relation(model.Job),
dataset=relation(model.HistoryDatasetAssociation,
lazy=False,
backref="dependent_jobs")
))
mapper(model.JobToOutputDatasetAssociation, model.JobToOutputDatasetAssociation.table, properties=dict(
job=relation(model.Job),
dataset=relation(model.HistoryDatasetAssociation,
lazy=False)
))
mapper(model.JobToInputDatasetCollectionAssociation, model.JobToInputDatasetCollectionAssociation.table, properties=dict(
job=relation(model.Job),
dataset_collection=relation(model.HistoryDatasetCollectionAssociation,
lazy=False)
))
mapper(model.JobToOutputDatasetCollectionAssociation, model.JobToOutputDatasetCollectionAssociation.table, properties=dict(
job=relation(model.Job),
dataset_collection_instance=relation(model.HistoryDatasetCollectionAssociation,
lazy=False,
backref="output_dataset_collection_instances")
))
mapper(model.JobToImplicitOutputDatasetCollectionAssociation, model.JobToImplicitOutputDatasetCollectionAssociation.table, properties=dict(
job=relation(model.Job),
dataset_collection=relation(model.DatasetCollection,
backref="output_dataset_collections")
))
mapper(model.JobToInputLibraryDatasetAssociation, model.JobToInputLibraryDatasetAssociation.table, properties=dict(
job=relation(model.Job),
dataset=relation(model.LibraryDatasetDatasetAssociation,
lazy=False,
backref="dependent_jobs")
))
mapper(model.JobToOutputLibraryDatasetAssociation, model.JobToOutputLibraryDatasetAssociation.table, properties=dict(
job=relation(model.Job),
dataset=relation(model.LibraryDatasetDatasetAssociation,
lazy=False)
))
simple_mapping(model.JobStateHistory,
job=relation(model.Job, backref="state_history"))
simple_mapping(model.JobMetricText,
job=relation(model.Job, backref="text_metrics"))
simple_mapping(model.TaskMetricText,
task=relation(model.Task, backref="text_metrics"))
simple_mapping(model.JobMetricNumeric,
job=relation(model.Job, backref="numeric_metrics"))
simple_mapping(model.TaskMetricNumeric,
task=relation(model.Task, backref="numeric_metrics"))
simple_mapping(model.ImplicitlyCreatedDatasetCollectionInput,
input_dataset_collection=relation(model.HistoryDatasetCollectionAssociation,
primaryjoin=((model.HistoryDatasetCollectionAssociation.table.c.id ==
model.ImplicitlyCreatedDatasetCollectionInput.table.c.input_dataset_collection_id)),
# backref="implicitly_created_dataset_collections",
),
)
simple_mapping(model.ImplicitCollectionJobs)
# simple_mapping(
# model.ImplicitCollectionJobsHistoryDatasetCollectionAssociation,
# history_dataset_collection_associations=relation(
# model.HistoryDatasetCollectionAssociation,
# backref=backref("implicit_collection_jobs_association", uselist=False),
# uselist=True,
# ),
# )
simple_mapping(
model.ImplicitCollectionJobsJobAssociation,
implicit_collection_jobs=relation(
model.ImplicitCollectionJobs,
backref=backref("jobs", uselist=True),
uselist=False,
),
job=relation(
model.Job,
backref=backref("implicit_collection_jobs_association", uselist=False),
uselist=False,
),
)
mapper(model.JobParameter, model.JobParameter.table)
mapper(model.JobExternalOutputMetadata, model.JobExternalOutputMetadata.table, properties=dict(
job=relation(model.Job),
history_dataset_association=relation(model.HistoryDatasetAssociation, lazy=False),
library_dataset_dataset_association=relation(model.LibraryDatasetDatasetAssociation, lazy=False)
))
mapper(model.JobExportHistoryArchive, model.JobExportHistoryArchive.table, properties=dict(
job=relation(model.Job),
history=relation(model.History),
dataset=relation(model.Dataset, backref='job_export_history_archive')
))
mapper(model.JobImportHistoryArchive, model.JobImportHistoryArchive.table, properties=dict(
job=relation(model.Job),
history=relation(model.History)
))
mapper(model.GenomeIndexToolData, model.GenomeIndexToolData.table, properties=dict(
job=relation(model.Job, backref='job'),
dataset=relation(model.Dataset, backref='genome_index_tool_data'),
user=relation(model.User),
deferred=relation(model.DeferredJob, backref='deferred_job'),
transfer=relation(model.TransferJob, backref='transfer_job')
))
mapper(model.InteractiveToolEntryPoint, model.InteractiveToolEntryPoint.table, properties=dict(
job=relation(model.Job, backref=backref('interactivetool_entry_points', uselist=True), uselist=False)
))
mapper(model.JobContainerAssociation, model.JobContainerAssociation.table, properties=dict(
job=relation(model.Job, backref=backref('container', uselist=False), uselist=False)
))
mapper(model.PostJobAction, model.PostJobAction.table, properties=dict(
workflow_step=relation(model.WorkflowStep,
backref='post_job_actions',
primaryjoin=(model.WorkflowStep.table.c.id == model.PostJobAction.table.c.workflow_step_id))
))
mapper(model.PostJobActionAssociation, model.PostJobActionAssociation.table, properties=dict(
job=relation(model.Job),
post_job_action=relation(model.PostJobAction)
))
mapper(model.Job, model.Job.table, properties=dict(
# user=relation( model.User.mapper ),
user=relation(model.User),
galaxy_session=relation(model.GalaxySession),
history=relation(model.History, backref="jobs"),
library_folder=relation(model.LibraryFolder, lazy=True),
parameters=relation(model.JobParameter, lazy=True),
input_datasets=relation(model.JobToInputDatasetAssociation),
input_dataset_collections=relation(model.JobToInputDatasetCollectionAssociation, lazy=True),
output_datasets=relation(model.JobToOutputDatasetAssociation, lazy=True),
any_output_dataset_deleted=column_property(
exists([model.HistoryDatasetAssociation],
and_(model.Job.table.c.id == model.JobToOutputDatasetAssociation.table.c.job_id,
model.HistoryDatasetAssociation.table.c.id == model.JobToOutputDatasetAssociation.table.c.dataset_id,
model.HistoryDatasetAssociation.table.c.deleted == true())
)
),
any_output_dataset_collection_instances_deleted=column_property(
exists([model.HistoryDatasetCollectionAssociation.table.c.id],
and_(model.Job.table.c.id == model.JobToOutputDatasetCollectionAssociation.table.c.job_id,
model.HistoryDatasetCollectionAssociation.table.c.id == model.JobToOutputDatasetCollectionAssociation.table.c.dataset_collection_id,
model.HistoryDatasetCollectionAssociation.table.c.deleted == true())
)
),
output_dataset_collection_instances=relation(model.JobToOutputDatasetCollectionAssociation, lazy=True),
output_dataset_collections=relation(model.JobToImplicitOutputDatasetCollectionAssociation, lazy=True),
post_job_actions=relation(model.PostJobActionAssociation, lazy=False),
input_library_datasets=relation(model.JobToInputLibraryDatasetAssociation),
output_library_datasets=relation(model.JobToOutputLibraryDatasetAssociation, lazy=True),
external_output_metadata=relation(model.JobExternalOutputMetadata, lazy=True),
tasks=relation(model.Task)
))
mapper(model.Task, model.Task.table, properties=dict(
job=relation(model.Job)
))
mapper(model.DeferredJob, model.DeferredJob.table, properties={})
mapper(model.TransferJob, model.TransferJob.table, properties={})
simple_mapping(model.DatasetCollection,
elements=relation(model.DatasetCollectionElement,
primaryjoin=(model.DatasetCollection.table.c.id == model.DatasetCollectionElement.table.c.dataset_collection_id),
remote_side=[model.DatasetCollectionElement.table.c.dataset_collection_id],
backref="collection",
order_by=model.DatasetCollectionElement.table.c.element_index)
)
simple_mapping(model.HistoryDatasetCollectionAssociation,
collection=relation(model.DatasetCollection),
history=relation(model.History,
backref='dataset_collections'),
copied_from_history_dataset_collection_association=relation(model.HistoryDatasetCollectionAssociation,
primaryjoin=(model.HistoryDatasetCollectionAssociation.table.c.copied_from_history_dataset_collection_association_id ==
model.HistoryDatasetCollectionAssociation.table.c.id),
remote_side=[model.HistoryDatasetCollectionAssociation.table.c.id],
uselist=False),
copied_to_history_dataset_collection_associations=relation(model.HistoryDatasetCollectionAssociation,
primaryjoin=(model.HistoryDatasetCollectionAssociation.table.c.copied_from_history_dataset_collection_association_id ==
model.HistoryDatasetCollectionAssociation.table.c.id)),
implicit_input_collections=relation(model.ImplicitlyCreatedDatasetCollectionInput,
primaryjoin=((model.HistoryDatasetCollectionAssociation.table.c.id ==
model.ImplicitlyCreatedDatasetCollectionInput.table.c.dataset_collection_id)),
backref="dataset_collection",
),
implicit_collection_jobs=relation(
model.ImplicitCollectionJobs,
backref=backref("history_dataset_collection_associations", uselist=True),
uselist=False,
),
job=relation(
model.Job,
backref=backref("history_dataset_collection_associations", uselist=True),
uselist=False,
),
tags=relation(model.HistoryDatasetCollectionTagAssociation,
order_by=model.HistoryDatasetCollectionTagAssociation.table.c.id,
backref='dataset_collections'),
annotations=relation(model.HistoryDatasetCollectionAssociationAnnotationAssociation,
order_by=model.HistoryDatasetCollectionAssociationAnnotationAssociation.table.c.id,
backref="dataset_collections"),
ratings=relation(model.HistoryDatasetCollectionRatingAssociation,
order_by=model.HistoryDatasetCollectionRatingAssociation.table.c.id,
backref="dataset_collections")
)
simple_mapping(model.LibraryDatasetCollectionAssociation,
collection=relation(model.DatasetCollection),
folder=relation(model.LibraryFolder,
backref='dataset_collections'),
tags=relation(model.LibraryDatasetCollectionTagAssociation,
order_by=model.LibraryDatasetCollectionTagAssociation.table.c.id,
backref='dataset_collections'),
annotations=relation(model.LibraryDatasetCollectionAnnotationAssociation,
order_by=model.LibraryDatasetCollectionAnnotationAssociation.table.c.id,
backref="dataset_collections"),
ratings=relation(model.LibraryDatasetCollectionRatingAssociation,
order_by=model.LibraryDatasetCollectionRatingAssociation.table.c.id,
backref="dataset_collections"))
simple_mapping(model.DatasetCollectionElement,
hda=relation(model.HistoryDatasetAssociation,
primaryjoin=(model.DatasetCollectionElement.table.c.hda_id == model.HistoryDatasetAssociation.table.c.id)),
ldda=relation(model.LibraryDatasetDatasetAssociation,
primaryjoin=(model.DatasetCollectionElement.table.c.ldda_id == model.LibraryDatasetDatasetAssociation.table.c.id)),
child_collection=relation(model.DatasetCollection,
primaryjoin=(model.DatasetCollectionElement.table.c.child_collection_id == model.DatasetCollection.table.c.id)))
mapper(model.Event, model.Event.table, properties=dict(
history=relation(model.History),
galaxy_session=relation(model.GalaxySession),
# user=relation( model.User.mapper ) ) )
user=relation(model.User)
))
mapper(model.GalaxySession, model.GalaxySession.table, properties=dict(
histories=relation(model.GalaxySessionToHistoryAssociation),
current_history=relation(model.History),
# user=relation( model.User.mapper ) ) )
user=relation(model.User)
))
mapper(model.GalaxySessionToHistoryAssociation, model.GalaxySessionToHistoryAssociation.table, properties=dict(
galaxy_session=relation(model.GalaxySession),
history=relation(model.History)
))
mapper(model.Workflow, model.Workflow.table, properties=dict(
steps=relation(model.WorkflowStep,
backref='workflow',
primaryjoin=((model.Workflow.table.c.id == model.WorkflowStep.table.c.workflow_id)),
order_by=asc(model.WorkflowStep.table.c.order_index),
cascade="all, delete-orphan",
lazy=False),
step_count=column_property(
select([func.count(model.WorkflowStep.table.c.id)]).where(model.Workflow.table.c.id == model.WorkflowStep.table.c.workflow_id),
deferred=True
)
))
mapper(model.WorkflowStep, model.WorkflowStep.table, properties=dict(
subworkflow=relation(model.Workflow,
primaryjoin=(model.Workflow.table.c.id == model.WorkflowStep.table.c.subworkflow_id),
backref="parent_workflow_steps"),
dynamic_tool=relation(model.DynamicTool,
primaryjoin=(model.DynamicTool.table.c.id == model.WorkflowStep.table.c.dynamic_tool_id),
backref="workflow_steps"),
tags=relation(model.WorkflowStepTagAssociation,
order_by=model.WorkflowStepTagAssociation.table.c.id,
backref="workflow_steps"),
annotations=relation(model.WorkflowStepAnnotationAssociation,
order_by=model.WorkflowStepAnnotationAssociation.table.c.id,
backref="workflow_steps")
))
mapper(model.WorkflowStepInput, model.WorkflowStepInput.table, properties=dict(
workflow_step=relation(model.WorkflowStep,
backref=backref("inputs", uselist=True),
cascade="all",
primaryjoin=(model.WorkflowStepInput.table.c.workflow_step_id == model.WorkflowStep.table.c.id))
))
mapper(model.WorkflowOutput, model.WorkflowOutput.table, properties=dict(
workflow_step=relation(model.WorkflowStep,
backref='workflow_outputs',
primaryjoin=(model.WorkflowStep.table.c.id == model.WorkflowOutput.table.c.workflow_step_id))
))
mapper(model.WorkflowStepConnection, model.WorkflowStepConnection.table, properties=dict(
input_step_input=relation(model.WorkflowStepInput,
backref="connections",
cascade="all",
primaryjoin=(model.WorkflowStepConnection.table.c.input_step_input_id == model.WorkflowStepInput.table.c.id)),
input_subworkflow_step=relation(model.WorkflowStep,
backref=backref("parent_workflow_input_connections", uselist=True),
primaryjoin=(model.WorkflowStepConnection.table.c.input_subworkflow_step_id == model.WorkflowStep.table.c.id),
),
output_step=relation(model.WorkflowStep,
backref="output_connections",
cascade="all",
primaryjoin=(model.WorkflowStepConnection.table.c.output_step_id == model.WorkflowStep.table.c.id)),
))
mapper(model.StoredWorkflow, model.StoredWorkflow.table, properties=dict(
user=relation(model.User,
primaryjoin=(model.User.table.c.id == model.StoredWorkflow.table.c.user_id),
backref='stored_workflows'),
workflows=relation(model.Workflow,
backref='stored_workflow',
cascade="all, delete-orphan",
primaryjoin=(model.StoredWorkflow.table.c.id == model.Workflow.table.c.stored_workflow_id),
order_by=-model.Workflow.id),
latest_workflow=relation(model.Workflow,
post_update=True,
primaryjoin=(model.StoredWorkflow.table.c.latest_workflow_id == model.Workflow.table.c.id),
lazy=False),
tags=relation(model.StoredWorkflowTagAssociation,
order_by=model.StoredWorkflowTagAssociation.table.c.id,
backref="stored_workflows"),
owner_tags=relation(model.StoredWorkflowTagAssociation,
primaryjoin=(
and_(model.StoredWorkflow.table.c.id == model.StoredWorkflowTagAssociation.table.c.stored_workflow_id,
model.StoredWorkflow.table.c.user_id == model.StoredWorkflowTagAssociation.table.c.user_id)
),
order_by=model.StoredWorkflowTagAssociation.table.c.id),
annotations=relation(model.StoredWorkflowAnnotationAssociation,
order_by=model.StoredWorkflowAnnotationAssociation.table.c.id,
backref="stored_workflows"),
ratings=relation(model.StoredWorkflowRatingAssociation,
order_by=model.StoredWorkflowRatingAssociation.table.c.id,
backref="stored_workflows"),
average_rating=column_property(
select([func.avg(model.StoredWorkflowRatingAssociation.table.c.rating)]).where(model.StoredWorkflowRatingAssociation.table.c.stored_workflow_id == model.StoredWorkflow.table.c.id),
deferred=True
)
))
# Set up proxy so that
# StoredWorkflow.users_shared_with
# returns a list of users that workflow is shared with.
model.StoredWorkflow.users_shared_with_dot_users = association_proxy('users_shared_with', 'user')
mapper(model.StoredWorkflowUserShareAssociation, model.StoredWorkflowUserShareAssociation.table, properties=dict(
user=relation(model.User,
backref='workflows_shared_by_others'),
stored_workflow=relation(model.StoredWorkflow,
backref='users_shared_with')
))
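# Illustrative sketch (not part of the original mapping module): with the
# association proxy defined above, a loaded StoredWorkflow exposes its sharing
# users directly. `stored_workflow` is a hypothetical, already-persisted object
# used only for illustration.
#
#     share_rows = stored_workflow.users_shared_with             # StoredWorkflowUserShareAssociation rows
#     shared_users = stored_workflow.users_shared_with_dot_users # the model.User objects behind them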
mapper(model.StoredWorkflowMenuEntry, model.StoredWorkflowMenuEntry.table, properties=dict(
stored_workflow=relation(model.StoredWorkflow)
))
mapper(model.WorkflowInvocation, model.WorkflowInvocation.table, properties=dict(
history=relation(model.History, backref=backref('workflow_invocations', uselist=True)),
input_parameters=relation(model.WorkflowRequestInputParameter),
step_states=relation(model.WorkflowRequestStepState),
input_step_parameters=relation(model.WorkflowRequestInputStepParameter),
input_datasets=relation(model.WorkflowRequestToInputDatasetAssociation),
input_dataset_collections=relation(model.WorkflowRequestToInputDatasetCollectionAssociation),
subworkflow_invocations=relation(model.WorkflowInvocationToSubworkflowInvocationAssociation,
primaryjoin=((model.WorkflowInvocationToSubworkflowInvocationAssociation.table.c.workflow_invocation_id == model.WorkflowInvocation.table.c.id)),
backref=backref("parent_workflow_invocation", uselist=False),
uselist=True,
),
steps=relation(model.WorkflowInvocationStep,
backref="workflow_invocation"),
workflow=relation(model.Workflow)
))
mapper(model.WorkflowInvocationToSubworkflowInvocationAssociation, model.WorkflowInvocationToSubworkflowInvocationAssociation.table, properties=dict(
subworkflow_invocation=relation(model.WorkflowInvocation,
primaryjoin=((model.WorkflowInvocationToSubworkflowInvocationAssociation.table.c.subworkflow_invocation_id == model.WorkflowInvocation.table.c.id)),
backref="parent_workflow_invocation_association",
uselist=False,
),
workflow_step=relation(model.WorkflowStep),
))
simple_mapping(model.WorkflowInvocationStep,
workflow_step=relation(model.WorkflowStep),
job=relation(model.Job, backref=backref('workflow_invocation_step', uselist=False), uselist=False),
implicit_collection_jobs=relation(model.ImplicitCollectionJobs, backref=backref('workflow_invocation_step', uselist=False), uselist=False),)
simple_mapping(model.WorkflowRequestInputParameter,
workflow_invocation=relation(model.WorkflowInvocation))
simple_mapping(model.WorkflowRequestStepState,
workflow_invocation=relation(model.WorkflowInvocation),
workflow_step=relation(model.WorkflowStep))
simple_mapping(model.WorkflowRequestInputStepParameter,
workflow_invocation=relation(model.WorkflowInvocation),
workflow_step=relation(model.WorkflowStep))
simple_mapping(model.WorkflowRequestToInputDatasetAssociation,
workflow_invocation=relation(model.WorkflowInvocation),
workflow_step=relation(model.WorkflowStep),
dataset=relation(model.HistoryDatasetAssociation))
simple_mapping(model.WorkflowRequestToInputDatasetCollectionAssociation,
workflow_invocation=relation(model.WorkflowInvocation),
workflow_step=relation(model.WorkflowStep),
dataset_collection=relation(model.HistoryDatasetCollectionAssociation))
mapper(model.MetadataFile, model.MetadataFile.table, properties=dict(
history_dataset=relation(model.HistoryDatasetAssociation),
library_dataset=relation(model.LibraryDatasetDatasetAssociation)
))
simple_mapping(
model.WorkflowInvocationOutputDatasetAssociation,
workflow_invocation=relation(model.WorkflowInvocation, backref="output_datasets"),
workflow_step=relation(model.WorkflowStep),
dataset=relation(model.HistoryDatasetAssociation),
workflow_output=relation(model.WorkflowOutput),
)
simple_mapping(
model.WorkflowInvocationOutputDatasetCollectionAssociation,
workflow_invocation=relation(model.WorkflowInvocation, backref="output_dataset_collections"),
workflow_step=relation(model.WorkflowStep),
dataset_collection=relation(model.HistoryDatasetCollectionAssociation),
workflow_output=relation(model.WorkflowOutput),
)
simple_mapping(
model.WorkflowInvocationStepOutputDatasetAssociation,
workflow_invocation_step=relation(model.WorkflowInvocationStep, backref="output_datasets"),
dataset=relation(model.HistoryDatasetAssociation),
)
simple_mapping(
model.WorkflowInvocationStepOutputDatasetCollectionAssociation,
workflow_invocation_step=relation(model.WorkflowInvocationStep, backref="output_dataset_collections"),
dataset_collection=relation(model.HistoryDatasetCollectionAssociation),
)
mapper(model.PageRevision, model.PageRevision.table)
mapper(model.Page, model.Page.table, properties=dict(
user=relation(model.User),
revisions=relation(model.PageRevision,
backref='page',
cascade="all, delete-orphan",
primaryjoin=(model.Page.table.c.id == model.PageRevision.table.c.page_id)),
latest_revision=relation(model.PageRevision,
post_update=True,
primaryjoin=(model.Page.table.c.latest_revision_id == model.PageRevision.table.c.id),
lazy=False),
tags=relation(model.PageTagAssociation,
order_by=model.PageTagAssociation.table.c.id,
backref="pages"),
annotations=relation(model.PageAnnotationAssociation,
order_by=model.PageAnnotationAssociation.table.c.id,
backref="pages"),
ratings=relation(model.PageRatingAssociation,
order_by=model.PageRatingAssociation.table.c.id,
backref="pages"),
average_rating=column_property(
select([func.avg(model.PageRatingAssociation.table.c.rating)]).where(model.PageRatingAssociation.table.c.page_id == model.Page.table.c.id),
deferred=True
)
))
# Set up proxy so that
# Page.users_shared_with
# returns a list of users that page is shared with.
model.Page.users_shared_with_dot_users = association_proxy('users_shared_with', 'user')
mapper(model.PageUserShareAssociation, model.PageUserShareAssociation.table,
properties=dict(user=relation(model.User, backref='pages_shared_by_others'),
page=relation(model.Page, backref='users_shared_with')))
mapper(model.VisualizationRevision, model.VisualizationRevision.table)
mapper(model.Visualization, model.Visualization.table, properties=dict(
user=relation(model.User),
revisions=relation(model.VisualizationRevision,
backref='visualization',
cascade="all, delete-orphan",
primaryjoin=(model.Visualization.table.c.id == model.VisualizationRevision.table.c.visualization_id)),
latest_revision=relation(model.VisualizationRevision,
post_update=True,
primaryjoin=(model.Visualization.table.c.latest_revision_id == model.VisualizationRevision.table.c.id),
lazy=False),
tags=relation(model.VisualizationTagAssociation,
order_by=model.VisualizationTagAssociation.table.c.id,
backref="visualizations"),
annotations=relation(model.VisualizationAnnotationAssociation,
order_by=model.VisualizationAnnotationAssociation.table.c.id,
backref="visualizations"),
ratings=relation(model.VisualizationRatingAssociation,
order_by=model.VisualizationRatingAssociation.table.c.id,
backref="visualizations"),
average_rating=column_property(
select([func.avg(model.VisualizationRatingAssociation.table.c.rating)]).where(model.VisualizationRatingAssociation.table.c.visualization_id == model.Visualization.table.c.id),
deferred=True
)
))
# Set up proxy so that
# Visualization.users_shared_with
# returns a list of users that visualization is shared with.
model.Visualization.users_shared_with_dot_users = association_proxy('users_shared_with', 'user')
mapper(model.VisualizationUserShareAssociation, model.VisualizationUserShareAssociation.table, properties=dict(
user=relation(model.User,
backref='visualizations_shared_by_others'),
visualization=relation(model.Visualization,
backref='users_shared_with')
))
# Tag tables.
simple_mapping(model.Tag,
children=relation(model.Tag, backref=backref('parent', remote_side=[model.Tag.table.c.id])))
def tag_mapping(tag_association_class, backref_name):
simple_mapping(tag_association_class, tag=relation(model.Tag, backref=backref_name), user=relation(model.User))
tag_mapping(model.HistoryTagAssociation, "tagged_histories")
tag_mapping(model.DatasetTagAssociation, "tagged_datasets")
tag_mapping(model.HistoryDatasetAssociationTagAssociation, "tagged_history_dataset_associations")
tag_mapping(model.LibraryDatasetDatasetAssociationTagAssociation, "tagged_library_dataset_dataset_associations")
tag_mapping(model.PageTagAssociation, "tagged_pages")
tag_mapping(model.StoredWorkflowTagAssociation, "tagged_workflows")
tag_mapping(model.WorkflowStepTagAssociation, "tagged_workflow_steps")
tag_mapping(model.VisualizationTagAssociation, "tagged_visualizations")
tag_mapping(model.HistoryDatasetCollectionTagAssociation, "tagged_history_dataset_collections")
tag_mapping(model.LibraryDatasetCollectionTagAssociation, "tagged_library_dataset_collections")
tag_mapping(model.ToolTagAssociation, "tagged_tools")
# Annotation tables.
def annotation_mapping(annotation_class, **kwds):
kwds = dict((key, relation(value)) for key, value in kwds.items())
simple_mapping(annotation_class, **dict(user=relation(model.User), **kwds))
annotation_mapping(model.HistoryAnnotationAssociation, history=model.History)
annotation_mapping(model.HistoryDatasetAssociationAnnotationAssociation, hda=model.HistoryDatasetAssociation)
annotation_mapping(model.StoredWorkflowAnnotationAssociation, stored_workflow=model.StoredWorkflow)
annotation_mapping(model.WorkflowStepAnnotationAssociation, workflow_step=model.WorkflowStep)
annotation_mapping(model.PageAnnotationAssociation, page=model.Page)
annotation_mapping(model.VisualizationAnnotationAssociation, visualization=model.Visualization)
annotation_mapping(model.HistoryDatasetCollectionAssociationAnnotationAssociation,
history_dataset_collection=model.HistoryDatasetCollectionAssociation)
annotation_mapping(model.LibraryDatasetCollectionAnnotationAssociation,
library_dataset_collection=model.LibraryDatasetCollectionAssociation)
# Rating tables.
def rating_mapping(rating_class, **kwds):
kwds = dict((key, relation(value)) for key, value in kwds.items())
simple_mapping(rating_class, **dict(user=relation(model.User), **kwds))
rating_mapping(model.HistoryRatingAssociation, history=model.History)
rating_mapping(model.HistoryDatasetAssociationRatingAssociation, hda=model.HistoryDatasetAssociation)
rating_mapping(model.StoredWorkflowRatingAssociation, stored_workflow=model.StoredWorkflow)
rating_mapping(model.PageRatingAssociation, page=model.Page)
rating_mapping(model.VisualizationRatingAssociation, visualization=model.Visualization)
rating_mapping(model.HistoryDatasetCollectionRatingAssociation,
history_dataset_collection=model.HistoryDatasetCollectionAssociation)
rating_mapping(model.LibraryDatasetCollectionRatingAssociation,
library_dataset_collection=model.LibraryDatasetCollectionAssociation)
# Data Manager tables
mapper(model.DataManagerHistoryAssociation, model.DataManagerHistoryAssociation.table, properties=dict(
history=relation(model.History),
user=relation(model.User,
backref='data_manager_histories')
))
mapper(model.DataManagerJobAssociation, model.DataManagerJobAssociation.table, properties=dict(
job=relation(model.Job,
backref=backref('data_manager_association', uselist=False),
uselist=False)
))
# User tables.
mapper(model.UserPreference, model.UserPreference.table, properties={})
mapper(model.UserAction, model.UserAction.table, properties=dict(
# user=relation( model.User.mapper )
user=relation(model.User)
))
mapper(model.APIKeys, model.APIKeys.table, properties={})
# model.HistoryDatasetAssociation.mapper.add_property( "creating_job_associations",
# relation( model.JobToOutputDatasetAssociation ) )
# model.LibraryDatasetDatasetAssociation.mapper.add_property( "creating_job_associations",
# relation( model.JobToOutputLibraryDatasetAssociation ) )
class_mapper(model.HistoryDatasetAssociation).add_property(
"creating_job_associations", relation(model.JobToOutputDatasetAssociation))
class_mapper(model.LibraryDatasetDatasetAssociation).add_property(
"creating_job_associations", relation(model.JobToOutputLibraryDatasetAssociation))
class_mapper(model.HistoryDatasetCollectionAssociation).add_property(
"creating_job_associations", relation(model.JobToOutputDatasetCollectionAssociation))
# Helper methods.
def db_next_hid(self, n=1):
"""
db_next_hid( self )
Override __next_hid to generate hids from the database in a concurrency-safe way.
Reads this history's hid counter from the DB, reserves ``n`` values by persisting
the incremented counter, and returns the first reserved hid.
:rtype: int
:returns: the next available hid for this history
"""
session = object_session(self)
table = self.table
trans = session.begin()
try:
if "postgres" not in session.bind.dialect.name:
next_hid = select([table.c.hid_counter], table.c.id == model.cached_id(self)).with_for_update().scalar()
table.update(table.c.id == self.id).execute(hid_counter=(next_hid + n))
else:
stmt = table.update().where(table.c.id == model.cached_id(self)).values(hid_counter=(table.c.hid_counter + n)).returning(table.c.hid_counter)
next_hid = session.execute(stmt).scalar() - n
trans.commit()
return next_hid
except Exception:
trans.rollback()
raise
model.History._next_hid = db_next_hid
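# Illustrative sketch (assumption, not original code): once the override above is
# attached, a persistent History object can reserve hids without racing other
# requests. `history` is a hypothetical, already-flushed instance.
#
#     hid = history._next_hid()        # reserve a single hid
#     first = history._next_hid(n=5)   # reserve a block of five; returns the first of the block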
def _workflow_invocation_update(self):
session = object_session(self)
table = self.table
now_val = now()
stmt = table.update().values(update_time=now_val).where(and_(table.c.id == self.id, table.c.update_time < now_val))
session.execute(stmt)
model.WorkflowInvocation.update = _workflow_invocation_update
def init(file_path, url, engine_options=None, create_tables=False, map_install_models=False,
database_query_profiling_proxy=False, object_store=None, trace_logger=None, use_pbkdf2=True,
slow_query_log_threshold=0, thread_local_log=None, log_query_counts=False):
"""Connect mappings to the database"""
if engine_options is None:
engine_options = {}
# Connect dataset to the file path
model.Dataset.file_path = file_path
# Connect dataset to object store
model.Dataset.object_store = object_store
# Use PBKDF2 password hashing?
model.User.use_pbkdf2 = use_pbkdf2
# Load the appropriate db module
engine = build_engine(url, engine_options, database_query_profiling_proxy, trace_logger, slow_query_log_threshold, thread_local_log=thread_local_log, log_query_counts=log_query_counts)
# Connect the metadata to the database.
metadata.bind = engine
model_modules = [model]
if map_install_models:
import galaxy.model.tool_shed_install.mapping # noqa: F401
from galaxy.model import tool_shed_install
galaxy.model.tool_shed_install.mapping.init(url=url, engine_options=engine_options, create_tables=create_tables)
model_modules.append(tool_shed_install)
result = ModelMapping(model_modules, engine=engine)
# Create tables if needed
if create_tables:
metadata.create_all()
# metadata.engine.commit()
result.create_tables = create_tables
# load local galaxy security policy
result.security_agent = GalaxyRBACAgent(result)
result.thread_local_log = thread_local_log
return result
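# Illustrative sketch (assumption, not original code): a minimal call wiring the
# mappings to a throwaway SQLite database; the file path and URL are hypothetical
# example values.
#
#     model_mapping = init("/tmp/galaxy-files", "sqlite:///galaxy.sqlite", create_tables=True)
#     rbac = model_mapping.security_agent   # GalaxyRBACAgent attached in init() above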
| 48.473464
| 188
| 0.732136
|
2692985671d579b373859ef43680428b22258561
| 4,764
|
py
|
Python
|
migrations/versions/264fa39c91e2_breaking_column_size_increase.py
|
LambArchie/Competition-Manager
|
7da4c8625f0bd0c1b002b4f2aef72529e1ede4c6
|
[
"MIT"
] | null | null | null |
migrations/versions/264fa39c91e2_breaking_column_size_increase.py
|
LambArchie/Competition-Manager
|
7da4c8625f0bd0c1b002b4f2aef72529e1ede4c6
|
[
"MIT"
] | 1
|
2021-06-14T14:48:06.000Z
|
2021-06-14T15:42:40.000Z
|
migrations/versions/264fa39c91e2_breaking_column_size_increase.py
|
LambArchie/Competition-Manager
|
7da4c8625f0bd0c1b002b4f2aef72529e1ede4c6
|
[
"MIT"
] | null | null | null |
"""breaking - column size increase
Revision ID: 264fa39c91e2
Revises:
Create Date: 2020-01-27 18:15:23.530134
"""
from alembic import op
import sqlalchemy as sa
#manually added below
from app.database.uuid import GUID
sa.GUID = GUID
# revision identifiers, used by Alembic.
revision = '264fa39c91e2'
down_revision = None
branch_labels = None
depends_on = None
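# Illustrative note (assumption, not part of the generated revision): with this
# file under migrations/versions/, the schema change would typically be applied or
# rolled back through Flask-Migrate's wrappers around Alembic, e.g.
#
#     flask db upgrade     # runs upgrade() below
#     flask db downgrade   # runs downgrade() below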
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('competition',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('body', sa.String(length=1024), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=128), nullable=True),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('organisation', sa.String(length=64), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.Column('last_seen', sa.DateTime(), nullable=True),
sa.Column('avatar', sa.String(length=70), nullable=True),
sa.Column('admin', sa.Boolean(), nullable=True),
sa.Column('reviewer', sa.Boolean(), nullable=True),
sa.Column('token', sa.String(length=32), nullable=True),
sa.Column('token_expiration', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)
op.create_index(op.f('ix_user_token'), 'user', ['token'], unique=True)
op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
op.create_table('category',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('body', sa.String(length=1024), nullable=True),
sa.Column('comp_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['comp_id'], ['competition.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('submission',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('body', sa.String(length=32768), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('comp_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['comp_id'], ['competition.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_submission_timestamp'), 'submission', ['timestamp'], unique=False)
op.create_table('category_submission_assoc',
sa.Column('categories', sa.Integer(), nullable=True),
sa.Column('submissions', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['categories'], ['category.id'], ),
sa.ForeignKeyConstraint(['submissions'], ['submission.id'], )
)
op.create_table('submission_uploads',
sa.Column('uuid', sa.GUID(), nullable=False),
sa.Column('id', sa.Integer(), nullable=True),
sa.Column('filename', sa.String(length=64), nullable=True),
sa.Column('submission_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['submission_id'], ['submission.id'], ),
sa.PrimaryKeyConstraint('uuid')
)
op.create_table('votes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('comp_id', sa.Integer(), nullable=True),
sa.Column('cat_id', sa.Integer(), nullable=True),
sa.Column('submission_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('score', sa.Integer(), nullable=True),
sa.Column('comments', sa.String(length=8192), nullable=True),
sa.ForeignKeyConstraint(['cat_id'], ['category.id'], ),
sa.ForeignKeyConstraint(['comp_id'], ['competition.id'], ),
sa.ForeignKeyConstraint(['submission_id'], ['submission.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('votes')
op.drop_table('submission_uploads')
op.drop_table('category_submission_assoc')
op.drop_index(op.f('ix_submission_timestamp'), table_name='submission')
op.drop_table('submission')
op.drop_table('category')
op.drop_index(op.f('ix_user_username'), table_name='user')
op.drop_index(op.f('ix_user_token'), table_name='user')
op.drop_index(op.f('ix_user_email'), table_name='user')
op.drop_table('user')
op.drop_table('competition')
# ### end Alembic commands ###
| 42.535714
| 95
| 0.674013
|
63dd643fab0d8ceee80796f662e1e8b04089aadc
| 24,715
|
py
|
Python
|
ambari-server/src/test/python/TestMpacks.py
|
hortonworks/ambari-perf
|
71305effa9ac00e2e9adb36e6a66a13c9105a811
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-05-06T06:24:04.000Z
|
2021-05-06T06:24:04.000Z
|
ambari-server/src/test/python/TestMpacks.py
|
gcxtx/ambari
|
133d9c4661b21182482c25f96c3f0bf0a9740a9f
|
[
"Apache-2.0"
] | null | null | null |
ambari-server/src/test/python/TestMpacks.py
|
gcxtx/ambari
|
133d9c4661b21182482c25f96c3f0bf0a9740a9f
|
[
"Apache-2.0"
] | 3
|
2017-10-31T11:42:31.000Z
|
2021-04-26T07:17:53.000Z
|
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
from mock.mock import patch, MagicMock, call
from ambari_commons.exceptions import FatalException
from ambari_server.setupMpacks import install_mpack, upgrade_mpack, replay_mpack_logs, purge_stacks_and_mpacks, \
STACK_DEFINITIONS_RESOURCE_NAME, SERVICE_DEFINITIONS_RESOURCE_NAME, MPACKS_RESOURCE_NAME
from unittest import TestCase
from ambari_server.serverConfiguration import STACK_LOCATION_KEY, COMMON_SERVICES_PATH_PROPERTY, MPACKS_STAGING_PATH_PROPERTY
with patch.object(os, "geteuid", new=MagicMock(return_value=0)):
from resource_management.core import sudo
reload(sudo)
def get_configs():
test_directory = os.path.dirname(os.path.abspath(__file__))
mpacks_directory = os.path.join(test_directory, "mpacks")
configs = {
STACK_LOCATION_KEY : "/var/lib/ambari-server/resources/stacks",
COMMON_SERVICES_PATH_PROPERTY : "/var/lib/ambari-server/resources/common-services",
MPACKS_STAGING_PATH_PROPERTY : mpacks_directory
}
return configs
configs = get_configs()
class TestMpacks(TestCase):
def test_install_mpack_with_no_mpack_path(self):
options = self._create_empty_options_mock()
fail = False
try:
install_mpack(options)
except FatalException as e:
self.assertEquals("Management pack not specified!", e.reason)
fail = True
self.assertTrue(fail)
@patch("ambari_server.setupMpacks.download_mpack")
def test_install_mpack_with_invalid_mpack_path(self, download_mpack_mock):
options = self._create_empty_options_mock()
options.mpack_path = "/invalid_path/mpack.tar.gz"
download_mpack_mock.return_value = None
fail = False
try:
install_mpack(options)
except FatalException as e:
self.assertEquals("Management pack could not be downloaded!", e.reason)
fail = True
self.assertTrue(fail)
@patch("os.path.exists")
@patch("ambari_server.setupMpacks.get_ambari_properties")
def test_purge_stacks_and_mpacks(self, get_ambari_properties_mock, os_path_exists_mock):
options = self._create_empty_options_mock()
get_ambari_properties_mock.return_value = configs
stacks_directory = configs[STACK_LOCATION_KEY]
common_services_directory = configs[COMMON_SERVICES_PATH_PROPERTY]
mpacks_directory = configs[MPACKS_STAGING_PATH_PROPERTY]
os_path_exists_mock.return_value = False
purge_stacks_and_mpacks(None)
os_path_exists_calls = []
os_path_exists_mock.assert_has_calls(os_path_exists_calls)
purge_stacks_and_mpacks(options.purge_list.split(","))
os_path_exists_calls = [
call(stacks_directory),
call(mpacks_directory)
]
os_path_exists_mock.assert_has_calls(os_path_exists_calls)
options.purge_list = ",".join([STACK_DEFINITIONS_RESOURCE_NAME, SERVICE_DEFINITIONS_RESOURCE_NAME, MPACKS_RESOURCE_NAME])
purge_stacks_and_mpacks(options.purge_list.split(","))
os_path_exists_calls = [
call(stacks_directory),
call(common_services_directory),
call(mpacks_directory)
]
os_path_exists_mock.assert_has_calls(os_path_exists_calls)
options.purge_list = ",".join([STACK_DEFINITIONS_RESOURCE_NAME, SERVICE_DEFINITIONS_RESOURCE_NAME, MPACKS_RESOURCE_NAME])
options.replay_mode = True
purge_stacks_and_mpacks(options.purge_list.split(","))
os_path_exists_calls = [
call(stacks_directory),
call(common_services_directory)
]
os_path_exists_mock.assert_has_calls(os_path_exists_calls)
@patch("os.path.exists")
@patch("ambari_server.setupMpacks.extract_archive")
@patch("ambari_server.setupMpacks.get_archive_root_dir")
@patch("ambari_server.setupMpacks.download_mpack")
def test_install_mpack_with_malformed_mpack(self, download_mpack_mock, get_archive_root_dir_mock, extract_archive_mock, os_path_exists_mock):
options = self._create_empty_options_mock()
options.mpack_path = "/path/to/mpack.tar.gz"
download_mpack_mock.return_value = "/tmp/mpack.tar.gz"
os_path_exists_mock.return_value = True
get_archive_root_dir_mock.return_value = None
fail = False
try:
install_mpack(options)
except FatalException as e:
self.assertEquals("Malformed management pack. Root directory missing!", e.reason)
fail = True
self.assertTrue(fail)
get_archive_root_dir_mock.return_value = "mpack"
os_path_exists_mock.side_effect = [True, False, False]
extract_archive_mock.return_value = None
fail = False
try:
install_mpack(options)
except FatalException as e:
self.assertEquals("Malformed management pack. Failed to expand management pack!", e.reason)
fail = True
self.assertTrue(fail)
get_archive_root_dir_mock.return_value = "mpack"
os_path_exists_mock.side_effect = [True, False, True, False]
extract_archive_mock.return_value = None
fail = False
try:
install_mpack(options)
except FatalException as e:
self.assertEquals("Malformed management pack {0}. Metadata file missing!".format(options.mpack_path), e.reason)
fail = True
self.assertTrue(fail)
@patch("os.path.exists")
@patch("shutil.move")
@patch("os.mkdir")
@patch("ambari_server.setupMpacks.create_symlink")
@patch("ambari_server.setupMpacks.get_ambari_version")
@patch("ambari_server.setupMpacks.get_ambari_properties")
@patch("ambari_server.setupMpacks.add_replay_log")
@patch("ambari_server.setupMpacks.purge_stacks_and_mpacks")
@patch("ambari_server.setupMpacks.expand_mpack")
@patch("ambari_server.setupMpacks.download_mpack")
def test_install_stack_mpack(self, download_mpack_mock, expand_mpack_mock, purge_stacks_and_mpacks_mock,
add_replay_log_mock, get_ambari_properties_mock, get_ambari_version_mock,
create_symlink_mock, os_mkdir_mock, shutil_move_mock, os_path_exists_mock):
options = self._create_empty_options_mock()
options.mpack_path = "/path/to/mystack.tar.gz"
options.purge = True
download_mpack_mock.return_value = "/tmp/mystack.tar.gz"
expand_mpack_mock.return_value = "mpacks/mystack-ambari-mpack-1.0.0.0"
get_ambari_version_mock.return_value = "2.4.0.0"
"""
os_path_exists_calls = [call('/tmp/mystack.tar.gz'),
call('mpacks/mystack-ambari-mpack-1.0.0.0/mpack.json'),
call('/var/lib/ambari-server/resources/stacks'),
call('/var/lib/ambari-server/resources/common-services'),
call(mpacks_directory),
call(mpacks_directory + '/cache'),
call(mpacks_directory + '/mystack-ambari-mpack-1.0.0.0'),
call('/var/lib/ambari-server/resources/common-services/SERVICEA'),
call('/var/lib/ambari-server/resources/common-services/SERVICEB'),
call('/var/lib/ambari-server/resources/stacks/MYSTACK'),
call('/var/lib/ambari-server/resources/stacks/MYSTACK/1.0'),
call('/var/lib/ambari-server/resources/stacks/MYSTACK/1.0/services'),
call('/var/lib/ambari-server/resources/stacks/MYSTACK/1.1'),
call('/var/lib/ambari-server/resources/stacks/MYSTACK/1.1/services'),
call('/var/lib/ambari-server/resources/stacks/MYSTACK/2.0'),
call('/var/lib/ambari-server/resources/stacks/MYSTACK/2.0/services')]
"""
os_path_exists_mock.side_effect = [True, True, False, False, False, False,
False, False, False, False, False, False,
False, False, False, False]
get_ambari_properties_mock.return_value = configs
shutil_move_mock.return_value = True
install_mpack(options)
stacks_directory = configs[STACK_LOCATION_KEY]
common_services_directory = configs[COMMON_SERVICES_PATH_PROPERTY]
mpacks_directory = configs[MPACKS_STAGING_PATH_PROPERTY]
mpacks_staging_directory = os.path.join(mpacks_directory, "mystack-ambari-mpack-1.0.0.0")
os_mkdir_calls = [
call(stacks_directory),
call(common_services_directory),
call(mpacks_directory),
call(mpacks_directory + '/cache'),
call(os.path.join(common_services_directory, "SERVICEA")),
call(os.path.join(common_services_directory, "SERVICEB")),
call(os.path.join(stacks_directory, "MYSTACK")),
call(os.path.join(stacks_directory, "MYSTACK/1.0")),
call(os.path.join(stacks_directory, "MYSTACK/1.0/services")),
call(os.path.join(stacks_directory, "MYSTACK/1.1")),
call(os.path.join(stacks_directory, "MYSTACK/1.1/services")),
call(os.path.join(stacks_directory, "MYSTACK/2.0")),
call(os.path.join(stacks_directory, "MYSTACK/2.0/services"))
]
create_symlink_calls = [
call(os.path.join(mpacks_staging_directory, "common-services/SERVICEA"),
os.path.join(common_services_directory, "SERVICEA"),
"1.0", None),
call(os.path.join(mpacks_staging_directory, "common-services/SERVICEA"),
os.path.join(common_services_directory, "SERVICEA"),
"2.0", None),
call(os.path.join(mpacks_staging_directory, "common-services/SERVICEB"),
os.path.join(common_services_directory, "SERVICEB"),
"1.0.0", None),
call(os.path.join(mpacks_staging_directory, "common-services/SERVICEB"),
os.path.join(common_services_directory, "SERVICEB"),
"2.0.0", None),
call(os.path.join(mpacks_staging_directory, "stacks/MYSTACK/1.0"),
os.path.join(stacks_directory, "MYSTACK/1.0"),
"metainfo.xml", None),
call(os.path.join(mpacks_staging_directory, "stacks/MYSTACK/1.0/services"),
os.path.join(stacks_directory, "MYSTACK/1.0/services"),
"SERVICEA", None),
call(os.path.join(mpacks_staging_directory, "stacks/MYSTACK/1.1"),
os.path.join(stacks_directory, "MYSTACK/1.1"),
"metainfo.xml", None),
call(os.path.join(mpacks_staging_directory, "stacks/MYSTACK/1.1/services"),
os.path.join(stacks_directory, "MYSTACK/1.1/services"),
"SERVICEA", None),
call(os.path.join(mpacks_staging_directory, "stacks/MYSTACK/2.0"),
os.path.join(stacks_directory, "MYSTACK/2.0"),
"metainfo.xml", None),
call(os.path.join(mpacks_staging_directory, "stacks/MYSTACK/2.0/services"),
os.path.join(stacks_directory, "MYSTACK/2.0/services"),
"SERVICEA", None),
call(os.path.join(mpacks_staging_directory, "stacks/MYSTACK/2.0/services"),
os.path.join(stacks_directory, "MYSTACK/2.0/services"),
"SERVICEB", None)
]
self.assertTrue(purge_stacks_and_mpacks_mock.called)
os_mkdir_mock.assert_has_calls(os_mkdir_calls)
create_symlink_mock.assert_has_calls(create_symlink_calls)
self.assertTrue(add_replay_log_mock.called)
@patch("os.path.exists")
@patch("os.path.isdir")
@patch("os.symlink")
@patch("shutil.move")
@patch("os.mkdir")
@patch("ambari_server.setupMpacks.create_symlink")
@patch("ambari_server.setupMpacks.get_ambari_version")
@patch("ambari_server.setupMpacks.get_ambari_properties")
@patch("ambari_server.setupMpacks.add_replay_log")
@patch("ambari_server.setupMpacks.purge_stacks_and_mpacks")
@patch("ambari_server.setupMpacks.expand_mpack")
@patch("ambari_server.setupMpacks.download_mpack")
def test_install_addon_service_mpack(self, download_mpack_mock, expand_mpack_mock, purge_stacks_and_mpacks_mock,
add_replay_log_mock, get_ambari_properties_mock, get_ambari_version_mock,
create_symlink_mock, os_mkdir_mock, shutil_move_mock,os_symlink_mock,
os_path_isdir_mock, os_path_exists_mock):
options = self._create_empty_options_mock()
options.mpack_path = "/path/to/myservice.tar.gz"
options.purge = False
download_mpack_mock.return_value = "/tmp/myservice.tar.gz"
expand_mpack_mock.return_value = "mpacks/myservice-ambari-mpack-1.0.0.0"
get_ambari_version_mock.return_value = "2.4.0.0"
"""
os_path_exists_calls = [call('/tmp/myservice.tar.gz'),
call('mpacks/myservice-ambari-mpack-1.0.0.0/mpack.json'),
call('/var/lib/ambari-server/resources/stacks/MYSTACK/1.0'),
call('/var/lib/ambari-server/resources/stacks'),
call('/var/lib/ambari-server/resources/common-services'),
call(mpacks_directory),
call(mpacks_directory + '/cache'),
call(mpacks_directory + '/myservice-ambari-mpack-1.0.0.0'),
call('/var/lib/ambari-server/resources/common-services/MYSERVICE'),
call('/var/lib/ambari-server/resources/stacks/MYSTACK'),
call('/var/lib/ambari-server/resources/stacks/MYSTACK/1.0'),
call('/var/lib/ambari-server/resources/stacks/MYSTACK/1.0/services'),
call('/var/lib/ambari-server/resources/stacks/MYSTACK'),
call('/var/lib/ambari-server/resources/stacks/MYSTACK/2.0'),
call('/var/lib/ambari-server/resources/stacks/MYSTACK/2.0/services')]
"""
os_path_exists_mock.side_effect = [True, True, True, True, True,
True, True, False, False, True,
True, True, True, True, True]
get_ambari_properties_mock.return_value = configs
shutil_move_mock.return_value = True
os_path_isdir_mock.return_value = True
install_mpack(options)
stacks_directory = configs[STACK_LOCATION_KEY]
common_services_directory = configs[COMMON_SERVICES_PATH_PROPERTY]
mpacks_directory = configs[MPACKS_STAGING_PATH_PROPERTY]
mpacks_staging_directory = os.path.join(mpacks_directory, "myservice-ambari-mpack-1.0.0.0")
os_mkdir_calls = [
call(os.path.join(common_services_directory, "MYSERVICE"))
]
os_symlink_calls = [
call(os.path.join(mpacks_staging_directory, "common-services/MYSERVICE/1.0.0"),
os.path.join(common_services_directory, "MYSERVICE/1.0.0")),
call(os.path.join(mpacks_staging_directory, "custom-services/MYSERVICE/1.0.0"),
os.path.join(stacks_directory, "MYSTACK/1.0/services/MYSERVICE")),
call(os.path.join(mpacks_staging_directory, "custom-services/MYSERVICE/2.0.0"),
os.path.join(stacks_directory, "MYSTACK/2.0/services/MYSERVICE"))
]
self.assertFalse(purge_stacks_and_mpacks_mock.called)
os_mkdir_mock.assert_has_calls(os_mkdir_calls)
os_symlink_mock.assert_has_calls(os_symlink_calls)
self.assertTrue(add_replay_log_mock.called)
@patch("os.path.exists")
@patch("shutil.move")
@patch("os.mkdir")
@patch("ambari_server.setupMpacks.create_symlink")
@patch("ambari_server.setupMpacks.get_ambari_version")
@patch("ambari_server.setupMpacks.get_ambari_properties")
@patch("ambari_server.setupMpacks.add_replay_log")
@patch("ambari_server.setupMpacks.uninstall_mpack")
@patch("ambari_server.setupMpacks.purge_stacks_and_mpacks")
@patch("ambari_server.setupMpacks.expand_mpack")
@patch("ambari_server.setupMpacks.download_mpack")
def test_upgrade_stack_mpack(self, download_mpack_mock, expand_mpack_mock, purge_stacks_and_mpacks_mock,
uninstall_mpack_mock, add_replay_log_mock, get_ambari_properties_mock,
get_ambari_version_mock, create_symlink_mock, os_mkdir_mock, shutil_move_mock,
os_path_exists_mock):
options = self._create_empty_options_mock()
options.mpack_path = "/path/to/mystack-1.0.0.1.tar.gz"
download_mpack_mock.return_value = "/tmp/mystack-1.0.0.1.tar.gz"
expand_mpack_mock.return_value = "mpacks/mystack-ambari-mpack-1.0.0.1"
get_ambari_version_mock.return_value = "2.4.0.0"
"""
os_path_exists_calls = [call('/tmp/mystack-1.0.0.1.tar.gz'),
call('mpacks/mystack-ambari-mpack-1.0.0.1/mpack.json'),
call('/var/lib/ambari-server/resources/stacks'),
call('/var/lib/ambari-server/resources/common-services'),
call(mpacks_directory),
call(mpacks_directory + '/cache'),
call(mpacks_directory + '/mystack-ambari-mpack-1.0.0.1'),
call('/var/lib/ambari-server/resources/common-services/SERVICEA'),
call('/var/lib/ambari-server/resources/common-services/SERVICEB'),
call('/var/lib/ambari-server/resources/common-services/SERVICEC'),
call('/var/lib/ambari-server/resources/stacks/MYSTACK'),
call('/var/lib/ambari-server/resources/stacks/MYSTACK/1.0'),
call('/var/lib/ambari-server/resources/stacks/MYSTACK/1.0/services'),
call('/var/lib/ambari-server/resources/stacks/MYSTACK/1.1'),
call('/var/lib/ambari-server/resources/stacks/MYSTACK/1.1/services'),
call('/var/lib/ambari-server/resources/stacks/MYSTACK/2.0'),
call('/var/lib/ambari-server/resources/stacks/MYSTACK/2.0/services'),
call('/var/lib/ambari-server/resources/stacks/MYSTACK/3.0'),
call('/var/lib/ambari-server/resources/stacks/MYSTACK/3.0/services'),
call(mpacks_directory),
call(mpacks_directory + '/myservice-ambari-mpack-1.0.0.0/mpack.json'),
call(mpacks_directory + '/mystack-ambari-mpack-1.0.0.0/mpack.json'),
call(mpacks_directory + '/mystack-ambari-mpack-1.0.0.1/mpack.json')]
"""
os_path_exists_mock.side_effect = [True, True, True, True, True, True,
False, True, True, False, True, True, True,
True, True, True, True, False, False,
True, True, True, True]
get_ambari_properties_mock.return_value = configs
shutil_move_mock.return_value = True
upgrade_mpack(options)
stacks_directory = configs[STACK_LOCATION_KEY]
common_services_directory = configs[COMMON_SERVICES_PATH_PROPERTY]
mpacks_directory = configs[MPACKS_STAGING_PATH_PROPERTY]
mpacks_staging_directory = os.path.join(mpacks_directory, "mystack-ambari-mpack-1.0.0.1")
os_mkdir_calls = [
call(os.path.join(common_services_directory, "SERVICEC")),
call(os.path.join(stacks_directory, "MYSTACK/3.0")),
call(os.path.join(stacks_directory, "MYSTACK/3.0/services"))
]
create_symlink_calls = [
call(os.path.join(mpacks_staging_directory, "common-services/SERVICEA"),
os.path.join(common_services_directory, "SERVICEA"),
"1.0", True),
call(os.path.join(mpacks_staging_directory, "common-services/SERVICEA"),
os.path.join(common_services_directory, "SERVICEA"),
"2.0", True),
call(os.path.join(mpacks_staging_directory, "common-services/SERVICEB"),
os.path.join(common_services_directory, "SERVICEB"),
"1.0.0", True),
call(os.path.join(mpacks_staging_directory, "common-services/SERVICEB"),
os.path.join(common_services_directory, "SERVICEB"),
"2.0.0", True),
call(os.path.join(mpacks_staging_directory, "common-services/SERVICEC"),
os.path.join(common_services_directory, "SERVICEC"),
"1.0.0", True),
call(os.path.join(mpacks_staging_directory, "common-services/SERVICEC"),
os.path.join(common_services_directory, "SERVICEC"),
"2.0.0", True),
call(os.path.join(mpacks_staging_directory, "stacks/MYSTACK/1.0"),
os.path.join(stacks_directory, "MYSTACK/1.0"),
"metainfo.xml", True),
call(os.path.join(mpacks_staging_directory, "stacks/MYSTACK/1.0/services"),
os.path.join(stacks_directory, "MYSTACK/1.0/services"),
"SERVICEA", True),
call(os.path.join(mpacks_staging_directory, "stacks/MYSTACK/1.1"),
os.path.join(stacks_directory, "MYSTACK/1.1"),
"metainfo.xml", True),
call(os.path.join(mpacks_staging_directory, "stacks/MYSTACK/1.1/services"),
os.path.join(stacks_directory, "MYSTACK/1.1/services"),
"SERVICEA", True),
call(os.path.join(mpacks_staging_directory, "stacks/MYSTACK/2.0"),
os.path.join(stacks_directory, "MYSTACK/2.0"),
"metainfo.xml", True),
call(os.path.join(mpacks_staging_directory, "stacks/MYSTACK/2.0/services"),
os.path.join(stacks_directory, "MYSTACK/2.0/services"),
"SERVICEA", True),
call(os.path.join(mpacks_staging_directory, "stacks/MYSTACK/2.0/services"),
os.path.join(stacks_directory, "MYSTACK/2.0/services"),
"SERVICEB", True),
call(os.path.join(mpacks_staging_directory, "stacks/MYSTACK/3.0"),
os.path.join(stacks_directory, "MYSTACK/3.0"),
"metainfo.xml", True),
call(os.path.join(mpacks_staging_directory, "stacks/MYSTACK/3.0/services"),
os.path.join(stacks_directory, "MYSTACK/3.0/services"),
"SERVICEA", True),
call(os.path.join(mpacks_staging_directory, "stacks/MYSTACK/3.0/services"),
os.path.join(stacks_directory, "MYSTACK/3.0/services"),
"SERVICEB", True),
call(os.path.join(mpacks_staging_directory, "stacks/MYSTACK/3.0/services"),
os.path.join(stacks_directory, "MYSTACK/3.0/services"),
"SERVICEC", True)
]
self.assertFalse(purge_stacks_and_mpacks_mock.called)
os_mkdir_mock.assert_has_calls(os_mkdir_calls)
create_symlink_mock.assert_has_calls(create_symlink_calls)
uninstall_mpack_mock.assert_has_calls([call("mystack-ambari-mpack", "1.0.0.0")])
self.assertTrue(add_replay_log_mock.called)
@patch("os.path.exists")
@patch("ambari_server.setupMpacks.get_replay_log_file")
@patch("ambari_server.setupMpacks.upgrade_mpack")
@patch("ambari_server.setupMpacks.install_mpack")
def test_replay_mpack_logs(self, install_mpack_mock, upgrade_mpack_mock, get_replay_log_file_mock, os_path_exists_mock):
test_directory = os.path.dirname(os.path.abspath(__file__))
resources_directory = os.path.join(test_directory, os.pardir, "resources")
get_replay_log_file_mock.return_value = os.path.join(resources_directory, "mpacks_replay.log")
os_path_exists_mock.return_value = True
replay_mpack_logs()
install_replay_options = {
'purge' : True,
'mpack_command' : 'install-mpack',
'mpack_path': '/var/lib/ambari-server/resources/mpacks/cache/hdp-1.0.0.0.tar.gz',
'force': False,
'verbose': True
}
upgrade_replay_options = {
'purge' : False,
'mpack_command' : 'upgrade-mpack',
'mpack_path': '/var/lib/ambari-server/resources/mpacks/cache/hdp-1.0.0.1.tar.gz',
'force': True,
'verbose': True
}
install_mpack_mock.assert_has_calls([call(install_replay_options, replay_mode=True)])
upgrade_mpack_mock.assert_has_calls([call(upgrade_replay_options, replay_mode=True)])
def _create_empty_options_mock(self):
options = MagicMock()
options.mpack_path = None
options.purge = None
options.purge_list = ",".join([STACK_DEFINITIONS_RESOURCE_NAME, MPACKS_RESOURCE_NAME])
options.force = None
options.verbose = None
options.replay_mode = False
return options
| 50.438776
| 143
| 0.677564
|
415595ea9d2e259a932ac79ada97a99dcff2b755
| 11,011
|
py
|
Python
|
rally/plugins/openstack/context/keystone/users.py
|
mail2nsrajesh/rally
|
d8995226fe75c573d6d64c7ade8a4ceca0758366
|
[
"Apache-2.0"
] | null | null | null |
rally/plugins/openstack/context/keystone/users.py
|
mail2nsrajesh/rally
|
d8995226fe75c573d6d64c7ade8a4ceca0758366
|
[
"Apache-2.0"
] | null | null | null |
rally/plugins/openstack/context/keystone/users.py
|
mail2nsrajesh/rally
|
d8995226fe75c573d6d64c7ade8a4ceca0758366
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import uuid
from oslo_config import cfg
from rally.common import broker
from rally.common.i18n import _
from rally.common import logging
from rally.common import utils as rutils
from rally import consts
from rally import exceptions
from rally import osclients
from rally.plugins.openstack import credential
from rally.plugins.openstack.services.identity import identity
from rally.plugins.openstack.wrappers import network
from rally.task import context
from rally.task import validation
from rally.common import opts
opts.register()
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
RESOURCE_MANAGEMENT_WORKERS_DESCR = ("The number of concurrent threads to use "
"for serving users context.")
PROJECT_DOMAIN_DESCR = "ID of domain in which projects will be created."
USER_DOMAIN_DESCR = "ID of domain in which users will be created."
@validation.add("required_platform", platform="openstack", admin=True)
@context.configure(name="users", namespace="openstack", order=100)
class UserGenerator(context.Context):
"""Context class for generating temporary users/tenants for benchmarks."""
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"tenants": {
"type": "integer",
"minimum": 1,
"description": "The number of tenants to create."
},
"users_per_tenant": {
"type": "integer",
"minimum": 1,
"description": "The number of users to create per one tenant."
},
"resource_management_workers": {
"type": "integer",
"minimum": 1,
"description": RESOURCE_MANAGEMENT_WORKERS_DESCR,
},
"project_domain": {
"type": "string",
"description": PROJECT_DOMAIN_DESCR
},
"user_domain": {
"type": "string",
"description": USER_DOMAIN_DESCR
},
"user_choice_method": {
"enum": ["random", "round_robin"],
"description": "The mode of balancing usage of users between "
"scenario iterations."
},
},
"additionalProperties": False
}
DEFAULT_CONFIG = {
"tenants": 1,
"users_per_tenant": 1,
"resource_management_workers":
cfg.CONF.users_context.resource_management_workers,
"user_choice_method": "random",
}
def __init__(self, context):
self.credential = context["admin"]["credential"]
project_domain = (self.credential.project_domain_name or
cfg.CONF.users_context.project_domain)
user_domain = (self.credential.user_domain_name or
cfg.CONF.users_context.user_domain)
self.DEFAULT_CONFIG["project_domain"] = project_domain
self.DEFAULT_CONFIG["user_domain"] = user_domain
super(UserGenerator, self).__init__(context)
def _remove_default_security_group(self):
"""Delete default security group for tenants."""
clients = osclients.Clients(self.credential)
if consts.Service.NEUTRON not in clients.services().values():
return
use_sg, msg = network.wrap(clients, self).supports_extension(
"security-group")
if not use_sg:
LOG.debug("Security group context is disabled: %s" % msg)
return
for user, tenant_id in rutils.iterate_per_tenants(
self.context["users"]):
with logging.ExceptionLogger(
LOG, _("Unable to delete default security group")):
uclients = osclients.Clients(user["credential"])
security_groups = uclients.neutron().list_security_groups()
default = [sg for sg in security_groups["security_groups"]
if sg["name"] == "default"]
if default:
clients.neutron().delete_security_group(default[0]["id"])
def _create_tenants(self):
threads = self.config["resource_management_workers"]
tenants = collections.deque()
def publish(queue):
for i in range(self.config["tenants"]):
args = (self.config["project_domain"], self.task["uuid"], i)
queue.append(args)
def consume(cache, args):
domain, task_id, i = args
if "client" not in cache:
clients = osclients.Clients(self.credential)
cache["client"] = identity.Identity(
clients, name_generator=self.generate_random_name)
tenant = cache["client"].create_project(domain_name=domain)
tenant_dict = {"id": tenant.id, "name": tenant.name, "users": []}
tenants.append(tenant_dict)
# NOTE(msdubov): consume() will fill the tenants list in the closure.
broker.run(publish, consume, threads)
tenants_dict = {}
for t in tenants:
tenants_dict[t["id"]] = t
return tenants_dict
def _create_users(self):
# NOTE(msdubov): This should be called after _create_tenants().
threads = self.config["resource_management_workers"]
users_per_tenant = self.config["users_per_tenant"]
default_role = cfg.CONF.users_context.keystone_default_role
users = collections.deque()
def publish(queue):
for tenant_id in self.context["tenants"]:
for user_id in range(users_per_tenant):
username = self.generate_random_name()
password = str(uuid.uuid4())
args = (username, password, self.config["project_domain"],
self.config["user_domain"], tenant_id)
queue.append(args)
def consume(cache, args):
username, password, project_dom, user_dom, tenant_id = args
if "client" not in cache:
clients = osclients.Clients(self.credential)
cache["client"] = identity.Identity(
clients, name_generator=self.generate_random_name)
client = cache["client"]
user = client.create_user(username, password=password,
project_id=tenant_id,
domain_name=user_dom,
default_role=default_role)
user_credential = credential.OpenStackCredential(
auth_url=self.credential.auth_url,
username=user.name,
password=password,
tenant_name=self.context["tenants"][tenant_id]["name"],
permission=consts.EndpointPermission.USER,
project_domain_name=project_dom,
user_domain_name=user_dom,
endpoint_type=self.credential.endpoint_type,
https_insecure=self.credential.https_insecure,
https_cacert=self.credential.https_cacert,
region_name=self.credential.region_name,
profiler_hmac_key=self.credential.profiler_hmac_key)
users.append({"id": user.id,
"credential": user_credential,
"tenant_id": tenant_id})
# NOTE(msdubov): consume() will fill the users list in the closure.
broker.run(publish, consume, threads)
return list(users)
def _get_consumer_for_deletion(self, func_name):
def consume(cache, resource_id):
if "client" not in cache:
clients = osclients.Clients(self.credential)
cache["client"] = identity.Identity(clients)
getattr(cache["client"], func_name)(resource_id)
return consume
def _delete_tenants(self):
threads = self.config["resource_management_workers"]
def publish(queue):
for tenant_id in self.context["tenants"]:
queue.append(tenant_id)
broker.run(publish, self._get_consumer_for_deletion("delete_project"),
threads)
self.context["tenants"] = {}
def _delete_users(self):
threads = self.config["resource_management_workers"]
def publish(queue):
for user in self.context["users"]:
queue.append(user["id"])
broker.run(publish, self._get_consumer_for_deletion("delete_user"),
threads)
self.context["users"] = []
@logging.log_task_wrapper(LOG.info, _("Enter context: `users`"))
def setup(self):
"""Create tenants and users, using the broker pattern."""
super(UserGenerator, self).setup()
self.context["users"] = []
self.context["tenants"] = {}
self.context["user_choice_method"] = self.config["user_choice_method"]
threads = self.config["resource_management_workers"]
LOG.debug("Creating %(tenants)d tenants using %(threads)s threads" %
{"tenants": self.config["tenants"], "threads": threads})
self.context["tenants"] = self._create_tenants()
if len(self.context["tenants"]) < self.config["tenants"]:
raise exceptions.ContextSetupFailure(
ctx_name=self.get_name(),
msg=_("Failed to create the requested number of tenants."))
users_num = self.config["users_per_tenant"] * self.config["tenants"]
LOG.debug("Creating %(users)d users using %(threads)s threads" %
{"users": users_num, "threads": threads})
self.context["users"] = self._create_users()
for user in self.context["users"]:
self.context["tenants"][user["tenant_id"]]["users"].append(user)
if len(self.context["users"]) < users_num:
raise exceptions.ContextSetupFailure(
ctx_name=self.get_name(),
msg=_("Failed to create the requested number of users."))
@logging.log_task_wrapper(LOG.info, _("Exit context: `users`"))
def cleanup(self):
"""Delete tenants and users, using the broker pattern."""
self._remove_default_security_group()
self._delete_users()
self._delete_tenants()
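# --- Editor's sketch (not part of rally): a minimal, self-contained illustration of the
# publish/consume broker pattern used by _create_tenants() and _create_users() above.
# The real rally.common.broker.run() is more robust; this only mirrors the shape of its API,
# and the helper name _broker_run_sketch is invented for illustration.
import collections as _collections
import threading as _threading
def _broker_run_sketch(publish, consume, workers):
    queue = _collections.deque()
    publish(queue)                      # the producer fills the work queue up front
    def _worker():
        cache = {}                      # per-thread cache, matching the consume(cache, args) signature
        while True:
            try:
                args = queue.popleft()  # deque.popleft() is atomic, so this is safe across threads
            except IndexError:
                return                  # queue drained, worker exits
            consume(cache, args)
    threads = [_threading.Thread(target=_worker) for _ in range(workers)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()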
| 39.894928
| 79
| 0.602398
|
08efa4d3894d298a4647e9c159af690241f59809
| 4,436
|
py
|
Python
|
predict.py
|
madtomy/udacity_image_classifier
|
e5f6fa54ebe8f405b99905a12c29588e9aaf4d1d
|
[
"MIT"
] | null | null | null |
predict.py
|
madtomy/udacity_image_classifier
|
e5f6fa54ebe8f405b99905a12c29588e9aaf4d1d
|
[
"MIT"
] | null | null | null |
predict.py
|
madtomy/udacity_image_classifier
|
e5f6fa54ebe8f405b99905a12c29588e9aaf4d1d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
'''
usage: python predict.py /path/to/image checkpoint
Options:
- Return most likely classes: python predict.py input checkpoint --top_k 3
- Use a mapping of categories to names: python predict.py input checkpoint --category_names cat_to_name.json
- Use GPU for inference: python predict.py input checkpoint --gpu
'''
import argparse
import numpy as np
import torch
from PIL import Image
import json
class Predict_class:
@staticmethod
def process_image(img_path):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns a NumPy array
'''
img = Image.open(img_path)
width,height = img.size
aspect_ratio = width / height
if aspect_ratio > 1 :
img = img.resize((round(aspect_ratio*256),256))
else:
img = img.resize((256,round(256/aspect_ratio)))
#Crop
width, height =img.size
n_width = 224
n_height = 224
top = (height - n_height)/2
right = (width + n_width)/2
bottom = (height + n_height) / 2
left = (width - n_width)/2
img = img.crop((round(left),round(top),round(right),round(bottom)))
        # convert to [0, 1], apply the standard ImageNet mean/std normalization,
        # then reorder dimensions to (channels, height, width)
        np_img = np.array(img) / 255
        np_img = (np_img - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
np_img = np_img.transpose((2,0,1))
return np_img
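    # Editor's note (worked example, not in the original script): for a 640x480 input,
    # aspect_ratio = 4/3, so the image is first resized to (341, 256) and then center-cropped
    # with box (58, 16, 282, 240) under Python 3's round-half-to-even, giving a 224x224 array.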
@staticmethod
def predict(np_image, model,gpu,topk=5):
''' Predict the class (or classes) of an image using a trained deep learning model.'''
device = torch.device("cuda:0" if gpu else "cpu")
model.to(device)
model.eval()
with torch.no_grad():
imgs = torch.from_numpy(np_image)
imgs = imgs.unsqueeze(0)
imgs = imgs.type(torch.FloatTensor)
imgs = imgs.to(device)
out = model.forward(imgs)
ps = torch.exp(out)
pbs, inds = torch.topk(ps,topk)
pbs = [float(pb) for pb in pbs[0]]
inv_map = {val:key for key, val in model.class_to_idx.items()}
clss = [inv_map[int(idx)] for idx in inds[0]]
return pbs, clss
# Get the command line input
parser = argparse.ArgumentParser()
parser.add_argument('image_path', action='store',
default = 'flowers/test/1/image_06743.jpg',
help='Path to image, e.g., "flowers/test/1/image_06743.jpg"')
parser.add_argument('checkpoint', action='store',
default = '.',
help='Directory of saved checkpoints, e.g., "assets"')
# Return top K most likely classes
parser.add_argument('--top_k', action='store',
default = 5,
dest='top_k',
                    help='Return top K most likely classes, e.g., 5')
# Use a mapping of categories to real names
parser.add_argument('--category_names', action='store',
default = 'cat_to_name.json',
dest='category_names',
help='File name of the mapping of flower categories to real names, e.g., "cat_to_name.json"')
# Use GPU for inference
parser.add_argument('--gpu', action='store_true',
default=False,
dest='gpu',
help='Use GPU for inference, set a switch to true')
parse_results = parser.parse_args()
image_path = parse_results.image_path
checkpoint = parse_results.checkpoint
top_k = int(parse_results.top_k)
category_names = parse_results.category_names
gpu = parse_results.gpu
# Label mapping
with open(category_names, 'r') as f:
cat_to_name = json.load(f)
# Load the checkpoint
filepath = checkpoint + '/checkpoint.pth'
checkpoint = torch.load(filepath, map_location='cpu')
model = checkpoint["model"]
model.load_state_dict(checkpoint['state_dict'])
#Create an object of class predict
pred_obj = Predict_class()
# Image preprocessing
np_image = pred_obj.process_image(image_path)
# Predict class and probabilities
print(f"Predicting top {top_k} most likely flower names from image {image_path}.")
probs, classes = pred_obj.predict(np_image, model,gpu, top_k )
classes_name = [cat_to_name[class_i] for class_i in classes]
print("\nFlower name (probability): ")
print("*********")
for i in range(len(probs)):
print(f"{classes_name[i]} ({round(probs[i], 3)})")
print("")
| 34.123077
| 113
| 0.626465
|
fd3afbdc6f2c5b3ea514b48e0cda0e1b0c831abc
| 4,716
|
py
|
Python
|
recipes/Python/577840_Josephus_problem/recipe-577840.py
|
tdiprima/code
|
61a74f5f93da087d27c70b2efe779ac6bd2a3b4f
|
[
"MIT"
] | 2,023
|
2017-07-29T09:34:46.000Z
|
2022-03-24T08:00:45.000Z
|
recipes/Python/577840_Josephus_problem/recipe-577840.py
|
unhacker/code
|
73b09edc1b9850c557a79296655f140ce5e853db
|
[
"MIT"
] | 32
|
2017-09-02T17:20:08.000Z
|
2022-02-11T17:49:37.000Z
|
recipes/Python/577840_Josephus_problem/recipe-577840.py
|
unhacker/code
|
73b09edc1b9850c557a79296655f140ce5e853db
|
[
"MIT"
] | 780
|
2017-07-28T19:23:28.000Z
|
2022-03-25T20:39:41.000Z
|
#!/usr/bin/env python
# let's talk about gladiators and the survivor (Josephus)
import sys
from math import log
CLDTRS_NUMBER = 0
last2first = lambda L : L[-1::] + L[0:len(L) -1]
def wholives(n):
    ## --- We search within the lowest-to-highest power of 2 in which the n Gladiators reside ---
    ## wholives assumes each Gladiator has been assigned a number in ascending order
    ## (the Gladiators keep whatever order they have to stand in; the numbers simply follow that order).
    ## wholives is a FAST FUNCTION WITH CONSTANT TIME COMPLEXITY.
    ## We compute the log2 of the number of Gladiators. If it is not an integer, we take the next
    ## exponent (its successor) as the Base exponent, subtract one from 2 raised to that exponent,
    ## subtract the number of Gladiators from that base power, and finally subtract the result
    ## from the number of Gladiators itself.
    ## The key observation is that between one power of 2 (minus 1) and the next, the survivor for
    ## n Gladiators can be found just by subtracting n from the nearest higher power of 2 (minus 1)
    ## and then subtracting that difference from n itself.
    ## To select the correct nearest higher exponent efficiently we simply compute log2 of n:
    ## if it is an integer, n is an exact power of two and the count restarts at 1 (see the branch below);
    ## if it is not, we take the next higher exponent as our Base exponent.
    ## We are not interested in the fractional value of log2 itself, since the subtractions between
    ## the limits of the lower and higher powers already give us the result.
    # there are two base cases:
    # if there are two Gladiators, the first survives because he holds the Sword
    # if there is only one Gladiator, he is already the Survivor
if n == 1:
return 1
if n == 2:
return 1
LogN = log(n,2)
if not LogN.is_integer():
BaseExpo = int(LogN) + 1
BasePower = int(pow(2,BaseExpo)) - 1
Sub = BasePower - n
Res = n - Sub
return Res
else:
#Here we need to restart counting
#eg 7 lives 7 (2^3 -1) ,15 lives 15 (2^4 -1) ,31 lives 31 (2^5 -1) ,63 lives 63 (2^6 -1)\
# 127 lives 127 (2^7 -1 ) so we can just return 1 to restart at 8 , 16 , 32, 64, 128 respectively
# and so on and so forth...
#BaseExpo = int(LogN)
#BasePower = int(pow(2,BaseExpo)) - 1
#Sub = BasePower - n
#Res = n - Sub
#return Res
return 1
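# --- Editor's sketch (not part of the original recipe): a quick sanity check of wholives()
# against the closed-form Josephus answer for the "kill the next one, pass the Sword" game,
# survivor(n) = 2*(n - 2**floor(log2(n))) + 1. The helper name is invented for illustration.
def _check_wholives(limit=64):
    for n in range(1, limit + 1):
        if n & (n - 1) == 0:
            # exact powers of two are decided by the log(n, 2).is_integer() branch above
            # (survivor 1); they are skipped here because that test is floating-point based
            continue
        expected = 2 * (n - 2 ** (n.bit_length() - 1)) + 1
        assert wholives(n) == expected, (n, wholives(n), expected)
    return True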
def isNotEven(x):
if not x % 2:
return False
else:
return True
def PrepareCladiators(NUMBER):
cladiators = tuple(xrange(1,NUMBER + 1))
return cladiators
def Survivor(cladiators):
if len(cladiators) < 2:
raise Exception ,"\n\n***** Cladiators must be at least 2!!! ***** \n"
##
##
## print"\nCeasar says:\n\tLET THE DEATH MATCH BEGIN!!!\
## \n\nThey started kiling each other... \nEach one kills the next one\
## \nand passes the Sword to the next one alive.. \
## \nthere are all",len(cladiators)," still alive and here they are \n" ,cladiators
FirstClads = len(cladiators)
Clads = cladiators
deathcycle =0
while len(Clads) > 1 :
if isNotEven(len(Clads)):
deathcycle += 1
Clads = Clads[::2]
Clads = last2first(Clads)
##
## print "\n",len(Clads), "left alive after the",deathcycle,"death cycle and "\
## ,FirstClads - len(Clads)," have died till know"
## print "\nThese are the live ones\n",Clads
##
else:
deathcycle += 1
Clads = Clads[::2]
##
## print "\n",len(Clads), "left alive after the",deathcycle,"death cycle and "\
## ,FirstClads - len(Clads)," have died till know"
## if len(Clads) > 1 :
## print "\nThese are the live ones\n",Clads
## else :
## print "\n**** The last Survivor **** \nis:\n***\n\tCladiator",Clads[0]\
## ,"\n\n*********************************"
return Clads[0]
if __name__ == "__main__":
try :
CLDTRS_NUMBER = int(sys.argv[1])
## print "\n\t**** Welcome to the Deadly Arena Survivor ****\n"
## print "\n",CLDTRS_NUMBER," Cladiators will fight and \n",CLDTRS_NUMBER -1 ," Cladiators are going to die ...\n"
## print "\tONLY ONE SHALL SURVIVE...\n"
## print "\tBUT who???\n"
## print " ohhh , HERE THEY ARE .. \n"
cladiators = PrepareCladiators(CLDTRS_NUMBER)
## print cladiators, "\n\n!!! HAIL Ceasar !!! \nthey say loudly..."
print CLDTRS_NUMBER,Survivor(cladiators)
except (IndexError ,ValueError ):
print "Please place one integer value as arguement\n"
| 34.933333
| 131
| 0.635708
|
77c59fcc078ed2cfe6b19594c4cd25ee46ba3d04
| 8,701
|
py
|
Python
|
vizexec.py
|
sunaga-lab/vizexec
|
28a42e4e994c57db7fbc458af2b260899cdf7cdc
|
[
"MIT"
] | null | null | null |
vizexec.py
|
sunaga-lab/vizexec
|
28a42e4e994c57db7fbc458af2b260899cdf7cdc
|
[
"MIT"
] | null | null | null |
vizexec.py
|
sunaga-lab/vizexec
|
28a42e4e994c57db7fbc458af2b260899cdf7cdc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.path.append("./lib")
try:
import pygtk
pygtk.require("2.8")
except:
pass
try:
import gtk
import gtk.glade
import cairo
import pango
import time
except:
sys.exit(1)
import threading
import gobject
from seqdata import SequenceData
from vizexec_server import *
WINDOW_TITLE = "VizEXEC"
class VizexecGUI:
def __init__(self):
self.seqdata = None
self.current_thread_group_id_max = 0
self.UpdateInterval = 10
self.seqdata_lock = threading.RLock()
self.mouse_dragging = False
self.builder = gtk.Builder()
self.builder.add_from_file("data/vizexec_ui.glade")
self.builder.connect_signals(self)
self.import_control("window")
self.import_control("AboutDialog")
self.import_control("drawing_scroll")
self.import_control("drawing_area")
self.import_control("FileChooserDialog")
self.import_control("DlgSaveFile")
self.import_control("TbfInfo")
self.import_control("TvwInfo")
self.import_control("DlgRunServer")
self.import_control("EntPortNum")
self.window.show()
self.hadjust = gtk.Adjustment()
self.adjustment_default_setup(self.hadjust)
self.hadjust.connect("value-changed", self.hadjust_value_changed)
self.vadjust = gtk.Adjustment()
self.adjustment_default_setup(self.vadjust)
self.vadjust.connect("value-changed", self.vadjust_value_changed)
self.drawing_scroll.set_hadjustment(self.hadjust)
self.drawing_scroll.set_vadjustment(self.vadjust)
self.back_buffer = None
self.update_back_buffer()
self.EntPortNum.set_text("5112")
self.new_data()
self.fit_figure_size()
self.updated = False
gobject.timeout_add(self.UpdateInterval, self.update_timeout)
flt = gtk.FileFilter()
flt.set_name("ログファイル")
flt.add_pattern("*.log")
self.FileChooserDialog.add_filter(flt)
self.DlgSaveFile.add_filter(flt)
flt = gtk.FileFilter()
flt.set_name("すべてのファイル")
flt.add_pattern("*")
self.FileChooserDialog.add_filter(flt)
self.DlgSaveFile.add_filter(flt)
pangoFont = pango.FontDescription("monospace 9")
self.TvwInfo.modify_font(pangoFont)
def new_thread_group_id(self):
self.current_thread_group_id_max += 1
return "g" + str(self.current_thread_group_id_max)
def update_back_buffer(self):
alloc = self.drawing_area.get_allocation()
if (self.back_buffer
and self.back_buffer.get_width() == alloc.width
and self.back_buffer.get_height() == alloc.height):
return
self.back_buffer = cairo.ImageSurface(cairo.FORMAT_RGB24, alloc.width, alloc.height)
def fit_figure_size(self):
is_max = self.vadjust.get_value() >= (self.vadjust.get_upper() - self.vadjust.get_page_size() - 32)
alloc = self.drawing_area.get_allocation()
if self.seqdata.get_width() > alloc.width:
self.hadjust.set_upper(self.seqdata.get_width() + 60)
self.hadjust.set_page_size(alloc.width)
if self.seqdata.get_height() > alloc.height:
self.vadjust.set_upper(self.seqdata.get_height() + 60)
self.vadjust.set_page_size(alloc.height)
if is_max:
self.vadjust.set_value(self.vadjust.get_upper() - self.vadjust.get_page_size())
def adjustment_default_setup(self, adj):
adj.set_lower(0)
adj.set_upper(0)
adj.set_page_size(100)
adj.set_step_increment(10)
def import_control(self, name):
setattr(self, name, self.builder.get_object(name))
def btn_clicked(self, w):
print "Btn Clicked."
def window_hide(self, widget):
gtk.main_quit()
def hadjust_value_changed(self, w):
self.redraw()
def vadjust_value_changed(self, w):
self.redraw()
def drawing_area_expose_event_cb(self, w, e):
self.redraw()
def update_timeout(self):
time.sleep(0.001)
if self.updated:
self.redraw()
self.updated = False
gobject.timeout_add(self.UpdateInterval, self.update_timeout)
def redraw(self, check_first = False):
with self.seqdata_lock:
self.update_back_buffer()
self.fit_figure_size()
offset_x = self.hadjust.get_value()
offset_y = self.vadjust.get_value()
alloc = self.drawing_area.get_allocation()
w,h = alloc.width, alloc.height
ctx = cairo.Context(self.back_buffer)
drawarea_ctx = self.drawing_area.window.cairo_create()
# ctx = self.drawing_area.window.cairo_create()
ctx.set_source_rgb(1.0,1.0,1.0)
ctx.rectangle(0,0, w,h)
ctx.fill()
if check_first:
self.seqdata.draw(ctx, offset_x, offset_y, w, h, True)
self.seqdata.draw(ctx, offset_x, offset_y, w, h)
drawarea_ctx.set_source_surface(self.back_buffer, 0, 0)
drawarea_ctx.paint()
def new_data(self):
self.seqdata = SequenceData()
self.redraw()
def open_new(self, filename):
self.new_data()
self.open_append(filename)
self.window.set_title(WINDOW_TITLE + " - File " + filename)
def open_append(self, filename):
read_thread = ReadThread(filename, self)
read_thread.start()
def file_choose(self):
resp = self.FileChooserDialog.run()
self.FileChooserDialog.hide()
if resp == 1:
return "open", self.FileChooserDialog.get_filename()
elif resp == 2:
return "append", self.FileChooserDialog.get_filename()
else:
return "cancel", ""
def MniOpen_activate_cb(self, e):
mode, fn = self.file_choose()
if mode == "open":
self.open_new(fn)
elif mode == "append":
self.open_append(fn)
    def MniOpenAppend_activate_cb(self, e):
        # file_choose() returns a (mode, filename) tuple; for "open append" we always append
        mode, fn = self.file_choose()
        if mode != "cancel":
            self.open_append(fn)
def MniExit_activate_cb(self, e):
gtk.main_quit()
def MniAbout_activate_cb(self, e):
self.AboutDialog.run()
self.AboutDialog.hide()
def drawing_area_button_press_event_cb(self, e, data):
self.mouse_dragging = True
self.mouse_dragging_start = (
self.hadjust.get_value() + data.x,
self.vadjust.get_value() + data.y
)
self.seqdata.selected_object = None
self.seqdata.selected_pos = (data.x,data.y)
self.redraw(True)
self.seqdata.selected_pos = None
if self.seqdata.selected_object:
obj = self.seqdata.selected_object
if hasattr(obj, "get_info_text"):
self.TbfInfo.set_text(obj.get_info_text())
else:
self.TbfInfo.set_text(str(obj))
def drawing_area_button_release_event_cb(self, e, data):
if self.mouse_dragging:
self.hadjust.set_value(self.mouse_dragging_start[0] - data.x)
self.vadjust.set_value(self.mouse_dragging_start[1] - data.y)
self.mouse_dragging = False
def drawing_area_motion_notify_event_cb(self, e, data):
if self.mouse_dragging:
self.hadjust.set_value(self.mouse_dragging_start[0] - data.x)
self.vadjust.set_value(self.mouse_dragging_start[1] - data.y)
def open_server(self, portnum):
server_thread = TCPServerThread(portnum, self)
server_thread.start()
self.window.set_title(WINDOW_TITLE + " - Server *:" + str(portnum))
def MniRunServer_activate_cb(self, e):
resp = self.DlgRunServer.run()
self.DlgRunServer.hide()
if resp == 1:
self.new_data()
portnum = int(self.EntPortNum.get_text())
self.open_server(portnum)
else:
return
def MniSaveAs_activate_cb(self, e):
resp = self.DlgSaveFile.run()
self.DlgSaveFile.hide()
if resp == 1:
with self.seqdata_lock:
self.seqdata.save_log_to(self.DlgSaveFile.get_filename())
if __name__ == "__main__":
mainwindow = VizexecGUI()
if len(sys.argv) >= 2:
if sys.argv[1] == "-s":
mainwindow.open_server(int(sys.argv[2]))
else:
mainwindow.open_new(sys.argv[1])
gtk.main()
| 30.423077
| 107
| 0.614642
|
d7ef796154d28cb9ddc57192a47b804055bdc0d8
| 2,524
|
py
|
Python
|
modules/ckanext-ytp_request/ckanext/ytp_request/plugin.py
|
vrk-kpa/opendata-ckan
|
8936e2d9e700b9e5534fe2a51eedc2d1ede8c10b
|
[
"MIT"
] | null | null | null |
modules/ckanext-ytp_request/ckanext/ytp_request/plugin.py
|
vrk-kpa/opendata-ckan
|
8936e2d9e700b9e5534fe2a51eedc2d1ede8c10b
|
[
"MIT"
] | 10
|
2021-12-02T10:33:42.000Z
|
2022-03-31T11:00:54.000Z
|
modules/ckanext-ytp_request/ckanext/ytp_request/plugin.py
|
vrk-kpa/opendata-ckan
|
8936e2d9e700b9e5534fe2a51eedc2d1ede8c10b
|
[
"MIT"
] | null | null | null |
import ckan.plugins as plugins
from ckan.plugins import implements, toolkit
from ckan.lib.plugins import DefaultTranslation
import logging
from .cli import get_commands
from . import views
log = logging.getLogger(__name__)
class YtpRequestPlugin(plugins.SingletonPlugin, DefaultTranslation):
implements(plugins.IConfigurer, inherit=True)
implements(plugins.IActions, inherit=True)
implements(plugins.IAuthFunctions, inherit=True)
implements(plugins.ITranslation)
implements(plugins.IClick)
implements(plugins.IBlueprint)
# IConfigurer #
def update_config(self, config):
toolkit.add_template_directory(config, 'templates')
toolkit.add_resource('public', 'request_js')
# IActions
def get_actions(self):
from ckanext.ytp_request.logic.action import get, create, update, delete
return {
"member_request_create": create.member_request_create,
"member_request_cancel": delete.member_request_cancel,
"member_request_reject": update.member_request_reject,
"member_request_approve": update.member_request_approve,
"member_request_membership_cancel": delete.member_request_membership_cancel,
"member_requests_list": get.member_requests_list,
"member_requests_mylist": get.member_requests_mylist,
"get_available_roles": get.get_available_roles,
"member_request_show": get.member_request,
"organization_list_without_memberships": get.organization_list_without_memberships
}
# IAuthFunctions
def get_auth_functions(self):
from ckanext.ytp_request.logic.auth import get, create, update, delete
return {
"member_request_create": create.member_request_create,
"member_request_cancel": delete.member_request_cancel,
"member_request_reject": update.member_request_reject,
"member_request_approve": update.member_request_approve,
"member_request_membership_cancel": delete.member_request_membership_cancel,
"member_requests_list": get.member_requests_list,
"member_requests_mylist": get.member_requests_mylist,
"member_request_show": get.member_request,
"organization_list_without_memberships": get.organization_list_without_memberships
}
# IClick
def get_commands(self):
return get_commands()
# IBlueprint
def get_blueprint(self):
return views.get_blueprint()
| 39.4375
| 94
| 0.723455
|
559f37de5d1fde48d6145be440ad39dda379cbe2
| 1,912
|
py
|
Python
|
lib/surface/dataproc/jobs/__init__.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/dataproc/jobs/__init__.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/dataproc/jobs/__init__.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*- #
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The command group for cloud dataproc jobs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA,
base.ReleaseTrack.GA)
class Jobs(base.Group):
"""Submit and manage Google Cloud Dataproc jobs.
Submit and manage Google Cloud Dataproc jobs.
## EXAMPLES
To learn about the types of jobs that can be submitted, run:
$ {command} submit
To view the output of a job as it runs, run:
$ {command} wait job_id
To cancel an active job, run:
$ {command} kill job_id
To view the details of a job, run:
$ {command} describe job_id
To see the list of all jobs, run:
$ {command} list
To delete the record of an inactive job, run:
$ {command} delete job_id
"""
@classmethod
def Args(cls, parser):
region_prop = properties.VALUES.dataproc.region
parser.add_argument(
'--region',
help=region_prop.help_text,
# Don't set default, because it would override users' property setting.
action=actions.StoreProperty(region_prop))
| 27.710145
| 79
| 0.722803
|
3b298de25be6cd3c6824911febc5bdfce7404105
| 15,543
|
py
|
Python
|
montreal_forced_aligner/trainers/sat.py
|
potipot/Montreal-Forced-Aligner
|
6d665e9c63a4e3c795d27ec3bb8d9d1a5604bb91
|
[
"MIT"
] | 2
|
2021-06-10T10:18:44.000Z
|
2022-01-26T07:08:54.000Z
|
montreal_forced_aligner/trainers/sat.py
|
potipot/Montreal-Forced-Aligner
|
6d665e9c63a4e3c795d27ec3bb8d9d1a5604bb91
|
[
"MIT"
] | null | null | null |
montreal_forced_aligner/trainers/sat.py
|
potipot/Montreal-Forced-Aligner
|
6d665e9c63a4e3c795d27ec3bb8d9d1a5604bb91
|
[
"MIT"
] | null | null | null |
import os
from tqdm import tqdm
import subprocess
import shutil
import time
from ..multiprocessing import (align, compile_train_graphs,
acc_stats, tree_stats, convert_alignments,
calc_fmllr, compute_alignment_improvement)
from ..helper import thirdparty_binary, make_path_safe, log_kaldi_errors, parse_logs
from ..exceptions import KaldiProcessingError
from .triphone import TriphoneTrainer
class SatTrainer(TriphoneTrainer):
"""
Configuration class for speaker adapted training (SAT)
Attributes
----------
fmllr_update_type : str
Type of fMLLR estimation, defaults to ``'full'``
fmllr_iterations : list
List of iterations to perform fMLLR estimation
silence_weight : float
Weight on silence in fMLLR estimation
"""
def __init__(self, default_feature_config):
super(SatTrainer, self).__init__(default_feature_config)
self.fmllr_update_type = 'full'
self.fmllr_iterations = []
max_fmllr_iter = int(self.num_iterations/2) - 1
for i in range(1, max_fmllr_iter):
if i < max_fmllr_iter / 2 and i % 2 == 0:
self.fmllr_iterations.append(i)
self.fmllr_iterations.append(max_fmllr_iter)
self.silence_weight = 0.0
self.feature_config.fmllr = True
def compute_calculated_properties(self):
super(SatTrainer, self).compute_calculated_properties()
self.fmllr_iterations = []
max_fmllr_iter = int(self.num_iterations / 2) - 1
for i in range(1, max_fmllr_iter):
if i < max_fmllr_iter / 2 and i % 2 == 0:
self.fmllr_iterations.append(i)
self.fmllr_iterations.append(max_fmllr_iter)
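    # Editor's note (worked example, not in the original source): with a hypothetical
    # num_iterations of 40, max_fmllr_iter = 40 // 2 - 1 = 19, so the loop above keeps the
    # even iterations below 19 / 2 and then appends 19, giving
    # fmllr_iterations = [2, 4, 6, 8, 19].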
@property
def train_type(self):
return 'sat'
def train(self, call_back=None):
done_path = os.path.join(self.train_directory, 'done')
dirty_path = os.path.join(self.train_directory, 'dirty')
if os.path.exists(done_path):
self.logger.info('{} training already done, skipping initialization.'.format(self.identifier))
return
begin = time.time()
num_gauss = self.initial_gaussians
if call_back == print:
iters = tqdm(range(1, self.num_iterations))
else:
iters = range(1, self.num_iterations)
sil_phones = self.dictionary.silence_csl
try:
for i in iters:
model_path = os.path.join(self.train_directory, '{}.mdl'.format(i))
occs_path = os.path.join(self.train_directory, '{}.occs'.format(i + 1))
next_model_path = os.path.join(self.train_directory, '{}.mdl'.format(i + 1))
if os.path.exists(next_model_path):
continue
if i in self.realignment_iterations:
align(i, self.train_directory, self.data_directory,
self.dictionary.optional_silence_csl,
self.corpus.num_jobs, self)
if self.debug:
compute_alignment_improvement(i, self, self.train_directory, self.corpus.num_jobs)
if i in self.fmllr_iterations:
calc_fmllr(self.train_directory, self.data_directory, sil_phones,
self.corpus.num_jobs, self, initial=False, iteration=i)
acc_stats(i, self.train_directory, self.data_directory, self.corpus.num_jobs, self)
log_path = os.path.join(self.log_directory, 'update.{}.log'.format(i))
with open(log_path, 'w') as log_file:
acc_files = [os.path.join(self.train_directory, '{}.{}.acc'.format(i, x))
for x in range(self.corpus.num_jobs)]
est_proc = subprocess.Popen([thirdparty_binary('gmm-est'),
'--write-occs=' + occs_path,
'--mix-up=' + str(num_gauss), '--power=' + str(self.power),
model_path,
"{} - {}|".format(thirdparty_binary('gmm-sum-accs'),
' '.join(map(make_path_safe, acc_files))),
next_model_path],
stderr=log_file)
est_proc.communicate()
parse_logs(self.log_directory)
if not os.path.exists(next_model_path):
raise(Exception('There was an error training in iteration {}, please check the logs.'.format(i)))
if not self.debug:
for f in acc_files:
os.remove(f)
self.parse_log_directory(self.log_directory, i, self.corpus.num_jobs, call_back)
if i < self.final_gaussian_iteration:
num_gauss += self.gaussian_increment
shutil.copy(os.path.join(self.train_directory, '{}.mdl'.format(self.num_iterations)),
os.path.join(self.train_directory, 'final.mdl'))
shutil.copy(os.path.join(self.train_directory, '{}.occs'.format(self.num_iterations)),
os.path.join(self.train_directory, 'final.occs'))
if not self.debug:
for i in range(1, self.num_iterations):
model_path = os.path.join(self.train_directory, '{}.mdl'.format(i))
try:
os.remove(model_path)
except FileNotFoundError:
pass
try:
os.remove(os.path.join(self.train_directory, '{}.occs'.format(i)))
except FileNotFoundError:
pass
except Exception as e:
with open(dirty_path, 'w'):
pass
if isinstance(e, KaldiProcessingError):
log_kaldi_errors(e.error_logs, self.logger)
raise
with open(done_path, 'w'):
pass
self.logger.info('Training complete!')
self.logger.debug('Training took {} seconds'.format(time.time() - begin))
def align(self, subset, call_back=None):
dirty_path = os.path.join(self.align_directory, 'dirty')
if os.path.exists(dirty_path): # if there was an error, let's redo from scratch
shutil.rmtree(self.align_directory)
done_path = os.path.join(self.align_directory, 'done')
if not os.path.exists(done_path):
message = 'Generating alignments using {} models'.format(self.identifier)
if subset:
message += ' using {} utterances...'.format(subset)
else:
message += ' for the whole corpus...'
self.logger.info(message)
begin = time.time()
self.logger.debug('Using {} as the feature name'.format(self.feature_file_base_name))
if subset is None:
align_data_directory = self.corpus.split_directory()
else:
align_data_directory = self.corpus.subset_directory(subset, self.feature_config)
try:
log_dir = os.path.join(self.align_directory, 'log')
os.makedirs(log_dir, exist_ok=True)
shutil.copy(os.path.join(self.train_directory, 'tree'), self.align_directory)
shutil.copyfile(os.path.join(self.train_directory, 'final.mdl'),
os.path.join(self.align_directory, 'final.mdl'))
if os.path.exists(os.path.join(self.train_directory, 'lda.mat')):
shutil.copyfile(os.path.join(self.train_directory, 'lda.mat'),
os.path.join(self.align_directory, 'lda.mat'))
shutil.copyfile(os.path.join(self.train_directory, 'final.occs'),
os.path.join(self.align_directory, 'final.occs'))
compile_train_graphs(self.align_directory, self.dictionary.output_directory,
align_data_directory, self.corpus.num_jobs, self)
if align_data_directory == self.data_directory and os.path.exists(os.path.join(self.train_directory, 'trans.0')):
for i in range(self.corpus.num_jobs):
shutil.copy(os.path.join(self.train_directory, 'trans.{}'.format(i)),
os.path.join(self.align_directory, 'trans.{}'.format(i)))
align('final', self.align_directory, align_data_directory,
self.dictionary.optional_silence_csl,
self.corpus.num_jobs, self, self.align_directory)
if not os.path.exists(os.path.join(self.align_directory, 'trans.0')):
calc_fmllr(self.align_directory, align_data_directory,
self.dictionary.optional_silence_csl, self.corpus.num_jobs, self, initial=True, iteration='final')
align('final', self.align_directory, align_data_directory,
self.dictionary.optional_silence_csl,
self.corpus.num_jobs, self, self.align_directory)
self.save(os.path.join(self.align_directory, 'acoustic_model.zip'))
except Exception as e:
with open(dirty_path, 'w'):
pass
if isinstance(e, KaldiProcessingError):
log_kaldi_errors(e.error_logs, self.logger)
raise
with open(done_path, 'w'):
pass
self.logger.debug('Alignment took {} seconds'.format(time.time() - begin))
else:
self.logger.info('Alignments using {} models already done'.format(self.identifier))
if self.debug:
self.export_textgrids()
def init_training(self, identifier, temporary_directory, corpus, dictionary, previous_trainer):
self.feature_config.fmllr = False
self._setup_for_init(identifier, temporary_directory, corpus, dictionary)
done_path = os.path.join(self.train_directory, 'done')
dirty_path = os.path.join(self.train_directory, 'dirty')
if os.path.exists(done_path):
self.logger.info('{} training already done, skipping initialization.'.format(self.identifier))
return
begin = time.time()
if os.path.exists(os.path.join(self.train_directory, '1.mdl')):
return
self.feature_config.fmllr = True
self.logger.info('Initializing speaker-adapted triphone training...')
align_directory = previous_trainer.align_directory
context_opts = []
ci_phones = self.dictionary.silence_csl
try:
if os.path.exists(os.path.join(align_directory, 'lda.mat')):
shutil.copyfile(os.path.join(align_directory, 'lda.mat'), os.path.join(self.train_directory, 'lda.mat'))
tree_stats(self.train_directory, align_directory,
self.data_directory, ci_phones, self.corpus.num_jobs, self)
log_path = os.path.join(self.log_directory, 'questions.log')
tree_path = os.path.join(self.train_directory, 'tree')
treeacc_path = os.path.join(self.train_directory, 'treeacc')
sets_int_path = os.path.join(self.dictionary.phones_dir, 'sets.int')
roots_int_path = os.path.join(self.dictionary.phones_dir, 'roots.int')
extra_question_int_path = os.path.join(self.dictionary.phones_dir, 'extra_questions.int')
topo_path = os.path.join(self.dictionary.output_directory, 'topo')
questions_path = os.path.join(self.train_directory, 'questions.int')
questions_qst_path = os.path.join(self.train_directory, 'questions.qst')
with open(log_path, 'w') as log_file:
subprocess.call([thirdparty_binary('cluster-phones')] + context_opts +
[treeacc_path, sets_int_path, questions_path], stderr=log_file)
with open(extra_question_int_path, 'r') as in_file, \
open(questions_path, 'a') as out_file:
for line in in_file:
out_file.write(line)
log_path = os.path.join(self.log_directory, 'compile_questions.log')
with open(log_path, 'w') as log_file:
subprocess.call([thirdparty_binary('compile-questions')] + context_opts +
[topo_path, questions_path, questions_qst_path],
stderr=log_file)
log_path = os.path.join(self.log_directory, 'build_tree.log')
with open(log_path, 'w') as log_file:
subprocess.call([thirdparty_binary('build-tree')] + context_opts +
['--verbose=1', '--max-leaves={}'.format(self.initial_gaussians),
'--cluster-thresh={}'.format(self.cluster_threshold),
treeacc_path, roots_int_path, questions_qst_path,
topo_path, tree_path], stderr=log_file)
log_path = os.path.join(self.log_directory, 'init_model.log')
occs_path = os.path.join(self.train_directory, '0.occs')
mdl_path = os.path.join(self.train_directory, '0.mdl')
with open(log_path, 'w') as log_file:
subprocess.call([thirdparty_binary('gmm-init-model'),
'--write-occs=' + occs_path, tree_path, treeacc_path,
topo_path, mdl_path], stderr=log_file)
log_path = os.path.join(self.log_directory, 'mixup.log')
with open(log_path, 'w') as log_file:
subprocess.call([thirdparty_binary('gmm-mixup'),
'--mix-up={}'.format(self.initial_gaussians),
mdl_path, occs_path, mdl_path], stderr=log_file)
os.remove(treeacc_path)
compile_train_graphs(self.train_directory, self.dictionary.output_directory,
self.data_directory, self.corpus.num_jobs, self)
os.rename(occs_path, os.path.join(self.train_directory, '1.occs'))
os.rename(mdl_path, os.path.join(self.train_directory, '1.mdl'))
convert_alignments(self.train_directory, align_directory, self.corpus.num_jobs, self)
if os.path.exists(os.path.join(align_directory, 'trans.0')):
for i in range(self.corpus.num_jobs):
shutil.copy(os.path.join(align_directory, 'trans.{}'.format(i)),
os.path.join(self.train_directory, 'trans.{}'.format(i)))
else:
calc_fmllr(self.train_directory, self.data_directory,
self.dictionary.silence_csl, self.corpus.num_jobs, self, initial=True)
parse_logs(self.log_directory)
except Exception as e:
with open(dirty_path, 'w'):
pass
if isinstance(e, KaldiProcessingError):
log_kaldi_errors(e.error_logs, self.logger)
raise
self.logger.info('Initialization complete!')
self.logger.debug('Initialization took {} seconds'.format(time.time() - begin))
| 53.782007
| 129
| 0.576594
|
a5839b33a7578761cb685995cf7d55209ca5e925
| 2,038
|
py
|
Python
|
backend/app/app/api/v1/endpoints/upload.py
|
benlau6/fastapi-fullstack
|
68a46d576310a1c846315228c1251f36ea23f056
|
[
"MIT"
] | 1
|
2022-01-29T07:53:35.000Z
|
2022-01-29T07:53:35.000Z
|
backend/app/app/api/v1/endpoints/upload.py
|
benlau6/fastapi-fullstack
|
68a46d576310a1c846315228c1251f36ea23f056
|
[
"MIT"
] | null | null | null |
backend/app/app/api/v1/endpoints/upload.py
|
benlau6/fastapi-fullstack
|
68a46d576310a1c846315228c1251f36ea23f056
|
[
"MIT"
] | null | null | null |
from typing import List
import os
import shutil
import asyncio
from fastapi import APIRouter, Depends, File, UploadFile, BackgroundTasks
from app import schemas
from app.core import config
from app.api import deps
from app.api.deps import Permission
router = APIRouter()
def write_file_to_local(
form: schemas.UploadForm, file: UploadFile, settings: config.Settings
) -> None:
file_dir = f"{settings.FILE_ROOT_PATH}/{form.base_dir}"
file_path = f"{file_dir}/{file.filename}"
if not os.path.exists(file_dir):
os.makedirs(file_dir)
with open(file_path, "wb") as buffer:
shutil.copyfileobj(file.file, buffer)
@router.post(
"/files",
dependencies=[Depends(deps.verify_content_length)],
response_model=schemas.UploadRecords,
)
async def upload_files(
files: List[UploadFile] = File(...),
form: schemas.UploadForm = Permission("submit", schemas.UploadForm.as_form),
current_user: schemas.UserInDB = Depends(deps.get_current_active_user),
settings: config.Settings = Depends(deps.get_settings),
*,
background_tasks: BackgroundTasks,
) -> schemas.UploadRecords:
async def copy_file(file: UploadFile) -> schemas.UploadRecord:
background_tasks.add_task(write_file_to_local, form, file, settings)
record = schemas.UploadRecord(
filename=file.filename,
            # file_size=file.file.tell(),  # not reliable here: the real size is only known after reading the whole file, which would slow the request down, so the write is deferred to a background task instead
file_content_type=file.content_type,
owner=current_user["email"],
)
return record
record_list = await asyncio.gather(*map(copy_file, files))
records = schemas.UploadRecords(records=record_list)
return records
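# Editor's sketch (not part of this module): one way to exercise the endpoint above from a
# client. The mount prefix (/api/v1/upload) and the base_dir form field name are assumptions
# inferred from this file, not confirmed elsewhere in the codebase.
#
#   import httpx
#   files = [("files", ("report.pdf", open("report.pdf", "rb"), "application/pdf"))]
#   data = {"base_dir": "reports"}
#   response = httpx.post("http://localhost:8000/api/v1/upload/files", files=files, data=data)
#   print(response.json())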
@router.get("/info", response_model=schemas.UserFromDB)
async def get_info(
current_user: schemas.UserInDB = Depends(deps.get_current_active_user),
) -> schemas.UserInDB:
return current_user
| 32.870968
| 160
| 0.699706
|
dca201fbda1ac1f5b0f6a7510a6fd8d49e0dc5fd
| 6,171
|
py
|
Python
|
torchvision/prototype/models/segmentation/deeplabv3.py
|
husthyc/vision
|
e95e54386a603c8e9d3142b7f0c0dd43d86db479
|
[
"BSD-3-Clause"
] | 2
|
2021-04-01T17:19:21.000Z
|
2021-04-01T18:04:08.000Z
|
torchvision/prototype/models/segmentation/deeplabv3.py
|
husthyc/vision
|
e95e54386a603c8e9d3142b7f0c0dd43d86db479
|
[
"BSD-3-Clause"
] | null | null | null |
torchvision/prototype/models/segmentation/deeplabv3.py
|
husthyc/vision
|
e95e54386a603c8e9d3142b7f0c0dd43d86db479
|
[
"BSD-3-Clause"
] | null | null | null |
import warnings
from functools import partial
from typing import Any, Optional
from torchvision.prototype.transforms import VocEval
from torchvision.transforms.functional import InterpolationMode
from ....models.segmentation.deeplabv3 import DeepLabV3, _deeplabv3_mobilenetv3, _deeplabv3_resnet
from .._api import Weights, WeightEntry
from .._meta import _VOC_CATEGORIES
from ..mobilenetv3 import MobileNetV3LargeWeights, mobilenet_v3_large
from ..resnet import resnet50, resnet101
from ..resnet import ResNet50Weights, ResNet101Weights
__all__ = [
"DeepLabV3",
"DeepLabV3ResNet50Weights",
"DeepLabV3ResNet101Weights",
"DeepLabV3MobileNetV3LargeWeights",
"deeplabv3_mobilenet_v3_large",
"deeplabv3_resnet50",
"deeplabv3_resnet101",
]
_COMMON_META = {
"categories": _VOC_CATEGORIES,
"interpolation": InterpolationMode.BILINEAR,
}
class DeepLabV3ResNet50Weights(Weights):
CocoWithVocLabels_RefV1 = WeightEntry(
url="https://download.pytorch.org/models/deeplabv3_resnet50_coco-cd0a2569.pth",
transforms=partial(VocEval, resize_size=520),
meta={
**_COMMON_META,
"recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#deeplabv3_resnet50",
"mIoU": 66.4,
"acc": 92.4,
},
)
class DeepLabV3ResNet101Weights(Weights):
CocoWithVocLabels_RefV1 = WeightEntry(
url="https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth",
transforms=partial(VocEval, resize_size=520),
meta={
**_COMMON_META,
"recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#fcn_resnet101",
"mIoU": 67.4,
"acc": 92.4,
},
)
class DeepLabV3MobileNetV3LargeWeights(Weights):
CocoWithVocLabels_RefV1 = WeightEntry(
url="https://download.pytorch.org/models/deeplabv3_mobilenet_v3_large-fc3c493d.pth",
transforms=partial(VocEval, resize_size=520),
meta={
**_COMMON_META,
"recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#deeplabv3_mobilenet_v3_large",
"mIoU": 60.3,
"acc": 91.2,
},
)
def deeplabv3_resnet50(
weights: Optional[DeepLabV3ResNet50Weights] = None,
weights_backbone: Optional[ResNet50Weights] = None,
progress: bool = True,
num_classes: int = 21,
aux_loss: Optional[bool] = None,
**kwargs: Any,
) -> DeepLabV3:
if "pretrained" in kwargs:
warnings.warn("The parameter pretrained is deprecated, please use weights instead.")
weights = DeepLabV3ResNet50Weights.CocoWithVocLabels_RefV1 if kwargs.pop("pretrained") else None
weights = DeepLabV3ResNet50Weights.verify(weights)
if "pretrained_backbone" in kwargs:
warnings.warn("The parameter pretrained_backbone is deprecated, please use weights_backbone instead.")
weights_backbone = ResNet50Weights.ImageNet1K_RefV1 if kwargs.pop("pretrained_backbone") else None
weights_backbone = ResNet50Weights.verify(weights_backbone)
if weights is not None:
weights_backbone = None
aux_loss = True
num_classes = len(weights.meta["categories"])
backbone = resnet50(weights=weights_backbone, replace_stride_with_dilation=[False, True, True])
model = _deeplabv3_resnet(backbone, num_classes, aux_loss)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress))
return model
def deeplabv3_resnet101(
weights: Optional[DeepLabV3ResNet101Weights] = None,
weights_backbone: Optional[ResNet101Weights] = None,
progress: bool = True,
num_classes: int = 21,
aux_loss: Optional[bool] = None,
**kwargs: Any,
) -> DeepLabV3:
if "pretrained" in kwargs:
warnings.warn("The parameter pretrained is deprecated, please use weights instead.")
weights = DeepLabV3ResNet101Weights.CocoWithVocLabels_RefV1 if kwargs.pop("pretrained") else None
weights = DeepLabV3ResNet101Weights.verify(weights)
if "pretrained_backbone" in kwargs:
warnings.warn("The parameter pretrained_backbone is deprecated, please use weights_backbone instead.")
weights_backbone = ResNet101Weights.ImageNet1K_RefV1 if kwargs.pop("pretrained_backbone") else None
weights_backbone = ResNet101Weights.verify(weights_backbone)
if weights is not None:
weights_backbone = None
aux_loss = True
num_classes = len(weights.meta["categories"])
backbone = resnet101(weights=weights_backbone, replace_stride_with_dilation=[False, True, True])
model = _deeplabv3_resnet(backbone, num_classes, aux_loss)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress))
return model
def deeplabv3_mobilenet_v3_large(
weights: Optional[DeepLabV3MobileNetV3LargeWeights] = None,
weights_backbone: Optional[MobileNetV3LargeWeights] = None,
progress: bool = True,
num_classes: int = 21,
aux_loss: Optional[bool] = None,
**kwargs: Any,
) -> DeepLabV3:
if "pretrained" in kwargs:
warnings.warn("The parameter pretrained is deprecated, please use weights instead.")
weights = DeepLabV3MobileNetV3LargeWeights.CocoWithVocLabels_RefV1 if kwargs.pop("pretrained") else None
weights = DeepLabV3MobileNetV3LargeWeights.verify(weights)
if "pretrained_backbone" in kwargs:
warnings.warn("The parameter pretrained_backbone is deprecated, please use weights_backbone instead.")
weights_backbone = MobileNetV3LargeWeights.ImageNet1K_RefV1 if kwargs.pop("pretrained_backbone") else None
weights_backbone = MobileNetV3LargeWeights.verify(weights_backbone)
if weights is not None:
weights_backbone = None
aux_loss = True
num_classes = len(weights.meta["categories"])
backbone = mobilenet_v3_large(weights=weights_backbone, dilated=True)
model = _deeplabv3_mobilenetv3(backbone, num_classes, aux_loss)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress))
return model
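# Editor's sketch (not part of torchvision): typical use of the prototype weights API defined
# above. The import path is inferred from this file's location in the tree and may differ.
#
#   from torchvision.prototype.models.segmentation.deeplabv3 import (
#       DeepLabV3ResNet50Weights, deeplabv3_resnet50)
#   model = deeplabv3_resnet50(weights=DeepLabV3ResNet50Weights.CocoWithVocLabels_RefV1)
#   model.eval()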
| 37.174699
| 121
| 0.725004
|
d9528f691c0ee177631b72c4f703b761cc7ca7ca
| 291
|
py
|
Python
|
Neighbours/forms.py
|
YVONNEANYANGO/Neighbour_Hood
|
ccda87b188f5921748b4719369409a769defd8e7
|
[
"MIT"
] | null | null | null |
Neighbours/forms.py
|
YVONNEANYANGO/Neighbour_Hood
|
ccda87b188f5921748b4719369409a769defd8e7
|
[
"MIT"
] | null | null | null |
Neighbours/forms.py
|
YVONNEANYANGO/Neighbour_Hood
|
ccda87b188f5921748b4719369409a769defd8e7
|
[
"MIT"
] | null | null | null |
from django import forms
from .models import Neighbourhood, Profile ,Business
class ProfileForm(forms.ModelForm):
class Meta:
model= Profile
exclude = ['user']
class NewHoodForm(forms.ModelForm):
class Meta:
model = Neighbourhood
exclude = ['user']
| 22.384615
| 52
| 0.670103
|
d45239f1f64511639ef7bd52a6ff72bde26d1267
| 13,385
|
py
|
Python
|
routing/routing_algorithm.py
|
graham-riches/multi-agent-pathing
|
f862da8eb9b4f6dec706bd28af5e6f39eaf3835d
|
[
"MIT"
] | null | null | null |
routing/routing_algorithm.py
|
graham-riches/multi-agent-pathing
|
f862da8eb9b4f6dec706bd28af5e6f39eaf3835d
|
[
"MIT"
] | null | null | null |
routing/routing_algorithm.py
|
graham-riches/multi-agent-pathing
|
f862da8eb9b4f6dec706bd28af5e6f39eaf3835d
|
[
"MIT"
] | 2
|
2020-09-11T23:33:58.000Z
|
2022-01-14T08:09:21.000Z
|
"""
@file routing_algorithm.py
@brief abstract base class for various routing algorithms
@author Graham Riches
@details
Abstract base class for a routing algorithm. This lets the routing manager accept any time of routing algorithm
as long as it supplies specific methods.
"""
from abc import ABC, abstractmethod
from core.arena import Arena
from core.agent import *
from routing.status import RoutingStatus
from routing.biased_grid import BiasedGrid
class Node(ABC):
@abstractmethod
def __init__(self, location: tuple, parent=None) -> None:
"""
Initialize a single routing node object
:param location: a (X, Y) location tuple for the node
:param parent: another Node object that is the parent of the current nodes for pathing
"""
self.location = location
self.parent = parent
class SingleAgentAlgorithm(ABC):
@abstractmethod
def __init__(self, arena: Arena, agents: list, biased_grid: BiasedGrid) -> None:
"""
Initialize a routing algorithm with the Arena and a list of agents. This finds an "optimal" path to
a goal for a single agent using whatever means the child class chooses.
:param arena: the arena for the simulation
:param agents: the list of all simulation agents
:param biased_grid: 2D array of preferred routing directions for each grid location
"""
self.arena = arena
self.agents = agents
self.biased_grid = biased_grid
self.node_path = list() # contains all the nodes that are part of a target route
self.path = list() # contains a list of agent tasks to create the route
@abstractmethod
def route(self, agent: Agent, target: tuple) -> RoutingStatus:
"""
Abstract routing method that any algorithm can implement to do a custom route from
start to target.
:param agent: agent to route
:param target: ending location tuple (x,y)
:return: RoutingStatus enumeration
"""
pass
@abstractmethod
def reset(self) -> None:
"""
Reset the routing algorithm to clear any internal state variables
:return:
"""
pass
def create_path(self) -> RoutingStatus:
"""
Traverses a list of nodes that compose the path's node_path and constructs a list of agent
tasks required to travel the path
:return:
"""
if len(self.node_path) == 0:
return RoutingStatus.INVALID_PATH
while len(self.node_path) > 1:
# initialize the first path and direction
task_start_node = self.node_path.pop(0)
last_node = self.node_path[0]
if last_node.location[0] == task_start_node.location[0]:
task_direction = AgentCoordinates.Y
elif last_node.location[1] == task_start_node.location[1]:
task_direction = AgentCoordinates.X
else:
return RoutingStatus.INVALID_PATH
# traverse the nodes until we see a turn
nodes = list(self.node_path)
pop_count = 0
for next_node in nodes:
if task_direction == AgentCoordinates.Y:
if next_node.location[0] != last_node.location[0]:
break
else:
last_node = next_node
else:
if next_node.location[1] != last_node.location[1]:
break
else:
last_node = next_node
pop_count += 1
# pop everything up until the turn (current last index - 1)
while pop_count > 1:
self.node_path.pop(0)
pop_count -= 1
# create the task and add it to the path list
if task_direction == AgentCoordinates.X:
move_distance = last_node.location[0] - task_start_node.location[0]
else:
move_distance = last_node.location[1] - task_start_node.location[1]
self.path.append(AgentTask(AgentTasks.MOVE, [task_direction, move_distance]))
return RoutingStatus.SUCCESS
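    # Editor's note (worked example, not in the original source): given a node_path whose
    # locations are (0, 0) -> (0, 1) -> (0, 2) -> (3, 2), create_path() collapses the run of
    # equal-X nodes into one task and the remaining run of equal-Y nodes into another,
    # producing [AgentTask(MOVE, [Y, +2]), AgentTask(MOVE, [X, +3])].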
class MultiAgentAlgorithm(ABC):
@abstractmethod
def __init__(self, arena: Arena, agents: list, algorithm: SingleAgentAlgorithm) -> None:
"""
Creates a multi-agent routing algorithm. This manages routing a group of agents towards a goal.
Note: this is an abstract base class that is meant to be inherited by new routing algorithm classes. This
        provides a number of useful features that can be used by a child algorithm with more specific requirements.
        The virtual methods that the child MUST implement are the route and run_time_step methods.
:param arena: arena object
:param agents: lists of agents
:param algorithm: the single agent routing algorithm to use
"""
self.arena = arena # the arena object
self.agents = agents # list of agents
self.routing_algorithm = algorithm # single agent routing algorithm
self.initialized = False # initializer for the simulation
self.active_agents = [False for agent in self.agents]
self.agent_tasks = [list() for agent in self.agents] # empty task list for each agent
self.agent_reserved_squares = [list() for agent in self.agents] # empty reserved squares lists
self.agent_goals = [list() for agent in self.agents] # goal locations list for each agent
self.agent_callbacks = {AgentEvent.TASK_COMPLETED: self.agent_move_completed_callback}
self.agent_routing_state = [None for agent in self.agents]
self.agent_max_distance = [1000 for agent in self.agents]
@abstractmethod
def run_time_step(self) -> None:
"""
abstract method to run a simulation time step. This method will contain the multi-agent management algorithm
that manages each simulation time step. This is where you can modify the run-time behaviour of your algorithm.
:return: None
"""
pass
@abstractmethod
def route(self, agent_id: int, target: tuple) -> None:
"""
Run the routing algorithm to route an agent to a specific location
:param agent_id: the agent id
:param target: (x, y) tuple of the target location
:return: None
"""
pass
@abstractmethod
def is_locked(self) -> bool:
"""
        checks whether routing is deadlocked, i.e. the agents have moved into positions where they
        block one another and none can route. This is an abstract method that must be implemented by child classes
:return: boolean
"""
pass
def initialize(self) -> None:
"""
        Initialize the simulation by routing each agent towards its first goal
        :return: None
"""
for agent_id, agent in enumerate(self.agents):
goals = self.agent_goals[agent_id]
if goals is not None and len(goals) > 0:
first_goal = goals[0]
self.route(agent_id, first_goal)
self.initialized = True
def is_simulation_complete(self) -> bool:
"""
Returns true if all agents have successfully reached their target locations
and have no remaining tasks in their queue
:return: boolean
"""
for idx, agent in enumerate(self.agents):
if not self.is_agent_at_goal(idx) or not self.agent_goals_completed(idx):
return False
return True
def agent_goals_completed(self, agent_id: int) -> bool:
"""
        check if an agent has completed all of its goal routes
:param agent_id: the agent ID
:return: true if the agent has completed all goal routes
"""
pending_goals = self.agent_goals[agent_id] # current list of remaining goals
if len(pending_goals) > 1:
return False
else:
return True
def is_agent_at_goal(self, agent_id: int) -> bool:
"""
check if an agent has reached its current goal location
:param agent_id: the id of the agent
:return: boolean
"""
agent = self.agents[agent_id]
location = (agent.location.X, agent.location.Y)
current_goal = self.agent_goals[agent_id][0]
if location != current_goal:
return False
else:
return True
def update_agent_goal(self, agent_id: int) -> None:
"""
        Update an agent's current goal by popping the last completed goal from the agent's goal list
:param agent_id: the agent id
:return: None
"""
pending_goals = self.agent_goals[agent_id] # current list of remaining goals
if len(pending_goals) > 1:
self.agent_goals[agent_id] = pending_goals[1:]
def add_agent_goal(self, agent_id: int, location: tuple) -> None:
"""
set the goal location for an agent. The algorithm will continually route to here until 'Done'
:param agent_id: the id of the agent
:param location: the target/goal location
:return: None
"""
self.agent_goals[agent_id].append(location)
def add_agent_task(self, agent_id: int, task: AgentTask) -> None:
"""
        Add a new task to an agent's task list.
        :param agent_id: the ID of the agent to append the task to
:param task: the AgentTask object
:return: None
"""
# add routing blockages for move tasks
if task.task_id == AgentTasks.MOVE:
self.reserve_squares_for_routing(agent_id, task)
self.agent_tasks[agent_id].append(task)
def start_new_task(self, agent_id: int) -> None:
"""
        Start a new agent task from its queue.
        :param agent_id: the agent's ID
:return: None
"""
if len(self.agent_tasks[agent_id]) > 0:
new_task = self.agent_tasks[agent_id].pop(0)
self.agents[agent_id].start_task(new_task)
self.active_agents[agent_id] = True
def signal_agent_event(self, agent_id: int, event: AgentEvent) -> None:
"""
signal to the routing manager that something of interest has happened
:param agent_id: the ID of the agent that is signalling
:param event: the event type
:return: None
"""
# call the callback associated with the event type
self.agent_callbacks[event](agent_id)
def agent_move_completed_callback(self, agent_id: int) -> None:
"""
callback function for when an agent completes a move.
        :param agent_id: the agent's ID
:return: None
"""
# clear any blockages
self.clear_last_task_blockage(agent_id)
# set the agent to not active
self.active_agents[agent_id] = False
def clear_last_task_blockage(self, agent_id: int) -> None:
"""
        Callback to invoke when an agent task has completed. This clears the
        routing blockages reserved by the last task.
        :param agent_id: the id of the agent
        :return: None
"""
# clear any previous routing blockages
reserved_squares = self.agent_reserved_squares[agent_id]
if len(reserved_squares) > 0:
squares = reserved_squares.pop(0)
self.arena.clear_blockage(squares['x'], squares['y'])
def reserve_squares_for_routing(self, agent_id: int, task: AgentTask) -> tuple:
"""
        Reserve grid squares for routing an agent. Note: if the agent's task list depth is greater than 0, the
        reserved squares will start from the endpoint of the last task in the task list. (A worked example of the
        tile arithmetic follows the class definition below.)
        :param agent_id: the agent id of the agent being routed
        :param task: the task containing the route details
        :return: (x_tiles, y_tiles) lists of reserved squares, or (None, None) if the move is shorter than one square
"""
agent = self.agents[agent_id]
x = int(agent.location.X)
y = int(agent.location.Y)
# calculate the routing squares based on the queued tasks
for queued_task in self.agent_tasks[agent_id]:
if queued_task.task_id == AgentTasks.MOVE:
direction = queued_task.args[0]
distance = queued_task.args[1]
if direction == AgentCoordinates.X:
x += distance
else:
y += distance
task_args = task.args
        if abs(task_args[1]) < 1:
return None, None
sign = np.sign(task_args[1])
if task_args[0] == AgentCoordinates.X:
x_start = x + 1 if sign > 0 else x - 1
x_target = int(x_start + task_args[1])
tiles = list(range(int(x_start), int(x_target), int(sign)))
x_tiles = tiles
y_tiles = [y]
else:
y_start = y + 1 if sign > 0 else y - 1
y_target = int(y_start + task_args[1])
tiles = list(range(int(y_start), int(y_target), int(sign)))
x_tiles = [x]
y_tiles = tiles
self.arena.set_reserved(x_tiles, y_tiles)
self.agent_reserved_squares[agent_id].append({'x': x_tiles, 'y': y_tiles})
# set the last square as an agent target square
self.arena.set_agent_target(x_tiles[-1], y_tiles[-1])
return x_tiles, y_tiles
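# --- Editor's note: hedged usage sketch, not part of the original module. ---
# The reservation arithmetic above can be illustrated standalone. For an agent at
# (x, y) = (2, 3) with a queued MOVE of +3 along X, the reserved tiles start one
# square ahead of the agent and stop one short of the square past the target:
# sign = +1, x_start = 3, x_target = 6  ->  x_tiles = [3, 4, 5], y_tiles = [3],
# and the last tile (5, 3) is additionally marked as the agent's target square.
# The helper below only reproduces that arithmetic; its name is illustrative.
import numpy as np


def _example_reserved_tiles(x, y, distance):
    sign = int(np.sign(distance))
    x_start = x + 1 if sign > 0 else x - 1
    x_target = int(x_start + distance)
    return list(range(x_start, x_target, sign)), [y]


assert _example_reserved_tiles(2, 3, 3) == ([3, 4, 5], [3])
# A concrete subclass of this manager would typically be driven as:
#     manager.initialize()
#     while not manager.is_simulation_complete() and not manager.is_locked():
#         manager.run_time_step()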
| 39.718101
| 118
| 0.61681
|
168692b371ed0e65f80c7d49e394bcb0f4a7ccbf
| 3,639
|
py
|
Python
|
recipe_scrapers/__init__.py
|
timandrews335/recipe-scrapers
|
6e2af0838596bc51a9c2f041f6b7acc113ecdeff
|
[
"MIT"
] | null | null | null |
recipe_scrapers/__init__.py
|
timandrews335/recipe-scrapers
|
6e2af0838596bc51a9c2f041f6b7acc113ecdeff
|
[
"MIT"
] | null | null | null |
recipe_scrapers/__init__.py
|
timandrews335/recipe-scrapers
|
6e2af0838596bc51a9c2f041f6b7acc113ecdeff
|
[
"MIT"
] | null | null | null |
import re
from .allrecipes import AllRecipes
from .allrecipesbr import AllRecipesBr
from .bbcfood import BBCFood
from .bbcgoodfood import BBCGoodFood
from .bonappetit import BonAppetit
from .closetcooking import ClosetCooking
from .cookstr import Cookstr
from .epicurious import Epicurious
from .finedininglovers import FineDiningLovers
from .foodnetwork import FoodNetwork
from .foodrepublic import FoodRepublic
from .giallozafferano import GialloZafferano
from .hellofresh import HelloFresh
from .hundredandonecookbooks import HundredAndOneCookbooks
from .inspiralized import Inspiralized
from .jamieoliver import JamieOliver
from .mybakingaddiction import MyBakingAddiction
from .nihhealthyeating import NIHHealthyEating
from .paninihappy import PaniniHappy
from .realsimple import RealSimple
from .simplyrecipes import SimplyRecipes
from .steamykitchen import SteamyKitchen
from .tastesoflizzyt import TastesOfLizzyT
from .tastykitchen import TastyKitchen
from .thehappyfoodie import TheHappyFoodie
from .thepioneerwoman import ThePioneerWoman
from .thevintagemixer import TheVintageMixer
from .tudogostoso import TudoGostoso
from .twopeasandtheirpod import TwoPeasAndTheirPod
from .whatsgabycooking import WhatsGabyCooking
from .yummly import Yummly
from .geniuskitchen import GeniusKitchen
SCRAPERS = {
AllRecipes.host(): AllRecipes,
AllRecipesBr.host(): AllRecipesBr,
BBCFood.host(): BBCFood,
BBCFood.host(domain='co.uk'): BBCFood,
BBCGoodFood.host(): BBCGoodFood,
BonAppetit.host(): BonAppetit,
ClosetCooking.host(): ClosetCooking,
Cookstr.host(): Cookstr,
Epicurious.host(): Epicurious,
FineDiningLovers.host(): FineDiningLovers,
FoodNetwork.host(): FoodNetwork,
FoodRepublic.host(): FoodRepublic,
GialloZafferano.host(): GialloZafferano,
HelloFresh.host(): HelloFresh,
HelloFresh.host(domain='co.uk'): HelloFresh,
HundredAndOneCookbooks.host(): HundredAndOneCookbooks,
Inspiralized.host(): Inspiralized,
JamieOliver.host(): JamieOliver,
MyBakingAddiction.host(): MyBakingAddiction,
NIHHealthyEating.host(): NIHHealthyEating,
PaniniHappy.host(): PaniniHappy,
RealSimple.host(): RealSimple,
SimplyRecipes.host(): SimplyRecipes,
SteamyKitchen.host(): SteamyKitchen,
TastesOfLizzyT.host(): TastesOfLizzyT,
TastyKitchen.host(): TastyKitchen,
TheHappyFoodie.host(): TheHappyFoodie,
ThePioneerWoman.host(): ThePioneerWoman,
TheVintageMixer.host(): TheVintageMixer,
TudoGostoso.host(): TudoGostoso,
TwoPeasAndTheirPod.host(): TwoPeasAndTheirPod,
WhatsGabyCooking.host(): WhatsGabyCooking,
Yummly.host(): Yummly,
GeniusKitchen.host(): GeniusKitchen,
}
def url_path_to_dict(path):
pattern = (r'^'
r'((?P<schema>.+?)://)?'
r'((?P<user>.+?)(:(?P<password>.*?))?@)?'
r'(?P<host>.*?)'
r'(:(?P<port>\d+?))?'
r'(?P<path>/.*?)?'
r'(?P<query>[?].*?)?'
r'$'
)
regex = re.compile(pattern)
matches = regex.match(path)
url_dict = matches.groupdict() if matches is not None else None
return url_dict
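# --- Editor's note: hedged illustration of url_path_to_dict, added by the editor; the
# URL below is a placeholder, not taken from the original file. ---
_example_url = url_path_to_dict('http://allrecipes.com/recipe/12345/')
assert _example_url['schema'] == 'http'
assert _example_url['host'] == 'allrecipes.com'
assert _example_url['path'] == '/recipe/12345/'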
class WebsiteNotImplementedError(NotImplementedError):
'''Error for when the website is not supported by this library.'''
pass
def scrape_me(url_path):
host_name = url_path_to_dict(url_path.replace('://www.', '://'))['host']
try:
scraper = SCRAPERS[host_name]
except KeyError:
raise WebsiteNotImplementedError(
"Website ({}) is not supported".format(host_name))
return scraper(url_path)
__all__ = ['scrape_me']
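# --- Editor's note: hedged usage sketch appended by the editor, not part of the package. ---
# scrape_me() strips a leading "www.", looks the host up in SCRAPERS and returns an
# instance of the matching scraper class; unknown hosts raise WebsiteNotImplementedError.
# The accessor methods shown in the comment (title(), ingredients()) are assumptions
# about the individual scraper classes, not guaranteed by this module:
#
#     scraper = scrape_me('https://www.allrecipes.com/recipe/12345/')
#     print(scraper.title())
#     print(scraper.ingredients())
#
# The unsupported-host path needs no network access and can be exercised directly:
try:
    scrape_me('https://example.com/not-a-recipe')
except WebsiteNotImplementedError as error:
    print(error)  # -> Website (example.com) is not supported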
| 33.385321
| 76
| 0.725474
|
451cbc2b843b0a407900e916df4a331663d72e99
| 1,149
|
py
|
Python
|
spinup/examples/pytorch/sac_goal.py
|
jesbu1/spinningup
|
fd54d9e06febc7ff5696a63d1e84e2c16d38e486
|
[
"MIT"
] | null | null | null |
spinup/examples/pytorch/sac_goal.py
|
jesbu1/spinningup
|
fd54d9e06febc7ff5696a63d1e84e2c16d38e486
|
[
"MIT"
] | null | null | null |
spinup/examples/pytorch/sac_goal.py
|
jesbu1/spinningup
|
fd54d9e06febc7ff5696a63d1e84e2c16d38e486
|
[
"MIT"
] | null | null | null |
from spinup.utils.run_utils import ExperimentGrid
from spinup import sac_pytorch
import torch
import gym
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--cpu', type=int, default=4)
parser.add_argument('--num_runs', type=int, default=1)
args = parser.parse_args()
eg = ExperimentGrid(name='sac-goal')
eg.add('env_name', 'SawyerPush-v0', '', True)
eg.add('seed', [10*i for i in range(args.num_runs)])
eg.add('epochs', 300)
eg.add('steps_per_epoch', 10 * 150)
eg.add('lr', [3e-4, 1e-3])
eg.add('start_steps', 1000)
eg.add('ac_kwargs:hidden_sizes', [(400,400)], 'hid')
eg.add('ac_kwargs:activation', [torch.nn.ReLU], '')
eg.run(sac_pytorch, num_cpu=args.cpu)
#from metaworld.benchmarks import MT10
#
#env_fn = lambda : MTEnv(MT10.get_train_tasks())
#
#ac_kwargs = dict(hidden_sizes=[400,400], activation=torch.nn.ReLU)
#
#logger_kwargs = dict(output_dir='~/spinup/data/', exp_name='SAC_MT10')
#
#sac_pytorch(env_fn=env_fn, ac_kwargs=ac_kwargs, steps_per_epoch=128 * 10, epochs=1000, start_steps=1000, lr=3e-4, logger_kwargs=logger_kwargs)
| 35.90625
| 143
| 0.697998
|
7f2eb85a4cd114343cbb29c7ac440fdd16451e3d
| 319
|
py
|
Python
|
ex050 # soma de inteiros pares for range .py
|
jbmarcos/Python-Curso-em-video-Mundo-1-2-3-
|
a5bd705b2437c281f8f7ac02dc7ff54a09a37046
|
[
"MIT"
] | null | null | null |
ex050 # soma de inteiros pares for range .py
|
jbmarcos/Python-Curso-em-video-Mundo-1-2-3-
|
a5bd705b2437c281f8f7ac02dc7ff54a09a37046
|
[
"MIT"
] | null | null | null |
ex050 # soma de inteiros pares for range .py
|
jbmarcos/Python-Curso-em-video-Mundo-1-2-3-
|
a5bd705b2437c281f8f7ac02dc7ff54a09a37046
|
[
"MIT"
] | null | null | null |
# sum of even integers using for/range
soma = 0
cont = 0
print(' ')
for c in range(1, 7):
    num = int(input('Enter value {}: '.format(c)))
    if num % 2 == 0:
        soma = soma + num  # soma += num
        cont = cont + 1  # cont += 1
print(' ')
print('You entered {} EVEN numbers and their SUM was {}'.format(cont, soma))
| 26.583333
| 74
| 0.561129
|
c76c93bf2fb3a866c0727dbd5f19d444277f25d8
| 60,816
|
py
|
Python
|
semi_final/pytorch_toy/nezha_pytorch/helper/modeling.py
|
YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3
|
a79a8ae4bc0f8b2662f71df4caaa7fa382735f9f
|
[
"Apache-2.0"
] | 22
|
2021-06-04T13:01:08.000Z
|
2022-02-18T13:19:46.000Z
|
semi_final/pytorch_toy/nezha_pytorch/helper/modeling.py
|
YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3
|
a79a8ae4bc0f8b2662f71df4caaa7fa382735f9f
|
[
"Apache-2.0"
] | null | null | null |
semi_final/pytorch_toy/nezha_pytorch/helper/modeling.py
|
YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3
|
a79a8ae4bc0f8b2662f71df4caaa7fa382735f9f
|
[
"Apache-2.0"
] | 2
|
2021-06-06T09:41:08.000Z
|
2021-06-09T01:05:10.000Z
|
import math
import os
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from helper.configuration import NeZhaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.modeling_utils import PreTrainedModel, prune_linear_layer
from transformers.models.bert.modeling_bert import (
BertOutput,
BertPooler,
BertSelfOutput,
BertIntermediate,
BertOnlyMLMHead,
BertOnlyNSPHead,
BertPreTrainingHeads,
BERT_START_DOCSTRING,
BERT_INPUTS_DOCSTRING,
)
logger = logging.getLogger(__name__)
_CONFIG_FOR_DOC = "NeZhaConfig"
_TOKENIZER_FOR_DOC = "NeZhaTokenizer"
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST = []
NEZHA_PRETRAINED_MODEL_ARCHIVE_MAP = {}
def load_tf_weights_in_nezha(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
# logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "lamb_m", "lamb_v", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1",
"global_step", "good_steps", "loss_scale", 'bad_steps']
for n in name
):
logger.info("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info("Skipping {}".format("/".join(name)))
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
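# --- Editor's note: hedged conversion sketch, not part of the original file. ---
# Typical use of the loader above: build a randomly initialised NeZha model from a
# config, then overwrite its weights from a TensorFlow checkpoint. The two paths are
# hypothetical placeholders and NeZhaForPreTraining is defined further down this module,
# so the calls are left commented out here:
#
#     config = NeZhaConfig.from_json_file("/path/to/bert_config.json")
#     model = NeZhaForPreTraining(config)
#     model = load_tf_weights_in_nezha(model, config, "/path/to/model.ckpt")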
class NeZhaEmbeddings(nn.Module):
"""
    Construct the embeddings from word and token_type embeddings (position information is
    handled via relative position encodings in the attention layers).
"""
def __init__(self, config):
super().__init__()
self.use_relative_position = config.use_relative_position
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids=None, token_type_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def relative_position_encoding(depth, max_length=512, max_relative_position=127):
vocab_size = max_relative_position * 2 + 1
range_vec = torch.arange(max_length)
range_mat = range_vec.repeat(max_length).view(max_length, max_length)
distance_mat = range_mat - torch.t(range_mat)
distance_mat_clipped = torch.clamp(distance_mat, -max_relative_position, max_relative_position)
final_mat = distance_mat_clipped + max_relative_position
embeddings_table = torch.zeros(vocab_size, depth)
position = torch.arange(0, vocab_size, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, depth, 2).float() * (-math.log(10000.0) / depth))
embeddings_table[:, 0::2] = torch.sin(position * div_term)
embeddings_table[:, 1::2] = torch.cos(position * div_term)
embeddings_table = embeddings_table.unsqueeze(0).transpose(0, 1).squeeze(1)
flat_relative_positions_matrix = final_mat.view(-1)
one_hot_relative_positions_matrix = torch.nn.functional.one_hot(flat_relative_positions_matrix,
num_classes=vocab_size).float()
positions_encoding = torch.matmul(one_hot_relative_positions_matrix, embeddings_table)
my_shape = list(final_mat.size())
my_shape.append(depth)
positions_encoding = positions_encoding.view(my_shape)
return positions_encoding
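# --- Editor's note: hedged shape check added by the editor, not part of the original file. ---
# relative_position_encoding() returns one sinusoidal embedding per (query position,
# key position) pair, with the relative distance clipped to +/- max_relative_position,
# i.e. a tensor of shape (max_length, max_length, depth). Small illustrative sizes:
_example_rel = relative_position_encoding(depth=16, max_length=8, max_relative_position=4)
assert _example_rel.shape == (8, 8, 16)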
class NeZhaSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.output_attentions = config.output_attentions
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.relative_positions_encoding = relative_position_encoding(max_length=config.max_position_embeddings,
depth=self.attention_head_size,
max_relative_position=config.max_relative_position)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
if encoder_hidden_states is not None:
mixed_key_layer = self.key(encoder_hidden_states)
mixed_value_layer = self.value(encoder_hidden_states)
attention_mask = encoder_attention_mask
else:
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
batch_size, num_attention_heads, from_seq_length, to_seq_length = attention_scores.size()
relations_keys = self.relative_positions_encoding[:to_seq_length, :to_seq_length, :].to(hidden_states.device)
query_layer_t = query_layer.permute(2, 0, 1, 3)
query_layer_r = query_layer_t.contiguous().view(from_seq_length, batch_size * num_attention_heads,
self.attention_head_size)
key_position_scores = torch.matmul(query_layer_r, relations_keys.permute(0, 2, 1))
key_position_scores_r = key_position_scores.view(from_seq_length, batch_size,
num_attention_heads, from_seq_length)
key_position_scores_r_t = key_position_scores_r.permute(1, 2, 0, 3)
attention_scores = attention_scores + key_position_scores_r_t
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
relations_values = self.relative_positions_encoding[:to_seq_length, :to_seq_length, :].to(hidden_states.device)
attention_probs_t = attention_probs.permute(2, 0, 1, 3)
attentions_probs_r = attention_probs_t.contiguous().view(from_seq_length, batch_size * num_attention_heads,
to_seq_length)
value_position_scores = torch.matmul(attentions_probs_r, relations_values)
value_position_scores_r = value_position_scores.view(from_seq_length, batch_size,
num_attention_heads, self.attention_head_size)
value_position_scores_r_t = value_position_scores_r.permute(1, 2, 0, 3)
context_layer = context_layer + value_position_scores_r_t
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
return outputs
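# --- Editor's note: hedged shape trace for the relative-position attention terms above,
# added by the editor; the sizes are illustrative only. The relative keys/values are
# broadcast over a folded (batch * heads) dimension before being reshaped back. ---
import torch

_L, _BH, _d = 8, 2 * 4, 16
_query_layer_r = torch.randn(_L, _BH, _d)      # (from_seq_length, batch * num_heads, head_dim)
_relations_keys = torch.randn(_L, _L, _d)      # (from_seq_length, to_seq_length, head_dim)
_key_position_scores = torch.matmul(_query_layer_r, _relations_keys.permute(0, 2, 1))
assert _key_position_scores.shape == (_L, _BH, _L)   # later viewed/permuted to (batch, heads, from_seq, to_seq)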
class NeZhaAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = NeZhaSelfAttention(config)
self.output = BertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
heads = set(heads) - self.pruned_heads # Convert to set and remove already pruned heads
for head in heads:
# Compute how many pruned heads are before the head and move the index accordingly
head = head - sum(1 if h < head else 0 for h in self.pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
self_outputs = self.self(
hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class NeZhaLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = NeZhaAttention(config)
self.is_decoder = config.is_decoder
if self.is_decoder:
self.crossattention = NeZhaAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
if self.is_decoder and encoder_hidden_states is not None:
cross_attention_outputs = self.crossattention(
attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + outputs
return outputs
class NeZhaEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.layer = nn.ModuleList([NeZhaLayer(config) for _ in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
all_hidden_states = ()
all_attentions = ()
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
)
hidden_states = layer_outputs[0]
if self.output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs # last-layer hidden state, (all hidden states), (all attentions)
class NeZhaPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = NeZhaConfig
pretrained_model_archive_map = NEZHA_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_nezha
base_model_prefix = "bert"
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@add_start_docstrings(
"The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
BERT_START_DOCSTRING,
)
class NeZhaModel(NeZhaPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well
as a decoder, in which case a layer of cross-attention is added between
the self-attention layers, following the architecture described in `Attention is all you need`_ by Ashish Vaswani,
Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as a decoder, the model needs to be initialized with the
:obj:`is_decoder` argument of the configuration set to :obj:`True`; an
:obj:`encoder_hidden_states` is expected as an input to the forward pass.
.. _`Attention is all you need`:
https://arxiv.org/abs/1706.03762
"""
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = NeZhaEmbeddings(config)
self.encoder = NeZhaEncoder(config)
self.pooler = BertPooler(config)
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
head_mask=None,
position_ids=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during pre-training.
                This output is usually *not* a good summary of the semantic content of the input; you're often
                better off averaging or pooling the sequence of hidden-states for the whole input sequence.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import BertModel, BertTokenizer
import torch
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
attention_mask, input_shape, self.device
)
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
outputs = (sequence_output, pooled_output,) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
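# --- Editor's note: hedged usage sketch added by the editor. NeZhaConfig's defaults are
# defined in helper.configuration and are not shown in this file, so the calls below are
# left commented out; the token ids are illustrative placeholders. ---
#
#     config = NeZhaConfig()
#     model = NeZhaModel(config)
#     input_ids = torch.tensor([[101, 2054, 2003, 102]])
#     sequence_output, pooled_output = model(input_ids)[:2]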
@add_start_docstrings(
"""Bert Model with two heads on top as done during the pre-training: a `masked language modeling` head and
a `next sentence prediction (classification)` head. """,
BERT_START_DOCSTRING,
)
class NeZhaForPreTraining(NeZhaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = NeZhaModel(config)
self.cls = BertPreTrainingHeads(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
head_mask=None,
position_ids=None,
inputs_embeds=None,
labels=None,
next_sentence_label=None,
):
r"""
        labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):
Labels for computing the masked language modeling loss.
Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see :obj:`input_ids` docstring)
Indices should be in ``[0, 1]``.
``0`` indicates sequence B is a continuation of sequence A,
``1`` indicates sequence B is a random sequence.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
        loss (`optional`, returned when ``labels`` and ``next_sentence_label`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False
continuation before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import BertTokenizer, BertForPreTraining
import torch
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForPreTraining.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
prediction_scores, seq_relationship_scores = outputs[:2]
"""
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
# add hidden states and attention if they are here
outputs = (prediction_scores, seq_relationship_score,) + outputs[2:]
if labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
outputs = (total_loss,) + outputs
return outputs # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """, BERT_START_DOCSTRING)
class NeZhaForMaskedLM(NeZhaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = NeZhaModel(config)
self.cls = BertOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
head_mask=None,
position_ids=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
):
r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the masked language modeling loss.
Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the left-to-right language modeling loss (next word prediction).
Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
        masked_lm_loss (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Masked language modeling loss.
ltr_lm_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`lm_labels` is provided):
Next token prediction loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import BertTokenizer, BertForMaskedLM
import torch
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForMaskedLM.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids, masked_lm_labels=input_ids)
loss, prediction_scores = outputs[:2]
"""
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here
# Although this may seem awkward, BertForMaskedLM supports two scenarios:
# 1. If a tensor that contains the indices of masked labels is provided,
# the cross-entropy is the MLM cross-entropy that measures the likelihood
# of predictions for masked words.
# 2. If `lm_labels` is provided we are in a causal scenario where we
# try to predict the next token for each input in the decoder.
masked_lm_labels = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
outputs = (masked_lm_loss,) + outputs
return outputs # (ltr_lm_loss), (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
effective_batch_size = input_shape[0]
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
        # if the model does not use a causal mask then add a dummy token
if self.config.is_decoder is False:
assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"
attention_mask = torch.cat(
[attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1
)
dummy_token = torch.full(
(effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
)
input_ids = torch.cat([input_ids, dummy_token], dim=1)
return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
"""Bert Model with a `next sentence prediction (classification)` head on top. """, BERT_START_DOCSTRING,
)
class NeZhaForNextSentencePrediction(NeZhaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = NeZhaModel(config)
self.cls = BertOnlyNSPHead(config)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
head_mask=None,
position_ids=None,
inputs_embeds=None,
next_sentence_label=None,
):
r"""
next_sentence_label (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
Indices should be in ``[0, 1]``.
``0`` indicates sequence B is a continuation of sequence A,
``1`` indicates sequence B is a random sequence.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`next_sentence_label` is provided):
Next sequence prediction (classification) loss.
seq_relationship_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import BertTokenizer, BertForNextSentencePrediction
import torch
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
seq_relationship_scores = outputs[0]
"""
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
pooled_output = outputs[1]
seq_relationship_score = self.cls(pooled_output)
outputs = (seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here
if next_sentence_label is not None:
loss_fct = CrossEntropyLoss()
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
outputs = (next_sentence_loss,) + outputs
return outputs # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions)
@add_start_docstrings(
"""Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
BERT_START_DOCSTRING,
)
class NeZhaForSequenceClassification(NeZhaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = NeZhaModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the sequence classification/regression loss.
Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import BertTokenizer, BertForSequenceClassification
import torch
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, logits = outputs[:2]
"""
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), logits, (hidden_states), (attentions)
@add_start_docstrings(
"""Bert Model with a multiple choice classification head on top (a linear layer on top of
the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
BERT_START_DOCSTRING,
)
class NeZhaForMultipleChoice(NeZhaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = NeZhaModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
head_mask=None,
position_ids=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the multiple choice classification loss.
Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification loss.
classification_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
`num_choices` is the second dimension of the input tensors. (see `input_ids` above).
Classification scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import BertTokenizer, BertForMultipleChoice
import torch
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForMultipleChoice.from_pretrained('bert-base-uncased')
choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
input_ids = torch.tensor([tokenizer.encode(s, add_special_tokens=True) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
labels = torch.tensor(1).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, classification_scores = outputs[:2]
"""
num_choices = input_ids.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1))
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
outputs = (loss,) + outputs
return outputs # (loss), reshaped_logits, (hidden_states), (attentions)
@add_start_docstrings(
"""Bert Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
BERT_START_DOCSTRING,
)
class NeZhaForTokenClassification(NeZhaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = NeZhaModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
head_mask=None,
position_ids=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the token classification loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) :
Classification loss.
scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`)
Classification scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import BertTokenizer, BertForTokenClassification
import torch
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForTokenClassification.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, scores = outputs[:2]
"""
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), scores, (hidden_states), (attentions)
@add_start_docstrings(
"""Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """,
BERT_START_DOCSTRING,
)
class NeZhaForQuestionAnswering(NeZhaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = NeZhaModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
head_mask=None,
inputs_embeds=None,
position_ids=None,
start_positions=None,
end_positions=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
Span-start scores (before SoftMax).
end_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
Span-end scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import BertTokenizer, BertForQuestionAnswering
import torch
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForQuestionAnswering.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')
question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
encoding = tokenizer.encode_plus(question, text)
input_ids, token_type_ids = encoding["input_ids"], encoding["token_type_ids"]
start_scores, end_scores = model(torch.tensor([input_ids]), token_type_ids=torch.tensor([token_type_ids]))
all_tokens = tokenizer.convert_ids_to_tokens(input_ids)
answer = ' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1])
assert answer == "a nice puppet"
"""
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (start_logits, end_logits,) + outputs[2:]
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the position tensors may carry an extra dimension; squeeze it away
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions fall outside the model inputs; clamp them so the loss ignores those terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,) + outputs
return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
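# --- Illustrative usage sketch (not part of the original file) ---------------
# The docstring example above drives the stock BertForQuestionAnswering; the
# commented sketch below adapts it to the NeZhaForQuestionAnswering class
# defined in this module. The checkpoint path "path/to/nezha-qa-checkpoint"
# and the choice of BertTokenizer are assumptions for illustration only.
#
#   from transformers import BertTokenizer
#   tokenizer = BertTokenizer.from_pretrained("path/to/nezha-qa-checkpoint")
#   model = NeZhaForQuestionAnswering.from_pretrained("path/to/nezha-qa-checkpoint")
#   question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
#   enc = tokenizer.encode_plus(question, text, return_tensors="pt")
#   start_logits, end_logits = model(enc["input_ids"], token_type_ids=enc["token_type_ids"])[:2]
#   start, end = start_logits.argmax(dim=-1).item(), end_logits.argmax(dim=-1).item()
#   print(tokenizer.decode(enc["input_ids"][0, start:end + 1]))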
| 51.714286
| 150
| 0.650503
|
41927d1f834b9286e58b80acaf8a1e35e98071db
| 7,788
|
py
|
Python
|
radiopadre/settings_manager.py
|
ratt-ru/radiopadre
|
3bf934eba69144d9707777a57da0e827625517a3
|
[
"MIT"
] | 9
|
2019-08-08T12:32:20.000Z
|
2021-07-06T17:50:35.000Z
|
radiopadre/settings_manager.py
|
ratt-ru/radiopadre
|
3bf934eba69144d9707777a57da0e827625517a3
|
[
"MIT"
] | 70
|
2019-03-26T12:42:23.000Z
|
2022-02-14T13:45:03.000Z
|
radiopadre/settings_manager.py
|
ratt-ru/radiopadre
|
3bf934eba69144d9707777a57da0e827625517a3
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
from contextlib import contextmanager
_BASE = OrderedDict
class DocString(str):
"""Class used to identify documentation strings"""
pass
class Section(_BASE):
def __init__(self, name, doc=""):
super(Section, self).__init__()
self._name = name
self._docstring = doc
self._docs = {}
def __getattribute__(self, name):
if name[0] != "_" and name in self:
return self[name]
return _BASE.__getattribute__(self, name)
def __setattr__(self, key, value):
if key[0] == "_":
return _BASE.__setattr__(self, key, value)
if type(value) is tuple and len(value) == 2 and type(value[1]) is DocString:
_BASE.__getattribute__(self, '_docs')[key] = value[1]
value = value[0]
self[key] = value
def get(self, default=None, **kw):
if not kw:
raise RuntimeError("Section.get() must be called with at least one keyword argument")
retval = []
for key, value in kw.items():
if value is None:
value = _BASE.get(self, key)
if value is None:
value = default
retval.append(value)
if len(retval) == 1:
retval = retval[0]
return retval
@contextmanager
def __call__(self, **kw):
prev_values = { key:self[key] for key in kw.keys() if key in self }
new_values = set(kw.keys()) - set(self.keys())
self.update(**kw)
yield
self.update(**prev_values)
for key in new_values:
del self[key]
def __repr__(self):
txt = ""
for key, value in self.items():
txt += "{}.{} = {}\n".format(self._name, key, repr(value))
return txt
def _repr_table(self, data, styles, prefix=""):
styles["description"] = "padding-left: 32px"
styles[len(data)] = "border: 0px; border-bottom: 1px double; border-top: 1px double; background-color: #f2f2f2"
styles[len(data), "name"] = styles[len(data), "description"] = "text-align: center"
data.append(("<B>{}{}</B>".format(prefix, self._name), '', "{}".format(self._docstring)))
for key, value in self.items():
styles[len(data)] = "background-color: white"
data.append(("{}{}.{}".format(prefix, self._name, key), repr(value), self._docs.get(key, '')))
def _repr_html_(self):
from radiopadre import render
data = []
styles = {}
self._repr_table(data, styles)
styles["TABLE"] = "width: 100%"
return render.render_table(data, ("name", "value", "description"), html={"name","description"},
styles=styles, header=False, numbering=False)
def show(self):
from IPython.display import display,HTML
return display(HTML(self._repr_html_()))
class SettingsManager(object):
def __init__(self, name="settings"):
self._name = name
self._sections = OrderedDict()
def add_section(self, name, doc=""):
self._sections[name] = Section(name, doc)
setattr(self, name, self._sections[name])
return self._sections[name]
def __repr__(self):
txt = ""
for sec_name, section in self._sections.items():
if isinstance(section, Section):
for key, value in section.items():
txt += "{}.{}.{} = {}\n".format(self._name, sec_name, key, repr(value))
return txt
def _repr_html_(self):
from radiopadre import render
data = []
styles = {}
for sec_name, section in self._sections.items():
if isinstance(section, Section):
section._repr_table(data, styles, self._name+".")
return render.render_table(data, ("name", "value", "description"), html=set(["name","description"]),
styles=styles, header=False, numbering=False)
def show(self):
from IPython.display import display,HTML
return display(HTML(self._repr_html_()))
class RadiopadreSettingsManager(SettingsManager):
def __init__(self, name="settings"):
SettingsManager.__init__(self, name=name)
D = DocString
gen = self.add_section("gen", "general radiopadre settings") # generic settings
gen.twocolumn_list_width = 40, D("file lists will default to dual-column if all names are within this length")
gen.timeformat = "%H:%M:%S %b %d", D("time format")
gen.collapsible = True, D("enable collapsible displays by default")
gen.ncpu = 0, D("number of CPU cores to use, 0 to detect automatically ")
gen.max_ncpu = 32, D("max number of CPU cores to use (when detecting automatically)")
files = self.add_section("files", "file settings") # generic settings
# files.include = "*.jpg *.png *.fits *.txt *.log", D("filename patterns to include in the listings. If None, all files will be included")
files.include = None, D("filename patterns to include in the listings. If None, all files will be included")
files.exclude = None, D("patterns to explicitly exclude from the listings")
files.include_dir = None, D("subdirectory patterns to include in the listings. If None, all subdirectories will be included")
files.exclude_dir = None, D("subdirectory patterns to explicitly exclude from the listings")
files.include_empty = False, D("if True, empty subdirectories will also be included.")
files.show_hidden = False, D("if True, hidden files and subdirectories will also be included.")
display = self.add_section("display", "display settings, should be set up auto-magically") # generic settings
display.cell_width = 800, D("width of Jupyter cell output, in pixels")
display.window_width = 1024, D("width of browser window")
display.window_height = 768, D("height of browser window")
display.auto_reset = True, D("auto-reset when the browser window is resized")
plot = self.add_section("plot", "settings for rendering of plots")
# globally fix a plot width (in inches)
plot.width = None, D("fix a display plot width (in inches)")
plot.screen_dpi = 80, D("plot DPI")
thumb = self.add_section("thumb", "settings for rendering of thumbnails")
thumb.mincol = 2, D("minimum number of columns to display in thumbnail view")
thumb.maxcol = 4, D("maximum number of columns to display in thumbnail view")
thumb.width = 0, D("default thumbnail width, 0 to set automatically")
thumb.collapsed = None, D("if not None, makes thumbnail display collapsible")
fits = self.add_section("fits", "settings for rendering of FITS files")
fits.colormap = 'cubehelix', D("default FITS colormap")
fits.scale = 'linear', D("default FITS scaling")
fits.vmin = None, D("lower clip value")
fits.vmax = None, D("upper clip value")
fits.max_js9_slice = 2048, D("size of active segment for JS9 display of large images")
fits.js9_preview_size = 1024, D("size of preview image for JS9 display of large images")
text = self.add_section("text", "settings for rendering of text files")
text.head = 10, D("default number of lines to show from head of file")
text.tail = 10, D("default number of lines to show from tail of file")
text.fs = 0.8, D("font size for text display")
html = self.add_section("html", "settings for rendering of HTML thumbnails")
html.width = 1920, D("default width of HTML canvas")
html.height = 1024, D("default height of HTML canvas")
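# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal demo of the Section machinery defined above: tuple assignment
# attaches a DocString to a value, attribute access reads the value back, and
# calling a section as a context manager applies temporary overrides that are
# undone on exit. The block only runs when this file is executed directly.
if __name__ == "__main__":
    settings = RadiopadreSettingsManager()
    print(settings.gen.ncpu)        # 0, the documented default
    with settings.gen(ncpu=8):
        print(settings.gen.ncpu)    # 8 while the override is active
    print(settings.gen.ncpu)        # back to 0 after the block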
| 42.097297
| 151
| 0.61492
|
f8b1ae5d016446b762be2b796ba761a2f148edbb
| 2,331
|
py
|
Python
|
tvm/dmlc-core/tracker/dmlc_tracker/local.py
|
hj424/heterocl
|
e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b
|
[
"Apache-2.0"
] | 236
|
2019-05-19T01:48:11.000Z
|
2022-03-31T09:03:54.000Z
|
tvm/dmlc-core/tracker/dmlc_tracker/local.py
|
hj424/heterocl
|
e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b
|
[
"Apache-2.0"
] | 248
|
2019-05-17T19:18:36.000Z
|
2022-03-30T21:25:47.000Z
|
tvm/dmlc-core/tracker/dmlc_tracker/local.py
|
hj424/heterocl
|
e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b
|
[
"Apache-2.0"
] | 85
|
2019-05-17T20:09:27.000Z
|
2022-02-28T20:19:00.000Z
|
"""Submission job for local jobs."""
# pylint: disable=invalid-name
from __future__ import absolute_import
import sys
import os
import subprocess
import logging
from threading import Thread
from . import tracker
def exec_cmd(cmd, role, taskid, pass_env):
"""Execute the command line command."""
if cmd[0].find('/') == -1 and os.path.exists(cmd[0]) and os.name != 'nt':
cmd[0] = './' + cmd[0]
cmd = ' '.join(cmd)
env = os.environ.copy()
for k, v in pass_env.items():
env[k] = str(v)
env['DMLC_TASK_ID'] = str(taskid)
env['DMLC_ROLE'] = role
env['DMLC_JOB_CLUSTER'] = 'local'
num_retry = 0
if 'DMLC_NUM_ATTEMPT' in env:
        num_retry = int(env['DMLC_NUM_ATTEMPT'])  # env values are strings; convert before the arithmetic below
while True:
if os.name == 'nt':
ret = subprocess.call(cmd, shell=True, env=env)
else:
ret = subprocess.call(cmd, shell=True, executable='bash', env=env)
if ret == 0:
logging.debug('Thread %d exit with 0', taskid)
return
else:
num_retry -= 1
if num_retry >= 0:
continue
if os.name == 'nt':
sys.exit(-1)
else:
raise RuntimeError('Get nonzero return code=%d' % ret)
def submit(args):
"""Submit function of local jobs."""
def mthread_submit(nworker, nserver, envs):
"""
        Customized submit function that launches nworker + nserver local jobs, each running args.command.
        Note this can be a lambda function carrying additional parameters as input.
        Parameters
        ----------
        nworker: number of worker processes to start up
        nserver: number of server processes to start up
        envs: environment variables to be added to the started programs
"""
procs = {}
for i in range(nworker + nserver):
if i < nworker:
role = 'worker'
else:
role = 'server'
procs[i] = Thread(target=exec_cmd, args=(args.command, role, i, envs))
procs[i].setDaemon(True)
procs[i].start()
    # call tracker.submit with the worker/server counts, the per-job command, and the submit function above
tracker.submit(args.num_workers, args.num_servers, fun_submit=mthread_submit,
pscmd=(' '.join(args.command)))
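# --- Illustrative usage sketch (not part of the original file) ---------------
# submit() is normally driven by the dmlc-submit command line tool, which
# passes in an argparse namespace. The namespace below is a hypothetical
# stand-in showing the three attributes this module actually reads
# (num_workers, num_servers, command); the training command is just an example.
#
#   import argparse
#   args = argparse.Namespace(
#       num_workers=2,                     # local worker processes to launch
#       num_servers=1,                     # local server processes to launch
#       command=['python', 'train.py'],    # command each job runs
#   )
#   submit(args)                           # starts the tracker plus the local threads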
| 31.931507
| 94
| 0.577006
|
886e586deb4f9465644a1f94cad3bc984aecab2e
| 468
|
py
|
Python
|
patient/forms.py
|
ShawonBarman/Blood-and-Platelet-Management-System
|
0a1d4be41d42eca69dd8f8f3ed6ba7b15bcf5fc1
|
[
"MIT"
] | null | null | null |
patient/forms.py
|
ShawonBarman/Blood-and-Platelet-Management-System
|
0a1d4be41d42eca69dd8f8f3ed6ba7b15bcf5fc1
|
[
"MIT"
] | null | null | null |
patient/forms.py
|
ShawonBarman/Blood-and-Platelet-Management-System
|
0a1d4be41d42eca69dd8f8f3ed6ba7b15bcf5fc1
|
[
"MIT"
] | null | null | null |
from django import forms
from django.contrib.auth.models import User
from . import models
class PatientUserForm(forms.ModelForm):
class Meta:
model=User
fields=['first_name','last_name','username','password']
widgets = {
'password': forms.PasswordInput()
}
class PatientForm(forms.ModelForm):
class Meta:
model=models.Patient
fields=['age','bloodgroup','disease','address','mobile','profile_pic']
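# --- Illustrative usage sketch (not part of the original app) ----------------
# A hedged example of how these two forms are typically combined in a signup
# view: the auth User is saved first, then the Patient record is linked to it.
# The view name and the `user` relation on models.Patient are assumptions for
# illustration only.
#
#   def patient_signup_view(request):
#       user_form = PatientUserForm(request.POST or None)
#       patient_form = PatientForm(request.POST or None, request.FILES or None)
#       if user_form.is_valid() and patient_form.is_valid():
#           user = user_form.save()
#           user.set_password(user.password)   # hash the raw password before storing
#           user.save()
#           patient = patient_form.save(commit=False)
#           patient.user = user                # hypothetical FK to the auth user
#           patient.save()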
| 26
| 78
| 0.655983
|
3746b9498a2c270a0261af3d9a717c6c4bc38b9c
| 4,371
|
py
|
Python
|
RefNAAP.py
|
jiangweiyao/RefNAAP
|
b3ad097443233e191d6a211bdbd851583f1ba6ae
|
[
"Apache-1.1"
] | 2
|
2021-01-07T23:25:48.000Z
|
2021-04-27T23:05:49.000Z
|
RefNAAP.py
|
jiangweiyao/RefNAAP
|
b3ad097443233e191d6a211bdbd851583f1ba6ae
|
[
"Apache-1.1"
] | null | null | null |
RefNAAP.py
|
jiangweiyao/RefNAAP
|
b3ad097443233e191d6a211bdbd851583f1ba6ae
|
[
"Apache-1.1"
] | null | null | null |
#!/usr/bin/env python
import sys
import os
import glob
import re
from datetime import date
from gooey import Gooey, GooeyParser
import subprocess
from pathlib import Path
@Gooey(program_name='RefNAAP',
default_size=(720, 900),
progress_regex=r"^progress: (?P<current>\d+)/(?P<total>\d+)$",
progress_expr="current / total * 100")
def main():
local_path = os.path.dirname(os.path.realpath(__file__))
#print(local_path)
data_path = f"{local_path}"
scaffold_helper = f"{local_path}/scaffold_cutter.R"
gapfixer_helper = f"{local_path}/gapfixer.R"
now = date.today()
home = str(Path.home())
cli = GooeyParser(description="Reference Based Nanopore Amplicon Analysis Pipeline")
required_args = cli.add_argument_group("Input Output Location", gooey_options={'columns': 1, 'show_border': True})
required_args.add_argument('--InputFolder', help="Folder containing barcoded fastq", required=True, widget='DirChooser')
required_args.add_argument('--OutputFolder', help="Output Folder", required=False, default=f"{home}/refnaap_results/output_{now}", widget='DirChooser')
required_args.add_argument('--RefFile', help="Reference File ", required=False, default=f'{local_path}/Americas2.fasta', widget='FileChooser')
parser = cli.add_argument_group("Optional Arguments", gooey_options={'columns': 2, 'show_border': True})
parser.add_argument('--TopN', help="The top N reference sequences with the most depth are analyzed.", type=int, required=False, default=1)
parser.add_argument('--MinCov', help="Amplicon regions need a minimum of this average coverage number", type=int, required=False, default=1)
parser.add_argument('--Left', help="Bases to trim from left side of read", type=int, required=False, default=25)
parser.add_argument('--Right', help="Bases to trim from right side of read", type=int, required=False, default=25)
parser.add_argument('--Size', help="Filter reads less than this length", type=int, required=False, default=50)
parser.add_argument('--threads', help="Number of threads. More is faster if your computer supports it", type=int, required=False, default=4)
parser.add_argument('--verbose', help = "Keep Intermediate Files", required=False, widget='BlockCheckbox', action='store_true', gooey_options={ 'checkbox_label': "Yes" })
parser.add_argument('--model', help="Basecall Model", required=False, type=str, default='r10_min_high_g303')
args = cli.parse_args()
#Run fastqc and multiqc on all the fastq/fastq.gz files in the folder
subprocess.check_output(['python', local_path+'/fastqc_multiqc.py', '-i', args.InputFolder, '-o', args.OutputFolder+'/multiqc'])
subprocess.check_output(['cp', args.OutputFolder+'/multiqc/multiqc_report.html', args.OutputFolder+'/multiqc_report.html'])
    #Iterate over all the fastq/fastq.gz files
files = sorted([f for f in glob.glob(args.InputFolder+"/**", recursive = True) if re.search(r'(.*)\.((fastq|fq)(|\.gz))$', f)])
print(files)
OutputFolder = os.path.expanduser(args.OutputFolder)
for i in range(0, len(files)):
filec = files[i]
base = os.path.splitext(os.path.basename(filec))[0]
base = os.path.splitext(base)[0]
print(base)
filec2 = args.OutputFolder+'/'+"filtered/"+base+"_filtered.fastq"
#Trim and filter the reads
subprocess.check_output(['python', local_path+'/seqtk_sizefilter_trim.py', '-i', filec, '-o', filec2, '-l', str(args.Left), '-r', str(args.Right), '-s', str(args.Size)])
#Get assembly
subprocess.check_output(['python', local_path+'/refnaap_cli_helper.py', '-i', filec2, '-o', args.OutputFolder+'/assembly/'+base+"_assembly/", '-r', args.RefFile, '-t', str(args.threads), '--TopN', str(args.TopN), '--MinCov', str(args.MinCov)])
subprocess.check_output(['cp', args.OutputFolder+'/assembly/'+base+"_assembly/final_scaffold.fasta", args.OutputFolder+"/"+base+"_final_scaffold.fasta"])
print("progress: {}/{}".format(i+1, len(files)))
if not args.verbose:
subprocess.check_output(['rm', '-rf', args.OutputFolder+'/assembly'])
subprocess.check_output(['rm', '-rf', args.OutputFolder+'/filtered'])
subprocess.check_output(['rm', '-rf', args.OutputFolder+'/multiqc'])
if __name__ == "__main__":
sys.exit(main())
| 52.662651
| 251
| 0.692061
|
6d7c609c93822756c1a17a4bbdd10ea89ed97943
| 2,191
|
py
|
Python
|
vkwave/bots/core/dispatching/filters/base.py
|
tdakkota/vkwave
|
8d8f55a541f51ee76be398e0a646131697d3ba17
|
[
"MIT"
] | null | null | null |
vkwave/bots/core/dispatching/filters/base.py
|
tdakkota/vkwave
|
8d8f55a541f51ee76be398e0a646131697d3ba17
|
[
"MIT"
] | null | null | null |
vkwave/bots/core/dispatching/filters/base.py
|
tdakkota/vkwave
|
8d8f55a541f51ee76be398e0a646131697d3ba17
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from typing import Awaitable, Callable, NewType
from vkwave.bots.core.dispatching.events.base import BaseEvent
FilterResult = NewType("FilterResult", bool)
class BaseFilter(ABC):
@abstractmethod
async def check(self, event: BaseEvent) -> FilterResult:
...
def __and__(self, other: "BaseFilter") -> "AndFilter":
return AndFilter(self, other)
def __not__(self) -> "NotFilter":
return NotFilter(self)
def __or__(self, other: "BaseFilter") -> "OrFilter":
return OrFilter(self, other)
# sfilter: some filter
class NotFilter(BaseFilter):
def __init__(self, sfilter: BaseFilter):
self.func = sfilter
async def check(self, event: BaseEvent) -> FilterResult:
res = await self.func.check(event)
return FilterResult(not res)
class AndFilter(BaseFilter):
def __init__(self, *sfilters: BaseFilter):
self.funcs = sfilters
async def check(self, event: BaseEvent) -> FilterResult:
for func in self.funcs:
res = await func.check(event)
if not res:
return FilterResult(False)
return FilterResult(True)
class OrFilter(BaseFilter):
def __init__(self, *sfilters: BaseFilter):
self.funcs = sfilters
async def check(self, event: BaseEvent) -> FilterResult:
        # Pass as soon as any filter passes; fail only if none do.
        for func in self.funcs:
            if await func.check(event):
                return FilterResult(True)
        return FilterResult(False)
class SyncFuncFilter(BaseFilter):
"""It accepts lambda and sync functions."""
def __init__(self, func: Callable[[BaseEvent], bool]):
self.func = func
async def check(self, event: BaseEvent) -> FilterResult:
return FilterResult(self.func(event))
class AsyncFuncFilter(BaseFilter):
"""It accepts any callables that return awaitables."""
def __init__(self, func: Callable[[BaseEvent], Awaitable[bool]]):
self.func = func
async def check(self, event: BaseEvent) -> FilterResult:
return FilterResult(await self.func(event))
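# --- Illustrative usage sketch (not part of the original module) -------------
# A hedged example of composing the filters defined above through the
# overloaded & and | operators. The predicates are placeholders; any sync or
# async callable taking a BaseEvent works.
#
#   has_event = SyncFuncFilter(lambda event: event is not None)
#   async def always_true(event):
#       return True
#   both = has_event & AsyncFuncFilter(always_true)     # -> AndFilter
#   either = has_event | AsyncFuncFilter(always_true)   # -> OrFilter
#   # result = await both.check(event)                  # FilterResult(True/False)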
| 26.39759
| 69
| 0.639434
|
3b5a7096d7c86eb935e426d817f69396afa11d31
| 24,346
|
py
|
Python
|
atomixtest/entropy.py
|
atomix/atomix-test
|
519b70846c806b6b3e25151f0268fa1f1e53f8d8
|
[
"Apache-2.0"
] | 6
|
2018-02-24T00:04:30.000Z
|
2020-07-02T07:27:08.000Z
|
atomixtest/entropy.py
|
atomix/atomix-test
|
519b70846c806b6b3e25151f0268fa1f1e53f8d8
|
[
"Apache-2.0"
] | 2
|
2020-02-07T22:12:43.000Z
|
2020-02-09T11:12:17.000Z
|
atomixtest/entropy.py
|
atomix/atomix-test
|
519b70846c806b6b3e25151f0268fa1f1e53f8d8
|
[
"Apache-2.0"
] | null | null | null |
import json
import random
import sys
import time
import uuid
from abc import ABCMeta, abstractmethod
from atomix import AtomixClient
from collections import OrderedDict
from threading import Thread, Lock
from cluster import Cluster
from logging import logger
def _generate_test_name():
"""Generates a unique test name."""
return "entropy-test-" + str(uuid.uuid4())
def run(
name=None,
nodes=3,
configs=(),
version='latest',
dry_run=False,
processes=8,
scale=1000,
prime=0,
ops=1,
run_time=60,
functions=(),
function_delay=(15, 30)
):
"""Runs the entropy test."""
if name is None:
name = _generate_test_name()
# Initialize the test cluster.
cluster = _init_test_cluster(name, nodes, configs, version, dry_run)
# Create a history object with which to track history
history = History()
controller = Controller(cluster, functions, function_delay, history)
nodes = cluster.nodes()
primer = Primer(name, scale, history, cluster, prime)
if ops < processes:
processes = [Process(i+1, name, scale, history, 1 if i < ops else 0, run_time, nodes[i % len(nodes)]) for i in range(processes)]
else:
processes = [Process(i+1, name, scale, history, ops / processes, run_time, nodes[i % len(nodes)]) for i in range(processes)]
# Start the test.
_start_test(primer, controller, processes)
# Run the controller and processes until complete.
_block_until_complete(controller, processes)
# Shuts down the test cluster.
_teardown_test_cluster(cluster, history)
class DryCluster(object):
def __init__(self, name, version, nodes):
self.name = name
self._nodes = [DryNode(name + str(i+1), name + str(i+1), self, version, True) for i in range(nodes)]
def nodes(self):
return self._nodes
def __getattr__(self, name):
try:
return super(DryCluster, self).__getattr__(name)
except AttributeError:
return lambda *args, **kwargs: None
class DryNode(object):
def __init__(self, name, ip, cluster, version, bootstrap):
self.name = name
self.ip = ip
self.version = version
self.bootstrap = bootstrap
self.http_port = 5678
self.tcp_port = 5679
self.cluster = cluster
self.client = DryClient(port=self.http_port)
def __getattr__(self, name):
try:
return super(DryNode, self).__getattr__(name)
except AttributeError:
try:
return getattr(self.client, name)
except AttributeError:
return lambda *args, **kwargs: None
def __str__(self):
return self.name
class DryClient(AtomixClient):
"""Atomix test client."""
def __init__(self, host='127.0.0.1', port=5678):
super(DryClient, self).__init__(host, port)
def get(self, path, headers=None, *args, **kwargs):
logger.debug('GET {}'.format(path.format(*args, **kwargs)))
def post(self, path, data=None, headers=None, *args, **kwargs):
logger.debug('POST {}'.format(path.format(*args, **kwargs)))
def put(self, path, data=None, headers=None, *args, **kwargs):
logger.debug('PUT {}'.format(path.format(*args, **kwargs)))
def delete(self, path, headers=None, *args, **kwargs):
logger.debug('DELETE {}'.format(path.format(*args, **kwargs)))
def _init_test_cluster(name, nodes=3, configs=(), version='latest', dry_run=False):
"""Initializes a test cluster."""
if dry_run:
return DryCluster(name, version, nodes)
cluster = Cluster(name)
cluster.setup(*configs, nodes=nodes, version=version, trace=True)
return cluster
def _teardown_test_cluster(cluster, history):
"""Shuts down the test cluster."""
if history.count('fail') > 0:
cluster.shutdown()
else:
cluster.teardown()
def _start_test(primer, controller, processes):
"""Starts the test threads."""
primer.run()
for process in processes:
process.start()
controller.start()
def _block_until_complete(controller, processes):
"""Runs the given controller and processes until complete."""
while True:
# If any process is still running, sleep and then continue to the next iteration of the loop.
if len([process for process in processes if process.is_running()]) == 0:
# Once all processes have completed, stop the controller.
controller.stop()
# Wait for the controller thread to complete to ensure partitions are healed and crashed nodes are recovered.
if not controller.is_running():
break
# If we haven't broken out of the loop by now, sleep and then check again.
time.sleep(1)
class History(object):
"""Records and logs the history of operations.
This object directly mimics the format expected by the Knossos linearizability checker. Events are logged in
edn format, and str(history) will return the full history in edn format.
"""
def __init__(self):
self.entries = []
def record(self, entry):
"""Records an entry in the history."""
self.entries.append(entry)
message = '[{}] {} {} ({})'.format(entry.process, entry.action, entry.operation, ', '.join([str(value) for value in entry.values]))
if entry.action == 'invoke':
logger.warn(message)
elif entry.action == 'ok':
logger.debug(message)
elif entry.action == 'fail':
logger.error(message)
elif entry.action == 'function':
logger.info(message)
def count(self, action):
"""Returns the number of entries for the given action."""
return len([entry for entry in self.entries if entry.action == action])
def __str__(self):
return json.dumps([entry.format() for entry in self.entries])
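# Illustrative note (not part of the original class): str(History()) serializes
# entries through HistoryEntry.format(), so a single recorded write produces
# entries shaped roughly like
#   {"process": 1, "type": "invoke", "function": "write", "value": ["key", "val"]}
# followed by a matching {"type": "ok", ...} or {"type": "fail", ...} record.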
class HistoryEntry(object):
"""History entry."""
__metaclass__ = ABCMeta
def format(self):
return OrderedDict([
('process', self.process),
('type', self.action),
('function', self.operation),
('value', list(self.values))
])
def __str__(self):
return json.dumps(self.format())
class ProcessEntry(HistoryEntry):
"""Process entry."""
def __init__(self, process, action, operation, *values):
self.process = process
self.action = action
self.operation = operation
self.values = values
class ControllerEntry(HistoryEntry):
"""Controller history entry."""
def __init__(self, event, message):
self.process = 'controller'
self.action = 'function'
self.operation = event
self.values = (message,)
self.event = event
self.message = message
class Runnable(object):
"""Base class for managing the lifecycle of a threaded test process."""
__metaclass__ = ABCMeta
def __init__(self):
self.thread = None
self.running = False
def start(self):
"""Starts the runnable thread."""
self.thread = Thread(target=self.run)
self.thread.daemon = True
self.running = True
self.thread.start()
@abstractmethod
def run(self):
"""Runs the thread. This method should be overridden by implementors."""
def is_running(self):
"""Returns a boolean indicating whether the runnable is running."""
return self.running or self.thread.is_alive()
def stop(self):
"""Stops the runnable thread.
Calling this method will not immediately stop the thread. Instead, a flag will be set, and the run() method
is expected to exit according to the 'running' flag. Use 'is_running()' to determine whether the thread is
stopped and has exited.
"""
self.running = False
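# Illustrative note (not part of the original class): a minimal Runnable
# subclass cooperates with stop() by polling the flag from its run loop, e.g.
#
#   class Ticker(Runnable):
#       def run(self):
#           while self.running:      # exits shortly after stop() clears the flag
#               time.sleep(1)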
class Operator(Runnable):
"""Base class for runnables that operate on the cluster state."""
CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
VALUES = [''.join([CHARS[random.randint(0, len(CHARS)-1)] for _ in range(1024)]) for _ in range(1000)]
def __init__(self, id, name, scale, history):
super(Operator, self).__init__()
self.id = id
self.name = name
self._keys = [str(uuid.uuid4()) for _ in range(scale)]
self.history = history
self.operations = tuple()
def _run(self):
"""Runs a random operation."""
try:
return random.choice(self.operations)()
except:
pass
def _random_node(self):
"""Returns a random node on which to perform an operation."""
return random.choice(self.cluster.nodes())
def _random_key(self):
"""Returns a random key to get or set."""
return random.choice(self._keys)
def _random_value(self):
"""Returns the next random value to set."""
return random.choice(self.VALUES)
def _log(self, action, operation, *values):
"""Logs an operation."""
self.history.record(ProcessEntry(self.id, action, operation, *values))
def _invoke(self, operation, *values):
"""Logs an operation invocation event in the process history."""
self._log('invoke', operation, *values)
def _ok(self, operation, *values):
"""Logs an operation success event in the process history."""
self._log('ok', operation, *values)
return True
def _fail(self, operation, *values):
"""Logs an operation failure event in the process history."""
self._log('fail', operation, *values)
return True
def _function(self, operation, *values):
"""Logs an operation function event in the process history and stops the process."""
self._log('function', operation, *values)
self.stop()
return False
class Primer(Operator):
def __init__(self, name, scale, history, cluster, prime=0):
super(Primer, self).__init__('primer', name, scale, history)
self.cluster = cluster
self.prime = prime
self._lock = Lock()
self._count = 0
def _invoke(self, operation, *values):
"""Logs an operation invocation event in the process history."""
def _ok(self, operation, *values):
"""Logs an operation success event in the process history."""
return True
def _fail(self, operation, *values):
"""Logs an operation failure event in the process history."""
return True
def run(self):
"""Runs the primer."""
self._function('prime', self.prime)
if self.prime == 0:
return
threads = []
for _ in range(32):
thread = Thread(target=self._run)
thread.setDaemon(True)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
def _run(self):
"""Runs a thread."""
while True:
self._lock.acquire()
try:
count = self._count + 1
if count <= self.prime:
self._count = count
else:
return
finally:
self._lock.release()
key, value = self._random_key(), self._random_value()
self._random_node().map(self.name).put(key, value)
class Process(Operator):
"""Test runner for a single process.
A process simulates operations from a single actor in the cluster. When the process is started, it will begin
performing random read, write, or cas operations, sleeping for random intervals between operations. Each operation
performed by the process will be logged in the History object provided to the constructor. The process runs for a
predefined number of operations or until an operation fails with an unknown error (e.g. a timeout).
"""
def __init__(self, id, name, scale, history, ops, run_time, node):
super(Process, self).__init__(id, name, scale, history)
self.run_time = run_time
self.node = node
self.operations = (self.read, self.write, self.delete)
self.start_time = None
self.ops = ops
self._op = 0
self._remaining = 1.0
def run(self):
"""Runs the process."""
if self.ops > 0:
self.start_time = time.time()
while True:
self._wait()
self._run()
self._check_stop()
if not self.running:
break
def _check_stop(self):
"""Checks whether the run time has completed."""
if time.time() - self.start_time > self.run_time:
self.stop()
def _wait(self):
"""Blocks for a uniform random delay according to the process configuration."""
self._op += 1
if self._op < self.ops:
sleep = random.uniform(0, self._remaining / 2)
self._remaining -= sleep
time.sleep(sleep)
else:
sleep = self._remaining
self._op = 0
self._remaining = 1.0
time.sleep(sleep)
def read(self):
"""Executes a read operation."""
key = self._random_key()
self._invoke('read', key)
try:
return self._ok('read', key, self.node.map(self.name).get(key))
except:
return self._fail('read', key)
def write(self):
"""Executes a write operation."""
key, value = self._random_key(), self._random_value()
self._invoke('write', key, value)
try:
self.node.map(self.name).put(key, value)
return self._ok('write', key, value)
except:
return self._fail('write', key, value)
def delete(self):
"""Executes a delete operation."""
key = self._random_key()
self._invoke('delete', key)
try:
self.node.map(self.name).remove(key)
return self._ok('delete', key)
except:
return self._fail('delete', key)
class Controller(Runnable):
"""Cluster controller.
The controller periodically disrupts the cluster using a random disruptor function to e.g. partition the network,
crash a node, or slow communication within the network. The disruptor guarantees that only one disruptor function
will run at any given time and the previous disruptor will be healed prior to the next disruptor beginning.
The disruptor sleeps for a uniform random interval between disruptor functions.
"""
def __init__(self, cluster, functions, function_delay, history):
super(Controller, self).__init__()
self.cluster = cluster
self.function_delay = function_delay
self.history = history
self.functions = []
for func in functions:
try:
self.functions.append((getattr(self, func[0]), func[1], func[2]))
except AttributeError:
print "Unknown entropy function %s" % (func[0],)
sys.exit(1)
def run(self):
"""Runs the controller until stopped."""
if len(self.functions) > 0:
while self.running:
self._run()
def _run(self):
"""Runs a random function."""
function, delay, args = random.choice(self.functions)
self._wait(*delay)
if self.running:
function(**dict(args))
def _wait(self, start=None, end=None):
"""Waits for a uniform random delay."""
if start is None:
time.sleep(random.uniform(self.function_delay[0], self.function_delay[1]))
elif end is None:
time.sleep(start)
else:
time.sleep(random.uniform(start, end))
def _random_node(self):
"""Returns a random node on which to perform an operation."""
return random.choice(self.cluster.nodes())
def _log(self, event, message):
"""Logs an event in the function history."""
self.history.record(ControllerEntry(event, message))
def _enter(self, function):
"""Logs a start event in the function history."""
self._log('enter', function)
def _exit(self, function):
"""Logs a stop event in the function history."""
self._log('exit', function)
def _partition(self, node1, node2):
"""Partitions node1 from node2."""
node1.partition(node2)
def _isolate(self, node):
"""Isolates the given node from all other nodes."""
for peer in self.cluster.nodes():
if node.name != peer.name:
self._partition(node, peer)
def _partition_halves(self):
"""Partitions the cluster into two halves."""
nodes = self.cluster.nodes()
for i in range(len(nodes)):
for j in range(len(nodes)):
if i != j and i % 2 == 0 and j % 2 == 1:
nodes[i].partition(nodes[j])
nodes[j].partition(nodes[i])
def _partition_bridge(self, node):
"""Partitions the cluster with the given node as a bridge between two halves."""
nodes = self.cluster.nodes()
for i in range(len(nodes)):
for j in range(len(nodes)):
if i != j and nodes[i].name != node.name and nodes[j].name != node.name and i % 2 == 0 and j % 2 == 1:
nodes[i].partition(nodes[j])
nodes[j].partition(nodes[i])
def _heal(self, node1=None, node2=None):
"""Heals a partition between two nodes or between all nodes if the given nodes are None."""
if node1 is not None and node2 is not None:
node1.heal(node2)
node2.heal(node1)
elif node1 is not None:
for node2 in self.cluster.nodes():
if node1.name != node2.name:
node1.heal(node2)
else:
for node1 in self.cluster.nodes():
for node2 in self.cluster.nodes():
if node1.name != node2.name:
node1.heal(node2)
def _crash(self, node):
"""Crashes the given node."""
node.kill()
def _recover(self, node):
"""Recovers the given node from a crash."""
node.recover()
def _delay(self, node=None, latency=100):
"""Delays communication from all nodes or from the given node if specified."""
if node is not None:
node.delay(latency=latency)
else:
for node in self.cluster.nodes():
node.delay(latency=latency)
def _restore(self, node=None):
"""Restores communication on all nodes or on the given node if specified."""
if node is not None:
node.restore()
else:
for node in self.cluster.nodes():
node.restore()
def _shutdown(self):
"""Shuts down the entire cluster."""
self.cluster.shutdown()
def _startup(self):
"""Starts up the entire cluster."""
self.cluster.startup()
def _stress_cpu(self, node=None, processes=1):
if node is not None:
node.stress(cpu=processes)
else:
for node in self.cluster.nodes():
node.stress(cpu=processes)
def _stress_io(self, node=None, processes=1):
if node is not None:
node.stress(io=processes)
else:
for node in self.cluster.nodes():
node.stress(io=processes)
def _stress_memory(self, node=None, processes=1):
if node is not None:
node.stress(memory=processes)
else:
for node in self.cluster.nodes():
node.stress(memory=processes)
def _destress(self, node=None):
if node is not None:
node.destress()
else:
for node in self.cluster.nodes():
node.destress()
def partition_random(self):
"""Partitions two random nodes from each other."""
node1 = self._random_node()
node2 = node1
while node2 == node1:
node2 = self._random_node()
self._enter("Cut off %s->%s" % (node1, node2))
self._partition(node1, node2)
self._wait()
self._heal(node1, node2)
self._exit("Fully connected")
def isolate_random(self, start=15, end=30):
"""Isolates a random node from all other nodes."""
node = self._random_node()
self._enter("Isolate %s" % (node,))
self._isolate(node)
self._wait(start, end)
self._heal(node)
self._exit("Fully connected")
def partition_halves(self, start=15, end=30):
"""Partitions the cluster into two halves."""
self._enter("Partitioning network into two halves")
self._partition_halves()
self._wait(start, end)
self._heal()
self._exit("Fully connected")
def partition_bridge(self, start=15, end=30):
"""Partitions the cluster into two halves with a bridge between them."""
node = self._random_node()
self._enter("Partitioning network with bridge %s" % (node,))
self._partition_bridge(node)
self._wait(start, end)
self._heal()
self._exit("Fully connected")
def crash_random(self, start=15, end=30):
"""Crashes a random node."""
node = self._random_node()
self._enter("Crashing %s" % (node,))
self._crash(node)
self._wait(start, end)
self._recover(node)
self._exit("Recovered %s" % (node,))
def delay(self, latency=100, start=15, end=30):
"""Delays messages on all nodes."""
self._enter("Delay communication on all nodes")
self._delay(latency=latency)
self._wait(start, end)
self._restore()
self._exit("Communication restored")
def delay_random(self, latency=100, start=15, end=30):
"""Delays communication on a random node."""
node = self._random_node()
self._enter("Delay communication on %s" % (node,))
self._delay(node, latency=latency)
self._wait(start, end)
self._restore(node)
self._exit("Communication restored on %s" % (node,))
def restart(self):
"""Restarts the entire cluster."""
self._enter("Restarting cluster")
self._shutdown()
self._wait()
self._startup()
self._exit("Cluster restarted")
def stress_cpu(self, processes=1, start=15, end=30):
self._enter("Increase CPU usage on all nodes")
self._stress_cpu(processes=processes)
self._wait(start, end)
self._destress()
self._exit("CPU usage reduced on all nodes")
def stress_io(self, processes=1, start=15, end=30):
self._enter("Increase I/O on all nodes")
self._stress_io(processes=processes)
self._wait(start, end)
self._destress()
self._exit("I/O reduced on all nodes")
def stress_memory(self, processes=1, start=15, end=30):
self._enter("Increase memory usage on all nodes")
self._stress_memory(processes=processes)
self._wait(start, end)
self._destress()
self._exit("Memory usage reduced on all nodes")
def stress_cpu_random(self, processes=1, start=15, end=30):
node = self._random_node()
self._enter("Increase CPU usage on %s" % (node,))
self._stress_cpu(node, processes)
self._wait(start, end)
self._destress(node)
self._exit("CPU usage reduced on %s" % (node,))
def stress_io_random(self, processes=1, start=15, end=30):
node = self._random_node()
self._enter("Increase I/O on %s" % (node,))
self._stress_io(node, processes)
self._wait(start, end)
self._destress(node)
self._exit("I/O reduced on %s" % (node,))
def stress_memory_random(self, processes=1, start=15, end=30):
node = self._random_node()
self._enter("Increase memory usage on %s" % (node,))
self._stress_memory(node, processes)
self._wait(start, end)
self._destress(node)
self._exit("Memory usage reduced on %s" % (node,))
| 33.673582
| 139
| 0.599811
|
0b55efb902cabb2d9bd31ce6e1b66f8f63ffbd8b
| 7,390
|
py
|
Python
|
pychron/envisage/tasks/advanced_editor_area_pane.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 31
|
2016-03-07T02:38:17.000Z
|
2022-02-14T18:23:43.000Z
|
pychron/envisage/tasks/advanced_editor_area_pane.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 1,626
|
2015-01-07T04:52:35.000Z
|
2022-03-25T19:15:59.000Z
|
pychron/envisage/tasks/advanced_editor_area_pane.py
|
UIllinoisHALPychron/pychron
|
f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc
|
[
"Apache-2.0"
] | 26
|
2015-05-23T00:10:06.000Z
|
2022-03-07T16:51:57.000Z
|
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from functools import cmp_to_key
from pyface import confirmation_dialog
from pyface.constant import NO
from pyface.qt import QtGui
from pyface.tasks.advanced_editor_area_pane import AdvancedEditorAreaPane
from pyface.ui.qt4.tasks.advanced_editor_area_pane import EditorAreaWidget
from pyface.ui.qt4.tasks.editor_area_pane import EditorAreaDropFilter
# ============= standard library imports ========================
import sys
from pyface.qt import QtCore
from pyface.qt.QtGui import QAction, QCursor
from six.moves import range
# ============= local library imports ==========================
# class myEditorWidget(EditorWidget):
# def __init__(self, editor, parent=None):
# super(EditorWidget, self).__init__(parent)
# self.editor = editor
# self.editor.create(self)
# self.setAllowedAreas(QtCore.Qt.LeftDockWidgetArea)
# self.setFeatures(QtGui.QDockWidget.NoDockWidgetFeatures)
# self.setWidget(editor.control)
# self.update_title()
#
# # Update the minimum size.
# contents_minsize = editor.control.minimumSize()
# style = self.style()
# contents_minsize.setHeight(contents_minsize.height()
# + style.pixelMetric(style.PM_DockWidgetHandleExtent))
# self.setMinimumSize(contents_minsize)
#
# self.dockLocationChanged.connect(self.update_title_bar)
# self.visibilityChanged.connect(self.update_title_bar)
#
# # print self.setTitleBarWidget()
# # print self.titleBarWidget()
# def update_title_bar(self):
# if self not in self.parent()._tear_widgets:
# tabbed = self.parent().tabifiedDockWidgets(self)
# self.set_title_bar(not tabbed)
# current = self.titleBarWidget()
# current.setTabsClosable(False)
class myEditorAreaWidget(EditorAreaWidget):
def contextMenuEvent(self, event):
epos = event.pos()
if epos.y() > 25:
return
menu = QtGui.QMenu(self)
for name, func in (
("Close", "close_action"),
("Close All", "close_all_action"),
("Close Others", "close_others_action"),
):
act = QAction(name, self)
act.triggered.connect(getattr(self, func))
menu.addAction(act)
menu.exec_(event.globalPos())
def close_action(self):
current = self._get_closest_editor()
if current:
current.editor.close()
def get_dock_widgets_ordered(self, visible_only=False):
"""Gets all dock widgets in left-to-right, top-to-bottom order."""
def cmp(a, b):
return (a > b) - (a < b)
def compare(one, two):
y = cmp(one.pos().y(), two.pos().y())
return cmp(one.pos().x(), two.pos().x()) if y == 0 else y
children = []
for child in self.children():
if (
child.isWidgetType()
and child.isVisible()
and (
(isinstance(child, QtGui.QTabBar) and not visible_only)
or (
isinstance(child, QtGui.QDockWidget)
and (visible_only or not self.tabifiedDockWidgets(child))
)
)
):
children.append(child)
children = sorted(children, key=cmp_to_key(compare))
# children.sort(cmp=compare)
widgets = []
for child in children:
if isinstance(child, QtGui.QTabBar):
widgets.extend(self.get_dock_widgets_for_bar(child))
else:
widgets.append(child)
return widgets
def close_all_action(self):
for di in self.get_dock_widgets():
di.editor.close()
def close_others_action(self):
current = self._get_closest_editor()
if current:
for di in self.get_dock_widgets():
if di != current:
di.editor.close()
def _get_closest_editor(self):
pos = QCursor.pos()
key = lambda w: QtGui.QVector2D(pos - w.pos()).lengthSquared()
all_widgets = self.get_dock_widgets()
if all_widgets:
return min(all_widgets, key=key)
class myAdvancedEditorAreaPane(AdvancedEditorAreaPane):
# def add_editor(self, editor):
# """ Adds an editor to the pane.
# """
# editor.editor_area = self
# editor_widget = EditorWidget(editor, self.control)
# self.control.add_editor_widget(editor_widget)
# self.editors.append(editor)
def create(self, parent):
"""Create and set the toolkit-specific control that represents the
pane.
"""
self.control = control = myEditorAreaWidget(self, parent)
self._filter = EditorAreaDropFilter(self)
self.control.installEventFilter(self._filter)
# Add shortcuts for scrolling through tabs.
if sys.platform == "darwin":
next_seq = "Ctrl+}"
prev_seq = "Ctrl+{"
else:
next_seq = "Ctrl+PgDown"
prev_seq = "Ctrl+PgUp"
shortcut = QtGui.QShortcut(QtGui.QKeySequence(next_seq), self.control)
shortcut.activated.connect(self._next_tab)
shortcut = QtGui.QShortcut(QtGui.QKeySequence(prev_seq), self.control)
shortcut.activated.connect(self._previous_tab)
# Add shortcuts for switching to a specific tab.
mod = "Ctrl+" if sys.platform == "darwin" else "Alt+"
mapper = QtCore.QSignalMapper(self.control)
mapper.mapped.connect(self._activate_tab)
for i in range(1, 10):
sequence = QtGui.QKeySequence(mod + str(i))
shortcut = QtGui.QShortcut(sequence, self.control)
shortcut.activated.connect(mapper.map)
mapper.setMapping(shortcut, i - 1)
def remove_editor(self, editor):
"""Removes an editor from the pane."""
editor_widget = editor.control.parent()
if editor.dirty:
ret = confirmation_dialog.confirm(
editor_widget,
'Unsaved changes to "{}". '
"Do you want to continue".format(editor.name),
)
if ret == NO:
return
self.editors.remove(editor)
self.control.remove_editor_widget(editor_widget)
editor.editor_area = None
if not self.editors:
self.active_editor = None
# ============= EOF =============================================
| 35.873786
| 81
| 0.591204
|
12c6320e6a8d6e9e7eea116936a0c7daad92468e
| 5,067
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway_info.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway_info.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway_info.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: ec2_customer_gateway_info
version_added: 1.0.0
short_description: Gather information about customer gateways in AWS
description:
- Gather information about customer gateways in AWS.
- This module was called C(ec2_customer_gateway_facts) before Ansible 2.9. The usage did not change.
requirements: [ boto3 ]
author: Madhura Naniwadekar (@Madhura-CSI)
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeCustomerGateways.html) for possible filters.
type: dict
customer_gateway_ids:
description:
- Get details of a specific customer gateways using customer gateway ID/IDs. This value should be provided as a list.
type: list
elements: str
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
'''
EXAMPLES = r'''
# # Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Gather information about all customer gateways
community.aws.ec2_customer_gateway_info:
- name: Gather information about a filtered list of customer gateways, based on tags
community.aws.ec2_customer_gateway_info:
region: ap-southeast-2
filters:
"tag:Name": test-customer-gateway
"tag:AltName": test-customer-gateway-alt
register: cust_gw_info
- name: Gather information about a specific customer gateway by specifying customer gateway ID
community.aws.ec2_customer_gateway_info:
region: ap-southeast-2
customer_gateway_ids:
- 'cgw-48841a09'
- 'cgw-fec021ce'
register: cust_gw_info
'''
RETURN = r'''
customer_gateways:
description: List of one or more customer gateways.
returned: always
type: list
sample: [
{
"bgp_asn": "65000",
"customer_gateway_id": "cgw-fec844ce",
"customer_gateway_name": "test-customer-gw",
"ip_address": "110.112.113.120",
"state": "available",
"tags": [
{
"key": "Name",
"value": "test-customer-gw"
}
],
"type": "ipsec.1"
}
]
'''
import json
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # caught by AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
boto3_tag_list_to_ansible_dict,
camel_dict_to_snake_dict,
)
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
def list_customer_gateways(connection, module):
params = dict()
params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
params['CustomerGatewayIds'] = module.params.get('customer_gateway_ids')
try:
result = json.loads(json.dumps(connection.describe_customer_gateways(**params), default=date_handler))
except (ClientError, BotoCoreError) as e:
module.fail_json_aws(e, msg="Could not describe customer gateways")
snaked_customer_gateways = [camel_dict_to_snake_dict(gateway) for gateway in result['CustomerGateways']]
if snaked_customer_gateways:
for customer_gateway in snaked_customer_gateways:
customer_gateway['tags'] = boto3_tag_list_to_ansible_dict(customer_gateway.get('tags', []))
customer_gateway_name = customer_gateway['tags'].get('Name')
if customer_gateway_name:
customer_gateway['customer_gateway_name'] = customer_gateway_name
module.exit_json(changed=False, customer_gateways=snaked_customer_gateways)
def main():
argument_spec = dict(
customer_gateway_ids=dict(default=[], type='list', elements='str'),
filters=dict(default={}, type='dict')
)
module = AnsibleAWSModule(argument_spec=argument_spec,
mutually_exclusive=[['customer_gateway_ids', 'filters']],
supports_check_mode=True)
if module._module._name == 'ec2_customer_gateway_facts':
module._module.deprecate("The 'ec2_customer_gateway_facts' module has been renamed to 'ec2_customer_gateway_info'",
date='2021-12-01', collection_name='community.aws')
connection = module.client('ec2')
list_customer_gateways(connection, module)
if __name__ == '__main__':
main()
| 36.192857
| 125
| 0.663114
|
e8e9fc41d74a6a42ee7952716b1b064100009433
| 1,017
|
py
|
Python
|
django_react_paypal/contrib/sites/migrations/0003_set_site_domain_and_name.py
|
justdjango/django_react_paypal
|
d3aa6a16ff0bf08d30ce79204a37d8bb7b806bd5
|
[
"MIT"
] | 11
|
2021-08-15T17:56:16.000Z
|
2022-02-08T19:48:58.000Z
|
django_react_paypal/contrib/sites/migrations/0003_set_site_domain_and_name.py
|
justdjango/django_react_paypal
|
d3aa6a16ff0bf08d30ce79204a37d8bb7b806bd5
|
[
"MIT"
] | null | null | null |
django_react_paypal/contrib/sites/migrations/0003_set_site_domain_and_name.py
|
justdjango/django_react_paypal
|
d3aa6a16ff0bf08d30ce79204a37d8bb7b806bd5
|
[
"MIT"
] | 1
|
2022-01-26T13:35:33.000Z
|
2022-01-26T13:35:33.000Z
|
"""
To understand why this file is here, please read:
http://cookiecutter-django.readthedocs.io/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django
"""
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "example.com",
"name": "Django React PayPal",
},
)
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID, defaults={"domain": "example.com", "name": "example.com"}
)
class Migration(migrations.Migration):
dependencies = [("sites", "0002_alter_domain_unique")]
operations = [migrations.RunPython(update_site_forward, update_site_backward)]
| 29.057143
| 129
| 0.687316
|
164f09a787532e70f38fa12435ae6cfa7df204dd
| 803
|
py
|
Python
|
ballast/compat.py
|
justincsmith/ballast
|
083b2fa649321f85ab6d5ff686c2d61917a91b7e
|
[
"Apache-2.0"
] | 1
|
2017-08-18T19:46:23.000Z
|
2017-08-18T19:46:23.000Z
|
ballast/compat.py
|
justincsmith/ballast
|
083b2fa649321f85ab6d5ff686c2d61917a91b7e
|
[
"Apache-2.0"
] | 2
|
2017-08-18T20:00:36.000Z
|
2017-08-18T20:49:19.000Z
|
ballast/compat.py
|
justincsmith/ballast
|
083b2fa649321f85ab6d5ff686c2d61917a91b7e
|
[
"Apache-2.0"
] | 3
|
2017-08-18T19:48:50.000Z
|
2021-03-22T07:20:08.000Z
|
import sys
PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2
PY26 = sys.version_info[0:2] == (2, 6)
PY27 = sys.version_info[0:2] == (2, 7)
PYPY = hasattr(sys, 'pypy_translation_info')
if PY3:
from queue import Queue
def cmp(x, y):
"""
cmp(x, y) -> integer
Return negative if x<y, zero if x==y, positive if x>y.
"""
return (x > y) - (x < y)
unicode = str
basestring = str
unichr = chr
xrange = range
else:
import __builtin__
from Queue import Queue
cmp = __builtin__.cmp
unicode = __builtin__.unicode
basestring = __builtin__.basestring
unichr = __builtin__.unichr
xrange = __builtin__.xrange
__all__ = [
'Queue',
'cmp',
'unicode',
'basestring',
'unichr',
'xrange'
]
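# --- Illustrative usage sketch (not part of the original module) -------------
# The shimmed names behave the same on both interpreters; for example the
# three-way cmp() can still drive sorting through functools.cmp_to_key.
if __name__ == "__main__":
    from functools import cmp_to_key

    print([cmp(1, 2), cmp(2, 2), cmp(3, 2)])   # [-1, 0, 1]
    words = ["pear", "fig", "banana"]
    print(sorted(words, key=cmp_to_key(lambda a, b: cmp(len(a), len(b)))))
    # ['fig', 'pear', 'banana']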
| 18.25
| 62
| 0.585305
|
3d913d2bf152c97cb39720448ba90cde2acebb7f
| 1,063
|
py
|
Python
|
questions/maximum-depth-of-n-ary-tree/Solution.py
|
marcus-aurelianus/leetcode-solutions
|
8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6
|
[
"MIT"
] | 141
|
2017-12-12T21:45:53.000Z
|
2022-03-25T07:03:39.000Z
|
questions/maximum-depth-of-n-ary-tree/Solution.py
|
marcus-aurelianus/leetcode-solutions
|
8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6
|
[
"MIT"
] | 32
|
2015-10-05T14:09:52.000Z
|
2021-05-30T10:28:41.000Z
|
questions/maximum-depth-of-n-ary-tree/Solution.py
|
marcus-aurelianus/leetcode-solutions
|
8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6
|
[
"MIT"
] | 56
|
2015-09-30T05:23:28.000Z
|
2022-03-08T07:57:11.000Z
|
"""
Given a n-ary tree, find its maximum depth.
The maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.
Nary-Tree input serialization is represented in their level order traversal, each group of children is separated by the null value (See examples).
Example 1:
Input: root = [1,null,3,2,4,null,5,6]
Output: 3
Example 2:
Input: root = [1,null,2,3,4,5,null,null,6,7,null,8,null,9,10,null,null,11,null,12,null,13,null,null,14]
Output: 5
Constraints:
The depth of the n-ary tree is less than or equal to 1000.
The total number of nodes is in the range [0, 10^4].
"""
"""
# Definition for a Node.
class Node(object):
def __init__(self, val, children):
self.val = val
self.children = children
"""
class Solution(object):
def maxDepth(self, root):
"""
:type root: Node
:rtype: int
"""
if root is None:
return 0
ml = 0
for ch in root.children:
ml = max(ml, self.maxDepth(ch))
return ml + 1
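A quick self-check sketch (the Node class simply mirrors the commented definition above; the tree is Example 1):
class Node(object):
    def __init__(self, val, children):
        self.val = val
        self.children = children

# root = [1,null,3,2,4,null,5,6]
example = Node(1, [Node(3, [Node(5, []), Node(6, [])]), Node(2, []), Node(4, [])])
print(Solution().maxDepth(example))  # -> 3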
| 22.617021
| 146
| 0.636877
|
5abe0102e6c9712dde8e93d2d93b39493e3d84ae
| 1,365
|
py
|
Python
|
uninas/training/devices/cpu.py
|
cogsys-tuebingen/uninas
|
06729b9cf517ec416fb798ae387c5bd9c3a278ac
|
[
"MIT"
] | 18
|
2020-11-22T16:03:08.000Z
|
2022-03-15T12:11:46.000Z
|
uninas/training/devices/cpu.py
|
cogsys-tuebingen/uninas
|
06729b9cf517ec416fb798ae387c5bd9c3a278ac
|
[
"MIT"
] | 2
|
2022-01-04T08:10:17.000Z
|
2022-01-05T08:13:14.000Z
|
uninas/training/devices/cpu.py
|
cogsys-tuebingen/uninas
|
06729b9cf517ec416fb798ae387c5bd9c3a278ac
|
[
"MIT"
] | 6
|
2021-03-08T07:08:52.000Z
|
2022-02-24T12:00:43.000Z
|
import torch.nn as nn
from uninas.training.devices.abstract import AbstractDevicesManager, AbstractDeviceMover, TensorOrList
from uninas.register import Register
class CpuDeviceMover(AbstractDeviceMover):
"""
handle data flow to cpu (mostly do nothing)
"""
@property
def name(self) -> str:
return '%s()' % self.__class__.__name__
def empty_cache(self):
"""
empty the cache
"""
pass
def _synchronize(self, indices: [int]):
""" make sure all operations are complete """
pass
def get_usage_dict(self, log_all=False) -> dict:
""" return a dict that logs the usage of the device(s) """
return {}
def move_module(self, module: nn.Module) -> nn.Module:
""" move module to the assigned devices """
assert self.get_num_devices() == 1
return module
def _move(self, t: TensorOrList) -> TensorOrList:
""" move (nested) tensors to the assigned devices """
return t
@Register.devices_manager()
class CpuDevicesManager(AbstractDevicesManager):
"""
manage allocation/de-allocation of one CPU device
"""
_mover_cls = CpuDeviceMover
def __init__(self, seed: int, is_deterministic: bool, num_devices: int):
assert num_devices == 1
super().__init__(seed, is_deterministic, num_devices)
| 27.857143
| 102
| 0.648352
|
3089ea75c2433a9ae83245a9365a2c03cb7966f0
| 16,501
|
py
|
Python
|
molsysmt/item/mdanalysis_Universe/get.py
|
uibcdf/MolModMTs
|
4f6b6f671a9fa3e73008d1e9c48686d5f20a6573
|
[
"MIT"
] | null | null | null |
molsysmt/item/mdanalysis_Universe/get.py
|
uibcdf/MolModMTs
|
4f6b6f671a9fa3e73008d1e9c48686d5f20a6573
|
[
"MIT"
] | null | null | null |
molsysmt/item/mdanalysis_Universe/get.py
|
uibcdf/MolModMTs
|
4f6b6f671a9fa3e73008d1e9c48686d5f20a6573
|
[
"MIT"
] | null | null | null |
#######################################################################################
########### THE FOLLOWING LINES NEED TO BE CUSTOMIZED FOR EVERY CLASS ################
#######################################################################################
from molsysmt._private.execfile import execfile
from molsysmt._private.exceptions import NotWithThisFormError as _NotWithThisFormError
from molsysmt._private.exceptions import NotImplementedMethodError as _NotImplementedMethodError
from molsysmt._private.digestion import digest_item as _digest_item
from molsysmt._private.digestion import digest_indices as _digest_indices
from molsysmt._private.digestion import digest_structure_indices as _digest_structure_indices
from molsysmt import puw as _puw
import numpy as _np
from networkx import Graph as _Graph
_form='mdanalysis.Universe'
## From atom
def get_atom_id_from_atom(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
def get_atom_name_from_atom(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
def get_atom_type_from_atom(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
def get_group_index_from_atom(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
def get_component_index_from_atom(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
def get_chain_index_from_atom(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
def get_molecule_index_from_atom(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
def get_entity_index_from_atom(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
def get_inner_bonded_atoms_from_atom(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
def get_n_inner_bonds_from_atom(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
def get_coordinates_from_atom(item, indices='all', structure_indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
structure_indices = _digest_structure_indices(structure_indices)
    # stack per-frame positions (Angstrom) and convert to nm, following the pattern used in get_box_from_system
    coordinates = _puw.quantity(_np.array([frame.positions for frame in item.trajectory]) * 0.1, unit='nm')
    if indices != 'all':
        coordinates = coordinates[:, indices, :]
    if structure_indices != 'all':
        coordinates = coordinates[structure_indices, :, :]
return coordinates
## From group
def get_group_id_from_group(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
def get_group_name_from_group(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
def get_group_type_from_group(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
## From component
def get_component_id_from_component(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
def get_component_name_from_component(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
def get_component_type_from_component(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
## From molecule
def get_molecule_id_from_molecule(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
def get_molecule_name_from_molecule(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
def get_molecule_type_from_molecule(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
## From chain
def get_chain_id_from_chain(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
def get_chain_name_from_chain(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
def get_chain_type_from_chain(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
## From entity
def get_entity_id_from_entity(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
def get_entity_name_from_entity(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
def get_entity_type_from_entity(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
## From system
def get_n_atoms_from_system(item, check=True):
if check:
_digest_item(item, _form)
raise _NotImplementedMethodError()
def get_n_groups_from_system(item, check=True):
if check:
_digest_item(item, _form)
raise _NotImplementedMethodError()
def get_n_components_from_system(item, check=True):
if check:
_digest_item(item, _form)
raise _NotImplementedMethodError()
def get_n_chains_from_system(item, check=True):
if check:
_digest_item(item, _form)
raise _NotImplementedMethodError()
def get_n_molecules_from_system(item, check=True):
if check:
_digest_item(item, _form)
raise _NotImplementedMethodError()
def get_n_entities_from_system(item, check=True):
if check:
_digest_item(item, _form)
raise _NotImplementedMethodError()
def get_n_bonds_from_system(item, check=True):
if check:
_digest_item(item, _form)
raise _NotImplementedMethodError()
def get_box_from_system(item, structure_indices='all', check=True):
if check:
_digest_item(item, _form)
structure_indices = _digest_structure_indices(structure_indices)
    output = _np.array([frame.triclinic_dimensions for frame in item.trajectory]) * 0.1
    output = _puw.quantity(output, unit='nm')
    if structure_indices != 'all':
        output = output[structure_indices, :, :]
return output
def get_time_from_system(item, structure_indices='all', check=True):
if check:
_digest_item(item, _form)
structure_indices = _digest_structure_indices(structure_indices)
    output = _np.array([frame.time for frame in item.trajectory])
    output = _puw.quantity(output, unit='ps')
    if structure_indices != 'all':
        output = output[structure_indices]
return output
def get_step_from_system(item, structure_indices='all', check=True):
if check:
_digest_item(item, _form)
structure_indices = _digest_structure_indices(structure_indices)
return None
def get_n_structures_from_system(item, check=True):
if check:
_digest_item(item, _form)
    output = item.trajectory.n_frames  # MDAnalysis readers expose the frame count as n_frames
return output
def get_bonded_atoms_from_system(item, check=True):
if check:
_digest_item(item, _form)
raise _NotImplementedMethodError()
## From bond
def get_bond_order_from_bond(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
def get_bond_type_from_bond(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
def get_atom_index_from_bond(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
raise _NotImplementedMethodError()
#######################################################################################
######### DO NOT TOUCH THE FOLLOWING LINES, JUST INCLUDE THEM AS THEY ARE #############
#######################################################################################
from os import path
this_folder = path.dirname(path.abspath(__file__))
common_get = path.join(this_folder, '../../_private/common_get.py')
execfile(common_get, globals(), locals())
del(path, this_folder, common_get)
#######################################################################################
############## REMOVE COMMON GET METHODS NOT DEFINED FOR THIS FORM ####################
#######################################################################################
del(
# From atom
#get_atom_index_from_atom,
#get_group_id_from_atom,
#get_group_name_from_atom,
#get_group_type_from_atom,
#get_component_id_from_atom,
#get_component_name_from_atom,
#get_component_type_from_atom,
#get_chain_id_from_atom,
#get_chain_name_from_atom,
#get_chain_type_from_atom,
#get_molecule_id_from_atom,
#get_molecule_name_from_atom,
#get_molecule_type_from_atom,
#get_entity_id_from_atom,
#get_entity_name_from_atom,
#get_entity_type_from_atom,
#get_n_atoms_from_atom,
#get_n_groups_from_atom,
#get_n_components_from_atom,
#get_n_molecules_from_atom,
#get_n_chains_from_atom,
#get_n_entities_from_atom,
#get_bonded_atoms_from_atom,
#get_bond_index_from_atom,
#get_n_bonds_from_atom,
#get_inner_bond_index_from_atom,
# From group
#get_atom_index_from_group,
#get_atom_id_from_group,
#get_atom_name_from_group,
#get_atom_type_from_group,
#get_group_index_from_group,
#get_component_index_from_group,
#get_component_id_from_group,
#get_component_name_from_group,
#get_component_type_from_group,
#get_chain_index_from_group,
#get_chain_id_from_group,
#get_chain_name_from_group,
#get_chain_type_from_group,
#get_molecule_index_from_group,
#get_molecule_id_from_group,
#get_molecule_name_from_group,
#get_molecule_type_from_group,
#get_entity_index_from_group,
#get_entity_id_from_group,
#get_entity_name_from_group,
#get_entity_type_from_group,
#get_n_atoms_from_group,
#get_n_groups_from_group,
#get_n_components_from_group,
#get_n_molecules_from_group,
#get_n_chains_from_group,
#get_n_entities_from_group,
# From component
#get_atom_index_from_component,
#get_atom_id_from_component,
#get_atom_name_from_component,
#get_atom_type_from_component,
#get_group_index_from_component,
#get_group_id_from_component,
#get_group_name_from_component,
#get_group_type_from_component,
#get_component_index_from_component,
#get_chain_index_from_component,
#get_chain_id_from_component,
#get_chain_name_from_component,
#get_chain_type_from_component,
#get_molecule_index_from_component,
#get_molecule_id_from_component,
#get_molecule_name_from_component,
#get_molecule_type_from_component,
#get_entity_index_from_component,
#get_entity_id_from_component,
#get_entity_name_from_component,
#get_entity_type_from_component,
#get_n_atoms_from_component,
#get_n_groups_from_component,
#get_n_components_from_component,
#get_n_molecules_from_component,
#get_n_chains_from_component,
#get_n_entities_from_component,
# From molecule
#get_atom_index_from_molecule,
#get_atom_id_from_molecule,
#get_atom_name_from_molecule,
#get_atom_type_from_molecule,
#get_group_index_from_molecule,
#get_group_id_from_molecule,
#get_group_name_from_molecule,
#get_group_type_from_molecule,
#get_component_index_from_molecule,
#get_component_id_from_molecule,
#get_component_name_from_molecule,
#get_component_type_from_molecule,
#get_chain_index_from_molecule,
#get_chain_id_from_molecule,
#get_chain_name_from_molecule,
#get_chain_type_from_molecule,
#get_molecule_index_from_molecule,
#get_entity_index_from_molecule,
#get_entity_id_from_molecule,
#get_entity_name_from_molecule,
#get_entity_type_from_molecule,
#get_n_atoms_from_molecule,
#get_n_groups_from_molecule,
#get_n_components_from_molecule,
#get_n_molecules_from_molecule,
#get_n_chains_from_molecule,
#get_n_entities_from_molecule,
# From chain
#get_atom_index_from_chain,
#get_atom_id_from_chain,
#get_atom_name_from_chain,
#get_atom_type_from_chain,
#get_group_index_from_chain,
#get_group_id_from_chain,
#get_group_name_from_chain,
#get_group_type_from_chain,
#get_component_index_from_chain,
#get_component_id_from_chain,
#get_component_name_from_chain,
#get_component_type_from_chain,
#get_chain_index_from_chain,
#get_molecule_index_from_chain,
#get_molecule_id_from_chain,
#get_molecule_name_from_chain,
#get_molecule_type_from_chain,
#get_entity_index_from_chain,
#get_entity_id_from_chain,
#get_entity_name_from_chain,
#get_entity_type_from_chain,
#get_n_atoms_from_chain,
#get_n_groups_from_chain,
#get_n_components_from_chain,
#get_n_molecules_from_chain,
#get_n_chains_from_chain,
#get_n_entities_from_chain,
# From entity
#get_atom_index_from_entity,
#get_atom_id_from_entity,
#get_atom_name_from_entity,
#get_atom_type_from_entity,
#get_group_index_from_entity,
#get_group_id_from_entity,
#get_group_name_from_entity,
#get_group_type_from_entity,
#get_component_index_from_entity,
#get_component_id_from_entity,
#get_component_name_from_entity,
#get_component_type_from_entity,
#get_chain_index_from_entity,
#get_chain_id_from_entity,
#get_chain_name_from_entity,
#get_chain_type_from_entity,
#get_molecule_index_from_entity,
#get_molecule_id_from_entity,
#get_molecule_name_from_entity,
#get_molecule_type_from_entity,
#get_entity_index_from_entity,
#get_n_atoms_from_entity,
#get_n_groups_from_entity,
#get_n_components_from_entity,
#get_n_molecules_from_entity,
#get_n_chains_from_entity,
#get_n_entities_from_entity,
# From system
#get_n_aminoacids_from_system,
#get_n_nucleotides_from_system,
#get_n_ions_from_system,
#get_n_waters_from_system,
#get_n_cosolutes_from_system,
#get_n_small_molecules_from_system,
#get_n_peptides_from_system,
#get_n_proteins_from_system,
#get_n_dnas_from_system,
#get_n_rnas_from_system,
#get_n_lipids_from_system,
#get_coordinates_from_system,
#get_box_shape_from_system,
#get_box_lengths_from_system,
#get_box_angles_from_system,
#get_box_volume_from_system,
#get_bonded_atoms_from_system,
#get_bond_index_from_system,
#get_inner_bonded_atoms_from_system,
#get_inner_bond_index_from_system,
# From bond
#get_bond_index_from_bond,
#get_n_bonds_from_bond
)
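A hypothetical usage sketch (file names are placeholders and MDAnalysis must be installed); it only exercises the getters implemented above for this form:
import MDAnalysis as mda

universe = mda.Universe('topology.pdb', 'trajectory.xtc')   # placeholder files
box = get_box_from_system(universe)           # quantity with shape (n_frames, 3, 3) in nm
times = get_time_from_system(universe)        # quantity in ps
n_frames = get_n_structures_from_system(universe)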
| 26.06793
| 96
| 0.71729
|
a0998dc5f7a91135ec1ae8a72eabdc04d57ba796
| 1,247
|
py
|
Python
|
test/functional_requirements/array/MOUNT_ARRAY_CHECKING_UNIQUE_ID.py
|
so931/poseidonos
|
2aa82f26bfbd0d0aee21cd0574779a655634f08c
|
[
"BSD-3-Clause"
] | 1
|
2022-02-07T23:30:50.000Z
|
2022-02-07T23:30:50.000Z
|
test/functional_requirements/array/MOUNT_ARRAY_CHECKING_UNIQUE_ID.py
|
so931/poseidonos
|
2aa82f26bfbd0d0aee21cd0574779a655634f08c
|
[
"BSD-3-Clause"
] | null | null | null |
test/functional_requirements/array/MOUNT_ARRAY_CHECKING_UNIQUE_ID.py
|
so931/poseidonos
|
2aa82f26bfbd0d0aee21cd0574779a655634f08c
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import subprocess
import os
import sys
sys.path.append("../")
sys.path.append("../../system/lib/")
import json_parser
import pos
import cli
import api
import json
import CREATE_ARRAY_BASIC
import SCAN_DEV_BASIC
SPARE = CREATE_ARRAY_BASIC.SPARE
ANY_DATA = CREATE_ARRAY_BASIC.ANY_DATA
ANY_OTHER_DATA = CREATE_ARRAY_BASIC.ANY_OTHER_DATA
ARRAYNAME = CREATE_ARRAY_BASIC.ARRAYNAME
def execute():
CREATE_ARRAY_BASIC.execute()
result = cli.array_info(ARRAYNAME)
pos.exit_pos()
SCAN_DEV_BASIC.execute()
result_npor = cli.array_info(ARRAYNAME)
uniqueId = json.loads(result)['Response']['result']['data']['unique_id']
print("uniqueId Before NPOR : " + str(uniqueId))
uniqueId_npor = json.loads(result_npor)['Response']['result']['data']['unique_id']
print("uniqueId After NPOR : " + str(uniqueId_npor))
if uniqueId == uniqueId_npor:
out = json_parser.make_result_code(0)
else:
out = json_parser.make_result_code(-1)
return out
if __name__ == "__main__":
if len(sys.argv) >= 2:
pos.set_addr(sys.argv[1])
api.clear_result(__file__)
out = execute()
ret = api.set_result_by_code_eq(out, 0, __file__)
pos.flush_and_kill_pos()
exit(ret)
| 25.44898
| 86
| 0.708901
|
ddd15cc21e0ceaefe571a53807967826258b044f
| 1,194
|
py
|
Python
|
Osori_rpg/models.py
|
bees1114/Osori_level_meter
|
6af9b06e91fc4935be75e8293879e2bf881beefe
|
[
"MIT"
] | 1
|
2017-11-04T21:27:59.000Z
|
2017-11-04T21:27:59.000Z
|
Osori_rpg/models.py
|
bees1114/Osori_level_meter
|
6af9b06e91fc4935be75e8293879e2bf881beefe
|
[
"MIT"
] | null | null | null |
Osori_rpg/models.py
|
bees1114/Osori_level_meter
|
6af9b06e91fc4935be75e8293879e2bf881beefe
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.db.models.signals import post_save
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
git_commit = models.IntegerField(default=0)
room_visit = models.IntegerField(default=0)
event_visit = models.IntegerField(default=0)
contribution = models.IntegerField(default=0)
login_counter = models.IntegerField(default=0)
level = models.IntegerField(default=0)
exp = models.IntegerField(default=0)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
class ExpRequest(models.Model):
ExpOptions = (
('Room_Visit', 'Room_Visit'),
('Event_Visit', 'Event_Visit'),
('Contribution', 'Contribution'),
)
owner = models.ForeignKey(User, on_delete=models.CASCADE)
options = models.CharField(max_length=100, choices=ExpOptions)
spec = models.TextField()
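A small sketch of how the signal receivers behave, e.g. in a Django shell (assumes a configured project with migrations applied): every newly created User automatically gets a Profile, and saving the User also saves its Profile.
user = User.objects.create_user(username="osori", password="secret")
print(user.profile.level, user.profile.exp)   # -> 0 0  (created by create_user_profile)
user.profile.exp = 150
user.save()                                   # save_user_profile persists the change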
| 32.27027
| 66
| 0.731993
|
f27981341390e2b00603b256ee86b2292c27c961
| 3,825
|
py
|
Python
|
script/inception_score.py
|
gargrohin/optimistic_GAN_training
|
b9215e052e830941ec023cb37d44424680eb9570
|
[
"MIT"
] | null | null | null |
script/inception_score.py
|
gargrohin/optimistic_GAN_training
|
b9215e052e830941ec023cb37d44424680eb9570
|
[
"MIT"
] | null | null | null |
script/inception_score.py
|
gargrohin/optimistic_GAN_training
|
b9215e052e830941ec023cb37d44424680eb9570
|
[
"MIT"
] | null | null | null |
# From https://github.com/openai/improved-gan/blob/master/inception_score/model.py
# Code derived from tensorflow/tensorflow/models/image/imagenet/classify_image.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
import glob
import scipy.misc
import math
from tqdm import tqdm, trange
MODEL_DIR = '/tmp/imagenet'
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
softmax = None
# Call this function with list of images. Each of elements should be a
# numpy array with values ranging from 0 to 255.
def get_inception_score(images, splits=10):
assert(type(images) == list)
assert(type(images[0]) == np.ndarray)
assert(len(images[0].shape) == 3)
print(images[0].shape)
assert(np.max(images[0]) > 10)
assert(np.min(images[0]) >= 0.0)
inps = []
for img in images:
img = img.astype(np.float32)
inps.append(np.expand_dims(img, 0))
bs = 100
with tf.Session() as sess:
preds, pools = [], []
n_batches = int(math.ceil(float(len(inps)) / float(bs)))
print("n_batches :", n_batches)
for i in trange(n_batches):
inp = inps[(i * bs):min((i + 1) * bs, len(inps))]
inp = np.concatenate(inp, 0)
pred, pool = sess.run([softmax, pool3], {'InputTensor:0': inp})
preds.append(pred)
pools.append(pool)
preds = np.concatenate(preds, 0)
scores = []
for i in range(splits):
part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
kl = np.mean(np.sum(kl, 1))
scores.append(np.exp(kl))
return np.mean(scores), np.std(scores), np.squeeze(np.concatenate(pools, 0))
# This function is called automatically.
# Init inception
def _init_inception():
global softmax, pool3
if not os.path.exists(MODEL_DIR):
os.makedirs(MODEL_DIR)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(MODEL_DIR, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR)
with tf.gfile.FastGFile(os.path.join(
MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
input_tensor = tf.placeholder(tf.float32, shape=[None, None, None, 3],
name='InputTensor')
_ = tf.import_graph_def(graph_def, name='',
input_map={'ExpandDims:0':input_tensor})
# _ = tf.import_graph_def(graph_def, name='')
# Works with an arbitrary minibatch size.
with tf.Session() as sess:
pool3 = sess.graph.get_tensor_by_name('pool_3:0')
ops = pool3.graph.get_operations()
for op_idx, op in enumerate(ops):
for o in op.outputs:
shape = o.get_shape()
shape = [s.value for s in shape]
new_shape = []
for j, s in enumerate(shape):
if s == 1 and j == 0:
new_shape.append(None)
else:
new_shape.append(s)
o.set_shape(tf.TensorShape(new_shape))
w = sess.graph.get_operation_by_name("softmax/logits/MatMul").inputs[1]
logits = tf.matmul(tf.squeeze(pool3, [1,2]), w)
softmax = tf.nn.softmax(logits)
if softmax is None:
_init_inception()
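A usage sketch following the comment near the top of the file: pass a list of HxWx3 uint8 arrays with values in [0, 255]. Random noise is used purely to illustrate the call signature; the first call also downloads the Inception graph to MODEL_DIR.
if __name__ == '__main__':
    fake_images = [np.random.randint(0, 256, (299, 299, 3)).astype(np.uint8)
                   for _ in range(200)]
    mean_score, std_score, pools = get_inception_score(fake_images, splits=2)
    print('Inception score: %.3f +/- %.3f' % (mean_score, std_score))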
| 36.084906
| 90
| 0.659869
|
d9cea492e6176c150dbd15acd7f1f960319d35a5
| 776
|
py
|
Python
|
web/twitch_webhook/views.py
|
samuelfirst/nemoobot
|
b74ad66d4f2052eaba14e4b79e20c3da274b5909
|
[
"MIT"
] | 1
|
2021-01-30T09:19:37.000Z
|
2021-01-30T09:19:37.000Z
|
web/twitch_webhook/views.py
|
samuelfirst/nemoobot
|
b74ad66d4f2052eaba14e4b79e20c3da274b5909
|
[
"MIT"
] | 2
|
2020-12-21T20:57:19.000Z
|
2021-01-26T08:08:09.000Z
|
web/twitch_webhook/views.py
|
samuelfirst/nemoobot
|
b74ad66d4f2052eaba14e4b79e20c3da274b5909
|
[
"MIT"
] | 1
|
2020-12-22T07:42:42.000Z
|
2020-12-22T07:42:42.000Z
|
import json
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from .tasks import save_subscription_model
from .utils import is_request_verified, process_event
@csrf_exempt
def follows_webhook(request, twitch_user_id):
if is_request_verified(request):
data = json.loads(request.body.decode('utf-8'))
token = data.get('challenge')
if token:
subscription_data = data.get('subscription')
save_subscription_model.apply_async((subscription_data, twitch_user_id))
return HttpResponse(token, content_type="text/plain", status=200)
process_event(data.get('event'), 'new_follow')
return HttpResponse(status=200)
else:
return HttpResponse(status=403)
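A sketch of how the view might be routed; the URL pattern below is an assumption for illustration, not taken from the repository's urls.py.
# web/twitch_webhook/urls.py (hypothetical)
from django.urls import path
from .views import follows_webhook

urlpatterns = [
    path('follows/<int:twitch_user_id>/', follows_webhook),
]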
| 33.73913
| 84
| 0.721649
|
b07d6663202d8525e3de395ea68e8d3a567cdf29
| 5,323
|
py
|
Python
|
lib/gen_charts.py
|
mehmetcanbudak/StrategyCheck
|
40b6ace71e0f8b428cd5db5c7516569f255484fa
|
[
"MIT"
] | 1
|
2021-05-09T21:48:04.000Z
|
2021-05-09T21:48:04.000Z
|
lib/gen_charts.py
|
mehmetcanbudak/StrategyCheck
|
40b6ace71e0f8b428cd5db5c7516569f255484fa
|
[
"MIT"
] | null | null | null |
lib/gen_charts.py
|
mehmetcanbudak/StrategyCheck
|
40b6ace71e0f8b428cd5db5c7516569f255484fa
|
[
"MIT"
] | 1
|
2021-05-09T21:48:37.000Z
|
2021-05-09T21:48:37.000Z
|
from plotly.subplots import make_subplots
import plotly.graph_objs as go
import pandas as pd
import lib.strategy as strategy
import lib.indicators as indicators
import os
import json
from peakdetect import peakdetect
with open("lib/settings.json", "r") as settings_json:
settings = json.load(settings_json)
exchange_settings = settings["ExchangeSettings"]
indicator_settings = settings["IndicatorSettings"]
strategy_settings = settings["StrategySettings"]
class Chart:
def __init__(self, name):
self.name = name
try:
self.df = pd.read_csv(f'output/candle_data/{name}_{exchange_settings["Days_to_look_back"]}days_{exchange_settings["Candle_Interval"]}_ta.csv')
except FileNotFoundError:
return
self.long_index = []
self.short_index = []
if strategy_settings["check_peaks"]:
indicator_peaks = peakdetect(
self.df[strategy_settings["Peak_Indicator"]],
lookahead=strategy_settings["Peak_Lookahead"])
self.long_index = [peak[0] for peak in indicator_peaks[1]]
self.short_index = [peak[0] for peak in indicator_peaks[0]]
else:
self.long_index = strategy.go_long(self.df)
self.short_index = strategy.go_short(self.df)
# set up the whole graph
indicators_total = max(indicator_settings["Add_Indicators"][item]['Row'] for item in indicator_settings["Add_Indicators"])
self.figure = make_subplots(
rows=indicators_total,
cols=1,
row_width=[1 / indicators_total] * indicators_total
)
# candlestick graph of asset
self.figure.append_trace(
go.Candlestick(
x=self.df['date'],
name='price',
open=self.df['open'],
high=self.df['high'],
low=self.df['low'],
close=self.df['close']
),
row=1,
col=1
)
# set date as index for graphs
self.date_long = [self.df['date'][i] for i in self.long_index]
self.date_short = [self.df['date'][i] for i in self.short_index]
self.add_signal_to_graph('close')
# add every indicator to graph (see settings.json)
for indicator in indicator_settings["Add_Indicators"]:
self.add_indicator_to_graph(indicator)
if indicator_settings["Add_Indicators"][indicator]['Add_Signal']:
self.add_signal_to_graph(indicator)
# generate html file
size = 1500
self.figure.update_layout(
title=self.name,
xaxis_rangeslider_visible=False,
autosize=False,
width=size * 1.5,
height=size
)
if not os.path.exists('output/charts'):
os.makedirs('output/charts')
self.figure.write_html(f'output/charts/{self.name}.html')
print(f'generated chart for {name}')
'''
    This method adds an indicator (see settings.json) to the graph
plot_type: scatter/bar
'''
def add_indicator_to_graph(self, name):
if indicator_settings["Add_Indicators"][name]['Plot_Type'] == 'scatter':
self.figure.append_trace(
go.Scatter(
x=self.df['date'],
y=self.df[name],
name=name,
line=dict(color=indicator_settings["Add_Indicators"][name]['Color'])
),
row=indicator_settings["Add_Indicators"][name]['Row'],
col=1
)
elif indicator_settings["Add_Indicators"][name]['Plot_Type'] == 'bar':
self.figure.append_trace(
go.Bar(
x=self.df['date'],
y=self.df[name],
name=name,
marker=dict(color=indicator_settings["Add_Indicators"][name]['Color'])
),
row=indicator_settings["Add_Indicators"][name]['Row'],
col=1
)
'''
This method sets green and red markers within the graph of the associated indicator
'''
def add_signal_to_graph(self, name):
indicator_long_filter = [self.df[name].tolist()[i] for i in self.long_index]
indicator_short_filter = [self.df[name].tolist()[i] for i in self.short_index]
# Long Signals
self.figure.append_trace(
go.Scatter(
x=self.date_long,
y=indicator_long_filter,
name="Buy Signals",
marker=dict(color="lime", size=12, opacity=0.5),
mode="markers"
),
row=indicator_settings["Add_Indicators"][name]['Row'] if name not in ['high', 'low', 'close', 'open'] else 1,
col=1
)
# Short Signals
self.figure.append_trace(
go.Scatter(
x=self.date_short,
y=indicator_short_filter,
name="Sell Signals",
marker=dict(color="rgb(255, 36, 0)", size=12, opacity=0.5),
mode="markers"
),
row=indicator_settings["Add_Indicators"][name]['Row'] if name not in ['high', 'low', 'close', 'open'] else 1,
col=1
)
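A minimal usage sketch (the symbol name is a placeholder; it assumes the corresponding candle CSV referenced in settings.json already exists under output/candle_data/):
if __name__ == '__main__':
    Chart('BTCUSDT')   # builds the subplots and writes output/charts/BTCUSDT.html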
| 36.458904
| 154
| 0.566222
|
ce97290d6724fdec009c06dd44fdc7747857c2ad
| 301
|
py
|
Python
|
seiketsu/users/serializers.py
|
tychota/seiketsu
|
2b5280365b9de44cd84ac65ed74981b30be5cc76
|
[
"MIT"
] | null | null | null |
seiketsu/users/serializers.py
|
tychota/seiketsu
|
2b5280365b9de44cd84ac65ed74981b30be5cc76
|
[
"MIT"
] | null | null | null |
seiketsu/users/serializers.py
|
tychota/seiketsu
|
2b5280365b9de44cd84ac65ed74981b30be5cc76
|
[
"MIT"
] | null | null | null |
from .models import User
from rest_framework.serializers import ModelSerializer
class UserSerializer(ModelSerializer):
class Meta:
model = User
fields = [
'id',
'first_name',
'last_name',
'username',
'email',
]
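A usage sketch, e.g. in a Django shell (assumes a configured Django/DRF project and that the custom User model keeps the standard name/email fields): serializing an instance yields only the whitelisted fields.
user = User.objects.create(username="alice", email="alice@example.com",
                           first_name="Alice", last_name="Doe")
print(UserSerializer(user).data)
# e.g. {'id': 1, 'first_name': 'Alice', 'last_name': 'Doe',
#       'username': 'alice', 'email': 'alice@example.com'}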
| 20.066667
| 54
| 0.538206
|
b6c4617732b5b52793c26a91c723b32c8d552f42
| 6,262
|
py
|
Python
|
cogs/general.py
|
clugraphy/Python-Discord-Bot-Template
|
948bbdc3a7488a0257d5ec7f61f43cc8a70db6d8
|
[
"Apache-2.0"
] | null | null | null |
cogs/general.py
|
clugraphy/Python-Discord-Bot-Template
|
948bbdc3a7488a0257d5ec7f61f43cc8a70db6d8
|
[
"Apache-2.0"
] | null | null | null |
cogs/general.py
|
clugraphy/Python-Discord-Bot-Template
|
948bbdc3a7488a0257d5ec7f61f43cc8a70db6d8
|
[
"Apache-2.0"
] | null | null | null |
import os, sys, discord, platform, random, aiohttp, json
from discord.ext import commands
if not os.path.isfile("config.py"):
sys.exit("'config.py' not found! Please add it and try again.")
else:
import config
class general(commands.Cog, name="general"):
def __init__(self, bot):
self.bot = bot
@commands.command(name="info", aliases=["botinfo"])
async def info(self, context):
"""
Get some useful (or not) information about the bot.
"""
embed = discord.Embed(
description="cLu designer Bot for HTF Fund",
color=config.success
)
embed.set_author(
name="HTF Bot Information"
)
embed.add_field(
name="Owner:",
value="cLuGraphy#7516",
inline=True
)
embed.add_field(
name="Python Version:",
value=f"{platform.python_version()}",
inline=True
)
embed.add_field(
name="Prefix:",
value=f"{config.BOT_PREFIX}",
inline=False
)
embed.set_footer(
text=f"Requested by {context.message.author}"
)
await context.send(embed=embed)
@commands.command(name="serverinfo")
async def serverinfo(self, context):
"""
Get some useful (or not) information about the server.
"""
server = context.message.guild
roles = [x.name for x in server.roles]
role_length = len(roles)
if role_length > 50:
roles = roles[:50]
roles.append(f">>>> Displaying[50/{len(roles)}] Roles")
roles = ", ".join(roles)
channels = len(server.channels)
time = str(server.created_at)
time = time.split(" ")
time = time[0]
embed = discord.Embed(
title="**Server Name:**",
description=f"{server}",
color=config.success
)
embed.set_thumbnail(
url=server.icon_url
)
embed.add_field(
name="Owner",
value=f"{server.owner}\n{server.owner.id}"
)
embed.add_field(
name="Server ID",
value=server.id
)
embed.add_field(
name="Member Count",
value=server.member_count
)
embed.add_field(
name="Text/Voice Channels",
value=f"{channels}"
)
embed.add_field(
name=f"Roles ({role_length})",
value=roles
)
embed.set_footer(
text=f"Created at: {time}"
)
await context.send(embed=embed)
@commands.command(name="ping")
async def ping(self, context):
"""
Check if the bot is alive.
"""
embed = discord.Embed(
color=config.success
)
embed.add_field(
name="Pong!",
value=":ping_pong:",
inline=True
)
embed.set_footer(
text=f"Pong request by {context.message.author}"
)
await context.send(embed=embed)
@commands.command(name="invite")
async def invite(self, context):
"""
Get the invite link of the bot to be able to invite it.
"""
await context.send("I sent you a private message!")
await context.author.send(f"Invite me by clicking here: https://discordapp.com/oauth2/authorize?&client_id={config.APPLICATION_ID}&scope=bot&permissions=8")
@commands.command(name="server")
async def server(self, context):
"""
Get the invite link of the discord server of the bot for some support.
"""
await context.send("I sent you a private message!")
await context.author.send("Join my discord server by clicking here: https://discord.gg/HzJ3Gfr")
@commands.command(name="poll")
async def poll(self, context, *args):
"""
Create a poll where members can vote.
"""
poll_title = " ".join(args)
embed = discord.Embed(
title="A new poll has been created!",
description=f"{poll_title}",
color=config.success
)
embed.set_footer(
text=f"Poll created by: {context.message.author} • React to vote!"
)
embed_message = await context.send(embed=embed)
await embed_message.add_reaction("👍")
await embed_message.add_reaction("👎")
await embed_message.add_reaction("🤷")
@commands.command(name="8ball")
async def eight_ball(self, context, *args):
"""
Ask any question to the bot.
"""
answers = ['It is certain.', 'It is decidedly so.', 'You may rely on it.', 'Without a doubt.',
'Yes - definitely.', 'As I see, yes.', 'Most likely.', 'Outlook good.', 'Yes.',
'Signs point to yes.', 'Reply hazy, try again.', 'Ask again later.', 'Better not tell you now.',
'Cannot predict now.', 'Concentrate and ask again later.', 'Don\'t count on it.', 'My reply is no.',
'My sources say no.', 'Outlook not so good.', 'Very doubtful.']
embed = discord.Embed(
title="**My Answer:**",
description=f"{answers[random.randint(0, len(answers))]}",
color=config.success
)
embed.set_footer(
text=f"Question asked by: {context.message.author}"
)
await context.send(embed=embed)
@commands.command(name="bitcoin")
async def bitcoin(self, context):
"""
Get the current price of bitcoin.
"""
url = "https://api.coindesk.com/v1/bpi/currentprice/BTC.json"
# Async HTTP request
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
response = json.loads(response)
embed = discord.Embed(
title=":information_source: Info",
description=f"Bitcoin price is: ${response['bpi']['USD']['rate']}",
color=config.success
)
await context.send(embed=embed)
def setup(bot):
bot.add_cog(general(bot))
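A loading sketch matching the synchronous setup() above (i.e. a pre-2.0 discord.py style); the token string is a placeholder.
from discord.ext import commands
import config

bot = commands.Bot(command_prefix=config.BOT_PREFIX)
bot.load_extension("cogs.general")
bot.run("YOUR_BOT_TOKEN")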
| 32.957895
| 164
| 0.548706
|
c645911992ee0e35007cb42706109c7232dfc912
| 3,173
|
py
|
Python
|
tests/test_filter.py
|
jina-ai/pqlite
|
2ce1ec2283b381f5153ea60141a6bb474bbf0f0c
|
[
"Apache-2.0"
] | 45
|
2021-12-10T07:39:39.000Z
|
2022-02-20T22:58:28.000Z
|
tests/test_filter.py
|
jina-ai/pqlite
|
2ce1ec2283b381f5153ea60141a6bb474bbf0f0c
|
[
"Apache-2.0"
] | 30
|
2021-12-10T07:46:28.000Z
|
2022-02-18T09:27:48.000Z
|
tests/test_filter.py
|
jina-ai/annlite
|
e4e706e313ba5cbfb7083a5dea9e75b8d2813394
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from annlite.filter import Filter
def test_empty_filter():
f = Filter()
where_clause, parameters = f.parse_where_clause()
assert where_clause == ''
assert parameters == ()
def test_simple_filter():
f = Filter({'brand': {'$lt': 1}})
where_clause, parameters = f.parse_where_clause()
assert where_clause == '(brand < ?)'
assert parameters == (1,)
def test_logic_operator():
f = Filter({'$and': {'brand': {'$lt': 1}, 'price': {'$gte': 50}}})
where_clause, parameters = f.parse_where_clause()
assert where_clause == '(brand < ?) AND (price >= ?)'
assert parameters == (1, 50)
f = Filter({'brand': {'$lt': 1}, 'price': {'$gte': 50}})
where_clause, parameters = f.parse_where_clause()
assert where_clause == '(brand < ?) AND (price >= ?)'
assert parameters == (1, 50)
f = Filter({'$or': {'brand': {'$lt': 1}, 'price': {'$gte': 50}}})
where_clause, parameters = f.parse_where_clause()
assert where_clause == '(brand < ?) OR (price >= ?)'
assert parameters == (1, 50)
def test_membership_operator():
f = Filter({'$and': {'brand': {'$in': ['Nike', 'Gucci']}, 'price': {'$gte': 50}}})
where_clause, parameters = f.parse_where_clause()
assert where_clause == '(brand IN(?, ?)) AND (price >= ?)'
assert parameters == ('Nike', 'Gucci', 50)
f = Filter({'$or': {'brand': {'$nin': ['Nike', 'Gucci']}, 'price': {'$gte': 50}}})
where_clause, parameters = f.parse_where_clause()
assert where_clause == '(brand NOT IN(?, ?)) OR (price >= ?)'
assert parameters == ('Nike', 'Gucci', 50)
def test_cases():
express = {
'$and': {
'price': {'$gte': 0, '$lte': 54},
'rating': {'$gte': 1},
'year': {'$gte': 2007, '$lte': 2010},
}
}
f = Filter(express)
where_clause, parameters = f.parse_where_clause()
assert (
where_clause
== '(price >= ?) AND (price <= ?) AND (rating >= ?) AND (year >= ?) AND (year <= ?)'
)
assert parameters == (0, 54, 1, 2007, 2010)
express = {
'$and': {
'price': {'$or': [{'price': {'$gte': 0}}, {'price': {'$lte': 54}}]},
'rating': {'$gte': 1},
'year': {'$gte': 2007, '$lte': 2010},
}
}
f = Filter(express)
where_clause, parameters = f.parse_where_clause()
assert (
where_clause
== '((price >= ?) OR (price <= ?)) AND (rating >= ?) AND (year >= ?) AND (year <= ?)'
)
assert parameters == (0, 54, 1, 2007, 2010)
express = {
'$and': {
'$or': [{'price': {'$gte': 0}}, {'price': {'$lte': 54}}],
'rating': {'$gte': 1},
'year': {'$gte': 2007, '$lte': 2010},
}
}
f = Filter(express)
where_clause, parameters = f.parse_where_clause()
assert (
where_clause
== '((price >= ?) OR (price <= ?)) AND (rating >= ?) AND (year >= ?) AND (year <= ?)'
)
assert parameters == (0, 54, 1, 2007, 2010)
def test_error_filter():
f = Filter({'$may': {'brand': {'$lt': 1}, 'price': {'$gte': 50}}})
with pytest.raises(ValueError):
f.parse_where_clause()
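A sketch of why parse_where_clause() returns a (clause, parameters) pair: it drops straight into a parameterized SQL query. The table layout below is hypothetical.
import sqlite3

from annlite.filter import Filter

where_clause, parameters = Filter({'price': {'$gte': 50}}).parse_where_clause()
conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE docs (id INTEGER, price REAL)')
conn.executemany('INSERT INTO docs VALUES (?, ?)', [(1, 75.0), (2, 20.0)])
rows = conn.execute('SELECT id FROM docs WHERE ' + where_clause, parameters).fetchall()
print(rows)  # -> [(1,)]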
| 31.107843
| 93
| 0.512764
|
862d42a5ef8fe116180e4850d683ce06075f924e
| 3,410
|
gyp
|
Python
|
cloud9_root/src/build/stp.gyp
|
DanielGuoVT/symsc
|
95b705bd1f4d2863d79866c84fc7ee90aba743cb
|
[
"Apache-2.0"
] | 3
|
2019-02-12T04:14:39.000Z
|
2020-11-05T08:46:20.000Z
|
cloud9_root/src/build/stp.gyp
|
DanielGuoVT/symsc
|
95b705bd1f4d2863d79866c84fc7ee90aba743cb
|
[
"Apache-2.0"
] | null | null | null |
cloud9_root/src/build/stp.gyp
|
DanielGuoVT/symsc
|
95b705bd1f4d2863d79866c84fc7ee90aba743cb
|
[
"Apache-2.0"
] | null | null | null |
#
# Cloud9 Parallel Symbolic Execution Engine
#
# Copyright (c) 2012, Dependable Systems Laboratory, EPFL
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Dependable Systems Laboratory, EPFL nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE DEPENDABLE SYSTEMS LABORATORY, EPFL BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# All contributors are listed in CLOUD9-AUTHORS file.
#
{
'variables': {
'stp_path': '../third_party/stp',
'stp_src': '../third_party/stp/src',
'valid_extensions': [
'-name', '*.h',
'-o', '-name', '*.hpp',
'-o', '-name', '*.cpp',
'-o', '-name', '*.c',
],
'lex_tool': 'flex',
'yacc_tool': 'bison -d -y --debug -v',
}, # variables
'target_defaults': {
'cflags': [
'-O3',
'-fomit-frame-pointer',
'-Wno-deprecated',
],
'defines': [
'NDEBUG',
# Required by MiniSAT
'__STDC_LIMIT_MACROS',
'__STDC_FORMAT_MACROS',
'EXT_HASH_MAP',
],
}, # target_defaults
'targets': [
{
'target_name': 'libast',
'type': 'static_library',
'sources': [
'<!@(find <(stp_src)/AST -maxdepth 1 <(valid_extensions))',
'<!@(find <(stp_src)/STPManager -maxdepth 1 <(valid_extensions))',
'<!@(find <(stp_src)/absrefine_counterexample -maxdepth 1 <@(valid_extensions))',
'<!@(find <(stp_src)/AST/NodeFactory -maxdepth 1 <(valid_extensions))',
'<!@(find <(stp_src)/c_interface -maxdepth 1 <(valid_extensions))',
#'<!@(find <(stp_src)/cpp_interface -maxdepth 1 <(valid_extensions))',
'<!@(find <(stp_src)/to-sat <(valid_extensions))',
],
},
{
'target_name': 'libstpmgr',
'type': 'static_library',
'sources': [
],
},
{
'target_name': 'libprinter',
'type': 'static_library',
'sources': [
'<!@(find <(stp_src)/printer -maxdepth 1 <(valid_extensions))',
],
},
{
'target_name': 'libabstractionrefinement',
'type': 'static_library',
'sources': [
],
},
], # targets
}
| 35.520833
| 92
| 0.642229
|
0c4b1d23814d78f66c5f5ba696c9114399d1d94d
| 1,336
|
py
|
Python
|
system/rules.py
|
aaaimx/COVID19-Detection
|
7fd3864a4ed258c8232d5e0edf5db9fe1fadd674
|
[
"MIT"
] | 2
|
2020-03-24T07:53:55.000Z
|
2020-03-24T14:41:17.000Z
|
system/rules.py
|
aaaimx/COVID19-Detection
|
7fd3864a4ed258c8232d5e0edf5db9fe1fadd674
|
[
"MIT"
] | 18
|
2020-03-24T04:36:17.000Z
|
2021-08-23T20:40:32.000Z
|
system/rules.py
|
aaaimx/COVID19-Detection
|
7fd3864a4ed258c8232d5e0edf5db9fe1fadd674
|
[
"MIT"
] | 2
|
2020-03-24T07:54:00.000Z
|
2020-10-27T09:14:01.000Z
|
import numpy as np
import skfuzzy as fuzz
from skfuzzy import control as ctrl
from antecedents import Fi, DC, Mi, Fa, CN, Es, DG, DR, Ri
from consequents import Co, Al, Re, In
# -------------------------------------
# Inference rules
# -------------------------------------
rule1 = ctrl.Rule(antecedent=(Fi['No'] & DG['No']), consequent=(Co['PP'], Al['MP'], Re['PP'], In['PP']))
rule2 = ctrl.Rule(antecedent=(Fi['No'] & DG['Leve']), consequent=(Co['PP'], Re['MP'], In['PP']))
rule3 = ctrl.Rule(antecedent=(Fi['Alta'] & DC['~No'] & Mi['~No'] & Fa['~No'] & DR['No']), consequent=Re['MP'])
rule4 = ctrl.Rule(antecedent=(Fi['Alta'] & DR['~No']), consequent=(Co['MP'], Al['PP'], Re['PP']))
rule5 = ctrl.Rule(antecedent=(Fi['Leve'] & DR['~No']), consequent=Co['Pr'])
rule6 = ctrl.Rule(antecedent=(Fi['Leve'] & CN['~No'] & Es['~No'] & DG['~No'] & DR['No'] & Ri['~No']), consequent=Re['MP'])
rule7 = ctrl.Rule(antecedent=(Fi['~No'] & DC['~No'] & Mi['~No'] & Fa['~No'] & DR['~No']), consequent=In['MP'])
rule8 = ctrl.Rule(antecedent=(Fi['Leve'] & DR['No']), consequent=(Re['Pr'], In['Po']))
rule9 = ctrl.Rule(antecedent=(Fi['Alta'] & DC['~No'] & Mi['~No'] & Fa['~No'] & DR['~No']), consequent=(Co['MP'], Al['PP'], Re['PP'], In['PP']))
rule10 = ctrl.Rule(antecedent=(Fi['No'] & DG['Severo']), consequent=(Co['PP'], Al['PP'], Re['MP'], In['PP']))
| 70.315789
| 143
| 0.541168
|
4c221c72d1813b56ba362c840df7348fce4e4dcc
| 10,937
|
py
|
Python
|
macbethLookTransfer.py
|
hellothisistim/macbethLookTransfer
|
bcf4e25cd1a7586ebd3641a452135e8b2872de3a
|
[
"Unlicense"
] | null | null | null |
macbethLookTransfer.py
|
hellothisistim/macbethLookTransfer
|
bcf4e25cd1a7586ebd3641a452135e8b2872de3a
|
[
"Unlicense"
] | null | null | null |
macbethLookTransfer.py
|
hellothisistim/macbethLookTransfer
|
bcf4e25cd1a7586ebd3641a452135e8b2872de3a
|
[
"Unlicense"
] | null | null | null |
import numpy as np
from scipy.misc import imread, imsave
import matplotlib.pyplot as plt
from colormath.color_conversions import convert_color
from colormath.color_objects import sRGBColor, XYZColor
from pprint import pprint
import math
from copy import deepcopy
macbeth_patch_names = ["Dark skin", "Light skin", "Blue sky", "Foliage", "Blue flower", "Bluish green",
"Orange", "Purplish blue", "Moderate red", "Purple", "Yellow green", "Orange yellow",
"Blue", "Green", "Red", "Yellow", "Magenta", "Cyan",
"White", "Neutral 8", "Neutral 6.5", "Neutral 5", "Neutral 3.5", "Black"]
def import_pointcloud(source_file='', dest_file=''):
source_image = imread(source_file)
dest_image = imread(dest_file)
cloud = []
    # assuming the wedge images contain 8 exposure steps (hsplit into 8 columns)
source_levels = np.hsplit(source_image, 8)
dest_levels = np.hsplit(dest_image, 8)
for level_num in range(len(source_levels)):
source_level = source_levels[level_num]
dest_level = dest_levels[level_num]
pixel_number = 0
for row_number in range(len(source_level)):
source_row = source_level[row_number]
dest_row = dest_level[row_number]
for column_number in range(len(source_row)):
source_pixel = source_row[column_number]
dest_pixel = dest_row[column_number]
source_r = source_pixel[0]
source_g = source_pixel[1]
source_b = source_pixel[2]
dest_r = dest_pixel[0]
dest_g = dest_pixel[1]
dest_b = dest_pixel[2]
source_srgb = sRGBColor(source_r, source_g, source_b, is_upscaled=True)
dest_srgb = sRGBColor(dest_r, dest_g, dest_b, is_upscaled=True)
source_xyz = convert_color(source_srgb, XYZColor)
dest_xyz = convert_color(dest_srgb, XYZColor)
cloud.append({'level': level_num,
'color name': macbeth_patch_names[pixel_number],
'source color': source_xyz,
'dest color': dest_xyz })
pixel_number += 1
return cloud
def filter_pointcloud(pointcloud, levels=[], color_names=[]):
filtered_cloud_levels = []
if levels != []:
for point in pointcloud:
if point['level'] in levels:
filtered_cloud_levels.append(point)
else:
filtered_cloud_levels = pointcloud
filtered_cloud_colors = []
if color_names != []:
for point in filtered_cloud_levels:
if point['color name'] in color_names:
filtered_cloud_colors.append(point)
else:
filtered_cloud_colors = filtered_cloud_levels
return filtered_cloud_colors
def filter_duplicate_source_points_dumb(pointcloud):
filtered_cloud = []
for i, point in enumerate(pointcloud):
other_points = [x for j,x in enumerate(pointcloud) if j != i]
duplicate = False
for other_point in other_points:
if point['source color'].get_value_tuple() == other_point['source color'].get_value_tuple():
duplicate = True
if not duplicate:
filtered_cloud.append(point)
return filtered_cloud
def pointcloud_contains_source_duplicates(pointcloud):
for i, point in enumerate(pointcloud):
other_points = [x for j,x in enumerate(pointcloud) if j != i]
for other_point in other_points:
if point['source color'].get_value_tuple() == other_point['source color'].get_value_tuple():
return True
return False
def filter_duplicate_source_points_per_level(pointcloud):
# Removes the entire level if there are duplicate source colors in the level.
filtered_cloud = []
levels = []
for point in pointcloud:
if point['level'] not in levels:
levels.append(point['level'])
for i, level in enumerate(levels):
these_points = filter_pointcloud(pointcloud, levels=[i])
if not pointcloud_contains_source_duplicates(these_points):
for point in these_points:
filtered_cloud.append(point)
else:
# print 'dropping level', i
pass
return filtered_cloud
def filter_duplicate_source_points_smart(pointcloud):
filtered_cloud = filter_duplicate_source_points_per_level(pointcloud)
filtered_cloud = filter_duplicate_source_points_dumb(filtered_cloud)
return filtered_cloud
def filter_duplicate_source_points(pointcloud):
return filter_duplicate_source_points_smart(pointcloud)
def distance(one_color, other_color):
# Colors are colormath.color_objects.
one_x, one_y, one_z = one_color.get_value_tuple()
other_x, other_y, other_z = other_color.get_value_tuple()
dist = math.sqrt(pow((one_x - other_x), 2) +
pow((one_y - other_y), 2) +
pow((one_z - other_z), 2))
return dist
def closest(cloud, color, mode='source color'):
# cloud is the pointcloud list, color is colormath.color_objects
# mode is either "source color" or "dest color"
smallest_distance_so_far = 10000
for point in cloud:
d = distance(color, point[mode])
if d < smallest_distance_so_far:
smallest_distance_so_far = d
closest = point
return closest
def octant_split(pointcloud, color):
# Divide the pointcloud into octants around the given color,
# which is an instance from colormath.color_objects
# Do not return empty octants.
labeled_points = []
color_tuple = color.get_value_tuple()
for point in pointcloud:
labeled_point = {'point': point}
point_tuple = point['source color'].get_value_tuple()
if point_tuple[0] >= color_tuple[0]:
labeled_point['x_dir'] = '+'
else:
labeled_point['x_dir'] = '-'
if point_tuple[1] >= color_tuple[1]:
labeled_point['y_dir'] = '+'
else:
labeled_point['y_dir'] = '-'
if point_tuple[2] >= color_tuple[2]:
labeled_point['z_dir'] = '+'
else:
labeled_point['z_dir'] = '-'
labeled_points.append(labeled_point)
octants = [('+', '+', '+'), ('-', '+', '+'), ('-', '-', '+'),
('+', '-', '+'), ('+', '+', '-'), ('-', '+', '-'),
('-', '-', '-'), ('+', '-', '-'), ]
split_octants = []
for octant in octants:
split_octants.append([labeled_point['point'] for labeled_point in labeled_points if (labeled_point['x_dir'], labeled_point['y_dir'], labeled_point['z_dir']) == octant])
# remove empty octants
out = tuple( [octant for octant in split_octants if octant != []] )
return out
def closest_in_each_octant(pointcloud, color):
octants = octant_split(pointcloud, color)
out = [closest(i, color) for i in octants]
return tuple(out)
def weighted_dest_color(pointcloud, color):
nearest_points = closest_in_each_octant(pointcloud, color)
total_weight = 0
total_vector = (0, 0, 0)
for point in nearest_points:
d = distance(color, point['source color'])
if d == 0:
            return point['dest color']  # exact source match: return its mapped destination color
else:
total_weight += (1 / d)
for i, point in enumerate(nearest_points):
# calculate vector from source color to destination color
source = point['source color'].get_value_tuple()
dest = point['dest color'].get_value_tuple()
vector = np.subtract(dest, source)
# weight vector and normalize
weight = (1 / distance(color, point['source color'])) / total_weight
# print 'distance:', distance(color, point['source color']), 'inverted:', 1/distance(color, point['source color']), 'weight:', weight
# print vector
weighted_vector = [ n * weight for n in vector]
# print weighted_vector
total_vector = np.add(total_vector, weighted_vector)
# print total_vector
dest_color = np.add(color.get_value_tuple(), total_vector)
typ = type(color)
return typ(dest_color[0], dest_color[1], dest_color[2], observer=color.observer, illuminant=color.illuminant)
def image_to_dest(pointcloud, image, dither_error=True):
dest_image = np.zeros(image.shape, dtype="uint8")
error_collection = np.zeros(image.shape)
for row_number in range(len(image)):
# print 'row:', row_number
for column_number in range(len(image[0])):
raw_rgb = image[row_number][column_number]
srgb = sRGBColor(raw_rgb[0], raw_rgb[1], raw_rgb[2], is_upscaled=True)
xyz = convert_color(srgb, XYZColor)
dest_xyz = weighted_dest_color(pointcloud, xyz)
dest_srgb = convert_color(dest_xyz, sRGBColor)
if dither_error:
r,g,b = np.add(dest_srgb.get_value_tuple(), error_collection[row_number][column_number])
else:
r,g,b = dest_srgb.get_value_tuple()
# print 'xxx', dest_srgb, r, g, b
# print dest_srgb.get_value_tuple()
# print error_collection[row_number][column_number]
upscaled_srgb = sRGBColor(r, g, b).get_upscaled_value_tuple()
dest_image[row_number][column_number] = upscaled_srgb
if dither_error:
rounded_srgb = sRGBColor(upscaled_srgb[0], upscaled_srgb[1], upscaled_srgb[2], is_upscaled=True)
rounding_error = np.subtract(dest_srgb.get_value_tuple(), rounded_srgb.get_value_tuple())
# do Floyd-Steinberg dither
# over
try:
error_collection[row_number][column_number + 1] += rounding_error * 7 / 16
except IndexError:
pass # It's the end of the line, don't worry about it.
# down and back
try:
error_collection[row_number + 1][column_number - 1] += rounding_error * 3 / 16
except IndexError:
pass
# down
try:
error_collection[row_number + 1][column_number] += rounding_error * 5 / 16
except IndexError:
pass
# down and over
try:
error_collection[row_number + 1][column_number + 1] += rounding_error * 1 / 16
except IndexError:
pass
dest_image[row_number][column_number] = upscaled_srgb
# print dest_image[row_number][column_number], raw_rgb, upscaled_srgb
# print "error collection:\n", error_collection
return dest_image
if __name__ == "__main__":
import checks
checks.run()
cloud = import_pointcloud(source_file = "./img/wedge_dslr.tif",
dest_file = "./img/wedge_instax.tif")
# cloud = import_pointcloud(source_file = "./img/wedge_dslr.tif",
# dest_file = "./img/wedge_dslr.tif")
selected_colors = ['Red', 'Green', 'Blue', 'Cyan', 'Magenta', 'Yellow', 'Neutral 5']
# selected_colors = ["White", "Neutral 8", "Neutral 6.5", "Neutral 5", "Neutral 3.5", "Black"]
# selected_colors = macbeth_patch_names
selected_cloud = filter_pointcloud(cloud, color_names=selected_colors)
dedup = filter_duplicate_source_points(selected_cloud)
source_image = imread("./img/lego.jpg")
dest_image = image_to_dest(dedup, source_image, dither_error=True)
cloud = import_pointcloud(source_file = "./img/wedge_dslr.tif",
dest_file = "./img/wedge_instax-tweaked2.tif")
dedup = filter_duplicate_source_points(cloud)
dest_two = image_to_dest(dedup, source_image, dither_error=True)
# imsave("./out.jpg", dest_image)
plt.figure(1)
plt.subplot(221)
plt.imshow(source_image, interpolation='nearest')
plt.title('original')
plt.subplot(222)
plt.imshow(dest_image, interpolation='nearest')
plt.title('wedge-instax')
plt.subplot(223)
plt.imshow(dest_two, interpolation='nearest')
plt.title('wedge-instax-tweaked2')
plt.subplot(224)
cloud = import_pointcloud(source_file = "./img/wedge_dslr.tif",
dest_file = "./img/wedge_dslr.tif")
dedup = filter_duplicate_source_points(cloud)
dest_two = image_to_dest(dedup, source_image, dither_error=True)
plt.imshow(dest_two, interpolation='nearest')
plt.title('wedge_dslr')
plt.show()
| 35.054487
| 170
| 0.715918
|
ee5e567eaedebe80df7df39e83d8e795870f83b1
| 955
|
py
|
Python
|
robotathome/log.py
|
goyoambrosio/RobotAtHome_API
|
91864b4cf06202656def6b66ac348708337a9d52
|
[
"MIT"
] | 1
|
2021-02-21T09:31:25.000Z
|
2021-02-21T09:31:25.000Z
|
robotathome/log.py
|
goyoambrosio/RobotAtHome_API
|
91864b4cf06202656def6b66ac348708337a9d52
|
[
"MIT"
] | null | null | null |
robotathome/log.py
|
goyoambrosio/RobotAtHome_API
|
91864b4cf06202656def6b66ac348708337a9d52
|
[
"MIT"
] | null | null | null |
"""
Logger related functions for robotathome package
This script requires that `loguru` be installed within the Python
environment you are running this script in.
Install with:
conda install -c conda-forge loguru
pip install loguru
"""
import sys
# import loguru
from loguru import logger
logger.disable("robotathome")
def enable_logger(sink=sys.stderr, level="WARNING"):
"""
Enable the logging of messages.
Configure the ``logger`` variable imported from ``loguru``.
Args:
sink (file): An opened file pointer, or stream handler. Default to
standard error.
level (str): The log level to use. Possible values are TRACE, DEBUG,
INFO, WARNING, ERROR, CRITICAL.
Default to WARNING.
(*) Extracted from aria2p project
"""
logger.remove()
logger.configure(handlers=[{"sink": sink, "level": level}])
logger.enable("robotathome")
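A usage sketch (assuming the module is importable as robotathome.log): lower the threshold to see debug output, as described in the docstring above.
from robotathome.log import enable_logger, logger

enable_logger(level="DEBUG")            # stderr sink, DEBUG and above
logger.debug("robotathome logging is now visible")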
| 24.487179
| 76
| 0.657592
|
b77daf21ef08950ba54f46383457d6fa71ae0a44
| 1,050
|
py
|
Python
|
aalh_iit_buildings_008/cleanup-description-pipes.py
|
johndewees/iitmigration
|
4dadfbecda719d6e7d60af076a231aedec3c862f
|
[
"Unlicense"
] | null | null | null |
aalh_iit_buildings_008/cleanup-description-pipes.py
|
johndewees/iitmigration
|
4dadfbecda719d6e7d60af076a231aedec3c862f
|
[
"Unlicense"
] | null | null | null |
aalh_iit_buildings_008/cleanup-description-pipes.py
|
johndewees/iitmigration
|
4dadfbecda719d6e7d60af076a231aedec3c862f
|
[
"Unlicense"
] | null | null | null |
from openpyxl import load_workbook
filename = 'aalh_iit_buildings_008.xlsx'
wb = load_workbook(filename)
ws = wb['Metadata Template']
minimumcol = 8
maximumcol = 8
minimumrow = 7
maximumrow = 517
iterationrow = 7
targetcol = 13
titlecol = 2
desccol = 8
for row in ws.iter_rows(min_row=minimumrow, min_col=minimumcol, max_row=maximumrow, max_col=maximumcol):
testvar = ws.cell(row=iterationrow, column=desccol).value
for cell in row:
if testvar.endswith('|'):
desc = testvar[:-1]
desc = desc.strip()
ws.cell(row=iterationrow, column=desccol).value = desc
print(iterationrow,'PIPE FOUND END')
elif testvar.find(': |') != -1:
desc2 = testvar.replace(': |',':')
ws.cell(row=iterationrow, column=desccol).value = desc2
print(iterationrow,'PIPE FOUND START')
else:
continue
iterationrow = iterationrow + 1
print('***FINISHED SEARCHING FOR PIPES***')
wb.save("aalh_iit_buildings_008.xlsx")
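# Editor's sketch of the two cases handled above (hypothetical cell values):
# a trailing pipe is stripped, and ': |' is collapsed to ':'.
assert "Main building, 1950 |"[:-1].strip() == "Main building, 1950"
assert "Architect: |Unknown".replace(': |', ':') == "Architect:Unknown"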
| 31.818182
| 105
| 0.631429
|
58803a32e192a4a0aa973566899362e5752ea404
| 779
|
py
|
Python
|
av-s14/empresa_4/q2/Mapper.py
|
felipelssilva/fudamento-de-big-data
|
74ab67c29a28367f78b44da537ba6020381766b2
|
[
"MIT"
] | null | null | null |
av-s14/empresa_4/q2/Mapper.py
|
felipelssilva/fudamento-de-big-data
|
74ab67c29a28367f78b44da537ba6020381766b2
|
[
"MIT"
] | null | null | null |
av-s14/empresa_4/q2/Mapper.py
|
felipelssilva/fudamento-de-big-data
|
74ab67c29a28367f78b44da537ba6020381766b2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sys
import re
COMPANY = "@Empresa4"
BADWORDS = ['worst', 'bad', 'never', 'horrible', 'terrible']
def findbadwords(tweet):
for bad in BADWORDS:
if len(re.findall(bad, tweet)) > 0:
            return True
    return False
def isquestion(tweet):
if tweet.find("?") >= 0:
return True
else:
return False
def isforcompany(tweet):
if tweet.find(COMPANY) >= 0:
return True
else:
return False
for line in sys.stdin:
fields = line.split(';')
id_tweet = fields[0]
id_autor = fields[1]
data_criacao = fields[2]
tweet = fields[3]
if isforcompany(tweet) and not(isquestion(tweet)) and not(findbadwords(tweet)):
        print('%s\t%s' % (id_autor, 1))
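# Editor's sketch (hypothetical, not part of the original job): this mapper can
# be exercised locally by piping semicolon-separated tweets through it, e.g.
#   echo "1;42;2014-01-01;@Empresa4 atendimento excelente" | python Mapper.py
# which emits "42\t1" for tweets that mention the company, are not questions
# and contain no bad words. A companion reducer would then sum the counts per
# author, roughly:
#   counts = {}
#   for line in sys.stdin:
#       autor, value = line.rstrip('\n').split('\t')
#       counts[autor] = counts.get(autor, 0) + int(value)
#   for autor, total in counts.items():
#       print('%s\t%s' % (autor, total))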
| 20.5
| 83
| 0.590501
|
0841b65f699e6d7524db465f7d9cca8746ca43f5
| 14,258
|
py
|
Python
|
shapefit/deform/src/deformation/preproc.py
|
alexeybokhovkin/CAD-Deform
|
462fc6c97d91cc579f9e5551ed983cc10ecb9976
|
[
"MIT"
] | 78
|
2020-03-12T12:09:44.000Z
|
2022-02-28T12:19:47.000Z
|
shapefit/deform/src/deformation/preproc.py
|
alexeybokhovkin/CAD-Deform
|
462fc6c97d91cc579f9e5551ed983cc10ecb9976
|
[
"MIT"
] | 3
|
2021-01-19T14:40:38.000Z
|
2021-09-28T12:56:23.000Z
|
shapefit/deform/src/deformation/preproc.py
|
alexeybokhovkin/CAD-Deform
|
462fc6c97d91cc579f9e5551ed983cc10ecb9976
|
[
"MIT"
] | 8
|
2021-01-20T06:16:28.000Z
|
2022-01-14T05:27:14.000Z
|
import numpy as np
import trimesh
from trimesh.grouping import clusters
from .utils import filter_edges_by_parts
def compute_bitriangles(mesh_unique_faces, mesh_unique_edges):
bitriangles = {}
for face in mesh_unique_faces:
edge_1 = tuple(sorted([face[0], face[1]]))
if edge_1 not in bitriangles:
bitriangles[edge_1] = {face[0], face[1], face[2]}
else:
bitriangles[edge_1].add(face[2])
edge_2 = tuple(sorted([face[1], face[2]]))
if edge_2 not in bitriangles:
bitriangles[edge_2] = {face[0], face[1], face[2]}
else:
bitriangles[edge_2].add(face[0])
edge_3 = tuple(sorted([face[0], face[2]]))
if edge_3 not in bitriangles:
bitriangles[edge_3] = {face[0], face[1], face[2]}
else:
bitriangles[edge_3].add(face[1])
bitriangles_aligned = np.empty((len(mesh_unique_edges), 4), dtype=int)
for j, edge in enumerate(mesh_unique_edges):
bitriangle = [*sorted(edge)]
bitriangle += [x for x in list(bitriangles[tuple(sorted(edge))]) if x not in bitriangle]
bitriangles_aligned[j] = bitriangle
return bitriangles_aligned
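# Editor's illustrative sketch (hypothetical test data, not part of CAD-Deform):
# compute_bitriangles pairs each edge with the two vertices opposite its two
# adjacent triangles, so it assumes a closed mesh in which every edge is shared
# by exactly two faces -- here, a tetrahedron.
if __name__ == "__main__":
    tet_faces = np.array([[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]])
    tet_edges = np.array([[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]])
    # each row: [edge vertex a, edge vertex b, opposite vertex 1, opposite vertex 2]
    print(compute_bitriangles(tet_faces, tet_edges))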
def level_merger(data_object, partnet_map, part_levels, global_part_ids, level='map_0k_8'):
data_object_merged = data_object.copy()
partnet_map_merged = partnet_map.copy()
object_part_ids = global_part_ids[global_part_ids['object_id'] == data_object_merged['object_id']]
part_to_global_id = {}
for index, row in object_part_ids.iterrows():
part_to_global_id[index] = row['part_dir_name']
global_id_to_merge = {}
for global_id in part_to_global_id.keys():
global_id_to_merge[global_id] = part_levels.iloc[global_id][level]
part_name_to_merge = {}
for global_id in global_id_to_merge.keys():
if global_id_to_merge[global_id] not in part_name_to_merge:
part_name_to_merge[global_id_to_merge[global_id]] = []
part_name_to_merge[global_id_to_merge[global_id]] += [part_to_global_id[global_id]]
new_parts_list = list(part_name_to_merge.values())
new_parts_dict = {}
old_parts_to_new = {}
for i, new_part in enumerate(new_parts_list):
new_part_points = []
old_parts = []
for old_part in new_part:
old_parts += [old_part]
if old_part in data_object_merged['parts_vertices_p2p']:
new_part_points += [data_object_merged['parts_vertices_p2p'][old_part]]
if len(new_part_points) > 0:
new_part_points = np.vstack(new_part_points)
new_parts_dict['merged-new-{}'.format(i)] = new_part_points
for old_part in old_parts:
old_parts_to_new[old_part] = 'merged-new-{}'.format(i)
data_object_merged['parts_vertices_p2p'] = new_parts_dict
for key in partnet_map_merged:
old_part = partnet_map_merged[key][0].split('.')[0]
partnet_map_merged[key] = (old_parts_to_new[old_part] + '.obj',
partnet_map_merged[key][1],
partnet_map_merged[key][2])
return data_object_merged, partnet_map_merged
def find_sharp_edges(mesh, data_object, sharp_edges, partnet_map):
part_sharp_edges_ids = None
if data_object['shapenet_id'] in sharp_edges:
if data_object['object_id'] in sharp_edges[data_object['shapenet_id']]:
sharp_edges_for_mesh = sharp_edges[data_object['shapenet_id']][data_object['object_id']]
non_conflict_edges, conflict_edges = filter_edges_by_parts(sharp_edges_for_mesh, partnet_map)
unique_edges_to_vertices = {i: list(x) for i, x in enumerate(mesh.edges_unique)}
vertices_to_unique_edges = {tuple(unique_edges_to_vertices[i]): i for i in unique_edges_to_vertices}
part_sharp_edges_ids = []
for part_id in non_conflict_edges:
part_edges = non_conflict_edges[part_id]
edges_ids = []
for edge in part_edges:
try:
if tuple(edge) in vertices_to_unique_edges:
edges_ids += [vertices_to_unique_edges[tuple(edge)]]
else:
edges_ids += [vertices_to_unique_edges[tuple(edge[::-1])]]
except:
continue
part_sharp_edges_ids += [edges_ids]
return part_sharp_edges_ids
def split_vertices_by_parts(part_names, partnet_map):
parts_idx = [[] for _ in range(len(part_names))]
for k in partnet_map:
for i, part in enumerate(part_names):
if partnet_map[k][0] == part:
parts_idx[i] += [k]
return parts_idx
def transform_voxels_to_origin(data_object, all_parts, transform, parts_idx):
voxel_centers_p2p = []
surface_samples_p2p = []
for i, part in enumerate(all_parts):
part_samples_ids = parts_idx[i]
surface_samples_p2p += [part_samples_ids]
points = data_object['parts_vertices_p2p'][part.split('.')[0]]
points = np.hstack([points, np.ones(len(points))[:, None]])
points = (points @ np.linalg.inv(transform).T)[:, :3]
voxel_centers_p2p += [points]
return voxel_centers_p2p, surface_samples_p2p
def filter_voxels_by_clustering(voxel_centers):
voxel_centers_new = []
for points in voxel_centers:
if len(points) > 0:
if len(points) != 1:
groups = clusters(points, 0.1)
else:
groups = np.array([[0]])
groups_lens = [len(group) for group in groups]
if len(groups_lens) == 0:
voxel_centers_new += [[]]
else:
max_group_id = np.argmax(groups_lens)
new_group = groups[max_group_id]
voxel_centers_new += [points[new_group]]
else:
voxel_centers_new += [[]]
return voxel_centers_new
def neighboring_scene_voxel_parts(voxel_centers):
neighboring_voxel_centers_ids = []
for i, points_i in enumerate(voxel_centers):
for j, points_j in enumerate(voxel_centers):
if j > i and len(points_i) > 0 and len(points_j) > 0:
min_dist = np.min(np.sum((points_i[None, ...] - points_j[:, None, :]) ** 2, axis=2))
if min_dist < 0.05:
neighboring_voxel_centers_ids += [(i, j)]
return neighboring_voxel_centers_ids
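# Editor's sketch (hypothetical data): two single-point parts closer than the
# 0.05 squared-distance threshold are reported as neighbours, the distant
# third part is not, so the result below is [(0, 1)].
if __name__ == "__main__":
    _parts = [np.array([[0.0, 0.0, 0.0]]),
              np.array([[0.1, 0.0, 0.0]]),
              np.array([[5.0, 0.0, 0.0]])]
    print(neighboring_scene_voxel_parts(_parts))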
def filter_scene_voxel_parts_with_obb(voxel_centers, neighboring_voxel_centers_ids):
point_clouds = []
for i, points in enumerate(voxel_centers):
if len(points) > 0:
point_clouds += [trimesh.points.PointCloud(points)]
else:
point_clouds += [[]]
neighbors_to_merge = []
for neighbors in neighboring_voxel_centers_ids:
try:
if point_clouds[neighbors[0]] != [] and point_clouds[neighbors[1]] != []:
bbox_1 = point_clouds[neighbors[0]].bounding_box_oriented
bbox_2 = point_clouds[neighbors[1]].bounding_box_oriented
point_cloud_merge = trimesh.points.PointCloud(np.vstack([voxel_centers[neighbors[0]],
voxel_centers[neighbors[1]]]))
bbox_merge = point_cloud_merge.bounding_box_oriented
volume_1 = bbox_1.volume
volume_2 = bbox_2.volume
volume_merge = bbox_merge.volume
max_volume = max(volume_1, volume_2)
if volume_merge / max_volume < 1.3:
if volume_1 > volume_2:
major = neighbors[0]
minor = neighbors[1]
else:
major = neighbors[1]
minor = neighbors[0]
neighbors_to_merge += [(major, minor)]
except:
continue
for neighbors in neighbors_to_merge:
if len(voxel_centers[neighbors[1]]) > 0 and len(voxel_centers[neighbors[0]]) > 0:
voxel_centers[neighbors[0]] = np.vstack([voxel_centers[neighbors[0]],
voxel_centers[neighbors[1]]])
voxel_centers[neighbors[1]] = []
return voxel_centers
def find_corresondences_with_obb(voxel_centers, mesh, parts_idx, make_bbox_transform=False):
bbox_transforms = []
bboxes_vertices = []
bboxes_voxels = []
if make_bbox_transform:
for i, points in enumerate(voxel_centers):
try:
vertices = mesh.vertices[parts_idx[i]]
if len(points) != 0:
bbox_vertices = trimesh.points.PointCloud(vertices).bounding_box_oriented
bbox_voxels = trimesh.points.PointCloud(points).bounding_box_oriented
bboxes_vertices += [bbox_vertices.vertices]
bboxes_voxels += [bbox_voxels.vertices]
vertices_box_vicinities = [[i] for i in range(len(bbox_vertices.vertices))]
noncorrect_edges = []
for facet in bbox_vertices.facets:
face_1 = bbox_vertices.faces[facet[0]]
face_2 = bbox_vertices.faces[facet[1]]
intersection = list(set(face_1).intersection(set(face_2)))
noncorrect_edges += [intersection]
noncorrect_edges = np.sort(np.array(noncorrect_edges), axis=1)
noncorrect_edges = [tuple(x) for x in noncorrect_edges]
all_edges = np.sort(bbox_vertices.edges_unique, axis=1)
all_edges = [tuple(x) for x in all_edges]
correct_edges = [x for x in all_edges if x not in noncorrect_edges]
for edge in correct_edges:
vertices_box_vicinities[edge[0]] += [edge[1]]
vertices_box_vicinities[edge[1]] += [edge[0]]
anchor_vertices_vicinity = vertices_box_vicinities[0]
voxels_box_vicinities = [[i] for i in range(len(bbox_voxels.vertices))]
noncorrect_edges = []
for facet in bbox_voxels.facets:
face_1 = bbox_voxels.faces[facet[0]]
face_2 = bbox_voxels.faces[facet[1]]
intersection = list(set(face_1).intersection(set(face_2)))
noncorrect_edges += [intersection]
noncorrect_edges = np.sort(np.array(noncorrect_edges), axis=1)
noncorrect_edges = [tuple(x) for x in noncorrect_edges]
all_edges = np.sort(bbox_voxels.edges_unique, axis=1)
all_edges = [tuple(x) for x in all_edges]
correct_edges = [x for x in all_edges if x not in noncorrect_edges]
for edge in correct_edges:
voxels_box_vicinities[edge[0]] += [edge[1]]
voxels_box_vicinities[edge[1]] += [edge[0]]
voxels_full_vicinities = []
for vicinity in voxels_box_vicinities:
three_other_vertices = vicinity[1:]
voxels_full_vicinities += [vicinity]
voxels_full_vicinities += [
[vicinity[0], three_other_vertices[1], three_other_vertices[2], three_other_vertices[0]]]
voxels_full_vicinities += [
[vicinity[0], three_other_vertices[2], three_other_vertices[0], three_other_vertices[1]]]
best_dist = 100
vertices_target = np.array(bbox_vertices.vertices[anchor_vertices_vicinity])
vertices_target = np.hstack([vertices_target, np.ones(len(vertices_target))[:, None]])
for vicinity in voxels_full_vicinities:
voxels_source = np.array(bbox_voxels.vertices[vicinity])
voxels_source = np.hstack([voxels_source, np.ones(len(voxels_source))[:, None]])
transform = np.linalg.inv(voxels_source) @ vertices_target
if transform[0, 0] > 0 and transform[1, 1] > 0 and transform[2, 2] > 0:
dist = np.sum((transform[:3, :3] - np.eye(3)) ** 2)
if dist < best_dist:
best_dist = dist
if best_dist < 0:
# choose transform here or np.eye(4)
bbox_transforms += [transform]
else:
bbox_transforms += [np.eye(4)]
else:
bbox_transforms += [np.eye(4)]
except:
bbox_transforms += [np.eye(4)]
else:
bbox_transforms = [np.eye(4) for _ in voxel_centers]
min_init_dists = []
min_transformed_dists = []
mesh_vertices_nn = []
voxel_centers_nn = []
parts_vertices = []
parts_voxels = []
parts_voxels_transformed = []
for i, points in enumerate(voxel_centers):
if len(points) != 0:
vertices = mesh.vertices[parts_idx[i]]
parts_vertices += [vertices]
voxels = np.hstack([points, np.ones(len(points))[:, None]])
parts_voxels += [voxels[:, :3]]
voxels_transformed = (voxels @ bbox_transforms[i])[:, :3]
parts_voxels_transformed += [voxels_transformed]
vertices_idx = []
for j, point in enumerate(voxels_transformed):
dists = np.sum((vertices - point) ** 2, axis=1)
min_vertex_id = np.argmin(dists)
min_init_dist = np.sum((mesh.vertices[parts_idx[i]][min_vertex_id] - voxels[j][:3]) ** 2)
if min_init_dist < 0.01:
min_init_dists += [min_init_dist]
min_transformed_dists += [min(dists)]
vertices_idx += [parts_idx[i][min_vertex_id]]
voxel_centers_nn += [voxels[j][:3]]
mesh_vertices_nn += vertices_idx
return voxel_centers_nn, mesh_vertices_nn
| 47.211921
| 117
| 0.580797
|
1e9a61587116f4d44f94c6f0093fe33e3b533c3a
| 9,367
|
py
|
Python
|
SALib/util/__init__.py
|
LRY0111/SensitivityAnalysis-python
|
b48607fafb818f6d90490cd71dfc6f9f39f65d95
|
[
"Apache-2.0"
] | null | null | null |
SALib/util/__init__.py
|
LRY0111/SensitivityAnalysis-python
|
b48607fafb818f6d90490cd71dfc6f9f39f65d95
|
[
"Apache-2.0"
] | null | null | null |
SALib/util/__init__.py
|
LRY0111/SensitivityAnalysis-python
|
b48607fafb818f6d90490cd71dfc6f9f39f65d95
|
[
"Apache-2.0"
] | 2
|
2019-09-22T05:30:21.000Z
|
2021-12-02T03:15:31.000Z
|
"""A set of utility functions
"""
from collections import OrderedDict
import csv
from warnings import warn
import numpy as np
import scipy as sp
__all__ = ["scale_samples", "read_param_file"]
def scale_samples(params, bounds):
'''Rescale samples in 0-to-1 range to arbitrary bounds
Arguments
---------
bounds : list
list of lists of dimensions `num_params`-by-2
params : numpy.ndarray
numpy array of dimensions `num_params`-by-:math:`N`,
where :math:`N` is the number of samples
'''
# Check bounds are legal (upper bound is greater than lower bound)
b = np.array(bounds)
lower_bounds = b[:, 0]
upper_bounds = b[:, 1]
if np.any(lower_bounds >= upper_bounds):
raise ValueError("Bounds are not legal")
# This scales the samples in-place, by using the optional output
# argument for the numpy ufunctions
# The calculation is equivalent to:
# sample * (upper_bound - lower_bound) + lower_bound
np.add(np.multiply(params,
(upper_bounds - lower_bounds),
out=params),
lower_bounds,
out=params)
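# Editor's sketch (not part of SALib): scale two unit-interval samples of two
# parameters in place; as the broadcasting above implies, each row holds one
# sample and each column one parameter.
if __name__ == "__main__":
    _samples = np.array([[0.0, 0.5],
                         [1.0, 0.25]])
    scale_samples(_samples, [[0.0, 10.0], [-1.0, 1.0]])
    # _samples is now [[0.0, 0.0], [10.0, -0.5]]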
def unscale_samples(params, bounds):
"""Rescale samples from arbitrary bounds back to [0,1] range
Arguments
---------
bounds : list
list of lists of dimensions num_params-by-2
params : numpy.ndarray
numpy array of dimensions num_params-by-N,
where N is the number of samples
"""
# Check bounds are legal (upper bound is greater than lower bound)
b = np.array(bounds)
lower_bounds = b[:, 0]
upper_bounds = b[:, 1]
if np.any(lower_bounds >= upper_bounds):
raise ValueError("Bounds are not legal")
# This scales the samples in-place, by using the optional output
# argument for the numpy ufunctions
# The calculation is equivalent to:
# (sample - lower_bound) / (upper_bound - lower_bound)
np.divide(np.subtract(params, lower_bounds, out=params),
np.subtract(upper_bounds, lower_bounds),
out=params)
def nonuniform_scale_samples(params, bounds, dists):
"""Rescale samples in 0-to-1 range to other distributions
Arguments
---------
problem : dict
problem definition including bounds
params : numpy.ndarray
numpy array of dimensions num_params-by-N,
where N is the number of samples
dists : list
list of distributions, one for each parameter
unif: uniform with lower and upper bounds
triang: triangular with width (scale) and location of peak
location of peak is in percentage of width
lower bound assumed to be zero
norm: normal distribution with mean and standard deviation
lognorm: lognormal with ln-space mean and standard deviation
"""
b = np.array(bounds)
# initializing matrix for converted values
conv_params = np.zeros_like(params)
# loop over the parameters
for i in range(conv_params.shape[1]):
# setting first and second arguments for distributions
b1 = b[i][0]
b2 = b[i][1]
if dists[i] == 'triang':
# checking for correct parameters
if b1 <= 0 or b2 <= 0 or b2 >= 1:
raise ValueError('''Triangular distribution: Scale must be
greater than zero; peak on interval [0,1]''')
else:
conv_params[:, i] = sp.stats.triang.ppf(
params[:, i], c=b2, scale=b1, loc=0)
elif dists[i] == 'unif':
if b1 >= b2:
raise ValueError('''Uniform distribution: lower bound
must be less than upper bound''')
else:
conv_params[:, i] = params[:, i] * (b2 - b1) + b1
elif dists[i] == 'norm':
if b2 <= 0:
raise ValueError('''Normal distribution: stdev must be > 0''')
else:
conv_params[:, i] = sp.stats.norm.ppf(
params[:, i], loc=b1, scale=b2)
# lognormal distribution (ln-space, not base-10)
        # parameters are ln-space mean and standard deviation
elif dists[i] == 'lognorm':
# checking for valid parameters
if b2 <= 0:
raise ValueError(
'''Lognormal distribution: stdev must be > 0''')
else:
conv_params[:, i] = np.exp(
sp.stats.norm.ppf(params[:, i], loc=b1, scale=b2))
else:
valid_dists = ['unif', 'triang', 'norm', 'lognorm']
raise ValueError('Distributions: choose one of %s' %
", ".join(valid_dists))
return conv_params
def read_param_file(filename, delimiter=None):
"""Unpacks a parameter file into a dictionary
Reads a parameter file of format::
Param1,0,1,Group1,dist1
Param2,0,1,Group2,dist2
Param3,0,1,Group3,dist3
(Group and Dist columns are optional)
Returns a dictionary containing:
- names - the names of the parameters
- bounds - a list of lists of lower and upper bounds
- num_vars - a scalar indicating the number of variables
(the length of names)
- groups - a list of group names (strings) for each variable
- dists - a list of distributions for the problem,
None if not specified or all uniform
Arguments
---------
filename : str
The path to the parameter file
delimiter : str, default=None
The delimiter used in the file to distinguish between columns
"""
names = []
bounds = []
groups = []
dists = []
num_vars = 0
fieldnames = ['name', 'lower_bound', 'upper_bound', 'group', 'dist']
    with open(filename, 'r') as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read(1024), delimiters=delimiter)
csvfile.seek(0)
reader = csv.DictReader(
csvfile, fieldnames=fieldnames, dialect=dialect)
for row in reader:
if row['name'].strip().startswith('#'):
pass
else:
num_vars += 1
names.append(row['name'])
bounds.append(
[float(row['lower_bound']), float(row['upper_bound'])])
# If the fourth column does not contain a group name, use
# the parameter name
if row['group'] is None:
groups.append(row['name'])
                elif row['group'] == 'NA':
groups.append(row['name'])
else:
groups.append(row['group'])
# If the fifth column does not contain a distribution
# use uniform
if row['dist'] is None:
dists.append('unif')
else:
dists.append(row['dist'])
if groups == names:
groups = None
elif len(set(groups)) == 1:
raise ValueError('''Only one group defined, results will not be
meaningful''')
# setting dists to none if all are uniform
# because non-uniform scaling is not needed
if all([d == 'unif' for d in dists]):
dists = None
return {'names': names, 'bounds': bounds, 'num_vars': num_vars,
'groups': groups, 'dists': dists}
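# Editor's sketch of the expected file layout and result (hypothetical values).
# A two-line file such as
#   x1,0.0,1.0
#   x2,-3.14,3.14
# would be unpacked into
#   {'names': ['x1', 'x2'], 'bounds': [[0.0, 1.0], [-3.14, 3.14]],
#    'num_vars': 2, 'groups': None, 'dists': None}
# because missing group and dist columns default to the parameter name and
# 'unif' respectively, which then collapse to None.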
def compute_groups_matrix(groups):
"""Generate matrix which notes factor membership of groups
Computes a k-by-g matrix which notes factor membership of groups
where:
k is the number of variables (factors)
g is the number of groups
Also returns a g-length list of unique group_names whose positions
correspond to the order of groups in the k-by-g matrix
Arguments
---------
groups : list
Group names corresponding to each variable
Returns
-------
tuple
containing group matrix assigning parameters to
groups and a list of unique group names
"""
if not groups:
return None
num_vars = len(groups)
# Get a unique set of the group names
unique_group_names = list(OrderedDict.fromkeys(groups))
number_of_groups = len(unique_group_names)
indices = dict([(x, i) for (i, x) in enumerate(unique_group_names)])
    output = np.zeros((num_vars, number_of_groups), dtype=int)
for parameter_row, group_membership in enumerate(groups):
group_index = indices[group_membership]
output[parameter_row, group_index] = 1
return np.matrix(output), unique_group_names
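# Editor's sketch: three factors in two groups give a 3-by-2 membership matrix
# and the group names in first-seen order.
if __name__ == "__main__":
    _matrix, _names = compute_groups_matrix(['g1', 'g2', 'g1'])
    # _names == ['g1', 'g2']; _matrix == [[1, 0], [0, 1], [1, 0]]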
def requires_gurobipy(_has_gurobi):
'''
Decorator function which takes a boolean _has_gurobi as an argument.
    Use it to decorate any functions which require gurobi.
    Issues an ImportWarning at runtime (and the wrapped call returns None) if
    gurobi is not present.
Note that all runtime errors should be avoided in the working code,
using brute force options as preference.
'''
def _outer_wrapper(wrapped_function):
def _wrapper(*args, **kwargs):
if _has_gurobi:
result = wrapped_function(*args, **kwargs)
else:
warn("Gurobi not available", ImportWarning)
result = None
return result
return _wrapper
return _outer_wrapper
| 33.09894
| 79
| 0.590584
|
841e5179b17e52301d46950c988598e67b7b7deb
| 230
|
py
|
Python
|
src/learners/__init__.py
|
TonghanWang/NDQ
|
575f2e243bac1a567c072dbea8e093aaa4959511
|
[
"Apache-2.0"
] | 63
|
2020-02-23T09:37:15.000Z
|
2022-01-17T01:30:50.000Z
|
src/learners/__init__.py
|
fringsoo/NDQ
|
e243ba917e331065e82c6634cb1d756873747be5
|
[
"Apache-2.0"
] | 14
|
2020-04-20T02:20:11.000Z
|
2022-03-12T00:16:33.000Z
|
src/learners/__init__.py
|
mig-zh/NDQ
|
5720e3e8b529724e8d96a9a24c73bca24a11e7f9
|
[
"Apache-2.0"
] | 16
|
2020-03-12T02:57:52.000Z
|
2021-11-27T13:07:08.000Z
|
from .q_learner import QLearner
from .coma_learner import COMALearner
from .categorical_q_learner import CateQLearner
REGISTRY = {
"q_learner": QLearner,
"coma_learner": COMALearner,
"cate_q_learner": CateQLearner
}
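# Editor's note (not part of the original file): callers typically look the
# learner class up by the name given in the experiment config, e.g.
#   learner_cls = REGISTRY["cate_q_learner"]
# and then instantiate it with whatever arguments the runner provides.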
| 20.909091
| 47
| 0.769565
|
81fe9d3f36c58da40b1047d9eca64f0414d1dd62
| 1,117
|
py
|
Python
|
gitir_downloader/main.py
|
yankeexe/git.ir_downloader
|
7607ac2a92656625e94f754707e2b7a86fc40c75
|
[
"MIT"
] | 4
|
2020-04-14T11:22:33.000Z
|
2020-09-17T07:20:15.000Z
|
gitir_downloader/main.py
|
yankeexe/git.ir_downloader
|
7607ac2a92656625e94f754707e2b7a86fc40c75
|
[
"MIT"
] | 1
|
2020-04-14T16:22:34.000Z
|
2020-04-14T16:22:34.000Z
|
gitir_downloader/main.py
|
yankeexe/git.ir_downloader
|
7607ac2a92656625e94f754707e2b7a86fc40c75
|
[
"MIT"
] | 1
|
2020-04-14T14:35:33.000Z
|
2020-04-14T14:35:33.000Z
|
import os
import sys
import argparse
from gitir_downloader.parser import parse_url
from gitir_downloader.downloader import download_files
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
def init_argparse():
"""
Initialize argparse module for commandline argument parsing.
"""
parser = argparse.ArgumentParser(
description="Download video files from git.ir links.",
epilog="Enjoy the program :)",
)
parser.add_argument("link", type=str, help="git.ir URL")
parser.add_argument(
"-n", "--name", help="Folder name to store the downloaded files"
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def start():
"""
entry-point for the app
"""
try:
args: argparse.Namespace = init_argparse()
folder_title, LINKS = parse_url(args)
download_files(folder_title, LINKS, args)
except KeyboardInterrupt:
print("Stopped Downloading!" + " \N{cross mark}")
try:
sys.exit(0)
except SystemExit:
os._exit(os.EX_OK)
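# Editor's sketch of typical invocations (URL and script name are hypothetical,
# assuming a console-script entry point mapped to start()):
#   gitir-downloader https://git.ir/some-course/
#   gitir-downloader https://git.ir/some-course/ --name my_course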
| 23.765957
| 72
| 0.638317
|
8c9469b0558c593650fe0192ebd23e40e6bcf455
| 584
|
py
|
Python
|
setup.py
|
AustinRochford/webmc3
|
d0ba393a2c3c158d66984c895c99584f44ff1f33
|
[
"Apache-2.0"
] | 46
|
2017-12-26T20:11:30.000Z
|
2021-09-10T11:03:20.000Z
|
setup.py
|
AustinRochford/webmc3
|
d0ba393a2c3c158d66984c895c99584f44ff1f33
|
[
"Apache-2.0"
] | 5
|
2017-12-29T13:28:44.000Z
|
2018-01-07T00:51:32.000Z
|
setup.py
|
AustinRochford/webmc3
|
d0ba393a2c3c158d66984c895c99584f44ff1f33
|
[
"Apache-2.0"
] | 8
|
2018-01-05T17:27:44.000Z
|
2021-07-27T10:08:04.000Z
|
#!/usr/bin/env python
from os.path import realpath, dirname, join
from setuptools import setup, find_packages
import versioneer
DISTNAME = 'webmc3'
AUTHOR = 'Austin Rochford'
AUTHOR_EMAIL = 'austin.rochford@gmail.com'
VERSION = '0.1'
PROJECT_ROOT = dirname(realpath(__file__))
REQUIREMENTS_FILE = join(PROJECT_ROOT, 'requirements.txt')
with open(REQUIREMENTS_FILE) as reqfile:
install_reqs = reqfile.read().splitlines()
if __name__ == "__main__":
setup(name=DISTNAME,
version=VERSION,
packages=find_packages(),
install_requires=install_reqs)
| 26.545455
| 58
| 0.734589
|
e037925961f2bfc8b8906fa81c2d7908ea590a62
| 64,561
|
py
|
Python
|
tensorflow/python/client/session.py
|
elielhojman/tensorflow
|
163aae337c875efce2518c3cd0fecb61968fe408
|
[
"Apache-2.0"
] | 5
|
2017-08-28T11:27:19.000Z
|
2021-08-03T17:40:00.000Z
|
tensorflow/python/client/session.py
|
elielhojman/tensorflow
|
163aae337c875efce2518c3cd0fecb61968fe408
|
[
"Apache-2.0"
] | 1
|
2020-11-25T21:29:56.000Z
|
2021-06-11T05:31:49.000Z
|
tensorflow/python/client/session.py
|
elielhojman/tensorflow
|
163aae337c875efce2518c3cd0fecb61968fe408
|
[
"Apache-2.0"
] | 4
|
2019-11-11T13:46:27.000Z
|
2020-03-14T05:36:53.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A client interface for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import re
import threading
import warnings
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow as tf_session
from tensorflow.python.framework import device
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import session_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
class SessionInterface(object):
"""Base class for implementations of TensorFlow client sessions."""
@property
def graph(self):
"""The underlying TensorFlow graph, to be used in building Operations."""
raise NotImplementedError('graph')
@property
def sess_str(self):
"""The TensorFlow process to which this session will connect."""
raise NotImplementedError('sess_str')
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Runs operations in the session. See `BaseSession.run()` for details."""
raise NotImplementedError('run')
def partial_run_setup(self, fetches, feeds=None):
"""Sets up the feeds and fetches for partial runs in the session."""
raise NotImplementedError('partial_run_setup')
def partial_run(self, handle, fetches, feed_dict=None):
"""Continues the execution with additional feeds and fetches."""
raise NotImplementedError('partial_run')
def _get_indexed_slices_value_from_fetches(fetched_vals):
return ops.IndexedSlicesValue(fetched_vals[0], fetched_vals[1],
fetched_vals[2]
if len(fetched_vals) == 3 else None)
def _get_feeds_for_indexed_slices(feed, feed_val):
return list(
zip([feed.values, feed.indices] if feed.dense_shape is None else
[feed.values, feed.indices, feed.dense_shape], feed_val))
# List of extensions supported to convert run arguments into actual fetches and
# feeds.
#
# Each element in the list is a tuple of (Type, fetch_fn, feed_fn1, feed_fn2),
# where the function signatures are:
# fetch_fn : Type -> (list of Tensors,
# lambda: list of fetched np.ndarray -> TypeVal)
# feed_fn1 : Type, TypeVal -> list of (Tensor, value)
# feed_fn2 : Type -> list of Tensors
#
# `fetch_fn` describes how to expand fetch into its
# component Tensors and how to contract the fetched results back into
# a single return value.
#
# Each feed function describes how to unpack a single fed value and map it to
# feeds of one or more tensors and their corresponding values: `feed_fn1` is
# used to feed a run, `feed_fn2` to set up a partial run.
#
# TODO(touts): We could reimplement these as specialized _FeedMapper
# implementations after we refactor the feed handling code to use them.
#
# Eventually, this registration could be opened up to support custom Tensor
# expansions.
# pylint: disable=g-long-lambda
_REGISTERED_EXPANSIONS = [
# SparseTensors are fetched as SparseTensorValues. They can be fed
# SparseTensorValues or normal tuples.
(sparse_tensor.SparseTensor,
lambda fetch: (
[fetch.indices, fetch.values, fetch.dense_shape],
lambda fetched_vals: sparse_tensor.SparseTensorValue(*fetched_vals)),
lambda feed, feed_val: list(zip(
[feed.indices, feed.values, feed.dense_shape], feed_val)),
lambda feed: [feed.indices, feed.values, feed.dense_shape]),
# IndexedSlices are fetched as IndexedSlicesValues. They can be fed
# IndexedSlicesValues or normal tuples.
(ops.IndexedSlices,
lambda fetch: (
[fetch.values, fetch.indices] if fetch.dense_shape is None
else [fetch.values, fetch.indices, fetch.dense_shape],
_get_indexed_slices_value_from_fetches),
_get_feeds_for_indexed_slices,
lambda feed: [feed.values, feed.indices] if feed.dense_shape is None
else [feed.values, feed.indices, feed.dense_shape]),
# The default catches all other types and performs no expansions.
(object,
lambda fetch: ([fetch], lambda fetched_vals: fetched_vals[0]),
lambda feed, feed_val: [(feed, feed_val)],
lambda feed: [feed])]
# pylint: enable=g-long-lambda
def _convert_to_numpy_obj(numpy_dtype, obj):
"""Explicitly convert obj based on numpy type except for string type."""
return numpy_dtype(obj) if numpy_dtype is not object else str(obj)
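# Editor's sketch: _convert_to_numpy_obj coerces a fed Python value to the
# tensor's numpy dtype, except for object (string) dtypes which fall back to
# str(), e.g. _convert_to_numpy_obj(np.int64, 3.7) -> np.int64(3) while
# _convert_to_numpy_obj(object, 3.7) -> '3.7'.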
def register_session_run_conversion_functions(
tensor_type,
fetch_function,
feed_function=None,
feed_function_for_partial_run=None):
"""Register fetch and feed conversion functions for `tf.Session.run()`.
This function registers a triple of conversion functions for fetching and/or
feeding values of user-defined types in a call to tf.Session.run().
An example
```python
class SquaredTensor(object):
def __init__(self, tensor):
self.sq = tf.square(tensor)
#you can define conversion functions as follows:
fetch_function = lambda squared_tensor:([squared_tensor.sq],
lambda val: val[0])
feed_function = lambda feed, feed_val: [(feed.sq, feed_val)]
feed_function_for_partial_run = lambda feed: [feed.sq]
#then after invoking this register function, you can use as follows:
session.run(squared_tensor1,
feed_dict = {squared_tensor2 : some_numpy_array})
```
Args:
tensor_type: The type for which you want to register a conversion function.
fetch_function: A callable that takes an object of type `tensor_type` and
returns a tuple, where the first element is a list of `tf.Tensor` objects,
and the second element is a callable that takes a list of ndarrays and
returns an object of some value type that corresponds to `tensor_type`.
fetch_function describes how to expand fetch into its component Tensors
and how to contract the fetched results back into a single return value.
feed_function: A callable that takes feed_key and feed_value as input, and
returns a list of tuples (feed_tensor, feed_val), feed_key must have type
`tensor_type`, and feed_tensor must have type `tf.Tensor`. Each feed
function describes how to unpack a single fed value and map it to feeds
of one or more tensors and their corresponding values.
feed_function_for_partial_run: A callable for specifying tensor values to
feed when setting up a partial run, which takes a `tensor_type` type
object as input, and returns a list of Tensors.
"""
for conversion_function in _REGISTERED_EXPANSIONS:
if issubclass(conversion_function[0], tensor_type):
      raise ValueError('%s has already been registered so ignore it.' %
                       tensor_type)
return
_REGISTERED_EXPANSIONS.insert(0, (tensor_type, fetch_function, feed_function,
feed_function_for_partial_run))
class _FetchMapper(object):
"""Definition of the interface provided by fetch mappers.
Fetch mappers are utility classes used by the _FetchHandler to handle
arbitrary structures for the `fetch` argument to `Session.run()`.
The `fetch` argument can be of various shapes: single tensor or op, list of
fetches, tuple of fetches, namedtuple of fetches, or dict of fetches. The
structures can be arbitrarily nested.
The low level run() API only wants a list of tensor or op names. The various
`_FetchMapper` subclasses below take care of handling the different shapes:
uniquifying the fetches, and constructing results with the original shape.
"""
def unique_fetches(self):
"""Return the list of unique tensors or ops needed by this fetch mapper.
Returns:
A list of tensors or ops.
"""
raise NotImplementedError('Must be implemented by subclasses')
def build_results(self, values):
"""Build results that match the original shape of the fetch.
Args:
values: List of values returned by run(). The values correspond
exactly to the list tensors or ops returned by unique_fetches().
Returns:
A struct of the same shape as the original fetch object handled by
this fetch mapper. In the returned struct, the original fetches are
replaced by their fetched values.
"""
raise NotImplementedError('Must be implemented by subclasses')
@staticmethod
def for_fetch(fetch):
"""Creates fetch mapper that handles the structure of `fetch`.
The default graph must be the one from which we want to fetch values when
this function is called.
Args:
fetch: An arbitrary fetch structure: singleton, list, tuple,
namedtuple, or dict.
Returns:
An instance of a subclass of `_FetchMapper` that handles the shape.
"""
if fetch is None:
raise TypeError('Fetch argument %r has invalid type %r' % (fetch,
type(fetch)))
elif isinstance(fetch, (list, tuple)):
# NOTE(touts): This is also the code path for namedtuples.
return _ListFetchMapper(fetch)
elif isinstance(fetch, dict):
return _DictFetchMapper(fetch)
else:
# Look for a handler in the registered expansions.
for tensor_type, fetch_fn, _, _ in _REGISTERED_EXPANSIONS:
if isinstance(fetch, tensor_type):
fetches, contraction_fn = fetch_fn(fetch)
return _ElementFetchMapper(fetches, contraction_fn)
# Did not find anything.
raise TypeError('Fetch argument %r has invalid type %r' % (fetch,
type(fetch)))
class _ElementFetchMapper(_FetchMapper):
"""Fetch mapper for singleton tensors and ops."""
def __init__(self, fetches, contraction_fn):
"""Creates an _ElementFetchMapper.
This is the fetch mapper used for leaves in the fetch struct. Because of
the expansions mechanism, a leaf can actually fetch more than one tensor.
Also note that the fetches here can be just strings (tensor or op names) or
any other object that the graph knows how to convert to a tensor, such as a
Variable. So we have to run each fetch through `as_graph_element()` to get
the corresponding tensor or op.
Args:
fetches: List of objects, as returned by a fetch_fn defined
in _REGISTERED_EXPANSIONS.
contraction_fn: Callable as returned by a fetch_fn.
"""
self._unique_fetches = []
for fetch in fetches:
try:
self._unique_fetches.append(ops.get_default_graph().as_graph_element(
fetch, allow_tensor=True, allow_operation=True))
except TypeError as e:
raise TypeError('Fetch argument %r has invalid type %r, '
'must be a string or Tensor. (%s)' %
(fetch, type(fetch), str(e)))
except ValueError as e:
raise ValueError('Fetch argument %r cannot be interpreted as a '
'Tensor. (%s)' % (fetch, str(e)))
except KeyError as e:
raise ValueError('Fetch argument %r cannot be interpreted as a '
'Tensor. (%s)' % (fetch, str(e)))
self._contraction_fn = contraction_fn
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
if not values:
# 'Operation' case
return None
else:
return self._contraction_fn(values)
def _uniquify_fetches(fetch_mappers):
"""Uniquifies fetches from a list of fetch_mappers.
This is a utility function used by _ListFetchMapper and _DictFetchMapper. It
gathers all the unique fetches from a list of mappers and builds a list
containing all of them but without duplicates (unique_fetches).
It also returns a 2-D list of integers (values_indices) indicating at which
index in unique_fetches the fetches of the mappers are located.
This list is as follows:
values_indices[mapper_index][mapper_fetch_index] = unique_fetches_index
Args:
fetch_mappers: list of fetch mappers.
Returns:
A list of fetches.
A 2-D list of integers.
"""
unique_fetches = []
value_indices = []
seen_fetches = {}
for m in fetch_mappers:
m_value_indices = []
for f in m.unique_fetches():
j = seen_fetches.get(f)
if j is None:
j = len(seen_fetches)
seen_fetches[f] = j
unique_fetches.append(f)
m_value_indices.append(j)
value_indices.append(m_value_indices)
return unique_fetches, value_indices
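# Editor's sketch: for two mappers whose unique fetches are [a, b] and [b, c],
# _uniquify_fetches returns unique_fetches == [a, b, c] and
# value_indices == [[0, 1], [1, 2]].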
class _ListFetchMapper(_FetchMapper):
"""Fetch mapper for lists, tuples, and namedtuples."""
def __init__(self, fetches):
"""Creates a _ListFetchMapper.
Args:
fetches: List, tuple, or namedtuple of fetches.
"""
self._fetch_type = type(fetches)
self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
# Create the list of results for each mapper.
results = []
for m, vi in zip(self._mappers, self._value_indices):
results.append(m.build_results([values[j] for j in vi]))
# Return a value of the original type of the fetches.
if issubclass(self._fetch_type, list):
return results
elif self._fetch_type == tuple:
return tuple(results)
else:
# This is the code path for namedtuple.
return self._fetch_type(*results)
class _DictFetchMapper(_FetchMapper):
"""Fetch mapper for dicts."""
def __init__(self, fetches):
"""Creates a _DictFetchMapper.
Args:
fetches: Dict of fetches.
"""
self._fetch_type = type(fetches)
self._keys = fetches.keys()
self._mappers = [
_FetchMapper.for_fetch(fetch) for fetch in fetches.values()
]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
results = self._fetch_type()
for k, m, vi in zip(self._keys, self._mappers, self._value_indices):
results[k] = m.build_results([values[j] for j in vi])
return results
class _FetchHandler(object):
"""Handler for structured fetches.
Given a graph, a user-provided structure for fetches, and a feed dict, this
class takes care of generating a list of tensor names to fetch and op names
to run for a low level `run()` call.
Given the results of the low level run call, this class can also rebuild a
result structure matching the user-provided structure for fetches, but
containing the corresponding results.
"""
# TODO(touts): Make this class also take care of destructuring the feed
# dict instead of doing it in the callers.
def __init__(self, graph, fetches, feeds, feed_handles=None):
"""Creates a fetch handler.
Args:
graph: Graph of the fetches. Used to check for fetchability
and to convert all fetches to tensors or ops as needed.
fetches: An arbitrary fetch structure: singleton, list, tuple,
namedtuple, or dict.
feeds: A feed dict where keys are Tensors.
feed_handles: A dict from feed Tensors to TensorHandle objects used as
direct feeds.
"""
with graph.as_default():
self._fetch_mapper = _FetchMapper.for_fetch(fetches)
self._fetches = []
self._targets = []
self._feeds = feeds
self._feed_handles = feed_handles or {}
self._ops = []
self._fetch_handles = {}
for fetch in self._fetch_mapper.unique_fetches():
if isinstance(fetch, ops.Operation):
self._assert_fetchable(graph, fetch)
self._targets.append(fetch)
self._ops.append(True)
else:
self._assert_fetchable(graph, fetch.op)
self._fetches.append(fetch)
self._ops.append(False)
# Remember the fetch if it is for a tensor handle.
if (isinstance(fetch, ops.Tensor) and
(fetch.op.type == 'GetSessionHandle' or
fetch.op.type == 'GetSessionHandleV2')):
self._fetch_handles[fetch] = fetch.op.inputs[0].dtype
self._final_fetches = [x for x in self._fetches if x not in feeds]
def _assert_fetchable(self, graph, op):
if not graph.is_fetchable(op):
raise ValueError(
'Operation %r has been marked as not fetchable.' % op.name)
def fetches(self):
"""Return the unique names of tensors to fetch.
Returns:
A list of strings.
"""
return self._final_fetches
def targets(self):
"""Return the unique names of ops to run.
Returns:
A list of strings.
"""
return self._targets
def build_results(self, session, tensor_values):
"""Build results matching the original fetch shape.
`tensor_values` must be a list of the same length as
the one returned by `fetches()`, and holding the requested
fetch values.
This method builds a struct with the same shape as the original `fetches`
passed to the constructor, in which the fetches are replaced by their
fetched value.
Args:
session: The enclosing session. Used for tensor handles.
tensor_values: List of values matching the list returned
by fetches().
Returns:
A structure of the same shape as the original `fetches` argument but
containing tensors or None (for fetched ops).
"""
full_values = []
assert len(self._final_fetches) == len(tensor_values)
i = 0
j = 0
for is_op in self._ops:
if is_op:
full_values.append(None)
else:
# If the fetch was in the feeds, use the fed value, otherwise
# use the returned value.
if self._fetches[i] in self._feed_handles:
# A fetch had a corresponding direct TensorHandle feed. Call eval()
# to obtain the Tensor value from the TensorHandle.
value = self._feed_handles[self._fetches[i]].eval()
else:
value = self._feeds.get(self._fetches[i])
if value is None:
value = tensor_values[j]
j += 1
dtype = self._fetch_handles.get(self._fetches[i])
if dtype:
full_values.append(session_ops.TensorHandle(value, dtype, session))
else:
full_values.append(value)
i += 1
assert j == len(tensor_values)
return self._fetch_mapper.build_results(full_values)
def _name_list(tensor_list):
"""Utility function for transitioning to the new session API.
Args:
tensor_list: a list of `Tensor`s.
Returns:
A list of each `Tensor`s name (as byte arrays).
"""
return [compat.as_bytes(t.name) for t in tensor_list]
class _DeviceAttributes(object):
"""Struct-like object describing a device's attributes.
Each device has 3 key properties:
- name: the fully-qualified TensorFlow path to the device. For
example: /job:worker/replica:0/task:3/device:CPU:0
- device_type: the type of the device (e.g. CPU, GPU, TPU, etc.)
- memory_limit_bytes: the maximum amount of memory available on the device
(in bytes).
"""
def __init__(self, name, device_type, memory_limit_bytes):
self._name = device.canonical_name(name)
self._device_type = device_type
self._memory_limit_bytes = memory_limit_bytes
@property
def name(self):
return self._name
@property
def device_type(self):
return self._device_type
@property
def memory_limit_bytes(self):
return self._memory_limit_bytes
def __repr__(self):
return '_DeviceAttributes(%s, %s, %d)' % (
self.name,
self.device_type,
self.memory_limit_bytes,
)
class BaseSession(SessionInterface):
"""A class for interacting with a TensorFlow computation.
The BaseSession enables incremental graph building with inline
execution of Operations and evaluation of Tensors.
"""
def __init__(self, target='', graph=None, config=None):
"""Constructs a new TensorFlow session.
Args:
target: (Optional) The TensorFlow execution engine to connect to.
graph: (Optional) The graph to be used. If this argument is None,
the default graph will be used.
config: (Optional) ConfigProto proto used to configure the session.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
creating the TensorFlow session.
TypeError: If one of the arguments has the wrong type.
"""
if graph is None:
self._graph = ops.get_default_graph()
else:
if not isinstance(graph, ops.Graph):
raise TypeError('graph must be a tf.Graph, but got %s' % type(graph))
self._graph = graph
self._opened = False
self._closed = False
self._current_version = 0
self._extend_lock = threading.Lock()
if target is not None:
try:
self._target = compat.as_bytes(target)
except TypeError:
raise TypeError('target must be a string, but got %s' % type(target))
else:
self._target = None
self._delete_lock = threading.Lock()
self._dead_handles = []
if config is not None:
if not isinstance(config, config_pb2.ConfigProto):
raise TypeError(
'config must be a tf.ConfigProto, but got %s' % type(config))
self._config = config
self._add_shapes = config.graph_options.infer_shapes
else:
self._config = None
self._add_shapes = False
self._session = None
opts = tf_session.TF_NewSessionOptions(target=self._target, config=config)
try:
# pylint: disable=protected-access
self._session = tf_session.TF_NewSession(self._graph._c_graph, opts)
# pylint: enable=protected-access
finally:
tf_session.TF_DeleteSessionOptions(opts)
def list_devices(self):
"""Lists available devices in this session.
```python
devices = sess.list_devices()
for d in devices:
print(d.name)
```
Each element in the list has the following properties:
- `name`: A string with the full name of the device. ex:
`/job:worker/replica:0/task:3/device:CPU:0`
- `device_type`: The type of the device (e.g. `CPU`, `GPU`, `TPU`.)
- `memory_limit`: The maximum amount of memory available on the device.
Note: depending on the device, it is possible the usable memory could
be substantially less.
Raises:
tf.errors.OpError: If it encounters an error (e.g. session is in an
invalid state, or network errors occur).
Returns:
A list of devices in the session.
"""
raw_device_list = tf_session.TF_SessionListDevices(self._session)
device_list = []
size = tf_session.TF_DeviceListCount(raw_device_list)
for i in range(size):
name = tf_session.TF_DeviceListName(raw_device_list, i)
device_type = tf_session.TF_DeviceListType(raw_device_list, i)
memory = tf_session.TF_DeviceListMemoryBytes(raw_device_list, i)
device_list.append(_DeviceAttributes(name, device_type, memory))
tf_session.TF_DeleteDeviceList(raw_device_list)
return device_list
def close(self):
"""Closes this session.
Calling this method frees all resources associated with the session.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
closing the TensorFlow session.
"""
if self._session and not self._closed:
self._closed = True
tf_session.TF_CloseSession(self._session)
def __del__(self):
# cleanly ignore all exceptions
try:
self.close()
except Exception: # pylint: disable=broad-except
pass
if self._session is not None:
try:
tf_session.TF_DeleteSession(self._session)
except AttributeError:
# At shutdown, `c_api_util` or `tf_session` may have been garbage
# collected, causing the above method calls to fail. In this case,
# silently leak since the program is about to terminate anyway.
pass
self._session = None
@property
def graph(self):
"""The graph that was launched in this session."""
return self._graph
@property
def graph_def(self):
"""A serializable version of the underlying TensorFlow graph.
Returns:
A graph_pb2.GraphDef proto containing nodes for all of the Operations in
the underlying TensorFlow graph.
"""
return self._graph.as_graph_def(add_shapes=self._add_shapes)
@property
def sess_str(self):
return self._target
def as_default(self):
"""Returns a context manager that makes this object the default session.
Use with the `with` keyword to specify that calls to
@{tf.Operation.run} or @{tf.Tensor.eval} should be executed in
this session.
```python
c = tf.constant(..)
sess = tf.Session()
with sess.as_default():
assert tf.get_default_session() is sess
print(c.eval())
```
To get the current default session, use @{tf.get_default_session}.
*N.B.* The `as_default` context manager *does not* close the
session when you exit the context, and you must close the session
explicitly.
```python
c = tf.constant(...)
sess = tf.Session()
with sess.as_default():
print(c.eval())
# ...
with sess.as_default():
print(c.eval())
sess.close()
```
Alternatively, you can use `with tf.Session():` to create a
session that is automatically closed on exiting the context,
including when an uncaught exception is raised.
*N.B.* The default session is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
*N.B.* Entering a `with sess.as_default():` block does not affect
the current default graph. If you are using multiple graphs, and
`sess.graph` is different from the value of @{tf.get_default_graph},
you must explicitly enter a `with sess.graph.as_default():` block
to make `sess.graph` the default graph.
Returns:
A context manager using this session as the default session.
"""
return ops.default_session(self)
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Runs operations and evaluates tensors in `fetches`.
This method runs one "step" of TensorFlow computation, by
running the necessary graph fragment to execute every `Operation`
and evaluate every `Tensor` in `fetches`, substituting the values in
`feed_dict` for the corresponding input values.
The `fetches` argument may be a single graph element, or an arbitrarily
nested list, tuple, namedtuple, dict, or OrderedDict containing graph
elements at its leaves. A graph element can be one of the following types:
* An @{tf.Operation}.
The corresponding fetched value will be `None`.
* A @{tf.Tensor}.
The corresponding fetched value will be a numpy ndarray containing the
value of that tensor.
* A @{tf.SparseTensor}.
The corresponding fetched value will be a
@{tf.SparseTensorValue}
containing the value of that sparse tensor.
* A `get_tensor_handle` op. The corresponding fetched value will be a
numpy ndarray containing the handle of that tensor.
* A `string` which is the name of a tensor or operation in the graph.
The value returned by `run()` has the same shape as the `fetches` argument,
where the leaves are replaced by the corresponding values returned by
TensorFlow.
Example:
```python
a = tf.constant([10, 20])
b = tf.constant([1.0, 2.0])
# 'fetches' can be a singleton
v = session.run(a)
# v is the numpy array [10, 20]
# 'fetches' can be a list.
v = session.run([a, b])
# v is a Python list with 2 numpy arrays: the 1-D array [10, 20] and the
# 1-D array [1.0, 2.0]
# 'fetches' can be arbitrary lists, tuples, namedtuple, dicts:
MyData = collections.namedtuple('MyData', ['a', 'b'])
v = session.run({'k1': MyData(a, b), 'k2': [b, a]})
# v is a dict with
# v['k1'] is a MyData namedtuple with 'a' (the numpy array [10, 20]) and
# 'b' (the numpy array [1.0, 2.0])
# v['k2'] is a list with the numpy array [1.0, 2.0] and the numpy array
# [10, 20].
```
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. Each key in `feed_dict` can be
one of the following types:
* If the key is a @{tf.Tensor}, the
value may be a Python scalar, string, list, or numpy ndarray
that can be converted to the same `dtype` as that
tensor. Additionally, if the key is a
@{tf.placeholder}, the shape of
the value will be checked for compatibility with the placeholder.
* If the key is a
@{tf.SparseTensor},
the value should be a
@{tf.SparseTensorValue}.
* If the key is a nested tuple of `Tensor`s or `SparseTensor`s, the value
should be a nested tuple with the same structure that maps to their
corresponding values as above.
Each value in `feed_dict` must be convertible to a numpy array of the dtype
of the corresponding key.
The optional `options` argument expects a [`RunOptions`] proto. The options
allow controlling the behavior of this particular step (e.g. turning tracing
on).
The optional `run_metadata` argument expects a [`RunMetadata`] proto. When
appropriate, the non-Tensor output of this step will be collected there. For
example, when users turn on tracing in `options`, the profiled info will be
collected into this argument and passed back.
Args:
fetches: A single graph element, a list of graph elements,
or a dictionary whose values are graph elements or lists of graph
elements (described above).
feed_dict: A dictionary that maps graph elements to values
(described above).
options: A [`RunOptions`] protocol buffer
run_metadata: A [`RunMetadata`] protocol buffer
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list, or a dictionary with the
same keys as `fetches` if that is a dictionary (described above).
Order in which `fetches` operations are evaluated inside the call
is undefined.
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
ValueError: If `fetches` or `feed_dict` keys are invalid or refer to a
`Tensor` that doesn't exist.
"""
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(options.SerializeToString())) if options else None
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
try:
result = self._run(None, fetches, feed_dict, options_ptr,
run_metadata_ptr)
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
if options:
tf_session.TF_DeleteBuffer(options_ptr)
return result
def partial_run(self, handle, fetches, feed_dict=None):
"""Continues the execution with more feeds and fetches.
This is EXPERIMENTAL and subject to change.
To use partial execution, a user first calls `partial_run_setup()` and
then a sequence of `partial_run()`. `partial_run_setup` specifies the
list of feeds and fetches that will be used in the subsequent
`partial_run` calls.
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. See run() for more information.
Below is a simple example:
```python
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
res = sess.partial_run(h, r2, feed_dict={c: res})
```
Args:
handle: A handle for a sequence of partial runs.
fetches: A single graph element, a list of graph elements,
or a dictionary whose values are graph elements or lists of graph
elements (see documentation for `run`).
feed_dict: A dictionary that maps graph elements to values
(described above).
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list, or a dictionary with the
same keys as `fetches` if that is a dictionary
(see documentation for `run`).
Raises:
tf.errors.OpError: Or one of its subclasses on error.
"""
# TODO(touts): Support feeding and fetching the same tensor.
return self._run(handle, fetches, feed_dict, None, None)
def partial_run_setup(self, fetches, feeds=None):
"""Sets up a graph with feeds and fetches for partial run.
This is EXPERIMENTAL and subject to change.
Note that contrary to `run`, `feeds` only specifies the graph elements.
The tensors will be supplied by the subsequent `partial_run` calls.
Args:
fetches: A single graph element, or a list of graph elements.
feeds: A single graph element, or a list of graph elements.
Returns:
A handle for partial run.
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
tf.errors.OpError: Or one of its subclasses if a TensorFlow error happens.
"""
def _feed_fn(feed):
for tensor_type, _, _, feed_fn in _REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed)
raise TypeError('Feed argument %r has invalid type %r' % (feed,
type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
if feeds is None:
feeds = []
# Create request.
feed_list = []
# Validate and process feed_list.
is_list_feed = isinstance(feeds, (list, tuple))
if not is_list_feed:
feeds = [feeds]
for feed in feeds:
for subfeed in _feed_fn(feed):
try:
subfeed_t = self.graph.as_graph_element(
subfeed, allow_tensor=True, allow_operation=False)
# pylint: disable=protected-access
feed_list.append(subfeed_t._as_tf_output())
# pylint: enable=protected-access
except Exception as e:
e.message = ('Cannot interpret feed_list key as Tensor: ' + e.message)
e.args = (e.message,)
raise e
# Validate and process fetches.
# TODO(touts): Support feeding and fetching the same tensor.
fetch_handler = _FetchHandler(self._graph, fetches, {})
# Set up a graph with feeds and fetches for partial run.
def _setup_fn(session, feed_list, fetch_list, target_list):
self._extend_graph()
return tf_session.TF_SessionPRunSetup_wrapper(
session, feed_list, fetch_list, target_list)
# pylint: disable=protected-access
final_fetches = [t._as_tf_output() for t in fetch_handler.fetches()]
final_targets = [op._c_op for op in fetch_handler.targets()]
# pylint: enable=protected-access
return self._do_call(_setup_fn, self._session, feed_list, final_fetches,
final_targets)
def _run(self, handle, fetches, feed_dict, options, run_metadata):
"""Perform either run or partial_run, depending the presence of `handle`."""
def _feed_fn(feed, feed_val):
for tensor_type, _, feed_fn, _ in _REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed, feed_val)
raise TypeError('Feed argument %r has invalid type %r' % (feed,
type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
# Create request.
feed_dict_tensor = {}
feed_map = {}
# Validate and process feed_dict.
feed_handles = {}
if feed_dict:
feed_dict = nest.flatten_dict_items(feed_dict)
for feed, feed_val in feed_dict.items():
for subfeed, subfeed_val in _feed_fn(feed, feed_val):
try:
subfeed_t = self.graph.as_graph_element(
subfeed, allow_tensor=True, allow_operation=False)
except Exception as e:
raise TypeError(
'Cannot interpret feed_dict key as Tensor: ' + e.args[0])
if isinstance(subfeed_val, ops.Tensor):
raise TypeError('The value of a feed cannot be a tf.Tensor object. '
'Acceptable feed values include Python scalars, '
'strings, lists, numpy ndarrays, or TensorHandles. '
'For reference, the tensor object was ' +
str(feed_val) + ' which was passed to the '
'feed with key ' + str(feed) + '.')
subfeed_dtype = subfeed_t.dtype.as_numpy_dtype
if isinstance(subfeed_val, int) and _convert_to_numpy_obj(
subfeed_dtype, subfeed_val) != subfeed_val:
raise TypeError(
'Type of feed value ' + str(subfeed_val) + ' with type ' + str(
type(subfeed_val)) +
' is not compatible with Tensor type ' + str(subfeed_dtype) +
'. Try explicitly setting the type of the feed tensor'
' to a larger type (e.g. int64).')
is_tensor_handle_feed = isinstance(subfeed_val,
session_ops.TensorHandle)
if is_tensor_handle_feed:
np_val = subfeed_val.to_numpy_array()
feed_handles[subfeed_t] = subfeed_val
else:
np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
if (not is_tensor_handle_feed and
not subfeed_t.get_shape().is_compatible_with(np_val.shape)):
raise ValueError('Cannot feed value of shape %r for Tensor %r, '
'which has shape %r' %
(np_val.shape, subfeed_t.name,
str(subfeed_t.get_shape())))
if not self.graph.is_feedable(subfeed_t):
raise ValueError('Tensor %s may not be fed.' % subfeed_t)
feed_dict_tensor[subfeed_t] = np_val
feed_map[compat.as_bytes(subfeed_t.name)] = (subfeed_t, subfeed_val)
# Create a fetch handler to take care of the structure of fetches.
fetch_handler = _FetchHandler(
self._graph, fetches, feed_dict_tensor, feed_handles=feed_handles)
# Run request and get response.
# We need to keep the returned movers alive for the following _do_run().
# These movers are no longer needed when _do_run() completes, and
# are deleted when `movers` goes out of scope when this _run() ends.
# TODO(yuanbyu, keveman): Revisit whether we should just treat feeding
# of a handle from a different device as an error.
_ = self._update_with_movers(feed_dict_tensor, feed_map)
final_fetches = fetch_handler.fetches()
final_targets = fetch_handler.targets()
# We only want to really perform the run if fetches or targets are provided,
# or if the call is a partial run that specifies feeds.
if final_fetches or final_targets or (handle and feed_dict_tensor):
results = self._do_run(handle, final_targets, final_fetches,
feed_dict_tensor, options, run_metadata)
else:
results = []
return fetch_handler.build_results(self, results)
def make_callable(self, fetches, feed_list=None, accept_options=False):
"""Returns a Python callable that runs a particular step.
The returned callable will take `len(feed_list)` arguments whose types
must be compatible feed values for the respective elements of `feed_list`.
For example, if element `i` of `feed_list` is a `tf.Tensor`, the `i`th
argument to the returned callable must be a numpy ndarray (or something
convertible to an ndarray) with matching element type and shape. See
@{tf.Session.run} for details of the allowable feed key and value types.
The returned callable will have the same return type as
`tf.Session.run(fetches, ...)`. For example, if `fetches` is a `tf.Tensor`,
the callable will return a numpy ndarray; if `fetches` is a `tf.Operation`,
it will return `None`.
Args:
fetches: A value or list of values to fetch. See @{tf.Session.run}
for details of the allowable fetch types.
feed_list: (Optional.) A list of `feed_dict` keys. See
@{tf.Session.run} for details of the allowable feed key types.
accept_options: (Optional.) If `True`, the returned `Callable` will be
able to accept @{tf.RunOptions} and @{tf.RunMetadata} as optional
keyword arguments `options` and `run_metadata`, respectively, with
the same syntax and semantics as @{tf.Session.run}, which is useful
for certain use cases (profiling and debugging) but will result in
measurable slowdown of the `Callable`'s performance. Default: `False`.
Returns:
A function that when called will execute the step defined by
`feed_list` and `fetches` in this session.
Raises:
TypeError: If `fetches` or `feed_list` cannot be interpreted
as arguments to @{tf.Session.run}.
"""
if feed_list is not None:
if not isinstance(feed_list, (list, tuple)):
raise TypeError('`feed_list` must be a list or tuple.')
# Delegate any non-empty feed lists to the existing `run()` logic.
# TODO(mrry): Refactor the feed handling logic from
# `Session._run()` so that we can convert the feeds to a list of
# strings here.
def _generic_run(*feed_args, **kwargs):
feed_dict = {
feed: feed_val
for feed, feed_val in zip(feed_list, feed_args)
}
return self.run(fetches, feed_dict=feed_dict, **kwargs)
return _generic_run
# Ensure any changes to the graph are reflected in the runtime.
# Note that we don't need to do this on subsequent calls to the
# returned object, because the arguments to `fetches` must already be
# in the graph.
self._extend_graph()
# Create a fetch handler to take care of the structure of fetches.
fetch_handler = _FetchHandler(self._graph, fetches, {})
# pylint: disable=protected-access
fetch_list = [t._as_tf_output() for t in fetch_handler.fetches()]
target_list = [op._c_op for op in fetch_handler.targets()]
# pylint: enable=protected-access
def _callable_template_with_options_and_metadata(fetch_list,
target_list,
fetch_handler,
options=None,
run_metadata=None):
"""Template callable that accepts RunOptions and RunMetadata."""
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(options.SerializeToString())) if options else None
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
try:
results = self._call_tf_sessionrun(
options_ptr, {}, fetch_list, target_list, run_metadata_ptr)
if fetch_handler:
results = fetch_handler.build_results(self, results)
else:
results = results[0] if results else None
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
if options:
tf_session.TF_DeleteBuffer(options_ptr)
return results
if accept_options:
return functools.partial(_callable_template_with_options_and_metadata,
fetch_list, target_list, fetch_handler)
elif isinstance(fetches, ops.Operation):
# Special case for fetching a single operation, because the
# function will have no return value.
assert not fetch_list
assert len(target_list) == 1
def _single_operation_run():
self._call_tf_sessionrun(None, {}, [], target_list, None)
return _single_operation_run
elif isinstance(fetches, ops.Tensor):
# Special case for fetching a single tensor, because the
# function can return the result of `TF_Run()` directly.
assert len(fetch_list) == 1
assert not target_list
def _single_tensor_run():
results = self._call_tf_sessionrun(None, {}, fetch_list, [], None)
return results[0]
return _single_tensor_run
else:
# In all other cases, we must use `fetch_handler` to build the
# results for us.
def _fetch_handler_run():
results = self._call_tf_sessionrun(
None, {}, fetch_list, target_list, None)
return fetch_handler.build_results(self, results)
return _fetch_handler_run
# Captures the name of a node in an error status.
_NODEDEF_NAME_RE = re.compile(r'\[\[Node: ([^ ]*?) =')
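# Illustrative note (not part of the original source): the pattern above pulls
# the node name out of runtime error text such as
#   "[[Node: model/dense/MatMul = MatMul[...](...)]]"
# capturing "model/dense/MatMul", which _do_call() then maps back to the
# originating Operation to raise a richer exception.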
def _do_run(self, handle, target_list, fetch_list, feed_dict, options,
run_metadata):
"""Runs a step based on the given fetches and feeds.
Args:
handle: a handle for partial_run. None if this is just a call to run().
target_list: A list of operations to be run, but not fetched.
fetch_list: A list of tensors to be fetched.
feed_dict: A dictionary that maps tensors to numpy ndarrays.
options: A (pointer to a) [`RunOptions`] protocol buffer, or None
run_metadata: A (pointer to a) [`RunMetadata`] protocol buffer, or None
Returns:
A list of numpy ndarrays, corresponding to the elements of
`fetch_list`. If the ith element of `fetch_list` contains the
name of an operation, the first Tensor output of that operation
will be returned for that element.
Raises:
tf.errors.OpError: Or one of its subclasses on error.
"""
# pylint: disable=protected-access
feeds = dict((t._as_tf_output(), v) for t, v in feed_dict.items())
fetches = [t._as_tf_output() for t in fetch_list]
targets = [op._c_op for op in target_list]
# pylint: enable=protected-access
def _run_fn(feed_dict, fetch_list, target_list, options, run_metadata):
# Ensure any changes to the graph are reflected in the runtime.
self._extend_graph()
return self._call_tf_sessionrun(
options, feed_dict, fetch_list, target_list, run_metadata)
def _prun_fn(handle, feed_dict, fetch_list):
if target_list:
raise RuntimeError('partial_run() requires empty target_list.')
return self._call_tf_sessionprun(handle, feed_dict, fetch_list)
if handle is None:
return self._do_call(_run_fn, feeds, fetches, targets, options,
run_metadata)
else:
return self._do_call(_prun_fn, handle, feeds, fetches)
def _do_call(self, fn, *args):
try:
return fn(*args)
except errors.OpError as e:
message = compat.as_text(e.message)
m = BaseSession._NODEDEF_NAME_RE.search(message)
node_def = None
op = None
if m is not None:
node_name = m.group(1)
try:
op = self._graph.get_operation_by_name(node_name)
node_def = op.node_def
except KeyError:
pass
raise type(e)(node_def, op, message)
def _extend_graph(self):
with self._graph._session_run_lock(): # pylint: disable=protected-access
tf_session.ExtendSession(self._session)
# The threshold to run garbage collection to delete dead tensors.
_DEAD_HANDLES_THRESHOLD = 10
def _register_dead_handle(self, handle):
# Register a dead handle in the session. Delete the dead tensors when
# the number of dead tensors reaches a certain threshold.
tensors_to_delete = None
with self._delete_lock:
self._dead_handles.append(handle)
if len(self._dead_handles) == BaseSession._DEAD_HANDLES_THRESHOLD:
tensors_to_delete = self._dead_handles
self._dead_handles = []
# Delete the dead tensors.
if tensors_to_delete:
feeds = {}
fetches = []
for deleter_key, tensor_handle in enumerate(tensors_to_delete):
holder, deleter = session_ops._get_handle_deleter(
self.graph, deleter_key, tensor_handle)
feeds[holder] = tensor_handle
fetches.append(deleter)
self.run(fetches, feed_dict=feeds)
def _update_with_movers(self, feed_dict, feed_map):
# If a tensor handle is fed to a device-incompatible placeholder,
# we move the tensor to the right device, generate a new tensor handle,
# and update `feed_dict` to use the new handle.
handle_movers = []
for feed_name, val in feed_map.items():
mover = session_ops._get_handle_mover(self.graph, *val)
if mover:
handle_movers.append((feed_name, val[1], mover))
# Transfer a tensor to the right device if needed.
if not handle_movers:
return []
else:
feeds = {}
fetches = []
for _, handle, mover in handle_movers:
feeds[mover[0]] = handle
fetches.append(mover[1])
handles = self.run(fetches, feed_dict=feeds)
for handle_mover, handle in zip(handle_movers, handles):
np_val = np.array(handle.handle, dtype=np.object)
feed_name = handle_mover[0]
feed_tensor = feed_map[feed_name][0]
feed_dict[feed_tensor] = np_val
return handles
def _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list,
run_metadata):
return tf_session.TF_SessionRun_wrapper(
self._session, options, feed_dict, fetch_list, target_list,
run_metadata)
def _call_tf_sessionprun(self, handle, feed_dict, fetch_list):
return tf_session.TF_SessionPRun_wrapper(
self._session, handle, feed_dict, fetch_list)
# pylint: disable=protected-access
class _Callable(object):
"""Experimental wrapper for the C++ `Session::MakeCallable()` API."""
def __init__(self, session, callable_options):
self._session = session
self._handle = None
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(callable_options.SerializeToString()))
try:
with errors.raise_exception_on_not_ok_status() as status:
self._handle = tf_session.TF_SessionMakeCallable(
session._session, options_ptr, status)
finally:
tf_session.TF_DeleteBuffer(options_ptr)
def __call__(self, *args, **kwargs):
# TODO(b/74355905): Support argument and return value nested structures,
# and tensor-like objects such as SparseTensors.
run_metadata = kwargs.get('run_metadata', None)
try:
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
# TODO(mrry): Switch to raising an exception from the SWIG wrapper.
with errors.raise_exception_on_not_ok_status() as status:
ret = tf_session.TF_SessionRunCallable(
self._session._session, self._handle, args, status,
run_metadata_ptr)
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
return ret
def __del__(self):
# NOTE(mrry): It is possible that `self._session.__del__()` could be
# called before this destructor, in which case `self._session._session`
# will be `None`.
if self._handle is not None and self._session._session is not None:
with errors.raise_exception_on_not_ok_status() as status:
tf_session.TF_SessionReleaseCallable(
self._session._session, self._handle, status)
# pylint: enable=protected-access
# TODO(b/74355905): Reimplement `Session.make_callable()` using this method
# where possible.
def _make_callable_from_options(self, callable_options):
"""Returns a handle to a "callable" with the given options.
Args:
callable_options: A `CallableOptions` protocol buffer message describing
the computation that will be performed by the callable.
Returns:
A handle to the new callable.
"""
self._extend_graph()
return BaseSession._Callable(self, callable_options)
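# Illustrative usage sketch (assumed names, not part of the original source):
# `CallableOptions` lives in tensorflow.core.protobuf.config_pb2 and refers to
# feeds and fetches by tensor name.
#
#   opts = config_pb2.CallableOptions()
#   opts.feed.append(x.name)    # e.g. "x:0" for a hypothetical placeholder
#   opts.fetch.append(y.name)
#   fn = sess._make_callable_from_options(opts)
#   fn(np.ones([2, 2]))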
@tf_export('Session')
class Session(BaseSession):
"""A class for running TensorFlow operations.
A `Session` object encapsulates the environment in which `Operation`
objects are executed, and `Tensor` objects are evaluated. For
example:
```python
# Build a graph.
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# Launch the graph in a session.
sess = tf.Session()
# Evaluate the tensor `c`.
print(sess.run(c))
```
A session may own resources, such as
@{tf.Variable}, @{tf.QueueBase},
and @{tf.ReaderBase}. It is important to release
these resources when they are no longer required. To do this, either
invoke the @{tf.Session.close} method on the session, or use
the session as a context manager. The following two examples are
equivalent:
```python
# Using the `close()` method.
sess = tf.Session()
sess.run(...)
sess.close()
# Using the context manager.
with tf.Session() as sess:
sess.run(...)
```
The
[`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer exposes various configuration options for a
session. For example, to create a session that uses soft constraints
for device placement, and log the resulting placement decisions,
create a session as follows:
```python
# Launch the graph in a session that allows soft device placement and
# logs the placement decisions.
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
log_device_placement=True))
```
"""
def __init__(self, target='', graph=None, config=None):
"""Creates a new TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to.
Defaults to using an in-process engine. See
@{$distributed$Distributed TensorFlow}
for more examples.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional.) A
[`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer with configuration options for the session.
"""
super(Session, self).__init__(target, graph, config=config)
# NOTE(mrry): Create these on first `__enter__` to avoid a reference cycle.
self._default_graph_context_manager = None
self._default_session_context_manager = None
def __enter__(self):
if self._default_graph_context_manager is None:
self._default_graph_context_manager = self.graph.as_default()
else:
raise RuntimeError('Session context managers are not re-entrant. '
'Use `Session.as_default()` if you want to enter '
'a session multiple times.')
if self._default_session_context_manager is None:
self._default_session_context_manager = self.as_default()
self._default_graph_context_manager.__enter__()
return self._default_session_context_manager.__enter__()
def __exit__(self, exec_type, exec_value, exec_tb):
if exec_type is errors.OpError:
logging.error('Session closing due to OpError: %s', (exec_value,))
try:
self._default_session_context_manager.__exit__(exec_type, exec_value,
exec_tb)
except RuntimeError as error:
if error == exec_value:
# NOTE(skyewm): for some reason, in Python3,
# _default_session_context_manager.__exit__ will re-raise the "not
# re-entrant" exception raised in __enter__ above (note that if we're
# here, we're in the outer session context manager, since __exit__ is
# not called when __enter__ raises an exception). We still want to
# continue cleaning up this context manager before the exception is
# further propagated, so we ignore it here (note that it'll continue
# being propagated after this method completes).
pass
else:
raise
self._default_graph_context_manager.__exit__(exec_type, exec_value, exec_tb)
self._default_session_context_manager = None
self._default_graph_context_manager = None
self.close()
@staticmethod
def reset(target, containers=None, config=None):
"""Resets resource containers on `target`, and close all connected sessions.
A resource container is distributed across all workers in the
same cluster as `target`. When a resource container on `target`
is reset, resources associated with that container will be cleared.
In particular, all Variables in the container will become undefined:
they lose their values and shapes.
NOTE:
(i) reset() is currently only implemented for distributed sessions.
(ii) Any sessions on the master named by `target` will be closed.
If no resource containers are provided, all containers are reset.
Args:
target: The execution engine to connect to.
containers: A list of resource container name strings, or `None` if all
the containers are to be reset.
config: (Optional.) Protocol buffer with configuration options.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
resetting containers.
"""
if target is not None:
target = compat.as_bytes(target)
if containers is not None:
containers = [compat.as_bytes(c) for c in containers]
else:
containers = []
tf_session.TF_Reset(target, containers, config)
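# Illustrative usage sketch (hypothetical address, not part of the original
# source): resetting all containers on a distributed master also closes any
# sessions connected to it.
#
#   tf.Session.reset("grpc://worker0.example:2222")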
@tf_export('InteractiveSession')
class InteractiveSession(BaseSession):
"""A TensorFlow `Session` for use in interactive contexts, such as a shell.
The only difference with a regular `Session` is that an `InteractiveSession`
installs itself as the default session on construction.
The methods @{tf.Tensor.eval}
and @{tf.Operation.run}
will use that session to run ops.
This is convenient in interactive shells and [IPython
notebooks](http://ipython.org), as it avoids having to pass an explicit
`Session` object to run ops.
For example:
```python
sess = tf.InteractiveSession()
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# We can just use 'c.eval()' without passing 'sess'
print(c.eval())
sess.close()
```
Note that a regular session installs itself as the default session when it
is created in a `with` statement. The common usage in non-interactive
programs is to follow that pattern:
```python
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
with tf.Session():
# We can also use 'c.eval()' here.
print(c.eval())
```
"""
_count_lock = threading.Lock()
_active_session_count = 0 # GUARDED_BY(_count_lock)
def __init__(self, target='', graph=None, config=None):
"""Creates a new interactive TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to.
Defaults to using an in-process engine.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional) `ConfigProto` proto used to configure the session.
"""
if not config:
# If config is not provided, choose some reasonable defaults for
# interactive use:
#
# - Grow GPU memory as needed at the cost of fragmentation.
gpu_options = config_pb2.GPUOptions(allow_growth=True)
config = config_pb2.ConfigProto(gpu_options=gpu_options)
# Interactive sessions always place pruned graphs.
config.graph_options.place_pruned_graph = True
super(InteractiveSession, self).__init__(target, graph, config)
with InteractiveSession._count_lock:
if InteractiveSession._active_session_count > 0:
warnings.warn('An interactive session is already active. This can '
'cause out-of-memory errors in some cases. You must '
'explicitly call `InteractiveSession.close()` to release '
'resources held by the other session(s).')
InteractiveSession._active_session_count += 1
# NOTE(mrry): We do not use `Session._closed` here because it has unhelpful
# semantics (in particular, it is not set to true if `Session.close()` is
# called on a session that has not been "opened" by running a step) and we
# cannot change those semantics without breaking existing code.
self._explicitly_closed = False
self._default_session = self.as_default()
self._default_session.enforce_nesting = False
self._default_session.__enter__()
self._explicit_graph = graph
if self._explicit_graph is not None:
self._default_graph = graph.as_default()
self._default_graph.enforce_nesting = False
self._default_graph.__enter__()
def close(self):
"""Closes an `InteractiveSession`."""
super(InteractiveSession, self).close()
with InteractiveSession._count_lock:
if not self._explicitly_closed:
InteractiveSession._active_session_count -= 1
self._explicitly_closed = True
else:
return
if self._explicit_graph is not None:
self._default_graph.__exit__(None, None, None)
self._default_graph = None
self._default_session.__exit__(None, None, None)
self._default_session = None
| 38.452055
| 94
| 0.677762
|
212dc8fe31941309235b2ba969471e708c6aa345
| 2,904
|
py
|
Python
|
pyatv/protocols/mrp/protobuf/GetKeyboardSessionMessage_pb2.py
|
Jacobs4/pyatv
|
52956adf3b79198be52cc03649f3ddeee19f9e6c
|
[
"MIT"
] | 532
|
2017-02-01T19:23:28.000Z
|
2022-03-29T09:57:39.000Z
|
pyatv/protocols/mrp/protobuf/GetKeyboardSessionMessage_pb2.py
|
Jacobs4/pyatv
|
52956adf3b79198be52cc03649f3ddeee19f9e6c
|
[
"MIT"
] | 1,639
|
2017-02-01T19:22:04.000Z
|
2022-03-31T17:26:40.000Z
|
pyatv/protocols/mrp/protobuf/GetKeyboardSessionMessage_pb2.py
|
bdraco/pyatv
|
9541d21e6101c60866d832626be97bf962774cd5
|
[
"MIT"
] | 102
|
2017-02-02T01:42:13.000Z
|
2022-02-26T08:49:34.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pyatv/protocols/mrp/protobuf/GetKeyboardSessionMessage.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pyatv.protocols.mrp.protobuf import ProtocolMessage_pb2 as pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pyatv/protocols/mrp/protobuf/GetKeyboardSessionMessage.proto',
package='',
syntax='proto2',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n<pyatv/protocols/mrp/protobuf/GetKeyboardSessionMessage.proto\x1a\x32pyatv/protocols/mrp/protobuf/ProtocolMessage.proto\"\x1b\n\x19GetKeyboardSessionMessage:3\n\x19getKeyboardSessionMessage\x12\x10.ProtocolMessage\x18\x1d \x01(\t'
,
dependencies=[pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.DESCRIPTOR,])
GETKEYBOARDSESSIONMESSAGE_FIELD_NUMBER = 29
getKeyboardSessionMessage = _descriptor.FieldDescriptor(
name='getKeyboardSessionMessage', full_name='getKeyboardSessionMessage', index=0,
number=29, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
_GETKEYBOARDSESSIONMESSAGE = _descriptor.Descriptor(
name='GetKeyboardSessionMessage',
full_name='GetKeyboardSessionMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=116,
serialized_end=143,
)
DESCRIPTOR.message_types_by_name['GetKeyboardSessionMessage'] = _GETKEYBOARDSESSIONMESSAGE
DESCRIPTOR.extensions_by_name['getKeyboardSessionMessage'] = getKeyboardSessionMessage
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetKeyboardSessionMessage = _reflection.GeneratedProtocolMessageType('GetKeyboardSessionMessage', (_message.Message,), {
'DESCRIPTOR' : _GETKEYBOARDSESSIONMESSAGE,
'__module__' : 'pyatv.protocols.mrp.protobuf.GetKeyboardSessionMessage_pb2'
# @@protoc_insertion_point(class_scope:GetKeyboardSessionMessage)
})
_sym_db.RegisterMessage(GetKeyboardSessionMessage)
pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.ProtocolMessage.RegisterExtension(getKeyboardSessionMessage)
# @@protoc_insertion_point(module_scope)
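# Illustrative usage sketch (not part of the generated file): the extension is
# read off a decoded ProtocolMessage via the standard Extensions map, assuming
# `msg` holds such a message.
#
#   from pyatv.protocols.mrp.protobuf import GetKeyboardSessionMessage_pb2 as gks
#   value = msg.Extensions[gks.getKeyboardSessionMessage]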
| 38.210526
| 250
| 0.82438
|
41305db6f9b7bf26ab3766202f793b59557aeeaf
| 2,703
|
py
|
Python
|
app/model/types.py
|
don4apaev/anfisa
|
2e4bdd83c584c0000f037413ccc1f9067c07fa70
|
[
"Apache-2.0"
] | null | null | null |
app/model/types.py
|
don4apaev/anfisa
|
2e4bdd83c584c0000f037413ccc1f9067c07fa70
|
[
"Apache-2.0"
] | null | null | null |
app/model/types.py
|
don4apaev/anfisa
|
2e4bdd83c584c0000f037413ccc1f9067c07fa70
|
[
"Apache-2.0"
] | null | null | null |
import numbers
#===============================================
class Types:
sTypes = [None, "null", "list", "dict", "empty", "link", "string",
"int", "numeric"]
# and "undef", "json"
@staticmethod
def _detectValTypes(value):
if value is None:
return [1]
elif isinstance(value, list):
return [2]
elif isinstance(value, dict):
return [3]
elif isinstance(value, basestring):
if not value:
return [4, 5, 6]
elif value.startswith("http"):
if value.startswith("https:") or value.startswith("http:"):
return [5, 6]
return [6]
elif isinstance(value, int):
return [7, 8]
elif isinstance(value, numbers.Number):
return [8]
# convert anything else to a string
return [6]
@classmethod
def typeIdx(cls, value):
return cls.sTypes.index(value)
@classmethod
def detectValTypes(cls, value):
kind_idxs = cls._detectValTypes(value)
ret = set()
if kind_idxs:
for idx in kind_idxs:
if cls.sTypes[idx]:
ret.add(cls.sTypes[idx])
return ret
@classmethod
def filterTypeKind(cls, kinds):
for kind in kinds:
if kind in cls.sTypes:
return kind
return None
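# Illustrative checks (not part of the original source): detectValTypes()
# returns every kind a value could be read as, e.g.
#   Types.detectValTypes("https://example.org")  # -> {"link", "string"}
#   Types.detectValTypes(3)                      # -> {"int", "numeric"}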
#===============================================
class TypeCounter:
def __init__(self, req_type = None):
self.mCounts = [0] * 9
self.mReqType = Types.typeIdx(req_type)
def regValue(self, value):
cnt0 = self.mCounts[1] + self.mCounts[self.mReqType]
self.mCounts[0] += 1
for idx in Types._detectValTypes(value):
self.mCounts[idx] += 1
return (self.mCounts[1] + self.mCounts[self.mReqType]) != cnt0
def _checkType(self, idx, with_optional):
cnt = self.mCounts[idx]
if with_optional:
cnt += self.mCounts[1]
if cnt == self.mCounts[0]:
return Types.sTypes[idx]
def detect(self, with_optional = True):
if self.mCounts[0] == 0:
return "undef"
if self.mReqType > 0:
ret = self._checkType(self.mReqType, with_optional)
if ret:
return ret
for idx in range(2, 9):
ret = self._checkType(idx, with_optional)
if ret:
return ret
return "json"
def empty(self):
return self.mCounts[0] == self.mCounts[1]
def getTotalCount(self):
return self.mCounts[0]
def getEmptyCount(self):
return self.mCounts[1]
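# Illustrative usage sketch (not part of the original source): TypeCounter
# accumulates the kinds seen across a column and detect() reports the
# narrowest type covering all of them, counting None as an optional value.
#
#   tc = TypeCounter()
#   for v in (1, 2.5, None):
#       tc.regValue(v)
#   tc.detect()   # -> "numeric"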
| 28.755319
| 75
| 0.519793
|
4d90cc8399899e4c777a067a101953b4f5ed5875
| 38,739
|
py
|
Python
|
stars/ProjectMaker.py
|
lhcramer-GISforks/stars
|
3c7532a6ea9cd0af7c21f009d603d80cbd69278a
|
[
"BSD-2-Clause"
] | 9
|
2015-06-15T14:25:08.000Z
|
2021-04-01T22:57:48.000Z
|
stars/ProjectMaker.py
|
lhcramer-GISforks/stars
|
3c7532a6ea9cd0af7c21f009d603d80cbd69278a
|
[
"BSD-2-Clause"
] | 8
|
2015-08-12T23:59:53.000Z
|
2021-06-13T00:33:37.000Z
|
stars/ProjectMaker.py
|
lhcramer-GISforks/stars
|
3c7532a6ea9cd0af7c21f009d603d80cbd69278a
|
[
"BSD-2-Clause"
] | 15
|
2016-02-08T05:03:44.000Z
|
2021-05-17T21:07:00.000Z
|
"""
Standalone Utility for conversion of ArcView files to a STARS project.
----------------------------------------------------------------------
AUTHOR(S): Mark V. Janikas janikas@users.sourceforge.net
Sergio J. Rey sjrey@users.sourceforge.net
----------------------------------------------------------------------
"""
from guimixin import *
from guimaker import *
import os
import sys
import string
from math import *
import sdialogue as sd
#from Common import Options
from ProjectWorker import *
from DataViewer import MixedDataTable
import Matcher as MATCH
import Tkinter as tk
class SProjectMaker(GuiMixin, GuiMaker): # or GuiMakerFrameMenu
"""Application level GUI Wrapper"""
def start(self):
self.hellos = 0
self.master.title("SPM: STARS Project Maker")
self.master.iconname("SPM")
h = self.winfo_screenheight()
self.screenHeight = h
w = self.winfo_screenwidth()
self.screenWidth = w
if w > 1280:
w = 1280
windowWidth = w/2.
windowHeight = h/2.
x0 = int((w - windowWidth) / 2.)
y0 = int((h - windowHeight) / 2.)
geom = "%dx%d+%d+%d"%(windowWidth,windowHeight,0,0)
print geom
self.master.geometry(geom)
self.root = self.master
self.project = None
self.starsProjectOn = 0
self.projectedCoordsOn = 0
self.menuBar = [
('File', 0,
[
('Create New STARS Project',0,self.createNewSTARSProject),
('Open STARS Project',0,self.openSTARSProject),
'separator',
('Save STARS Project',0,self.saveProject),
('Save As STARS Project',2,self.saveAsProject),
('Write Cross-Section Names',0,self.writeCSO),
#('Write Project Files',2,self.writeProjectFiles),
'separator',
('Exit', 1, self.quit)
]
),
('Data',0,
[ ('Variable',0,
[
('Convert',0,
[
('Base Data to CS',0,self.convertCSVariables),
('Base Data to CSTS',0,self.convertCSTSVariable),
('Base Data to CSTS (Batch)',0,self.convertCSTSVariableBatch),
('Cross-Section to Panel',0,self.cs2Panel),
('Panel to Cross-Section',0,self.panel2CS)
]
),
('Merge',0,
[
('CS Data',0,self.readCSV_CS),
('TS Data',0,self.readCSV_TS),
('CSTS Data',0,self.readCSV_CSTS)
]
),
('Join',0,
[
('CS Data',0,self.joinCS),
('CSTS Data',0,self.joinCSTS)
]
),
]
),
'separator',
('Matrix',0,
[
('Import GAL Binary',0,self.importGalBinary),
('Create GAL Binary from Shapefile',0,self.createGalAppend),
#('Import GAL Valued',0,self.importGalValued),
#('Import Full',0,self.importFullMatrix)
]
), ]
),
('Tables',0,
[
('Specific Variable(s)',0,self.variableSpecificTable),
('CS Variables',0,self.variableCSTable),
('TS Variables',0,self.variableTSTable),
('CSTS Variables',0,self.variableCSTSTable),
('CS and CSTS Variables',0,self.variableCS_CSTSTable),
('Base Data Variables',0,self.baseVariableTable) ]
),
('Plot',0,
[('Plot Map',0,self.doMaps)])]
def createNewSTARSProject(self):
"""
Creates a new STARS project.
Callback.
"""
d = sd.SDialogue('Create New STARS Project')
values='ArcView', 'CSV'
txt="Choose the type of file you want to use as your base data.\n"
rbutton = sd.RadioButtons(d, label='Base Data', values=values,
align='LEFT', title='Types', helpText=txt)
d.draw()
if d.status:
type = d.results[0]
if type == 0:
fileType = "*.dbf"
else:
fileType = "*.csv"
FILE_TYPES=[("Files",fileType)]
baseFileName = askopenfilename(filetypes=FILE_TYPES, title="Choose Base Data File.")
if baseFileName:
self.prj = 0
type = baseFileName.split(".")[-1]
if type == "dbf":
arc = 1
self.report("Base data generated from an ArcView Project")
else:
arc = 0
self.report("Base data generated from a Comma Delimited File")
self.proj = ProjectMaker(baseFileName,arc=arc)
d = sd.SDialogue('Create STARS Project Name')
txt = """Choose a name for the STARS project you want to create."""
sd.UserEntry(d,label="Project Prefix",
align="LEFT", title="",helpText=txt)
d.draw()
if d.status:
self.proj.changeProjPrefix(d.results[0])
self.baseVariableTable()
d = sd.SDialogue('Choose Time Series Type')
values='Decadal', 'Annual', 'Quarterly', 'Monthly', 'Irregular'
txt="Choose the type of file you want to use as your base data.\n"
rbutton = sd.RadioButtons(d, label='Time-Series', values=values,
align='LEFT', title='Types', helpText=txt)
d.draw()
if d.status:
type = d.results[0]
self.evalTimeInfo(values[type])
self.createIdsAndNames()
if arc == 1:
self.createGal()
self.starsProjectOn = 1
def openSTARSProject(self):
"""
Open an Existing STARS Project.
Callback.
"""
fileName = askopenfilename(filetypes=[('Project Files',"*.prj")],
title="Open STARS project.")
if fileName:
self.prj = 1
self.proj = ProjectMaker(fileName,prj=1)
print self.proj.stars.catalogue()
timeType = self.proj.stars.timeFreq
start = self.proj.stars.timeInfo[1]
end = self.proj.stars.timeInfo[2]
within = ['MONTHLY', 'QUARTERLY']
if timeType in within:
s = start.split("/")
startYear = s[-1]
startSub = s[0]
e = end.split("/")
endYear = e[-1]
endSub = e[0]
if timeType == "MONTHLY":
self.proj.createMonthly(int(startSub), int(startYear), int(endSub), int(endYear))
varNames = self.proj.stars.getVariableNames()
d = {}
for var in varNames:
v = self.proj.stars.dataBase.getVariable(var)
type = v.varType
self.starsProjectOn = 1
self.projectedCoordsOn = 1
self.report(self.proj.projectSummary())
def writeCSO(self):
try:
self.proj.writeCSO()
except:
self.report("""Could not export region names. Perhaps they have not
been identified yet.""")
def evalTimeInfo(self,type):
tDict = {'Decadal':self.createDECADAL,
'Annual':self.createANNUAL,
'Quarterly':self.createQUARTERLY,
'Monthly':self.createMONTHLY,
'Irregular':self.createIRREGULAR}
tDict[type]()
def createDECADAL(self):
d = sd.SDialogue('Decadal Time-Series Dialogue')
txt = "Choose the start year for your project."
sd.UserEntry(d,label="Start Year", align="LEFT", title="",helpText=txt)
txt = "Choose the end year for your project."
sd.UserEntry(d,label="End Year", align="LEFT", title="",helpText=txt)
d.draw()
if d.status:
start = d.results[0]
end = d.results[1]
self.proj.createDecadal(start, end)
self.report(self.proj.timeSummary)
def createANNUAL(self):
d = sd.SDialogue('Annual Time-Series Dialogue')
txt = "Choose the start year for your project."
sd.UserEntry(d,label="Start Year", align="LEFT", title="",helpText=txt)
txt = "Choose the end year for your project."
sd.UserEntry(d,label="End Year", align="LEFT", title="",helpText=txt)
d.draw()
if d.status:
start = d.results[0]
end = d.results[1]
self.proj.createAnnual(start, end)
self.report(self.proj.timeSummary)
def createQUARTERLY(self):
d = sd.SDialogue('Quarterly Time-Series Dialogue')
txt = "Choose the starting quarter for your project."
quarters = range(1,5)
entries = ['Start Quarter']
sd.MultiEntry(d,quarters, entries, title='',
helpText=txt)
txt = "Choose the start year for your project."
sd.UserEntry(d,label="Start Year", align="LEFT", title="",helpText=txt)
txt = "Choose the ending quarter for your project."
entries = ['End Quarter']
sd.MultiEntry(d,quarters, entries, title='',
helpText=txt)
txt = "Choose the end year for your project."
sd.UserEntry(d,label="End Year", align="LEFT", title="",helpText=txt)
d.draw()
if d.status:
startQ = int(d.results[0]['Start Quarter'])
startYear = int(d.results[1])
endQ = int(d.results[2]['End Quarter'])
endYear = int(d.results[3])
self.proj.createQuarterly(startQ, startYear, endQ, endYear)
self.report(self.proj.timeSummary)
def createMONTHLY(self):
d = sd.SDialogue('Monthly Time-Series Dialogue')
txt = "Choose the starting month for your project."
months = range(1,13)
entries = ['Start Month']
sd.MultiEntry(d,months, entries, title='',
helpText=txt)
txt = "Choose the start year for your project."
sd.UserEntry(d,label="Start Year", align="LEFT", title="",helpText=txt)
txt = "Choose the ending month for your project."
entries = ['End Month']
sd.MultiEntry(d,months, entries, title='',
helpText=txt)
txt = "Choose the end year for your project."
sd.UserEntry(d,label="End Year", align="LEFT", title="",helpText=txt)
d.draw()
if d.status:
startM = int(d.results[0]['Start Month'])
startYear = int(d.results[1])
endM = int(d.results[2]['End Month'])
endYear = int(d.results[3])
self.proj.createMonthly(startM, startYear, endM, endYear)
self.report(self.proj.timeSummary)
def createIRREGULAR(self):
d = sd.SDialogue('Irregular Time-Series Dialogue')
txt = "Choose the number of time periods (Integer)"
sd.UserEntry(d,label="Number of Time Periods (t)", align="LEFT", title="",helpText=txt)
d.draw()
if d.status:
t = int(d.results[0])
self.proj.createIrregular(t)
self.report(self.proj.timeSummary)
def createIdsAndNames(self):
d = sd.SDialogue('Create Region Names and Ids')
txt = """You must identify names for the regions in your project.
*** All the options in this dialogue are optional. If you leave them
blank, your regions will be identified by the integers associated with
the number of rows in the input .dbf or .csv file.
1. Use the Unique Field to identify unique labels that match the
number of cross-sections in your study. Examples would include NUTS
or FIPS codes.
2. If there are no Fields that can be used to determine the
uniqueness of each cross-section you may combine the values from two
fields to create region ids. The Join Field term will be combined
with the Unique Field to create a "more unique" identifier.
3. Use the Optional Name Field if you have identified regions with
either the Unique or Joined method, but you want the names of the
regions to be determined by this field.
4. The user can select the type of delimiter used join field entries.
The default delimiter is an underscore: field1_field2
"""
varNames = self.proj.getDBFVariableNames()
varNames.sort()
entries = ['Unique Field', 'Join Field', 'Optional Name Field', 'Delimiter']
sd.MultiEntry(d,varNames, entries, title='Optional Arguments', helpText=txt)
d.draw()
if d.status:
nameField = d.results[0]['Unique Field']
if nameField:
nameField = self.proj.getDBFVariable(nameField)
else:
nameField = []
joinField = d.results[0]['Join Field']
if joinField:
joinField = self.proj.getDBFVariable(joinField)
else:
joinField = []
finalField = d.results[0]['Optional Name Field']
if finalField:
finalField = self.proj.getDBFVariable(finalField)
else:
finalField = []
delimiter = d.results[0]['Delimiter']
if delimiter:
pass
else:
delimiter = "_"
self.proj.createNamesAndIDs(var1=nameField,
var2=joinField,
var3=finalField,
delim=delimiter)
self.report(self.proj.variableSummary())
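# Illustrative example (hypothetical field values, not part of the original
# source): with Unique Field "06", Join Field "001" and the default "_"
# delimiter, createNamesAndIDs() would label that cross-section "06_001";
# an Optional Name Field, when given, supplies the display name instead.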
def createGalAppend(self):
if self.proj.arc == 1:
self.createGal()
else:
self.report("""You must be using an arcview type project for this
option.""")
def createGal(self):
d = sd.SDialogue('Create Contiguity Matrices')
txt="""Rook contiguity is based on shared edges, while Queen
contiguity is based on shared vertices between pairs of polygons."""
types = "Rook", "Queen"
sd.CheckButtons(d, title='Criterion', label='Criterion', values=types,
helpText=txt)
d.draw()
if d.status:
criterion = d.results[0]
mats = []
matNames = []
self.master.update()
if criterion[0][1]: # rook
text="Creating Rook Based Contiguity Weights"
rd=sd.Warning(self.master,text=text)
if self.proj.aggOn == 1:
mats.append(self.proj.makeGalWeightsAgg())
else:
mats.append(self.proj.makeGalWeights())
matNames.append('rook')
rd.destroy()
if criterion[1][1]: # queen
txt="Creating Queen Based Contiguity Weights."
qd=sd.Warning(self.master,txt)
if self.proj.aggOn == 1:
mats.append(self.proj.makeGalWeightsAgg(2))
else:
mats.append(self.proj.makeGalWeights(2))
matNames.append('queen')
qd.destroy()
for name,stringOut in zip(matNames,mats):
print 'writing GAL file(s)'
nameOut = self.proj.projPrefix+"_"+name+".gal"
nameOut = os.path.join(self.proj.projectDir,nameOut)
fo=open(nameOut,'w')
fo.write(stringOut)
fo.close()
self.proj.matrices[nameOut]='gal'
print 'done writing GAL files(s)'
def convertCSVariables(self):
d = sd.SDialogue('Convert Initial Field(s) to STARS Cross-Sectional Variables(s)')
varNames = self.proj.getDBFVariableNames()
varNames.sort()
txt="""Select one or more initial variables to convert into pure
cross-sectional STARS variables."""
sd.DualListBoxes(d,varNames,title='Fields', helpText=txt)
entries = ['Aggregation Method']
txt = """If the same cross-sectional unit has more than one value
associated with it, ProjectMaker will have to combine the values in
some way. You have the following options:
Sum: will sum up any values associated with the same cross-section.
Max: will take the maximum value of any values associated with the same cross-section.
Min: will take the minimum value of any values associated with the same cross-section.
Average: will average the values associated with the same cross-section.
String: will essentially use the value of the last instance for
each cross-section. Furthermore the value is a string. Use this
for categorical data.
***The default method is "Average"."""
types = ['Sum', 'Max', 'Min', 'Average', 'String']
sd.MultiEntry(d,types, entries, title='Optional Arguments', helpText=txt)
d.draw()
if d.status:
varList = d.results[0]
cohesion = d.results[1]['Aggregation Method']
if cohesion:
pass
else:
cohesion = 'Average'
createVars = [ self.proj.convertArcViewVariable(cohesion,var,[var]) for var in varList ]
self.report(self.proj.variableSummary())
def convertCSTSVariable(self):
d = sd.SDialogue('Convert Initial Fields to a STARS Panel Variables')
varNames = self.proj.getDBFVariableNames()
varNames.sort()
txt="""Select the fields in time order to be create a panel variable."""
time = str(self.proj.t)
tRemind = "Choose t = " + time + " fields"
sd.DualListBoxes(d,varNames,title=tRemind, helpText=txt)
txt = "Choose a name for your STARS Panel variable."
sd.UserEntry(d,label="Choose Panel Variable Name", align="LEFT", title="",helpText=txt)
entries = ['Aggregation Method']
txt = """If the same cross-sectional unit has more than one value
associated with it, ProjectMaker will have to combine the values in
some way. You have the following options:
Sum: will sum up any values associated with the same cross-section.
Max: will take the maximum value of any values associated with the same cross-section.
Min: will take the minimum value of any values associated with the same cross-section.
Average: will average the values associated with the same cross-section.
String: will essentially use the value of the last instance for
each cross-section. Furthermore the value is a string. Use this
for categorical data.
***The default method is "Average"."""
types = ['Sum', 'Max', 'Min', 'Average', 'String']
sd.MultiEntry(d,types, entries, title='Optional Arguments', helpText=txt)
d.draw()
if d.status:
varList = d.results[0]
varName = d.results[1]
cohesion = d.results[2]['Aggregation Method']
if cohesion:
pass
else:
cohesion = 'Average'
createVar = self.proj.convertArcViewVariable(cohesion,varName,varList)
self.report(self.proj.variableSummary())
def convertCSTSVariableBatch(self):
d = sd.SDialogue('Convert Initial Fields to a STARS Panel Variables')
varNames = self.proj.getDBFVariableNames()
batch = MATCH.batchSplit(varNames)
varNames = batch['strings']
varNames.sort()
timeInfo = batch['ints']
timeInfo.sort()
txt="""Select the fields to create panel variables via the batch method."""
time = str(self.proj.t)
add = """Remember that field must have " + time + " time periods
associated with it."""
txt = txt + "\n" + add
title = "Choose fields for batch CSTS creation"
sd.DualListBoxes(d,varNames,title=title, helpText=txt)
txt = """Choose a variable associated with the first time period in
your study, and an additional one for the end time period. You may
also type this in manually."""
timeStuff = ['Start Period for Batch', 'End Period for Batch']
sd.MultiEntry(d,timeInfo, timeStuff, title='Time Period Arguments',
helpText=txt)
txt="""Provide the time period increment:
I.e. Annual: 1
BiAnnual: 2
Decadal: 10
"""
sd.UserEntry(d,label="Integer Value", align="LEFT",
title="User Defined Time Increment",helpText=txt)
entries = ['Aggregation Method']
txt = """If the same cross-sectional unit has more than one value
associated with it, ProjectMaker will have to combine the values in
some way. You have the following options:
Sum: will sum up any values associated with the same cross-section.
Max: will take the maximum value of any values associated with the same cross-section.
Min: will take the minimum value of any values associated with the same cross-section.
Average: will average the values associated with the same cross-section.
String: will essentially use the value of the last instance for
each cross-section. Furthermore the value is a string. Use this
for categorical data.
***The default method is "Average"."""
types = ['Sum', 'Max', 'Min', 'Average', 'String']
sd.MultiEntry(d,types, entries, title='Optional Arguments', helpText=txt)
d.draw()
if d.status:
vars = MATCH.Matcher('vars',d.results[0])
varList = vars.unique
start = int( d.results[1]['Start Period for Batch'] )
end = int( d.results[1]['End Period for Batch'] )
step = int( d.results[2] )
cohesion = d.results[3]['Aggregation Method']
if cohesion:
pass
else:
cohesion = 'Average'
for var in varList:
try:
newVar = [ var+str(i) for i in range(start,end+step,step) ]
createVar = self.proj.convertArcViewVariable(cohesion,var,newVar)
except:
beg = "Could not create new variable for " + var + "."
end = "\nPerhaps the the time series does not match."
self.report(beg+end)
self.report(self.proj.variableSummary())
def cs2Panel(self):
d = sd.SDialogue('Convert Existing CS Variables to a CSTS Variable')
varNames = self.proj.getCSVariableNames()
varNames.sort()
time = str(self.proj.t)
txt="""Select the CS variables in temporal order. Make sure that you
have the same number of CS vars as time periods"""
tRemind = "Choose t = " + time + " CS Variables"
sd.DualListBoxes(d,varNames,title=tRemind, helpText=txt)
txt = "Choose a name for your STARS Panel variable."
sd.UserEntry(d,label="Choose Panel Variable Name", align="LEFT",
title="",helpText=txt)
title='Would you like to delete the original CS Variables?'
values = ['No', 'Yes']
txt = """If you select Yes, then the original CS variables will be erased. ***The default is No"""
sd.RadioButtons(d, values=values, title=title,helpText=txt)
d.draw()
if d.status:
varList = d.results[0]
panelName = d.results[1]
delete = d.results[2]
if len(varList) == self.proj.t:
self.proj.cs2Panel(varList,panelName,delete=delete)
self.report(self.proj.variableSummary())
else:
s = """ERROR: The number of CS Variables you provided do not match the number of time periods in your project."""
self.report(s)
def panel2CS(self):
d = sd.SDialogue('Convert Existing Panel Variable to CS Variables')
varNames = self.proj.getCSTSVariableNames()
varNames.sort()
txt="""Choose the name of the Panel variable(s) that you would like to
decompose by time periods into separate cross-sectional variables.
You may choose more than one at a time."""
sd.DualListBoxes(d,varNames,title='Panel Variables', helpText=txt)
title='Would you like to delete the original Panel Variables?'
values = ['No', 'Yes']
txt = """If you select Yes, then the original Panel variables will be erased. ***The default is No"""
sd.RadioButtons(d, values=values, title=title,helpText=txt)
d.draw()
if d.status:
varList = d.results[0]
delete = d.results[1]
for var in varList:
self.proj.panel2CS(var,delete=delete)
self.report(self.proj.variableSummary())
def variableSpecificTable(self):
d = sd.SDialogue('View Specific Variable(s)')
txt = """Choose the name(s) of the CS and CSTS variable(s) you want to
view in tabular format."""
cvars = self.proj.getCSVariableNames()
cstvars = self.proj.getCSTSVariableNames()
varNames = cvars + cstvars
sd.DualListBoxes(d,varNames,title="CS and CSTS Variables", helpText=txt)
tsVars = self.proj.getTSVariableNames()
txt = """Choose the name(s) of the TS variable(s) you want to view in
tabular format."""
sd.DualListBoxes(d,tsVars,title="TS Variables", helpText=txt)
d.draw()
if d.status:
csVars = d.results[0]
try:
tab = self.proj.createTableList(csVars)
names = tab[0]
vals = tab[1]
top = Toplevel(self.root)
table = MixedDataTable(top,vals,
name="STARS Variables (CS, CSTS)",
columnLabels = names)
except:
print "No CS or CSTS Variables identified"
tsVars = d.results[1]
try:
tab = self.proj.createTableList(tsVars)
names = tab[0]
vals = tab[1]
top = Toplevel(self.root)
table = MixedDataTable(top,vals,
name="STARS Variables (TS)",
columnLabels = names)
except:
print "No TS Variables identified"
def variableCSTable(self):
vars = self.proj.getCSVariableNames()
tab = self.proj.createTableList(vars)
names = tab[0]
vals = tab[1]
top = Toplevel(self.root)
table = MixedDataTable(top,vals,
name="STARS Variables (CS)",
columnLabels = names)
def variableCSTSTable(self):
vars = self.proj.getCSTSVariableNames()
tab = self.proj.createTableList(vars)
names = tab[0]
vals = tab[1]
top = Toplevel(self.root)
table = MixedDataTable(top,vals,
name="STARS Variables (CSTS)",
columnLabels = names)
def variableTSTable(self):
vars = self.proj.getTSVariableNames()
tab = self.proj.createTableList(vars)
names = tab[0]
vals = tab[1]
top = Toplevel(self.root)
table = MixedDataTable(top,vals,
name="STARS Variables (TS)",
columnLabels = names)
def variableCS_CSTSTable(self):
cvars = self.proj.getCSVariableNames()
cstvars = self.proj.getCSTSVariableNames()
vars = cvars + cstvars
tab = self.proj.createTableList(vars)
names = tab[0]
vals = tab[1]
top = Toplevel(self.root)
table = MixedDataTable(top,vals,
name="STARS Variables (CS and CSTS)",
columnLabels = names)
def baseVariableTable(self,sample=1):
baseData = self.proj.createInitialTable(sample=sample)
top = Toplevel(self.root)
table = MixedDataTable(top,baseData,name="Base Data",
columnLabels=self.proj.initial.keys())
def readCSV_CS(self):
FILE_TYPES=[("Files","*.csv")]
fileName = askopenfilename(filetypes=FILE_TYPES, title="MERGE Additional CS Data.")
if fileName:
self.proj.readCSV_CS(fileName)
self.report(self.proj.variableSummary())
def readCSV_TS(self):
FILE_TYPES=[("Files","*.csv")]
fileName = askopenfilename(filetypes=FILE_TYPES, title="MERGE Additional TS Data.")
if fileName:
self.proj.readCSV_TS(fileName)
self.report(self.proj.variableSummary())
def readCSV_CSTS(self):
FILE_TYPES=[("Files","*.csv")]
fileName = askopenfilename(filetypes=FILE_TYPES, title="MERGE Additional CSTS Data.")
if fileName:
self.proj.readCSV_CSTS(fileName)
self.report(self.proj.variableSummary())
def joinCS(self):
FILE_TYPES=[("Files","*.csv")]
fileName = askopenfilename(filetypes=FILE_TYPES, title="JOIN Additional CS Data.")
if fileName:
self.proj.readJoinCSV(fileName)
d = sd.SDialogue('Join Data Dialogue')
txt = """Identify the existing cross-sectional field in the
project to serve as the master in the matching process.
"""
varNames = self.proj.getCSVariableNames()
varNames.sort()
entries = ['Field']
sd.MultiEntry(d,varNames, entries, title='Identify Master Field', helpText=txt)
txt = """Identify the field in your new data that will serve as
the slave in the matching process.
"""
varNames = self.proj.data2Join.names
varNames.sort()
entries = ['Field']
sd.MultiEntry(d,varNames, entries, title='Identify Slave Field', helpText=txt)
d.draw()
if d.status:
master = d.results[0]['Field']
slave = d.results[1]['Field']
self.proj.joinCS(master,slave)
self.report(self.proj.variableSummary())
def joinCSTS(self):
FILE_TYPES=[("Files","*.csv")]
fileName = askopenfilename(filetypes=FILE_TYPES, title="JOIN Additional CSTS Data.")
if fileName:
self.proj.readJoinCSV(fileName)
d = sd.SDialogue('Join Data Dialogue')
txt = """Identify the existing cross-sectional field in the
project to serve as the master in the matching process.
"""
varNames = self.proj.getCSVariableNames()
varNames.sort()
entries = ['Field']
sd.MultiEntry(d,varNames, entries, title='Identify Master Field', helpText=txt)
txt = """Identify the field in your new data that will serve as
the slave in the matching process.
"""
varNames = self.proj.data2Join.names
varNames.sort()
entries = ['Field']
sd.MultiEntry(d,varNames, entries, title='Identify Slave Field', helpText=txt)
d.draw()
if d.status:
master = d.results[0]['Field']
slave = d.results[1]['Field']
self.proj.joinCSTS(master,slave)
self.report(self.proj.variableSummary())
def importGalBinary(self):
FILE_TYPES=[("Files","*.gal")]
fileName = askopenfilename(filetypes=FILE_TYPES, title="Import Binary Gal File.")
if fileName:
self.proj.importMatrix(fileName,'gal')
def importGalValued(self):
FILE_TYPES=[("Files","*.spv")]
fileName = askopenfilename(filetypes=FILE_TYPES, title="Import Sparse Valued Gal File.")
if fileName:
self.proj.importMatrix(fileName,'spv')
def importFullMatrix(self):
FILE_TYPES=[("Files","*.fmt")]
fileName = askopenfilename(filetypes=FILE_TYPES, title="Import Full Matrix File.")
if fileName:
self.proj.importMatrix(fileName,'fmt')
def saveAsProject(self):
"""
Saves STARS project under a new name.
Callback.
Stub XXX.
"""
if self.saveCheck():
fileName = asksaveasfilename(filetypes=[("STARS Projects","*.prj")],
title="Save STARS Project Name",
initialdir=self.proj.projectDir,
initialfile=self.proj.projPrefix)
if fileName:
self.proj.setProjectFiles(fileName)
self.writeProjectFiles()
def saveProject(self):
"""
Saves STARS project under current name.
Callback.
Stub XXX.
"""
if self.saveCheck():
self.writeProjectFiles()
def openProject(self):
"""
Opens an existing STARS Project.
Callback.
Stub XXX.
"""
starsFile = askopenfilename(filetypes=[("STARS Projects","*.prj")])
if starsFile:
print starsFile
self.starsFile = starsFile
def plot(self):
"""
Plots the current ArcView Shapefile scaled for STARS Map.
Callback.
"""
self.avproject.draw()
self.projectedCoordsOn = 1
def summarize(self):
"""
Reports on current ArcView Project.
Callback.
"""
try:
self.avproject.summary()
except:
print 'No ArcView Project Open'
def writeProjectFiles(self):
"""Wrapper to write all files necessary for a STARS Project."""
#if self.saveCheck():
self.proj.writePRJ()
if self.prj != 1:
if self.proj.arc == 1:
self.proj.writeGIS(self.projected)
self.proj.writeCSO()
self.proj.writeDHT(delimiter=" ")
self.proj.writeDAT(delimiter=" ")
print "Finished creating project!"
self.report("Finished creating project!")
def doMaps(self):
# XXX maybe wrap alternative projected maps in a dictionary so that the
# final selection of a projection does not require another projection
        # of the coordinates. i.e., if the user first looks at mercator, then
        # unprojected, then albers, the last map is albers. but, if the user
# wants their project to use none or mercator, they would need to
# reproject it at this point. for now this is in self.projectedMaps
if self.proj.prj == 1:
self.report("Your GIS File has already been created!")
else:
if self.proj.arc == 1:
d = sd.SDialogue('Map Views')
values=('None', 'Mercator', 'Albers', 'Transverse Mercator',
'Cylindrical Equidistant')
txt="Select Map Projection (or none for unprojected)\n"
rbutton = sd.RadioButtons(d, label='Projection', values=values,
align='LEFT', title='Projections', helpText=txt)
d.draw()
if d.status:
type = d.results[0]
projections = {1:Projection.MercatorProj,
2:Projection.AlbersEqualAreaProj,
3:Projection.TransverseMercatorProj,
4:Projection.CylindricalEquidistantProj,
0:"None"}
self.proj.createMap(self.proj.shapeFileName, projections[type])
top = Toplevel(self.root)
self.projected=Projection.MapView(top, self.proj.map)
self.projected.plot()
top.title(self.proj.map.projectionName)
self.proj.projectedMaps[self.proj.map.projectionName] = self.proj.map
self.projectedCoordsOn = 1
else:
self.report("No Shapefile declared for this project")
def writeGAL(self):
print 'writing GAL'
iGal = ReadGisFile(self.filePrefix+".gis")
mGal = gis2Contiguity(iGal[0], iGal[1], iGal[2])
gKeys = mGal.keys()
gKeys.sort()
fgal = open(self.filePrefix+".gal", "w")
fgal.write("%s\n"%(len(gKeys)))
for i in gKeys:
fgal.write("%s %s\n"%(i, mGal[i][0]))
try:
neighs = [ str(i) for i in mGal[i][1] ]
neighs = (" ").join(neighs)
print neighs
fgal.write("%s\n"%(neighs))
except:
fgal.write("%s\n"%(""))
print 'attention: island'
fgal.close()
def saveCheck(self):
"""Wraps all the checks necessary to write a project file"""
flag = 1
flag *= self.starsProjectOn
if self.proj.arc == 1:
flag *= self.projectedCoordsOn
if not self.starsProjectOn: print 'No Stars Project Defined.'
if self.proj.arc == 1:
if not self.projectedCoordsOn:
print 'Please plot shapefile before saving project.'
return flag
def notDone(self):
self.report("This method is not done yet!")
if __name__ == '__main__':
from Tkinter import *
v = SProjectMaker()
v.mainloop()
| 41.744612
| 130
| 0.54178
|
1e00b81a482e34ade0a2cad9f58f6970179e850d
| 2,131
|
py
|
Python
|
examples/chart_gauge.py
|
eddiechapman/XlsxWriter
|
c636117ab30e64e4b7b824c9105595c42887c2c9
|
[
"BSD-2-Clause-FreeBSD"
] | 2,766
|
2015-01-02T17:36:42.000Z
|
2022-03-31T09:23:30.000Z
|
examples/chart_gauge.py
|
xiaolanmeng86/XlsxWriter
|
6c3ea23a410e8216eab8f5751e5544ffb444b3da
|
[
"BSD-2-Clause-FreeBSD"
] | 683
|
2015-01-03T09:55:02.000Z
|
2022-03-31T07:18:15.000Z
|
examples/chart_gauge.py
|
xiaolanmeng86/XlsxWriter
|
6c3ea23a410e8216eab8f5751e5544ffb444b3da
|
[
"BSD-2-Clause-FreeBSD"
] | 636
|
2015-01-05T01:57:08.000Z
|
2022-03-25T18:42:41.000Z
|
#######################################################################
#
# An example of creating a Gauge Chart in Excel with Python and XlsxWriter.
#
# A Gauge Chart isn't a native chart type in Excel. It is constructed by
# combining a doughnut chart and a pie chart and by using some non-filled
# elements. This example follows the following online example of how to create
# a Gauge Chart in Excel: https://www.excel-easy.com/examples/gauge-chart.html
#
# Copyright 2013-2021, John McNamara, jmcnamara@cpan.org
#
import xlsxwriter
workbook = xlsxwriter.Workbook('chart_gauge.xlsx')
worksheet = workbook.add_worksheet()
chart_doughnut = workbook.add_chart({'type': 'doughnut'})
chart_pie = workbook.add_chart({'type': 'pie'})
# Add some data for the Doughnut and Pie charts. This is set up so the
# gauge goes from 0-100. It is initially set at 75%.
worksheet.write_column('H2', ['Donut', 25, 50, 25, 100])
worksheet.write_column('I2', ['Pie', 75, 1, '=200-I4-I3'])
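# Both series sum to 200 and both charts are rotated 270 degrees, so only the top
# half (100 units) of each circle is visible. The doughnut's unfilled 100-unit slice
# hides the bottom half of the gauge, and the pie's 1-unit black slice, placed after
# 75 units, acts as the needle pointing at 75% of the 0-100 scale.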
# Configure the doughnut chart as the background for the gauge.
chart_doughnut.add_series({
'name': '=Sheet1!$H$2',
'values': '=Sheet1!$H$3:$H$6',
'points': [
{'fill': {'color': 'green'}},
{'fill': {'color': 'yellow'}},
{'fill': {'color': 'red'}},
{'fill': {'none': True}}],
})
# Rotate chart so the gauge parts are above the horizontal.
chart_doughnut.set_rotation(270)
# Turn off the chart legend.
chart_doughnut.set_legend({'none': True})
# Turn off the chart fill and border.
chart_doughnut.set_chartarea({
'border': {'none': True},
'fill': {'none': True},
})
# Configure the pie chart as the needle for the gauge.
chart_pie.add_series({
'name': '=Sheet1!$I$2',
'values': '=Sheet1!$I$3:$I$6',
'points': [
{'fill': {'none': True}},
{'fill': {'color': 'black'}},
{'fill': {'none': True}}],
})
# Rotate the pie chart/needle to align with the doughnut/gauge.
chart_pie.set_rotation(270)
# Combine the pie and doughnut charts.
chart_doughnut.combine(chart_pie)
# Insert the chart into the worksheet.
worksheet.insert_chart('A1', chart_doughnut)
workbook.close()
| 31.338235
| 78
| 0.650868
|
3e1d2e84764fbe87cd8611b72b83c7b1f71eef6f
| 1,523
|
py
|
Python
|
voc_annotation.py
|
Mr-Yao-Pupil/efficientdet-pytorch
|
f04189b5baf50b98a124dd76dee55a840cf17719
|
[
"MIT"
] | null | null | null |
voc_annotation.py
|
Mr-Yao-Pupil/efficientdet-pytorch
|
f04189b5baf50b98a124dd76dee55a840cf17719
|
[
"MIT"
] | null | null | null |
voc_annotation.py
|
Mr-Yao-Pupil/efficientdet-pytorch
|
f04189b5baf50b98a124dd76dee55a840cf17719
|
[
"MIT"
] | null | null | null |
import xml.etree.ElementTree as ET
from os import getcwd
sets = [('2007', 'train'), ('2007', 'val'), ('2007', 'test')]
classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog",
"horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
def convert_annotation(year, image_id, list_file):
in_file = open('VOCdevkit/VOC%s/Annotations/%s.xml' % (year, image_id))
tree = ET.parse(in_file)
root = tree.getroot()
for obj in root.iter('object'):
difficult = 0
if obj.find('difficult') != None:
difficult = obj.find('difficult').text
cls = obj.find('name').text
if cls not in classes or int(difficult) == 1:
continue
cls_id = classes.index(cls)
xmlbox = obj.find('bndbox')
b = (int(xmlbox.find('xmin').text), int(xmlbox.find('ymin').text), int(xmlbox.find('xmax').text),
int(xmlbox.find('ymax').text))
list_file.write(" " + ",".join([str(a) for a in b]) + ',' + str(cls_id))
wd = getcwd()
for year, image_set in sets:
image_ids = open('VOCdevkit/VOC%s/ImageSets/Main/%s.txt' % (year, image_set)).read().strip().split()
list_file = open('%s_%s.txt' % (year, image_set), 'w')
for image_id in image_ids:
list_file.write('%s/VOCdevkit/VOC%s/JPEGImages/%s.jpg' % (wd, year, image_id))
convert_annotation(year, image_id, list_file)
list_file.write('\n')
list_file.close()
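# Each output line pairs an image path with zero or more boxes, e.g. (illustrative values only):
# /path/to/VOCdevkit/VOC2007/JPEGImages/000005.jpg 263,211,324,339,8 165,264,253,372,8
# where each box is xmin,ymin,xmax,ymax,class_id and class_id indexes into `classes`.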
| 38.075
| 119
| 0.594879
|
8952b8f73e36cef3d831e414d33299a4fc8f8289
| 90
|
py
|
Python
|
help_api/apps.py
|
Pravesh-Jamgade/projectSOS
|
e9d1021c1a4a38e5750242b329b6bc725a446299
|
[
"MIT"
] | null | null | null |
help_api/apps.py
|
Pravesh-Jamgade/projectSOS
|
e9d1021c1a4a38e5750242b329b6bc725a446299
|
[
"MIT"
] | null | null | null |
help_api/apps.py
|
Pravesh-Jamgade/projectSOS
|
e9d1021c1a4a38e5750242b329b6bc725a446299
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class HelpApiConfig(AppConfig):
name = 'help_api'
| 15
| 33
| 0.755556
|
0a3a1eefd0bda0121ff45a42e939ba540316035c
| 2,201
|
py
|
Python
|
menu_structure.py
|
adwuard/OP1_File_Organizer
|
0da6d297734a0f7905fc23ea424256456b2b2b45
|
[
"MIT"
] | 27
|
2019-03-30T22:21:50.000Z
|
2019-08-22T04:51:13.000Z
|
menu_structure.py
|
adwuard/OP_Manager
|
0da6d297734a0f7905fc23ea424256456b2b2b45
|
[
"MIT"
] | 4
|
2019-10-29T22:55:53.000Z
|
2022-03-11T23:44:48.000Z
|
menu_structure.py
|
adwuard/OP_Manager
|
0da6d297734a0f7905fc23ea424256456b2b2b45
|
[
"MIT"
] | 8
|
2019-04-14T05:35:30.000Z
|
2019-07-17T16:10:09.000Z
|
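# Menu convention used throughout this file: each menu is a list whose first entry
# is [title, -1]; every following entry maps a label either to a callback name
# (string) or to a nested submenu (another list of this same shape).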
WifiTransfer = [
["Wifi Transfer", -1],
["SSH Transfer", "Check_IP"],
["Wifi Server", "Server IP"]
# "Connect", -1
]
MIDI = [
["MIDI", -1],
["USB MIDI IN Test", "MIDI_In_Test"],
["USB MIDI OUT Test", "MIDI_Out_Test"]
]
op1fun = [
["OP1.FUN", -1],
["Packs", "OP1FUN_BrowsePacks"],
["Download All Packs", "OP1FUN_DownloadAllPacks"]
]
Utilities = [
["Utilities", -1],
["Check Storage", "checkStorage"],
["MIDI Host", "MIDI_Host"],
["op1.fun", op1fun],
["SSH Transfer", "Check_IP"],
["Eject", "act_ESC_Eject"],
# ["Power Off System", "act_POWER_OFF"],
]
# PresetPage = [
# ["Manage Presets", -1],
# ["Freeze State", "act_Freeze_State"],
# ["Upload From Local", "act_Upload_Preset_From_Local"],
# ["Del All User Data", "act_DANG_Delete_ALL_From_OP_1"]
# ]
OP_1_Patches_Folder = [
["OP-1 Patches", -1],
["Synth", "OP-1 Synth Patches"], # Start Browser
["Drum", "OP-1 Drum Patches"] # Start Browser
]
Local_Patches = [
["Local Patches", -1],
["Synth", "UploadSynthPatches"], # Start Browser
["Drum", "UploadDrumPatches"] # Start Browser
]
OP_1_Patches = [
["OP-1", -1],
["Synth", "OP1_Synth_Patches"], # Start Browser
["Drum", "OP1_Drum_Patches"] # Start Browser
]
PatchesPage = [
["Patches", -1],
["Backup", "act_5_Backup_All_Patches"],
["Manage Local", Local_Patches],
["Manage OP-1", OP_1_Patches]
]
BackupPage = [
["Backup", -1],
["Tracks + Album", "act_Backup_Project_From_OP_1"],
["Tracks", "act_Load_Project_From_Local_only_tracks"]
]
ProjectsPage = [
["Projects", -1],
["Backup", BackupPage],
["Manage Local", "act_Load_Project_From_Local"]
]
OP1 = [
["OP-1", -1],
["Projects", ProjectsPage],
["Patches", PatchesPage]
]
OPZ = [
["OP-Z", -1],
["Freeze State", "act_Freeze_State_OPZ"],
["Recall State", "act_Recall_State_To_OPZ"],
["Manage OP-Z", "OPZ_Patches"]
# ["Local Projects", "act_Load_Project_From_Local"]
]
MainPage = [
["Main Menu", -1],
["OP-1", OP1],
["OP-Z", OPZ],
# ["Wifi Transfer", WifiTransfer],
["Utilities", Utilities],
["Eject", "act_ESC_Eject"]
]
| 23.168421
| 60
| 0.582917
|
7737fba2d658ddeb81fa82d499103d2c03050b85
| 112
|
py
|
Python
|
code/src/nuvla/api/__init__.py
|
nuvla/python-api
|
7b530aa049eee8c8cd654c27d749d46bf0d19e87
|
[
"Apache-2.0"
] | 4
|
2019-04-27T10:35:44.000Z
|
2019-05-05T13:04:28.000Z
|
code/src/nuvla/api/__init__.py
|
nuvla/python-library
|
421abe6f583e1ce6a48670131faefe16b7e0bc12
|
[
"Apache-2.0"
] | 21
|
2019-02-22T07:30:41.000Z
|
2022-03-30T13:27:55.000Z
|
code/src/nuvla/api/__init__.py
|
nuvla/python-library
|
421abe6f583e1ce6a48670131faefe16b7e0bc12
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from .api import Api, NuvlaError, ConnectionError, NuvlaResourceOperationNotAvailable
| 22.4
| 85
| 0.758929
|
859d71041dd2e21f82f014c80bdf37d0de20106a
| 702
|
py
|
Python
|
String/Leetcode 5. Longest Palindromic Substring.py
|
kaizhengny/LeetCode
|
67d64536ab80f4966699fe7460d165f2a98d6a82
|
[
"MIT"
] | 31
|
2020-06-23T00:40:04.000Z
|
2022-01-08T11:06:24.000Z
|
String/Leetcode 5. Longest Palindromic Substring.py
|
kaizhengny/LeetCode
|
67d64536ab80f4966699fe7460d165f2a98d6a82
|
[
"MIT"
] | null | null | null |
String/Leetcode 5. Longest Palindromic Substring.py
|
kaizhengny/LeetCode
|
67d64536ab80f4966699fe7460d165f2a98d6a82
|
[
"MIT"
] | 7
|
2020-04-30T08:46:03.000Z
|
2021-08-28T16:25:54.000Z
|
class Solution:
def longestPalindrome(self, s: str) -> str:
n = len(s)
dp = [[0]* n for _ in range(n)]
max_len = 0
res = ''
for i in range(len(s)):
dp[i][i] = 1
max_len = 1
res = s[i]
for i in range(len(s)-1):
if s[i] == s[i+1]:
dp[i][i+1] = 1
max_len = 2
res = s[i:i+2]
for j in range(len(s)):
for i in range(j):
if s[i] == s[j] and dp[i+1][j-1]:
dp[i][j] = 1
if j-i+1 > max_len:
max_len = j-i+1
res = s[i:j+1]
return res
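# Minimal usage sketch (added for illustration; not part of the original submission):
if __name__ == '__main__':
    print(Solution().longestPalindrome("babad"))  # "bab" ("aba" would be equally valid)
    print(Solution().longestPalindrome("cbbd"))   # "bb"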
| 30.521739
| 49
| 0.339031
|
37d05277c3ba2f39c7d599d40c96b956808e7675
| 102
|
py
|
Python
|
Codeforces/270/gen.py
|
Mindjolt2406/Competitive-Programming
|
d000d98bf7005ee4fb809bcea2f110e4c4793b80
|
[
"MIT"
] | 2
|
2018-12-11T14:37:24.000Z
|
2022-01-23T18:11:54.000Z
|
Codeforces/270/gen.py
|
Mindjolt2406/Competitive-Programming
|
d000d98bf7005ee4fb809bcea2f110e4c4793b80
|
[
"MIT"
] | null | null | null |
Codeforces/270/gen.py
|
Mindjolt2406/Competitive-Programming
|
d000d98bf7005ee4fb809bcea2f110e4c4793b80
|
[
"MIT"
] | null | null | null |
from random import *
print 5
for i in range(5):
for j in range(5): print randint(1,1000),
print ""
| 20.4
| 43
| 0.666667
|
976ca63872052807fa892bcc3cc0c4ff9f61c3af
| 88
|
py
|
Python
|
examples/django/1_drf_base_managed_postges/example_app/urls.py
|
e-kor/yappa
|
1ea3c4e6a5ffb7a3fbd02d810a62f73a13b9d649
|
[
"MIT"
] | 41
|
2021-07-15T14:54:16.000Z
|
2022-03-26T10:59:40.000Z
|
examples/django/1_drf_base_managed_postges/example_app/urls.py
|
e-kor/yappa
|
1ea3c4e6a5ffb7a3fbd02d810a62f73a13b9d649
|
[
"MIT"
] | 29
|
2021-08-04T08:04:26.000Z
|
2021-08-19T09:50:30.000Z
|
examples/django/1_drf_base_managed_postges/example_app/urls.py
|
e-kor/yappa
|
1ea3c4e6a5ffb7a3fbd02d810a62f73a13b9d649
|
[
"MIT"
] | 3
|
2021-07-23T14:56:40.000Z
|
2022-03-24T16:09:55.000Z
|
from django.urls import path
from .views import root
urlpatterns = [path("", root), ]
| 14.666667
| 32
| 0.704545
|
eb65dda0a839148b12883c22d5bf4946eef87495
| 2,042
|
py
|
Python
|
@test/bench.py
|
tenko/kdtree
|
c3bcb9be24615d39b9216ddd85381e981e9f2946
|
[
"BSD-2-Clause"
] | 5
|
2016-01-19T03:49:16.000Z
|
2022-01-07T05:33:12.000Z
|
@test/bench.py
|
tenko/kdtree
|
c3bcb9be24615d39b9216ddd85381e981e9f2946
|
[
"BSD-2-Clause"
] | null | null | null |
@test/bench.py
|
tenko/kdtree
|
c3bcb9be24615d39b9216ddd85381e981e9f2946
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from random import randint, seed
from timeit import Timer
import numpy as np
def asciitable(rows):
# From : https://gist.github.com/lonetwin/4721748
# - figure out column widths
widths = [len(max(columns, key=len)) for columns in zip(*rows)]
def separator():
print('-+-'.join( '-' * width for width in widths ))
separator()
# - print the header
header, data = rows[0], rows[1:]
print(
' | '.join(format(title, "%ds" % width) for width, title in zip(widths, header))
)
separator()
# - print the data
for row in data:
print(
" | ".join(format(cdata, "%ds" % width) for width, cdata in zip(widths, row))
)
separator()
DATA = None
if __name__ == '__main__':
seed(42)
HEADING = ('Test', 'cKDTree', 'KDTree', 'Ratio')
rows = [HEADING]
SETUP_CKDTREE = """
from __main__ import DATA
from scipy.spatial import cKDTree
kdtree1 = cKDTree(DATA, leafsize=10)
import numpy as np
pnt = np.array((.5,.5,.5))
"""
SETUP_KDTREE = """
from __main__ import DATA
import numpy as np
from kdtree import KDTree, KNNResultSet
pnt = np.array((.5,.5,.5))
kdtree2 = KDTree(DATA, maxLeafSize = 10)
kdtree2.build()
res2 = KNNResultSet(10)
"""
M = 10
def run(name, ckdtree_stmt, kdtree_stmt):
a = Timer(ckdtree_stmt, setup = SETUP_CKDTREE).timeit(number = M)
b = Timer(kdtree_stmt, setup = SETUP_KDTREE).timeit(number = M)
ratio = a / b
rows.append((name, "%g" % a, "%g" % b, "%.1f" % ratio))
for N in (1000, 10000, 100000):
DATA = pnts = np.random.rand(N,3)
run('Initialize', 'cKDTree(DATA, leafsize=10)', 'tree=KDTree(DATA, maxLeafSize = 10); tree.build()')
run('Nearest', 'kdtree1.query(pnt, k=10)', 'kdtree2.findNeighbors(res2, pnt)')
print(" DATA SIZE %d" % N)
asciitable(rows)
print("\n")
rows.clear()
rows.append(HEADING)
| 26.519481
| 108
| 0.573457
|
6eb601973fc5bc3cbd97fba937f845de8e87ee43
| 118
|
py
|
Python
|
.history/config_20210927032431.py
|
GraceOswal/pitch-perfect
|
d781c6e0f55c11f2a5e5dceb952f6b2de3c47c3b
|
[
"MIT"
] | null | null | null |
.history/config_20210927032431.py
|
GraceOswal/pitch-perfect
|
d781c6e0f55c11f2a5e5dceb952f6b2de3c47c3b
|
[
"MIT"
] | null | null | null |
.history/config_20210927032431.py
|
GraceOswal/pitch-perfect
|
d781c6e0f55c11f2a5e5dceb952f6b2de3c47c3b
|
[
"MIT"
] | null | null | null |
import os
from dotenv import load_dotenv as ld
ld()
class Config:
debug = True
    SECRET_KEY = os.environ.get("SECRET_KEY")  # fixed from OS.ENVIRON.GET; the env var name is an assumption
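# Illustrative .env entry this config would read (name and value are assumptions):
#   SECRET_KEY=replace-me-with-a-long-random-string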
| 11.8
| 36
| 0.70339
|
7a72aa69f9b6acd63a91dc48c0e22425a5a7aaf6
| 494
|
py
|
Python
|
scripts/ui-banner.py
|
subutai-io/launcher
|
d8397995e18200b12d60781ed485af04f70bff03
|
[
"Apache-2.0"
] | 1
|
2017-10-31T18:55:36.000Z
|
2017-10-31T18:55:36.000Z
|
scripts/ui-banner.py
|
subutai-attic/launcher
|
d8397995e18200b12d60781ed485af04f70bff03
|
[
"Apache-2.0"
] | 199
|
2016-07-28T07:30:48.000Z
|
2017-10-14T06:15:40.000Z
|
scripts/ui-banner.py
|
subutai-io/launcher
|
d8397995e18200b12d60781ed485af04f70bff03
|
[
"Apache-2.0"
] | 1
|
2021-03-27T10:08:26.000Z
|
2021-03-27T10:08:26.000Z
|
import subutai
from time import sleep
def subutaistart():
subutai.download("launcher-ad-1.png")
while !subutai.isDownloadComplete() == 1:
sleep(0.05)
subutai.download("launcher-ad-2.png")
while !subutai.isDownloadComplete() == 1:
sleep(0.05)
subutai.download("launcher-ad-3.png")
while !subutai.isDownloadComplete() == 1:
sleep(0.05)
subutai.download("launcher-ad-4.png")
while !subutai.isDownloadComplete() == 1:
sleep(0.05)
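# A possible consolidation of the four identical blocks above (illustrative sketch
# only; it assumes the same subutai.download / subutai.isDownloadComplete API used here):
def download_and_wait(name, poll_interval=0.05):
    subutai.download(name)
    while subutai.isDownloadComplete() != 1:
        sleep(poll_interval)
# e.g. inside subutaistart():
#     for n in range(1, 5):
#         download_and_wait("launcher-ad-%d.png" % n)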
| 24.7
| 45
| 0.645749
|
c897804072b7b02dc3f109f6073e83d325985bc7
| 31,267
|
py
|
Python
|
root/trip_ids_at_stops_merge_in_muni_perday_v3.py
|
transitanalystisarel/TransitAnalystIsrael
|
341de9272b352c18333ff136a00de0b97cd82216
|
[
"MIT"
] | null | null | null |
root/trip_ids_at_stops_merge_in_muni_perday_v3.py
|
transitanalystisarel/TransitAnalystIsrael
|
341de9272b352c18333ff136a00de0b97cd82216
|
[
"MIT"
] | null | null | null |
root/trip_ids_at_stops_merge_in_muni_perday_v3.py
|
transitanalystisarel/TransitAnalystIsrael
|
341de9272b352c18333ff136a00de0b97cd82216
|
[
"MIT"
] | 3
|
2019-05-08T04:36:03.000Z
|
2020-11-23T19:46:52.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# collect a set of trip_id s at all stops in a GTFS file over the selected week of the service period starting at serviceweekstartdate
# filter stops in munis based on input txt file - stopsinmuni_post_edit
# merge sets of trips at stops in each muni to count trips per hour and per day
#
# inputs:
# parent_path = 'C:\\transitanalyst\\gtfs\\'
# pathout = 'C:\\transitanalyst\\processed\\'
# sserviceweekstartdate = '20181021'
# gtfsdate = '20181021'
# gtfsdir = 'israel'+gtfsdate
# stopsinmuni_post_edit = 'stopsinmuni_post_edit'+'_'+servicedate+'.txt'
#
# outputs:
# output txtfileout4 of munis with tpd per line (agency_id+route_short_name) in muni - 'muni_w_tpd_per_line'+'_'+servicedate+'.txt'
# output txtfileout3 of munis with trips per hour in day summed over one week -'munis_w_tph_summed_over_week'+'_'+sserviceweekstartdate+'_'+gtfsdate+'.txt'
# output jsfileout of munis with tpd per line (agency_id+route_short_name) in muni - 'muni_w_tpd_per_line_'+sserviceweekstartdate+'.js'
#
print('----------------- collect a set of trip_id s at all stops --------------------------')
print('output txt file of stops with trip_id s')
from datetime import date
from datetime import timedelta
import time
import copy
import json
import csv
from pathlib import Path
print("Local current time :", time.asctime( time.localtime(time.time()) ))
#
# input:
parent_path = Path('C:\\transitanalyst\\gtfs\\')  # Path objects so the "/" joins used below work
pathout = Path('C:\\transitanalyst\\processed\\')
sserviceweekstartdate = '20181021' # recommend to use gtfsdate (expect gtfs files to be most accurate for first week in service range)
gtfsdate = '20181021'
gtfsdir = 'israel'+gtfsdate
servicedate = sserviceweekstartdate
stopsinmuni_post_edit = 'stopsinmuni_post_edit'+'_'+servicedate+'.txt'
# output:
txtfileout4 = 'muni_w_tpd_per_line'+'_'+servicedate+'.txt'
#txtfileout1 = 'stops_w_trip_ids'+'_'+sserviceweekstartdate+'_'+gtfsdate+'.txt' # commented out - generates very big file
#txtfileout2 = 'stops_w_tph_summed_over_week'+'_'+sserviceweekstartdate+'_'+gtfsdate+'.txt' # stops with trips per hour in day summed over one week
txtfileout3 = 'munis_w_tph_summed_over_week'+'_'+sserviceweekstartdate+'_'+gtfsdate+'.txt' # munis with trips per hour in day summed over one week
jsfileout = 'muni_w_tpd_per_line_'+sserviceweekstartdate+'.js'
#parent_path = 'C:\\transitanalyst\\processed\\' # small files for test
#gtfsdir = 'israel20180106-binyamina_station' # small files for test
gtfspathin = parent_path / gtfsdir
gtfspath = gtfspathin
gtfspathout = pathout
processedpathin = pathout
DAYSTOCOUNT = 7
daysofservicetocount = DAYSTOCOUNT - DAYSTOCOUNT/7
MAX_STOPS_COUNT = 50000
MAX_STOP_TIMES_COUNT = 25000000
MAX_TRIPS_COUNT = 900000
MAX_SHAPES_COUNT = 10000000
MAX_ROUTES_COUNT = 15000
MAX_AGENCY_COUNT = 100
MAX_CALENDAR_COUNT = 250000
#
# scan lines in calendar to compute start and end service dates and to fill calendar_dict with calendar lines keyed on service_id
#
maxfilelinecount = MAX_CALENDAR_COUNT
gtfsfile = 'calendar.txt'
inid = 'service_id'
calendar_dict = {}
slinelist=[]
print(gtfspath / gtfsfile)
filein = open(gtfspath / gtfsfile, 'r', encoding="utf8")
sline = filein.readline()
slinelist=sline[:-1].split(",")
print(slinelist)
keylist = slinelist
inid_index = keylist.index(inid)
service_id_i = keylist.index('service_id')
sunday_i = keylist.index('sunday')
monday_i = keylist.index('monday')
tuesday_i = keylist.index('tuesday')
wednesday_i = keylist.index('wednesday')
thursday_i = keylist.index('thursday')
friday_i = keylist.index('friday')
saturday_i = keylist.index('saturday')
start_date_i = keylist.index('start_date')
end_date_i = keylist.index('end_date')
calendar_dict = {keylist[inid_index]:slinelist}
dayofweek=[monday_i, tuesday_i, wednesday_i, thursday_i, friday_i, saturday_i, sunday_i]
#print calendar_dict
# scan lines in calendar
count = 0
sstartservicedate = '25250101'
sendservicedate = '15150101'
sline = filein.readline()
while ((count < maxfilelinecount) and (sline != '')):
slinelist=sline[:-1].split(",")
#print slinelist
in_id = slinelist[inid_index]
# print in_id
calendar_dict[slinelist[inid_index]] = slinelist
sstartservicedate = min(sstartservicedate, slinelist[start_date_i])
sendservicedate = max(sendservicedate, slinelist[end_date_i])
#print calendarline_dict
#print calendar_dict
#print '------------------'
count += 1
sline = filein.readline()
print('------------------')
#print calendar_dict
print(sstartservicedate, sendservicedate)
filein.close()
#
# print int(sstartservicedate[0:4]),int(sstartservicedate[4:6]),int(sstartservicedate[6:8])
# from str to date format
startservicedate = date(int(sstartservicedate[0:4]),int(sstartservicedate[4:6]),int(sstartservicedate[6:8]))
endservicedate = date(int(sendservicedate[0:4]),int(sendservicedate[4:6]),int(sendservicedate[6:8]))
serviceweekstartdate = date(int(sserviceweekstartdate[0:4]),int(sserviceweekstartdate[4:6]),int(sserviceweekstartdate[6:8]))
print('startservicedate, endservicedate, serviceweekstartdate ', startservicedate, endservicedate, serviceweekstartdate)
#
# create trips per hour list with hours from 0-30 (for times after midnight) and count of 0, for tripsperhour
# use as template for trips per hour lists per stop
#
dateinservicerange = lambda d: d >= startservicedate and d <= endservicedate
# print timedelta(days=1)
serviceweekenddate = serviceweekstartdate + timedelta(days=DAYSTOCOUNT-1)
print('serviceweekstartdate, serviceweekenddate ', serviceweekstartdate, serviceweekenddate)
if dateinservicerange(serviceweekstartdate) and dateinservicerange(serviceweekenddate) :
print('serviceweek selected is in service range')
else :
print('error*********************serviceweek selected is NOT in service range: ' , serviceweekstartdate, serviceweekenddate, startservicedate, endservicedate)
exit()
print('startservicedate, endservicedate ', startservicedate, endservicedate)
startservicedate = serviceweekstartdate
endservicedate = serviceweekenddate
print('startservicedate, endservicedate ', startservicedate, endservicedate)
tripsperhourlist = []
for houratstop in range (31):
tripsperhourlist.append(0)
print('----tripsperhourlist----')
print(tripsperhourlist)
#
# scan stops.txt to create a stops dict keyed on stop_id that includes lat lon, an empty dict of trip_id s and times at stop for this stop and a
# trips per hour at stop list
# also calculate min and max lat lon#
maxfilelinecount = MAX_STOPS_COUNT
gtfsfile = 'stops.txt'
inid = 'stop_id'
stops_dict = {}
tripsperstop_dict = {} # dict of trip_id s and times at stop for this stop
slinelist=[]
print(gtfspath / gtfsfile)
filein = open(gtfspath / gtfsfile, 'r', encoding="utf8")
sline = filein.readline()
slinelist=sline[:-1].split(",")
# print slinelist
keylist = slinelist
inid_index = keylist.index(inid)
stop_id_i = keylist.index('stop_id')
stop_lat_i = keylist.index('stop_lat')
stop_lon_i = keylist.index('stop_lon')
stop_desc_i = keylist.index('stop_desc')
#stops_dict = {keylist[inid_index]:[slinelist[slinelist[stop_lat_i], slinelist[stop_lon_i], copy.deepcopy(tripsperstop_dict), copy.deepcopy(tripsperhourlist), 0]}
#print stops_dict
# scan gtfsfile
count = 0
minlat = '90.000000'
minlon = '90.000000'
maxlat = '00.000000'
maxlon = '00.000000'
sline = filein.readline()
while ((count < maxfilelinecount) and (sline != '')):
slinelist=sline[:-1].split(",")
#print slinelist
in_id = slinelist[inid_index]
# print in_id
stops_dict[slinelist[inid_index]] = [slinelist[stop_lat_i], slinelist[stop_lon_i], copy.deepcopy(tripsperstop_dict), copy.deepcopy(tripsperhourlist), 0]
minlat = min(minlat, slinelist[stop_lat_i])
maxlat = max(maxlat, slinelist[stop_lat_i])
minlon = min(minlon, slinelist[stop_lon_i])
maxlon = max(maxlon, slinelist[stop_lon_i])
count += 1
sline = filein.readline()
print('------------------')
print(in_id, stops_dict[in_id]) #last one
#for stop_id, stopsdictlist in stops_dict.iteritems():
#print stop_id, stopsdictlist[:2], list(stopsdictlist[2])
print('------------------')
print('minlat, minlon : ', minlat, minlon)
print('maxlat, maxlon : ', maxlat, maxlon)
print('stop lines scanned ', count)
filein.close()
#
# scan stop_times.txt to populate trip_id dict per stop in the stops dict
#
maxtimeatstop = '00:00:00'
maxfilelinecount = MAX_STOP_TIMES_COUNT
gtfspath = gtfspathin
gtfsfile = 'stop_times.txt'
slinelist=[]
print(gtfspath / gtfsfile)
filein = open(gtfspath / gtfsfile, 'r', encoding="utf8")
sline = filein.readline()
slinelist=sline[:-1].split(",")
# print slinelist
keylist = slinelist
stop_id_i = keylist.index('stop_id') # index in stop_times slinelist.
trip_id_i = keylist.index('trip_id') # index in stop_times slinelist.
departure_time_i = keylist.index('departure_time') # index in stop_times slinelist.
trip_dict_i = 2; # index in stops_dict. changed from 2 when stop_desc added. changed back to 2 when desc removed
# scan gtfsfile
count = 0
stopscount = 0
sline = filein.readline()
while ((count < maxfilelinecount) and (sline != '')):
slinelist=sline[:-1].split(",")
#print slinelist
stop_id = slinelist[stop_id_i]
#print stop_id
trip_id = slinelist[trip_id_i]
departure_time = slinelist[departure_time_i]
if stop_id in stops_dict:
#print stop_id, trip_id, stops_dict[stop_id], stops_dict[stop_id][trip_dict_i]
if trip_id in stops_dict[stop_id][trip_dict_i]: # trip at stop more than once... yes it does happen
stops_dict[stop_id][trip_dict_i][trip_id].append(departure_time)
#print 'trips at stop more than once - ', stop_id, len(stops_dict[stop_id][trip_dict_i]), len(stops_dict[stop_id][trip_dict_i][trip_id])
else : # trip at stop first time
stops_dict[stop_id][trip_dict_i][trip_id] = [departure_time]
#print 'trip at stop first time ********************** ', stop_id, stops_dict[stop_id][trip_dict_i]
stopscount += 1
else :
print('************* error ** stop_id not found in stops_dict ', stop_id)
count += 1
maxtimeatstop = max(maxtimeatstop, departure_time)
sline = filein.readline()
print('------------------')
#print stops_dict
#for stop_id in stops_dict:
# print stop_id, len(stops_dict[stop_id][trip_dict_i])
# for trip_id in stops_dict[stop_id][trip_dict_i]:
# print '>>>', trip_id, len(stops_dict[stop_id][trip_dict_i][trip_id])
# if len(stops_dict[stop_id][trip_dict_i][trip_id]) > 1 : print '>>>>>>>>>>>>>>>>>>>>>>>>>>'
#print 'last stops_dict entry updated: ', stops_dict[stop_id]
print('stop_times lines scanned ', count)
print('stops found in dict ', stopscount)
print('maxlat, maxlon', maxlat, maxlon)
print('maxtimeatstop : ', maxtimeatstop)
filein.close()
#
# scan routes.txt to create a routes dict keyed on route_id that includes a route_short_name, and agency_id
#
maxfilelinecount = MAX_ROUTES_COUNT
gtfsfile = 'routes.txt'
inid = 'route_id'
routes_dict = {}
slinelist=[]
print(gtfspath / gtfsfile)
filein = open(gtfspath / gtfsfile, 'r', encoding="utf8")
sline = filein.readline()
slinelist=sline[:-1].split(",")
print(slinelist)
keylist = slinelist
inid_index = keylist.index(inid)
route_id_i = keylist.index('route_id')
agency_id_i = keylist.index('agency_id')
route_short_name_i = keylist.index('route_short_name')
route_long_name_i = keylist.index('route_long_name')
route_desc_i = keylist.index('route_desc')
route_type_i = keylist.index('route_type')
#routes_dict = {keylist[inid_index]:[slinelist[agency_id_i], slinelist[route_short_name_i]]}
#print routes_dict
# scan gtfsfile
count = 0
sline = filein.readline()
while ((count < maxfilelinecount) and (sline != '')):
slinelist=sline[:-1].split(",")
#print slinelist
in_id = slinelist[inid_index]
# print in_id
routes_dict[slinelist[inid_index]] = [slinelist[agency_id_i], slinelist[route_short_name_i]]
count += 1
sline = filein.readline()
print('------------------')
#print routes_dict
print('last routes_dict entry entered: ', slinelist[inid_index], routes_dict[slinelist[inid_index]])
print('------------------')
print('route lines scanned ', count)
filein.close()
#
# scan trips.txt to create trips dict keyed on trip_id and includes service_id and route_id and number of times the trip runs during the analyzed service week
#
maxfilelinecount = MAX_TRIPS_COUNT
gtfspath = gtfspathin
gtfsfile = 'trips.txt'
inid = 'trip_id'
trips_dict = {} # trip_id: [service_id, route_id, xinweek, xpdlist, agency_id, route_short_name]
slinelist=[]
print(gtfspath / gtfsfile)
filein = open(gtfspath / gtfsfile, 'r', encoding="utf8")
sline = filein.readline()
slinelist=sline[:-1].split(",")
# print slinelist
keylist = slinelist
inid_index = keylist.index(inid)
trip_id_i = keylist.index('trip_id')
service_id_i = keylist.index('service_id')
route_id_i = keylist.index('route_id')
#trips_dict = {keylist[inid_index]:[slinelist[service_id_i], slinelist[route_id_i]]}
#print trips_dict
# scan gtfsfile
count = 0
count_trip_ids_in_week = 0
sline = filein.readline()
while ((count < maxfilelinecount) and (sline != '')):
slinelist=sline[:-1].split(",")
#print slinelist
in_id = slinelist[inid_index] # trip_id
# print in_id
xinweek = 0
xpdlist = [0,0,0,0,0,0,0]
service_id = slinelist[service_id_i]
route_id = slinelist[route_id_i]
agency_id = routes_dict[route_id][0]
route_short_name = routes_dict[route_id][1]
calslinelist = calendar_dict[service_id] # use service_id from trips_dict to look up calendar line list
sstartcalendardate = calslinelist[start_date_i] # string
sendcalendardate = calslinelist[end_date_i] # string
startcalendardate = date(int(sstartcalendardate[0:4]),int(sstartcalendardate[4:6]),int(sstartcalendardate[6:8])) # start date for trip service
endcalendardate = date(int(sendcalendardate[0:4]),int(sendcalendardate[4:6]),int(sendcalendardate[6:8])) # end date for trip service
#print startcalendardate, endcalendardate, ' start and end date for trip service'
#print startservicedate, endservicedate, ' start and end date for all service'
for ordcalendardate in range(max(startcalendardate.toordinal(),startservicedate.toordinal()),min(endcalendardate.toordinal(),endservicedate.toordinal())+1):
calendardate = date.fromordinal(ordcalendardate)
calendardayofweek = calendardate.weekday()
#print calendardate, calendardayofweek, dayofweek[calendardayofweek], calslinelist[dayofweek[calendardayofweek]]
tripcountforday = int(calslinelist[dayofweek[calendardayofweek]])
#print tripcountforday, calslinelist
if tripcountforday > 0 :
xinweek += tripcountforday
xpdlist[(calendardate-startservicedate).days] += tripcountforday # add to trip count for that service day
trips_dict[in_id] = [slinelist[service_id_i], slinelist[route_id_i], xinweek, xpdlist, agency_id, route_short_name]
if xinweek > 0 : count_trip_ids_in_week +=1
count += 1
sline = filein.readline()
print('------------------')
#print trips_dict
print('trips lines scanned ', count)
print('trip ids in week ', count_trip_ids_in_week)
filein.close()
'''
#
# scan agency.txt to create agency dict keyed on agency_id and includes agency name
#
maxfilelinecount = MAX_AGENCY_COUNT
gtfspath = gtfspathin
gtfsfile = 'agency.txt'
inid = 'agency_id'
agency_dict = {}
slinelist=[]
print gtfspath+gtfsfile
filein = open(gtfspath / gtfsfile, 'r', encoding="utf8")
sline = filein.readline()
slinelist=sline[:-1].split(",")
# print slinelist
keylist = slinelist
inid_index = keylist.index(inid)
agency_id_i = keylist.index('agency_id')
agency_name_i = keylist.index('agency_name')
# scan gtfsfile
count = 0
sline = filein.readline()
while ((count < maxfilelinecount) and (sline != '')):
slinelist=sline[:-1].split(",")
#print slinelist
in_id = slinelist[inid_index]
# print in_id
agency_dict[in_id] = slinelist[agency_name_i]
count += 1
sline = filein.readline()
print '------------------'
#print agency_dict
print 'agency lines scanned ', count
filein.close()
'''
#
# scan stops dict to populate trips per hour by looking up the each trip_id in the set in the trip dict
# to get the service_id to look up the service days in the calendar dict
# also update the total count
#
print('scan stops dict to populate trips per hour')
count = 0
tripcount = 0
maxtphanystop = 0
maxtpwanystop = 0
deltatimehist = []
for i in range(121) : deltatimehist.append(0)
for stop_id, [stop_lat, stop_lon, tripsatstop_dict, tphlist, totaltpwatstop] in stops_dict.items():
#print count, stop_id, stop_lat, stop_lon , len(tripsatstop_dict), tphlist, totaltpwatstop
count += 1
for trip_id, timeatstoplist in tripsatstop_dict.items():
tripcount +=1
service_id = trips_dict[trip_id][0]
slinelist = calendar_dict[service_id] # use service_id from trips_dict to look up calendar line list
sstartcalendardate = slinelist[start_date_i] # string
sendcalendardate = slinelist[end_date_i] # string
startcalendardate = date(int(sstartcalendardate[0:4]),int(sstartcalendardate[4:6]),int(sstartcalendardate[6:8])) # start date for trip service
endcalendardate = date(int(sendcalendardate[0:4]),int(sendcalendardate[4:6]),int(sendcalendardate[6:8])) # end date for trip service
#print startcalendardate, endcalendardate, ' start and end date for trip service'
#print startservicedate, endservicedate, ' start and end date for all service'
route_id = trips_dict[trip_id][1]
agency_id = routes_dict[route_id][0]
route_short_name = routes_dict[route_id][1]
#agency_name = agency_dict[agency_id]
#line_name = agency_name+' - '+route_short_name # bigger file
line_name = agency_id+'-'+route_short_name # smaller geojson file, but need to lookup agency name in client app for display
#print count, tripcount, stop_id, trip_id, service_id, tpdlist[:2], totaltpwatstop
#print 'route_id, line_name: ',route_id, line_name
for ordcalendardate in range(max(startcalendardate.toordinal(),startservicedate.toordinal()),min(endcalendardate.toordinal(),endservicedate.toordinal())+1):
calendardate = date.fromordinal(ordcalendardate)
calendardayofweek = calendardate.weekday()
#print calendardate, calendardayofweek, slinelist[dayofweek[calendardayofweek]]
tripcountforday = int(slinelist[dayofweek[calendardayofweek]])
#print tripcountforday
if tripcountforday > 0 :
maxtimetripatstop = 0
mintimetripatstop = 30*60
for timeatstop in timeatstoplist :
hour_i = int(timeatstop[0:2])
#print timeatstop, timeatstop[0:2], hour_i
tphlist[hour_i] += tripcountforday # add to trip count for that day at the hour
inttimeatstop = 60*int(timeatstop[0:2]) + int(timeatstop[3:5])
maxtimetripatstop = max(maxtimetripatstop, inttimeatstop)
mintimetripatstop = min(mintimetripatstop, inttimeatstop)
deltatimetripatstop = maxtimetripatstop - mintimetripatstop
if deltatimetripatstop < 120 : deltatimehist[deltatimetripatstop] +=1
else : deltatimehist[120] +=1
if deltatimetripatstop > 100 : print('stop_id, trip_id, mintimetripatstop, maxtimetripatstop, deltatimetripatstop : ', stop_id, trip_id, mintimetripatstop, maxtimetripatstop, deltatimetripatstop)
#print count, stop_id, stops_dict[stop_id][3]
totaltpwatstop = 0
for tph in stops_dict[stop_id][3] :
totaltpwatstop += tph
maxtphanystop = max(maxtphanystop, tph)
#print count, stop_id, totaltpwatstop
stops_dict[stop_id][4] = totaltpwatstop
maxtpwanystop = max(maxtpwanystop, totaltpwatstop)
print('stop count ', count)
#print 'last stops_dict entry : ', stops_dict[stop_id]
print('maxtpwanystop ', maxtpwanystop)
print('maxtphanystop ', maxtphanystop)
print(deltatimehist)
#
# >>> load txt file of stopsinmuni post edit
#
print('>>> load txt file of stopsinmuni post edit')
txtfilein = stopsinmuni_post_edit
stopsinmuni = {}
with open(processedpathin / txtfilein, newline='', encoding="utf8") as f:
reader = csv.reader(f)
header = next(reader) # ['muni_id', 'stop_id']
print(header)
for row in reader:
#print row
muni_id = row[0]
stop_id = row[1]
# add to list, do not remove muni from list of stopsinmuni
if muni_id in stopsinmuni :
stopsinmuni[muni_id].append(stop_id)
else :
stopsinmuni[muni_id] = [stop_id]
print(stopsinmuni[muni_id]) # last one
print('stopsinmuni loaded. muni count ', len(stopsinmuni))
#
# to create tripsinmuni_dict
# for each muni and stop in muni location
# merge the tripsatstop_dict from all stops in muni to create mergedtripsinmuni_dict
#
municount = 0
tripsinmuni_dict = {} # muni_id: mergedtripsinmuni_dict
# for each muni
# get in stop list to use as filter
for muni_id, stopsinlist in stopsinmuni.items():
print(municount, muni_id)
municount +=1
# for stops w tpd per line in muni
mergedtripsinmuni_dict = {} # trip_id: [timeinmuni1, timeinmuni2, timeinmuni3...]
stopinmunicount = 0
for stop_id in stopsinlist :
[stop_lat, stop_lon, tripsatstop_dict, tphlist, totaltpwatstop] = stops_dict[stop_id]
stopinmunicount +=1
# merge the tripsatstop_dict from all stops in muni to create mergedtripsinmuni_dict
for trip_id, timeatstoplist in tripsatstop_dict.items() :
if trips_dict[trip_id][2] > 0 : # xinweek > 0 then add first or merge otherwise don't add to dict at all
if trip_id not in mergedtripsinmuni_dict: # not in merged dict then add it
mergedtripsinmuni_dict[trip_id] = timeatstoplist
else: # already in merged dict then append timeatstoplist
mergedtripsinmuni_dict[trip_id].extend(timeatstoplist)
tripsinmuni_dict[muni_id] = mergedtripsinmuni_dict
print('muni_id, len(mergedtripsinmuni_dict) : ', muni_id, len(mergedtripsinmuni_dict))
#print muni_id, mergedtripsinmuni_dict # last one
print('municount, stopinmunicount, ', municount, stopinmunicount)
#
# create tripswxinweekandminmaxtimesinmuni_dict by converting the list of times per trip in muni to
# a list of min and max time for trip in muni and add also times per week that the trip is used
#
tripswxinweekandminmaxtimesinmuni_dict = {} # muni_id: tripswxinweekandminmaxtimes_dict
for muni_id, mergedtripsinmuni_dict in tripsinmuni_dict.items() :
tripswxinweekandminmaxtimes_dict ={} # trip_id: [xinweek, mintimetripatstop, maxtimetripatstop, deltatimetripatstop, agency_id]
for trip_id, timeatstoplist in mergedtripsinmuni_dict.items() :
maxtimetripatstop = 0
mintimetripatstop = 30*60
for timeatstop in timeatstoplist :
inttimeatstop = 60*int(timeatstop[0:2]) + int(timeatstop[3:5])
maxtimetripatstop = max(maxtimetripatstop, inttimeatstop)
mintimetripatstop = min(mintimetripatstop, inttimeatstop)
deltatimetripatstop = maxtimetripatstop - mintimetripatstop
tripswxinweekandminmaxtimes_dict[trip_id] = [trips_dict[trip_id][2], mintimetripatstop, maxtimetripatstop, deltatimetripatstop, trips_dict[trip_id][4]]
tripswxinweekandminmaxtimesinmuni_dict[muni_id] = tripswxinweekandminmaxtimes_dict
print('muni_id, len(tripswxinweekandminmaxtimes_dict) : ', muni_id, len(tripswxinweekandminmaxtimes_dict))
#print muni_id, tripswxinweekandminmaxtimes_dict # last one
#
# create tripswxpdandlineinmuni_dict by looking up xpd and line in trips_dict for trip in muni and add also times per week that the trip is used
#
tripswxpdandlineinmuni_dict = {} # muni_id: tripswxpdandline_dict
for muni_id, mergedtripsinmuni_dict in tripsinmuni_dict.items() :
tripswxpdandline_dict ={} # trip_id: [xinweek, xpdlist, agency_id, route_short_name]
for trip_id, timeatstoplist in mergedtripsinmuni_dict.items() :
tripswxpdandline_dict[trip_id] = [trips_dict[trip_id][2], copy.deepcopy(trips_dict[trip_id][3]), trips_dict[trip_id][4], trips_dict[trip_id][5]]
tripswxpdandlineinmuni_dict[muni_id] = tripswxpdandline_dict
print('muni_id, len(tripswxpdandline_dict) : ', muni_id, len(tripswxpdandline_dict))
#print muni_id, tripswxpdandline_dict # last one
#
# create tpdperlineinmuni_dict by collecting perline tpd dict for each trip in muni
#
tpdperlineinmuni_dict = {} # muni_id: tpdperline_dict
for muni_id, tripswxpdandline_dict in tripswxpdandlineinmuni_dict.items() :
tpdperline_dict = {} # line_name_i: [tpw, tpdlist]
for trip_id, [xinweek, xpdlist, agency_id, route_short_name] in tripswxpdandline_dict.items() :
#if xpdlist[0] > 1 : print '>>>>> ' ,muni_id, trip_id, [xinweek, xpdlist, agency_id, route_short_name]
#line_name = agency_dict[agency_id]+'-'+route_short_name
line_name_i = agency_id+'-'+route_short_name # smaller geojson file, but need to lookup agency name in client app for display
if line_name_i in tpdperline_dict : # if line name already in dict then merge
tpdperline_dict[line_name_i][0] += xinweek
for i in range(len(xpdlist)) : tpdperline_dict[line_name_i][1][i] += xpdlist[i]
else : # if line_name_i new then set to this trip values
tpdperline_dict[line_name_i] = [xinweek, copy.deepcopy(xpdlist)]
tpdperlineinmuni_dict[muni_id] = tpdperline_dict
print('muni_id, len(tpdperline_dict) : ', muni_id, len(tpdperline_dict))
print(muni_id) # last one
for line_name_i, [tpw, tpdlist] in tpdperline_dict.items() : print(tpw, tpdlist) # last one
#
# output to txt file
#
#
# output txtfileout3 of munis with trips per hour in day summed over one week -'munis_w_tph_summed_over_week'+'_'+sserviceweekstartdate+'_'+gtfsdate+'.txt'
#
fileout = open(gtfspathout / txtfileout3, 'w', encoding="utf8") # save results in file
postsline = 'muni_id,tph00,tph01,tph02,tph03,tph04,tph05,tph06,tph07,tph08,tph09,tph10,tph11,tph12,tph13,tph14,tph15,tph16,tph17,tph18,tph19,tph20,tph21,tph22,tph23\n'
fileout.write(postsline)
for muni_id, tripswxinweekandminmaxtimes_dict in tripswxinweekandminmaxtimesinmuni_dict.items() :
tphlist24 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
tpwinmuni = 0
count1x = 0
for trip_id, [xinweek, mintimetripatstop, maxtimetripatstop, deltatimetripatstop, agency_id] in tripswxinweekandminmaxtimes_dict.items() :
tpwinmuni += xinweek * 1
count1x +=1
hour_i = int(mintimetripatstop/60)%24
tphlist24[hour_i] +=xinweek
print(muni_id, tphlist24, tpwinmuni, count1x)
stph24 = ''
for i in range(24) : stph24 +=','+str(tphlist24[i])
postsline = muni_id+stph24+'\n'
fileout.write(postsline)
fileout.close()
print(gtfspathout / txtfileout3)
'''
#
# output txtfileout1 of stops with trip_id s -'stops_w_trip_ids'+'_'+sserviceweekstartdate+'_'+gtfsdate+'.txt'
#
fileout = open(gtfspathout+txtfileout1, 'w', encoding="utf8") # save results in file
postsline = 'stop_id,trip_id\n'
fileout.write(postsline)
for stop_id, [stop_lat, stop_lon, tripsatstop_dict, tphlist, totaltpwatstop] in stops_dict.iteritems():
for trip_id in tripsatstop_dict :
if trips_dict[trip_id][2] > 0 : # if trip_id is used in service week analyzed then add to file
postsline = stop_id+','+trip_id+'\n'
fileout.write(postsline)
fileout.close()
print gtfspathout+txtfileout1
#
# output txtfileout2 of stops with trips per hour in day summed over one week -'stops_w_tph_summed_over_week'+'_'+sserviceweekstartdate+'_'+gtfsdate+'.txt'
#
fileout = open(gtfspathout+txtfileout2, 'w', encoding="utf8") # save results in file
postsline = 'stop_id,tph00,tph01,tph02,tph03,tph04,tph05,tph06,tph07,tph08,tph09,tph10,tph11,tph12,tph13,tph14,tph15,tph16,tph17,tph18,tph19,tph20,tph21,tph22,tph23\n'
fileout.write(postsline)
for stop_id, [stop_lat, stop_lon, tripsatstop_dict, tphlist, totaltpwatstop] in stops_dict.iteritems():
stphlist = ''
for i in range(7) : stphlist += ','+str(tphlist[i]+tphlist[i+24])
for i in range(7,24) : stphlist += ','+str(tphlist[i])
postsline = stop_id+stphlist+'\n'
fileout.write(postsline)
fileout.close()
print gtfspathout+txtfileout2
'''
#
# create munisforoutput_dict
# find day with max tpd and compute tpw
# include in outputdict the tpdperline detail of the day with max tpd
# munisforoutput_dict[muni_id] = [tpwinmuni, maxdaytpdinmuni, averagetpdinmuni, maxdaytpdperline_dict]
#
count = 0
munisforoutput_dict = {}
for muni_id, tpdperline_dict in tpdperlineinmuni_dict.items():
tpwinmuni = 0
maxdaytpdinmuni = 0
averagetpdinmuni = 0
maxdaytpdperline_dict = {}
tpdinmunilist = [0,0,0,0,0,0,0]
maxday_i = 0
for line_name_i, [tpw, tpdlist] in tpdperline_dict.items() :
tpwinmuni += tpw
for i in range(len(tpdlist)) : tpdinmunilist[i] += tpdlist[i]
for i in range(len(tpdinmunilist)) :
if tpdinmunilist[i] > tpdinmunilist[maxday_i] : maxday_i = i
maxdaytpdinmuni = tpdinmunilist[maxday_i]
averagetpdinmuni = tpwinmuni/daysofservicetocount
for line_name_i, [tpw, tpdlist] in tpdperline_dict.items() :
maxdaytpdperline_dict[line_name_i] = tpdlist[maxday_i]
munisforoutput_dict[muni_id] = [tpwinmuni, maxdaytpdinmuni, averagetpdinmuni, maxdaytpdperline_dict]
#print count, muni_id
count +=1
print('munisforoutput_dict created , len: ', len(munisforoutput_dict), count)
print(muni_id, munisforoutput_dict[muni_id]) # print last one
#
# output js file of munis with max and average trips per day and tpd per line (agency_id, route short name) -'munis_w_tpd_per_line'+'_'+sserviceweekstartdate+'.js'
# munisforoutput_dict[muni_id] = [tpwinmuni, maxdaytpdinmuni, averagetpdinmuni, maxdaytpdperline_dict]
#
'''
def getJSON(s_id):
return {
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": []
},
"properties": {
"muni_id": s_id,
"total_tpd": munisforoutput_dict[s_id][1],
"tpdperline_dict": munisforoutput_dict[s_id][3], # no sort in py, sort in js during display
"tpwinmuni": munisforoutput_dict[s_id][0]
}
}
# saveGeoJSON
print ("Generating GeoJSON export.")
geoj = {
"type": "FeatureCollection",
"features": [getJSON(muni_id) for muni_id in munisforoutput_dict]
}
print ("Saving file: ", gtfspathout /jsfileout, " ...")
nf = open(gtfspathout+jsfileout, "w", encoding="utf8")
jsonstr = json.dumps(geoj, separators=(',',':')) # smaller file for download
outstr = jsonstr.replace('}},', '}},\n')
nf.write('var munisWtpdperline =\n')
nf.write(outstr)
nf.close()
print ("Saved file: " + jsfileout)
'''
def getJSON(m_id):
return {
m_id: {
"tpwinmuni": munisforoutput_dict[m_id][0],
"maxday_muni_tpd": munisforoutput_dict[m_id][1],
"average_muni_tpd": munisforoutput_dict[m_id][2],
"tpdperline_dict": munisforoutput_dict[m_id][3] # no sort in py, sort in js during display
}
}
# saveGeoJSON
print ("Generating JSON export.")
json_list = [getJSON(muni_id) for muni_id in munisforoutput_dict]
print(("Saving file: ", gtfspathout /jsfileout, " ..."))
nf = open(gtfspathout / jsfileout, "w", encoding="utf8")
jsonstr = json.dumps(json_list, separators=(',',':')) # smaller file for download
outstr = jsonstr.replace('}},{', '},\n').replace('[{', '{').replace('}]', '}')
nf.write('var munisWtpdperline =\n')
nf.write(outstr)
nf.close()
print(("Saved file: " + jsfileout))
#
# output txt file with tpd per line (agency_id+route_short_name) in muni - 'muni_w_tpd_per_line'+'_'+servicedate+'.txt'
#
fileout = open(gtfspathout / txtfileout4, 'w', encoding="utf8") # open file to save results
postsline = 'muni_id,tpwinmuni,total_tpd,line_name_i,maxdaylinetpd\n'
fileout.write(postsline)
for muni_id, [tpwinmuni, maxdaytpdinmuni, averagetpdinmuni, maxdaytpdperline_dict] in munisforoutput_dict.items():
for line_name_i, maxdaylinetpd in sorted(iter(maxdaytpdperline_dict.items()), reverse=True, key=lambda k_v:(k_v[1])):
postsline = muni_id+','+str(tpwinmuni)+','+str(maxdaytpdinmuni)+','+line_name_i+','+str(maxdaylinetpd)+'\n'
fileout.write(postsline)
fileout.close()
print(gtfspathout / txtfileout4)
print("Local current time :", time.asctime( time.localtime(time.time()) ))
| 42.772914
| 199
| 0.753254
|
b73d18886ad1da6bc89a367ca3040dc45d70275f
| 5,647
|
py
|
Python
|
django_jobvite/tests/test_syncing.py
|
Mozilla-GitHub-Standards/217dddf3a535a0407c3079dc3f9b7598fe49ea00dd496c094fc0fcc3fd99900d
|
4d3ca66b8de2e4a43e4dc4d88376f73d7768dc15
|
[
"BSD-3-Clause"
] | 4
|
2015-06-18T10:20:32.000Z
|
2018-01-31T01:23:51.000Z
|
django_jobvite/tests/test_syncing.py
|
Mozilla-GitHub-Standards/217dddf3a535a0407c3079dc3f9b7598fe49ea00dd496c094fc0fcc3fd99900d
|
4d3ca66b8de2e4a43e4dc4d88376f73d7768dc15
|
[
"BSD-3-Clause"
] | 6
|
2015-05-13T11:02:47.000Z
|
2019-03-28T03:43:23.000Z
|
django_jobvite/tests/test_syncing.py
|
Mozilla-GitHub-Standards/217dddf3a535a0407c3079dc3f9b7598fe49ea00dd496c094fc0fcc3fd99900d
|
4d3ca66b8de2e4a43e4dc4d88376f73d7768dc15
|
[
"BSD-3-Clause"
] | 6
|
2015-02-24T19:35:54.000Z
|
2019-03-28T03:43:23.000Z
|
from mock import Mock
import test_utils
from django_jobvite.management.commands import syncjobvite
from django_jobvite.models import Category, Position
one_position = """<result>
<job>
<id>oWqcfdsa</id>
<title>Software Engineer</title>
<requisitionid>1229</requisitionid>
<category>Engineering</category>
<jobtype>Full-Time</jobtype>
<location>Mountain View, CA</location>
<date>2/21/2011</date>
<detail-url>http://example.com/job</detail-url>
<apply-url>http://example.com/job</apply-url>
<description>I am a job<![CDATA[<br><script>alert('I am bad');</script>]]></description>
<briefdescription>...</briefdescription>
</job>
</result>"""
two_positions = """<result>
<job>
<id>oWqcfdsa</id>
<title>Software Engineer</title>
<requisitionid>1229</requisitionid>
<category>Engineering</category>
<jobtype>Full-Time</jobtype>
<location>Mountain View, CA</location>
<date>2/21/2011</date>
<detail-url>http://example.com/job</detail-url>
<apply-url>http://example.com/job</apply-url>
<description>I am job</description>
<briefdescription>...</briefdescription>
</job>
<job>
<id>fcOwxed</id>
<title>Software Engineer</title>
<requisitionid>1229</requisitionid>
<category>Engineering</category>
<jobtype>Full-Time</jobtype>
<location>Mountain View, CA</location>
<date>2/21/2011</date>
<detail-url>http://example.com/job</detail-url>
<apply-url>http://example.com/job</apply-url>
<description>I am job</description>
<briefdescription>...</briefdescription>
</job>
</result>"""
updated = """<result>
<job>
<id>oWqcfdsa</id>
<title>Software Developer</title>
<requisitionid>1229</requisitionid>
<category>Engineering</category>
<jobtype>Full-Time</jobtype>
<location>Mountain View, CA</location>
<date>2/21/2011</date>
<detail-url>http://example.com/job</detail-url>
<apply-url>http://example.com/job</apply-url>
<description>I am job</description>
<briefdescription>...</briefdescription>
</job>
<job>
<id>fcOwxed</id>
<title>Software Developer</title>
<requisitionid>1229</requisitionid>
<category>Engineering</category>
<jobtype>Full-Time</jobtype>
<location>Mountain View, CA</location>
<date>2/21/2011</date>
<detail-url>http://example.com/job</detail-url>
<apply-url>http://example.com/job</apply-url>
<description>I am job</description>
<briefdescription>...</briefdescription>
</job>
</result>"""
empty = """<result></result>"""
missing_field = """<result>
<job>
<id>oWqcfdsa</id>
<title>Software Developer</title>
<requisitionid>1229</requisitionid>
<category>Engineering</category>
<jobtype>Full-Time</jobtype>
<location>Mountain View, CA</location>
<date>2/21/2011</date>
<detail-url>http://example.com/job</detail-url>
<apply-url>http://example.com/job</apply-url>
<description>I am job</description>
<briefdescription>...</briefdescription>
</job>
<job>
<id>fcOwxed</id>
<title>Software Developer</title>
<requisitionid>1229</requisitionid>
<category>Engineering</category>
<jobtype>Full-Time</jobtype>
<location>Mountain View, CA</location>
<date>2/21/2011</date>
<detail-url>http://example.com/job</detail-url>
<apply-url>http://example.com/job</apply-url>
<description>I am job</description>
<briefdescription>...</briefdescription>
<location_x0020_filter>All</location_x0020_filter>
</job>
</result>"""
class SyncTests(test_utils.TestCase):
def setUp(self):
mocked_xml_func = Mock()
mocked_xml_func.return_value = one_position
self.command = syncjobvite.Command()
self.command._get_jobvite_xml = mocked_xml_func
def _assert_count(self, xml, expected):
"""
Run the sync with the provided xml and assert that the expected
number of ``Position`` models exist afterwards.
"""
self.command._get_jobvite_xml.return_value = xml
self.command.handle()
assert Position.objects.count() == expected
def test_adding_new(self):
"""Test that adding one position works."""
assert Position.objects.count() == 0
self._assert_count(one_position, 1)
def test_description_safe(self):
"""Test that bad tags are stripped."""
self.command.handle()
assert Position.objects.all()[0].description == "I am a job<br>alert('I am bad');"
def test_empty_xml(self):
"""Test that handling an empty xml doc does not delete db records."""
self._assert_count(one_position, 1)
self._assert_count(empty, 1)
def test_removing(self):
"""Test that removing one position works."""
self._assert_count(two_positions, 2)
self._assert_count(one_position, 1)
def test_empty_category(self):
"""Test that a category with no positions is removed."""
assert not Category.objects.exists()
def test_updating(self):
"""Test that updating fields in existing positions works."""
self._assert_count(two_positions, 2)
positions = Position.objects.all()
for position in positions:
assert position.title == 'Software Engineer'
self._assert_count(updated, 2)
positions = Position.objects.all()
for position in positions:
assert position.title == 'Software Developer'
def test_missing_field(self):
"""Fields missing from the XML doc should be empty."""
self.command._get_jobvite_xml.return_value = missing_field
self.command.handle()
assert Position.objects.get(job_id='oWqcfdsa').location_filter == ''
assert Position.objects.get(job_id='fcOwxed').location_filter == 'All'
| 33.023392
| 90
| 0.687268
|
7d5b7aae3d49a35db3b973b20ee9142a560ec483
| 1,561
|
py
|
Python
|
google/ads/googleads/v6/errors/types/campaign_draft_error.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v6/errors/types/campaign_draft_error.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v6/errors/types/campaign_draft_error.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v6.errors",
marshal="google.ads.googleads.v6",
manifest={"CampaignDraftErrorEnum",},
)
class CampaignDraftErrorEnum(proto.Message):
r"""Container for enum describing possible campaign draft errors."""
class CampaignDraftError(proto.Enum):
r"""Enum describing possible campaign draft errors."""
UNSPECIFIED = 0
UNKNOWN = 1
DUPLICATE_DRAFT_NAME = 2
INVALID_STATUS_TRANSITION_FROM_REMOVED = 3
INVALID_STATUS_TRANSITION_FROM_PROMOTED = 4
INVALID_STATUS_TRANSITION_FROM_PROMOTE_FAILED = 5
CUSTOMER_CANNOT_CREATE_DRAFT = 6
CAMPAIGN_CANNOT_CREATE_DRAFT = 7
INVALID_DRAFT_CHANGE = 8
INVALID_STATUS_TRANSITION = 9
MAX_NUMBER_OF_DRAFTS_PER_CAMPAIGN_REACHED = 10
LIST_ERRORS_FOR_PROMOTED_DRAFT_ONLY = 11
__all__ = tuple(sorted(__protobuf__.manifest))
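A brief, hypothetical usage sketch (not part of the generated file): proto-plus enums subclass IntEnum, so the error codes above can be read by name or numeric value.

# Hypothetical sketch; assumes the module is importable via the repo path shown above.
from google.ads.googleads.v6.errors.types.campaign_draft_error import (
    CampaignDraftErrorEnum,
)

error = CampaignDraftErrorEnum.CampaignDraftError.DUPLICATE_DRAFT_NAME
print(error.name, int(error))  # DUPLICATE_DRAFT_NAME 2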
| 32.520833
| 74
| 0.72902
|
1199dabc789c231a2f9aee8a6d12074851ca047d
| 1,835
|
py
|
Python
|
judge/decorators.py
|
shan18/Online-Judge
|
b03e1df9eaa91957b635b6527f4abf5509495b56
|
[
"MIT"
] | 1
|
2020-07-26T20:54:53.000Z
|
2020-07-26T20:54:53.000Z
|
judge/decorators.py
|
shan18/Online-Judge
|
b03e1df9eaa91957b635b6527f4abf5509495b56
|
[
"MIT"
] | null | null | null |
judge/decorators.py
|
shan18/Online-Judge
|
b03e1df9eaa91957b635b6527f4abf5509495b56
|
[
"MIT"
] | null | null | null |
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django.utils.decorators import available_attrs
from django.contrib import messages
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import login_required
default_message = "You must Log In first!"
def user_passes_test(test_func, message=default_message):
"""
    Decorator for views that checks that the user passes the given test,
    adding a message when the test fails. The test should be a callable
    that takes the user object and returns True if the user passes.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if not test_func(request.user):
messages.success(request, message)
return view_func(request, *args, **kwargs)
return _wrapped_view
return decorator
def login_required_message(function=None, message=default_message):
"""
    Decorator for views that checks that the user is logged in, adding a
    message if they are not.
"""
actual_decorator = user_passes_test(
lambda u: u.is_authenticated,
message=message,
)
if function:
return actual_decorator(function)
return actual_decorator
def login_required_message_and_redirect(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None, message=default_message):
if function:
return login_required_message(
login_required(function, redirect_field_name, login_url),
message
)
return lambda deferred_function: login_required_message_and_redirect(deferred_function, redirect_field_name, login_url, message)
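A minimal, hypothetical usage sketch (not shipped with the module): the view name and message text below are made up; only the decorator comes from judge/decorators.py above.

# Hypothetical usage sketch for login_required_message; `submit` is an illustrative view.
from django.http import HttpResponse

from judge.decorators import login_required_message


@login_required_message(message="Log in to submit a solution.")
def submit(request):
    # Anonymous users still reach the view, but a message has been queued for display.
    return HttpResponse("submission form")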
| 35.288462
| 138
| 0.73297
|
950f3e194fadae8a27d7553a4bbbdca1e818a930
| 5,400
|
py
|
Python
|
Project2Final/FaceRecognition/preprocess.py
|
201019-UiPath/Jewlz-TheBoyz-PlaylistAutomation-P2
|
72a866ef671740786d68ddb658fe19b9a553b0c9
|
[
"MIT"
] | null | null | null |
Project2Final/FaceRecognition/preprocess.py
|
201019-UiPath/Jewlz-TheBoyz-PlaylistAutomation-P2
|
72a866ef671740786d68ddb658fe19b9a553b0c9
|
[
"MIT"
] | null | null | null |
Project2Final/FaceRecognition/preprocess.py
|
201019-UiPath/Jewlz-TheBoyz-PlaylistAutomation-P2
|
72a866ef671740786d68ddb658fe19b9a553b0c9
|
[
"MIT"
] | 2
|
2020-11-22T22:34:28.000Z
|
2020-11-22T22:51:51.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from scipy import misc
import os
import tensorflow as tf
import numpy as np
import facenet
import detect_face
class preprocesses:
def __init__(self, input_datadir, output_datadir):
self.input_datadir = input_datadir
self.output_datadir = output_datadir
def collect_data(self):
output_dir = os.path.expanduser('~/OneDrive/Desktop/IdentificationModule/pre_img')
npy_dir = '~/OneDrive/Desktop/IdentificationModule/npy'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
dataset = facenet.get_dataset('~/OneDrive/Desktop/IdentificationModule/train_img')
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = detect_face.create_mtcnn(sess, os.path.expanduser(npy_dir))
minsize = 20 # minimum size of face
                threshold = [0.6, 0.7, 0.7]  # three steps' threshold
factor = 0.709 # scale factor
margin = 44
image_size = 182
# Add a random key to the filename to allow alignment using multiple processes
random_key = np.random.randint(0, high=99999)
bounding_boxes_filename = os.path.join(output_dir, 'bounding_boxes_%05d.txt' % random_key)
with open(bounding_boxes_filename, "w") as text_file:
nrof_images_total = 0
nrof_successfully_aligned = 0
for cls in dataset:
output_class_dir = os.path.join(output_dir, cls.name)
if not os.path.exists(output_class_dir):
os.makedirs(output_class_dir)
for image_path in cls.image_paths:
nrof_images_total += 1
filename = os.path.splitext(os.path.split(image_path)[1])[0]
output_filename = os.path.join(output_class_dir, filename + '.png')
print("Image: %s" % image_path)
if not os.path.exists(output_filename):
try:
img = misc.imread(image_path)
except (IOError, ValueError, IndexError) as e:
errorMessage = '{}: {}'.format(image_path, e)
print(errorMessage)
else:
if img.ndim < 2:
print('Unable to align "%s"' % image_path)
text_file.write('%s\n' % (output_filename))
continue
if img.ndim == 2:
img = facenet.to_rgb(img)
print('to_rgb data dimension: ', img.ndim)
img = img[:, :, 0:3]
bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold,
factor)
nrof_faces = bounding_boxes.shape[0]
print('No of Detected Face: %d' % nrof_faces)
if nrof_faces > 0:
det = bounding_boxes[:, 0:4]
img_size = np.asarray(img.shape)[0:2]
if nrof_faces > 1:
bounding_box_size = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])
img_center = img_size / 2
offsets = np.vstack([(det[:, 0] + det[:, 2]) / 2 - img_center[1],
(det[:, 1] + det[:, 3]) / 2 - img_center[0]])
offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
index = np.argmax(
bounding_box_size - offset_dist_squared * 2.0) # some extra weight on the centering
det = det[index, :]
det = np.squeeze(det)
bb_temp = np.zeros(4, dtype=np.int32)
bb_temp[0] = det[0]
bb_temp[1] = det[1]
bb_temp[2] = det[2]
bb_temp[3] = det[3]
cropped_temp = img[bb_temp[1]:bb_temp[3], bb_temp[0]:bb_temp[2], :]
scaled_temp = misc.imresize(cropped_temp, (image_size, image_size), interp='bilinear')
nrof_successfully_aligned += 1
misc.imsave(output_filename, scaled_temp)
text_file.write('%s %d %d %d %d\n' % (
output_filename, bb_temp[0], bb_temp[1], bb_temp[2], bb_temp[3]))
else:
print('Unable to align "%s"' % image_path)
text_file.write('%s\n' % (output_filename))
        return (nrof_images_total, nrof_successfully_aligned)
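A hedged usage sketch (the directory arguments are placeholders): note that collect_data() as written reads the hard-coded OneDrive paths rather than the constructor arguments, so those directories must already exist.

# Hypothetical usage sketch; paths are placeholders and the hard-coded
# directories inside collect_data() must exist with the layout expected
# by facenet.get_dataset().
pre = preprocesses(input_datadir="./train_img", output_datadir="./pre_img")
total, aligned = pre.collect_data()
print("aligned %d of %d images" % (aligned, total))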
| 51.923077
| 124
| 0.485741
|
b2b48a9046ac10edc4829deb0bd683ee02c539a3
| 3,300
|
py
|
Python
|
src/robot/parsing/lexer/tokens.py
|
JasperCraeghs/robotframework
|
856afa6ed2a16e39194b14bce138aa2044e0b0b6
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/robot/parsing/lexer/tokens.py
|
JasperCraeghs/robotframework
|
856afa6ed2a16e39194b14bce138aa2044e0b0b6
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/robot/parsing/lexer/tokens.py
|
JasperCraeghs/robotframework
|
856afa6ed2a16e39194b14bce138aa2044e0b0b6
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import py2to3
@py2to3
class Token(object):
SETTING_HEADER = 'SETTING_HEADER'
VARIABLE_HEADER = 'VARIABLE_HEADER'
TESTCASE_HEADER = 'TESTCASE_HEADER'
KEYWORD_HEADER = 'KEYWORD_HEADER'
COMMENT_HEADER = 'COMMENT_HEADER'
DOCUMENTATION = 'DOCUMENTATION'
SUITE_SETUP = 'SUITE_SETUP'
SUITE_TEARDOWN = 'SUITE_TEARDOWN'
METADATA = 'METADATA'
TEST_SETUP = 'TEST_SETUP'
TEST_TEARDOWN = 'TEST_TEARDOWN'
TEST_TEMPLATE = 'TEST_TEMPLATE'
TEST_TIMEOUT = 'TEST_TIMEOUT'
FORCE_TAGS = 'FORCE_TAGS'
DEFAULT_TAGS = 'DEFAULT_TAGS'
LIBRARY = 'LIBRARY'
RESOURCE = 'RESOURCE'
VARIABLES = 'VARIABLES'
SETUP = 'SETUP'
TEARDOWN = 'TEARDOWN'
TEMPLATE = 'TEMPLATE'
TIMEOUT = 'TIMEOUT'
TAGS = 'TAGS'
ARGUMENTS = 'ARGUMENTS'
RETURN = 'RETURN'
VARIABLE = 'VARIABLE'
ARGUMENT = 'ARGUMENT'
NAME = 'NAME'
ASSIGN = 'ASSIGN'
KEYWORD = 'KEYWORD'
FOR = 'FOR'
FOR_SEPARATOR = 'FOR_SEPARATOR'
OLD_FOR_INDENT = 'OLD_FOR_INDENT'
END = 'END'
SEPARATOR = 'SEPARATOR'
EOL = 'EOL'
COMMENT = 'COMMENT'
CONTINUATION = 'CONTINUATION'
IGNORE = 'IGNORE'
EOS = 'EOS'
ERROR = 'ERROR'
DATA = 'DATA'
NON_DATA_TOKENS = (
SEPARATOR,
COMMENT,
CONTINUATION,
IGNORE,
EOL,
EOS
)
SETTING_TOKENS = (
DOCUMENTATION,
SUITE_SETUP,
SUITE_TEARDOWN,
METADATA,
TEST_SETUP,
TEST_TEARDOWN,
TEST_TEMPLATE,
TEST_TIMEOUT,
FORCE_TAGS,
DEFAULT_TAGS,
LIBRARY,
RESOURCE,
VARIABLES,
SETUP,
TEARDOWN,
TEMPLATE,
TIMEOUT,
TAGS,
ARGUMENTS,
RETURN
)
HEADER_TOKENS = (
SETTING_HEADER,
VARIABLE_HEADER,
TESTCASE_HEADER,
KEYWORD_HEADER
)
__slots__ = ['type', 'value', 'lineno', 'columnno', 'error']
def __init__(self, type, value='', lineno=-1, columnno=-1):
self.type = type
self.value = value
self.lineno = lineno
self.columnno = columnno
self.error = None
def __unicode__(self):
return self.value
def __repr__(self):
return 'Token(%s, %r, %s, %s)' % (self.type, self.value,
self.lineno, self.columnno)
class EOS(Token):
__slots__ = []
def __init__(self, lineno=-1, columnno=-1):
Token.__init__(self, Token.EOS, '', lineno, columnno)
@classmethod
def from_token(cls, token):
return EOS(token.lineno, token.columnno + len(token.value))
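A small illustrative sketch (not part of the module) of pairing a data token with the EOS token that follows it:

# Hypothetical sketch: build a NAME token and derive the trailing EOS.
name = Token(Token.NAME, 'Example Test', lineno=3, columnno=1)
eos = EOS.from_token(name)
print(repr(name))  # Token(NAME, 'Example Test', 3, 1)
print(repr(eos))   # Token(EOS, '', 3, 13) -- column is pushed past the value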
| 25.384615
| 75
| 0.615152
|
2673f6fbaac2197895b7dd049352bf7fe6deb04d
| 990
|
py
|
Python
|
src/logger.py
|
jruberg/Pyty
|
db7da06a696e170e2e6b7f4b16f59715154bd628
|
[
"MIT"
] | 2
|
2017-07-18T22:20:17.000Z
|
2022-02-17T14:07:05.000Z
|
src/logger.py
|
jruberg/Pyty
|
db7da06a696e170e2e6b7f4b16f59715154bd628
|
[
"MIT"
] | null | null | null |
src/logger.py
|
jruberg/Pyty
|
db7da06a696e170e2e6b7f4b16f59715154bd628
|
[
"MIT"
] | null | null | null |
import math
import logging
from settings import LOG_LEVEL, LOG_DIR, LOGFILE, FILE_DEBUG
logging.basicConfig(level=LOG_LEVEL, filename=LOG_DIR+LOGFILE,
format='%(asctime)s: %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S')
_MARGIN = len('%s, %s %s %s %s:%s:%s: ' %
('Thu', '24', 'Mar', '2011', '17', '09', '37'))
class Logger:
nl = "\n" + (" " * _MARGIN)
def __init__(self):
self.in_debug_file = False
def enter_debug_file(self):
self.in_debug_file = True
def exit_debug_file(self):
self.in_debug_file = False
def debug(self, s, cond=True):
if FILE_DEBUG and self.in_debug_file and cond:
logging.debug(s.replace('\n', Logger.nl))
def announce_file(filename):
gen_width = 40 - len(filename)
lwidth = int(math.ceil(gen_width / 2.0))
rwidth = int(math.floor(gen_width / 2.0))
logging.debug("="*lwidth + " RUNNING " + filename.upper() + " " + "="*rwidth)
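An illustrative sketch (assumes FILE_DEBUG is enabled in settings): debug lines are only written between enter_debug_file() and exit_debug_file(), and embedded newlines are re-indented to the timestamp margin.

# Hypothetical usage sketch; relies on the settings imported at the top of the module.
announce_file("example.py")

log = Logger()
log.enter_debug_file()
log.debug("checking assignment\nexpected: int, actual: int")  # newline re-indented
log.exit_debug_file()
log.debug("suppressed: not inside a debug file")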
| 29.117647
| 81
| 0.586869
|
55a6ccd0c518982201f57d17dec48cd90ac29c19
| 5,322
|
py
|
Python
|
datageneration/generate_config.py
|
atoaiari/surreal
|
3f8d0d2a837e07511add210c7f62d1f8ee7f0f0d
|
[
"MIT-CMU",
"OLDAP-2.2.1"
] | null | null | null |
datageneration/generate_config.py
|
atoaiari/surreal
|
3f8d0d2a837e07511add210c7f62d1f8ee7f0f0d
|
[
"MIT-CMU",
"OLDAP-2.2.1"
] | null | null | null |
datageneration/generate_config.py
|
atoaiari/surreal
|
3f8d0d2a837e07511add210c7f62d1f8ee7f0f0d
|
[
"MIT-CMU",
"OLDAP-2.2.1"
] | null | null | null |
import json
import argparse
import sys
import configs.config as config
import os
import numpy as np
import random
from datetime import datetime
from utils.utils import *
def main():
parser = argparse.ArgumentParser(description="Generate synth dataset images for disentanglement.")
parser.add_argument("--frames", type=int, help="frames to use from the sequence", default=2)
parser.add_argument("--gender", type=int,
help="-1: both, 0: female, 1: male", default=-1)
parser.add_argument("--backgrounds", type=int,
help="number of backgrounds", default=10)
parser.add_argument("--orientations", type=int, choices=[4, 8, 16], default=4,
help="number of orientation classes")
parser.add_argument("--shapes", type=int, default=4,
help="number of shapes")
parser.add_argument("--textures", type=int, default=8,
help="number of textures")
parser.add_argument("--reset", action="store_true", help="reset the generation config file, even if it already exists")
parser.add_argument("path", help="basic config path")
args = parser.parse_args()
configuration_dict = {}
params = config.load_file(args.path, "SYNTH_DATA")
if not os.path.isfile(os.path.join(params["output_path"], "generation_config.json")) or args.reset:
seed_number = 11
random.seed(seed_number)
np.random.seed(seed_number)
configuration_dict.update(params)
configuration_dict["created"] = datetime.now().strftime("%d-%m-%Y-%H-%M")
configuration_dict["factors"] = {"frames_per_sequence": args.frames}
# backgrounds
bg_names = os.path.join(params["bg_path"], 'train_img.txt')
nh_txt_paths = []
with open(bg_names) as f:
for line in f:
nh_txt_paths.append(os.path.join(params["bg_path"], line[:-1]))
# backgrounds = np.random.choice(nh_txt_paths[:-1], args.backgrounds, replace=False)
backgrounds = nh_txt_paths[:args.backgrounds]
configuration_dict["factors"]["backgrounds"] = backgrounds
# gender
genders = {0: 'female', 1: 'male'}
# set gender.
if args.gender == -1:
gender = [genders.get(g) for g in genders]
else:
gender = genders.get(args.gender)
configuration_dict["factors"]["gender"] = gender
# orientations
configuration_dict["factors"]["orientations"] = list(np.arange(0, 360, (360/args.orientations)))
# clothing/textures
assert args.textures % 2 == 0
textures = []
for igndr, gndr in enumerate(gender):
with open(os.path.join(params["smpl_data_folder"], 'textures', '%s_%s.txt' % (gndr, 'train'))) as f:
txt_paths = f.read().splitlines()
# if using only one source of clothing
if params["clothing_option"] == 'nongrey':
clothing_txt_paths = [k for k in txt_paths if 'nongrey' in k]
elif params["clothing_option"] == 'grey':
clothing_txt_paths = [k for k in txt_paths if 'nongrey' not in k]
textures.extend(np.random.choice(clothing_txt_paths, size=int(args.textures / 2), replace=False))
configuration_dict["factors"]["textures"] = textures
# shapes (extracted only from female model)
ndofs = 10
gndr = "female"
smpl_data = np.load(os.path.join(params["smpl_data_folder"], params["smpl_data_filename"]))
fshapes = smpl_data['%sshapes' % gndr][:, :ndofs]
nb_fshapes = len(fshapes)
fshapes = fshapes[:int(nb_fshapes*0.8)] # train split
shapes_idx = np.random.choice(np.arange(len(fshapes)), size=args.shapes, replace=False)
shapes = fshapes[shapes_idx]
configuration_dict["factors"]["shapes"] = shapes
# light
configuration_dict["sh_coeffs"] = .7 * (2 * np.random.rand(9) - 1)
configuration_dict["sh_coeffs"][0] = .5 + .9 * np.random.rand() # Ambient light (first coeff) needs a minimum is ambient. Rest is uniformly distributed, higher means brighter.
configuration_dict["sh_coeffs"][1] = -.7 * np.random.rand()
# camera distance
# configuration_dict["camera_distance"] = np.random.normal(8.0, 1)
configuration_dict["camera_distance"] = 7.2 # fixed not random
if args.reset and os.path.exists(params["output_path"]) and params["output_path"] != "" and params["output_path"] != "/":
os.system(f"rm -rf {params['output_path']}")
os.makedirs(params["output_path"], exist_ok=True)
folders = ["info", "images", "logs", "dataset"]
for folder in folders:
os.makedirs(os.path.join(params["output_path"], folder), exist_ok=True)
configuration_dict[f"{folder}_path"] = str(os.path.join(params["output_path"], folder))
with open(os.path.join(params["output_path"], "generation_config.json"), "w", encoding="utf-8") as f:
json.dump(configuration_dict, f, ensure_ascii=False, indent=4, cls=NumpyEncoder)
print("Generated a new configuration file!")
else:
print("Configuration file already exists!")
if __name__ == "__main__":
main()
| 46.278261
| 184
| 0.623826
|
4cb85dad7a525d60e695768345c733a9df4ccf69
| 460
|
py
|
Python
|
pjsip/tests/pjsua/scripts-pesq/200_codec_g722.py
|
tomorrow-rain/pjsip
|
776e032c4ee2672cd42b8c665021b1310181d126
|
[
"MIT"
] | null | null | null |
pjsip/tests/pjsua/scripts-pesq/200_codec_g722.py
|
tomorrow-rain/pjsip
|
776e032c4ee2672cd42b8c665021b1310181d126
|
[
"MIT"
] | null | null | null |
pjsip/tests/pjsua/scripts-pesq/200_codec_g722.py
|
tomorrow-rain/pjsip
|
776e032c4ee2672cd42b8c665021b1310181d126
|
[
"MIT"
] | null | null | null |
# $Id$
#
from inc_cfg import *
ADD_PARAM = ""
if (HAS_SND_DEV == 0):
ADD_PARAM += "--null-audio"
# Call with G722 codec
test_param = TestParam(
"PESQ codec G722",
[
InstanceParam("UA1", ADD_PARAM + " --max-calls=1 --add-codec g722 --clock-rate 16000 --play-file wavs/input.16.wav"),
InstanceParam("UA2", "--null-audio --max-calls=1 --add-codec g722 --clock-rate 16000 --rec-file wavs/tmp.16.wav --auto-answer 200")
]
)
pesq_threshold = 3.7
| 23
| 135
| 0.65
|
20abfa850822153e9cfa769054ad5205f89010c6
| 7,111
|
py
|
Python
|
test/unit/test_gcs_util.py
|
csdev/snowflake-connector-python
|
db054fd4490ac006ef633ed12d852bb09941068e
|
[
"Apache-2.0"
] | null | null | null |
test/unit/test_gcs_util.py
|
csdev/snowflake-connector-python
|
db054fd4490ac006ef633ed12d852bb09941068e
|
[
"Apache-2.0"
] | 14
|
2021-01-26T06:53:10.000Z
|
2022-03-14T11:16:54.000Z
|
test/unit/test_gcs_util.py
|
csdev/snowflake-connector-python
|
db054fd4490ac006ef633ed12d852bb09941068e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2021 Snowflake Computing Inc. All right reserved.
#
import logging
import mock
import pytest
from snowflake.connector.constants import ResultStatus
from ..randomize import random_string
pytestmark = pytest.mark.gcp
try:
from snowflake.connector.gcs_util import SnowflakeGCSUtil # NOQA
except ImportError:
SnowflakeGCSUtil = None
# We need these for our OldDriver tests. We run the most up-to-date tests with the oldest supported driver version
try:
from snowflake.connector.vendored import requests # NOQA
vendored_request = True
except ImportError: # pragma: no cover
import requests
vendored_request = False
def test_create_client(caplog):
"""Creates a GCSUtil with an access token."""
caplog.set_level(logging.DEBUG, 'snowflake.connector')
client = SnowflakeGCSUtil.create_client({'creds': {'GCS_ACCESS_TOKEN': 'fake_token'}})
assert client is not None
assert client == 'fake_token'
@pytest.mark.xfail(reason='Newer version support access token. This test is obsoleted')
def test_native_download_access_token(caplog):
"""Tests that GCS access token error is correctly logged when downloading."""
caplog.set_level(logging.DEBUG, 'snowflake.connector')
meta = {}
SnowflakeGCSUtil._native_download_file(meta, None, 99)
assert meta['result_status'] == ResultStatus.ERROR
assert (('snowflake.connector.gcs_util', logging.ERROR, "GCS download operation with an access token is "
"currently unsupported") in caplog.record_tuples)
@pytest.mark.xfail(reason='Newer version support access token. This test is obsoleted')
def test_native_upload_access_token(caplog):
"""Tests that GCS access token error is correctly logged when uploading."""
caplog.set_level(logging.DEBUG, 'snowflake.connector')
meta = {}
SnowflakeGCSUtil.upload_file(None, meta, None, 99)
assert meta['result_status'] == ResultStatus.ERROR
assert (('snowflake.connector.gcs_util', logging.ERROR, "GCS upload operation with an access token is "
"currently unsupported") in caplog.record_tuples)
@pytest.mark.parametrize('errno', [403, 408, 429, 500, 503])
def test_upload_retry_errors(errno, tmpdir):
"""Tests whether retryable errors are handled correctly when upploading."""
f_name = str(tmpdir.join('some_file.txt'))
resp = requests.Response()
resp.status_code = errno
meta = {'presigned_url': ['some_url'], 'sha256_digest': 'asd'}
with open(f_name, 'w') as f:
f.write(random_string(15))
with mock.patch('snowflake.connector.vendored.requests.put' if vendored_request else 'requests.put',
side_effect=requests.exceptions.HTTPError(response=resp)):
SnowflakeGCSUtil.upload_file(f_name, meta, None, 99)
assert isinstance(meta['last_error'], requests.exceptions.HTTPError)
assert meta['result_status'] == ResultStatus.NEED_RETRY
def test_upload_uncaught_exception(tmpdir):
"""Tests whether non-retryable errors are handled correctly when uploading."""
f_name = str(tmpdir.join('some_file.txt'))
resp = requests.Response()
resp.status_code = 501
meta = {'presigned_url': ['some_url'], 'sha256_digest': 'asd'}
with open(f_name, 'w') as f:
f.write(random_string(15))
with mock.patch('snowflake.connector.vendored.requests.put' if vendored_request else 'requests.put',
side_effect=requests.exceptions.HTTPError(response=resp)):
with pytest.raises(requests.exceptions.HTTPError):
SnowflakeGCSUtil.upload_file(f_name, meta, None, 99)
@pytest.mark.parametrize('errno', [403, 408, 429, 500, 503])
def test_download_retry_errors(errno, tmpdir):
"""Tests whether retryable errors are handled correctly when downloading."""
resp = requests.Response()
resp.status_code = errno
meta = {'presigned_url': ['some_url'], 'sha256_digest': 'asd'}
with mock.patch('snowflake.connector.vendored.requests.get' if vendored_request else 'requests.get',
side_effect=requests.exceptions.HTTPError(response=resp)):
SnowflakeGCSUtil._native_download_file(meta, str(tmpdir), 99)
assert isinstance(meta['last_error'], requests.exceptions.HTTPError)
assert meta['result_status'] == ResultStatus.NEED_RETRY
def test_download_uncaught_exception(tmpdir):
"""Tests whether non-retryable errors are handled correctly when downloading."""
resp = requests.Response()
resp.status_code = 501
meta = {'presigned_url': ['some_url'], 'sha256_digest': 'asd'}
with mock.patch('snowflake.connector.vendored.requests.get' if vendored_request else 'requests.get',
side_effect=requests.exceptions.HTTPError(response=resp)):
with pytest.raises(requests.exceptions.HTTPError):
SnowflakeGCSUtil._native_download_file(meta, str(tmpdir), 99)
def test_upload_put_timeout(tmpdir, caplog):
"""Tests whether timeout error is handled correctly when uploading."""
caplog.set_level(logging.DEBUG, 'snowflake.connector')
f_name = str(tmpdir.join('some_file.txt'))
resp = requests.Response()
meta = {'presigned_url': ['some_url'], 'sha256_digest': 'asd'}
with open(f_name, 'w') as f:
f.write(random_string(15))
with mock.patch('snowflake.connector.vendored.requests.put' if vendored_request else 'requests.put',
side_effect=requests.exceptions.Timeout(response=resp)):
SnowflakeGCSUtil.upload_file(f_name, meta, None, 99)
assert isinstance(meta['last_error'], requests.exceptions.Timeout)
assert meta['result_status'] == ResultStatus.NEED_RETRY
assert all([log in caplog.record_tuples for log in [
('snowflake.connector.gcs_util', logging.DEBUG, 'GCS file upload Timeout Error: ')
]])
def test_upload_get_timeout(tmpdir, caplog):
"""Tests whether timeout error is handled correctly when downloading."""
caplog.set_level(logging.DEBUG, 'snowflake.connector')
resp = requests.Response()
meta = {'presigned_url': ['some_url'], 'sha256_digest': 'asd'}
with mock.patch('snowflake.connector.vendored.requests.get' if vendored_request else 'requests.get',
side_effect=requests.exceptions.Timeout(response=resp)):
SnowflakeGCSUtil._native_download_file(meta, str(tmpdir), 99)
assert isinstance(meta['last_error'], requests.exceptions.Timeout)
assert meta['result_status'] == ResultStatus.NEED_RETRY
assert ('snowflake.connector.gcs_util', logging.DEBUG, 'GCS file download Timeout Error: ') in caplog.record_tuples
def test_get_file_header_none_with_presigned_url():
"""Tests whether default file handle created by get_file_header is as expected."""
file_header = SnowflakeGCSUtil.get_file_header({"presigned_url": "www.example.com"}, 'file')
assert file_header.digest is None
assert file_header.content_length is None
assert file_header.encryption_metadata is None
| 46.477124
| 119
| 0.714105
|
2ee21f996d50186850b416c595518a39f7ecd18b
| 1,455
|
py
|
Python
|
widgets/SingleChoiceDialog.py
|
iubica/wx-portfolio
|
12101986db72bcaffd9b744d514d6f9f651ad5a1
|
[
"MIT"
] | 3
|
2018-03-19T07:57:10.000Z
|
2021-07-05T08:55:14.000Z
|
widgets/SingleChoiceDialog.py
|
iubica/wx-portfolio
|
12101986db72bcaffd9b744d514d6f9f651ad5a1
|
[
"MIT"
] | 6
|
2020-03-24T15:40:18.000Z
|
2021-12-13T19:46:09.000Z
|
widgets/SingleChoiceDialog.py
|
iubica/wx-portfolio
|
12101986db72bcaffd9b744d514d6f9f651ad5a1
|
[
"MIT"
] | 4
|
2018-03-29T21:59:55.000Z
|
2019-12-16T14:56:38.000Z
|
#!/usr/bin/env python
import wx
#---------------------------------------------------------------------------
class TestPanel(wx.Panel):
def __init__(self, parent, log):
self.log = log
wx.Panel.__init__(self, parent, -1)
b = wx.Button(self, -1, "Create and Show a SingleChoiceDialog", (50,50))
self.Bind(wx.EVT_BUTTON, self.OnButton, b)
def OnButton(self, evt):
dlg = wx.SingleChoiceDialog(
self, 'Test Single Choice', 'The Caption',
['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight'],
wx.CHOICEDLG_STYLE
)
if dlg.ShowModal() == wx.ID_OK:
self.log.WriteText('You selected: %s\n' % dlg.GetStringSelection())
dlg.Destroy()
#---------------------------------------------------------------------------
def runTest(frame, nb, log):
win = TestPanel(nb, log)
return win
#---------------------------------------------------------------------------
overview = """\
This class represents a dialog that shows a list of strings, and allows the user
to select one. Double-clicking on a list item is equivalent to single-clicking
and then pressing OK.
As with all dialogs, be sure to retrieve the information you need BEFORE you
destroy the dialog.
"""
if __name__ == '__main__':
import sys,os
import run
run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
| 25.526316
| 89
| 0.512027
|
2b102dd13ee5bdf15f767521be3108d70fc60464
| 1,200
|
py
|
Python
|
spider1/migrations/0001_initial.py
|
EricMbuthia/SeleniumDjangoWebscraping
|
27954bcf02b895b3c1001f5924433d6aaf3f195e
|
[
"MIT"
] | null | null | null |
spider1/migrations/0001_initial.py
|
EricMbuthia/SeleniumDjangoWebscraping
|
27954bcf02b895b3c1001f5924433d6aaf3f195e
|
[
"MIT"
] | null | null | null |
spider1/migrations/0001_initial.py
|
EricMbuthia/SeleniumDjangoWebscraping
|
27954bcf02b895b3c1001f5924433d6aaf3f195e
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.4 on 2021-10-30 06:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ScrapeRecordsInventory',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rec_date', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='ScrapeRecords',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('owners_name', models.CharField(max_length=100)),
('property_value_current_year', models.CharField(max_length=100)),
('property_value_next_year', models.CharField(max_length=100)),
('tax_value', models.CharField(max_length=100)),
('record_ref', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='spider1.scraperecordsinventory')),
],
),
]
| 35.294118
| 132
| 0.608333
|
d07436e1d8b047ce720afac7dec09d9b23a6d0ab
| 19,747
|
py
|
Python
|
ravendb/documents/operations/batch.py
|
ravendb/RavenDB-Python-Client
|
6286b459b501e755fe8e8591a48acf8616605ccd
|
[
"MIT"
] | 8
|
2016-10-08T17:45:44.000Z
|
2018-05-29T12:16:43.000Z
|
ravendb/documents/operations/batch.py
|
ravendb/RavenDB-Python-Client
|
6286b459b501e755fe8e8591a48acf8616605ccd
|
[
"MIT"
] | 5
|
2017-02-12T15:50:53.000Z
|
2017-09-18T12:25:01.000Z
|
ravendb/documents/operations/batch.py
|
ravendb/RavenDB-Python-Client
|
6286b459b501e755fe8e8591a48acf8616605ccd
|
[
"MIT"
] | 8
|
2016-07-03T07:59:12.000Z
|
2017-09-18T11:22:23.000Z
|
from copy import deepcopy
from typing import Union, List, Dict, TYPE_CHECKING, Optional
from ravendb import constants
from ravendb.documents.commands.batches import SingleNodeBatchCommand, ClusterWideBatchCommand, CommandType
from ravendb.documents.operations.patch import PatchStatus
from ravendb.documents.session.event_args import AfterSaveChangesEventArgs
from ravendb.documents.session.misc import TransactionMode
from ravendb.documents.session.document_info import DocumentInfo
from ravendb.exceptions.raven_exceptions import ClientVersionMismatchException
from ravendb.json.result import BatchCommandResult
from ravendb.tools.utils import CaseInsensitiveDict
if TYPE_CHECKING:
from ravendb.documents.session.in_memory_document_session_operations import InMemoryDocumentSessionOperations
class BatchOperation:
def __init__(self, session: "InMemoryDocumentSessionOperations"):
self.__session = session
self.__entities: List[object] = []
self.__session_commands_count: Union[None, int] = None
self.__all_commands_count: Union[None, int] = None
self.__on_successful_request: Union[
None, "InMemoryDocumentSessionOperations.SaveChangesData.ActionsToRunOnSuccess"
] = None
self.__modifications: Union[None, Dict[str, DocumentInfo]] = None
def create_request(self) -> Union[None, SingleNodeBatchCommand]:
result = self.__session.prepare_for_save_changes()
self.__on_successful_request = result.on_success
self.__session_commands_count = len(result.session_commands)
result.session_commands.extend(result.deferred_commands)
self.__session.validate_cluster_transaction(result)
self.__all_commands_count = len(result.session_commands)
if self.__all_commands_count == 0:
return None
self.__session.increment_requests_count()
self.__entities = result.entities
if self.__session.transaction_mode == TransactionMode.CLUSTER_WIDE:
return ClusterWideBatchCommand(
self.__session.conventions,
result.session_commands,
result.options,
self.__session.disable_atomic_document_writes_in_cluster_wide_transaction,
)
return SingleNodeBatchCommand(self.__session.conventions, result.session_commands, result.options)
def set_result(self, result: BatchCommandResult) -> None:
def get_command_type(obj_node: dict) -> CommandType:
c_type = obj_node.get("Type")
if not c_type:
return CommandType.NONE
type_as_str = str(c_type)
command_type = CommandType.parse_csharp_value(type_as_str)
return command_type
if result.results is None:
self.__throw_on_null_result()
return
self.__on_successful_request.clear_session_state_after_successful_save_changes()
if self.__session.transaction_mode == TransactionMode.CLUSTER_WIDE:
if result.transaction_index <= 0:
raise ClientVersionMismatchException(
"Cluster transaction was send to a node that is not supporting "
"it. So it was executed ONLY on the requested node on " + self.__session.request_executor.url
)
for i in range(self.__session_commands_count):
batch_result = result.results[i]
if batch_result is None:
continue
command_type = get_command_type(batch_result)
if command_type == CommandType.PUT:
self.__handle_put(i, batch_result, False)
elif command_type == CommandType.FORCE_REVISION_CREATION:
self.__handle_force_revision_creation(batch_result)
elif command_type == CommandType.DELETE:
self.__handle_delete(batch_result)
elif command_type == CommandType.COMPARE_EXCHANGE_PUT:
self.__handle_compare_exchange_put(batch_result)
elif command_type == CommandType.COMPARE_EXCHANGE_DELETE:
self.__handle_compare_exchange_delete(batch_result)
else:
raise ValueError(f"Command {command_type} is not supported")
for i in range(self.__session_commands_count, self.__all_commands_count):
batch_result = result.results[i]
if batch_result is None:
continue
command_type = get_command_type(batch_result)
if command_type == CommandType.PUT:
self.__handle_put(i, batch_result, False)
elif command_type == CommandType.DELETE:
self.__handle_delete(batch_result)
elif command_type == CommandType.PATCH:
self.__handle_patch(batch_result)
elif command_type == CommandType.ATTACHMENT_PUT:
self.__handle_attachment_put(batch_result)
elif command_type == CommandType.ATTACHMENT_DELETE:
self.__handle_attachment_delete(batch_result)
elif command_type == CommandType.ATTACHMENT_MOVE:
self.__handle_attachment_move(batch_result)
elif command_type == CommandType.ATTACHMENT_COPY:
self.__handle_attachment_copy(batch_result)
            elif command_type in (
                CommandType.COMPARE_EXCHANGE_PUT,
                CommandType.COMPARE_EXCHANGE_DELETE,
                CommandType.FORCE_REVISION_CREATION,
            ):
pass
elif command_type == CommandType.COUNTERS:
self.__handle_counters(batch_result)
elif command_type == CommandType.TIME_SERIES_COPY or command_type == CommandType.BATCH_PATCH:
break
else:
raise ValueError(f"Command {command_type} is not supported")
self.__finalize_result()
def __finalize_result(self):
if not self.__modifications:
return
for key, document_info in self.__modifications.items():
self.__apply_metadata_modifications(key, document_info)
def __apply_metadata_modifications(self, key: str, document_info: DocumentInfo):
document_info.metadata_instance = None
document_info.metadata = deepcopy(document_info.metadata)
document_info.metadata[constants.Documents.Metadata.CHANGE_VECTOR] = document_info.change_vector
document_copy = deepcopy(document_info.document)
document_copy[constants.Documents.Metadata.KEY] = document_info.metadata
document_info.document = document_copy
def __get_or_add_modifications(
self, key: str, document_info: DocumentInfo, apply_modifications: bool
) -> DocumentInfo:
if not self.__modifications:
self.__modifications = CaseInsensitiveDict()
modified_document_info = self.__modifications.get(key)
if modified_document_info is not None:
if apply_modifications:
self.__apply_metadata_modifications(key, modified_document_info)
else:
self.__modifications[key] = modified_document_info = document_info
return modified_document_info
def __handle_compare_exchange_put(self, batch_result: dict) -> None:
self.__handle_compare_exchange_internal(CommandType.COMPARE_EXCHANGE_PUT, batch_result)
def __handle_compare_exchange_delete(self, batch_result: dict) -> None:
self.__handle_compare_exchange_internal(CommandType.COMPARE_EXCHANGE_DELETE, batch_result)
def __handle_compare_exchange_internal(self, command_type: CommandType, batch_result: dict) -> None:
key: str = batch_result.get("Key")
if not key:
self.__throw_missing_field(command_type, "Key")
index: int = batch_result.get("Index")
if not index:
self.__throw_missing_field(command_type, "Index")
cluster_session = self.__session.cluster_session
cluster_session.update_state(key, index)
def __handle_patch(self, batch_result: dict) -> None:
patch_status = batch_result.get("PatchStatus")
if not patch_status:
self.__throw_missing_field(CommandType.PATCH, "PatchStatus")
status = PatchStatus(patch_status)
        if status in (PatchStatus.CREATED, PatchStatus.PATCHED):
document = batch_result.get("ModifiedDocument")
if not document:
return
key = self.__get_string_field(batch_result, CommandType.PUT, "Id")
session_document_info = self.__session.documents_by_id.get(key)
            if session_document_info is None:
return
document_info = self.__get_or_add_modifications(key, session_document_info, True)
change_vector = self.__get_string_field(batch_result, CommandType.PATCH, "ChangeVector")
last_modified = self.__get_string_field(batch_result, CommandType.PATCH, "LastModified")
document_info.change_vector = change_vector
document_info.metadata[constants.Documents.Metadata.KEY] = key
document_info.metadata[constants.Documents.Metadata.CHANGE_VECTOR] = change_vector
document_info.metadata[constants.Documents.Metadata.LAST_MODIFIED] = last_modified
document_info.document = document
self.__apply_metadata_modifications(key, document_info)
if document_info.entity is not None:
self.__session.entity_to_json.populate_entity(document_info.entity, key, document_info.document)
                self.__session.after_save_changes_invoke(
                    AfterSaveChangesEventArgs(self.__session, document_info.key, document_info.entity)
                )
def __handle_delete(self, batch_result: dict) -> None:
self.__handle_delete_internal(batch_result, CommandType.DELETE)
def __handle_delete_internal(self, batch_result: dict, command_type: CommandType):
key = self.__get_string_field(batch_result, command_type, "Id")
document_info = self.__session.documents_by_id.get(key)
if document_info is None:
return
self.__session.documents_by_id.pop(key, None)
if document_info.entity is not None:
self.__session.documents_by_entity.pop(document_info.entity, None)
self.__session.deleted_entities.discard(document_info.entity)
def __handle_force_revision_creation(self, batch_result: dict) -> None:
if not self.__get_boolean_field(batch_result, CommandType.FORCE_REVISION_CREATION, "RevisionCreated"):
# no forced revision was created...nothing to update
return
key = self.__get_string_field(
batch_result, CommandType.FORCE_REVISION_CREATION, constants.Documents.Metadata.KEY
)
change_vector = self.__get_string_field(
batch_result, CommandType.FORCE_REVISION_CREATION, constants.Documents.Metadata.CHANGE_VECTOR
)
document_info = self.__session.documents_by_id.get(key)
if not document_info:
return
document_info.change_vector = change_vector
self.__handle_metadata_modifications(document_info, batch_result, key, change_vector)
        self.__session.after_save_changes_invoke(
            AfterSaveChangesEventArgs(self.__session, document_info.key, document_info.entity)
        )
def __handle_put(self, index: int, batch_result: dict, is_deferred: bool) -> None:
entity = None
document_info = None
if not is_deferred:
entity = self.__entities[index]
document_info = self.__session.documents_by_entity.get(entity)
if document_info is None:
return
key = self.__get_string_field(batch_result, CommandType.PUT, constants.Documents.Metadata.ID)
change_vector = self.__get_string_field(
batch_result, CommandType.PUT, constants.Documents.Metadata.CHANGE_VECTOR
)
if is_deferred:
session_document_info = self.__session.documents_by_id.get(key)
if session_document_info is None:
return
document_info = self.__get_or_add_modifications(key, session_document_info, True)
entity = document_info.entity
self.__handle_metadata_modifications(document_info, batch_result, key, change_vector)
self.__session.documents_by_id.update({document_info.key: document_info})
if entity:
self.__session.generate_entity_id_on_the_client.try_set_identity(entity, key)
self.__session.after_save_changes_invoke(
AfterSaveChangesEventArgs(self.__session, document_info.key, document_info.entity)
)
def __handle_metadata_modifications(
self, document_info: DocumentInfo, batch_result: dict, key: str, change_vector: str
) -> None:
for property_name, value in batch_result.items():
if "Type" == property_name:
continue
document_info.metadata[property_name] = value
document_info.key = key
document_info.change_vector = change_vector
self.__apply_metadata_modifications(key, document_info)
def __handle_counters(self, batch_result: dict) -> None:
doc_id = self.__get_string_field(batch_result, CommandType.COUNTERS, "Id")
counters_detail: dict = batch_result.get("CountersDetail")
if counters_detail is None:
self.__throw_missing_field(CommandType.COUNTERS, "CountersDetail")
counters = counters_detail.get("Counters")
if counters is None:
self.__throw_missing_field(CommandType.COUNTERS, "Counters")
cache = self.__session.counters_by_doc_id[doc_id]
if cache is None:
cache = [False, CaseInsensitiveDict()]
self.__session.counters_by_doc_id[doc_id] = cache
change_vector = self.__get_string_field(batch_result, CommandType.COUNTERS, "DocumentChangeVector", False)
if change_vector is not None:
document_info = self.__session.documents_by_id.get(doc_id)
if document_info is not None:
document_info.change_vector = change_vector
for counter in counters:
counter: dict
name = counter.get("CounterName")
value = counter.get("TotalValue")
            if name is not None and value is not None:
                cache[1][name] = value
def __handle_attachment_put(self, batch_result: dict) -> None:
self.__handle_attachment_put_internal(
batch_result, CommandType.ATTACHMENT_PUT, "Id", "Name", "DocumentChangeVector"
)
def __handle_attachment_copy(self, batch_result: dict) -> None:
self.__handle_attachment_put_internal(
batch_result, CommandType.ATTACHMENT_COPY, "Id", "Name", "DocumentChangeVector"
)
def __handle_attachment_move(self, batch_result: dict) -> None:
self.__handle_attachment_delete_internal(
batch_result, CommandType.ATTACHMENT_MOVE, "Id", "Name", "DocumentChangeVector"
)
self.__handle_attachment_put_internal(
batch_result, CommandType.ATTACHMENT_MOVE, "DestinationId", "DestinationName", "DocumentChangeVector"
)
def __handle_attachment_delete(self, batch_result: dict) -> None:
self.__handle_attachment_delete_internal(
batch_result, CommandType.ATTACHMENT_DELETE, constants.Documents.Metadata.ID, "Name", "DocumentChangeVector"
)
def __handle_attachment_delete_internal(
self,
batch_result: dict,
command_type: CommandType,
id_field_name: str,
attachment_name_field_name: str,
document_change_vector_field_name: str,
) -> None:
key = self.__get_string_field(batch_result, command_type, id_field_name)
session_document_info = self.__session.documents_by_id.get_value(key)
if session_document_info is None:
return
document_info = self.__get_or_add_modifications(key, session_document_info, True)
document_change_vector = self.__get_string_field(
batch_result, command_type, document_change_vector_field_name, False
)
if document_change_vector:
document_info.change_vector = document_change_vector
attachments_json = document_info.metadata.get(constants.Documents.Metadata.ATTACHMENTS)
if not attachments_json:
return
name = self.__get_string_field(batch_result, command_type, attachment_name_field_name)
attachments = []
document_info.metadata[constants.Documents.Metadata.ATTACHMENTS] = attachments
for attachment in attachments_json:
attachment_name = self.__get_string_field(attachment, command_type, "Name")
if attachment_name == name:
continue
attachments.append(attachment)
def __handle_attachment_put_internal(
self,
batch_result: dict,
command_type: CommandType,
id_field_name: str,
attachment_name_field_name: str,
document_change_vector_field_name: str,
) -> None:
key = self.__get_string_field(batch_result, command_type, id_field_name)
session_document_info = self.__session.documents_by_id.get_value(key)
if session_document_info is None:
return
document_info = self.__get_or_add_modifications(key, session_document_info, False)
document_change_vector = self.__get_string_field(
batch_result, command_type, document_change_vector_field_name, False
)
if document_change_vector:
document_info.change_vector = document_change_vector
attachments = document_info.metadata.get(constants.Documents.Metadata.ATTACHMENTS)
if attachments is None:
attachments = []
document_info.metadata[constants.Documents.Metadata.ATTACHMENTS] = attachments
dynamic_node = {
"ChangeVector": self.__get_string_field(batch_result, command_type, "ChangeVector"),
"ContentType": self.__get_string_field(batch_result, command_type, "ContentType"),
"Hash": self.__get_string_field(batch_result, command_type, "Hash"),
"Name": self.__get_string_field(batch_result, command_type, "Name"),
"Size": self.__get_string_field(batch_result, command_type, "Size"),
}
attachments.append(dynamic_node)
def __get_string_field(
self, json: dict, command_type: CommandType, field_name: str, throw_on_missing: Optional[bool] = True
) -> str:
json_node = json.get(field_name, None)
if throw_on_missing and json_node is None:
self.__throw_missing_field(command_type, field_name)
return str(json_node)
def __get_int_field(self, json: dict, command_type: CommandType, field_name: str) -> int:
json_node = json.get(field_name)
if (not json_node) or not isinstance(json_node, int):
self.__throw_missing_field(command_type, field_name)
return json_node
def __get_boolean_field(self, json: dict, command_type: CommandType, field_name: str) -> bool:
json_node = json.get(field_name)
if (not json_node) or not isinstance(json_node, bool):
self.__throw_missing_field(command_type, field_name)
return json_node
def __throw_on_null_result(self) -> None:
raise ValueError(
"Reveived empty response from the server. This is not supposed to happend and is likely a bug."
)
def __throw_missing_field(self, c_type: CommandType, field_name: str) -> None:
raise ValueError(f"{c_type} response is invalid. Field '{field_name}' is missing.")
| 43.688053
| 120
| 0.690788
|
da8efb39c36d1f13e3d2c6888f891bab0a5f34c2
| 337
|
py
|
Python
|
crownstone_uart/topics/UartTopics.py
|
RicArch97/crownstone-lib-python-uart
|
c0aaf1415936e5e622aa6395fdac4f88ebcf82bf
|
[
"MIT"
] | null | null | null |
crownstone_uart/topics/UartTopics.py
|
RicArch97/crownstone-lib-python-uart
|
c0aaf1415936e5e622aa6395fdac4f88ebcf82bf
|
[
"MIT"
] | null | null | null |
crownstone_uart/topics/UartTopics.py
|
RicArch97/crownstone-lib-python-uart
|
c0aaf1415936e5e622aa6395fdac4f88ebcf82bf
|
[
"MIT"
] | null | null | null |
class UartTopics:
newDataAvailable = "UART_newDataAvailable"
uartMessage = "UART_Message" # data is dictionary: {"string": str, "data": [uint8, uint8, ...] }
hello = "UART_hello" # Data is: UartCrownstoneHelloPacket
log = "UART_log" # Data is UartLogPacket
logArray = "UART_logArray" # Data is UartLogArrayPacket
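The class is only a namespace of topic-name constants; a minimal, hypothetical sketch of keying callbacks by those constants follows (no crownstone event-bus API is assumed, and the payload shape mirrors the comment above).

# Hypothetical sketch: dispatch UART messages through a plain dict of handlers.
handlers = {UartTopics.uartMessage: []}
handlers[UartTopics.uartMessage].append(lambda data: print(data["string"]))

# Whatever receives UART data can fan out on the same constant:
for callback in handlers.get(UartTopics.uartMessage, []):
    callback({"string": "hello", "data": [104, 101, 108, 108, 111]})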
| 30.636364
| 100
| 0.697329
|