hexsha
stringlengths 40
40
| size
int64 5
2.06M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
248
| max_stars_repo_name
stringlengths 5
125
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
248
| max_issues_repo_name
stringlengths 5
125
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
248
| max_forks_repo_name
stringlengths 5
125
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 5
2.06M
| avg_line_length
float64 1
1.02M
| max_line_length
int64 3
1.03M
| alphanum_fraction
float64 0
1
| count_classes
int64 0
1.6M
| score_classes
float64 0
1
| count_generators
int64 0
651k
| score_generators
float64 0
1
| count_decorators
int64 0
990k
| score_decorators
float64 0
1
| count_async_functions
int64 0
235k
| score_async_functions
float64 0
1
| count_documentation
int64 0
1.04M
| score_documentation
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cea36ec679e178237a093bbac8b36da547e73bca
| 46
|
py
|
Python
|
cloudbackup/tests/__init__.py
|
nloadholtes/python-cloudbackup-sdk
|
1866e23aaaac41c35be4cb6ab964fcd0ba9a8fe6
|
[
"Apache-2.0"
] | 4
|
2015-02-10T14:28:12.000Z
|
2016-12-26T22:52:07.000Z
|
cloudbackup/tests/__init__.py
|
nloadholtes/python-cloudbackup-sdk
|
1866e23aaaac41c35be4cb6ab964fcd0ba9a8fe6
|
[
"Apache-2.0"
] | 17
|
2015-01-22T21:58:36.000Z
|
2018-01-25T19:47:43.000Z
|
cloudbackup/tests/__init__.py
|
nloadholtes/python-cloudbackup-sdk
|
1866e23aaaac41c35be4cb6ab964fcd0ba9a8fe6
|
[
"Apache-2.0"
] | 9
|
2015-01-26T19:25:45.000Z
|
2018-11-01T20:14:12.000Z
|
"""
Rackspace Cloud Backup API
Test Suite
"""
| 9.2
| 26
| 0.695652
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.978261
|
cea3c68b56eb3e56f0d54948bd1517206a13b963
| 5,647
|
py
|
Python
|
gym_locm/toolbox/predictor.py
|
dfpetrin/gym-locm
|
af843f2508dd6c5ce96740390ef67b89c77c34ad
|
[
"MIT"
] | 12
|
2019-11-16T23:11:08.000Z
|
2022-03-24T03:31:59.000Z
|
gym_locm/toolbox/predictor.py
|
dfpetrin/gym-locm
|
af843f2508dd6c5ce96740390ef67b89c77c34ad
|
[
"MIT"
] | 6
|
2021-01-21T15:33:40.000Z
|
2022-03-18T18:06:25.000Z
|
gym_locm/toolbox/predictor.py
|
dfpetrin/gym-locm
|
af843f2508dd6c5ce96740390ef67b89c77c34ad
|
[
"MIT"
] | 2
|
2019-07-18T20:34:42.000Z
|
2022-03-01T19:56:46.000Z
|
import argparse
import json
import os
import pathlib
import numpy as np
import pexpect
from scipy.special import softmax
# Absolute path of the directory containing this file; model paths given on
# the command line are resolved relative to it (see load_model).
base_path = str(pathlib.Path(__file__).parent.absolute())
def get_arg_parser():
    """Build the command-line parser for the draft predictor.

    Returns:
        argparse.ArgumentParser: parser exposing ``--draft``, ``--draft-1``,
        ``--draft-2`` and ``--battle`` options with their defaults shown in
        the help output.
    """
    parser = argparse.ArgumentParser(
        description="This is a predictor for trained RL drafts.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # (flag, help text, default) for every supported option
    options = (
        ("--draft", "path to draft model", "draft.json"),
        ("--draft-1", "path to first draft model", "1st-draft.json"),
        ("--draft-2", "path to second draft model", "2nd-draft.json"),
        ("--battle", "command line to execute the battle agent", "./battle"),
    )
    for flag, help_text, default in options:
        parser.add_argument(flag, help=help_text, default=default)
    return parser
def read_game_input():
    """Read one full game turn from stdin and return it as a list of lines.

    The returned list preserves the engine's line order: two player-info
    lines, the opponent hand/actions line, one line per opponent action,
    the card count, and one line per card.
    """
    # both players' info lines
    lines = [input(), input()]
    # cards in the opponent's hand and how many actions they took
    opponent_hand, opponent_actions = (int(tok) for tok in input().split())
    lines.append(f"{opponent_hand} {opponent_actions}")
    # one line per opponent action
    for _ in range(opponent_actions):
        lines.append(input())
    # how many cards follow
    card_count = int(input())
    lines.append(str(card_count))
    # one line per card description
    for _ in range(card_count):
        lines.append(input())
    return lines
def encode_state(game_input):
    """Encode the draft choices in *game_input* into a flat state vector.

    Returns a float32 array of shape (48,): three card slots of 16 features
    each — a 4-way card-type one-hot, six scaled numeric stats, and six
    keyword flags. Slots without a card stay zero.
    """
    state = np.zeros((3, 16), dtype=np.float32)
    # number of opponent-action lines to skip before the card lines
    skipped = int(game_input[2].split()[1])
    for row, line in enumerate(game_input[4 + skipped:]):
        fields = line.split()
        type_id = int(fields[3])
        one_hot_type = [float(type_id == t) for t in range(4)]
        numeric = [
            int(fields[4]) / 12,            # cost
            int(fields[5]) / 12,            # attack
            max(-12, int(fields[6])) / 12,  # defense, clamped at -12
            int(fields[8]) / 12,            # player HP change
            int(fields[9]) / 12,            # enemy HP change
            int(fields[10]) / 2,            # card draw
        ]
        # one flag per keyword letter present in the abilities field
        keyword_flags = [1 if letter in fields[7] else 0 for letter in 'BCDGLW']
        state[row] = one_hot_type + numeric + keyword_flags
    return state.flatten()
def act(network, state, past_choices):
    """Run a deterministic forward pass and return the chosen action index.

    Args:
        network: dict of layer name -> numpy weight/bias arrays as produced
            by load_model, optionally holding an 'act_fun' callable used as
            the hidden-layer activation.
        state: 1-D encoded state vector (see encode_state).
        past_choices: (30, 16) array of previously picked card encodings;
            prepended (flattened) to the state for history-aware models.

    Returns:
        int: index of the argmax action under the policy head.
    """
    # Hidden-layer activation; fall back to tanh for legacy models that
    # predate the 'act_fun' entry.
    activation = network.get('act_fun', np.tanh)
    # BUG FIX: the original probed the *first* dict entry for a shape, but
    # load_model inserts the 'act_fun' callable first, and callables have no
    # .shape (AttributeError). Probe the first FC layer's weights instead:
    # an input width of 33*16 means the model also consumes the 30-slot
    # pick history (30*16) alongside the 3-slot state (3*16).
    first_weights = network.get("model/shared_fc0/w:0")
    use_history = first_weights is not None \
        and first_weights.shape[0] == 33 * 16
    if use_history:
        # BUG FIX: flatten past_choices before concatenating; a (30, 16)
        # array cannot be concatenated with a 1-D state directly.
        state = np.concatenate([past_choices.ravel(), state])
    # do a forward pass through all fully connected hidden layers
    i = 0
    while f"model/shared_fc{i}/w:0" in network:
        weights = network[f"model/shared_fc{i}/w:0"]
        biases = network[f"model/shared_fc{i}/b:0"]
        state = activation(np.dot(state, weights) + biases)
        i += 1
    # policy head followed by a softmax over actions
    pi = softmax(np.dot(state, network["model/pi/w:0"])
                 + network["model/pi/b:0"])
    # deterministic choice: the most probable action
    return int(np.argmax(pi))
def is_valid_action(action):
    """Return True if *action* starts like a recognised engine command."""
    # str.startswith accepts a tuple: one call covers every command prefix
    return action.startswith(('PASS', 'PICK', 'SUMMON', 'USE', 'ATTACK'))
def load_model(path: str):
    """Load a JSON draft model located relative to this file's directory.

    Returns a dict mapping layer labels to numpy arrays, plus an
    'act_fun' entry holding the hidden-layer activation callable.
    """
    # read the serialized parameters
    with open(base_path + "/" + path, 'r') as json_file:
        params = json.load(json_file)
    network = {}
    if 'version' not in params or params['version'] < 2:
        # legacy models (pre-version-2) always used tanh
        network['act_fun'] = np.tanh
    else:
        # newer models declare their hidden-layer activation by name
        activations = {
            'tanh': np.tanh,
            'relu': lambda x: np.maximum(x, 0),
            'elu': lambda x: np.where(x > 0, x, np.exp(x) - 1),
        }
        network['act_fun'] = activations[params['act_fun']]
        del params['version']
        del params['act_fun']
    # everything else in the file is a weight/bias tensor
    for label, weights in params.items():
        network[label] = np.array(weights)
    return network
def predict(paths: list, battle_cmd: str):
    """Main loop: play drafts with the RL model, delegate battles to an agent.

    Spawns *battle_cmd* as a child process via pexpect and then, forever:
    reads a turn from stdin, forwards it to the child, and prints either the
    model's PICK (draft phase) or the child's action line (battle phase).

    Args:
        paths: one or two model file paths; with two, the first is used when
            playing first and the second when playing second.
        battle_cmd: shell command line used to spawn the battle agent.

    Never returns; runs until stdin/the child process ends the game.
    """
    network = None  # loaded lazily on the first turn, once play order is known
    # spawn the battle agent
    battle_agent = pexpect.spawn(battle_cmd, echo=False, encoding='utf-8')
    # count the draft turns
    turn = 0
    # initialize past choices (30 picks x 16 features, see encode_state)
    past_choices = np.zeros((30, 16))
    while True:
        game_input = read_game_input()
        # write game input to the agent regardless of the phase, so the
        # child stays in sync with the game state
        battle_agent.write("\n".join(game_input) + "\n")
        action = ""
        # find action line between all of the agent output
        while not is_valid_action(action):
            action = battle_agent.readline()
        # if mana is zero then it is draft phase
        is_draft_phase = int(game_input[0].split()[1]) == 0
        if network is None:
            # equal third fields on both player lines mean we play first
            playing_first = game_input[0].split()[2] == game_input[1].split()[2]
            path = paths[0] if playing_first or len(paths) == 1 else paths[1]
            network = load_model(path)
        if is_draft_phase:
            state = encode_state(game_input)
            action = act(network, state, past_choices)
            # update past choices with current pick (16-feature slice)
            past_choices[turn] = state[action * 16:(action + 1) * 16]
            turn += 1
            print("PICK", action)
        else:
            # print action from battle agent
            print(action.strip())
def run():
    """Entry point: parse CLI options and start the prediction loop."""
    args = get_arg_parser().parse_args()
    # A single existing model file means one draft model plays both sides;
    # otherwise fall back to one model per play order.
    if os.path.isfile(args.draft):
        model_paths = [args.draft]
    else:
        model_paths = [args.draft_1, args.draft_2]
    predict(model_paths, args.battle)
# Run the predictor only when executed as a script, not on import.
if __name__ == '__main__':
    run()
| 27.412621
| 80
| 0.607756
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,350
| 0.239065
|
cea540d8d7c6742e25322196c14ce8e5fffdddeb
| 57,014
|
py
|
Python
|
src/ralph_assets/migrations/0012_auto__add_transitionshistory__add_attachment__add_coaoemos__add_action.py
|
xliiv/ralph_assets
|
73e5e46db380c9a8dafb9ca1bd5abe47d5733385
|
[
"Apache-2.0"
] | null | null | null |
src/ralph_assets/migrations/0012_auto__add_transitionshistory__add_attachment__add_coaoemos__add_action.py
|
xliiv/ralph_assets
|
73e5e46db380c9a8dafb9ca1bd5abe47d5733385
|
[
"Apache-2.0"
] | null | null | null |
src/ralph_assets/migrations/0012_auto__add_transitionshistory__add_attachment__add_coaoemos__add_action.py
|
xliiv/ralph_assets
|
73e5e46db380c9a8dafb9ca1bd5abe47d5733385
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Apply this auto-generated South schema migration.

    Creates the TransitionsHistory, Attachment, CoaOemOs, Action,
    ReportOdtSource, Service, Transition, LicenceHistoryChange and
    ImportProblem models (with their M2M join tables), and reshapes
    the Licence, Asset and OfficeInfo tables. Mirrored by backwards().
    """
    # Removing unique constraint on 'Licence', fields ['sn']
    db.delete_unique('ralph_assets_licence', ['sn'])

    # Adding model 'TransitionsHistory'
    db.create_table('ralph_assets_transitionshistory', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ('cache_version', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
        ('transition', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ralph_assets.Transition'])),
        ('logged_user', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'logged user', to=orm['auth.User'])),
        ('affected_user', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'affected user', to=orm['auth.User'])),
        ('report_filename', self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True)),
        ('uid', self.gf('django.db.models.fields.CharField')(max_length=36)),
        ('report_file', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
    ))
    db.send_create_signal('ralph_assets', ['TransitionsHistory'])

    # Adding M2M table for field assets on 'TransitionsHistory'
    db.create_table('ralph_assets_transitionshistory_assets', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('transitionshistory', models.ForeignKey(orm['ralph_assets.transitionshistory'], null=False)),
        ('asset', models.ForeignKey(orm['ralph_assets.asset'], null=False))
    ))
    db.create_unique('ralph_assets_transitionshistory_assets', ['transitionshistory_id', 'asset_id'])

    # Adding model 'Attachment'
    db.create_table('ralph_assets_attachment', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ('cache_version', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
        ('original_filename', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('file', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True)),
        ('uploaded_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
    ))
    db.send_create_signal('ralph_assets', ['Attachment'])

    # Adding model 'CoaOemOs'
    db.create_table('ralph_assets_coaoemos', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=75, db_index=True)),
    ))
    db.send_create_signal('ralph_assets', ['CoaOemOs'])

    # Adding model 'Action'
    db.create_table('ralph_assets_action', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=75, db_index=True)),
    ))
    db.send_create_signal('ralph_assets', ['Action'])

    # Adding model 'ReportOdtSource'
    db.create_table('ralph_assets_reportodtsource', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=75, db_index=True)),
        ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ('cache_version', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
        ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=100)),
        ('template', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
    ))
    db.send_create_signal('ralph_assets', ['ReportOdtSource'])

    # Adding model 'Service'
    db.create_table('ralph_assets_service', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=75, db_index=True)),
        ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ('cache_version', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
        ('profit_center', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)),
        ('cost_center', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)),
    ))
    db.send_create_signal('ralph_assets', ['Service'])

    # Adding model 'Transition'
    db.create_table('ralph_assets_transition', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=75, db_index=True)),
        ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ('cache_version', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
        ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=100)),
        ('from_status', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)),
        ('to_status', self.gf('django.db.models.fields.PositiveSmallIntegerField')()),
    ))
    db.send_create_signal('ralph_assets', ['Transition'])

    # Adding M2M table for field actions on 'Transition'
    db.create_table('ralph_assets_transition_actions', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('transition', models.ForeignKey(orm['ralph_assets.transition'], null=False)),
        ('action', models.ForeignKey(orm['ralph_assets.action'], null=False))
    ))
    db.create_unique('ralph_assets_transition_actions', ['transition_id', 'action_id'])

    # Adding model 'LicenceHistoryChange'
    db.create_table('ralph_assets_licencehistorychange', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ('licence', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['ralph_assets.Licence'], null=True, on_delete=models.SET_NULL, blank=True)),
        ('user', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['auth.User'], null=True, on_delete=models.SET_NULL, blank=True)),
        ('field_name', self.gf('django.db.models.fields.CharField')(default=u'', max_length=64)),
        ('old_value', self.gf('django.db.models.fields.CharField')(default=u'', max_length=255)),
        ('new_value', self.gf('django.db.models.fields.CharField')(default=u'', max_length=255)),
    ))
    db.send_create_signal('ralph_assets', ['LicenceHistoryChange'])

    # Adding model 'ImportProblem'
    db.create_table('ralph_assets_importproblem', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
        ('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
        ('severity', self.gf('django.db.models.fields.PositiveSmallIntegerField')()),
        ('message', self.gf('django.db.models.fields.TextField')()),
    ))
    db.send_create_signal('ralph_assets', ['ImportProblem'])

    # Deleting field 'Licence.bought_date'
    db.delete_column('ralph_assets_licence', 'bought_date')

    # Deleting field 'Licence.used'
    db.delete_column('ralph_assets_licence', 'used')

    # Adding field 'Licence.invoice_date'
    db.add_column('ralph_assets_licence', 'invoice_date',
                  self.gf('django.db.models.fields.DateField')(null=True, blank=True),
                  keep_default=False)

    # Adding field 'Licence.provider'
    db.add_column('ralph_assets_licence', 'provider',
                  self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True),
                  keep_default=False)

    # Adding field 'Licence.invoice_no'
    db.add_column('ralph_assets_licence', 'invoice_no',
                  self.gf('django.db.models.fields.CharField')(db_index=True, max_length=128, null=True, blank=True),
                  keep_default=False)

    # Adding M2M table for field assets on 'Licence'
    db.create_table('ralph_assets_licence_assets', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('licence', models.ForeignKey(orm['ralph_assets.licence'], null=False)),
        ('asset', models.ForeignKey(orm['ralph_assets.asset'], null=False))
    ))
    db.create_unique('ralph_assets_licence_assets', ['licence_id', 'asset_id'])

    # Adding M2M table for field users on 'Licence'
    db.create_table('ralph_assets_licence_users', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('licence', models.ForeignKey(orm['ralph_assets.licence'], null=False)),
        ('user', models.ForeignKey(orm['auth.user'], null=False))
    ))
    db.create_unique('ralph_assets_licence_users', ['licence_id', 'user_id'])

    # Adding M2M table for field attachments on 'Licence'
    db.create_table('ralph_assets_licence_attachments', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('licence', models.ForeignKey(orm['ralph_assets.licence'], null=False)),
        ('attachment', models.ForeignKey(orm['ralph_assets.attachment'], null=False))
    ))
    db.create_unique('ralph_assets_licence_attachments', ['licence_id', 'attachment_id'])

    # Changing field 'Licence.niw'
    db.alter_column('ralph_assets_licence', 'niw', self.gf('django.db.models.fields.CharField')(default='N/A', unique=True, max_length=50))
    # Adding unique constraint on 'Licence', fields ['niw']
    db.create_unique('ralph_assets_licence', ['niw'])

    # Changing field 'Licence.price'
    db.alter_column('ralph_assets_licence', 'price', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=10, decimal_places=2))

    # Changing field 'Licence.sn'
    db.alter_column('ralph_assets_licence', 'sn', self.gf('django.db.models.fields.TextField')(null=True))

    # Deleting field 'Asset.category'
    db.delete_column('ralph_assets_asset', 'category_id')

    # Adding field 'Asset.location'
    db.add_column('ralph_assets_asset', 'location',
                  self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True),
                  keep_default=False)

    # Adding field 'Asset.service_name'
    db.add_column('ralph_assets_asset', 'service_name',
                  self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ralph_assets.Service'], null=True, blank=True),
                  keep_default=False)

    # Adding field 'Asset.loan_end_date'
    db.add_column('ralph_assets_asset', 'loan_end_date',
                  self.gf('django.db.models.fields.DateField')(default=None, null=True, blank=True),
                  keep_default=False)

    # Adding field 'Asset.note'
    db.add_column('ralph_assets_asset', 'note',
                  self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True),
                  keep_default=False)

    # Adding M2M table for field attachments on 'Asset'
    db.create_table('ralph_assets_asset_attachments', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('asset', models.ForeignKey(orm['ralph_assets.asset'], null=False)),
        ('attachment', models.ForeignKey(orm['ralph_assets.attachment'], null=False))
    ))
    db.create_unique('ralph_assets_asset_attachments', ['asset_id', 'attachment_id'])

    # Changing field 'Asset.support_period'
    db.alter_column('ralph_assets_asset', 'support_period', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True))

    # Changing field 'Asset.source'
    db.alter_column('ralph_assets_asset', 'source', self.gf('django.db.models.fields.PositiveIntegerField')(null=True))

    # Changing field 'Asset.status'
    db.alter_column('ralph_assets_asset', 'status', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True))

    # Changing field 'Asset.price'
    db.alter_column('ralph_assets_asset', 'price', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=10, decimal_places=2))

    # Changing field 'Asset.niw'
    db.alter_column('ralph_assets_asset', 'niw', self.gf('django.db.models.fields.CharField')(max_length=200, null=True))

    # Deleting field 'OfficeInfo.version'
    db.delete_column('ralph_assets_officeinfo', 'version')

    # Deleting field 'OfficeInfo.last_logged_user'
    db.delete_column('ralph_assets_officeinfo', 'last_logged_user')

    # Deleting field 'OfficeInfo.date_of_last_inventory'
    db.delete_column('ralph_assets_officeinfo', 'date_of_last_inventory')

    # Deleting field 'OfficeInfo.attachment'
    db.delete_column('ralph_assets_officeinfo', 'attachment')

    # Deleting field 'OfficeInfo.license_type'
    db.delete_column('ralph_assets_officeinfo', 'license_type')

    # Adding field 'OfficeInfo.coa_number'
    db.add_column('ralph_assets_officeinfo', 'coa_number',
                  self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True),
                  keep_default=False)

    # Adding field 'OfficeInfo.coa_oem_os'
    db.add_column('ralph_assets_officeinfo', 'coa_oem_os',
                  self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ralph_assets.CoaOemOs'], null=True, blank=True),
                  keep_default=False)

    # Adding field 'OfficeInfo.imei'
    db.add_column('ralph_assets_officeinfo', 'imei',
                  self.gf('django.db.models.fields.CharField')(max_length=18, unique=True, null=True, blank=True),
                  keep_default=False)

    # Adding field 'OfficeInfo.purpose'
    db.add_column('ralph_assets_officeinfo', 'purpose',
                  self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=None, null=True, blank=True),
                  keep_default=False)

    # Changing field 'OfficeInfo.license_key'
    db.alter_column('ralph_assets_officeinfo', 'license_key', self.gf('django.db.models.fields.TextField')(null=True))
    def backwards(self, orm):
        """Reverse this migration.

        Mirrors ``forwards`` in reverse: drops the models, columns and
        M2M join tables that ``forwards`` created, re-adds the columns it
        deleted, and restores the previous column definitions on the
        Licence, Asset and OfficeInfo tables.

        NOTE: this is South-generated code; statement order matters
        (e.g. the ``niw`` unique constraint must be dropped before the
        column is altered, and M2M join tables before their parent
        tables). Edit with care.

        :param orm: South's frozen ORM snapshot (see ``models`` below),
            used to resolve ForeignKey targets for re-added columns.
        """
        # --- Undo constraint/model additions made by forwards() ---
        # Removing unique constraint on 'Licence', fields ['niw']
        db.delete_unique('ralph_assets_licence', ['niw'])
        # Deleting model 'TransitionsHistory'
        db.delete_table('ralph_assets_transitionshistory')
        # Removing M2M table for field assets on 'TransitionsHistory'
        db.delete_table('ralph_assets_transitionshistory_assets')
        # Deleting model 'Attachment'
        db.delete_table('ralph_assets_attachment')
        # Deleting model 'CoaOemOs'
        db.delete_table('ralph_assets_coaoemos')
        # Deleting model 'Action'
        db.delete_table('ralph_assets_action')
        # Deleting model 'ReportOdtSource'
        db.delete_table('ralph_assets_reportodtsource')
        # Deleting model 'Service'
        db.delete_table('ralph_assets_service')
        # Deleting model 'Transition'
        db.delete_table('ralph_assets_transition')
        # Removing M2M table for field actions on 'Transition'
        db.delete_table('ralph_assets_transition_actions')
        # Deleting model 'LicenceHistoryChange'
        db.delete_table('ralph_assets_licencehistorychange')
        # Deleting model 'ImportProblem'
        db.delete_table('ralph_assets_importproblem')
        # --- Restore the previous Licence schema ---
        # Adding field 'Licence.bought_date'
        db.add_column('ralph_assets_licence', 'bought_date',
                      self.gf('django.db.models.fields.DateField')(default=None),
                      keep_default=False)
        # Adding field 'Licence.used'
        db.add_column('ralph_assets_licence', 'used',
                      self.gf('django.db.models.fields.IntegerField')(default=0),
                      keep_default=False)
        # Deleting field 'Licence.invoice_date'
        db.delete_column('ralph_assets_licence', 'invoice_date')
        # Deleting field 'Licence.provider'
        db.delete_column('ralph_assets_licence', 'provider')
        # Deleting field 'Licence.invoice_no'
        db.delete_column('ralph_assets_licence', 'invoice_no')
        # Removing M2M table for field assets on 'Licence'
        db.delete_table('ralph_assets_licence_assets')
        # Removing M2M table for field users on 'Licence'
        db.delete_table('ralph_assets_licence_users')
        # Removing M2M table for field attachments on 'Licence'
        db.delete_table('ralph_assets_licence_attachments')
        # Changing field 'Licence.niw'
        db.alter_column('ralph_assets_licence', 'niw', self.gf('django.db.models.fields.CharField')(max_length=50, null=True))
        # Changing field 'Licence.price'
        db.alter_column('ralph_assets_licence', 'price', self.gf('django.db.models.fields.DecimalField')(max_digits=10, decimal_places=2))
        # Changing field 'Licence.sn'
        db.alter_column('ralph_assets_licence', 'sn', self.gf('django.db.models.fields.CharField')(unique=True, max_length=200, null=True))
        # Adding unique constraint on 'Licence', fields ['sn']
        db.create_unique('ralph_assets_licence', ['sn'])
        # --- Restore the previous Asset schema ---
        # Adding field 'Asset.category'
        db.add_column('ralph_assets_asset', 'category',
                      self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ralph_assets.AssetCategory'], null=True, blank=True),
                      keep_default=False)
        # Deleting field 'Asset.location'
        db.delete_column('ralph_assets_asset', 'location')
        # Deleting field 'Asset.service_name'
        db.delete_column('ralph_assets_asset', 'service_name_id')
        # Deleting field 'Asset.loan_end_date'
        db.delete_column('ralph_assets_asset', 'loan_end_date')
        # Deleting field 'Asset.note'
        db.delete_column('ralph_assets_asset', 'note')
        # Removing M2M table for field attachments on 'Asset'
        db.delete_table('ralph_assets_asset_attachments')
        # Changing field 'Asset.support_period'
        db.alter_column('ralph_assets_asset', 'support_period', self.gf('django.db.models.fields.PositiveSmallIntegerField')())
        # Changing field 'Asset.source'
        db.alter_column('ralph_assets_asset', 'source', self.gf('django.db.models.fields.PositiveIntegerField')(default=None))
        # Changing field 'Asset.status'
        db.alter_column('ralph_assets_asset', 'status', self.gf('django.db.models.fields.PositiveSmallIntegerField')())
        # Changing field 'Asset.price'
        db.alter_column('ralph_assets_asset', 'price', self.gf('django.db.models.fields.DecimalField')(max_digits=10, decimal_places=2))
        # Changing field 'Asset.niw'
        db.alter_column('ralph_assets_asset', 'niw', self.gf('django.db.models.fields.CharField')(max_length=50, null=True))
        # --- Restore the previous OfficeInfo schema ---
        # Adding field 'OfficeInfo.version'
        db.add_column('ralph_assets_officeinfo', 'version',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=50, blank=True),
                      keep_default=False)
        # Adding field 'OfficeInfo.last_logged_user'
        db.add_column('ralph_assets_officeinfo', 'last_logged_user',
                      self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True),
                      keep_default=False)
        # Adding field 'OfficeInfo.date_of_last_inventory'
        db.add_column('ralph_assets_officeinfo', 'date_of_last_inventory',
                      self.gf('django.db.models.fields.DateField')(null=True, blank=True),
                      keep_default=False)
        # Adding field 'OfficeInfo.attachment'
        db.add_column('ralph_assets_officeinfo', 'attachment',
                      self.gf('django.db.models.fields.files.FileField')(default=None, max_length=100, blank=True),
                      keep_default=False)
        # Adding field 'OfficeInfo.license_type'
        db.add_column('ralph_assets_officeinfo', 'license_type',
                      self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
                      keep_default=False)
        # Deleting field 'OfficeInfo.coa_number'
        db.delete_column('ralph_assets_officeinfo', 'coa_number')
        # Deleting field 'OfficeInfo.coa_oem_os'
        db.delete_column('ralph_assets_officeinfo', 'coa_oem_os_id')
        # Deleting field 'OfficeInfo.imei'
        db.delete_column('ralph_assets_officeinfo', 'imei')
        # Deleting field 'OfficeInfo.purpose'
        db.delete_column('ralph_assets_officeinfo', 'purpose')
        # Changing field 'OfficeInfo.license_key'
        db.alter_column('ralph_assets_officeinfo', 'license_key', self.gf('django.db.models.fields.CharField')(default='', max_length=255))
models = {
'account.profile': {
'Meta': {'object_name': 'Profile'},
'activation_token': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '40', 'blank': 'True'}),
'birth_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'company': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'cost_center': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'country': ('django.db.models.fields.PositiveIntegerField', [], {'default': '153'}),
'department': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'employee_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'gender': ('django.db.models.fields.PositiveIntegerField', [], {'default': '2'}),
'home_page': (u'dj.choices.fields.ChoiceField', [], {'unique': 'False', 'primary_key': 'False', 'db_column': 'None', 'blank': 'False', u'default': '1', 'null': 'False', '_in_south': 'True', 'db_index': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'manager': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'nick': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '30', 'blank': 'True'}),
'profit_center': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'time_zone': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'ralph_assets.action': {
'Meta': {'object_name': 'Action'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
},
'ralph_assets.asset': {
'Meta': {'object_name': 'Asset'},
'attachments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['ralph_assets.Attachment']", 'null': 'True', 'blank': 'True'}),
'barcode': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'delivery_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'deprecation_rate': ('django.db.models.fields.DecimalField', [], {'default': '25', 'max_digits': '5', 'decimal_places': '2', 'blank': 'True'}),
'device_info': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['ralph_assets.DeviceInfo']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'force_deprecation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'invoice_no': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'loan_end_date': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'model': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ralph_assets.AssetModel']", 'on_delete': 'models.PROTECT'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'}),
'niw': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'office_info': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['ralph_assets.OfficeInfo']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'order_no': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'owner'", 'null': 'True', 'to': "orm['auth.User']"}),
'part_info': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['ralph_assets.PartInfo']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'production_use_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'production_year': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'property_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ralph_assets.AssetOwner']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'provider_order_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'remarks': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'request_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'service_name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ralph_assets.Service']", 'null': 'True', 'blank': 'True'}),
'slots': ('django.db.models.fields.FloatField', [], {'default': '0', 'max_length': '64'}),
'sn': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'support_period': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'support_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'support_type': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'support_void_reporting': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'task_url': ('django.db.models.fields.URLField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'user'", 'null': 'True', 'to': "orm['auth.User']"}),
'warehouse': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ralph_assets.Warehouse']", 'on_delete': 'models.PROTECT'})
},
'ralph_assets.assetcategory': {
'Meta': {'object_name': 'AssetCategory'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'}),
'is_blade': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': "orm['ralph_assets.AssetCategory']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'primary_key': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'ralph_assets.assethistorychange': {
'Meta': {'object_name': 'AssetHistoryChange'},
'asset': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['ralph_assets.Asset']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'device_info': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['ralph_assets.DeviceInfo']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'field_name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_value': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'office_info': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['ralph_assets.OfficeInfo']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'old_value': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'part_info': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['ralph_assets.PartInfo']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
'ralph_assets.assetmanufacturer': {
'Meta': {'object_name': 'AssetManufacturer'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
},
'ralph_assets.assetmodel': {
'Meta': {'object_name': 'AssetModel'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ralph_assets.AssetCategory']", 'null': 'True', 'blank': 'True'}),
'cores_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'}),
'height_of_device': ('django.db.models.fields.FloatField', [], {'default': '0', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manufacturer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ralph_assets.AssetManufacturer']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
'power_consumption': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'})
},
'ralph_assets.assetowner': {
'Meta': {'object_name': 'AssetOwner'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
},
'ralph_assets.attachment': {
'Meta': {'object_name': 'Attachment'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uploaded_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'ralph_assets.coaoemos': {
'Meta': {'object_name': 'CoaOemOs'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
},
'ralph_assets.deviceinfo': {
'Meta': {'object_name': 'DeviceInfo'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'rack': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'ralph_device_id': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'u_height': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'u_level': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'})
},
'ralph_assets.importproblem': {
'Meta': {'object_name': 'ImportProblem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'severity': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'ralph_assets.licence': {
'Meta': {'object_name': 'Licence'},
'accounting_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'asset_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'assets': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['ralph_assets.Asset']", 'symmetrical': 'False'}),
'attachments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['ralph_assets.Attachment']", 'null': 'True', 'blank': 'True'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'invoice_no': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'licence_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ralph_assets.LicenceType']", 'on_delete': 'models.PROTECT'}),
'manufacturer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ralph_assets.AssetManufacturer']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'niw': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'number_bought': ('django.db.models.fields.IntegerField', [], {}),
'order_no': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': "orm['ralph_assets.Licence']"}),
'price': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'property_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ralph_assets.AssetOwner']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'sn': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'software_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ralph_assets.SoftwareCategory']", 'on_delete': 'models.PROTECT'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'}),
'valid_thru': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'ralph_assets.licencehistorychange': {
'Meta': {'object_name': 'LicenceHistoryChange'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'field_name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'licence': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['ralph_assets.Licence']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'new_value': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'old_value': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
'ralph_assets.licencetype': {
'Meta': {'object_name': 'LicenceType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
},
'ralph_assets.officeinfo': {
'Meta': {'object_name': 'OfficeInfo'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'coa_number': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'coa_oem_os': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ralph_assets.CoaOemOs']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imei': ('django.db.models.fields.CharField', [], {'max_length': '18', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'license_key': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'purpose': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'ralph_assets.partinfo': {
'Meta': {'object_name': 'PartInfo'},
'barcode_salvaged': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'device': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'device'", 'null': 'True', 'to': "orm['ralph_assets.Asset']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'source_device': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'source_device'", 'null': 'True', 'to': "orm['ralph_assets.Asset']"})
},
'ralph_assets.reportodtsource': {
'Meta': {'object_name': 'ReportOdtSource'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'template': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
},
'ralph_assets.service': {
'Meta': {'object_name': 'Service'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'cost_center': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
'profit_center': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'})
},
'ralph_assets.softwarecategory': {
'Meta': {'object_name': 'SoftwareCategory'},
'asset_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
},
'ralph_assets.transition': {
'Meta': {'object_name': 'Transition'},
'actions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['ralph_assets.Action']", 'symmetrical': 'False'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'from_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'to_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'ralph_assets.transitionshistory': {
'Meta': {'ordering': "[u'-created']", 'object_name': 'TransitionsHistory'},
'affected_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'affected user'", 'to': "orm['auth.User']"}),
'assets': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['ralph_assets.Asset']", 'symmetrical': 'False'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logged_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'logged user'", 'to': "orm['auth.User']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'report_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'report_filename': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'transition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ralph_assets.Transition']"}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'ralph_assets.warehouse': {
'Meta': {'object_name': 'Warehouse'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
}
}
complete_apps = ['ralph_assets']
| 74.236979
| 224
| 0.60522
| 56,881
| 0.997667
| 0
| 0
| 0
| 0
| 0
| 0
| 36,358
| 0.637703
|
cea61c55844715882bbb20ff9d1e7b4ef98a8044
| 3,981
|
py
|
Python
|
ansible/modules/network/nxos/nxos_pim.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1
|
2022-01-25T22:52:58.000Z
|
2022-01-25T22:52:58.000Z
|
ansible/modules/network/nxos/nxos_pim.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
ansible/modules/network/nxos/nxos_pim.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_pim
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages configuration of a PIM instance.
description:
- Manages configuration of a Protocol Independent Multicast (PIM) instance.
author: Gabriele Gerbino (@GGabriele)
options:
ssm_range:
description:
- Configure group ranges for Source Specific Multicast (SSM).
Valid values are multicast addresses or the keyword 'none'.
required: true
'''
EXAMPLES = '''
- nxos_pim:
ssm_range: "232.0.0.0/8"
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["ip pim ssm range 232.0.0.0/8"]
'''
import re
from ansible.module_utils.nxos import get_config, load_config
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
# Maps each module parameter to the NX-OS CLI command prefix it configures.
PARAM_TO_COMMAND_KEYMAP = {
    'ssm_range': 'ip pim ssm range'
}
def get_existing(module, args):
    """Read the device's running config and return the current value
    for every parameter in *args* ('' when the command is absent)."""
    running = str(get_config(module))

    def current_value(name):
        # Match e.g. "ip pim ssm range <value>" on its own line.
        pattern = r'^{0}\s(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[name])
        match = re.search(pattern, running, re.M)
        return match.group('value') if match else ''

    return {name: current_value(name) for name in args}
def apply_key_map(key_map, table):
    """Translate *table*'s keys through *key_map*, keeping only entries
    whose value is truthy (unknown keys map to None)."""
    return {key_map.get(key): value for key, value in table.items() if value}
def get_commands(module, existing, proposed, candidate):
    """Translate *proposed* parameter changes into CLI lines and add them
    to *candidate*. (*module* and *existing* are unused but kept for
    call-site compatibility.)"""
    cli_map = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
    commands = ['{0} {1}'.format(prefix, value)
                for prefix, value in cli_map.items()]
    if commands:
        candidate.add(commands, parents=[])
def main():
    """Entry point: validate ssm_range, diff it against the device's
    running config, and push only the needed command."""
    argument_spec = dict(
        ssm_range=dict(required=True, type='str'),
    )
    argument_spec.update(nxos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    result = {'changed': False, 'commands': [], 'warnings': warnings}
    # Coarse validation only: a dotted quad (4 parts) or the literal 'none'.
    splitted_ssm_range = module.params['ssm_range'].split('.')
    if len(splitted_ssm_range) != 4 and module.params['ssm_range'] != 'none':
        module.fail_json(msg="Valid ssm_range values are multicast addresses "
                             "or the keyword 'none'.")
    args = PARAM_TO_COMMAND_KEYMAP.keys()
    existing = get_existing(module, args)
    # Only propose values that differ from what the device already has.
    proposed = dict((k, v) for k, v in module.params.items()
                    if k in args and v != existing[k])
    candidate = CustomNetworkConfig(indent=3)
    get_commands(module, existing, proposed, candidate)
    if candidate:
        candidate = candidate.items_text()
        result['commands'] = candidate
        result['changed'] = True
        # NOTE(review): load_config runs even under --check; verify that
        # module_utils' load_config honors module.check_mode, else guard it.
        load_config(module, candidate)
    module.exit_json(**result)
# Standard script entry guard so the module can also be imported for tests.
if __name__ == '__main__':
    main()
| 28.035211
| 86
| 0.672444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,619
| 0.406682
|
cea86a1503f29caac9cdfd11f755050fd0f5e54c
| 3,506
|
py
|
Python
|
server/openapi_server/controllers/variable_controller.py
|
mintproject/MINT-ModelCatalogIngestionAPI
|
026d3495483a3e48ea3c1364d0dda09beeea69e4
|
[
"Apache-2.0"
] | 2
|
2019-05-30T21:33:43.000Z
|
2019-09-27T21:04:38.000Z
|
server/openapi_server/controllers/variable_controller.py
|
mintproject/model-catalog-api
|
2ad7016691891497bba37afe8ceb0fea8fe769e5
|
[
"Apache-2.0"
] | 82
|
2019-10-08T16:35:34.000Z
|
2022-03-15T18:25:27.000Z
|
server/openapi_server/controllers/variable_controller.py
|
mintproject/model-catalog-api
|
2ad7016691891497bba37afe8ceb0fea8fe769e5
|
[
"Apache-2.0"
] | null | null | null |
import connexion
import six
from openapi_server import query_manager
from openapi_server.utils.vars import VARIABLE_TYPE_NAME, VARIABLE_TYPE_URI
from openapi_server.models.variable import Variable # noqa: E501
from openapi_server import util
def variables_get(username=None, label=None, page=None, per_page=None):  # noqa: E501
    """List all instances of Variable (https://w3id.org/okn/o/sd#Variable).

    :param username: Name of the user graph to query
    :param label: Filter by label
    :param page: Page number
    :param per_page: Items per page
    :rtype: List[Variable]
    """
    query_kwargs = dict(
        username=username,
        label=label,
        page=page,
        per_page=per_page,
        rdf_type_uri=VARIABLE_TYPE_URI,
        rdf_type_name=VARIABLE_TYPE_NAME,
        kls=Variable,
    )
    return query_manager.get_resource(**query_kwargs)
def variables_id_delete(id, user=None):  # noqa: E501
    """Delete an existing Variable (https://w3id.org/okn/o/sd#Variable).

    :param id: The ID of the Variable to be retrieved
    :param user: Username
    :rtype: None
    """
    return query_manager.delete_resource(
        id=id,
        user=user,
        rdf_type_uri=VARIABLE_TYPE_URI,
        rdf_type_name=VARIABLE_TYPE_NAME,
        kls=Variable,
    )
def variables_id_get(id, username=None):  # noqa: E501
    """Get a single Variable by its id (https://w3id.org/okn/o/sd#Variable).

    :param id: The ID of the Variable to be retrieved
    :param username: Name of the user graph to query
    :rtype: Variable
    """
    return query_manager.get_resource(
        id=id,
        username=username,
        rdf_type_uri=VARIABLE_TYPE_URI,
        rdf_type_name=VARIABLE_TYPE_NAME,
        kls=Variable,
    )
def variables_id_put(id, user=None, variable=None):  # noqa: E501
    """Update an existing Variable (https://w3id.org/okn/o/sd#Variable).

    :param id: The ID of the Variable to be retrieved
    :param user: Username
    :param variable: An old Variable to be updated (dict | bytes)
    :rtype: Variable
    """
    # When the request carries JSON, it supersedes the *variable* argument.
    if connexion.request.is_json:
        variable = Variable.from_dict(connexion.request.get_json())  # noqa: E501
    return query_manager.put_resource(
        id=id,
        user=user,
        body=variable,
        rdf_type_uri=VARIABLE_TYPE_URI,
        rdf_type_name=VARIABLE_TYPE_NAME,
        kls=Variable,
    )
def variables_post(user=None, variable=None):  # noqa: E501
    """Create one Variable (https://w3id.org/okn/o/sd#Variable).

    :param user: Username
    :param variable: Information about the Variable to be created (dict | bytes)
    :rtype: Variable
    """
    # When the request carries JSON, it supersedes the *variable* argument.
    if connexion.request.is_json:
        variable = Variable.from_dict(connexion.request.get_json())  # noqa: E501
    return query_manager.post_resource(
        user=user,
        body=variable,
        rdf_type_uri=VARIABLE_TYPE_URI,
        rdf_type_name=VARIABLE_TYPE_NAME,
        kls=Variable,
    )
| 28.504065
| 114
| 0.683685
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,799
| 0.51312
|
ceab7daa2ddbacd632939bc43e04875d9fe01ee3
| 3,482
|
py
|
Python
|
lhorizon/_request_formatters.py
|
arfon/lhorizon
|
de2fa9c8121b27da87a4e0613a7dd5ec0647d9fb
|
[
"BSD-3-Clause"
] | null | null | null |
lhorizon/_request_formatters.py
|
arfon/lhorizon
|
de2fa9c8121b27da87a4e0613a7dd5ec0647d9fb
|
[
"BSD-3-Clause"
] | null | null | null |
lhorizon/_request_formatters.py
|
arfon/lhorizon
|
de2fa9c8121b27da87a4e0613a7dd5ec0647d9fb
|
[
"BSD-3-Clause"
] | null | null | null |
"""
formatters to translate various parameters and options into URL parameters
that can be parsed by JPL Horizons' CGI. These are mostly intended to be used
by LHorizon methods and should probably not be called directly.
"""
from collections.abc import Mapping, Sequence
from typing import Union
import numpy as np
import pandas as pd
def format_geodetic_origin(location: Mapping) -> dict:
    """
    Build the CGI URL parameters describing a geodetic observing origin.
    """
    lon = float(location["lon"])
    lat = float(location["lat"])
    elevation = float(location["elevation"])
    return {
        "CENTER": "coord@{:s}".format(str(location["body"])),
        "COORD_TYPE": "GEODETIC",
        "SITE_COORD": "'{:f},{:f},{:f}'".format(lon, lat, elevation),
    }
def format_geodetic_target(location: Mapping) -> str:
    """Render a geodetic target location as a Horizons command string."""
    fields = (
        location["lon"],
        location["lat"],
        location["elevation"],
        location["body"],
    )
    return "g:{},{},{}@{}".format(*fields)
def format_epoch_params(epochs: Union[Sequence, Mapping]) -> dict:
    """Translate *epochs* -- a sequence of times, a start/stop/step dict,
    or a scalar -- into Horizons CGI URL parameters."""
    if isinstance(epochs, (pd.Series, list, tuple, np.ndarray)):
        # Explicit list of epochs, one per line.
        return {"TLIST": "\n".join(str(epoch) for epoch in epochs)}
    if isinstance(epochs, dict):
        if not {"start", "stop", "step"} <= set(epochs):
            raise ValueError("'epochs' must contain start, stop, step")
        return {
            "START_TIME": '"' + str(epochs["start"]) + '"',
            "STOP_TIME": '"' + str(epochs["stop"]) + '"',
            "STEP_SIZE": '"' + str(epochs["step"]) + '"',
        }
    # treat epochs as scalar
    return {"TLIST": str(epochs)}
def make_commandline(
    target: Union[str, int, Mapping],
    closest_apparition: Union[bool, str],
    no_fragments: bool,
):
    """makes 'primary' command string for Horizons CGI request'"""
    base = (
        format_geodetic_target(target)
        if isinstance(target, Mapping)
        else str(target)
    )
    suffix = ""
    # closest_apparition may be a plain flag (bool) or an apparition year (str).
    if isinstance(closest_apparition, bool):
        if closest_apparition:
            suffix += " CAP;"
    else:
        suffix += " CAP{:s};".format(closest_apparition)
    if no_fragments:
        suffix += " NOFRAG;"
    return base + suffix
# TODO: add REF_PLANE parameters
def assemble_request_params(
    commandline: str,
    query_type: str,
    extra_precision: bool,
    max_hour_angle: float,
    quantities: str,
    refraction: bool,
    refsystem: str,
    solar_elongation: Sequence[float],
    vec_corr: str,
    vec_table: int,
    ref_plane: str
) -> dict:
    """final-stage assembler for Horizons CGI URL parameters

    Note: `ref_plane` is accepted for interface stability but is not yet
    forwarded to Horizons (see TODO above).
    """
    # Fix: the return annotation was `dict[str]`, which is not a valid
    # parameterization of dict for type checkers; a plain `dict` is used.
    return {
        "batch": 1,
        "TABLE_TYPE": query_type,
        "QUANTITIES": "'" + str(quantities) + "'",
        "COMMAND": '"' + commandline + '"',
        # Horizons expects the elongation window as "min,max".
        "SOLAR_ELONG": '"'
        + str(solar_elongation[0])
        + ","
        + str(solar_elongation[1])
        + '"',
        "LHA_CUTOFF": str(max_hour_angle),
        "CSV_FORMAT": "YES",
        "CAL_FORMAT": "BOTH",
        "ANG_FORMAT": "DEG",
        "APPARENT": {False: "AIRLESS", True: "REFRACTED"}[refraction],
        "REF_SYSTEM": refsystem,
        "EXTRA_PREC": {True: "YES", False: "NO"}[extra_precision],
        # NONE, LT, LT + s
        "VEC_CORR": "'" + vec_corr + "'",
        "VEC_TABLE": "'" + str(vec_table) + "'",
    }
| 31.654545
| 77
| 0.600804
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,142
| 0.327972
|
ceac15e5add44827ffdab3055b716cb3256a3e2a
| 1,327
|
py
|
Python
|
src/aws_lambda_typing/events/kinesis_stream.py
|
chuckwondo/aws-lambda-typing
|
8417ab67f2492be1508fe38b2c34bc106619a56d
|
[
"MIT"
] | 29
|
2021-01-07T13:35:16.000Z
|
2022-03-25T07:20:54.000Z
|
src/aws_lambda_typing/events/kinesis_stream.py
|
chuckwondo/aws-lambda-typing
|
8417ab67f2492be1508fe38b2c34bc106619a56d
|
[
"MIT"
] | 13
|
2021-02-28T00:31:00.000Z
|
2022-03-29T15:24:01.000Z
|
src/aws_lambda_typing/events/kinesis_stream.py
|
chuckwondo/aws-lambda-typing
|
8417ab67f2492be1508fe38b2c34bc106619a56d
|
[
"MIT"
] | 5
|
2021-02-27T13:50:42.000Z
|
2022-01-13T15:05:44.000Z
|
#!/usr/bin/env python
import sys
# TypedDict is in the stdlib typing module from Python 3.8; fall back to
# the typing_extensions backport on older interpreters.
if sys.version_info >= (3, 8):
    from typing import List, TypedDict
else:
    from typing import List
    from typing_extensions import TypedDict
class KinesisStreamKinesis(TypedDict):
    """
    KinesisStreamKinesis

    The `kinesis` payload portion of a stream record; `data` is the
    base64-encoded record body as delivered by the Kinesis event source.

    Attributes:
    ----------
    kinesisSchemaVersion: str

    partitionKey: str

    sequenceNumber: str

    data: str

    approximateArrivalTimestamp: float
    """

    kinesisSchemaVersion: str
    partitionKey: str
    sequenceNumber: str
    data: str
    approximateArrivalTimestamp: float
class KinesisStreamRecord(TypedDict):
    """
    KinesisStreamRecord

    One record of a Kinesis stream event, wrapping the payload with
    event-source metadata.

    Attributes:
    ----------
    kinesis: :py:class:`KinesisStreamKinesis`

    eventSource: str

    eventVersion: str

    eventID: str

    eventName: str

    invokeIdentityArn: str

    awsRegion: str

    eventSourceARN: str
    """

    kinesis: KinesisStreamKinesis
    eventSource: str
    eventVersion: str
    eventID: str
    eventName: str
    invokeIdentityArn: str
    awsRegion: str
    eventSourceARN: str
class KinesisStreamEvent(TypedDict):
    """
    KinesisStreamEvent

    Top-level Lambda event shape for a Kinesis stream trigger.

    https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html

    Attributes:
    ----------
    Records: List[:py:class:`KinesisStreamRecord`]
    """

    Records: List[KinesisStreamRecord]
| 16.382716
| 66
| 0.666164
| 1,134
| 0.854559
| 0
| 0
| 0
| 0
| 0
| 0
| 675
| 0.508666
|
ceac7c7a86d7f596354c9e7181c0d362a2bc878a
| 1,478
|
py
|
Python
|
tests/test_mangling.py
|
ecoinvent/brightway2-parameters
|
0b42466bf33655087e231364a7d677c6c114a046
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_mangling.py
|
ecoinvent/brightway2-parameters
|
0b42466bf33655087e231364a7d677c6c114a046
|
[
"BSD-3-Clause"
] | 1
|
2019-12-26T15:18:49.000Z
|
2019-12-26T15:18:49.000Z
|
tests/test_mangling.py
|
ecoinvent/brightway2-parameters
|
0b42466bf33655087e231364a7d677c6c114a046
|
[
"BSD-3-Clause"
] | 1
|
2021-07-05T12:14:49.000Z
|
2021-07-05T12:14:49.000Z
|
from bw2parameters import *
def test_mangle_formula():
    """Every symbol gets the prefix except the protected names."""
    formula = "log(foo * bar) + 7 / baz"
    expected = '(log((pre__foo * bar)) + (7 / pre__baz))'
    assert mangle_formula(formula, "pre", ['bar']) == expected
def test_prefix_parameter_dict():
    """Prefixing renames keys, rewrites formulas, and records originals."""
    params = {
        'a': {'formula': 'a + b / c', 'foo': True},
        'b': {'formula': '2 * a - exp(7 - b)'},
        'catch': {}
    }
    expected_params = {
        't_a': {'formula': '(t_a + (t_b / c))', 'foo': True, 'original': 'a'},
        't_b': {'formula': '((2 * t_a) - exp((7 - t_b)))', 'original': 'b'},
        't_catch': {'original': 'catch'}
    }
    expected_subs = {'a': 't_a', 'b': 't_b', 'catch': 't_catch'}
    assert prefix_parameter_dict(params, "t_") == (expected_params, expected_subs)
def test_chain_prefix_parameter_dict():
    """Prefixing leaves its input untouched and chains with substitutions."""
    params = {'a': {'formula': 'a + b / c'}}
    snapshot = {'a': {'formula': 'a + b / c'}}
    prefixed_expected = {
        't_a': {'formula': '(t_a + (b / c))', 'original': 'a'},
    }
    assert prefix_parameter_dict(params, "t_") == (prefixed_expected, {'a': 't_a'})
    # The input dict must not be mutated by prefixing.
    assert params == snapshot
    prefixed, _ = prefix_parameter_dict(params, "t_")
    # Substitutions can then be applied one mapping at a time.
    after_b = substitute_in_formulas(prefixed, {'b': 'dog'})
    assert after_b == {'t_a': {'formula': '(t_a + (dog / c))', 'original': 'a'}}
    after_c = substitute_in_formulas(after_b, {'c': 'cat'})
    assert after_c == {'t_a': {'formula': '(t_a + (dog / cat))', 'original': 'a'}}
| 35.190476
| 95
| 0.525034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 531
| 0.359269
|
cead398064b594593f3430fbc788b9476bf86da6
| 150
|
py
|
Python
|
venv/Lib/site-packages/clyent/errors.py
|
GiovanniConserva/TestDeploy
|
7a8242df6fe996b1029497d2d87295d1531b6139
|
[
"BSD-3-Clause"
] | null | null | null |
venv/Lib/site-packages/clyent/errors.py
|
GiovanniConserva/TestDeploy
|
7a8242df6fe996b1029497d2d87295d1531b6139
|
[
"BSD-3-Clause"
] | null | null | null |
venv/Lib/site-packages/clyent/errors.py
|
GiovanniConserva/TestDeploy
|
7a8242df6fe996b1029497d2d87295d1531b6139
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import, print_function, unicode_literals
class ShowHelp(Exception):
    """clyent-specific exception; adds no behavior over Exception."""
class ClyentError(Exception):
    """Base error type for the clyent package; adds no behavior over Exception."""
| 18.75
| 72
| 0.8
| 73
| 0.486667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
ceadbfc8ec08afd61feb6385ed4d339e585d1115
| 538
|
py
|
Python
|
exercises/de/test_01_07.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085
|
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/de/test_01_07.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79
|
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/de/test_01_07.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361
|
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
def test():
    """Exercise checker: verifies the learner's solution loads the German
    'core_news_sm' model via spacy.load, processes the text, and prints
    doc.text. Assertion messages are learner-facing German feedback and
    must stay as-is. Relies on grader-injected globals __solution__,
    nlp, and __msg__.
    """
    assert "spacy.load" in __solution__, "Rufst du spacy.load auf?"
    assert nlp.meta["lang"] == "de", "Lädst du das korrekte Modell?"
    assert nlp.meta["name"] == "core_news_sm", "Lädst du das korrekte Modell?"
    assert "nlp(text)" in __solution__, "Verarbeitest du den Text korrekt?"
    assert "print(doc.text)" in __solution__, "Druckst du den Text des Doc?"
    __msg__.good(
        "Gut gemacht! Jetzt wo du das Laden von Modellen geübt hast, lass uns "
        "mal ein paar ihrer Vorhersagen anschauen."
    )
| 44.833333
| 79
| 0.669145
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 340
| 0.628466
|
ceaeab5eb737a59c0b5e6c14b392452d4ffec67b
| 2,482
|
py
|
Python
|
abm-predator-prey.py
|
RachidStat/PyCX
|
a1a597e61d03b25cf138dd11ab136db8202e1243
|
[
"BSD-2-Clause-FreeBSD"
] | 176
|
2019-12-18T11:44:28.000Z
|
2022-03-27T09:09:33.000Z
|
abm-predator-prey.py
|
RachidStat/PyCX
|
a1a597e61d03b25cf138dd11ab136db8202e1243
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2020-03-29T00:51:25.000Z
|
2020-07-19T11:08:32.000Z
|
abm-predator-prey.py
|
RachidStat/PyCX
|
a1a597e61d03b25cf138dd11ab136db8202e1243
|
[
"BSD-2-Clause-FreeBSD"
] | 56
|
2019-12-18T19:04:12.000Z
|
2022-03-22T09:35:33.000Z
|
import pycxsimulator
from pylab import *
import copy as cp
# --- model parameters: population sizes, movement, and vital rates ---
nr = 500. # carrying capacity of rabbits
r_init = 100 # initial rabbit population
mr = 0.03 # magnitude of movement of rabbits
dr = 1.0 # death rate of rabbits when it faces foxes
rr = 0.1 # reproduction rate of rabbits
f_init = 30 # initial fox population
mf = 0.05 # magnitude of movement of foxes
df = 0.1 # death rate of foxes when there is no food
rf = 0.5 # reproduction rate of foxes
cd = 0.02 # radius for collision detection
cdsq = cd ** 2  # squared radius, compared against squared distances
class agent:
    """Bare record type; fields (type, x, y) are attached dynamically."""
def initialize():
    """Populate the global agent list: r_init rabbits, then f_init foxes,
    each at a uniformly random position in the unit square."""
    global agents
    agents = []
    species = ['r'] * r_init + ['f'] * f_init
    for kind in species:
        ag = agent()
        ag.type = kind
        ag.x = random()
        ag.y = random()
        agents.append(ag)
def observe():
    """Scatter-plot the population: rabbits as blue dots, foxes as red circles."""
    global agents
    cla()
    for species, marker in (('r', 'b.'), ('f', 'ro')):
        flock = [ag for ag in agents if ag.type == species]
        if len(flock) > 0:
            plot([ag.x for ag in flock], [ag.y for ag in flock], marker)
    axis('image')
    axis([0, 1, 0, 1])
def update_one_agent():
    """Advance one randomly chosen agent: random walk, then resolve
    predation/starvation and reproduction against nearby agents.
    (Left as-is: the exact order of random() draws defines the model.)"""
    global agents
    if agents == []:
        return
    ag = choice(agents)
    # simulating random movement
    m = mr if ag.type == 'r' else mf
    ag.x += uniform(-m, m)
    ag.y += uniform(-m, m)
    # clamp the position to the unit square
    ag.x = 1 if ag.x > 1 else 0 if ag.x < 0 else ag.x
    ag.y = 1 if ag.y > 1 else 0 if ag.y < 0 else ag.y
    # detecting collision and simulating death or birth
    neighbors = [nb for nb in agents if nb.type != ag.type
                 and (ag.x - nb.x)**2 + (ag.y - nb.y)**2 < cdsq]
    if ag.type == 'r':
        if len(neighbors) > 0: # if there are foxes nearby
            if random() < dr:
                agents.remove(ag)
                return
        # logistic reproduction, limited by the rabbit carrying capacity nr
        if random() < rr*(1-sum([1 for x in agents if x.type == 'r'])/nr):
            agents.append(cp.copy(ag))
    else:
        if len(neighbors) == 0: # if there are no rabbits nearby
            if random() < df:
                agents.remove(ag)
                return
        else: # if there are rabbits nearby
            if random() < rf:
                agents.append(cp.copy(ag))
def update():
    """Run one unit of simulated time as asynchronous per-agent steps;
    each step advances time by 1/len(agents)."""
    global agents
    elapsed = 0.
    while elapsed < 1. and len(agents) > 0:
        elapsed += 1. / len(agents)
        update_one_agent()
# Launch the interactive PyCX GUI with the simulation callbacks.
pycxsimulator.GUI().start(func=[initialize, observe, update])
| 26.978261
| 74
| 0.553183
| 21
| 0.008461
| 0
| 0
| 0
| 0
| 0
| 0
| 526
| 0.211926
|
ceaf4faaf34ba6494575ecceb758062b6bcf8486
| 4,832
|
py
|
Python
|
models/base_ae.py
|
christopher-beckham/amr
|
1bd67b9b4fb2fcf07cc8faba3c863f5ad5d4c4c0
|
[
"BSD-3-Clause"
] | 35
|
2019-08-27T08:59:53.000Z
|
2021-09-19T15:55:34.000Z
|
models/base_ae.py
|
christopher-beckham/amr
|
1bd67b9b4fb2fcf07cc8faba3c863f5ad5d4c4c0
|
[
"BSD-3-Clause"
] | 4
|
2020-01-14T05:09:31.000Z
|
2020-05-25T20:39:55.000Z
|
models/base_ae.py
|
christopher-beckham/amr
|
1bd67b9b4fb2fcf07cc8faba3c863f5ad5d4c4c0
|
[
"BSD-3-Clause"
] | 3
|
2019-12-24T01:29:49.000Z
|
2020-12-06T01:56:19.000Z
|
import torch
import numpy as np
from collections import OrderedDict
from torch import optim
from itertools import chain
from .base import Base
from torch import nn
class BaseAE(Base):
    """Base autoencoder trainer.

    Owns a single `generator` network (with encode()/decode()), its
    optimizer (stored under self.optim['g']), and a pixel reconstruction
    loss. Subclasses must implement `sample` and `mix`.
    """
    def __init__(self,
                 generator,
                 lamb=1.0,
                 beta=1.0,
                 recon_loss='l1',
                 gan_loss='bce',
                 opt=optim.Adam,
                 opt_args={'lr': 0.0002, 'betas': (0.5, 0.999)},
                 handlers=[]):
        """Build the trainer.

        :param generator: network exposing encode()/decode()/parameters()
        :param lamb: loss weight, stored on self (used by subclasses -- TODO confirm)
        :param beta: loss weight, stored on self (used by subclasses -- TODO confirm)
        :param recon_loss: 'l1', 'l2', or 'bce' pixel reconstruction loss
        :param gan_loss: not referenced in this class; presumably consumed
            by subclasses -- verify before removing
        :param opt: optimizer class applied to trainable generator params
        :param opt_args: kwargs forwarded to `opt`
        :param handlers: callbacks stored for the training loop
        NOTE(review): `opt_args` and `handlers` are mutable default
        arguments shared across instances; safe only if never mutated.
        """
        super(BaseAE, self).__init__()
        use_cuda = True if torch.cuda.is_available() else False
        self.generator = generator
        self.lamb = lamb
        self.beta = beta
        print("lamb = %f" % lamb)
        print("beta = %f" % beta)
        self.optim = {}
        # Optimize only parameters that require gradients.
        optim_g = opt(filter(lambda p: p.requires_grad, self.generator.parameters()), **opt_args)
        self.optim['g'] = optim_g
        self.schedulers = []
        #if scheduler_fn is not None:
        #    for key in self.optim:
        #        self.scheduler[key] = scheduler_fn(
        #            self.optim[key], **scheduler_args)
        self.handlers = handlers
        self.use_cuda = use_cuda
        ##################
        # Loss functions #
        ##################
        # All losses operate on [-1, 1] images; BCE rescales to [0, 1] first.
        if recon_loss == 'l1':
            self.recon_loss = lambda x,y: torch.mean(torch.abs(x-y))
        elif recon_loss == 'l2':
            self.recon_loss = lambda x,y: torch.mean((x-y)**2)
        elif recon_loss == 'bce':
            self.recon_loss = lambda x,y: nn.BCELoss()( (x*0.5 + 0.5), (y*0.5 + 0.5))
        else:
            raise Exception("recon_loss must be either l1 or bce!")
        ########
        # cuda #
        ########
        if self.use_cuda:
            self.generator.cuda()
        self.last_epoch = 0
        self.load_strict = True
    def _get_stats(self, dict_, mode):
        # Collapse per-iteration metric lists into their means.
        stats = OrderedDict({})
        for key in dict_.keys():
            stats[key] = np.mean(dict_[key])
        return stats
    def _train(self):
        # Switch the generator to training mode (enables dropout/batchnorm updates).
        self.generator.train()
    def _eval(self):
        # Switch the generator to inference mode.
        self.generator.eval()
    def reconstruct(self, x_batch):
        """Get reconstruction.

        Runs encode followed by decode under no_grad, in eval mode.

        :param x_batch: input batch (moved to GPU when available)
        :returns: the decoded reconstruction
        :rtype:
        """
        self._eval()
        if self.use_cuda:
            x_batch = x_batch.cuda()
        with torch.no_grad():
            enc = self.generator.encode(x_batch)
            dec = self.generator.decode(enc)
        return dec
    def sampler(self, bs, f, is_2d, p=None):
        """Sampler function, which outputs an alpha which
        you can use to produce a convex combination between
        two examples.

        :param bs: batch size
        :param f: number of units / feature maps at encoding
        :param is_2d: is the bottleneck a 2d tensor?
        :param p: fixed alpha value; when None, each row draws Uniform(0, 1)
        :returns: an alpha of shape `(bs, f)` is `is_2d` is set,
          otherwise `(bs, f, 1, 1)`.
        :rtype:
        """
        # NOTE(review): shp uses 1 (not f) on the non-batch axes, relying on
        # broadcasting -- confirm against the docstring's claimed shape.
        shp = (bs, 1) if is_2d else (bs, 1, 1, 1)
        if p is None:
            alphas = []
            for i in range(bs):
                alpha = np.random.uniform(0, 1)
                alphas.append(alpha)
        else:
            alphas = [p]*bs
        alphas = np.asarray(alphas).reshape(shp)
        alphas = torch.from_numpy(alphas).float()
        if self.use_cuda:
            alphas = alphas.cuda()
        return alphas
    def sample(self, x_batch):
        # Subclass hook: draw samples from the model.
        raise NotImplementedError("This method must be subclassed!")
    def mix(self, enc):
        # Subclass hook: mix encodings (e.g. convex combinations).
        raise NotImplementedError("This method must be subclassed!")
    def prepare_batch(self, batch):
        """Validate a (X, y) pair, cast to float, and move to GPU if enabled."""
        if len(batch) != 2:
            raise Exception("Expected batch to only contain two elements: " +
                            "X_batch and y_batch")
        X_batch = batch[0].float()
        y_batch = batch[1].float() # assuming one-hot encoding
        if self.use_cuda:
            X_batch = X_batch.cuda()
            y_batch = y_batch.cuda()
        return [X_batch, y_batch]
    def save(self, filename, epoch):
        """Serialize generator weights, optimizer states, and epoch to *filename*."""
        dd = {}
        # Save the models.
        dd['g'] = self.generator.state_dict()
        # Save the models' optim state.
        for key in self.optim:
            dd['optim_%s' % key] = self.optim[key].state_dict()
        dd['epoch'] = epoch
        torch.save(dd, filename)
    def load(self, filename):
        """Restore generator weights, optimizer states, and last_epoch
        from a checkpoint written by `save` (CPU-remapped when CUDA is off)."""
        if not self.use_cuda:
            map_location = lambda storage, loc: storage
        else:
            map_location = None
        dd = torch.load(filename,
                        map_location=map_location)
        # Load the models.
        self.generator.load_state_dict(dd['g'], strict=self.load_strict)
        for key in self.optim:
            self.optim[key].load_state_dict(dd['optim_%s' % key])
        self.last_epoch = dd['epoch']
| 31.174194
| 97
| 0.528353
| 4,666
| 0.965646
| 0
| 0
| 0
| 0
| 0
| 0
| 1,109
| 0.229512
|
ceafef1d012a2252b4736fc5912d1fe98bb743cd
| 10,608
|
py
|
Python
|
psdet/models/point_detector/utils.py
|
Jiaolong/gcn-parking-slot
|
f8c3b445b186e3a7fd13af1f17fa5ba0336027c7
|
[
"MIT"
] | 56
|
2021-03-24T08:24:27.000Z
|
2022-03-26T13:56:36.000Z
|
psdet/models/point_detector/utils.py
|
Jiaolong/gcn-parking-slot
|
f8c3b445b186e3a7fd13af1f17fa5ba0336027c7
|
[
"MIT"
] | 7
|
2021-04-05T03:55:05.000Z
|
2022-03-08T03:12:20.000Z
|
psdet/models/point_detector/utils.py
|
Jiaolong/gcn-parking-slot
|
f8c3b445b186e3a7fd13af1f17fa5ba0336027c7
|
[
"MIT"
] | 17
|
2021-04-04T02:42:09.000Z
|
2022-03-31T01:48:06.000Z
|
"""Universal network struture unit definition."""
import torch
import math
from torch import nn
import torchvision
from torch.utils import model_zoo
from torchvision.models.resnet import BasicBlock, model_urls, Bottleneck
def define_squeeze_unit(basic_channel_size):
    """Define a 1x1 squeeze convolution with norm and activation."""
    return [
        nn.Conv2d(2 * basic_channel_size, basic_channel_size, kernel_size=1,
                  stride=1, padding=0, bias=False),
        nn.BatchNorm2d(basic_channel_size),
        nn.LeakyReLU(0.1),
    ]
def define_expand_unit(basic_channel_size):
    """Define a 3x3 expand convolution with norm and activation."""
    doubled = 2 * basic_channel_size
    return [
        nn.Conv2d(basic_channel_size, doubled, kernel_size=3,
                  stride=1, padding=1, bias=False),
        nn.BatchNorm2d(doubled),
        nn.LeakyReLU(0.1),
    ]
def define_halve_unit(basic_channel_size):
    """Define a 4x4 stride 2 expand convolution with norm and activation."""
    doubled = 2 * basic_channel_size
    return [
        nn.Conv2d(basic_channel_size, doubled, kernel_size=4,
                  stride=2, padding=1, bias=False),
        nn.BatchNorm2d(doubled),
        nn.LeakyReLU(0.1),
    ]
def define_depthwise_expand_unit(basic_channel_size):
    """Define a pointwise-then-depthwise 3x3 expand unit with norms and activations."""
    doubled = 2 * basic_channel_size
    pointwise = nn.Conv2d(basic_channel_size, doubled,
                          kernel_size=1, stride=1, padding=0, bias=False)
    depthwise = nn.Conv2d(doubled, doubled, kernel_size=3,
                          stride=1, padding=1, bias=False, groups=doubled)
    return [
        pointwise, nn.BatchNorm2d(doubled), nn.LeakyReLU(0.1),
        depthwise, nn.BatchNorm2d(doubled), nn.LeakyReLU(0.1),
    ]
def define_detector_block(basic_channel_size):
    """Define a unit composite of a squeeze and expand unit."""
    return (define_squeeze_unit(basic_channel_size)
            + define_expand_unit(basic_channel_size))
class YetAnotherDarknet(nn.modules.Module):
    """Yet another darknet, imitating darknet-53 with depth of darknet-19."""
    def __init__(self, input_channel_size, depth_factor):
        super(YetAnotherDarknet, self).__init__()
        # Stem: 3x3 conv + BN + LeakyReLU at the base width.
        layers = [
            nn.Conv2d(input_channel_size, depth_factor, kernel_size=3,
                      stride=1, padding=1, bias=False),
            nn.BatchNorm2d(depth_factor),
            nn.LeakyReLU(0.1),
        ]
        # Five downsampling stages: each halves resolution and doubles
        # width, followed by 1/1/2/2/1 squeeze-expand detector blocks.
        for detector_repeats in (1, 1, 2, 2, 1):
            layers += define_halve_unit(depth_factor)
            for _ in range(detector_repeats):
                layers += define_detector_block(depth_factor)
            depth_factor *= 2
        self.model = nn.Sequential(*layers)
    def forward(self, x):
        return self.model(x)
# vgg backbone
class VGG(nn.Module):
    """Feature-extraction trunk of a VGG network (no classifier head)."""

    def __init__(self, features, num_classes=1000, init_weights=True):
        super(VGG, self).__init__()
        self.features = features
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        return self.features(x)

    def _initialize_weights(self):
        """He-style init for convs, constants for BN, small normal for FC."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = (module.kernel_size[0] * module.kernel_size[1]
                           * module.out_channels)
                module.weight.data.normal_(0, math.sqrt(2. / fan_out))
                if module.bias is not None:
                    module.bias.data.zero_()
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
            elif isinstance(module, nn.Linear):
                module.weight.data.normal_(0, 0.01)
                module.bias.data.zero_()
# VGG layer configurations: integers are conv output widths, 'M' is a 2x2
# max-pool. 'A' ~ VGG-11, 'B' ~ VGG-13, 'D' ~ VGG-16, 'E' ~ VGG-19.
# NOTE(review): 'D' ends in 1024 channels instead of the standard 512 —
# presumably a deliberate widening for this project; confirm before reuse.
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 1024, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def make_layers(cfg, batch_norm=False):
    """Build a VGG feature stack from a config list.

    Integer entries become 3x3 convs (optionally with BatchNorm) followed by
    ReLU; 'M' entries become 2x2 max-pooling layers. Input is assumed RGB.
    """
    modules = []
    channels = 3
    for entry in cfg:
        if entry == 'M':
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        modules.append(nn.Conv2d(channels, entry, kernel_size=3, padding=1))
        if batch_norm:
            modules.append(nn.BatchNorm2d(entry))
        modules.append(nn.ReLU(inplace=True))
        channels = entry
    return nn.Sequential(*modules)
def vgg16(pretrained=False, **kwargs):
    """Build VGG-16 (configuration 'D').

    Args:
        pretrained (bool): If True, load ImageNet weights from the model zoo.
    """
    if pretrained:
        # Pretrained weights overwrite everything, so skip random init.
        kwargs['init_weights'] = False
    features = make_layers(cfg['D'])
    model = VGG(features, **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['vgg16'])
        model.load_state_dict(state)
    return model
class ResNet18(nn.Module):
    """ResNet-18-style trunk.

    NOTE: layer4 is widened to 1024 planes instead of the standard 512.
    """

    def __init__(self, block, layers, aux_classes=1000, classes=100, domains=3):
        self.inplanes = 64
        super(ResNet18, self).__init__()
        # Stem: 7x7 stride-2 conv + BN + ReLU, then 3x3 stride-2 max-pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 1024, layers[3], stride=2)
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual units; the first may downsample/re-width."""
        out_channels = planes * block.expansion
        downsample = None
        if stride != 1 or self.inplanes != out_channels:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, out_channels,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels),
            )
        units = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = out_channels
        for _ in range(blocks - 1):
            units.append(block(self.inplanes, planes))
        return nn.Sequential(*units)

    def is_patch_based(self):
        return False

    def forward(self, x, **kwargs):
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
        return x
def resnet18(pretrained=False, **kwargs):
    """Construct a ResNet-18 trunk.

    Args:
        pretrained (bool): If True, load ImageNet weights from the model zoo.
    """
    model = ResNet18(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        # strict=False tolerates checkpoint keys that differ from this
        # (non-standard) architecture.
        state = model_zoo.load_url(model_urls['resnet18'])
        model.load_state_dict(state, strict=False)
    return model
class ResNet50(nn.Module):
    """ResNet-50-style trunk.

    NOTE: layer4 uses 256 planes instead of the standard 512.
    """

    def __init__(self, block, layers, aux_classes=1000, classes=100, domains=3):
        self.inplanes = 64
        super(ResNet50, self).__init__()
        # Stem: 7x7 stride-2 conv + BN + ReLU, then 3x3 stride-2 max-pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 256, layers[3], stride=2)
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual units; the first may downsample/re-width."""
        out_channels = planes * block.expansion
        downsample = None
        if stride != 1 or self.inplanes != out_channels:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, out_channels,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels),
            )
        units = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = out_channels
        for _ in range(blocks - 1):
            units.append(block(self.inplanes, planes))
        return nn.Sequential(*units)

    def is_patch_based(self):
        return False

    def forward(self, x, **kwargs):
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
        return x
def resnet50(pretrained=False, **kwargs):
    """Construct a ResNet-50 trunk.

    Args:
        pretrained (bool): If True, load ImageNet weights from the model zoo.
    """
    model = ResNet50(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        # strict=False tolerates checkpoint keys that differ from this
        # (non-standard) architecture.
        state = model_zoo.load_url(model_urls['resnet50'])
        model.load_state_dict(state, strict=False)
    return model
| 36.961672
| 113
| 0.607089
| 6,308
| 0.594646
| 0
| 0
| 0
| 0
| 0
| 0
| 1,031
| 0.097191
|
ceb07de7a23c054666d87af2b849db00c172593e
| 819
|
py
|
Python
|
Config/Texts/NFSW/NFSW.py
|
amiralirj/DarkHelper
|
386eea58eb6b9766d6f900a83f87eeac0b8f09c2
|
[
"MIT"
] | 34
|
2021-08-05T12:41:18.000Z
|
2021-11-30T22:23:20.000Z
|
Config/Texts/NFSW/NFSW.py
|
amiralirj/DarkHelper
|
386eea58eb6b9766d6f900a83f87eeac0b8f09c2
|
[
"MIT"
] | 2
|
2021-08-29T10:32:02.000Z
|
2021-08-31T12:10:29.000Z
|
Config/Texts/NFSW/NFSW.py
|
amiralirj/DarkHelper
|
386eea58eb6b9766d6f900a83f87eeac0b8f09c2
|
[
"MIT"
] | 5
|
2021-08-07T07:41:44.000Z
|
2021-08-20T13:52:36.000Z
|
# Blocklist of NSFW keywords (Persian script and romanized transliterations)
# matched against message text. Some entries deliberately include leading or
# trailing spaces to match whole-word occurrences — do not strip them.
NFSW_Texts = [
    'سکس'
    ,'گایید'
    ,' کص'
    ,'جنده'
    ,'کیر'
    ,'jnde'
    ,'jende'
    ,'kos'
    ,'pussy'
    ,'kir'
    ,'lashi'
    ,'لاشی'
    ,'jakesh'
    ,'جاکش'
    ,'مادر خراب'
    ,'madar kharab'
    ,'mde kharab'
    ,'khar kose'
    ,'fuck'
    ,'bitch'
    ,'haroomzade'
    ,'حرومی'
    ,'حرامزاده'
    ,'حرومزاده'
    ,'جندس'
    ,'کصه '
]
# Blocklist of NSFW keywords checked against user/chat display names
# (Persian plus one emoji sequence).
NFSW_Names=[
    'خاله'
    ,'جنده'
    ,"کص"
    ,"کیر"
    ,"ساعتی"
    ,"اوف"
    ,"💦💦💦💦"
    ,"سوپر"
    ,"فیلم"
    ,"بیو"
    ,"حضوری"
    ,"مکان"
]
# Maps detector label keys to human-readable category descriptions.
# NOTE(review): keys look like output classes of an image NSFW classifier;
# confirm against the model that produces them before editing.
Porn={'dick':'Male Genitalia - Exposed',
      'pussy':'Female Genitalia - Exposed',
      'coveredpossy':'Female Genitalia - Covered',
      'fboobs':'Female Breast - Exposed',
      'mboobs':'Male Breast - Exposed',
      'coveredboobs':'Female Breast - Covered',
      'stomack':'Male Breast - Covered',
      'baghal':'Male Breast - Exposed',
      'ass':'Buttocks - Exposed',
      'feet':'404NotFound',
      'coveredass':'Buttocks - Covered'}
| 14.625
| 45
| 0.577534
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 731
| 0.783494
|
ceb45da6d8703038dcca50551f842da1aba2b40e
| 14,784
|
py
|
Python
|
ml_collections/config_dict/tests/frozen_config_dict_test.py
|
wyddmw/ViT-pytorch-1
|
81dd3c43880d0f641ec8e15d8226035a358e78fc
|
[
"Apache-2.0"
] | 311
|
2020-08-25T14:44:55.000Z
|
2022-03-30T17:19:45.000Z
|
ml_collections/config_dict/tests/frozen_config_dict_test.py
|
wyddmw/ViT-pytorch-1
|
81dd3c43880d0f641ec8e15d8226035a358e78fc
|
[
"Apache-2.0"
] | 9
|
2020-11-14T04:00:23.000Z
|
2022-02-18T21:03:33.000Z
|
ml_collections/config_dict/tests/frozen_config_dict_test.py
|
wyddmw/ViT-pytorch-1
|
81dd3c43880d0f641ec8e15d8226035a358e78fc
|
[
"Apache-2.0"
] | 19
|
2020-08-25T21:52:30.000Z
|
2022-03-13T22:25:20.000Z
|
# Copyright 2021 The ML Collections Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for ml_collections.FrozenConfigDict."""
from collections import abc as collections_abc
import copy
import pickle
from absl.testing import absltest
import ml_collections
# Canonical nested fixture exercising every container type FrozenConfigDict
# must handle: lists, tuples, sets, frozensets, nested dicts, mixed
# tuple/list nesting, and a FieldReference.
_TEST_DICT = {
    'int': 2,
    'list': [1, 2],
    'nested_list': [[1, [2]]],
    'set': {1, 2},
    'tuple': (1, 2),
    'frozenset': frozenset({1, 2}),
    'dict': {
        'float': -1.23,
        'list': [1, 2],
        'dict': {},
        'tuple_containing_list': (1, 2, (3, [4, 5], (6, 7))),
        'list_containing_tuple': [1, 2, [3, 4], (5, 6)],
    },
    'ref': ml_collections.FieldReference({'int': 0})
}
def _test_dict_deepcopy():
  """Return a deep copy of _TEST_DICT that tests may freely mutate."""
  return copy.deepcopy(_TEST_DICT)
def _test_configdict():
  """Return a fresh ConfigDict built from _TEST_DICT."""
  return ml_collections.ConfigDict(_TEST_DICT)
def _test_frozenconfigdict():
  """Return a fresh FrozenConfigDict built from _TEST_DICT."""
  return ml_collections.FrozenConfigDict(_TEST_DICT)
class FrozenConfigDictTest(absltest.TestCase):
  """Tests FrozenConfigDict in config flags library."""
  def assertFrozenRaisesValueError(self, input_list):
    """Assert initialization on all elements of input_list raise ValueError."""
    for initial_dictionary in input_list:
      with self.assertRaises(ValueError):
        _ = ml_collections.FrozenConfigDict(initial_dictionary)
  def testBasicEquality(self):
    """Tests basic equality with different types of initialization."""
    fcd = _test_frozenconfigdict()
    fcd_cd = ml_collections.FrozenConfigDict(_test_configdict())
    fcd_fcd = ml_collections.FrozenConfigDict(fcd)
    self.assertEqual(fcd, fcd_cd)
    self.assertEqual(fcd, fcd_fcd)
  def testImmutability(self):
    """Tests immutability of frozen config."""
    fcd = _test_frozenconfigdict()
    # Mutable containers are converted to immutable equivalents on freeze.
    self.assertEqual(fcd.list, tuple(_TEST_DICT['list']))
    self.assertEqual(fcd.tuple, _TEST_DICT['tuple'])
    self.assertEqual(fcd.set, frozenset(_TEST_DICT['set']))
    self.assertEqual(fcd.frozenset, _TEST_DICT['frozenset'])
    # Must manually check set to frozenset conversion, since Python == does not
    self.assertIsInstance(fcd.set, frozenset)
    self.assertEqual(fcd.dict.list, tuple(_TEST_DICT['dict']['list']))
    self.assertNotEqual(fcd.dict.tuple_containing_list,
                        _TEST_DICT['dict']['tuple_containing_list'])
    self.assertEqual(fcd.dict.tuple_containing_list[2][1],
                     tuple(_TEST_DICT['dict']['tuple_containing_list'][2][1]))
    self.assertIsInstance(fcd.dict, ml_collections.FrozenConfigDict)
    # All mutation paths (attribute set, item set, deletion) must fail.
    with self.assertRaises(AttributeError):
      fcd.newitem = 0
    with self.assertRaises(AttributeError):
      fcd.dict.int = 0
    with self.assertRaises(AttributeError):
      fcd['newitem'] = 0
    with self.assertRaises(AttributeError):
      del fcd.int
    with self.assertRaises(AttributeError):
      del fcd['int']
  def testLockAndFreeze(self):
    """Ensures .lock() and .freeze() raise errors."""
    fcd = _test_frozenconfigdict()
    self.assertFalse(fcd.is_locked)
    self.assertFalse(fcd.as_configdict().is_locked)
    with self.assertRaises(AttributeError):
      fcd.lock()
    with self.assertRaises(AttributeError):
      fcd.unlock()
    with self.assertRaises(AttributeError):
      fcd.freeze()
    with self.assertRaises(AttributeError):
      fcd.unfreeze()
  def testInitConfigDict(self):
    """Tests that ConfigDict initialization handles FrozenConfigDict.
    Initializing a ConfigDict on a dictionary with FrozenConfigDict values
    should unfreeze these values.
    """
    # 'ref' is removed because FieldReference equality is not at issue here.
    dict_without_fcd_node = _test_dict_deepcopy()
    dict_without_fcd_node.pop('ref')
    dict_with_fcd_node = copy.deepcopy(dict_without_fcd_node)
    dict_with_fcd_node['dict'] = ml_collections.FrozenConfigDict(
        dict_with_fcd_node['dict'])
    cd_without_fcd_node = ml_collections.ConfigDict(dict_without_fcd_node)
    cd_with_fcd_node = ml_collections.ConfigDict(dict_with_fcd_node)
    fcd_without_fcd_node = ml_collections.FrozenConfigDict(
        dict_without_fcd_node)
    fcd_with_fcd_node = ml_collections.FrozenConfigDict(dict_with_fcd_node)
    self.assertEqual(cd_without_fcd_node, cd_with_fcd_node)
    self.assertEqual(fcd_without_fcd_node, fcd_with_fcd_node)
  def testInitCopying(self):
    """Tests that initialization copies when and only when necessary.
    Ensures copying only occurs when converting mutable type to immutable type,
    regardless of whether the FrozenConfigDict is initialized by a dict or a
    FrozenConfigDict. Also ensures no copying occurs when converting from
    FrozenConfigDict back to ConfigDict.
    """
    fcd = _test_frozenconfigdict()
    # These should be uncopied when creating fcd
    fcd_unchanged_from_test_dict = [
        (_TEST_DICT['tuple'], fcd.tuple),
        (_TEST_DICT['frozenset'], fcd.frozenset),
        (_TEST_DICT['dict']['tuple_containing_list'][2][2],
         fcd.dict.tuple_containing_list[2][2]),
        (_TEST_DICT['dict']['list_containing_tuple'][3],
         fcd.dict.list_containing_tuple[3])
    ]
    # These should be copied when creating fcd
    fcd_different_from_test_dict = [
        (_TEST_DICT['list'], fcd.list),
        (_TEST_DICT['dict']['tuple_containing_list'][2][1],
         fcd.dict.tuple_containing_list[2][1])
    ]
    # Identity (id) comparison distinguishes copies from shared objects.
    for (x, y) in fcd_unchanged_from_test_dict:
      self.assertEqual(id(x), id(y))
    for (x, y) in fcd_different_from_test_dict:
      self.assertNotEqual(id(x), id(y))
    # Also make sure that converting back to ConfigDict makes no copies
    self.assertEqual(
        id(_TEST_DICT['dict']['tuple_containing_list']),
        id(ml_collections.ConfigDict(fcd).dict.tuple_containing_list))
  def testAsConfigDict(self):
    """Tests that converting FrozenConfigDict to ConfigDict works correctly.
    In particular, ensures that FrozenConfigDict does the inverse of ConfigDict
    regarding type_safe, lock, and attribute mutability.
    """
    # First ensure conversion to ConfigDict works on empty FrozenConfigDict
    self.assertEqual(
        ml_collections.ConfigDict(ml_collections.FrozenConfigDict()),
        ml_collections.ConfigDict())
    cd = _test_configdict()
    cd_fcd_cd = ml_collections.ConfigDict(ml_collections.FrozenConfigDict(cd))
    self.assertEqual(cd, cd_fcd_cd)
    # Make sure locking is respected
    cd.lock()
    self.assertEqual(
        cd, ml_collections.ConfigDict(ml_collections.FrozenConfigDict(cd)))
    # Make sure type_safe is respected
    cd = ml_collections.ConfigDict(_TEST_DICT, type_safe=False)
    self.assertEqual(
        cd, ml_collections.ConfigDict(ml_collections.FrozenConfigDict(cd)))
  def testInitSelfReferencing(self):
    """Ensure initialization fails on self-referencing dicts."""
    self_ref = {}
    self_ref['self'] = self_ref
    parent_ref = {'dict': {}}
    parent_ref['dict']['parent'] = parent_ref
    tuple_parent_ref = {'dict': {}}
    tuple_parent_ref['dict']['tuple'] = (1, 2, tuple_parent_ref)
    attribute_cycle = {'dict': copy.deepcopy(self_ref)}
    self.assertFrozenRaisesValueError(
        [self_ref, parent_ref, tuple_parent_ref, attribute_cycle])
  def testInitCycles(self):
    """Ensure initialization fails if an attribute of input is cyclic."""
    inner_cyclic_list = [1, 2]
    cyclic_list = [3, inner_cyclic_list]
    inner_cyclic_list.append(cyclic_list)
    cyclic_tuple = tuple(cyclic_list)
    test_dict_cyclic_list = _test_dict_deepcopy()
    test_dict_cyclic_tuple = _test_dict_deepcopy()
    test_dict_cyclic_list['cyclic_list'] = cyclic_list
    test_dict_cyclic_tuple['dict']['cyclic_tuple'] = cyclic_tuple
    self.assertFrozenRaisesValueError(
        [test_dict_cyclic_list, test_dict_cyclic_tuple])
  def testInitDictInList(self):
    """Ensure initialization fails on dict and ConfigDict in lists/tuples."""
    list_containing_dict = {'list': [1, 2, 3, {'a': 4, 'b': 5}]}
    tuple_containing_dict = {'tuple': (1, 2, 3, {'a': 4, 'b': 5})}
    list_containing_cd = {'list': [1, 2, 3, _test_configdict()]}
    tuple_containing_cd = {'tuple': (1, 2, 3, _test_configdict())}
    fr_containing_list_containing_dict = {
        'fr': ml_collections.FieldReference([1, {
            'a': 2
        }])
    }
    self.assertFrozenRaisesValueError([
        list_containing_dict, tuple_containing_dict, list_containing_cd,
        tuple_containing_cd, fr_containing_list_containing_dict
    ])
  def testInitFieldReferenceInList(self):
    """Ensure initialization fails on FieldReferences in lists/tuples."""
    list_containing_fr = {'list': [1, 2, 3, ml_collections.FieldReference(4)]}
    tuple_containing_fr = {
        'tuple': (1, 2, 3, ml_collections.FieldReference('a'))
    }
    self.assertFrozenRaisesValueError([list_containing_fr, tuple_containing_fr])
  def testInitInvalidAttributeName(self):
    """Ensure initialization fails on attributes with invalid names."""
    dot_name = {'dot.name': None}
    immutable_name = {'__hash__': None}
    with self.assertRaises(ValueError):
      ml_collections.FrozenConfigDict(dot_name)
    with self.assertRaises(AttributeError):
      ml_collections.FrozenConfigDict(immutable_name)
  def testFieldReferenceResolved(self):
    """Tests that FieldReferences are resolved."""
    cfg = ml_collections.ConfigDict({'fr': ml_collections.FieldReference(1)})
    frozen_cfg = ml_collections.FrozenConfigDict(cfg)
    self.assertNotIsInstance(frozen_cfg._fields['fr'],
                             ml_collections.FieldReference)
    hash(frozen_cfg)  # with FieldReference resolved, frozen_cfg is hashable
  def testFieldReferenceCycle(self):
    """Tests that FieldReferences may not contain reference cycles."""
    frozenset_fr = {'frozenset': frozenset({1, 2})}
    frozenset_fr['fr'] = ml_collections.FieldReference(
        frozenset_fr['frozenset'])
    list_fr = {'list': [1, 2]}
    list_fr['fr'] = ml_collections.FieldReference(list_fr['list'])
    cyclic_fr = {'a': 1}
    cyclic_fr['fr'] = ml_collections.FieldReference(cyclic_fr)
    cyclic_fr_parent = {'dict': {}}
    cyclic_fr_parent['dict']['fr'] = ml_collections.FieldReference(
        cyclic_fr_parent)
    # FieldReference is allowed to point to non-cyclic objects:
    _ = ml_collections.FrozenConfigDict(frozenset_fr)
    _ = ml_collections.FrozenConfigDict(list_fr)
    # But not cycles:
    self.assertFrozenRaisesValueError([cyclic_fr, cyclic_fr_parent])
  def testDeepCopy(self):
    """Ensure deepcopy works and does not affect equality."""
    fcd = _test_frozenconfigdict()
    fcd_deepcopy = copy.deepcopy(fcd)
    self.assertEqual(fcd, fcd_deepcopy)
  def testEquals(self):
    """Tests that __eq__() respects hidden mutability."""
    fcd = _test_frozenconfigdict()
    # First, ensure __eq__() returns False when comparing to other types
    self.assertNotEqual(fcd, (1, 2))
    self.assertNotEqual(fcd, fcd.as_configdict())
    list_to_tuple = _test_dict_deepcopy()
    list_to_tuple['list'] = tuple(list_to_tuple['list'])
    fcd_list_to_tuple = ml_collections.FrozenConfigDict(list_to_tuple)
    set_to_frozenset = _test_dict_deepcopy()
    set_to_frozenset['set'] = frozenset(set_to_frozenset['set'])
    fcd_set_to_frozenset = ml_collections.FrozenConfigDict(set_to_frozenset)
    self.assertNotEqual(fcd, fcd_list_to_tuple)
    # Because set == frozenset in Python:
    self.assertEqual(fcd, fcd_set_to_frozenset)
    # Items are not affected by hidden mutability
    self.assertCountEqual(fcd.items(), fcd_list_to_tuple.items())
    self.assertCountEqual(fcd.items(), fcd_set_to_frozenset.items())
  def testEqualsAsConfigDict(self):
    """Tests that eq_as_configdict respects hidden mutability but not type."""
    fcd = _test_frozenconfigdict()
    # First, ensure eq_as_configdict() returns True with an equal ConfigDict but
    # False for other types.
    self.assertFalse(fcd.eq_as_configdict([1, 2]))
    self.assertTrue(fcd.eq_as_configdict(fcd.as_configdict()))
    empty_fcd = ml_collections.FrozenConfigDict()
    self.assertTrue(empty_fcd.eq_as_configdict(ml_collections.ConfigDict()))
    # Now, ensure it has the same immutability detection as __eq__().
    list_to_tuple = _test_dict_deepcopy()
    list_to_tuple['list'] = tuple(list_to_tuple['list'])
    fcd_list_to_tuple = ml_collections.FrozenConfigDict(list_to_tuple)
    set_to_frozenset = _test_dict_deepcopy()
    set_to_frozenset['set'] = frozenset(set_to_frozenset['set'])
    fcd_set_to_frozenset = ml_collections.FrozenConfigDict(set_to_frozenset)
    self.assertFalse(fcd.eq_as_configdict(fcd_list_to_tuple))
    # Because set == frozenset in Python:
    self.assertTrue(fcd.eq_as_configdict(fcd_set_to_frozenset))
  def testHash(self):
    """Ensures __hash__() respects hidden mutability."""
    list_to_tuple = _test_dict_deepcopy()
    list_to_tuple['list'] = tuple(list_to_tuple['list'])
    self.assertEqual(
        hash(_test_frozenconfigdict()),
        hash(ml_collections.FrozenConfigDict(_test_dict_deepcopy())))
    self.assertNotEqual(
        hash(_test_frozenconfigdict()),
        hash(ml_collections.FrozenConfigDict(list_to_tuple)))
    # Ensure Python realizes FrozenConfigDict is hashable
    self.assertIsInstance(_test_frozenconfigdict(), collections_abc.Hashable)
  def testUnhashableType(self):
    """Ensures __hash__() fails if FrozenConfigDict has unhashable value."""
    unhashable_fcd = ml_collections.FrozenConfigDict(
        {'unhashable': bytearray()})
    with self.assertRaises(TypeError):
      hash(unhashable_fcd)
  def testToDict(self):
    """Ensure to_dict() does not care about hidden mutability."""
    list_to_tuple = _test_dict_deepcopy()
    list_to_tuple['list'] = tuple(list_to_tuple['list'])
    self.assertEqual(_test_frozenconfigdict().to_dict(),
                     ml_collections.FrozenConfigDict(list_to_tuple).to_dict())
  def testPickle(self):
    """Make sure FrozenConfigDict can be dumped and loaded with pickle."""
    fcd = _test_frozenconfigdict()
    locked_fcd = ml_collections.FrozenConfigDict(_test_configdict().lock())
    unpickled_fcd = pickle.loads(pickle.dumps(fcd))
    unpickled_locked_fcd = pickle.loads(pickle.dumps(locked_fcd))
    self.assertEqual(fcd, unpickled_fcd)
    self.assertEqual(locked_fcd, unpickled_locked_fcd)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  absltest.main()
| 37.810742
| 80
| 0.718277
| 13,301
| 0.899689
| 0
| 0
| 0
| 0
| 0
| 0
| 4,191
| 0.283482
|
ceb543c431720b3b36051ad70d948b85d8942aeb
| 3,829
|
py
|
Python
|
driver.py
|
kavj/npmd
|
742fcb271e695b24bb062cdc66d455c0f397116d
|
[
"Apache-2.0"
] | null | null | null |
driver.py
|
kavj/npmd
|
742fcb271e695b24bb062cdc66d455c0f397116d
|
[
"Apache-2.0"
] | null | null | null |
driver.py
|
kavj/npmd
|
742fcb271e695b24bb062cdc66d455c0f397116d
|
[
"Apache-2.0"
] | null | null | null |
import numbers
import os
import sys
import typing
import numpy as np
from dataclasses import dataclass
from pathlib import Path
import ir
import type_interface as ti
import type_resolution as tr
from ASTTransform import build_module_ir_and_symbols
from ccodegen import codegen
from canonicalize import NormalizePaths
from errors import error_context, CompilerError
from lowering import loop_lowering
from pretty_printing import pretty_printer
from reaching_check import ReachingCheck
from utils import wrap_input
version = sys.version_info
# Python 2 can't parse a significant
# amount of this code, so error messages ignore it.
# Tuple comparison handles the major version too (the original compared only
# `.minor`, and used an f-string with no placeholders).
if version < (3, 8):
    raise RuntimeError("Python 3.8 or above is required.")
def resolve_types(types):
    """Map each variable name to the compiler-internal type for its input type.

    Args:
        types: mapping of variable name -> external (input) type.
    Returns:
        dict mapping each name to the matching internal type.
    Raises:
        CompilerError: if an input type has no internal counterpart.
    """
    internal_types = {}
    for name, type_ in types.items():
        internal_type = tr.by_input_type.get(type_)
        if internal_type is None:
            msg = f"No internal type matches type {type_}."
            raise CompilerError(msg)
        # Bug fix: previously stored the external `type_`, silently discarding
        # the resolved `internal_type` and making the lookup a no-op.
        internal_types[name] = internal_type
    return internal_types
class CompilerDriver:
    # NOTE(review): this class references names that are not defined in this
    # module (`ModuleBuilder`, `ctx_`, `TypeInfer`) and ignores its `types`
    # argument; instantiating it raises NameError. Presumably in-progress or
    # dead code -- confirm before use.
    def __init__(self, types):
        # `ModuleBuilder` is undefined in this module -- TODO confirm source.
        self.build_module = ModuleBuilder()
        self.normalize_paths = NormalizePaths()
        self.reaching_check = ReachingCheck()
        # `ctx_` is undefined here -- TODO confirm intended context object.
        self.pretty_print = pretty_printer(ctx_)
        self.ctx = ctx_
    def run_pipeline(self, file_name, type_map):
        # Builds the module IR, then normalizes and type-checks each function.
        with error_context():
            module = self.build_module(file_name)
            funcs = module.functions
            print(f"file name: {file_name}\n")
            for index, func in enumerate(funcs):
                func = self.normalize_paths(func)
                func_types = type_map[func.name]
                # `TypeInfer` is undefined in this module -- TODO confirm.
                infer_types = TypeInfer(func_types)
                self.reaching_check(func)
                infer_types(func)
                # symbols[func.name].types = func_types
                funcs[index] = func
            return module
    def pretty_print_tree(self, module, func_name=None):
        # Pretty-prints one function if `func_name` is given, else all of them.
        with self.ctx.module_scope(module.name):
            if func_name is not None:
                with self.ctx.function_scope(func_name):
                    func = module.lookup(func_name)
                    self.pretty_print(func, self.ctx.current_function)
            else:
                for func in module.functions:
                    with self.ctx.function_scope(func.name):
                        self.pretty_print(func, self.ctx.current_function)
def name_and_source_from_path(file_path):
    """Read a source file and return (base file name, file contents)."""
    with open(file_path) as stream:
        source = stream.read()
    return os.path.basename(file_path), source
# stub for now, since we may need to remake typed passes later
# per function or incorporate context management
def build_function_pipeline():
    """Return the ordered list of function-level compiler passes."""
    return [NormalizePaths(), ReachingCheck()]
def compile_module(file_path, types, verbose=False, print_result=True, out=None):
    """Compile the module at `file_path` and emit C code into `out`.

    Args:
        file_path: Path to the source module.
        types: type map passed through to IR construction.
        verbose: print progress information.
        print_result: pretty-print each lowered function.
        out: output directory; defaults to the current working directory.
    Raises:
        CompilerError: if no module name can be derived from `file_path`.
    """
    # pipeline = build_function_pipeline()
    if verbose:
        if file_path:
            # Bug fix: this previously referenced the undefined name
            # `file_name`, raising NameError whenever verbose=True.
            print(f"Compiling: {file_path}:")
    modname = file_path.name
    modname, _ = os.path.splitext(modname)
    if not modname:
        msg = "No module specified"
        raise CompilerError(msg)
    mod_ir, symbols = build_module_ir_and_symbols(file_path, types)
    funcs = []
    norm_paths = NormalizePaths()
    # rc = ReachingCheck()
    for func in mod_ir.functions:
        s = symbols.get(func.name)
        ll = loop_lowering(s)
        func = norm_paths(func)
        func = ll(func)
        funcs.append(func)
        if print_result:
            from pretty_printing import pretty_printer
            pp = pretty_printer()
            pp(func, s)
    if out is None:
        # try in same folder
        out = Path.cwd()
    codegen(out, funcs, symbols, modname)
| 31.130081
| 81
| 0.655524
| 1,393
| 0.363803
| 0
| 0
| 0
| 0
| 0
| 0
| 466
| 0.121703
|
ceb62f0313de2d8b1490179cab386a903cdaa203
| 20,552
|
py
|
Python
|
ipt/ipt_hough_circles_detector.py
|
tpmp-inra/ipapi
|
b0f6be8960a20dbf95ef9df96efdd22bd6e031c5
|
[
"MIT"
] | 1
|
2020-06-30T06:53:36.000Z
|
2020-06-30T06:53:36.000Z
|
ipt/ipt_hough_circles_detector.py
|
tpmp-inra/ipapi
|
b0f6be8960a20dbf95ef9df96efdd22bd6e031c5
|
[
"MIT"
] | null | null | null |
ipt/ipt_hough_circles_detector.py
|
tpmp-inra/ipapi
|
b0f6be8960a20dbf95ef9df96efdd22bd6e031c5
|
[
"MIT"
] | null | null | null |
import os
import pickle
import logging
logger = logging.getLogger(__name__)
import cv2
import numpy as np
from skimage.transform import hough_circle, hough_circle_peaks
import ipso_phen.ipapi.base.ip_common as ipc
from ipso_phen.ipapi.base.ipt_abstract import IptBase
from ipso_phen.ipapi.ipt.ipt_edge_detector import IptEdgeDetector
from ipso_phen.ipapi.tools.regions import (
RectangleRegion,
CircleRegion,
AnnulusRegion,
Point,
)
from ipso_phen.ipapi.tools.folders import ipso_folders
class IptHoughCircles(IptBase):
    def build_params(self):
        """Declare the tool's UI parameters.

        NOTE: registration order presumably defines display order in the UI,
        so keep these calls in sequence.
        """
        # Caching / input-source selection.
        self.add_checkbox(
            name="enable_cache",
            desc="Allow retrieving data from cache",
            default_value=1,
            hint="Data will be retrieved only if params are identical.",
        )
        self.add_combobox(
            name="source_selector",
            desc="Select source",
            default_value="current_image",
            values={"current_image": "Current image", "mask": "Mask"},
            hint="Select which image will be used as source",
        )
        # ROI generation settings for the circles this tool emits.
        self.add_roi_settings(
            default_name="unnamed_roi", default_type="keep", default_shape="rectangle"
        )
        self.add_separator(name="s1")
        self.add_text_input(
            name="crop_roi_name",
            desc="Name of ROI to be used",
            default_value="",
            hint="Circles will only be detected inside ROI",
        )
        # Pre-processing: channel choice, normalization, median smoothing.
        self.add_channel_selector(default_value="l")
        self.add_checkbox(
            name="normalize",
            desc="Normalize channel",
            default_value=0,
            hint="Normalize channel before edge detection",
        )
        self.add_slider(
            name="median_filter_size",
            desc="Median filter size (odd values only)",
            default_value=0,
            minimum=0,
            maximum=51,
        )
        # Hough search space: radius range, granularity and peak selection.
        self.add_spin_box(
            name="min_radius",
            desc="Minimal radius to consider",
            default_value=400,
            minimum=0,
            maximum=2000,
            hint="All circles smaller than this will be ignored",
        )
        self.add_spin_box(
            name="max_radius",
            desc="Maximal radius to consider",
            default_value=1000,
            minimum=0,
            maximum=2000,
            hint="All circles bigger than this will be ignored",
        )
        self.add_spin_box(
            name="annulus_size",
            desc="Annulus secondary radius delta",
            default_value=0,
            minimum=0,
            maximum=2000,
            hint="Annulus size, 0 means full disc",
        )
        self.add_spin_box(
            name="step_radius",
            desc="Radius granularity",
            default_value=10,
            minimum=0,
            maximum=100,
            hint="Steps for scanning radius",
        )
        self.add_spin_box(
            name="max_peaks",
            desc="Maximum number of detected circles",
            default_value=2,
            minimum=-1,
            maximum=200,
            hint="Keeps only n best circles",
        )
        self.add_spin_box(
            name="min_distance",
            desc="Minimum distance between two circles",
            default_value=20,
            minimum=1,
            maximum=2000,
            hint="Remove circles that are too close",
        )
        self.add_spin_box(
            name="line_width",
            desc="Draw line width",
            default_value=4,
            minimum=1,
            maximum=20,
        )
        # Post-filtering: keep a single circle nearest a target position.
        self.add_checkbox(
            name="keep_only_one",
            desc="Keep only closest, if not, ROI is larger circle",
            default_value=0,
        )
        self.add_combobox(
            name="target_position",
            desc="Keep the closest circle closest to",
            default_value="BOTTOM_CENTER",
            values=dict(
                TOP_LEFT="TOP_LEFT",
                TOP_CENTER="TOP_CENTER",
                TOP_RIGHT="TOP_RIGHT",
                MIDDLE_LEFT="MIDDLE_LEFT",
                MIDDLE_CENTER="MIDDLE_CENTER",
                MIDDLE_RIGHT="MIDDLE_RIGHT",
                BOTTOM_LEFT="BOTTOM_LEFT",
                BOTTOM_CENTER="BOTTOM_CENTER",
                BOTTOM_RIGHT="BOTTOM_RIGHT",
            ),
        )
        self.add_slider(
            name="max_dist_to_root",
            desc="Maximum distance to root position",
            default_value=1000,
            minimum=0,
            maximum=4000,
        )
        # Debug / visualization options.
        self.add_checkbox(
            name="draw_boundaries", desc="Draw max and min circles", default_value=0
        )
        self.add_checkbox(
            name="draw_candidates", desc="Draw discarded candidates", default_value=0
        )
        self.add_spin_box(
            name="expand_circle",
            desc="Contract/expand circle",
            default_value=0,
            minimum=-1000,
            maximum=1000,
        )
        # Edge-detection sub-tool and text overlay.
        self.add_checkbox(name="edge_only", desc="Edge detection only", default_value=0)
        self.add_edge_detector()
        self.add_text_overlay()
    def process_wrapper(self, **kwargs):
        """
        Hough circles detector:
        Hough circles detector: Perform a circular Hough transform.
        Can generate ROIs
        Real time: False
        Keyword Arguments (in parentheses, argument name):
            * Allow retrieving data from cache (enable_cache): Data will be retrieved only if params are identical.
            * ROI name (roi_name):
            * Select action linked to ROI (roi_type): no clue
            * Select ROI shape (roi_shape): no clue
            * Target IPT (tool_target): no clue
            * Name of ROI to be used (crop_roi_name): Circles will only be detected inside ROI
            * Channel (channel):
            * Normalize channel (normalize): Normalize channel before edge detection
            * Median filter size (odd values only) (median_filter_size):
            * Minimal radius to consider (min_radius): All circles smaller than this will be ignored
            * Maximal radius to consider (max_radius): All circles bigger than this will be ignored
            * Annulus secondary radius delta (annulus_size): Annulus size, 0 means full disc
            * Radius granularity (step_radius): Steps for scanning radius
            * Maximum number of detected circles (max_peaks): Keeps only n best circles
            * Minimum distance between two circles (min_distance): Remove circles that are too close
            * Draw line width (line_width):
            * Keep only closest, if not, ROI is larger circle (keep_only_one):
            * Keep the closest circle closest to (target_position):
            * Maximum distance to root position (max_dist_to_root):
            * Draw max and min circles (draw_boundaries):
            * Draw discarded candidates (draw_candidates):
            * Contract/expand circle (expand_circle):
            * Edge detection only (edge_only):
            * Select edge detection operator (operator):
            * Canny's sigma for scikit, aperture for OpenCV (canny_sigma): Sigma.
            * Canny's first Threshold (canny_first): First threshold for the hysteresis procedure.
            * Canny's second Threshold (canny_second): Second threshold for the hysteresis procedure.
            * Kernel size (kernel_size):
            * Threshold (threshold): Threshold for kernel based operators
            * Apply threshold (apply_threshold):
            * Overlay text on top of images (text_overlay): Draw description text on top of images
        --------------
        """
        wrapper = self.init_wrapper(**kwargs)
        if wrapper is None:
            return False
        res = False
        try:
            edge_only = self.get_value_of("edge_only") == 1
            # Cache file keyed on a short hash of the parameters; params that
            # only affect ROI output or annulus drawing are excluded so they
            # don't invalidate the detection cache.
            pkl_file = os.path.join(
                ipso_folders.get_path("stored_data"),
                self.get_short_hash(
                    exclude_list=("annulus_size", "roi_name", "tool_target", "roi_shape")
                )
                + ".pkl",
            )
            if (
                (self.get_value_of("enable_cache") == 1)
                and edge_only is False
                and os.path.isfile(pkl_file)
            ):
                # Cache hit: reuse previously detected circles.
                with open(pkl_file, "rb") as f:
                    self.result = pickle.load(f)
                # NOTE(review): this branch reads self.wrapper while the rest
                # of the method uses the local wrapper — confirm both refer to
                # the same object.
                img = self.wrapper.current_image
                line_width = self.get_value_of(
                    "line_width", scale_factor=wrapper.scale_factor
                )
            else:
                # Get the edge
                with IptEdgeDetector(wrapper=wrapper, **self.params_to_dict()) as (
                    res,
                    ed,
                ):
                    if not res:
                        return
                    edges = ed.result
                    if edge_only is True:
                        # Edge-only mode: expose the edge image and stop here.
                        self.result = ed.result
                        self.demo_image = self.result
                        return True
                # Read params
                min_radius = self.get_value_of(
                    "min_radius", scale_factor=wrapper.scale_factor
                )
                max_radius = self.get_value_of(
                    "max_radius", scale_factor=wrapper.scale_factor
                )
                step_radius = self.get_value_of(
                    "step_radius", scale_factor=wrapper.scale_factor
                )
                max_peaks = self.get_value_of("max_peaks")
                # Non-positive max_peaks means "no limit" for hough_circle_peaks.
                max_peaks = max_peaks if max_peaks > 0 else np.inf
                min_distance = self.get_value_of(
                    "min_distance", scale_factor=wrapper.scale_factor
                )
                line_width = self.get_value_of(
                    "line_width", scale_factor=wrapper.scale_factor
                )
                draw_candidates = self.get_value_of("draw_candidates") == 1
                roi = self.get_ipt_roi(
                    wrapper=wrapper,
                    roi_names=[self.get_value_of("crop_roi_name")],
                    selection_mode="all_named",
                )
                roi = roi[0] if roi else None
                if roi is not None:
                    edges = wrapper.crop_to_roi(
                        img=edges,
                        roi=roi,
                        erase_outside_if_circle=True,
                        dbg_str="cropped_edges",
                    )
                input_kind = self.get_value_of("source_selector")
                if input_kind == "mask":
                    img = self.get_mask()
                elif input_kind == "current_image":
                    img = wrapper.current_image
                else:
                    img = None
                    logger.error(f"Unknown source: {input_kind}")
                    self.result = None
                    return
                # Detect circles
                hough_radii = np.arange(min_radius, max_radius, step_radius)
                hough_res = hough_circle(edges, hough_radii)
                # Draw the result
                if len(img.shape) == 2:
                    img = np.dstack((img, img, img))
                # Select the most prominent n circles
                accu, cx, cy, radii = hough_circle_peaks(
                    hough_res,
                    hough_radii,
                    min_xdistance=min_distance,
                    min_ydistance=min_distance,
                    total_num_peaks=max_peaks,
                )
                if roi is not None:
                    # Detection ran on the cropped image: shift circle centers
                    # back to full-image coordinates.
                    roi = roi.as_rect()
                    cx += roi.left
                    cy += roi.top
                if self.get_value_of("keep_only_one") == 1:
                    candidates = [[a, x, y, z] for a, x, y, z in zip(accu, cx, cy, radii)]
                    h, w = img.shape[:2]
                    roi = RectangleRegion(left=0, right=w, top=0, bottom=h)
                    roi_root = roi.point_at_position(
                        self.get_value_of("target_position"), True
                    )
                    min_dist = h * w
                    min_idx = -1
                    min_accu = -1
                    i = 0
                    colors = ipc.build_color_steps(step_count=len(candidates))
                    max_dist_to_root = self.get_value_of(
                        "max_dist_to_root", scale_factor=wrapper.scale_factor
                    )
                    for c_accu, center_x, center_y, radius in candidates:
                        if draw_candidates:
                            cv2.circle(
                                img,
                                (center_x, center_y),
                                radius,
                                colors[i],
                                max(1, line_width // 2),
                            )
                        cur_dist = roi_root.distance_to(Point(center_x, center_y))
                        # Heuristic: keep the candidate closest to the target
                        # point, but let a much stronger accumulator peak win
                        # over a slightly closer one via the ratio test —
                        # TODO confirm intended trade-off.
                        if (
                            (cur_dist < min_dist)
                            and (cur_dist < max_dist_to_root)
                            and (
                                (cur_dist / min_dist > min_accu / c_accu)
                                or (min_accu == -1)
                            )
                        ):
                            min_dist = cur_dist
                            min_idx = i
                            min_accu = c_accu
                        i += 1
                    if min_idx >= 0:
                        # Single winner: [x, y, radius].
                        self.result = [
                            [
                                candidates[min_idx][1],
                                candidates[min_idx][2],
                                candidates[min_idx][3],
                            ]
                        ]
                        self.result[0][2] += self.get_value_of(
                            "expand_circle", scale_factor=wrapper.scale_factor
                        )
                        if self.get_value_of("draw_boundaries") == 1:
                            cv2.circle(
                                img,
                                (roi_root.x, roi_root.y),
                                min_radius,
                                ipc.C_RED,
                                line_width + 4,
                            )
                            cv2.circle(
                                img,
                                (roi_root.x, roi_root.y),
                                max_radius,
                                ipc.C_BLUE,
                                line_width + 4,
                            )
                    else:
                        self.result = None
                else:
                    self.result = [[x, y, r] for x, y, r in zip(cx, cy, radii)]
                if self.get_value_of("enable_cache") == 1:
                    with open(pkl_file, "wb") as f:
                        pickle.dump(self.result, f)
            if self.result is not None:
                # Draw every kept circle (and its inner annulus ring, if any).
                colors = ipc.build_color_steps(step_count=len(self.result))
                i = 0
                annulus_size = self.get_value_of("annulus_size")
                for center_x, center_y, radius in self.result:
                    cv2.circle(img, (center_x, center_y), radius, colors[i], line_width)
                    if annulus_size > 0 and radius - annulus_size > 0:
                        cv2.circle(
                            img,
                            (center_x, center_y),
                            radius - annulus_size,
                            colors[i],
                            line_width,
                        )
                    i += 1
                wrapper.store_image(
                    image=img,
                    text="hough_circles",
                    text_overlay=self.get_value_of("text_overlay") == 1,
                )
                self.demo_image = img
            res = True
        except Exception as e:
            logger.exception(f'Failed to process {self. name}: "{repr(e)}"')
            res = False
        else:
            pass
        finally:
            # NOTE(review): all exceptions are caught above, so this return
            # only funnels every exit path through res; a return inside
            # finally would otherwise suppress in-flight exceptions.
            return res
def generate_roi(self, **kwargs):
wrapper = self.init_wrapper(**kwargs)
if wrapper is None:
return None
if self.process_wrapper(**kwargs):
roi_shape = self.get_value_of("roi_shape")
roi_type = self.get_value_of("roi_type")
roi_name = self.get_value_of("roi_name")
tool_target = self.get_value_of("tool_target")
circles = sorted(self.result, key=lambda circle_: circle_[2])
circle = circles[0]
if roi_shape == "rectangle":
r = CircleRegion(cx=circle[0], cy=circle[1], radius=circle[2]).as_rect()
return RectangleRegion(
left=r.left,
width=r.width,
top=r.top,
height=r.height,
name=roi_name,
tag=roi_type,
target=tool_target,
)
elif roi_shape == "circle":
annulus_size = self.get_value_of("annulus_size")
if annulus_size == 0 or (circle[2] - annulus_size <= 0):
return CircleRegion(
cx=circle[0],
cy=circle[1],
radius=circle[2],
name=roi_name,
tag=roi_type,
target=tool_target,
)
else:
return AnnulusRegion(
cx=circle[0],
cy=circle[1],
radius=circle[2],
in_radius=circle[2] - annulus_size,
name=roi_name,
tag=roi_type,
target=tool_target,
)
else:
return None
else:
return None
def apply_roy(self, **kwargs):
wrapper = self.init_wrapper(**kwargs)
if wrapper is None:
return None
if self.process_wrapper(**kwargs):
circles = sorted(self.result, key=lambda circle_: circle_[2])
circle = circles[0]
roi_name = f"roi_keep_{len(wrapper.rois_list)}"
wrapper.add_circle_roi(circle[0], circle[1], circle[2], roi_name, "keep")
target = kwargs.get("target", "source")
if target == "source":
res = wrapper.apply_rois(wrapper.current_image)
elif target == "mask":
res = wrapper.apply_rois(wrapper.mask)
else:
res = None
logger.error("Unknown ROI target")
wrapper.store_image(res, roi_name, text_overlay=False)
return res
else:
return wrapper.current_image
    @property
    def name(self):
        """Tool display name."""
        return "Hough circles detector"
    @property
    def real_time(self):
        """Real-time preview is only feasible in edge-only mode."""
        return self.get_value_of("edge_only") == 1
    @property
    def result_name(self):
        """Name under which the detection result is stored."""
        return "circles"
    @property
    def output_kind(self):
        """Kind of output produced by this tool."""
        return "data"
    @property
    def use_case(self):
        """Tool families this tool belongs to."""
        return [ipc.ToolFamily.ROI]
    @property
    def description(self):
        """Short description shown in the UI."""
        return "Hough circles detector: Perform a circular Hough transform.\nCan generate ROIs"
    @property
    def input_type(self):
        """Expected input kind: a mask or an image, per the source selector."""
        if self.get_value_of("source_selector") == "mask":
            return ipc.IO_MASK
        else:
            return ipc.IO_IMAGE
    @property
    def output_type(self):
        """Output kind: an image in edge-only mode, otherwise an ROI."""
        if self.get_value_of("edge_only") == 1:
            return ipc.IO_IMAGE  # self.input_type
        else:
            return ipc.IO_ROI
    def apply_test_values_overrides(self, use_cases: tuple = ()):
        """Disable the result cache when running under tests."""
        self.set_value_of("enable_cache", 0)
| 39.371648
| 116
| 0.476109
| 20,017
| 0.973968
| 0
| 0
| 846
| 0.041164
| 0
| 0
| 4,934
| 0.240074
|
ceb677a0c09c58bfcd7c3dccc2e5b38736bebbf5
| 1,278
|
py
|
Python
|
python_sample/cloud_API_endpoint/my_driving/edge_endpoint/db.py
|
alexcourouble/automotive-iot-samples
|
ead3549e052968b7f2c0a30c3787f34e15e373fd
|
[
"Apache-2.0"
] | 11
|
2019-09-02T12:38:05.000Z
|
2021-01-03T17:52:32.000Z
|
python_sample/cloud_API_endpoint/my_driving/edge_endpoint/db.py
|
alexcourouble/automotive-iot-samples
|
ead3549e052968b7f2c0a30c3787f34e15e373fd
|
[
"Apache-2.0"
] | 34
|
2019-12-29T21:31:35.000Z
|
2021-10-06T03:08:21.000Z
|
python_sample/cloud_API_endpoint/my_driving/edge_endpoint/db.py
|
alexcourouble/automotive-iot-samples
|
ead3549e052968b7f2c0a30c3787f34e15e373fd
|
[
"Apache-2.0"
] | 7
|
2019-04-28T22:14:26.000Z
|
2021-02-17T16:58:34.000Z
|
import sqlite3
from flask import current_app, g
def get_db():
    """Return the per-request SQLite connection, opening it on first use.

    The connection is cached on Flask's ``g``, so repeated calls within
    the same request reuse it. The schema is (re-)ensured every time a
    new connection is opened.
    """
    if 'db' in g:
        return g.db
    connection = sqlite3.connect(
        '../data/cloud_db.db',
        detect_types=sqlite3.PARSE_DECLTYPES
    )
    # Row objects allow column access by name.
    connection.row_factory = sqlite3.Row
    g.db = connection
    initialize_DB(connection)
    return g.db
def initialize_DB(db):
    """Ensure the ``events`` table exists on *db*, creating it when missing.

    Each event carries two identifiers: a client-side id generated on the
    edge gateway, and SQLite's implicit rowid
    (http://www.sqlitetutorial.net/sqlite-autoincrement/).
    """
    create_table_sql = """CREATE TABLE IF NOT EXISTS events (client_side_id TEXT, user TEXT, event_timestamp INTEGER, distance TEXT, fuel TEXT);"""
    db.execute(create_table_sql)
def write_event(json_data):
    """Insert one event record from a decoded JSON payload and commit.

    NOTE(review): the ``events`` table has no UNIQUE constraint, so the
    "OR REPLACE" clause never triggers — confirm whether duplicate
    client_side_id rows are intended.
    """
    db = get_db()
    db.execute(
        """INSERT OR REPLACE INTO events VALUES(?,?,?,?,?)""",
        (
            json_data["client_side_id"],
            json_data["user"],
            int(json_data["event_timestamp"]),
            json_data["distance"],
            json_data["fuel"],
        ),
    )
    db.commit()
| 26.081633
| 141
| 0.628326
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 707
| 0.553208
|
ceb6903422d5b1eae99326a4376ddaace1e94411
| 8,467
|
py
|
Python
|
tests/core/test_app.py
|
ShepardZhao/rancher
|
a747ac408ca34fb0bf465276f07557ec43bf9c89
|
[
"Apache-2.0"
] | 1
|
2019-06-16T01:16:56.000Z
|
2019-06-16T01:16:56.000Z
|
tests/core/test_app.py
|
ShepardZhao/rancher
|
a747ac408ca34fb0bf465276f07557ec43bf9c89
|
[
"Apache-2.0"
] | 1
|
2019-05-14T04:08:43.000Z
|
2019-05-14T04:08:43.000Z
|
tests/core/test_app.py
|
ShepardZhao/rancher
|
a747ac408ca34fb0bf465276f07557ec43bf9c89
|
[
"Apache-2.0"
] | null | null | null |
from .common import random_str
from .test_catalog import wait_for_template_to_be_created
import time
def test_app_mysql(admin_pc, admin_mc):
    """Deploy the library mysql chart into a fresh namespace and wait for
    its single workload to appear."""
    client = admin_pc.client
    name = random_str()
    ns = admin_pc.cluster.client.create_namespace(name=random_str(),
                                                  projectId=admin_pc.
                                                  project.id)
    wait_for_template_to_be_created(admin_mc.client, "library")
    # Chart answers: ephemeral storage, ClusterIP service.
    answers = {
        "defaultImage": "true",
        "image": "mysql",
        "imageTag": "5.7.14",
        "mysqlDatabase": "admin",
        "mysqlPassword": "",
        "mysqlUser": "admin",
        "persistence.enabled": "false",
        "persistence.size": "8Gi",
        "persistence.storageClass": "",
        "service.nodePort": "",
        "service.port": "3306",
        "service.type": "ClusterIP"
    }
    client.create_app(
        name=name,
        externalId="catalog://?catalog=library&template=mysql&version=0.3.7&"
                   "namespace=cattle-global-data",
        targetNamespace=ns.name,
        projectId=admin_pc.project.id,
        answers=answers
    )
    wait_for_workload(client, ns.name, count=1)
def test_app_wordpress(admin_pc, admin_mc):
    """Deploy the library wordpress chart (with its bundled mariadb) and
    wait for both workloads to appear."""
    client = admin_pc.client
    name = random_str()
    ns = admin_pc.cluster.client.create_namespace(name=random_str(),
                                                  projectId=admin_pc.
                                                  project.id)
    wait_for_template_to_be_created(admin_mc.client, "library")
    # Chart answers: embedded mariadb, no persistence, NodePort service.
    answers = {
        "defaultImage": "true",
        "externalDatabase.database": "",
        "externalDatabase.host": "",
        "externalDatabase.password": "",
        "externalDatabase.port": "3306",
        "externalDatabase.user": "",
        "image.repository": "bitnami/wordpress",
        "image.tag": "4.9.4",
        "ingress.enabled": "true",
        "ingress.hosts[0].name": "xip.io",
        "mariadb.enabled": "true",
        "mariadb.image.repository": "bitnami/mariadb",
        "mariadb.image.tag": "10.1.32",
        "mariadb.mariadbDatabase": "wordpress",
        "mariadb.mariadbPassword": "",
        "mariadb.mariadbUser": "wordpress",
        "mariadb.persistence.enabled": "false",
        "mariadb.persistence.size": "8Gi",
        "mariadb.persistence.storageClass": "",
        "nodePorts.http": "",
        "nodePorts.https": "",
        "persistence.enabled": "false",
        "persistence.size": "10Gi",
        "persistence.storageClass": "",
        "serviceType": "NodePort",
        "wordpressEmail": "user@example.com",
        "wordpressPassword": "",
        "wordpressUsername": "user"
    }
    external_id = "catalog://?catalog=library&template=wordpress" \
                  "&version=1.0.5&namespace=cattle-global-data"
    client.create_app(
        name=name,
        externalId=external_id,
        targetNamespace=ns.name,
        projectId=admin_pc.project.id,
        answers=answers
    )
    # wordpress + mariadb -> two workloads expected.
    wait_for_workload(client, ns.name, count=2)
def test_prehook_chart(admin_pc, admin_mc):
    """Deploy a chart with a pre-install hook from a custom catalog and
    check that only the hook job is created."""
    client = admin_pc.client
    name = random_str()
    ns = admin_pc.cluster.client.create_namespace(name=random_str(),
                                                  projectId=admin_pc.
                                                  project.id)
    url = "https://github.com/StrongMonkey/charts-1.git"
    catalog = admin_mc.client.create_catalog(name=random_str(),
                                             branch="test",
                                             url=url,
                                             )
    wait_for_template_to_be_created(admin_mc.client, catalog.name)
    external_id = "catalog://?catalog=" + \
                  catalog.name + "&template=busybox&version=0.0.2" \
                  "&namespace=cattle-global-data"
    client.create_app(
        name=name,
        externalId=external_id,
        targetNamespace=ns.name,
        projectId=admin_pc.project.id,
    )
    # it will be only one workload(job), because the deployment has to
    # wait for job to be finished, and it will never finish because we
    # can't create real container
    wait_for_workload(client, ns.name, count=1)
    jobs = client.list_job(namespaceId=ns.id)
    assert len(jobs) == 1
def test_app_namespace_annotation(admin_pc, admin_mc):
    """Check that the namespace's 'cattle.io/appIds' annotation tracks app
    creation and deletion."""
    client = admin_pc.client
    ns = admin_pc.cluster.client.create_namespace(name=random_str(),
                                                  projectId=admin_pc.
                                                  project.id)
    wait_for_template_to_be_created(admin_mc.client, "library")
    app1 = client.create_app(
        name=random_str(),
        externalId="catalog://?catalog=library&template=mysql&version=0.3.7"
                   "&namespace=cattle-global-data",
        targetNamespace=ns.name,
        projectId=admin_pc.project.id,
    )
    wait_for_workload(client, ns.name, count=1)
    external_id = "catalog://?catalog=library&template=wordpress" \
                  "&version=1.0.5&namespace=cattle-global-data"
    app2 = client.create_app(
        name=random_str(),
        externalId=external_id,
        targetNamespace=ns.name,
        projectId=admin_pc.project.id,
    )
    wait_for_workload(client, ns.name, count=3)
    ns = admin_pc.cluster.client.reload(ns)
    ns = wait_for_app_annotation(admin_pc, ns, app1.name)
    ns = wait_for_app_annotation(admin_pc, ns, app2.name)
    client.delete(app1)
    wait_for_app_to_be_deleted(client, app1)
    ns = admin_pc.cluster.client.reload(ns)
    # app1 removed from the annotation, app2 still present.
    assert app1.name not in ns.annotations['cattle.io/appIds']
    assert app2.name in ns.annotations['cattle.io/appIds']
    client.delete(app2)
    wait_for_app_to_be_deleted(client, app2)
    ns = admin_pc.cluster.client.reload(ns)
    # Annotation is dropped entirely once no apps remain.
    assert 'cattle.io/appIds' not in ns.annotations
def wait_for_app_annotation(admin_pc, ns, app_name, timeout=60):
    """Poll (with exponential backoff) until *app_name* appears in the
    namespace's 'cattle.io/appIds' annotation; return the refreshed
    namespace. Raises after *timeout* seconds.
    """
    started = time.time()
    delay = 0.5
    ns = admin_pc.cluster.client.reload(ns)
    while True:
        if app_name in ns.annotations['cattle.io/appIds']:
            return ns
        if time.time() - started > timeout:
            print(ns.annotations)
            raise Exception('Timeout waiting for app annotation')
        time.sleep(delay)
        delay *= 2
        ns = admin_pc.cluster.client.reload(ns)
def test_app_custom_values_file(admin_pc, admin_mc):
    """Deploy docker-registry with a custom values.yaml plus an answers
    override; the answers entry must win over valuesYaml."""
    client = admin_pc.client
    ns = admin_pc.cluster.client.create_namespace(name=random_str(),
                                                  projectId=admin_pc.
                                                  project.id)
    wait_for_template_to_be_created(admin_mc.client, "library")
    # valuesYaml sets replicaCount=2 and tag 2.7; answers overrides the tag.
    values_yaml = "replicaCount: 2\r\nimage:\r\n  " \
                  "repository: registry\r\n  tag: 2.7"
    answers = {
        "image.tag": "2.6"
    }
    app = client.create_app(
        name=random_str(),
        externalId="catalog://?catalog=library&template=docker-registry"
                   "&version=1.6.1&namespace=cattle-global-data",
        targetNamespace=ns.name,
        projectId=admin_pc.project.id,
        valuesYaml=values_yaml,
        answers=answers
    )
    workloads = wait_for_workload(client, ns.name, count=1)
    print(workloads)
    assert workloads.data[0].deploymentStatus.unavailableReplicas == 2
    assert workloads.data[0].containers[0].image == "registry:2.6"
    client.delete(app)
    wait_for_app_to_be_deleted(client, app)
def wait_for_workload(client, ns, timeout=60, count=0):
    """Poll (with exponential backoff) until exactly *count* workloads
    exist in namespace *ns*; return the workload collection. Raises after
    *timeout* seconds.
    """
    started = time.time()
    delay = 0.5
    while True:
        workloads = client.list_workload(namespaceId=ns)
        if len(workloads.data) == count:
            return workloads
        if time.time() - started > timeout:
            print(workloads)
            raise Exception('Timeout waiting for workload service')
        time.sleep(delay)
        delay *= 2
def wait_for_app_to_be_deleted(client, app, timeout=120):
    """Poll (with exponential backoff) until *app* no longer appears in
    client.list_app(); raise AssertionError after *timeout* seconds.
    """
    started = time.time()
    delay = 0.5
    while True:
        if time.time() - started > timeout:
            raise AssertionError(
                "Timed out waiting for apps to be deleted")
        if not any(a.id == app.id for a in client.list_app()):
            return
        time.sleep(delay)
        delay *= 2
| 36.49569
| 77
| 0.591709
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,110
| 0.249203
|
ceb7ed5b699f0ffa10c4137af6c73aac0a124844
| 1,455
|
py
|
Python
|
iotPub.py
|
norikokt/serverless-language-translation
|
51c0fdbf3b8c0c15d5e2208ad9cff147adc59efb
|
[
"Apache-2.0"
] | null | null | null |
iotPub.py
|
norikokt/serverless-language-translation
|
51c0fdbf3b8c0c15d5e2208ad9cff147adc59efb
|
[
"Apache-2.0"
] | null | null | null |
iotPub.py
|
norikokt/serverless-language-translation
|
51c0fdbf3b8c0c15d5e2208ad9cff147adc59efb
|
[
"Apache-2.0"
] | 1
|
2020-07-30T09:25:53.000Z
|
2020-07-30T09:25:53.000Z
|
# Copyright 2018 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
# Watson IoT Platform messaging endpoint settings.
HOST = 'messaging.internetofthings.ibmcloud.com'
PORT = 1883
# str(PORT) is required: concatenating an int directly into a str raises
# TypeError at import time (the original code did exactly that).
PATH = '.' + HOST + ':' + str(PORT) + '/api/v0002/application/types/'
def main(dict):
    """Publish a translation event to the Watson IoT Platform HTTP API.

    The parameter (whose name shadows the builtin ``dict``; kept for
    interface compatibility) carries connection settings (iot_org_id,
    iot_device_id, iot_device_type, iot_auth_token, iot_api_key) and the
    message fields (payload, client, language / sourceLanguage).
    Returns {'msg': payload}.
    """
    iot_org_id = dict['iot_org_id']
    iot_device_id = dict['iot_device_id']
    iot_device_type = dict['iot_device_type']
    iot_auth_token = dict['iot_auth_token']
    iot_api_key = dict['iot_api_key']
    # POST to .../types/<type>/devices/<id>/events/toClients, authenticated
    # with the application API key/token pair.
    # NOTE(review): plain http and port 1883 (normally MQTT) — confirm the
    # intended endpoint/scheme.
    requests.post('http://' + iot_org_id + PATH + iot_device_type +
                  '/devices/' + iot_device_id + '/events/toClients',
                  headers={'Content-Type': 'application/json'},
                  json={
                      'payload': dict['payload'],
                      'client': dict['client'],
                      'language': dict['language'] or dict['sourceLanguage']},
                  auth=(iot_api_key, iot_auth_token))
    return {'msg': dict['payload']}
| 39.324324
| 76
| 0.651546
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 893
| 0.613746
|
ceb89dab258e22f4133b8db37425e38785178a38
| 6,177
|
py
|
Python
|
awsshell/autocomplete.py
|
bdharang/AWS_SHELL
|
4e84552f367f4da647e10be05795b870c112e3bb
|
[
"Apache-2.0"
] | null | null | null |
awsshell/autocomplete.py
|
bdharang/AWS_SHELL
|
4e84552f367f4da647e10be05795b870c112e3bb
|
[
"Apache-2.0"
] | null | null | null |
awsshell/autocomplete.py
|
bdharang/AWS_SHELL
|
4e84552f367f4da647e10be05795b870c112e3bb
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
from awsshell.fuzzy import fuzzy_search
from awsshell.substring import substring_search
class AWSCLIModelCompleter(object):
    """Autocompletion based on the JSON models for AWS services.
    This class consumes indexed data based on the JSON models from
    AWS service (which we pull through botocore's data loaders).
    """
    def __init__(self, index_data, match_fuzzy=True):
        # index_data: nested dict keyed by command name with 'commands',
        # 'arguments', 'children' and 'argument_metadata' entries.
        self._index = index_data
        self._root_name = 'aws'
        self._global_options = index_data[self._root_name]['arguments']
        # These values mutate as autocompletions occur.
        # They track state to improve the autocompletion speed.
        self._current_name = 'aws'
        self._current = index_data[self._root_name]
        self._last_position = 0
        self._current_line = ''
        self.last_option = ''
        # This will get populated as a command is completed.
        self.cmd_path = [self._current_name]
        self.match_fuzzy = match_fuzzy
    @property
    def arg_metadata(self):
        # Returns the required arguments for the current level.
        return self._current.get('argument_metadata', {})
    def reset(self):
        # Resets all the state. Called after a user runs
        # a command.
        self._current_name = self._root_name
        self._current = self._index[self._root_name]
        self._last_position = 0
        self.last_option = ''
        self.cmd_path = [self._current_name]
    def autocomplete(self, line):
        """Given a line, return a list of suggestions."""
        current_length = len(line)
        self._current_line = line
        if current_length == 1 and self._last_position > 1:
            # Reset state. This is likely from a user completing
            # a previous command.
            self.reset()
        elif current_length < self._last_position:
            # The user has hit backspace. We'll need to check
            # the current words.
            return self._handle_backspace()
        elif not line:
            return []
        elif current_length != self._last_position + 1:
            # More than one character changed (e.g. a paste): the
            # incremental state is unusable, reparse from scratch.
            return self._complete_from_full_parse()
        # This position is important. We only update the _last_position
        # after we've checked the special cases above where that value
        # matters.
        self._last_position = len(line)
        if line and not line.strip():
            # Special case, the user hits a space on a new line so
            # we autocomplete all the top level commands.
            return self._current['commands']
        last_word = line.split()[-1]
        if last_word in self.arg_metadata or last_word in self._global_options:
            # The last thing we completed was an argument, record
            # this as self.last_arg
            self.last_option = last_word
        if line[-1] == ' ':
            # At this point the user has autocompleted a command
            # or an argument and has hit space. If they've
            # just completed a command, we need to change the
            # current context and traverse into the subcommand.
            # "ec2 "
            # ^--here, need to traverse into "ec2"
            #
            # Otherwise:
            # "ec2 --no-validate-ssl "
            # ^-- here, stay on "ec2" context.
            if not last_word.startswith('-'):
                next_command = self._current['children'].get(last_word)
                if next_command is not None:
                    self._current = next_command
                    self._current_name = last_word
                    self.cmd_path.append(self._current_name)
            elif last_word in self.arg_metadata and \
                    self.arg_metadata[last_word]['example']:
                # Then this is an arg with a shorthand example so we'll
                # suggest that example.
                return [self.arg_metadata[last_word]['example']]
            # Even if we don't change context, we still want to
            # autocomplete all the commands for the current context
            # in either of the above two cases.
            return self._current['commands'][:]
        elif last_word.startswith('-'):
            # TODO: cache this for the duration of the current context.
            # We don't need to recompute this until the args are
            # different.
            all_args = self._get_all_args()
            if self.match_fuzzy:
                return fuzzy_search(last_word, all_args)
            else:
                return substring_search(last_word, all_args)
        if self.match_fuzzy:
            return fuzzy_search(last_word, self._current['commands'])
        else:
            return substring_search(last_word, self._current['commands'])
    def _get_all_args(self):
        # Current-level arguments plus the globals, unless the current
        # level IS the global list (avoid duplicating it).
        if self._current['arguments'] != self._global_options:
            all_args = self._current['arguments'] + self._global_options
        else:
            all_args = self._current['arguments']
        return all_args
    def _handle_backspace(self):
        # Backspace invalidates the incremental state: reparse.
        return self._complete_from_full_parse()
    def _complete_from_full_parse(self):
        # We try to avoid calling this, but this is necessary
        # sometimes. In this scenario, we're resetting everything
        # and starting from the very beginning and reparsing
        # everything.
        # This is a naive implementation for now. This
        # can be optimized.
        self.reset()
        line = self._current_line
        for i in range(1, len(self._current_line)):
            self.autocomplete(line[:i])
        return self.autocomplete(line)
    def _autocomplete_options(self, last_word):
        """Prefix-match argument names at the current level (plus globals)."""
        global_args = []
        # Autocomplete argument names.
        current_arg_completions = [
            cmd for cmd in self._current['arguments']
            if cmd.startswith(last_word)]
        if self._current_name != self._root_name:
            # Also autocomplete global arguments.
            global_args = [
                cmd for cmd in self._global_options if
                cmd.startswith(last_word)]
        return current_arg_completions + global_args
| 41.736486
| 79
| 0.610329
| 6,048
| 0.979116
| 0
| 0
| 159
| 0.025741
| 0
| 0
| 2,140
| 0.346446
|
ceba8a2b9daea8892048e636439497932e869b2b
| 3,809
|
py
|
Python
|
IM920.py
|
Momijinn/IM920MHz_Module
|
1f70a0021a82ba7bf3bf0fd461b01d921be27ea7
|
[
"MIT"
] | 1
|
2021-06-06T03:54:18.000Z
|
2021-06-06T03:54:18.000Z
|
IM920.py
|
Momijinn/IM920MHz_Module
|
1f70a0021a82ba7bf3bf0fd461b01d921be27ea7
|
[
"MIT"
] | null | null | null |
IM920.py
|
Momijinn/IM920MHz_Module
|
1f70a0021a82ba7bf3bf0fd461b01d921be27ea7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
Requires the pyserial package to be installed.
Works on Python 2.x (also verified on Python 3.x).
Creator: Kaname Takano
'''
import serial
import binascii
import signal
import sys
import platform
from serial.tools import list_ports
# Platform switch: pick the serial port the IM920 module is attached to.
if platform.system() == 'Windows':  # Windows: scan COM ports
    ports = list_ports.comports()
    portnumber = None
    for port in ports:
        # Connect when the product ID and vendor ID match (FTDI: 1027/24597).
        if (port.vid == 1027) and (port.pid == 24597):
            portnumber = port.device
            print("connect to " + portnumber)
            break
    if portnumber == None:
        print("not connetc to im920!")
        sys.exit(1)
elif platform.system() == 'Linux':  # Linux: fixed device path
    portnumber = '/dev/ttyUSB0'
'''
ctrl+cの命令
'''
def signal_handler(signal, frame):
    """SIGINT (Ctrl+C) handler: print 'exit' and terminate the process.

    NOTE(review): the first parameter shadows the ``signal`` module inside
    this function; harmless here since the module is not used in the body.
    """
    print('exit')
    sys.exit()
'''
serial.Serialの設定
mybaudrate:ボーレート
'''
def setSerial(mybaudrate):
    """Open and return the IM920 serial port at *mybaudrate*.

    8 data bits, no parity, no flow control, blocking reads; both buffers
    are flushed before the port is handed back.
    """
    com = serial.Serial(
        port = portnumber,
        baudrate = mybaudrate,
        bytesize = serial.EIGHTBITS,
        parity = serial.PARITY_NONE,
        timeout = None,
        xonxoff = False,
        rtscts = False,
        writeTimeout = None,
        dsrdtr = False,
        interCharTimeout = None)
    # Clear the buffers
    com.flushInput()
    com.flushOutput()
    return com
'''
固有IDの読み出し
mybaudrate:ボーレート
'''
def Rdid(mybaudrate):
    """Read and print the module's unique ID via the RDID command."""
    com = setSerial(mybaudrate)
    com.flushInput()
    com.write(b'RDID' + b'\r\n')
    com.flushOutput()
    print(com.readline().strip())
    com.close()
'''
ボーレートの設定
mybaudrate:現在のボーレート
setbaudrate:セットするボーレート(文字列でもってくること)
0 1200bps
1 2400bps
2 4800bps
3 9600bps
4 19200bps
5 38400bps
'''
def Sbrt(mybaudrate, setbaudrate):
    """Change the module's baud rate via the SBRT command.

    mybaudrate: current baud rate of the link.
    setbaudrate: new rate code as a STRING:
        '0'=1200, '1'=2400, '2'=4800, '3'=9600, '4'=19200, '5'=38400.
    The write is bracketed by ENWR/DSWR (enable/disable flash writes).
    """
    com = setSerial(mybaudrate)
    com.flushInput()
    com.write(b'ENWR' + b'\r\n')
    com.flushOutput()
    com.readline()
    com.write(b'SBRT ' + setbaudrate.encode('utf-8') + b'\r\n')
    com.flushOutput()
    com.readline()
    com.write(b'DSWR' + b'\r\n')
    com.flushOutput()
    com.readline()
    com.close()
'''
ペアリング
mybaudrate:ボーレート
args:ペアリングしたいID(文字列にすること)
'''
def Srid(mybaudrate, args):
    """Pair with another module via the SRID command.

    args: the ID to pair with, as a string. The write is bracketed by
    ENWR/DSWR (enable/disable flash writes).
    """
    com = setSerial(mybaudrate)
    com.flushInput()
    com.write(b'ENWR' + b'\r\n')
    com.flushOutput()
    com.readline()
    com.write(b'SRID ' + args.encode('utf-8') + b'\r\n')
    com.flushOutput()
    com.readline()
    com.write(b'DSWR' + b'\r\n')
    com.flushOutput()
    com.readline()
    com.close()
'''
ペアリングの削除
全て削除されるため注意!
mybaudrate:ボーレート
'''
def Erid(mybaudrate):
    """Erase ALL stored pairings via the ERID command.

    Warning: every pairing is deleted — this cannot be undone. The write
    is bracketed by ENWR/DSWR (enable/disable flash writes).
    """
    com = setSerial(mybaudrate)
    com.flushInput()
    com.write(b'ENWR' + b'\r\n')
    com.flushOutput()
    com.readline()
    com.write(b'ERID' + b'\r\n')
    com.flushOutput()
    com.readline()
    com.write(b'DSWR' + b'\r\n')
    com.flushOutput()
    com.readline()
    com.close()
'''
送信
mybaudrate:ボーレート
args:送信したい文字列 (数字の場合も文字列型にすること)
'''
def Send(mybaudrate, args):
    """Transmit *args* (a string; numbers must be stringified first) via
    the TXDA command, hex-encoded byte by byte.
    """
    com = setSerial(mybaudrate)
    com.flushInput()
    com.write(b'TXDA' + binascii.b2a_hex(args.encode('utf-8')) + b'\r\n')
    com.flushOutput()
    com.readline()
    com.close()
'''
受信
アスキーコードから文字列に変換したものを返す
mybaudrate:ボーレート
'''
def Reception(mybaudrate):
    """Receive one line and decode its comma-separated-hex payload back
    into a string; returns '' when nothing valid arrives.
    """
    com = setSerial(mybaudrate)
    com.flushInput()
    text = ""
    cngtext = ""
    try:
        # Receive one line and strip surrounding whitespace.
        text = com.readline().decode('utf-8').strip() #受信と空白の削除
        com.close()
        text = text.replace("\r\n","")
        # Payload format: "<header>:<hh>,<hh>,..." — keep the hex part.
        text = text.split(":")[1]
        text = text.split(",")
        for x in text:
            cngtext += chr(int(x,16))
    except Exception:
        # NOTE(review): this broad except also hides decode/format errors,
        # not only an empty read — confirm that is intended.
        print("not input data")
    return cngtext
'''
中継機化
mybaudrate:ボーレート
'''
def Repeater(mybaudrate):
    """Relay loop: receive at *mybaudrate* and retransmit each message.

    NOTE(review): retransmission is hard-coded to 19200 baud regardless of
    *mybaudrate* — confirm this matches the intended link setup.
    """
    signal.signal(signal.SIGINT, signal_handler)
    while True:
        data = Reception(mybaudrate)
        if len(data) != 0:
            print("input data:", data)
            Send(19200, data)
| 20.368984
| 78
| 0.608034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,481
| 0.34482
|
cebb72569f74c340b49b55e56cd5cfb94ded36d4
| 229
|
py
|
Python
|
test/webdnn_test/graph_test/operators_test/sigmoid_test.py
|
steerapi/webdnn
|
1df51cc094e5a528cfd3452c264905708eadb491
|
[
"MIT"
] | 1
|
2021-04-09T15:55:35.000Z
|
2021-04-09T15:55:35.000Z
|
test/webdnn_test/graph_test/operators_test/sigmoid_test.py
|
steerapi/webdnn
|
1df51cc094e5a528cfd3452c264905708eadb491
|
[
"MIT"
] | null | null | null |
test/webdnn_test/graph_test/operators_test/sigmoid_test.py
|
steerapi/webdnn
|
1df51cc094e5a528cfd3452c264905708eadb491
|
[
"MIT"
] | null | null | null |
from test.webdnn_test.graph_test.operators_test.util import template_test_unary_operator
from webdnn.graph.operators.sigmoid import Sigmoid
def template():
    """Run the shared unary-operator test template against Sigmoid."""
    template_test_unary_operator(Sigmoid)
def test():
    """Entry point discovered by the test runner."""
    template()
| 20.818182
| 88
| 0.812227
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
cebbaac4650f5e836f24ada37e7051b81fcb685c
| 262
|
py
|
Python
|
hw_asr/metric/__init__.py
|
kostyayatsok/asr_project_template
|
ee5fb8006fa4e4f5d4a2e5c6e9f6352c22ad5bbb
|
[
"MIT"
] | null | null | null |
hw_asr/metric/__init__.py
|
kostyayatsok/asr_project_template
|
ee5fb8006fa4e4f5d4a2e5c6e9f6352c22ad5bbb
|
[
"MIT"
] | null | null | null |
hw_asr/metric/__init__.py
|
kostyayatsok/asr_project_template
|
ee5fb8006fa4e4f5d4a2e5c6e9f6352c22ad5bbb
|
[
"MIT"
] | null | null | null |
from hw_asr.metric.cer_metric import ArgmaxCERMetric, BeamsearchCERMetric
from hw_asr.metric.wer_metric import ArgmaxWERMetric, BeamsearchWERMetric
# Public API of the metric package.
__all__ = [
    "ArgmaxWERMetric",
    "ArgmaxCERMetric",
    "BeamsearchCERMetric",
    "BeamsearchWERMetric"
]
| 26.2
| 73
| 0.790076
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 76
| 0.290076
|
cebbe59000886920359408b216f69573eb72a0fb
| 6,746
|
py
|
Python
|
src/main_window.py
|
serenafr/My2048
|
ea96f8c8d9651ad86308f02f35474a51dc3be531
|
[
"MIT"
] | null | null | null |
src/main_window.py
|
serenafr/My2048
|
ea96f8c8d9651ad86308f02f35474a51dc3be531
|
[
"MIT"
] | 1
|
2015-04-25T00:36:44.000Z
|
2015-04-25T00:36:44.000Z
|
src/main_window.py
|
serenafr/My2048
|
ea96f8c8d9651ad86308f02f35474a51dc3be531
|
[
"MIT"
] | null | null | null |
import wx
import wx.lib.stattext as ST
import board
import score_board
from os.path import expanduser
SCORE_FILE_PATH = expanduser('~/.config/my2048/scores.conf')
class My2048_wx(wx.Frame):
    """Main window for the 2048 game.

    Renders a score header and the tile grid, and translates arrow-key
    presses into moves on the injected board model.

    BUG FIX: the original methods read the module-level globals
    ``board_object`` / ``score_board_object`` even though the constructor
    stores them on ``self`` — the class therefore only worked when run via
    the script at the bottom of this file.  All references now go through
    the instance attributes, so the injected collaborators are actually used.
    """

    def __init__(self, parent, id, title, size, board_object, score_board_object):
        super(My2048_wx, self).__init__(parent, title=title, size=size)
        self.board_object = board_object
        self.score_board_object = score_board_object
        self.SIZE = self.board_object.BOARD_SIZE
        # Flat list of GenStaticText labels, one per tile, filled by
        # generate_playboard() and updated in place by refresh().
        self.__label_list = []
        self.__best_score = 0
        self.__current_score = 0
        self.Construct()

    def Construct(self):
        """Build the whole widget tree and show the frame."""
        self.main_panel = wx.Panel(self, size=(300, 250))
        self.main_panel.SetFocus()
        self.Bind(wx.EVT_CHAR_HOOK, self.arrow_key_ctrl)
        # panel_box is the container that holds all the widgets.
        self.panel_box = wx.BoxSizer(wx.VERTICAL)
        self.generate_header()
        # play_board is the grid container in which all the tiles live.
        self.play_board = wx.GridSizer(self.SIZE, self.SIZE, 10, 10)
        self.generate_playboard()
        self.main_panel.SetSizer(self.panel_box)
        self.Show(True)

    def generate_header(self):
        """Create the top area: game title, current score and best score,
        plus the sub-title and the NEW GAME button underneath."""
        self.header = wx.BoxSizer(wx.VERTICAL)
        # upper_header lines up three parts horizontally: the game name,
        # the current-score box and the best-score box.
        self.upper_header = wx.BoxSizer(wx.HORIZONTAL)
        self.game_name = ST.GenStaticText(self, -1, label='2048',
                                          size=(150, 30), style=wx.ALIGN_CENTRE)
        self.upper_header.Add(self.game_name, flag=wx.EXPAND | wx.RIGHT, border=60)
        self.upper_header_score = wx.BoxSizer(wx.VERTICAL)
        self.score_str = ST.GenStaticText(self, -1, label='SCORE', size=(50, 20), style=wx.ALIGN_CENTRE)
        self.score_str.SetBackgroundColour((187, 173, 160))
        self.score = ST.GenStaticText(self, -1, label=str(self.__current_score), size=(50, 20), style=wx.ALIGN_CENTRE)
        self.score.SetForegroundColour('white')
        self.score.SetBackgroundColour((187, 173, 160))
        self.upper_header_score.AddMany([self.score_str, self.score])
        self.upper_header.Add(self.upper_header_score, flag=wx.EXPAND | wx.LEFT | wx.RIGHT, border=10)
        self.upper_header_best = wx.GridSizer(2, 1)
        self.best_str = ST.GenStaticText(self, -1, label='BEST', size=(50, 20), style=wx.ALIGN_CENTRE)
        self.best_str.SetBackgroundColour((187, 173, 160))
        # FIX: was the module-level global score_board_object.
        self.__best_score = self.score_board_object.get_best_score()
        self.best = ST.GenStaticText(self, -1, label=str(self.__best_score), size=(50, 20), style=wx.ALIGN_CENTRE)
        self.best.SetForegroundColour('white')
        self.best.SetBackgroundColour((187, 173, 160))
        self.upper_header_best.AddMany([self.best_str, self.best])
        self.upper_header.Add(self.upper_header_best)
        self.header.Add(self.upper_header)
        # lower_header: a sub-title plus the button that starts a new game.
        self.lower_header = wx.BoxSizer(wx.HORIZONTAL)
        self.sub_title = ST.GenStaticText(self, -1, label='Join the numbers and get to the 2048 tile!', size=(280, 20))
        self.lower_header.Add(self.sub_title, flag=wx.EXPAND | wx.RIGHT, border=10)
        self.new_game_button = wx.Button(self, -1, label='NEW GAME')
        self.new_game_button.Bind(wx.EVT_BUTTON, self.new_game_button_click)
        self.lower_header.Add(self.new_game_button)
        self.header.Add(self.lower_header)
        self.panel_box.Add(self.header, flag=wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM,
                           border=10)

    def _tile_texts(self):
        """Return the SIZE*SIZE tile values as display strings ('_' for empty),
        in row-major order matching the label list."""
        tiles = self.board_object.get_tiles()
        texts = []
        for i in range(0, self.SIZE):
            for j in range(0, self.SIZE):
                if tiles[i][j] is None:
                    texts.append('_')
                else:
                    texts.append(str(tiles[i][j]))
        return texts

    def generate_playboard(self):
        """Create one label per tile and add them to the grid sizer."""
        text_list = self._tile_texts()
        for i in range(0, self.SIZE * self.SIZE):
            self.__label_list.append(ST.GenStaticText(self, -1, label=text_list[i], size=(60, 30), style=wx.ALIGN_CENTRE))
            self.__label_list[i].SetBackgroundColour((238, 228, 218))
            self.play_board.Add(self.__label_list[i], flag=wx.EXPAND | wx.RIGHT | wx.TOP, border=10)
        self.panel_box.Add(self.play_board, flag=wx.EXPAND | wx.TOP | wx.LEFT, border=10)

    def refresh(self):
        """Re-read scores and tiles from the model and repaint the widgets."""
        # FIX: was the module-level globals board_object / score_board_object.
        self.__current_score = self.board_object.get_score()
        self.score.SetLabel(str(self.__current_score))
        self.__best_score = self.score_board_object.get_best_score()
        if self.__current_score >= self.__best_score:
            self.__best_score = self.__current_score
            self.score_board_object.add_score(self.__best_score)
        self.best.SetLabel(str(self.__best_score))
        self.upper_header.Layout()
        text_list = self._tile_texts()
        for i in range(0, self.SIZE * self.SIZE):
            self.__label_list[i].SetLabel(text_list[i])
        self.play_board.Layout()

    def _move(self, direction):
        """Apply one move in *direction* if the board allows it, then repaint."""
        # FIX: was the module-level global board_object.
        if self.board_object.can_move():
            self.board_object.move(direction)
            self.refresh()

    def up_move(self):
        self._move('up')

    def down_move(self):
        self._move('down')

    def left_move(self):
        self._move('left')

    def right_move(self):
        self._move('right')

    def arrow_key_ctrl(self, event):
        """Dispatch arrow keys to moves; pass every other key through."""
        if event.GetKeyCode() == wx.WXK_UP:
            self.up_move()
        elif event.GetKeyCode() == wx.WXK_DOWN:
            self.down_move()
        elif event.GetKeyCode() == wx.WXK_LEFT:
            self.left_move()
        elif event.GetKeyCode() == wx.WXK_RIGHT:
            self.right_move()
        else:
            event.Skip()

    def new_game_button_click(self, event):
        """Persist the best score if beaten, reset the board, and repaint."""
        # FIX: was the module-level globals board_object / score_board_object.
        if self.__current_score >= self.__best_score:
            self.score_board_object.add_score(self.__best_score)
        show_best = self.score_board_object.get_best_score()
        self.board_object.board_data = self.board_object.initialize_board(2)
        self.board_object.set_score(0)
        self.refresh()
        self.score.SetLabel(str(self.__current_score))
        self.best.SetLabel(str(show_best))
        self.upper_header.Layout()
# Script entry point: build the shared game-state objects (board model and
# persistent score board), hand them to the main window, and run the wx loop.
if __name__ == "__main__":
    app = wx.App()
    board_object = board.Board(2)  # board seeded with 2 starting tiles
    score_board_object = score_board.Score_Board()
    frame = My2048_wx(None, -1, '2048', (400, 300), board_object, score_board_object)
    app.MainLoop()
| 36.464865
| 119
| 0.725912
| 6,357
| 0.942336
| 0
| 0
| 0
| 0
| 0
| 0
| 1,051
| 0.155796
|
cebc0e5ca2b65cfcf3eb7b5e9ccc9a733e94b3e5
| 19,153
|
py
|
Python
|
tests/base/test_maps.py
|
ElliotCheung/simpeg
|
ce5bde154179ca63798a62a12787a7ec3535472c
|
[
"MIT"
] | 1
|
2022-02-18T16:31:27.000Z
|
2022-02-18T16:31:27.000Z
|
tests/base/test_maps.py
|
ElliotCheung/simpeg
|
ce5bde154179ca63798a62a12787a7ec3535472c
|
[
"MIT"
] | null | null | null |
tests/base/test_maps.py
|
ElliotCheung/simpeg
|
ce5bde154179ca63798a62a12787a7ec3535472c
|
[
"MIT"
] | null | null | null |
import numpy as np
import unittest
import discretize
from SimPEG import maps, models, utils
from discretize.utils import mesh_builder_xyz, refine_tree_xyz
import inspect
# Absolute tolerance for the exact-equality style checks below.
TOL = 1e-14
# Log/reciprocal maps can be finicky; fix the seed so every run is identical.
np.random.seed(121)
# Names that no longer exist in SimPEG.maps (removed/renamed) — always skipped.
REMOVED_IGNORE = [
    "FullMap",
    "CircleMap",
    "Map2Dto3D",
    "Vertical1DMap",
    "ActiveCells",
]
# Maps that cannot be exercised by the generic 2D sweep in MapTests.setUp
# (wrong dimensionality, need extra constructor arguments, or are tested
# individually further down).
MAPS_TO_EXCLUDE_2D = [
    "ComboMap",
    "ActiveCells",
    "InjectActiveCells",
    "LogMap",
    "ReciprocalMap",
    "PolynomialPetroClusterMap",
    "Surject2Dto3D",
    "Map2Dto3D",
    "Mesh2Mesh",
    "ParametricPolyMap",
    "PolyMap",
    "ParametricSplineMap",
    "SplineMap",
    "BaseParametric",
    "ParametricBlock",
    "ParametricEllipsoid",
    "ParametricCasingAndLayer",
    "ParametricLayer",
    "ParametricBlockInLayer",
    "Projection",
    "SelfConsistentEffectiveMedium",
    "SumMap",
    "SurjectUnits",
    "TileMap",
] + REMOVED_IGNORE
# Same idea for the generic 3D sweep.
MAPS_TO_EXCLUDE_3D = [
    "ComboMap",
    "ActiveCells",
    "InjectActiveCells",
    "LogMap",
    "ReciprocalMap",
    "PolynomialPetroClusterMap",
    "CircleMap",
    "ParametricCircleMap",
    "Mesh2Mesh",
    "BaseParametric",
    "ParametricBlock",
    "ParametricEllipsoid",
    "ParametricPolyMap",
    "PolyMap",
    "ParametricSplineMap",
    "SplineMap",
    "ParametricCasingAndLayer",
    "ParametricLayer",
    "ParametricBlockInLayer",
    "Projection",
    "SelfConsistentEffectiveMedium",
    "SumMap",
    "SurjectUnits",
    "TileMap",
] + REMOVED_IGNORE
class MapTests(unittest.TestCase):
    """Derivative and inverse checks for the mappings in ``SimPEG.maps``.

    ``setUp`` sweeps every public map class (minus the exclusion lists above)
    on small 2D/3D tensor meshes; the remaining methods exercise the maps
    that need dedicated construction.

    BUG FIXES relative to the original:
      * ``test_Projection`` discarded the result of ``mapping.test()`` — it
        is now asserted.
      * ``test_Tile`` compared the *signed* relative mass error to 1e-8, so
        any negative error passed trivially; it now uses the absolute error.
    """

    def setUp(self):
        # Everything in maps.* that is a class derived from IdentityMap and
        # not excluded gets tested generically.
        maps2test2D = [M for M in dir(maps) if M not in MAPS_TO_EXCLUDE_2D]
        maps2test3D = [M for M in dir(maps) if M not in MAPS_TO_EXCLUDE_3D]
        self.maps2test2D = [
            getattr(maps, M)
            for M in maps2test2D
            if (
                inspect.isclass(getattr(maps, M))
                and issubclass(getattr(maps, M), maps.IdentityMap)
            )
        ]
        self.maps2test3D = [
            getattr(maps, M)
            for M in maps2test3D
            if inspect.isclass(getattr(maps, M))
            and issubclass(getattr(maps, M), maps.IdentityMap)
        ]
        a = np.array([1, 1, 1])
        b = np.array([1, 2])
        self.mesh2 = discretize.TensorMesh([a, b], x0=np.array([3, 5]))
        self.mesh3 = discretize.TensorMesh([a, b, [3, 4]], x0=np.array([3, 5, 2]))
        self.mesh22 = discretize.TensorMesh([b, a], x0=np.array([3, 5]))
        self.meshCyl = discretize.CylMesh([10.0, 1.0, 10.0], x0="00C")

    def test_transforms2D(self):
        for M in self.maps2test2D:
            self.assertTrue(M(self.mesh2).test())

    def test_transforms2Dvec(self):
        for M in self.maps2test2D:
            self.assertTrue(M(self.mesh2).testVec())

    def test_transforms3D(self):
        for M in self.maps2test3D:
            self.assertTrue(M(self.mesh3).test())

    def test_transforms3Dvec(self):
        for M in self.maps2test3D:
            self.assertTrue(M(self.mesh3).testVec())

    def _check_inverse(self, mapping_cls, mesh):
        """Round-trip d -> inverse -> transform for one map; maps without an
        inverse (NotImplementedError) are skipped silently."""
        print("Testing Inverse {0}".format(str(mapping_cls.__name__)))
        mapping = mapping_cls(mesh)
        d = np.random.rand(mapping.shape[0])
        try:
            m = mapping.inverse(d)
            test_val = np.linalg.norm(d - mapping._transform(m))
            if mapping_cls.__name__ == "SphericalSystem":
                # This mapping is much less accurate than the others.
                self.assertLess(test_val, 1e-7)
            else:
                self.assertLess(test_val, TOL)
            print("  ... ok\n")
        except NotImplementedError:
            pass

    def test_invtransforms2D(self):
        for M in self.maps2test2D:
            self._check_inverse(M, self.mesh2)

    def test_invtransforms3D(self):
        for M in self.maps2test3D:
            self._check_inverse(M, self.mesh3)

    def test_ParametricCasingAndLayer(self):
        mapping = maps.ParametricCasingAndLayer(self.meshCyl)
        m = np.r_[-2.0, 1.0, 6.0, 2.0, -0.1, 0.2, 0.5, 0.2, -0.2, 0.2]
        self.assertTrue(mapping.test(m))

    def test_ParametricBlock2D(self):
        mesh = discretize.TensorMesh([np.ones(30), np.ones(20)], x0=np.array([-15, -5]))
        mapping = maps.ParametricBlock(mesh)
        # val_background, val_block, block_x0, block_dx, block_y0, block_dy
        m = np.r_[-2.0, 1.0, -5, 10, 5, 4]
        self.assertTrue(mapping.test(m))

    def test_transforms_logMap_reciprocalMap(self):
        # Note that log/reciprocal maps can be kinda finicky, so we are being
        # explicit about the random values used (generated once, hard-coded).
        v2 = np.r_[
            0.40077291, 0.1441044, 0.58452314, 0.96323738, 0.01198519, 0.79754415
        ]
        dv2 = np.r_[
            0.80653921, 0.13132446, 0.4901117, 0.03358737, 0.65473762, 0.44252488
        ]
        v3 = np.r_[
            0.96084865,
            0.34385186,
            0.39430044,
            0.81671285,
            0.65929109,
            0.2235217,
            0.87897526,
            0.5784033,
            0.96876393,
            0.63535864,
            0.84130763,
            0.22123854,
        ]
        dv3 = np.r_[
            0.96827838,
            0.26072111,
            0.45090749,
            0.10573893,
            0.65276365,
            0.15646586,
            0.51679682,
            0.23071984,
            0.95106218,
            0.14201845,
            0.25093564,
            0.3732866,
        ]
        mapping = maps.LogMap(self.mesh2)
        self.assertTrue(mapping.test(v2, dx=dv2))
        mapping = maps.LogMap(self.mesh3)
        self.assertTrue(mapping.test(v3, dx=dv3))
        mapping = maps.ReciprocalMap(self.mesh2)
        self.assertTrue(mapping.test(v2, dx=dv2))
        mapping = maps.ReciprocalMap(self.mesh3)
        self.assertTrue(mapping.test(v3, dx=dv3))

    def test_Mesh2MeshMap(self):
        mapping = maps.Mesh2Mesh([self.mesh22, self.mesh2])
        self.assertTrue(mapping.test())

    def test_Mesh2MeshMapVec(self):
        mapping = maps.Mesh2Mesh([self.mesh22, self.mesh2])
        self.assertTrue(mapping.testVec())

    def test_mapMultiplication(self):
        """Chaining maps with * must associate, and invalid chains must raise."""
        M = discretize.TensorMesh([2, 3])
        expMap = maps.ExpMap(M)
        vertMap = maps.SurjectVertical1D(M)
        combo = expMap * vertMap
        m = np.arange(3.0)
        t_true = np.exp(np.r_[0, 0, 1, 1, 2, 2.0])
        self.assertLess(np.linalg.norm((combo * m) - t_true, np.inf), TOL)
        self.assertLess(np.linalg.norm((expMap * vertMap * m) - t_true, np.inf), TOL)
        self.assertLess(np.linalg.norm(expMap * (vertMap * m) - t_true, np.inf), TOL)
        self.assertLess(np.linalg.norm((expMap * vertMap) * m - t_true, np.inf), TOL)
        # Try making a model
        mod = models.Model(m, mapping=combo)
        self.assertLess(np.linalg.norm(mod.transform - t_true, np.inf), TOL)
        self.assertRaises(Exception, models.Model, np.r_[1.0], mapping=combo)
        self.assertRaises(ValueError, lambda: combo * (vertMap * expMap))
        self.assertRaises(ValueError, lambda: (combo * vertMap) * expMap)
        self.assertRaises(ValueError, lambda: vertMap * expMap)
        self.assertRaises(ValueError, lambda: expMap * np.ones(100))
        self.assertRaises(ValueError, lambda: expMap * np.ones((100, 1)))
        self.assertRaises(ValueError, lambda: expMap * np.ones((100, 5)))
        self.assertRaises(ValueError, lambda: combo * np.ones(100))
        self.assertRaises(ValueError, lambda: combo * np.ones((100, 1)))
        self.assertRaises(ValueError, lambda: combo * np.ones((100, 5)))

    def test_activeCells(self):
        M = discretize.TensorMesh([2, 4], "0C")
        for actMap in [
            maps.InjectActiveCells(M, M.vectorCCy <= 0, 10, nC=M.nCy),
        ]:
            vertMap = maps.SurjectVertical1D(M)
            combo = vertMap * actMap
            m = np.r_[1.0, 2.0]
            mod = models.Model(m, combo)
            self.assertLess(
                np.linalg.norm(mod.transform - np.r_[1, 1, 2, 2, 10, 10, 10, 10.0]), TOL
            )
            self.assertLess((mod.transformDeriv - combo.deriv(m)).toarray().sum(), TOL)

    def test_tripleMultiply(self):
        M = discretize.TensorMesh([2, 4], "0C")
        expMap = maps.ExpMap(M)
        vertMap = maps.SurjectVertical1D(M)
        actMap = maps.InjectActiveCells(M, M.vectorCCy <= 0, 10, nC=M.nCy)
        m = np.r_[1.0, 2.0]
        t_true = np.exp(np.r_[1, 1, 2, 2, 10, 10, 10, 10.0])
        self.assertLess(
            np.linalg.norm((expMap * vertMap * actMap * m) - t_true, np.inf), TOL
        )
        self.assertLess(
            np.linalg.norm(((expMap * vertMap * actMap) * m) - t_true, np.inf), TOL
        )
        self.assertLess(
            np.linalg.norm((expMap * vertMap * (actMap * m)) - t_true, np.inf), TOL
        )
        self.assertLess(
            np.linalg.norm((expMap * (vertMap * actMap) * m) - t_true, np.inf), TOL
        )
        self.assertLess(
            np.linalg.norm(((expMap * vertMap) * actMap * m) - t_true, np.inf), TOL
        )
        self.assertRaises(ValueError, lambda: expMap * actMap * vertMap)
        self.assertRaises(ValueError, lambda: actMap * vertMap * expMap)

    def _check_map2Dto3D(self, M3, normal, fix_axis_slice):
        """Surject2Dto3D along *normal* must reproduce the 2D model on every
        slice; *fix_axis_slice* extracts one slice from the reshaped cube."""
        for m2to3 in [
            maps.Surject2Dto3D(M3, normal=normal),
        ]:
            m = np.arange(m2to3.nP)
            self.assertTrue(m2to3.test())
            self.assertTrue(m2to3.testVec())
            cube = (m2to3 * m).reshape(M3.vnC, order="F")
            self.assertTrue(np.all(utils.mkvc(fix_axis_slice(cube)) == m))

    def test_map2Dto3D_x(self):
        M3 = discretize.TensorMesh([3, 2, 4])
        self._check_map2Dto3D(M3, "X", lambda c: c[0, :, :])

    def test_map2Dto3D_y(self):
        M3 = discretize.TensorMesh([3, 2, 4])
        self._check_map2Dto3D(M3, "Y", lambda c: c[:, 0, :])

    def test_map2Dto3D_z(self):
        M3 = discretize.TensorMesh([3, 2, 4])
        self._check_map2Dto3D(M3, "Z", lambda c: c[:, :, 0])

    def test_ParametricPolyMap(self):
        M2 = discretize.TensorMesh([np.ones(10), np.ones(10)], "CN")
        mParamPoly = maps.ParametricPolyMap(M2, 2, logSigma=True, normal="Y")
        self.assertTrue(mParamPoly.test(m=np.r_[1.0, 1.0, 0.0, 0.0, 0.0]))
        self.assertTrue(mParamPoly.testVec(m=np.r_[1.0, 1.0, 0.0, 0.0, 0.0]))

    def test_ParametricSplineMap(self):
        M2 = discretize.TensorMesh([np.ones(10), np.ones(10)], "CN")
        x = M2.vectorCCx
        mParamSpline = maps.ParametricSplineMap(M2, x, normal="Y", order=1)
        self.assertTrue(mParamSpline.test())
        self.assertTrue(mParamSpline.testVec())

    def test_parametric_block(self):
        M1 = discretize.TensorMesh([np.ones(10)], "C")
        block = maps.ParametricBlock(M1)
        self.assertTrue(
            block.test(m=np.hstack([np.random.rand(2), np.r_[M1.x0, 2 * M1.hx.min()]]))
        )
        M2 = discretize.TensorMesh([np.ones(10), np.ones(20)], "CC")
        block = maps.ParametricBlock(M2)
        self.assertTrue(
            block.test(
                m=np.hstack(
                    [
                        np.random.rand(2),
                        np.r_[M2.x0[0], 2 * M2.hx.min()],
                        np.r_[M2.x0[1], 4 * M2.hy.min()],
                    ]
                )
            )
        )
        M3 = discretize.TensorMesh([np.ones(10), np.ones(20), np.ones(30)], "CCC")
        block = maps.ParametricBlock(M3)
        self.assertTrue(
            block.test(
                m=np.hstack(
                    [
                        np.random.rand(2),
                        np.r_[M3.x0[0], 2 * M3.hx.min()],
                        np.r_[M3.x0[1], 4 * M3.hy.min()],
                        np.r_[M3.x0[2], 5 * M3.hz.min()],
                    ]
                )
            )
        )

    def test_parametric_ellipsoid(self):
        M2 = discretize.TensorMesh([np.ones(10), np.ones(20)], "CC")
        block = maps.ParametricEllipsoid(M2)
        self.assertTrue(
            block.test(
                m=np.hstack(
                    [
                        np.random.rand(2),
                        np.r_[M2.x0[0], 2 * M2.hx.min()],
                        np.r_[M2.x0[1], 4 * M2.hy.min()],
                    ]
                )
            )
        )
        M3 = discretize.TensorMesh([np.ones(10), np.ones(20), np.ones(30)], "CCC")
        block = maps.ParametricEllipsoid(M3)
        self.assertTrue(
            block.test(
                m=np.hstack(
                    [
                        np.random.rand(2),
                        np.r_[M3.x0[0], 2 * M3.hx.min()],
                        np.r_[M3.x0[1], 4 * M3.hy.min()],
                        np.r_[M3.x0[2], 5 * M3.hz.min()],
                    ]
                )
            )
        )

    def test_sum(self):
        M2 = discretize.TensorMesh([np.ones(10), np.ones(20)], "CC")
        block = maps.ParametricEllipsoid(M2) * maps.Projection(
            7, np.r_[1, 2, 3, 4, 5, 6]
        )
        background = (
            maps.ExpMap(M2) * maps.SurjectFull(M2) * maps.Projection(7, np.r_[0])
        )
        summap0 = maps.SumMap([block, background])
        summap1 = block + background
        m0 = np.hstack(
            [
                np.random.rand(3),
                np.r_[M2.x0[0], 2 * M2.hx.min()],
                np.r_[M2.x0[1], 4 * M2.hy.min()],
            ]
        )
        self.assertTrue(np.all(summap0 * m0 == summap1 * m0))
        self.assertTrue(summap0.test(m0))
        self.assertTrue(summap1.test(m0))

    def test_surject_units(self):
        M2 = discretize.TensorMesh([np.ones(10), np.ones(20)], "CC")
        unit1 = M2.gridCC[:, 0] < 0
        unit2 = M2.gridCC[:, 0] >= 0
        surject_units = maps.SurjectUnits([unit1, unit2])
        m0 = np.r_[0, 1]
        m1 = surject_units * m0
        self.assertTrue(np.all(m1[unit1] == 0))
        self.assertTrue(np.all(m1[unit2] == 1))
        self.assertTrue(surject_units.test(m0))

    def test_Projection(self):
        nP = 10
        m = np.arange(nP)
        self.assertTrue(np.all(maps.Projection(nP, slice(5)) * m == m[:5]))
        self.assertTrue(np.all(maps.Projection(nP, slice(5, None)) * m == m[5:]))
        self.assertTrue(
            np.all(
                maps.Projection(nP, np.r_[1, 5, 3, 2, 9, 9]) * m
                == np.r_[1, 5, 3, 2, 9, 9]
            )
        )
        self.assertTrue(
            np.all(
                maps.Projection(nP, [1, 5, 3, 2, 9, 9]) * m == np.r_[1, 5, 3, 2, 9, 9]
            )
        )
        with self.assertRaises(AssertionError):
            maps.Projection(nP, np.r_[10]) * m
        mapping = maps.Projection(nP, np.r_[1, 2, 6, 1, 3, 5, 4, 9, 9, 8, 0])
        # BUG FIX: the result of mapping.test() was previously discarded.
        self.assertTrue(mapping.test())

    def test_Tile(self):
        """TileMap must conserve mass between the global and local meshes."""
        rxLocs = np.random.randn(3, 3) * 20
        h = [5, 5, 5]
        padDist = np.ones((3, 2)) * 100
        local_meshes = []
        for ii in range(rxLocs.shape[0]):
            local_mesh = mesh_builder_xyz(
                rxLocs, h, padding_distance=padDist, mesh_type="tree"
            )
            local_mesh = refine_tree_xyz(
                local_mesh,
                rxLocs[ii, :].reshape((1, -1)),
                method="radial",
                octree_levels=[1],
                finalize=True,
            )
            local_meshes.append(local_mesh)
        mesh = mesh_builder_xyz(rxLocs, h, padding_distance=padDist, mesh_type="tree")
        # This guarantees that the local meshes are always coarser or equal.
        for local_mesh in local_meshes:
            mesh.insert_cells(
                local_mesh.gridCC,
                local_mesh.cell_levels_by_index(np.arange(local_mesh.nC)),
                finalize=False,
            )
        mesh.finalize()
        # Define an active-cell vector from topography.
        activeCells = utils.surface2ind_topo(mesh, rxLocs)
        model = np.random.randn(int(activeCells.sum()))
        total_mass = (model * mesh.vol[activeCells]).sum()
        for local_mesh in local_meshes:
            tile_map = maps.TileMap(
                mesh,
                activeCells,
                local_mesh,
            )
            local_mass = (
                (tile_map * model) * local_mesh.vol[tile_map.local_active]
            ).sum()
            # BUG FIX: compare the *absolute* relative error — the signed
            # version passed trivially whenever local_mass < total_mass.
            self.assertTrue(abs(local_mass - total_mass) / abs(total_mass) < 1e-8)
class TestWires(unittest.TestCase):
    """Wires should split a stacked model vector into named Projection views."""

    def test_basic(self):
        mesh = discretize.TensorMesh([10, 10, 10])
        wires = maps.Wires(
            ("sigma", mesh.nCz),
            ("mu_casing", 1),
        )
        model = np.arange(mesh.nCz + 1)
        assert isinstance(wires.sigma, maps.Projection)
        assert wires.nP == mesh.nCz + 1
        named_model = wires * model
        # BUG FIX: this comparison was previously a bare expression whose
        # (array) result was discarded, so the sigma slice was never checked.
        assert np.all(named_model.sigma == model[: mesh.nCz])
        assert named_model.mu_casing == 10
class TestSCEMT(unittest.TestCase):
    """Derivative checks for the self-consistent effective-medium mapping."""

    def test_sphericalInclusions(self):
        """Spherical inclusions: default aspect ratios."""
        mesh = discretize.TensorMesh([4, 5, 3])
        scem = maps.SelfConsistentEffectiveMedium(mesh, sigma0=1e-1, sigma1=1.0)
        model = np.abs(np.random.rand(mesh.nC))
        scem.test(m=model, dx=0.05, num=3)

    def test_spheroidalInclusions(self):
        """Spheroidal inclusions: explicit aspect ratios and tolerance."""
        mesh = discretize.TensorMesh([4, 3, 2])
        scem = maps.SelfConsistentEffectiveMedium(
            mesh, sigma0=1e-1, sigma1=1.0, alpha0=0.8, alpha1=0.9, rel_tol=1e-8
        )
        model = np.abs(np.random.rand(mesh.nC))
        scem.test(m=model, dx=0.05, num=3)
# Allow running this test module directly (outside a pytest/nose runner).
if __name__ == "__main__":
    unittest.main()
| 32.573129
| 88
| 0.532293
| 17,632
| 0.920587
| 0
| 0
| 0
| 0
| 0
| 0
| 1,698
| 0.088655
|
cebc19ad6f58417864244da0cd384e9b8241025b
| 11,569
|
py
|
Python
|
src/panda_env.py
|
irom-lab/PAC-Imitation
|
1b9c203b02551895613b6710da33e1bebe4a0f11
|
[
"MIT"
] | 12
|
2020-08-11T03:26:36.000Z
|
2022-02-10T01:14:08.000Z
|
grasp/src/panda_env.py
|
irom-lab/Task_Relevant_OOD_Detection
|
c49d04f88a3e155bec9abb5ac5529dff8ea2c449
|
[
"MIT"
] | null | null | null |
grasp/src/panda_env.py
|
irom-lab/Task_Relevant_OOD_Detection
|
c49d04f88a3e155bec9abb5ac5529dff8ea2c449
|
[
"MIT"
] | 1
|
2021-03-08T10:46:06.000Z
|
2021-03-08T10:46:06.000Z
|
import pybullet_data
import pybullet as p
import time
import numpy as np
from src.utils_geom import *
from src.utils_depth import *
from src.panda import Panda
def full_jacob_pb(jac_t, jac_r):
    """Stack the translational and rotational Jacobian rows (as returned by
    PyBullet's calculateJacobian) into a single 6xN spatial Jacobian."""
    rows = [jac_t[0], jac_t[1], jac_t[2], jac_r[0], jac_r[1], jac_r[2]]
    return np.vstack(rows)
class pandaEnv():
    """PyBullet simulation environment wrapping a Franka Panda arm over a table.

    Owns the simulation setup (plane, table, arm, friction, finger gearing)
    and offers Cartesian velocity control, grasping, and contact queries.
    Idiom fix vs. original: ``np.array`` is used explicitly instead of the
    bare ``array`` pulled in by the wildcard import from ``src.utils_geom``.
    """

    def __init__(self,
                 urdfRoot=pybullet_data.getDataPath(),
                 mu=0.3,
                 sigma=0.01,
                 timestep=1./240.,
                 long_finger=False,
                 ):
        """Store configuration; nothing is loaded until reset_env().

        Args:
            urdfRoot: directory containing plane/table URDFs.
            mu: lateral friction coefficient for fingers and table.
            sigma: spinning friction coefficient for fingers and table.
            timestep: physics step in seconds.
            long_finger: whether the Panda model uses the long-finger variant.
        """
        self._urdfRoot = urdfRoot
        self._timeStep = timestep
        self._pandaId = None
        self._planeId = None
        self._tableId = None
        self._mu = mu
        self._sigma = sigma
        self.long_finger = long_finger

    def reset_env(self):
        """Rebuild the whole simulation: plane, table, arm, and constraints."""
        p.resetSimulation()
        p.setPhysicsEngineParameter(numSolverIterations=150,
                                    enableConeFriction=1,
                                    contactBreakingThreshold=1e-3)
        p.setTimeStep(self._timeStep)

        # Set gravity
        p.setGravity(0, 0, -9.81)

        # Load plane and table
        self._planeId = p.loadURDF(self._urdfRoot+'/plane.urdf', basePosition=[0, 0, -1], useFixedBase=1)
        self._tableId = p.loadURDF(self._urdfRoot+'/table/table.urdf', basePosition=[0.4000000, 0.00000, -0.63+0.005], baseOrientation=[0, 0, 0, 1.0], useFixedBase=1)

        # Load arm, no need to settle (joint angles are set instantly)
        self._panda = Panda(self.long_finger)
        self._pandaId = self._panda.load()

        # Set friction coefficients of arm and table
        self.change_friction_coeffs(self._mu, self._sigma)

        # Create a gear constraint to keep the two fingers centered
        fingerGear = p.createConstraint(self._pandaId,
                                        9,
                                        self._pandaId,
                                        11,
                                        jointType=p.JOINT_GEAR,
                                        jointAxis=[1, 0, 0],
                                        parentFramePosition=[0, 0, 0],
                                        childFramePosition=[0, 0, 0])
        p.changeConstraint(fingerGear, gearRatio=-1, erp=0.1, maxForce=2*self._panda.maxFingerForce)

        # Disable damping for all links
        for i in range(self._panda.numJoints):
            p.changeDynamics(self._pandaId, i,
                             linearDamping=0,
                             angularDamping=0)

    def change_friction_coeffs(self, mu, sigma):
        """Apply lateral/spinning friction to both fingertips and the table."""
        p.changeDynamics(self._pandaId, self._panda.pandaLeftFingerLinkIndex,
                         lateralFriction=mu,
                         spinningFriction=sigma,
                         frictionAnchor=1,
                         )
        p.changeDynamics(self._pandaId, self._panda.pandaRightFingerLinkIndex,
                         lateralFriction=mu,
                         spinningFriction=sigma,
                         frictionAnchor=1,
                         )
        p.changeDynamics(self._tableId, -1,
                         lateralFriction=mu,
                         spinningFriction=sigma,
                         frictionAnchor=1,
                         )

    def reset_arm_joints_ik(self, pos, orn, fingerPos=0.0):
        """Instantly set arm joints to an IK solution for (pos, orn)."""
        jointPoses = list(p.calculateInverseKinematics(self._pandaId,
                          self._panda.pandaEndEffectorLinkIndex,
                          pos, orn,
                          jointDamping=self._panda.jd,
                          lowerLimits=self._panda.jointLowerLimit,
                          upperLimits=self._panda.jointUpperLimit,
                          jointRanges=self._panda.jointRange,
                          restPoses=self._panda.jointRestPose,
                          residualThreshold=1e-4))
        # Keep the 7 arm joints from IK; fix hand roll and finger positions.
        jointPoses = jointPoses[:7] + [0, -np.pi/4, fingerPos, 0.00, fingerPos, 0.00]
        self._panda.reset(jointPoses)

    def reset_arm_joints(self, joints):
        """Instantly set the 7 arm joints; fingers go to the open position."""
        jointPoses = joints + [0, -np.pi/4, self._panda.fingerOpenPos,
                               0.00, self._panda.fingerOpenPos, 0.00]
        self._panda.reset(jointPoses)

    ########################* Arm control *#######################

    def move_pos(self, absolute_pos=None,
                 relative_pos=None,
                 absolute_global_euler=None,  # preferred
                 relative_global_euler=None,  # preferred
                 relative_local_euler=None,   # not using
                 absolute_global_quat=None,   # preferred
                 relative_azi=None,           # for arm
                 numSteps=50,
                 maxJointVel=0.20,
                 relativePos=True,   # unused; kept for backward compatibility
                 globalOrn=True,     # unused; kept for backward compatibility
                 checkContact=False,
                 checkPalmContact=False,
                 objId=None,
                 gripper_target_pos=None,
                 timeStep=0):
        """Move the end effector along a time-scaled straight-line trajectory.

        Exactly one position spec and one orientation spec should be given;
        unspecified parts default to the current pose (identity quaternion if
        no orientation spec at all).  Returns ``(timeStep, success)`` where
        success is False if a (palm) contact with ``objId`` aborted the move.
        """
        # Current end-effector pose
        eePosNow, eeQuatNow = self._panda.get_ee()

        # Determine target position
        if absolute_pos is not None:
            targetPos = absolute_pos
        elif relative_pos is not None:
            targetPos = eePosNow + relative_pos
        else:
            targetPos = eePosNow

        # Determine target orientation (quaternion)
        if absolute_global_euler is not None:
            targetOrn = euler2quat(absolute_global_euler)
        elif relative_global_euler is not None:
            targetOrn = quatMult(euler2quat(relative_global_euler), eeQuatNow)
        elif relative_local_euler is not None:
            targetOrn = quatMult(eeQuatNow, euler2quat(relative_local_euler))
        elif absolute_global_quat is not None:
            targetOrn = absolute_global_quat
        elif relative_azi is not None:
            # Extrinsic yaw, then intrinsic pitch
            targetOrn = quatMult(euler2quat([relative_azi[0], 0, 0]), eeQuatNow)
            targetOrn = quatMult(targetOrn, euler2quat([0, relative_azi[1], 0]))
        else:
            # FIX (idiom): was bare `array(...)` from a wildcard import.
            targetOrn = np.array([1.0, 0., 0., 0.])

        # Get trajectory
        trajPos = self.traj_time_scaling(startPos=eePosNow,
                                         endPos=targetPos,
                                         numSteps=numSteps)

        # Run steps
        numSteps = len(trajPos)
        for step in range(numSteps):

            # Get joint velocities from error-tracking control
            jointDot = self.traj_tracking_vel(targetPos=trajPos[step], targetQuat=targetOrn)

            # Send velocity commands to the 7 arm joints
            for i in range(self._panda.numJointsArm):
                p.setJointMotorControl2(self._pandaId,
                                        i,
                                        p.VELOCITY_CONTROL,
                                        targetVelocity=jointDot[i],
                                        force=self._panda.maxJointForce[i],
                                        maxVelocity=maxJointVel)

            # NOTE(review): the gripper commands below reuse `i` left over from
            # the arm loop for `force=` — i.e. the last arm joint's force limit.
            # Preserved as-is to keep behavior identical; confirm intent.
            if gripper_target_pos is None:
                # Keep gripper moving at its current velocity
                p.setJointMotorControl2(self._pandaId,
                                        self._panda.pandaLeftFingerJointIndex,
                                        p.VELOCITY_CONTROL,
                                        targetVelocity=self._panda.fingerCurVel,
                                        force=self._panda.maxJointForce[i],
                                        maxVelocity=0.04)
                p.setJointMotorControl2(self._pandaId,
                                        self._panda.pandaRightFingerJointIndex,
                                        p.VELOCITY_CONTROL,
                                        targetVelocity=self._panda.fingerCurVel,
                                        force=self._panda.maxJointForce[i],
                                        maxVelocity=0.04)
            else:
                p.setJointMotorControl2(self._pandaId,
                                        self._panda.pandaLeftFingerJointIndex,
                                        p.POSITION_CONTROL,
                                        targetPosition=gripper_target_pos,
                                        maxVelocity=0.04)
                p.setJointMotorControl2(self._pandaId,
                                        self._panda.pandaRightFingerJointIndex,
                                        p.POSITION_CONTROL,
                                        targetPosition=gripper_target_pos,
                                        maxVelocity=0.04)

            # Quit early if contact at either finger or the palm is detected
            if checkContact:
                contact = self.check_contact(objId, both=False)
                if contact:
                    return timeStep, False
            if checkPalmContact:
                contact = self.check_palm_contact(objId)
                if contact:
                    return timeStep, False

            # Step simulation
            p.stepSimulation()
            timeStep += 1
        return timeStep, True

    def grasp(self, targetVel=0):
        """Set the gripper closing/opening velocity.

        A non-negligible ``targetVel`` is used directly; otherwise the
        current finger velocity direction is flipped at 0.05 m/s.
        """
        if targetVel > 1e-2 or targetVel < -1e-2:  # use specified velocity if available
            self._panda.fingerCurVel = targetVel
        else:
            if self._panda.fingerCurVel > 0.0:
                self._panda.fingerCurVel = -0.05
            else:
                self._panda.fingerCurVel = 0.05

    def traj_time_scaling(self, startPos, endPos, numSteps):
        """Cubic (3s^2 - 2s^3) time-scaled straight line from start to end."""
        trajPos = np.zeros((numSteps, 3))
        for step in range(numSteps):
            s = 3 * (1.0 * step / numSteps) ** 2 - 2 * (1.0 * step / numSteps) ** 3
            trajPos[step] = (endPos - startPos) * s + startPos
        return trajPos

    def traj_tracking_vel(self, targetPos, targetQuat, posGain=20, velGain=5):
        """Proportional task-space tracking: return 7x1 arm joint velocities.

        Uses the spatial-frame pose error and the pseudo-inverse of the 6x7
        arm Jacobian; falls back to zero velocity if the pinv fails.
        """
        eePos, eeQuat = self._panda.get_ee()

        eePosError = targetPos - eePos
        eeOrnError = log_rot(quat2rot(targetQuat).dot((quat2rot(eeQuat).T)))  # in spatial frame

        jointPoses = self._panda.get_arm_joints() + [0, 0, 0]  # add fingers
        eeState = p.getLinkState(self._pandaId,
                                 self._panda.pandaEndEffectorLinkIndex,
                                 computeLinkVelocity=1,
                                 computeForwardKinematics=1)
        # Jacobians for the CoM of the end-effector link; localPosition is
        # defined in link-frame coordinates (eeState[2]).
        zero_vec = [0.0] * len(jointPoses)
        jac_t, jac_r = p.calculateJacobian(self._pandaId,
                                           self._panda.pandaEndEffectorLinkIndex,
                                           eeState[2],
                                           jointPoses,
                                           zero_vec,
                                           zero_vec)  # use localInertialFrameOrientation
        jac_sp = full_jacob_pb(jac_t, jac_r)[:, :7]  # 6x10 -> 6x7, drop finger columns
        try:
            jointDot = np.linalg.pinv(jac_sp).dot((np.hstack((posGain*eePosError, velGain*eeOrnError)).reshape(6, 1)))  # pseudo-inverse
        except np.linalg.LinAlgError:
            jointDot = np.zeros((7, 1))
        return jointDot

    ############################### Contact ##################################

    def get_contact(self, objId, minForceThres=1e-1):
        """Return (left, right) lists of contact tuples against objId whose
        normal force (contact tuple index 9 in PyBullet) exceeds the threshold."""
        left_contacts = p.getContactPoints(self._pandaId,
                                           objId,
                                           linkIndexA=self._panda.pandaLeftFingerLinkIndex,
                                           linkIndexB=-1)
        right_contacts = p.getContactPoints(self._pandaId,
                                            objId,
                                            linkIndexA=self._panda.pandaRightFingerLinkIndex,
                                            linkIndexB=-1)
        left_contacts = [i for i in left_contacts if i[9] > minForceThres]
        right_contacts = [i for i in right_contacts if i[9] > minForceThres]
        return left_contacts, right_contacts

    def get_finger_force(self, objId):
        """Sum normal + two friction components per finger.

        Returns None if either finger has no contact; otherwise
        (left_force, right_force, left_pos, right_pos, leftNormalMag,
        rightNormalMag).
        """
        left_contacts, right_contacts = self.get_contact(objId)

        left_force = np.zeros((3))
        right_force = np.zeros((3))
        for i in left_contacts:
            left_force += i[9]*np.array(i[7])+i[10]*np.array(i[11])+i[12]*np.array(i[13])
        for i in right_contacts:
            right_force += i[9]*np.array(i[7])+i[10]*np.array(i[11])+i[12]*np.array(i[13])
        leftNormalMag = sum([i[9] for i in left_contacts])
        rightNormalMag = sum([i[9] for i in right_contacts])
        numLeftContact = len(left_contacts)
        numRightContact = len(right_contacts)

        if numLeftContact < 1 or numRightContact < 1:
            return None
        else:
            return left_force, right_force, \
                np.array(left_contacts[0][6]), np.array(right_contacts[0][6]), \
                leftNormalMag, rightNormalMag

    def check_hold_object(self, objId, minForceThres=10.0):
        """True if both fingers squeeze objId above the force threshold."""
        left_contacts, right_contacts = self.get_contact(objId)
        leftNormalMag = sum([i[9] for i in left_contacts])
        rightNormalMag = sum([i[9] for i in right_contacts])
        return leftNormalMag > minForceThres and rightNormalMag > minForceThres

    def check_contact(self, objId, both=False):
        """1 if finger contact with objId exists (both fingers if both=True)."""
        leftContacts, rightContacts = self.get_contact(objId)
        if both:
            if len(leftContacts) > 0 and len(rightContacts) > 0:
                return 1
        else:
            if len(leftContacts) > 0 or len(rightContacts) > 0:
                return 1
        return 0

    def check_palm_contact(self, objId, minForceThres=1e-1):
        """True if the palm (hand link) touches objId above the threshold."""
        palm_contacts = p.getContactPoints(self._pandaId,
                                           objId,
                                           linkIndexA=self._panda.pandaHandLinkIndex,
                                           linkIndexB=-1)
        palm_contacts = [i for i in palm_contacts if i[9] > minForceThres]
        return len(palm_contacts) > 0

    ############################### Info ##################################
    # Thin pass-throughs to the underlying Panda robot object.

    def get_ee(self):
        return self._panda.get_ee()

    def get_gripper_tip_long(self):
        return self._panda.get_gripper_tip_long()

    def get_arm_joints(self):
        return self._panda.get_arm_joints()

    def get_gripper_joint(self):
        return self._panda.get_gripper_joint()

    def get_left_finger(self):
        return self._panda.get_left_finger()

    def get_right_finger(self):
        return self._panda.get_right_finger()

    def get_obs(self):
        return self._panda.get_obs()
| 31.183288
| 227
| 0.684675
| 11,288
| 0.975711
| 0
| 0
| 0
| 0
| 0
| 0
| 1,403
| 0.121272
|
cebc72a58b425fb7f7cd7143c3625b862489e1f9
| 178
|
py
|
Python
|
desafios/desafio 021.py
|
juaoantonio/curso_video_python
|
7520223d8647929530a1cd96f7c7d8c8f264ba1e
|
[
"MIT"
] | null | null | null |
desafios/desafio 021.py
|
juaoantonio/curso_video_python
|
7520223d8647929530a1cd96f7c7d8c8f264ba1e
|
[
"MIT"
] | null | null | null |
desafios/desafio 021.py
|
juaoantonio/curso_video_python
|
7520223d8647929530a1cd96f7c7d8c8f264ba1e
|
[
"MIT"
] | null | null | null |
import pygame

# Minimal playback demo: initialize the mixer, play one file, and keep the
# program alive until the user presses Enter.
pygame.mixer.init()
pygame.init()
pygame.mixer.music.load('/home/jaab/Música/bach_1.wav')  # hard-coded local path
pygame.mixer.music.play()
input()  # block here so the music keeps playing until Enter is pressed
pygame.event.wait()
# NOTE(review): this stops sound channels; the music stream is usually stopped
# with pygame.mixer.music.stop() — confirm intent.
pygame.mixer.stop()
| 19.777778
| 55
| 0.758427
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 31
| 0.173184
|
cebdd561ae5cf73cc61b02a50a7e42a495c58927
| 67
|
py
|
Python
|
deem/pytorch/layers/__init__.py
|
xxaxtt/TwoTowers
|
206c6b38a2f72486906d391c5176e4508036aac0
|
[
"Apache-2.0"
] | 14
|
2021-09-22T02:24:16.000Z
|
2021-12-11T11:59:02.000Z
|
deem/pytorch/layers/__init__.py
|
xxaxtt/TwoTowers
|
206c6b38a2f72486906d391c5176e4508036aac0
|
[
"Apache-2.0"
] | 2
|
2021-10-16T04:39:21.000Z
|
2021-12-01T08:04:46.000Z
|
deem/pytorch/layers/__init__.py
|
xxaxtt/TwoTowers
|
206c6b38a2f72486906d391c5176e4508036aac0
|
[
"Apache-2.0"
] | 5
|
2021-10-09T11:47:53.000Z
|
2021-11-25T04:41:24.000Z
|
from .embedding import *
from .sequence import *
from .mlp import *
| 22.333333
| 24
| 0.746269
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
cebf0a2899cd29ab2ab60478658090adf649c895
| 1,765
|
py
|
Python
|
common.py
|
braindatalab/scrutinizing-xai
|
fb24fed7ae3adc10e3c35d7f477a5db322b48f4f
|
[
"MIT"
] | null | null | null |
common.py
|
braindatalab/scrutinizing-xai
|
fb24fed7ae3adc10e3c35d7f477a5db322b48f4f
|
[
"MIT"
] | null | null | null |
common.py
|
braindatalab/scrutinizing-xai
|
fb24fed7ae3adc10e3c35d7f477a5db322b48f4f
|
[
"MIT"
] | null | null | null |
import json
import os
import pickle
from dataclasses import dataclass, field
from os.path import join
from typing import Dict, Any, ClassVar
@dataclass
class ScoresAttributes:
    """Canonical names/keys used to address score artifacts.

    Acts as a process-wide singleton: obtain it via :meth:`get` rather than
    instantiating directly.
    """

    global_based: str
    sample_based: str
    explanations: str
    method_names: str
    data_weights: str
    model_weights: str
    model_accuracies: str
    logistic_regression: str
    neural_net: str

    # Singleton cache. ClassVar keeps it out of the generated __init__/__eq__.
    # Fixed: plain assignment instead of field(default=None) — field() is the
    # dataclass machinery for *instance* fields, not class-level attributes.
    instance: ClassVar = None

    @staticmethod
    def default_conf() -> dict:
        """Return the default mapping of attribute names to storage keys."""
        return {
            'global_based': 'global',
            'sample_based': 'sample',
            'explanations': 'expl',
            'method_names': 'names',
            'data_weights': 'd_weights',
            'model_weights': 'm_weights',
            'model_accuracies': 'm_accuracy',
            'logistic_regression': 'Logistic Regression',
            'neural_net': 'Single-Layer Neural Net'
        }

    @classmethod
    def get(cls):
        """Return the lazily created singleton instance."""
        if not cls.instance:
            conf_dict = cls.default_conf()
            cls.instance = cls(**conf_dict)
        return cls.instance
def load_json_file(file_path: str) -> Dict:
    """Read the JSON document at *file_path* and return the parsed object."""
    with open(file_path, 'r') as handle:
        return json.load(handle)
def load_pickle(file_path: str) -> Any:
    """Unpickle and return the object stored at *file_path*."""
    with open(file_path, 'rb') as handle:
        return pickle.load(handle)
def to_pickle(output_dir: str, data: Any, suffix: str) -> str:
    """Pickle *data* into ``<output_dir>/data_<suffix>.pkl`` and return the path.

    The output directory is created on demand.
    """
    os.makedirs(output_dir, exist_ok=True)
    target = join(output_dir, f'data_{suffix}.pkl')
    print(f'Output path: {target}')
    with open(target, 'wb') as sink:
        pickle.dump(data, sink)
    return target
def extract_pattern_type(data_path: str) -> str:
    """Extract the token after ``pattern_type_`` from a file name's stem."""
    stem = data_path.split('.')[0]
    return stem.split('pattern_type_')[-1]
| 25.955882
| 62
| 0.628329
| 922
| 0.52238
| 0
| 0
| 933
| 0.528612
| 0
| 0
| 323
| 0.183003
|
cebff2caa1f50e0a7946513e1fed0db5898382ad
| 1,623
|
py
|
Python
|
src/spn/tests/prometheus_tests/test.py
|
AmurG/SPFlow
|
ab28dd4af9ed722ace69c6b290cf0a279bbda39e
|
[
"Apache-2.0"
] | null | null | null |
src/spn/tests/prometheus_tests/test.py
|
AmurG/SPFlow
|
ab28dd4af9ed722ace69c6b290cf0a279bbda39e
|
[
"Apache-2.0"
] | null | null | null |
src/spn/tests/prometheus_tests/test.py
|
AmurG/SPFlow
|
ab28dd4af9ed722ace69c6b290cf0a279bbda39e
|
[
"Apache-2.0"
] | null | null | null |
import spn
#from spn.structure.leaves.mvgauss.MVG import *
from spn.io.Text import *
import sys
from spn.structure.leaves.parametric.Parametric import *
from spn.structure.leaves.parametric.MLE import *
from spn.algorithms.MPE import mpe
from spn.structure.prometheus.disc import *
from scipy.stats import multivariate_normal as mn
#from spn.structure.prometheus.disc import *

# Smoke-test script for SPFlow's prometheus pieces: fits MultivariateGaussian
# leaves by MLE, round-trips them through the textual serialization, runs MPE
# on partially observed rows, then learns a structure from ./test.npy and
# writes it to ./ca.txt.  (np comes in via the wildcard imports above —
# presumably numpy; confirm.)

# Fit a 2-D Gaussian leaf on a small toy matrix and show its parameters.
node = MultivariateGaussian(np.inf, np.inf)
data = np.array([1, 2, 3, 4, 5, 1, 2, 3, 4, 1, 3, 3, 6, 2]).reshape(-1, 2)
update_parametric_parameters_mle(node, data)
print(node.mean, node.sigma)
print(node.scope)

# NOTE(review): `spn` below shadows the `import spn` module name above —
# harmless here since the module object is no longer used, but confusing.
dummydata = np.asarray([[1, 2, 4, 8], [2.1, 4.1, 8.1, 16.1], [
    4.1, 8.1, 16.1, 32.1], [8.8, 16.5, 32.3, 64.2]])
dummyscope = list([0, 1, 2, 3])
spn = MultivariateGaussian(np.inf, np.inf)
update_parametric_parameters_mle(spn, dummydata)
print(spn.mean)
print(spn.sigma)
spn.scope = dummyscope
#print(mn.pdf(spn.mean, spn.mean, spn.cov))
print(spn.scope)

# MPE on rows with missing values (np.nan marks unobserved entries).
dummydata = np.asarray([[np.nan, 2.0, np.nan, np.nan],
                        [np.nan, np.nan, np.nan, 64.3]])
print(np.shape(dummydata))
print(np.shape(np.asarray(spn.mean)))
print(np.shape(np.asarray(spn.sigma)))
print(mpe(spn, dummydata))

# Round-trip through the textual equation format and compare parameters.
print(spn_to_str_equation(spn))
recreate = (str_to_spn(spn_to_str_equation(spn)))
print(spn_to_str_equation(recreate))
print(recreate.mean)
print(recreate.sigma)

# Learn a structure from saved data and persist its textual form.
arr = np.load('./test.npy')
teststruct = prometheus(arr, 1, itermult=0, leafsize=4, maxsize=6)
testspn = str_to_spn(teststruct)
recreate = spn_to_str_equation(testspn)
file = open('./ca.txt', 'w')
file.write(teststruct)
file.close()
| 24.969231
| 74
| 0.701787
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 159
| 0.097967
|
cec12b95a353d8bc6ee20816f32cfee16d9c8d60
| 5,592
|
py
|
Python
|
tests/test_converters.py
|
stabacco/graphene-pydantic
|
41d62e1879b1f6ebd75319c39b0a872ec6594cc5
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-04-22T06:13:16.000Z
|
2021-04-22T06:13:16.000Z
|
tests/test_converters.py
|
stabacco/graphene-pydantic
|
41d62e1879b1f6ebd75319c39b0a872ec6594cc5
|
[
"Apache-2.0",
"MIT"
] | 1
|
2020-11-10T18:40:44.000Z
|
2020-11-10T18:40:44.000Z
|
tests/test_converters.py
|
stabacco/graphene-pydantic
|
41d62e1879b1f6ebd75319c39b0a872ec6594cc5
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
import datetime
import decimal
import enum
import typing as T
import uuid
import graphene
import graphene.types
import pydantic
import pytest
from pydantic import BaseModel, create_model
import graphene_pydantic.converters as converters
from graphene_pydantic.converters import ConversionError, convert_pydantic_field
from graphene_pydantic.objecttype import PydanticObjectType
from graphene_pydantic.registry import get_global_registry, Placeholder
def _get_field_from_spec(name, type_spec_or_default):
    """Build a one-field pydantic model and return that field's ModelField."""
    kwargs = {name: type_spec_or_default}
    m = create_model("model", **kwargs)
    return m.__fields__[name]
def _convert_field_from_spec(name, type_spec_or_default):
    """Convert a pydantic field spec into a graphene Field using the global registry."""
    return convert_pydantic_field(
        _get_field_from_spec(name, type_spec_or_default),
        get_global_registry(PydanticObjectType),
    )
def test_required_string():
    """An ellipsis default converts to a NonNull String field."""
    field = _convert_field_from_spec("s", (str, ...))
    assert field is not None
    assert isinstance(field, graphene.Field)
    # The ellipsis in the type spec means required
    assert isinstance(field.type, graphene.NonNull)
    assert field.type.of_type == graphene.String
def test_default_values():
    """A plain default value yields an optional field carrying that default."""
    field = _convert_field_from_spec("s", "hi")
    assert field is not None
    assert isinstance(field, graphene.Field)
    # there's a default value, so it's not required
    assert not isinstance(field.type, graphene.NonNull)
    assert field.type == graphene.String
    assert field.default_value == "hi"
@pytest.mark.parametrize(
    "input, expected",
    [
        ((bool, False), graphene.Boolean),
        ((float, 0.1), graphene.Float),
        ((int, 6), graphene.Int),
        ((str, "hi"), graphene.String),
        ((uuid.UUID, uuid.uuid4()), graphene.UUID),
        ((datetime.date, datetime.date(2019, 1, 1)), graphene.Date),
        ((datetime.time, datetime.time(15, 29)), graphene.Time),
        ((datetime.datetime, datetime.datetime(2019, 1, 1, 1, 37)), graphene.DateTime),
    ],
)
def test_builtin_scalars(input, expected):
    """Each supported Python scalar type maps to the matching graphene scalar."""
    field = _convert_field_from_spec("attr", input)
    assert isinstance(field, graphene.Field)
    assert field.type == expected
    assert field.default_value == input[1]
def test_union():
    """A typing.Union spec converts to an auto-named graphene Union subclass."""
    field = _convert_field_from_spec("attr", (T.Union[int, float, str], 5.0))
    assert issubclass(field.type, graphene.Union)
    assert field.default_value == 5.0
    assert field.type.__name__.startswith("UnionOf")
def test_mapping():
    """Mapping types are unsupported and raise ConversionError."""
    with pytest.raises(ConversionError) as exc:
        _convert_field_from_spec("attr", (T.Dict[str, int], {"foo": 5}))
    assert exc.value.args[0] == "Don't know how to handle mappings in Graphene."
def test_decimal(monkeypatch):
    """Decimal maps to graphene Decimal when supported, else falls back to Float."""
    monkeypatch.setattr(converters, "DECIMAL_SUPPORTED", True)
    field = _convert_field_from_spec("attr", (decimal.Decimal, decimal.Decimal(1.25)))
    assert field.type.__name__ == "Decimal"
    monkeypatch.setattr(converters, "DECIMAL_SUPPORTED", False)
    field = _convert_field_from_spec("attr", (decimal.Decimal, decimal.Decimal(1.25)))
    assert field.type.__name__ == "Float"
def test_iterables():
    """Sequence/set/tuple specs map to graphene List; Optional[...] unwraps."""
    # Parameterized containers become List instances; bare containers stay the class.
    field = _convert_field_from_spec("attr", (T.List[int], [1, 2]))
    assert isinstance(field.type, graphene.types.List)
    field = _convert_field_from_spec("attr", (list, [1, 2]))
    assert field.type == graphene.types.List
    field = _convert_field_from_spec("attr", (T.Set[int], {1, 2}))
    assert isinstance(field.type, graphene.types.List)
    field = _convert_field_from_spec("attr", (set, {1, 2}))
    assert field.type == graphene.types.List
    field = _convert_field_from_spec("attr", (T.Tuple[int, float], (1, 2.2)))
    assert isinstance(field.type, graphene.types.List)
    field = _convert_field_from_spec("attr", (T.Tuple[int, ...], (1, 2.2)))
    assert isinstance(field.type, graphene.types.List)
    field = _convert_field_from_spec("attr", (tuple, (1, 2)))
    assert field.type == graphene.types.List
    # Optional[int] (Union[None, int]) unwraps to the inner scalar.
    field = _convert_field_from_spec("attr", (T.Union[None, int], 1))
    assert field.type == graphene.types.Int
def test_enum():
    """Python enums convert to graphene enums of the same name."""
    class Color(enum.Enum):
        RED = 1
        GREEN = 2

    field = _convert_field_from_spec("attr", (Color, Color.RED))
    assert field.type.__name__ == "Color"
    assert field.type._meta.enum == Color
def test_existing_model():
    """A pydantic model with a registered object type converts to that type."""
    from graphene_pydantic import PydanticObjectType

    class Foo(BaseModel):
        name: str

    # Defining GraphFoo registers it for Foo in the global registry.
    class GraphFoo(PydanticObjectType):
        class Meta:
            model = Foo

    field = _convert_field_from_spec("attr", (Foo, Foo(name="bar")))
    assert field.type == GraphFoo
def test_unresolved_placeholders():
    """Unknown models register a Placeholder instead of raising."""
    # no errors should be raised here -- instead a placeholder is created
    field = _convert_field_from_spec("attr", (create_model("Model", size=int), None))
    assert any(
        isinstance(x, Placeholder)
        for x in get_global_registry(PydanticObjectType)._registry.values()
    )
    # this is a runtime error waiting to happen, but what can we do about it?
    assert field.type is None
def test_self_referencing():
    """Self-referencing models resolve via resolve_placeholders()."""
    class NodeModel(BaseModel):
        id: int
        name: str
        # nodes: Union['NodeModel', None]
        nodes: T.Optional["NodeModel"]
    NodeModel.update_forward_refs()

    class NodeModelSchema(PydanticObjectType):
        class Meta:  # noqa: too-few-public-methods
            model = NodeModel

        @classmethod
        def is_type_of(cls, root, info):
            return isinstance(root, (cls, NodeModel))

    NodeModelSchema.resolve_placeholders()
    assert NodeModelSchema._meta.model is NodeModel
| 31.954286
| 87
| 0.696531
| 558
| 0.099785
| 0
| 0
| 818
| 0.14628
| 0
| 0
| 582
| 0.104077
|
cec1de2a6639546d17fb7dd7eb09653aa22c391e
| 3,158
|
py
|
Python
|
src/freesound.py
|
lRomul/argus-birdsong
|
2290bd78f462cedc2ae143ec0b5e6e0782cd2b19
|
[
"MIT"
] | null | null | null |
src/freesound.py
|
lRomul/argus-birdsong
|
2290bd78f462cedc2ae143ec0b5e6e0782cd2b19
|
[
"MIT"
] | null | null | null |
src/freesound.py
|
lRomul/argus-birdsong
|
2290bd78f462cedc2ae143ec0b5e6e0782cd2b19
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from pathlib import Path
import multiprocessing as mp
from functools import partial
from src.audio import read_as_melspectrogram
from src.utils import get_params_hash
from src import config
# FreeSound label classes treated as background noise (i.e. no bird calls).
# NOTE: this list participates in the prepared-dataset hash (see
# check_prepared_freesound_data), so its contents/order must stay stable.
NOISE_SOUNDS = [
    'Buzz',
    'Car_passing_by',
    'Crackle',
    'Cricket',
    'Hiss',
    'Mechanical_fan',
    'Stream',
    'Traffic_noise_and_roadway_noise',
    'Walk_and_footsteps',
    'Waves_and_surf',
    'Crowd',
    'Run',
    'Female_speech_and_woman_speaking',
    'Male_speech_and_man_speaking',
    'Raindrop',
    'Sink_(filling_or_washing)',
    'Gurgling',
    'Frying_(food)',
]


def check_noise(labels):
    """Return True iff every comma-separated label in *labels* is a noise class."""
    return all(label in NOISE_SOUNDS for label in labels.split(','))
def make_spectrogram_and_save(file_path: Path, save_dir: Path, audio_params):
    """Compute a mel-spectrogram for *file_path* and save it as .npy in *save_dir*.

    Clips narrower than 320 time frames are silently skipped — presumably the
    minimum crop width used downstream; confirm.
    """
    spec = read_as_melspectrogram(file_path, audio_params)
    if spec.shape[1] >= 320:
        save_dir.mkdir(parents=True, exist_ok=True)
        save_path = save_dir / (file_path.name + '.npy')
        np.save(save_path, spec)
def prepare_freesound_data(dir_path, audio_params):
    """Convert noise-only FreeSound clips to mel-spectrograms saved under *dir_path*.

    Scans both the curated and the noisy train CSVs, keeps clips whose labels
    are all noise classes (see check_noise), and processes them in parallel.
    """
    dir_path = Path(dir_path)
    # The curated and noisy subsets differ only in their csv/audio locations,
    # so iterate over (csv, audio dir) pairs instead of duplicating the loop.
    sources = [
        (config.freesound_train_curated_csv_path, config.freesound_train_curated_dir),
        (config.freesound_train_noisy_csv_path, config.freesound_train_noisy_dir),
    ]
    file_path_lst = []
    for csv_path, audio_dir in sources:
        train_df = pd.read_csv(csv_path)
        for _, row in train_df.iterrows():
            if check_noise(row.labels):
                file_path_lst.append(audio_dir / row.fname)

    func = partial(make_spectrogram_and_save,
                   save_dir=dir_path, audio_params=audio_params)
    with mp.Pool(mp.cpu_count()) as pool:
        pool.map(func, file_path_lst)
def check_prepared_freesound_data(audio_params):
    """Ensure the prepared spectrogram cache for *audio_params* exists, building it if missing.

    The cache directory is keyed by a hash of the audio parameters plus the
    noise-class list, so changing either triggers a rebuild in a new directory.
    """
    params_hash = get_params_hash({**audio_params.dict(),
                                   'noise_sounds': NOISE_SOUNDS})
    prepared_train_dir = config.freesound_prepared_train_curated_dir / params_hash
    if not prepared_train_dir.exists():
        print(f"Start preparing freesound dataset to '{prepared_train_dir}'")
        prepare_freesound_data(prepared_train_dir, audio_params)
        # Fixed: plain string — the f-prefix had no placeholders (flake8 F541).
        print("Dataset prepared.")
    else:
        print(f"'{prepared_train_dir}' already exists.")
def get_freesound_folds_data(audio_params):
    """List the prepared noise spectrograms as 'nocall' samples in an extra fold.

    Returns a list of dicts with keys 'ebird_code', 'spec_path', 'fold'; every
    sample is placed in fold ``config.n_folds``, i.e. outside the regular
    cross-validation folds.
    """
    params_hash = get_params_hash({**audio_params.dict(),
                                   'noise_sounds': NOISE_SOUNDS})
    prepared_train_dir = config.freesound_prepared_train_curated_dir / params_hash
    # Fixed: the original enumerate() index was never used.
    return [
        {
            'ebird_code': 'nocall',
            'spec_path': spec_path,
            'fold': config.n_folds,
        }
        for spec_path in sorted(prepared_train_dir.glob("*.npy"))
    ]
if __name__ == "__main__":
    # Build (if needed) the prepared noise-spectrogram cache using the
    # project-wide audio settings.
    check_prepared_freesound_data(audio_params=config.audio)
| 30.07619
| 82
| 0.674478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 491
| 0.155478
|
cec2cea789e2b7e248e443d7b1eaf90f326de9fc
| 5,338
|
py
|
Python
|
especifico/json_schema.py
|
athenianco/especifico
|
af8b97868390ba23a2c5e3e8506bd5215ee0084a
|
[
"Apache-2.0"
] | null | null | null |
especifico/json_schema.py
|
athenianco/especifico
|
af8b97868390ba23a2c5e3e8506bd5215ee0084a
|
[
"Apache-2.0"
] | null | null | null |
especifico/json_schema.py
|
athenianco/especifico
|
af8b97868390ba23a2c5e3e8506bd5215ee0084a
|
[
"Apache-2.0"
] | null | null | null |
"""
Module containing all code related to json schema validation.
"""
from collections.abc import Mapping
import contextlib
from copy import deepcopy
import io
import os
import typing as t
import urllib.parse
import urllib.request
from jsonschema import Draft4Validator, RefResolver
from jsonschema.exceptions import RefResolutionError, ValidationError # noqa
from jsonschema.validators import extend
import requests
import yaml
from .utils import deep_get
class ExtendedSafeLoader(yaml.SafeLoader):
    """Extends the yaml SafeLoader to coerce all keys to string so the result is valid json."""

    def __init__(self, stream):
        # Swap in our mapping constructor while keeping a handle on the
        # original; the swap must happen before SafeLoader.__init__ runs.
        self.original_construct_mapping = self.construct_mapping
        self.construct_mapping = self.extended_construct_mapping
        super().__init__(stream)

    def extended_construct_mapping(self, node, deep=False):
        # Build the mapping normally, then stringify every key — JSON only
        # allows string keys.
        data = self.original_construct_mapping(node, deep)
        return {str(key): data[key] for key in data}
class FileHandler:
    """Resolve ``file:`` refs by loading the referenced document from disk."""

    def __call__(self, uri):
        path = self._uri_to_path(uri)
        with open(path) as handle:
            return yaml.load(handle, ExtendedSafeLoader)  # nosemgrep

    @staticmethod
    def _uri_to_path(uri):
        # Rebuild an absolute filesystem path from a file:// URI, keeping the
        # (usually empty) host component as a UNC-style prefix.
        parts = urllib.parse.urlparse(uri)
        separator = os.path.sep
        host_prefix = f"{separator}{separator}{parts.netloc}{separator}"
        local_path = urllib.request.url2pathname(parts.path)
        return os.path.abspath(os.path.join(host_prefix, local_path))
class URLHandler:
    """Handler to resolve url refs."""

    def __call__(self, uri):
        # Fetch the document (raises requests.HTTPError on non-2xx) and parse
        # it with the JSON-compatible loader.
        response = requests.get(uri)
        response.raise_for_status()
        data = io.StringIO(response.text)
        with contextlib.closing(data) as fh:
            return yaml.load(fh, ExtendedSafeLoader)  # nosemgrep
# Default scheme -> handler mapping used by resolve_refs when the caller
# supplies none.
default_handlers = {
    "http": URLHandler(),
    "https": URLHandler(),
    "file": FileHandler(),
}
def resolve_refs(spec, store=None, handlers=None):
    """
    Resolve JSON references like {"$ref": <some URI>} in a spec.

    Optionally takes a store, which is a mapping from reference URLs to
    dereferenced objects. Prepopulating the store can avoid network calls.
    """
    spec = deepcopy(spec)  # never mutate the caller's spec
    store = store or {}
    handlers = handlers or default_handlers
    resolver = RefResolver("", spec, store, handlers=handlers)

    def _do_resolve(node):
        # Depth-first walk that rewrites $ref nodes in place.
        if isinstance(node, Mapping) and "$ref" in node:
            # "#/a/b/c" -> ["a", "b", "c"]; assumes local refs start with "#/".
            path = node["$ref"][2:].split("/")
            try:
                # resolve known references
                # Merging keeps any sibling keys of $ref — non-standard JSON
                # Reference behavior, presumably intentional here; confirm.
                node.update(deep_get(spec, path))
                del node["$ref"]
                return node
            except KeyError:
                # resolve external references
                with resolver.resolving(node["$ref"]) as resolved:
                    return resolved
        elif isinstance(node, Mapping):
            for k, v in node.items():
                node[k] = _do_resolve(v)
        elif isinstance(node, (list, tuple)):
            # NOTE(review): item assignment would raise on an actual tuple;
            # parsed YAML/JSON presumably never yields tuples — confirm.
            for i, _ in enumerate(node):
                node[i] = _do_resolve(node[i])
        return node

    res = _do_resolve(spec)
    return res
def allow_nullable(validation_fn: t.Callable) -> t.Callable:
    """Wrap *validation_fn* so a null instance passes when the schema is nullable."""

    def nullable_validation_fn(validator, to_validate, instance, schema):
        # Nullable is signalled by Swagger's x-nullable or OpenAPI 3's nullable.
        nullable = schema.get("x-nullable") is True or schema.get("nullable")
        if not (instance is None and nullable):
            yield from validation_fn(validator, to_validate, instance, schema)

    return nullable_validation_fn
def validate_required(validator, required, instance, schema):
    """'required' check that skips properties excluded by read/write-only markers.

    Which markers apply is decided by what the active validator's VALIDATORS
    table contains, so this one function serves both request and response
    validation.
    """
    if not validator.is_type(instance, "object"):
        return
    for prop in required:
        if prop not in instance:
            properties = schema.get("properties")
            if properties is not None:
                subschema = properties.get(prop)
                if subschema is not None:
                    # A missing property is acceptable when it cannot legally
                    # appear in this message direction anyway.
                    if "readOnly" in validator.VALIDATORS and subschema.get("readOnly"):
                        continue
                    if "writeOnly" in validator.VALIDATORS and subschema.get("writeOnly"):
                        continue
                    if (
                        "x-writeOnly" in validator.VALIDATORS
                        and subschema.get("x-writeOnly") is True
                    ):
                        continue
            yield ValidationError("%r is a required property" % prop)
def validate_readOnly(validator, ro, instance, schema):
    # Any value present for a readOnly property is rejected outright.
    yield ValidationError("Property is read-only")
def validate_writeOnly(validator, wo, instance, schema):
    # Any value present for a writeOnly property is rejected outright.
    yield ValidationError("Property is write-only")
# 'type' and 'enum' checks relaxed so nullable schemas accept None.
NullableTypeValidator = allow_nullable(Draft4Validator.VALIDATORS["type"])
NullableEnumValidator = allow_nullable(Draft4Validator.VALIDATORS["enum"])

# Request validation: clients must not send readOnly properties.
Draft4RequestValidator = extend(
    Draft4Validator,
    {
        "type": NullableTypeValidator,
        "enum": NullableEnumValidator,
        "required": validate_required,
        "readOnly": validate_readOnly,
    },
)

# Response validation: writeOnly (and Swagger's x-writeOnly) properties must
# not appear in server responses.
Draft4ResponseValidator = extend(
    Draft4Validator,
    {
        "type": NullableTypeValidator,
        "enum": NullableEnumValidator,
        "required": validate_required,
        "writeOnly": validate_writeOnly,
        "x-writeOnly": validate_writeOnly,
    },
)
| 31.216374
| 95
| 0.641813
| 1,309
| 0.245223
| 1,554
| 0.29112
| 252
| 0.047209
| 0
| 0
| 978
| 0.183215
|
cec4f4492d6d64177d0b32e7ad2dbf7ec31fcdf8
| 779
|
py
|
Python
|
mindhome_alpha/erpnext/accounts/doctype/fiscal_year/test_fiscal_year.py
|
Mindhome/field_service
|
3aea428815147903eb9af1d0c1b4b9fc7faed057
|
[
"MIT"
] | 1
|
2021-04-29T14:55:29.000Z
|
2021-04-29T14:55:29.000Z
|
mindhome_alpha/erpnext/accounts/doctype/fiscal_year/test_fiscal_year.py
|
Mindhome/field_service
|
3aea428815147903eb9af1d0c1b4b9fc7faed057
|
[
"MIT"
] | null | null | null |
mindhome_alpha/erpnext/accounts/doctype/fiscal_year/test_fiscal_year.py
|
Mindhome/field_service
|
3aea428815147903eb9af1d0c1b4b9fc7faed057
|
[
"MIT"
] | 1
|
2021-04-29T14:39:01.000Z
|
2021-04-29T14:39:01.000Z
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, unittest
from erpnext.accounts.doctype.fiscal_year.fiscal_year import FiscalYearIncorrectDate
# Fixtures for the frappe test runner; test_ignore presumably tells the runner
# not to auto-create dependent Company test records — confirm against frappe docs.
test_records = frappe.get_test_records('Fiscal Year')
test_ignore = ["Company"]
class TestFiscalYear(unittest.TestCase):
    """Unit tests for the Fiscal Year doctype."""

    def test_extra_year(self):
        # A fiscal year spanning 2000-04-01 to 2002-12-31 (more than twelve
        # months) must be rejected on insert.
        if frappe.db.exists("Fiscal Year", "_Test Fiscal Year 2000"):
            frappe.delete_doc("Fiscal Year", "_Test Fiscal Year 2000")

        fy = frappe.get_doc({
            "doctype": "Fiscal Year",
            "year": "_Test Fiscal Year 2000",
            "year_end_date": "2002-12-31",
            "year_start_date": "2000-04-01"
        })

        self.assertRaises(FiscalYearIncorrectDate, fy.insert)
| 28.851852
| 84
| 0.752246
| 417
| 0.535302
| 0
| 0
| 0
| 0
| 0
| 0
| 329
| 0.422336
|
cec53997d0c2400355b2407286a2ab638e1feab8
| 1,008
|
py
|
Python
|
types/integers.py
|
anthony-walker/me499
|
1ec5761a822956b4e18f83b3e0cda93715b74b3e
|
[
"BSD-3-Clause"
] | 11
|
2020-03-31T21:27:19.000Z
|
2022-01-11T09:50:13.000Z
|
types/integers.py
|
anthony-walker/me499
|
1ec5761a822956b4e18f83b3e0cda93715b74b3e
|
[
"BSD-3-Clause"
] | null | null | null |
types/integers.py
|
anthony-walker/me499
|
1ec5761a822956b4e18f83b3e0cda93715b74b3e
|
[
"BSD-3-Clause"
] | 5
|
2020-05-13T05:47:23.000Z
|
2021-09-27T18:43:25.000Z
|
#!/usr/bin/env python3
# Walkthrough of Python's int type: construction, true vs. floor division,
# and remainder (course demo script).
if __name__ == '__main__':
    # Python can represent integers.  Here are a couple of ways to create an integer variable.  Notice the truncation,
    # rather than rounding, in the assignment of d.
    a = 5
    b = int()
    c = int(4)
    d = int(3.84)
    print(a, b, c, d)

    # Integers have the usual math operations.  Note that division will return a float, but others will preserve the
    # integer type.  The type() function can tell you the type of a variable.  You should try to avoid using this
    # function in your code.
    print('\ndivision')
    a = 10
    b = 10 / 5
    print(b, type(b))

    # We can force integer division with //.  Note that this will truncate results.
    print('\nInteger division')
    a = 10
    b = 10 // 5
    print(b, type(b))
    a = 10
    b = 10 // 3
    print(b, type(b))

    # We can also calculate the remainder
    n = 10
    m = 3
    div = n // m
    rem = n % m
    print('\n{0} = {1} * {2} + {3}'.format(n, div, m, rem))
| 27.243243
| 118
| 0.590278
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 611
| 0.606151
|
cec69b53aae0a98c800aee68729ab0b6f22dfd50
| 1,976
|
py
|
Python
|
cli/asciiart.py
|
Christophe1997/pyramid
|
d135c86329b6527d54535d95c0db8b5d2da6cc8c
|
[
"Apache-2.0"
] | null | null | null |
cli/asciiart.py
|
Christophe1997/pyramid
|
d135c86329b6527d54535d95c0db8b5d2da6cc8c
|
[
"Apache-2.0"
] | null | null | null |
cli/asciiart.py
|
Christophe1997/pyramid
|
d135c86329b6527d54535d95c0db8b5d2da6cc8c
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python3
"""Convert picture to asciiArt, requrie python3.6 or higher.
Dependence:
- fire
- PIL
- numpy
Usage:
- chmod +x asciiart.py
- asciiart.py ${path_to_image} [Height] [Width]
Also, you can remove the filetype ".py" and put it to $HOME/bin/ then enjoy it:)
One example:
*&&&&&&&&&&&&&&&&&&&&&+
&&&$ &&&&&&&&&&&&&&&&&%
&&&&&%&&&&&&&&&&&&&&&&&&&$
%%%%%%%%%%%%%&&&&&&&&$$$$$
+&&&&&&&&&&&&&&&&&&&&&&&$&&&$$$$$$$$ ****** ***
&&&&&&&&&&&&&&&&&&&&&&&&&&&&$$$$$$$$$$*************
&&&&&&&&&&&&&&&&&&&&&&&&&&&$$$$$$$$$&$%*************
*&&&&&&&&&&&&&&&&&&&&&&&&&&$$$$$$$$$# ****************
+&&&&&&&&&&&&&$%**************************************
&&&&&&&&&&&&** **************************************
*&&&&&&&&&&$ ***************************************
+&&&&&&&&$ *************************************
**************************
**************************
****************** ****
+********************+
"""
import fire
from PIL import Image
import numpy as np
import sys
class AsciiArt:
    """Fire CLI command object that renders an image as ASCII characters."""

    DEFAULT_HEIGHT = 20
    DEFAULT_WIDTH = 60
    # Darkest-to-lightest glyphs; index = luminance bucket (0-7).
    SYMBOL = list("@#$&%+* ")

    def draw(self, image_path, height=DEFAULT_HEIGHT, width=DEFAULT_WIDTH):
        """Return *image_path* rendered as a height-by-width ASCII string."""
        try:
            image = Image.open(image_path).resize((width, height))
            array = np.array(image.convert("L"))  # "L" = 8-bit grayscale
            image.close()
        except FileNotFoundError:
            print(f"{image_path} not exist")
            sys.exit(1)
        # Quantize 0-255 luminance into the 8 symbol buckets.
        array = np.floor((array / 256) * 8)
        result = []
        for i, line in enumerate(array):
            try:
                result.append("".join(map(lambda x: self.SYMBOL[int(x)], line)) + "\n")
            except IndexError:
                # Should be unreachable given the quantization above; bail
                # with the offending row index if it ever happens.
                print(i)
                sys.exit(1)
        return "".join(result)
if __name__ == "__main__":
fire.Fire(AsciiArt)
| 29.058824
| 87
| 0.34666
| 755
| 0.382085
| 0
| 0
| 0
| 0
| 0
| 0
| 1,153
| 0.583502
|
cec6fd770eab40205480d0c6c46d89a072e6b4a6
| 2,057
|
py
|
Python
|
tests/conftest.py
|
MohamedRaslan/screenpy
|
94be7caa444ae7ac8a4ac403cd93ad92108237fe
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
MohamedRaslan/screenpy
|
94be7caa444ae7ac8a4ac403cd93ad92108237fe
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
MohamedRaslan/screenpy
|
94be7caa444ae7ac8a4ac403cd93ad92108237fe
|
[
"MIT"
] | null | null | null |
from typing import Callable, Generator, Any
from unittest import mock
import pytest
from screenpy import AnActor, pacing, settings
from screenpy.abilities import AuthenticateWith2FA, BrowseTheWeb, MakeAPIRequests
from screenpy.narration.narrator import Narrator
@pytest.fixture(scope="function")
def Tester() -> AnActor:
    """Provide an Actor with mocked web browsing abilities."""
    AuthenticateWith2FA_Mocked = mock.Mock(spec=AuthenticateWith2FA)
    AuthenticateWith2FA_Mocked.otp = mock.Mock()  # attribute accessed directly in tests
    BrowseTheWeb_Mocked = mock.Mock(spec=BrowseTheWeb)
    BrowseTheWeb_Mocked.browser = mock.Mock()  # attribute accessed directly in tests
    return AnActor.named("Tester").who_can(
        AuthenticateWith2FA_Mocked, BrowseTheWeb_Mocked
    )
@pytest.fixture(scope="function")
def APITester() -> AnActor:
    """Provide an Actor with mocked API testing abilities."""
    MakeAPIRequests_Mocked = mock.Mock(spec=MakeAPIRequests)
    MakeAPIRequests_Mocked.session = mock.Mock()  # attribute accessed directly in tests
    return AnActor.named("Tester").who_can(MakeAPIRequests_Mocked)
@pytest.fixture(scope="function")
def mocked_narrator() -> Generator[mock.MagicMock, Any, None]:
    """Mock out the Narrator for a test, replacing the old one afterwards."""
    MockNarrator = mock.MagicMock(spec=Narrator)
    old_narrator = pacing.the_narrator
    pacing.the_narrator = MockNarrator
    yield MockNarrator
    # Teardown: restore the module-global narrator so tests stay isolated.
    pacing.the_narrator = old_narrator
def mock_settings(**new_settings) -> Callable:
    """Decorator factory: patch screenpy ``settings`` attributes for one call.

    Saves the named settings, applies the overrides for the duration of the
    wrapped function, and restores the originals even if it raises.
    """
    import functools  # local import so the module's import block is untouched

    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)  # preserve the wrapped test's name for pytest
        def wrapper(*args, **kwargs) -> Callable:
            old_settings = {
                key: getattr(settings, key)
                for key in new_settings.keys()
            }
            for key, value in new_settings.items():
                setattr(settings, key, value)
            try:
                # Propagate the wrapped function's return value (the original
                # discarded it, which breaks non-test callables).
                return func(*args, **kwargs)
            finally:
                for key, value in old_settings.items():
                    setattr(settings, key, value)

        return wrapper

    return decorator
| 31.646154
| 81
| 0.684978
| 0
| 0
| 331
| 0.160914
| 1,113
| 0.541079
| 0
| 0
| 293
| 0.14244
|
cec7fff35f1ea56bf0187cca1f5248b7d68f0fa3
| 14,729
|
py
|
Python
|
main.py
|
ceciliazhang12/resource-reservation-system
|
d680582a41d39b1558b85d1e42f9006eb07caef8
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
ceciliazhang12/resource-reservation-system
|
d680582a41d39b1558b85d1e42f9006eb07caef8
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
ceciliazhang12/resource-reservation-system
|
d680582a41d39b1558b85d1e42f9006eb07caef8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START imports]
import os
from datetime import datetime, time, timedelta
import uuid
import time as t
from google.appengine.api import users
from google.appengine.ext import ndb
from google.appengine.api import mail
import jinja2
import webapp2
from models import Resource, Reservation
from __builtin__ import True
PATH_TEMPLATE = os.path.join(os.path.dirname(__file__), 'templates')
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(PATH_TEMPLATE),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
# [END imports]
# Helper Function
def send_mail(resource, reservation):
    """Email a reservation confirmation to the reserving user.

    NOTE(review): *resource* is currently unused (kept for interface
    stability), and the subject line contains the typo "Confirmend".
    """
    text = "Hi,\n\n" + "You've reserved {0} from {1} to {2}. " \
        .format(reservation.resource_name, reservation.start_time,
                reservation.end_time)
    mail.send_mail(sender="yz3847@nyu.edu", to=reservation.user,
                   subject="Reservation Confirmend.", body=text)
'''
Landing Page, which displays the following 4 sections:
user login / logout link
reservations made for resources by that user (sorted by the reservation time)
all resources in the system (shown in reverse time order based on last made reservation)
resources owned by that user
a link to create a new resource
'''
class LandingPage(webapp2.RequestHandler):
    """Landing page: logout link, the user's upcoming reservations, all
    resources (most recently reserved first), and the user's own resources.
    Redirects to the Google login flow when no user is signed in."""

    def get(self):
        user = users.get_current_user()
        if user:
            url = users.create_logout_url(self.request.uri)
            url_linktext = 'Logout'

            # retrieve reservations by current user
            # NOTE(review): the 300-minute offset presumably converts the
            # datastore's UTC "now" to local time (UTC-5) — confirm.
            now_time = datetime.now() - timedelta(minutes=300)
            reservation_by_curr_user = Reservation.query(ndb.AND(Reservation.user == user.email(),
                                                                 Reservation.end_time > now_time)) \
                .fetch()
            if reservation_by_curr_user:
                reservation_by_curr_user = sorted(reservation_by_curr_user, key=lambda r: r.start_time)

            # retrieve all resources in system
            sorted_resources = Resource.query().order(-Resource.last_reservation_time)

            # retrieve resources owned by current user
            resources_owned = Resource.query(Resource.owner==user.email())

            template_values = {
                'user': user,
                'reservation_by_curr_user': reservation_by_curr_user,
                'sorted_resources': sorted_resources,
                'resources_owned': resources_owned,
                'url': url,
                'url_linktext': url_linktext,
            }
            template = JINJA_ENVIRONMENT.get_template('index.html')
            self.response.write(template.render(template_values))
        else:
            # Not signed in: bounce through the Google login flow.
            self.redirect(users.create_login_url(self.request.uri))
            # url_linktext = 'Login'
'''
CreateResource Handler enables the function of creating a new resource
'''
class CreateResource(webapp2.RequestHandler):
    """Form handler for creating a new resource."""

    def get(self):
        # Render the empty "new resource" form.
        template = JINJA_ENVIRONMENT.get_template('newResource.html')
        template_values = {}
        self.response.write(template.render(template_values))

    def post(self):
        # Persist the submitted resource; the owner is the signed-in user.
        resource = Resource()
        user = users.get_current_user()
        if user:
            resource.owner = user.email()
        resource.id = str(uuid.uuid4())
        resource.name = self.request.get('name')
        # Times arrive as "HH:MM" strings; they are stored on today's date.
        start_time = map(int, self.request.get('available_start_time').split(':'))
        end_time = map(int, self.request.get('available_end_time').split(':'))
        resource.available_start_time = datetime.combine(datetime.today(), time(*start_time))
        resource.available_end_time = datetime.combine(datetime.today(), time(*end_time))
        resource.tags = self.request.get('tags').split(', ')
        resource.num_reserved = 0
        resource.put()
        # Brief pause — presumably so the eventually-consistent datastore
        # query on the landing page sees the new entity; confirm.
        t.sleep(0.1)
        self.redirect('/')
'''
ViewResource Handler handels displaying the main page for an existing resource
'''
class ViewResource(webapp2.RequestHandler):
    """Detail page for one resource: owner, reservation count, and the
    resource's upcoming reservations sorted by start time."""

    def get(self):
        resource_id = self.request.get('id')
        owner = Resource.query(Resource.id == resource_id).get().owner
        count = Resource.query(Resource.id == resource_id).get().num_reserved
        curr_user = users.get_current_user()
        # NOTE(review): 300-minute shift presumably UTC->local — confirm.
        now_time = datetime.now() - timedelta(minutes=300)
        reservations = Reservation.query(ndb.AND(Reservation.resource_id == resource_id,
                                                 Reservation.end_time >= now_time)).fetch()
        reservations = sorted(reservations, key=lambda r: r.start_time)
        # currentUser = str(users.get_current_user().email())
        template_values = {
            'reservations': reservations,
            'curr_user': curr_user,
            'owner': owner,
            'resource_id': resource_id,
            'count':count,
        }
        template = JINJA_ENVIRONMENT.get_template('resource.html')
        self.response.write(template.render(template_values))
'''
ViewResource Handler handels the function of editing an existing resource
'''
class EditResource(webapp2.RequestHandler):
    """Pre-filled form for editing an existing resource."""

    def get(self):
        # Load the resource and pre-populate the edit form.
        resource_id = self.request.get('id')
        resource = Resource.query(Resource.id == resource_id).get()
        start = resource.available_start_time
        end = resource.available_end_time
        template_values = {
            'id': resource_id,
            'name': resource.name,
            'available_start_time': start,
            'available_end_time': end,
            'tags': ', '.join(resource.tags),
        }
        template = JINJA_ENVIRONMENT.get_template('editResource.html')
        self.response.write(template.render(template_values))

    def post(self):
        # Apply the submitted edits to the stored resource.
        resource_id = self.request.get('id')
        resource = Resource.query(Resource.id == resource_id).get()
        resource.name = self.request.get('name')
        # today = datetime.now().date()
        # Times arrive as "HH:MM" strings; they are stored on today's date.
        start_time = map(int, self.request.get('available_start_time').split(':'))
        end_time = map(int, self.request.get('available_end_time').split(':'))
        resource.available_start_time = datetime.combine(datetime.today(), time(*start_time))
        resource.available_end_time = datetime.combine(datetime.today(), time(*end_time))
        resource.tags = self.request.get('tags').split(', ')
        resource.put()
        # Presumably lets the datastore index catch up before redirecting.
        t.sleep(0.1)
        self.redirect('/')
'''
ViewUser Handler handles displaying the main page for a user
'''
class ViewUser(webapp2.RequestHandler):
    """Displays a user's page: their reservations and the resources they own."""

    def get(self):
        user_email = self.request.get('email')
        # Reservations made by this user, ordered by start time.
        reservations = Reservation.query(Reservation.user == user_email)
        if reservations:
            reservations = reservations.order(Reservation.start_time)
        # Resources owned by this user.
        owned = Resource.query(Resource.owner == user_email)
        template_values = {
            'reservation_by_curr_user': reservations,
            'resources_owned': owned,
        }
        self.response.write(
            JINJA_ENVIRONMENT.get_template('user.html').render(template_values))
'''
CreateReservation Handler handles creating a new reservation
'''
class CreateReservation(webapp2.RequestHandler):
    """Shows the new-reservation form (GET) and creates the reservation (POST).

    POST validates the requested slot against the resource's availability
    window and against every existing reservation before persisting.
    """
    def get(self):
        resource_id = self.request.get('id')
        resource = Resource.query(Resource.id == resource_id).get()
        template_values = {
            'id': resource_id,
            'name': resource.name
        }
        template = JINJA_ENVIRONMENT.get_template('newReservation.html')
        self.response.write(template.render(template_values))
    def post(self):
        resource_id = self.request.get('id')
        resource_name = self.request.get('name')
        # Parse "HH:MM" and anchor the requested slot to today's date.
        start_time = time(*map(int, self.request.get('available_start_time').split(':')))
        start_time = datetime.combine(datetime.today(), start_time)
        duration = int(self.request.get('duration'))  # minutes
        resource = Resource.query(Resource.id == resource_id).get()
        end_time = start_time + timedelta(minutes=duration)
        # check time format and availability
        has_error = False
        msg = ''
        # Validation: 1) sane interval, 2) inside the resource's availability
        # window, 3) no overlap with an existing reservation.
        if end_time < start_time:
            has_error = True
            msg = 'Error, wrong format of start time or duration. Please return to former page to enter correctly.'
        elif resource.available_start_time > start_time or \
                resource.available_end_time < end_time:
            has_error = True
            msg = 'Error, resource not available during the selected period. Please return to former page to enter another time period.'
        else:
            reservations = Reservation.query(Reservation.resource_id == resource_id).fetch()
            for r in reservations:
                # Two intervals overlap unless one ends before the other starts.
                if not (end_time <= r.start_time or start_time >= r.end_time):
                    has_error = True
                    msg = 'Error, reservation conflict. Please return to former page to enter another time period.'
        if has_error:
            template = JINJA_ENVIRONMENT.get_template('newReservation.html')
            template_values = {'msg': msg}
            self.response.write(template.render(template_values))
        else:
            # add reservation if no error
            reservation = Reservation()
            reservation.id = str(uuid.uuid4())
            reservation.user = str(users.get_current_user().email())
            reservation.start_time = start_time
            reservation.duration = duration
            reservation.end_time = end_time
            reservation.resource_id = resource_id
            reservation.resource_name = resource_name
            reservation.put()
            # -300 minutes: presumably a UTC -> local-time adjustment -- TODO confirm.
            resource.last_reservation_time = datetime.now() - timedelta(minutes=300)
            resource.num_reserved += 1
            resource.put()
            t.sleep(1)  # let the datastore write settle before mail/redirect
            send_mail(resource, reservation)
            self.redirect('/')
'''
GenerateRSS Handler handles generating an RSS link for an existing resource
'''
class GenerateRSS(webapp2.RequestHandler):
    """Renders an RSS-style XML page describing a resource and its reservations."""

    def get(self):
        resource_id = self.request.get('id')
        resource = Resource.query(Resource.id == resource_id).get()
        reservations = Reservation.query(Reservation.resource_id == resource_id).fetch()
        # One tag-pair entry per reservation.
        entries = []
        for r in reservations:
            entries.append({
                'user': '<reservedBy>{}</reservedBy>'.format(r.user),
                'start': '<reservedAt>{}</reservedAt>'.format(r.start_time),
            })
        template_values = {
            'header': '<?xml version="1.0" encoding="UTF-8" ?>',
            'owner': '<owner>{}</owner>'.format(resource.owner),
            'name': '<name>{}</name>'.format(resource.name),
            'start_time': '<start_time>{}</start_time>'.format(resource.available_start_time),
            'end_time': '<end_time>{}</end_time>'.format(resource.available_end_time),
            'reservations': entries,
        }
        self.response.write(
            JINJA_ENVIRONMENT.get_template('rss.html').render(template_values))
'''
DeleteReservation Handler handles deleting an existing reservation from the Landing Page
'''
class DeleteReservation(webapp2.RequestHandler):
    """Deletes an existing reservation (invoked from the landing page)."""

    def post(self):
        reservation_id = self.request.get('reservation_id')
        reservation = Reservation.query(Reservation.id == reservation_id).get()
        # Guard against a stale/double delete: the reservation may already be
        # gone, in which case the original raised AttributeError on None.
        if reservation is not None:
            reservation.key.delete()
            t.sleep(0.1)  # brief pause so the delete is visible after redirect
        self.redirect('/')
'''
ResourcesByTag Handler handles filtering existing resources by tag
'''
class ResourcesByTag(webapp2.RequestHandler):
    """Lists all resources that carry a given tag (case-insensitive)."""

    def get(self):
        tag = self.request.get('tag').lower()
        resources = Resource.query().order(-Resource.last_reservation_time).fetch()
        # Normalize each resource's tags before the membership test.
        matching = [r for r in resources
                    if tag in [item.lower().strip() for item in r.tags]]
        template_values = {
            'tag': tag,
            'resources': matching,
        }
        self.response.write(
            JINJA_ENVIRONMENT.get_template('tag.html').render(template_values))
'''
SearchResource Handler enables the function of searching existing resources by name
'''
class SearchResource(webapp2.RequestHandler):
    """Search form (GET) and case-insensitive substring search on names (POST)."""

    def get(self):
        template = JINJA_ENVIRONMENT.get_template('searchResource.html')
        self.response.write(template.render({}))

    def post(self):
        name = self.request.get('name').lower()
        resources = Resource.query().order(-Resource.last_reservation_time).fetch()
        # Substring match against the trimmed, lower-cased resource name.
        # (Removed a leftover Python-2 debug statement: `print resources`.)
        results = [r for r in resources if name in r.name.strip().lower()]
        template_values = {
            'name': name,
            'resources': results,
        }
        template = JINJA_ENVIRONMENT.get_template('searchResource.html')
        self.response.write(template.render(template_values))
# [START app]
# URL routing table: maps each request path to its handler class.
app = webapp2.WSGIApplication([
    ('/', LandingPage),
    ('/newResource.html', CreateResource),
    ('/resource.html', ViewResource),
    ('/editResource.html', EditResource),
    ('/newReservation.html', CreateReservation),
    ('/user.html', ViewUser),
    ('/index.html', DeleteReservation),
    ('/tag.html', ResourcesByTag),
    ('/rss.html', GenerateRSS),
    ('/searchResource.html', SearchResource),
], debug=True)
# [END app]
| 39.808108
| 136
| 0.63263
| 11,512
| 0.781587
| 0
| 0
| 0
| 0
| 0
| 0
| 3,710
| 0.251884
|
cec871b2942200cdc40d51b006698874939d3556
| 144
|
py
|
Python
|
sutta_publisher/src/sutta_publisher/edition_parsers/__init__.py
|
suttacentral/publications
|
878befcfeb7af7f2f511697f2769cd00441aec57
|
[
"CC0-1.0"
] | 1
|
2022-02-16T09:02:58.000Z
|
2022-02-16T09:02:58.000Z
|
sutta_publisher/src/sutta_publisher/edition_parsers/__init__.py
|
suttacentral/publications
|
878befcfeb7af7f2f511697f2769cd00441aec57
|
[
"CC0-1.0"
] | 43
|
2022-02-07T11:37:29.000Z
|
2022-03-30T08:54:55.000Z
|
sutta_publisher/src/sutta_publisher/edition_parsers/__init__.py
|
suttacentral/publications
|
878befcfeb7af7f2f511697f2769cd00441aec57
|
[
"CC0-1.0"
] | null | null | null |
from .epub import EpubEdition
from .html import HtmlEdition
from .pdf import PdfEdition
__all__ = ["EpubEdition", "HtmlEdition", "PdfEdition"]
| 24
| 54
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 38
| 0.263889
|
cec99a1754c7933823e18fdb2d3c3f20789ec5bf
| 5,052
|
py
|
Python
|
tools/c7n_azure/tests_azure/tests_resources/test_event_hub.py
|
chris-angeli-rft/cloud-custodian
|
5ff331b114a591dbaf6d672e30ceefb7ae64a5dd
|
[
"Apache-2.0"
] | 8
|
2021-05-18T02:22:03.000Z
|
2021-09-11T02:49:04.000Z
|
tools/c7n_azure/tests_azure/tests_resources/test_event_hub.py
|
chris-angeli-rft/cloud-custodian
|
5ff331b114a591dbaf6d672e30ceefb7ae64a5dd
|
[
"Apache-2.0"
] | 1
|
2021-04-26T04:38:35.000Z
|
2021-04-26T04:38:35.000Z
|
tools/c7n_azure/tests_azure/tests_resources/test_event_hub.py
|
chris-angeli-rft/cloud-custodian
|
5ff331b114a591dbaf6d672e30ceefb7ae64a5dd
|
[
"Apache-2.0"
] | 1
|
2020-12-28T23:21:30.000Z
|
2020-12-28T23:21:30.000Z
|
# Copyright 2019 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..azure_common import BaseTest, arm_template, cassette_name
class EventHubTest(BaseTest):
    """Cloud Custodian tests for the azure.eventhub resource.

    The firewall tests replay recorded API responses (cassette 'firewall')
    and target the test namespace whose name contains '-cctesteventhubns'.
    """

    def test_event_hub_schema_validate(self):
        # The policy schema alone should validate without signing in.
        with self.sign_out_patch():
            p = self.load_policy({
                'name': 'test-event-hub-compliance',
                'resource': 'azure.eventhub'
            }, validate=True)
            self.assertTrue(p)

    @arm_template('eventhub.json')
    def test_find_by_name(self):
        # The deployed ARM template provides exactly one matching namespace.
        p = self.load_policy({
            'name': 'test-azure-eventhub',
            'resource': 'azure.eventhub',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'contains',
                 'value_type': 'normalize',
                 'value': '-cctesteventhubns'}],
        })
        resources = p.run()
        self.assertEqual(len(resources), 1)

    @cassette_name('firewall')
    def test_firewall_rules_include_cidr(self):
        # 'include' matches: the recorded rules cover 11.0.0.0/24.
        p = self.load_policy({
            'name': 'test-azure-eventhub',
            'resource': 'azure.eventhub',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'contains',
                 'value_type': 'normalize',
                 'value': '-cctesteventhubns'},
                {'type': 'firewall-rules',
                 'include': ['11.0.0.0/24']}],
        })
        resources = p.run()
        self.assertEqual(len(resources), 1)

    @cassette_name('firewall')
    def test_firewall_rules_not_include_cidr(self):
        # 'include' misses: 11.0.1.0/24 is not covered by the recorded rules.
        p = self.load_policy({
            'name': 'test-azure-eventhub',
            'resource': 'azure.eventhub',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'contains',
                 'value_type': 'normalize',
                 'value': '-cctesteventhubns'},
                {'type': 'firewall-rules',
                 'include': ['11.0.1.0/24']}],
        })
        resources = p.run()
        self.assertEqual(len(resources), 0)

    @cassette_name('firewall')
    def test_firewall_rules_ranges(self):
        # Same include semantics expressed as an explicit IP range.
        p = self.load_policy({
            'name': 'test-azure-eventhub',
            'resource': 'azure.eventhub',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'contains',
                 'value_type': 'normalize',
                 'value': '-cctesteventhubns'},
                {'type': 'firewall-rules',
                 'include': ['11.0.0.0-11.0.0.255']}],
        }, validate=True)
        resources = p.run()
        self.assertEqual(1, len(resources))

    @cassette_name('firewall')
    def test_firewall_rules_not_ranges(self):
        # Range outside the recorded rules -> no match.
        p = self.load_policy({
            'name': 'test-azure-eventhub',
            'resource': 'azure.eventhub',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'contains',
                 'value_type': 'normalize',
                 'value': '-cctesteventhubns'},
                {'type': 'firewall-rules',
                 'include': ['11.0.1.0-11.0.1.255']}],
        }, validate=True)
        resources = p.run()
        self.assertEqual(0, len(resources))

    @cassette_name('firewall')
    def test_firewall_rules_equal(self):
        # 'equal' requires the rule set to match exactly.
        p = self.load_policy({
            'name': 'test-azure-eventhub',
            'resource': 'azure.eventhub',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'contains',
                 'value_type': 'normalize',
                 'value': '-cctesteventhubns'},
                {'type': 'firewall-rules',
                 'equal': ['11.0.0.0/24', '10.1.1.1/32']}],
        }, validate=True)
        resources = p.run()
        self.assertEqual(1, len(resources))

    @cassette_name('firewall')
    def test_firewall_rules_not_equal(self):
        # A differing rule set fails the exact-equality filter.
        p = self.load_policy({
            'name': 'test-azure-eventhub',
            'resource': 'azure.eventhub',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'contains',
                 'value_type': 'normalize',
                 'value': '-cctesteventhubns'},
                {'type': 'firewall-rules',
                 'equal': ['11.0.1.0/24', '10.1.1.1/32']}],
        }, validate=True)
        resources = p.run()
        self.assertEqual(0, len(resources))
| 35.328671
| 74
| 0.500792
| 4,403
| 0.871536
| 0
| 0
| 4,055
| 0.802652
| 0
| 0
| 2,047
| 0.405186
|
cecb5247af165b57afea53ae66b0856d10c40b07
| 2,662
|
py
|
Python
|
pythonfiles/testing.py
|
amrut-prabhu/loan-default-prediction
|
2e0a91529a71c69e93d7b30decefc59f2627406f
|
[
"MIT"
] | 2
|
2020-05-06T15:11:56.000Z
|
2020-05-24T13:51:55.000Z
|
pythonfiles/testing.py
|
amrut-prabhu/loan-default-prediction
|
2e0a91529a71c69e93d7b30decefc59f2627406f
|
[
"MIT"
] | null | null | null |
pythonfiles/testing.py
|
amrut-prabhu/loan-default-prediction
|
2e0a91529a71c69e93d7b30decefc59f2627406f
|
[
"MIT"
] | 2
|
2018-09-23T07:09:51.000Z
|
2021-12-16T17:58:14.000Z
|
import numpy as np
import pandas as pd
from sklearn.externals import joblib
#from sklearn.ensemble import RandomForestRegressor
#from sklearn.multioutput import MultiOutputRegressor
#from sklearn.multioutput import MultiOutputRegressor
from sklearn.model_selection import train_test_split
# Load the loan dataset directly from Google Drive (network access required).
df = pd.read_csv('https://drive.google.com/uc?export=download&id=1XoV8SfvHmzaxRuDRe81OWSQu10dYTbO5',sep=',')
# Feature columns 2..12, one-hot encoded.
df_X = df.iloc[:, 2:13].copy()
df_X = pd.get_dummies(df_X)
# First target block: columns 13..15 (fed to the "Earnings" model below).
df_y1 = df.iloc[:, 13:16].copy()
df_y1 = pd.get_dummies(df_y1)
# Second target block: columns 16..19 (fed to the "Repayment" model below).
df_y2 = df.iloc[:, 16:20].copy()
df_y2 = pd.get_dummies(df_y2)
#X_train, df_X, y_train, df_y1 = train_test_split(df_X, df_y1, test_size=0.2, random_state=0)
def accuracy(model, X_test, y_test):
    """Print the model's score and per-column accuracy (100 - MAPE) on the test set."""
    preds = model.predict(X_test)
    print(model.score(X_test, y_test))
    abs_err = np.abs(preds - y_test)
    # Mean absolute percentage error per target column.
    pct_err = 100 * (abs_err / y_test)
    for col in pct_err:
        col_acc = 100 - np.mean(pct_err[col])
        print('Accuracy:', round(col_acc, 2), '%.')
def test(model, df_X, df_y1, num):
    """Evaluate `model` on (df_X, df_y1) and dump per-target prediction CSVs.

    For each of the first `num` target columns, writes the features joined
    with the true column and the predicted column to
    "ModelPredictions<num><i>.csv" in the working directory.
    """
    accuracy(model, df_X, df_y1)
    predictions = pd.DataFrame(model.predict(df_X))
    errors = np.abs(predictions - df_y1)
    print(type(predictions))
    print(type(errors))
    for i in range(num):
        # DataFrame.ix was removed from pandas; use positional .iloc instead.
        df_X.join(df_y1.iloc[:, i]).join(predictions.iloc[:, i]) \
            .to_csv("ModelPredictions" + str(num) + str(i) + ".csv")
#model = RandomForestRegressor(n_estimators = 1900, max_depth = 70, random_state = 50, learning_rate = 0.1, min_samples_split = 0.1, max_features = 'sqrt', loss = 'lad', warm_start = True, min_samples_leaf = 0.0005)
#model1 = MultipleOutputRegressor(model)
#model2 = MultipleOutputRegressor(model, n_jobs = -1)
#model1.fit(X_train, y_train)
# Evaluate the persisted earnings model (first 3 target columns)...
model = joblib.load("RegressorChainGradientBoostingRegressorEarningsNew.pkl")
test(model, df_X, df_y1, 3)
# ...and the repayment model (first 4 target columns).
model = joblib.load("RegressorChainGradientBoostingRegressorRepaymentNew.pkl")
test(model, df_X, df_y2, 4)
#model2.fit(X_train, y_train)
#joblib.dump(model2, "MultiplepleOutputRandomForestRegressorEarnings.pkl")
#print("Model 2: ");
#accuracy(model2, df_X, df_y1)
#X_train, df_X, y_train, df_y1 = train_test_split(df_X, df_y2, test_size=0.2, random_state=0)
#model1.fit(X_train, y_train)
#joblib.dump(model1, "MultipleOutputRegressorRandomForestRegressorRepayment.pkl")
#print("Model 3: ");
#accuracy(model1, df_X, df_y1)
#model2.fit(X_train, y_train)
#joblib.dump(model2, "MultiplepleOutputRandomForestRegressorRepayment.pkl")
#print("Model 4: ");
#accuracy(model2, df_X, df_y1)
| 39.731343
| 215
| 0.743426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,595
| 0.599174
|
cecbe395ca91f3242d3c1d6e5ad79275b3277dc8
| 6,806
|
py
|
Python
|
kerasjr/Model.py
|
OliverMathias/kerasjr
|
9fc039cf2fee4d29529707d4644c775121e5d1d7
|
[
"MIT"
] | null | null | null |
kerasjr/Model.py
|
OliverMathias/kerasjr
|
9fc039cf2fee4d29529707d4644c775121e5d1d7
|
[
"MIT"
] | null | null | null |
kerasjr/Model.py
|
OliverMathias/kerasjr
|
9fc039cf2fee4d29529707d4644c775121e5d1d7
|
[
"MIT"
] | null | null | null |
# Python 3
import numpy as np
np.seterr(divide='ignore', invalid='ignore')
class Model:
    """A small fully-connected neural network trained with vanilla backprop.

    Architecture: input layer -> `number_of_hidden_layers` hidden layers of
    `number_of_hidden_nodes` units each -> output layer, all tanh-activated.
    Weights (W), activations (A) and deltas (D) are kept as parallel lists
    with one slot per layer.
    """

    def __init__(self, x, y, number_of_hidden_layers=2, number_of_hidden_nodes=30, quiet=False):
        """Store the training data and initialize weights in [-1, 1).

        x, y: 2-D numpy arrays (one sample per row).
        quiet: suppress the architecture summary printout.
        """
        self.x = x
        self.y = y
        self.number_of_hidden_layers = number_of_hidden_layers
        self.number_of_hidden_nodes = number_of_hidden_nodes
        self.input_layer_activation_function = "tanh"
        self.hidden_layer_activation_function = "tanh"
        self.output_layer_activation_function = "tanh"

        # Reproducible weight initialization.
        np.random.seed(1)

        input_shape = self.x[0].shape[0]
        output_shape = self.y[0].shape[0]
        number_of_hidden_nodes = self.number_of_hidden_nodes
        number_of_hidden_layers = self.number_of_hidden_layers

        # One slot per layer: input + hidden layers + output.
        self.W = [None] * (number_of_hidden_layers + 2)   # weights
        self.A = [None] * (number_of_hidden_layers + 2)   # activations
        self.D = [None] * (number_of_hidden_layers + 2)   # deltas

        self.W[0] = 2 * np.random.random((input_shape, number_of_hidden_nodes)) - 1
        for i in range(number_of_hidden_layers):
            self.W[i + 1] = 2 * np.random.random(
                (number_of_hidden_nodes, number_of_hidden_nodes)) - 1
        self.W[len(self.W) - 1] = 2 * np.random.random(
            (number_of_hidden_nodes, output_shape)) - 1

        if quiet == False:
            # Human-readable summary of the per-layer parameter counts.
            print("Network Architecture:")
            print("----------------------------------------------------------------------------")
            total = 0
            for count, w in enumerate(self.W):
                total += (w.shape[0] * w.shape[1])
                if count == 0:
                    print("Input Layer Number of Weights: " + str(w.shape[0] * w.shape[1]))
                elif count == (len(self.W) - 1):
                    print("Output Layer Number of Weights: " + str(w.shape[0] * w.shape[1]))
                else:
                    print("Hidden Layer " + str(count) + " Number of Weights: " + str(w.shape[0] * w.shape[1]))
            print("----------------------------------------------------------------------------")
            print("Total Number of Weights: ", total)
            print()

    def nonlin(self, x, deriv, function):
        """Apply the named activation (or its derivative when deriv=True).

        NOTE: for "sigmoid"/"tanh" with deriv=True the derivative is taken
        with respect to the (pre-activated) input as in the original code.
        """
        if function == "tanh":
            t = 2 / (1 + np.exp(-2 * x)) - 1
            if deriv == True:
                return 1 - t ** 2
            return t
        elif function == "sigmoid":
            if deriv == True:
                return (x * (1 - x))
            return 1 / (1 + np.exp(-x))
        elif function == "leaky_relu":
            if deriv == True:
                dx = np.ones_like(x)
                dx[x < 0] = 0.01
                return dx
            return np.where(x > 0, x, x * 0.01)

    def predict(self, x):
        """Run a forward pass on `x` and return the output-layer activations.

        Bug fix: hidden-layer activations are now stored in self.A as the
        pass proceeds (mirroring train()); previously only A[0] was updated,
        so networks with >= 2 hidden layers read stale or None activations.
        """
        activation = self.nonlin(np.dot(x, self.W[0]), False,
                                 self.input_layer_activation_function)
        self.A[0] = activation
        for i in range(self.number_of_hidden_layers):
            activation = self.nonlin(np.dot(self.A[i], self.W[i + 1]), False,
                                     self.hidden_layer_activation_function)
            self.A[i + 1] = activation
        output = self.nonlin(np.dot(activation, self.W[len(self.W) - 1]), False,
                             self.output_layer_activation_function)
        print()
        print("Prediction:")
        return output

    def train(self, loss_function, epochs, alpha=0.001):
        """Train with full-batch gradient descent for `epochs` iterations.

        loss_function: "mse", "mae" or "cce".
        NOTE: as in the original implementation, the backprop delta is the
        plain difference (y - output) for every loss except "cce" (whose
        scalar cross-entropy value scales the output delta). The original
        "mse"/"mae" error assignments were dead code -- they were always
        overwritten by the cce-check's else branch -- so that effective
        behavior is kept here, made explicit.
        """
        for j in range(epochs):
            # ---- forward pass ----
            input_layer_activation = self.nonlin(np.dot(self.x, self.W[0]), False,
                                                 self.input_layer_activation_function)
            self.A[0] = input_layer_activation
            for i in range(self.number_of_hidden_layers):
                i += 1
                hidden_layer_activation = self.nonlin(np.dot(self.A[i - 1], self.W[i]), False,
                                                      self.hidden_layer_activation_function)
                self.A[i] = hidden_layer_activation
            output_layer_activation = self.nonlin(
                np.dot(hidden_layer_activation, self.W[len(self.W) - 1]), False,
                self.output_layer_activation_function)
            self.A[len(self.A) - 1] = output_layer_activation

            # ---- error used for reporting and as the delta source ----
            if loss_function == "cce":
                # Clip to avoid log(0); the clipped activations also feed the
                # output delta below, exactly as in the original.
                output_layer_activation = np.clip(output_layer_activation, 1e-12, 1. - 1e-12)
                total_number = output_layer_activation.shape[0]
                error = -np.sum(self.y * np.log(output_layer_activation + 1e-9)) / total_number
            else:
                error = self.y - output_layer_activation

            # Print progress roughly 10 times over the run. Guarded with
            # max(1, ...): previously epochs < 10 crashed (division by zero).
            divis = max(1, epochs // 10)
            if (j % divis) == 0:
                print('Epoch: ' + str(j + 1) + ' ERROR: ' + str(np.mean(np.abs(error))))

            # ---- backward pass ----
            output_delta = error * self.nonlin(output_layer_activation, True,
                                               self.output_layer_activation_function)
            self.D[0] = output_delta

            working_delta = output_delta
            past_layer_weights = self.W[len(self.W) - 1]
            for i in range(self.number_of_hidden_layers):
                working_index = i + 1
                hidden_error = working_delta.dot(past_layer_weights.T)
                hidden_delta = hidden_error * self.nonlin(
                    self.A[len(self.A) - working_index - 1], True,
                    self.hidden_layer_activation_function)
                self.D[working_index] = hidden_delta
                working_delta = hidden_delta
                past_layer_weights = self.W[len(self.W) - (working_index + 1)]
            input_layer_activation_error = self.D[working_index].dot(self.W[working_index].T)
            input_layer_activation_delta = input_layer_activation_error * self.nonlin(
                input_layer_activation, True, self.input_layer_activation_function)
            self.D[working_index + 1] = input_layer_activation_delta

            # ---- weight update ----
            internal_alpha = alpha
            # NOTE(review): the output-layer update uses the input-layer
            # activation rather than the last hidden activation; preserved
            # verbatim from the original -- confirm intent.
            self.W[len(self.W) - 1] += input_layer_activation.T.dot(self.D[0]) * internal_alpha
            for i, z in enumerate(range(self.number_of_hidden_layers, 0, -1)):
                i += 1
                self.W[z] += self.A[i].T.dot(self.D[i]) * internal_alpha
            self.W[0] += self.x.T.dot(self.D[len(self.D) - 1]) * internal_alpha

        # ---- end-of-run report ----
        print()
        print("Done.")
        if (np.abs((np.mean(np.abs(error))) - 1) * 100) > 100:
            print("Bad Training Session! Adjust Parameters.")
        print("Final Accuracy: " + str(np.abs((np.mean(np.abs(error))) - 1) * 100) + "%")
| 39.34104
| 168
| 0.620923
| 6,724
| 0.987952
| 0
| 0
| 0
| 0
| 0
| 0
| 882
| 0.129592
|
cece03575811241c5528967937341963df9938c9
| 5,287
|
py
|
Python
|
generate_revision_sheet.py
|
geritwagner/revision-sheet-generator
|
2cfa2f0e10d980ec979fa8d4bd63106d3a089a98
|
[
"MIT"
] | 1
|
2020-05-07T00:18:06.000Z
|
2020-05-07T00:18:06.000Z
|
generate_revision_sheet.py
|
geritwagner/revision-sheet-generator
|
2cfa2f0e10d980ec979fa8d4bd63106d3a089a98
|
[
"MIT"
] | null | null | null |
generate_revision_sheet.py
|
geritwagner/revision-sheet-generator
|
2cfa2f0e10d980ec979fa8d4bd63106d3a089a98
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import argparse
import docx
from docx.shared import Cm
import pylatex
from pytablewriter import MarkdownTableWriter
def set_column_width(column, width):
    """Set `width` on the table column and on every cell it contains."""
    column.width = width
    for table_cell in column.cells:
        table_cell.width = width
def generate_word_revision_sheet(filepath, result_path, lines, starting_item):
    """Write the review comments into a three-column Word table.

    Saves to `result_path` when given, otherwise next to the input file as
    "<input>_revision_sheet.docx".
    """
    document = docx.Document()
    document.add_heading('Revision sheet', 0)

    table = document.add_table(rows=1, cols=3)
    header = table.rows[0].cells
    header[0].text = 'Nr.'
    header[1].text = 'Comment'
    header[2].text = 'How the comment is addressed'

    item_number = starting_item
    for line in lines:
        if not line.strip():
            continue  # skip blank comment lines
        cells = table.add_row().cells
        cells[0].text = str(item_number)
        cells[1].text = line.replace('\\newline', '')
        cells[2].text = ''
        item_number += 1

    set_column_width(table.columns[0], Cm(1.5))
    set_column_width(table.columns[1], Cm(10))
    set_column_width(table.columns[2], Cm(6))

    target = result_path if result_path else filepath[:-4] + '_revision_sheet.docx'
    document.save(target)
    return
def generate_tex_revision_sheet(filepath, result_path, lines, starting_item):
    """Write the review comments into a LaTeX longtable and build a PDF.

    Uses `result_path` when given, otherwise "<input>_revision_sheet.tex".
    """
    geometry_options = {"tmargin": "2.54cm", "lmargin": "2.54cm"}
    doc = pylatex.Document(geometry_options=geometry_options)

    with doc.create(pylatex.Section('Revision sheet')):
        with doc.create(pylatex.LongTable('|r|p{8cm}|p{8cm}|')) as table:
            table.add_hline()
            table.add_row(('Nr.', 'Comment', 'How the comment is addressed'))
            table.add_hline()
            item_number = starting_item
            for line in lines:
                if not line.strip():
                    continue  # skip blank comment lines
                cleaned = line.replace('\\newline', '').replace('    ', '')
                table.add_row((item_number, cleaned, ''))
                table.add_hline()
                item_number += 1

    target = result_path if result_path else filepath[:-4] + '_revision_sheet.tex'
    doc.generate_pdf(target, clean_tex=False)
    return
def generate_md_revision_sheet(filepath, result_path, lines, starting_item):
    """Write the review comments to a Markdown table.

    filepath: the input .txt file; used to derive the output name when
        result_path is falsy (mirrors the Word and LaTeX generators -- the
        original concatenated onto a falsy result_path, crashing on None).
    result_path: target .md file, or falsy for "<input>_revision_sheet.md".
    lines: pre-merged comment lines (see load_file).
    starting_item: number assigned to the first comment.
    """
    writer = MarkdownTableWriter()
    if not result_path:
        result_path = filepath[:-4] + '_revision_sheet.md'
    writer.headers = ["Nr.", "Comment", "How the comment is addressed"]

    matrix = []
    for line in lines:
        if not line.strip():
            continue  # skip blank comment lines
        matrix.append([starting_item, line.replace('\\newline', ''), ""])
        starting_item += 1

    # (Removed a debug print(matrix) and a stray write_table() call that
    # dumped the table to stdout before the file was written.)
    writer.value_matrix = matrix
    with open(result_path, "w") as f:
        writer.stream = f
        writer.write_table()
    return
def load_file(filepath):
    """Read the review file and merge continuation lines into single comments.

    Lines containing a literal "\\newline" marker, and short "Reviewer ..."
    heading lines, are buffered and prepended to the following regular line
    so that a multi-line comment becomes one list entry.

    Returns the list of merged, right-stripped comment lines.
    """
    # Use a context manager so the handle is closed (the original leaked it).
    with open(filepath, 'r') as comment_file:
        raw_lines = comment_file.readlines()

    merged = []
    pending = ''
    for line in raw_lines:
        if '\\newline' in line or ('Reviewer' in line and len(line.strip()) < 15):
            pending += line
            continue
        if '' == pending:
            merged.append(line.rstrip())
        else:
            merged.append(pending + line.rstrip())
            pending = ''
    if '' != pending:
        # Trailing continuation with no following regular line.
        merged.append(pending.rstrip())
    return merged
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = "Revision-sheet generator")
parser.add_argument("--input", default=None, help="path to the review text file")
parser.add_argument("--format", default='w', help="format of the output document , w for word (default) or t for tex or m for markdown")
parser.add_argument("--output", default=None, help="path to the file where to put the results (optional)")
parser.add_argument("--i", default=1, help="start of comment numbering (optional)")
args = parser.parse_args()
filepath = args.input
output_format = args.format
result_path = args.output
if not result_path:
result_path = 'revision_sheet'
starting_item = int(args.i)
assert output_format in ['t', 'w', 'm']
if 'w' == output_format:
if result_path:
if '.doc' != result_path[:-4] or '.docx' != result_path[:-5]:
result_path += '.doc'
if 't' == output_format:
if result_path:
if '.tex' != result_path[:-4]:
result_path += '.tex'
if 'm' == output_format:
if result_path:
if '.md' != result_path[:-4]:
result_path += '.md'
assert filepath[-4:] == '.txt'
assert isinstance(starting_item, int)
if result_path:
assert not os.path.exists(result_path)
lines = load_file(filepath)
if 'w' == output_format:
generate_word_revision_sheet(filepath, result_path, lines, starting_item)
if 't' == output_format:
generate_tex_revision_sheet(filepath, result_path, lines, starting_item)
if 'm' == output_format:
generate_md_revision_sheet(filepath, result_path, lines, starting_item)
| 32.042424
| 141
| 0.614715
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 782
| 0.147854
|
cecfd2f0eca2d1cff1112ddaf3c1ef7b67c5d1dc
| 7,550
|
py
|
Python
|
trainer_backup.py
|
dbseorms16/drnxgaze
|
c7b84189c263456c648829bc399a5edb2ec17bb8
|
[
"MIT"
] | 1
|
2021-07-06T02:01:54.000Z
|
2021-07-06T02:01:54.000Z
|
trainer_backup.py
|
dbseorms16/drnxgaze
|
c7b84189c263456c648829bc399a5edb2ec17bb8
|
[
"MIT"
] | null | null | null |
trainer_backup.py
|
dbseorms16/drnxgaze
|
c7b84189c263456c648829bc399a5edb2ec17bb8
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
import utility
from decimal import Decimal
from tqdm import tqdm
from option import args
from torchvision import transforms
from PIL import Image
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import copy
class Trainer():
def __init__(self, opt, loader, my_model, my_loss, ckp):
self.opt = opt
self.scale = opt.scale
self.ckp = ckp
self.loader_train = loader.loader_train
self.loader_test = loader.loader_test
self.model = my_model
self.loss = my_loss
self.optimizer = utility.make_optimizer(opt, self.model)
self.scheduler = utility.make_scheduler(opt, self.optimizer)
self.dual_models = self.model.dual_models
self.dual_optimizers = utility.make_dual_optimizer(opt, self.dual_models)
self.dual_scheduler = utility.make_dual_scheduler(opt, self.dual_optimizers)
self.error_last = 1e8
def train(self):
epoch = self.scheduler.last_epoch + 1
lr = self.scheduler.get_lr()[0]
self.ckp.set_epoch(epoch)
self.ckp.write_log(
'[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
)
self.loss.start_log()
self.model.train()
timer_data, timer_model = utility.timer(), utility.timer()
for batch, (lr, hr, _) in enumerate(self.loader_train):
lr, hr = self.prepare(lr, hr)
# flip train
# flip_lr = copy.deepcopy(lr)
# for i in range(0,len(flip_lr)):
# for j in range(0, len(flip_lr[i])):
# for k in range(0, len(flip_lr[i][j])):
# flip_lr[i][j][k]= torch.fliplr(flip_lr[i][j][k])
timer_data.hold()
timer_model.tic()
self.optimizer.zero_grad()
for i in range(len(self.dual_optimizers)):
self.dual_optimizers[i].zero_grad()
# forward
sr = self.model(lr[0])
# flip train
# flip_sr = self.model(flip_lr[0])
# for i in range(0, len(flip_sr)):
# for j in range(0, len(flip_sr[i])):
# for k in range(0, len(flip_sr[i][j])):
# flip_sr[i][j][k] = torch.fliplr(flip_sr[i][j][k])
# fflip_sr = flip_sr
sr2lr = []
for i in range(len(self.dual_models)):
sr2lr_i = self.dual_models[i](sr[i - len(self.dual_models)])
sr2lr.append(sr2lr_i)
# compute primary loss
loss_primary = self.loss(sr[-1], hr)
for i in range(1, len(sr)):
loss_primary += self.loss(sr[i - 1 - len(sr)], lr[i - len(sr)])
# compute dual loss
loss_dual = self.loss(sr2lr[0], lr[0])
for i in range(1, len(self.scale)):
loss_dual += self.loss(sr2lr[i], lr[i])
# compute average loss
# average_feat =(sr[-1]+fflip_sr[-1])/2
# loss_average = self.loss(average_feat, hr)
#copute flip loss
loss_flip =0
# for i in range(0, len(sr)):
# loss_flip+= self.loss(sr[i], fflip_sr[i])
# compute total loss
loss = loss_primary+ self.opt.dual_weight * loss_dual
if loss.item() < self.opt.skip_threshold * self.error_last:
loss.backward()
self.optimizer.step()
for i in range(len(self.dual_optimizers)):
self.dual_optimizers[i].step()
else:
print('Skip this batch {}! (Loss: {})'.format(
batch + 1, loss.item()
))
timer_model.hold()
if (batch + 1) % self.opt.print_every == 0:
self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
(batch + 1) * self.opt.batch_size,
len(self.loader_train.dataset),
self.loss.display_loss(batch),
timer_model.release(),
timer_data.release()))
timer_data.tic()
self.loss.end_log(len(self.loader_train))
self.error_last = self.loss.log[-1, -1]
self.step()
def test(self):
epoch = self.scheduler.last_epoch
self.ckp.write_log('\nEvaluation:')
self.ckp.add_log(torch.zeros(1, 1))
self.model.eval()
timer_test = utility.timer()
with torch.no_grad():
scale = max(self.scale)
for si, s in enumerate([scale]):
f= open('5060_flip o.txt', 'w')
eval_psnr = 0
eval_simm =0
tqdm_test = tqdm(self.loader_test, ncols=80)
for _, (lr, hr, filename) in enumerate(tqdm_test):
filename = filename[0]
no_eval = (hr.nelement() == 1)
if not no_eval:
lr, hr = self.prepare(lr, hr)
else:
lr, = self.prepare(lr)
sr = self.model(lr[0])
if isinstance(sr, list): sr = sr[-1]
sr = utility.quantize(sr, self.opt.rgb_range)
if not no_eval:
psnr = utility.calc_psnr(
sr, hr, s, self.opt.rgb_range,
benchmark=self.loader_test.dataset.benchmark
)
hr_numpy = hr[0].cpu().numpy().transpose(1, 2, 0)
sr_numpy = sr[0].cpu().numpy().transpose(1, 2, 0)
simm = utility.SSIM(hr_numpy, sr_numpy)
eval_simm += simm
eval_psnr +=psnr
# save test results // SR result !
if self.opt.save_results:
self.ckp.save_results_nopostfix(filename, sr, s)
self.ckp.log[-1, si] = eval_psnr / len(self.loader_test)
eval_simm = eval_simm / len(self.loader_test)
best = self.ckp.log.max(0)
self.ckp.write_log(
'[{} x{}]\tPSNR: {:.2f} (Best: {:.2f} @epoch {})'.format(
self.opt.data_test, s,
self.ckp.log[-1, si],
best[0][si],
best[1][si] + 1
)
)
print('SIMM:',eval_simm)
self.ckp.write_log(
'Total time: {:.2f}s\n'.format(timer_test.toc()), refresh=True
)
if not self.opt.test_only:
self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
def step(self):
self.scheduler.step()
for i in range(len(self.dual_scheduler)):
self.dual_scheduler[i].step()
    def prepare(self, *args):
        """Move input tensors to the target device (CPU or CUDA per opt.cpu).

        NOTE(review): ``args[0]`` is iterated below, so the first positional
        argument is expected to be a *list* of tensors (e.g. multi-scale LR
        inputs) -- confirm against callers. With two arguments the last one
        is treated as a single tensor (the HR target).
        """
        device = torch.device('cpu' if self.opt.cpu else 'cuda')
        if len(args) > 1:
            # (list-of-tensors, tensor) -> move each element, then the target
            return [a.to(device) for a in args[0]], args[-1].to(device)
        # single list-of-tensors argument; the trailing comma keeps the return
        # value a 1-tuple so callers can unpack with ``lr, = self.prepare(lr)``
        return [a.to(device) for a in args[0]],
def terminate(self):
if self.opt.test_only:
self.test()
return True
else:
epoch = self.scheduler.last_epoch
return epoch >= self.opt.epochs
| 34.474886
| 84
| 0.488477
| 7,272
| 0.963179
| 0
| 0
| 0
| 0
| 0
| 0
| 1,007
| 0.133377
|
ced2a7bfdf7758ecb38fddf1d9823beee1d21d80
| 1,356
|
py
|
Python
|
pymongo_test_query.py
|
rbola/python-mongodb
|
e0a466a5ddc3e105ff2161521313000a3b828d76
|
[
"CNRI-Python"
] | 1
|
2021-07-05T04:10:21.000Z
|
2021-07-05T04:10:21.000Z
|
pymongo_test_query.py
|
rbola/python-mongodb
|
e0a466a5ddc3e105ff2161521313000a3b828d76
|
[
"CNRI-Python"
] | 2
|
2021-06-14T08:39:16.000Z
|
2021-06-14T08:45:12.000Z
|
pymongo_test_query.py
|
rbola/python-mongodb
|
e0a466a5ddc3e105ff2161521313000a3b828d76
|
[
"CNRI-Python"
] | 3
|
2021-07-02T20:32:21.000Z
|
2021-07-14T17:41:19.000Z
|
# Get the database handle using the helper defined in the
# pymongo_test_insert module (project-local)
from pymongo_test_insert import get_database
dbname = get_database()
# Fetch the collection handle (created implicitly on first insert)
collection_name = dbname["user_1_items"]
item_details = collection_name.find()
for item in item_details:
    # Prints readable output, but raises KeyError for any document that
    # lacks an 'item_name' or 'category' field
    print(item['item_name'], item['category'])
###---------------------------------------------------###
### Comment the above 'for loop' & 'print statements' ###
### for the next lines of code to work (the cursor is ###
### exhausted once iterated)                          ###
###---------------------------------------------------###
from pandas import DataFrame
# Convert the cursor's dictionary documents to a DataFrame
items_df = DataFrame(item_details)
# View all items
print(items_df)
###--------------------------------------------------------###
### Get items of a particular category without / with index###
###--------------------------------------------------------###
item_details = collection_name.find({"category" : "food"})
for item in item_details:
    print(item)
# Add more data to understand the need for indexing (import runs the inserts)
import pymongo_test_insert_more_items
# Create an index on 'category' to speed up the query above
category_index = collection_name.create_index("category")
# Execute the previous query again to see the documents scanned (refer to the article)
| 35.684211
| 86
| 0.605457
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 875
| 0.64528
|
ced423f7265dba138645d51d245eba93b188c792
| 3,358
|
py
|
Python
|
django_backend/backend/ajax.py
|
team23/django_backend
|
02a2ef70584f80b9abd17b4e1a94576df5461b37
|
[
"BSD-3-Clause"
] | 3
|
2015-09-10T07:10:49.000Z
|
2021-03-16T07:17:58.000Z
|
django_backend/backend/ajax.py
|
team23/django_backend
|
02a2ef70584f80b9abd17b4e1a94576df5461b37
|
[
"BSD-3-Clause"
] | 10
|
2015-09-09T13:40:24.000Z
|
2021-02-27T09:12:23.000Z
|
django_backend/backend/ajax.py
|
team23/django_backend
|
02a2ef70584f80b9abd17b4e1a94576df5461b37
|
[
"BSD-3-Clause"
] | 5
|
2016-06-12T08:20:38.000Z
|
2021-02-27T09:02:30.000Z
|
import json
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse
from django.template import Context
from django.template import RequestContext
from django.template.loader import render_to_string, select_template
from django.utils.encoding import force_unicode
from ..compat import context_flatten
from ..compat import get_template_name
class JsonResponseMixin(object):
    """Mixin that serializes view payloads to a JSON HttpResponse."""

    status = 'ok'
    json_encoder_class = DjangoJSONEncoder

    def get_json(self, **kwargs):
        """Build the JSON payload: ``status`` plus any extra keys (extras win)."""
        payload = {'status': self.status}
        payload.update(kwargs)
        return payload

    def render_json_response(self, json_data, **response_kwargs):
        """Serialize ``json_data`` with the configured encoder into an HttpResponse."""
        body = json.dumps(json_data, cls=self.json_encoder_class).encode('utf-8')
        return HttpResponse(
            body,
            content_type='application/json; charset=utf-8',
            **response_kwargs)
class DialogResponseMixin(JsonResponseMixin):
    '''
    Use this mixin to provide valid responses for calls from the AjaxDialog
    classes in the client.

    Subclasses need to have a ``get_template_names`` method that returns the
    template names that should be rendered inside the
    ``page_wrapper_template_name``.

    The mixin returns JSON if ``request.is_ajax() == True``. The return value
    of the ``get_json()`` method is then serialized to JSON; otherwise a full
    HTML page is rendered via ``render_html_response``.
    '''

    # Optional dialog title; exposed in both the JSON payload and the context.
    title = None
    page_wrapper_template_name = 'django_backend/page_wrapper.html'

    def get_title(self):
        # Hook for subclasses that compute the title dynamically.
        return self.title

    def get_context_data(self, **kwargs):
        kwargs.setdefault('title', self.get_title())
        kwargs['is_dialog'] = self.is_dialog()
        return super(DialogResponseMixin, self).get_context_data(**kwargs)

    def get_page_wrapper_template_names(self):
        '''
        Returns the wrapper template name that takes the real template names to
        be rendered inside.
        '''
        return [self.page_wrapper_template_name]

    def is_dialog(self):
        # AJAX requests are answered with a JSON dialog payload.
        return self.request.is_ajax()

    def get_json(self, **kwargs):
        json_data = super(DialogResponseMixin, self).get_json(**kwargs)
        title = self.get_title()
        if title:
            # force_unicode coerces lazy translation proxies to text before
            # JSON serialization (legacy Django/Python 2 helper).
            json_data['title'] = force_unicode(title)
        return json_data

    def render_to_response(self, context, **response_kwargs):
        if self.is_dialog():
            # Dialog path: render only the inner template and ship it as the
            # 'html' key of the JSON payload.
            context = RequestContext(self.request, context)
            context.update(self.get_context_data())
            json_data = self.get_json()
            json_data['html'] = render_to_string(
                self.get_template_names(), context)
            return self.render_json_response(json_data, **response_kwargs)
        else:
            return self.render_html_response(context, **response_kwargs)

    def render_html_response(self, context, **response_kwargs):
        # Full-page fallback: render the real template inside the page wrapper.
        response_kwargs.setdefault('content_type', self.content_type)
        template = select_template(self.get_template_names())
        # Expose which concrete template was selected to the wrapper template.
        context['template_name'] = get_template_name(template)
        return self.response_class(
            request=self.request,
            template=self.get_page_wrapper_template_names(),
            context=context,
            **response_kwargs)
| 34.979167
| 79
| 0.6757
| 2,971
| 0.884753
| 0
| 0
| 0
| 0
| 0
| 0
| 696
| 0.207266
|
ced494334d10930a6ca41715a278fb9e1655660e
| 1,801
|
py
|
Python
|
flight/oled_ssd1306.py
|
MxToolbox/BalloonLaunch
|
a31e02ddc11b54d2ab3691e8206dc24af726f585
|
[
"MIT"
] | 1
|
2020-04-13T20:18:45.000Z
|
2020-04-13T20:18:45.000Z
|
flight/oled_ssd1306.py
|
MxToolbox/BalloonLaunch
|
a31e02ddc11b54d2ab3691e8206dc24af726f585
|
[
"MIT"
] | null | null | null |
flight/oled_ssd1306.py
|
MxToolbox/BalloonLaunch
|
a31e02ddc11b54d2ab3691e8206dc24af726f585
|
[
"MIT"
] | null | null | null |
"""
This demo will fill the screen with white, draw a black box on top
and then print Hello World! in the center of the display
This example is for use on (Linux) computers that are using CPython with
Adafruit Blinka to support CircuitPython libraries. CircuitPython does
not support PIL/pillow (python imaging library)!
"""
import board
import digitalio
from PIL import Image, ImageDraw, ImageFont
import adafruit_ssd1306
import time
# Define the Reset Pin
oled_reset = digitalio.DigitalInOut(board.D4)
# Change these
# to the right size for your display!
WIDTH = 128
HEIGHT = 64 # Change to 64 if needed
BORDER = 5
# Use for I2C.
i2c = board.I2C()
oled = adafruit_ssd1306.SSD1306_I2C(WIDTH, HEIGHT, i2c, addr=0x3c, reset=oled_reset)
# Draw Some Text
while True:
# Clear display.
oled.fill(0)
oled.show()
# Create blank image for drawing.
# Make sure to create image with mode '1' for 1-bit color.
image = Image.new('1', (oled.width, oled.height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Draw a white background
#draw.rectangle((0, 0, oled.width, oled.height), outline=255, fill=255)
# Draw a smaller inner rectangle
#draw.rectangle((BORDER, BORDER, oled.width - BORDER - 1, oled.height - BORDER - 1),
# outline=0, fill=0)
# Load default font.
font = ImageFont.load_default()
#font = ImageFont.truetype("arial.ttf", 15)
#font = ImageFont.truetype(font=None, size=10, index=0, encoding='')
text = time.strftime("%H:%M:%S")
(font_width, font_height) = font.getsize(text)
draw.text((oled.width//2 - font_width//2, oled.height//2 - font_height//2),
text, font=font, fill=255)
# Display image
oled.image(image)
oled.show()
time.sleep(1)
| 27.707692
| 88
| 0.68462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 999
| 0.554692
|
ced5d7b593a38747bfdd7d0801c855ff58b4b2ad
| 1,150
|
py
|
Python
|
setup.py
|
billbrod/spatial-frequency-model
|
c962a50ba1041ab352e3426df026a74e21540c2e
|
[
"MIT"
] | null | null | null |
setup.py
|
billbrod/spatial-frequency-model
|
c962a50ba1041ab352e3426df026a74e21540c2e
|
[
"MIT"
] | null | null | null |
setup.py
|
billbrod/spatial-frequency-model
|
c962a50ba1041ab352e3426df026a74e21540c2e
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
"""Install script for sfm (spatial frequency preferences model)."""
from setuptools import setup, Extension
import importlib.util  # BUG FIX: importlib.util must be imported explicitly
import os

# Read the package version from sfm/version.py without importing the package.
# copied from kymatio's setup.py: https://github.com/kymatio/kymatio/blob/master/setup.py
sfm_version_spec = importlib.util.spec_from_file_location('sfm_version', 'sfm/version.py')
sfm_version_module = importlib.util.module_from_spec(sfm_version_spec)
sfm_version_spec.loader.exec_module(sfm_version_module)
VERSION = sfm_version_module.version

setup(
    name='sfm',
    # BUG FIX: the version was hard-coded to '0.1', leaving the VERSION read
    # from sfm/version.py above unused.
    version=VERSION,
    description='Spatial frequency preferences model',
    license='MIT',
    url='https://github.com/billbrod/spatial-frequency-model',
    author='William F. Broderick',
    author_email='billbrod@gmail.com',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Programming Language :: Python :: 3.7'],
    keywords='Visual Information Processing',
    packages=['sfm'],
    # BUG FIX: a missing comma after 'pandas>=0.25' concatenated it with
    # 'scipy>=1.0' into the single bogus requirement 'pandas>=0.25scipy>=1.0'.
    install_requires=['numpy>=1.1',
                      'torch>=1.1',
                      'pandas>=0.25',
                      'scipy>=1.0',
                      'matplotlib>=3.1',
                      'pytest',
                      'seaborn>=0.9.0'],
    tests='tests',
)
| 32.857143
| 90
| 0.630435
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 493
| 0.428696
|
ced62c2a72c4a4e557f2a0fa0eaf322653f67299
| 1,490
|
py
|
Python
|
tests/core/test_operation/test_inventory.py
|
pypipet/pypipet
|
8c489e4a7992281fbb68b12e2627decf24f2facb
|
[
"MIT"
] | null | null | null |
tests/core/test_operation/test_inventory.py
|
pypipet/pypipet
|
8c489e4a7992281fbb68b12e2627decf24f2facb
|
[
"MIT"
] | null | null | null |
tests/core/test_operation/test_inventory.py
|
pypipet/pypipet
|
8c489e4a7992281fbb68b12e2627decf24f2facb
|
[
"MIT"
] | 1
|
2021-12-10T22:36:34.000Z
|
2021-12-10T22:36:34.000Z
|
# from pipet.core.sql.query_interface import *
from pypipet.core.operations.inventory import *
import pytest
from pprint import pprint
_supplie_id = 1
def test_update_invs(session, obj_classes, shop_conn):
    """Bulk-update inventory for one SKU and read it back (smoke test).

    ``session``, ``obj_classes`` and ``shop_conn`` are presumably pytest
    fixtures from the project's conftest -- TODO confirm.
    """
    invs = [ {'sku':'s22456', 'supplier_id':_supplie_id, 'qty':20}]
    update_inventory_bulk(obj_classes, session,
                invs, ignore_new=False)
    res = get_inventory_by_sku(obj_classes, session,
                invs[0]['sku'], by_supplier=False)
    pprint(res)
# def test_match_upc(session, obj_classes):
# invs = [ {'upc':'48743213', 'supplier_id':1, 'qty':10},
# {'upc':'9348886', 'supplier_id':1, 'qty':10}]
# res = match_variation_sku_by_upc(obj_classes.get('variation'), session, invs)
# assert res is not None
def test_update_inv(session, obj_classes, shop_conn):
    """Update inventory for a single SKU and read it back.

    NOTE(review): ``res`` is fetched but never asserted on -- this test only
    checks that the calls complete without raising.
    """
    inv = {'sku':'s2789', 'supplier_id':_supplie_id, 'qty':82}
    update_inventory_db_by_sku(obj_classes, session, inv['sku'], inv)
    res = get_inventory_by_sku(obj_classes, session,
                inv['sku'], by_supplier=False)
def test_update_front_shop_bulk(obj_classes, session, shop_conn):
    """Push in-stock levels for all products to the front shop (smoke test)."""
    update_instock_front_shop(obj_classes, session, shop_conn, set_inventory_management=True)
def test_update_front_shop(obj_classes, session, shop_conn):
    """Push in-stock levels for two specific SKUs to the front shop."""
    update_instock_front_shop_by_sku(obj_classes, session, shop_conn, 's2789')
    update_instock_front_shop_by_sku(obj_classes, session, shop_conn, 's22456')
| 40.27027
| 93
| 0.693289
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 416
| 0.279195
|
ced6d0de53f60c3136d5f4268abc101197257419
| 2,982
|
py
|
Python
|
reliabpy/models/cost.py
|
FelipeGiro/ReliabiliPy
|
42624a65504a959f66a64ae2ad2ccfb5af5ae9b0
|
[
"MIT"
] | null | null | null |
reliabpy/models/cost.py
|
FelipeGiro/ReliabiliPy
|
42624a65504a959f66a64ae2ad2ccfb5af5ae9b0
|
[
"MIT"
] | 2
|
2021-08-13T15:31:33.000Z
|
2021-08-13T15:31:34.000Z
|
reliabpy/models/cost.py
|
FelipeGiro/reliabpy
|
42624a65504a959f66a64ae2ad2ccfb5af5ae9b0
|
[
"MIT"
] | null | null | null |
# Costs
# 2019 - Luque, Straub - Risk-based optimal inspection strategies for
# structural systems using dynamic Bayesian networks
# Table 4, case 1
import numpy as np
class InspectionMaintenance:
    """
    Inspection and Maintenance
    ==========================
    Cost calculation for inspection and maintenance reliability analysis.

    Parameters:
    -----------
    c_c : float
        individual cost of an inspection campaign
    c_i : float
        individual cost of inspection
    c_r : float
        individual cost of repair
    c_f : float
        individual cost of failure
    r : float
        discount rate (per time step)
    """
    def __init__(self, c_c=5.0, c_i=1.0, c_r=10.0, c_f=10000, r=0.02):
        # BUG FIX: the original tuple assignment listed ``self.c_c`` twice and
        # relied on positional alignment; assign each attribute explicitly.
        self.c_c = c_c
        self.c_i = c_i
        self.c_r = c_r
        self.c_f = c_f
        self.r = r

    def compute_cost_breakdown(self, system_model):
        """
        Compute the discounted lifetime cost breakdown from a simulated model.

        Reads ``system_model.system_pf`` (sequence of (t, pf) pairs) and
        ``system_model.components_list`` (components with ``t``, ``action``
        and ``id`` attributes), and stores two dictionaries on the model:

        - ``yearly_costs_breakdown`` : per-time-step arrays for campaign
          (C_C), inspection (C_I), repair (C_R), failure risk (R_F) and
          total (C_T) costs, all discounted by ``(1 - r)**t``
        - ``cost_breakdown`` : the lifetime sums of the same quantities
        """
        system_insp, system_repair = dict(), dict()

        # Split the (t, pf) history; delta_pf holds the per-step increase in
        # failure probability for consecutive (dt == 1) time steps.
        t, pf = np.vstack(system_model.system_pf).T
        unique_mask = np.diff(t) == 1
        delta_pf = np.diff(pf)[unique_mask]
        abs_t = np.unique(t).astype(int)

        C_C = np.zeros_like(abs_t, dtype=float)
        C_I = np.zeros_like(abs_t, dtype=float)
        C_R = np.zeros_like(abs_t, dtype=float)
        R_F = np.zeros_like(abs_t, dtype=float)

        for component in system_model.components_list:
            # NOTE: dead locals ``output`` and ``y_t`` from the original were
            # removed -- they were computed but never used.
            action = np.array(component.action)
            if any(action):
                comp_t = np.array(component.t, dtype=int)
                # 'PoD' marks an inspection, 'PR' a (perfect) repair
                t_insp = comp_t[action == 'PoD']
                t_repair = comp_t[action == 'PR']
                C_I[t_insp] += self.c_i*(1 - self.r)**t_insp
                C_R[t_repair] += self.c_r*(1 - self.r)**t_repair
                system_insp[component.id] = t_insp
                system_repair[component.id] = t_repair
            else:
                system_insp[component.id] = list()
                system_repair[component.id] = list()

        # One campaign cost per time step in which *any* component was
        # inspected (guard against an empty component list).
        if system_insp:
            t_temp = np.unique(np.concatenate(list(system_insp.values()))).astype(int)
            C_C[t_temp] += self.c_c*(1 - self.r)**(t_temp)

        # Expected (discounted) failure cost per step: c_f * delta_pf.
        R_F[abs_t[1:]] = self.c_f*delta_pf*(1 - self.r)**abs_t[1:]
        C_T = C_C + C_I + C_R + R_F

        system_model.yearly_costs_breakdown = {'t' : abs_t, 'C_C' : C_C, 'C_I' : C_I, 'C_R' : C_R, 'R_F' : R_F, 'C_T': C_T}
        system_model.cost_breakdown = {'C_C' : C_C.sum(), 'C_I' : C_I.sum(), 'C_R' : C_R.sum(), 'R_F' : R_F.sum(), 'C_T': C_T.sum()}
| 36.365854
| 167
| 0.563045
| 2,810
| 0.942321
| 0
| 0
| 0
| 0
| 0
| 0
| 1,082
| 0.362844
|
ced99530196a9edbf9316f733aa9a4cd0cc8c3bd
| 1,515
|
py
|
Python
|
Code/Examples/Example_22.py
|
R6500/SLab
|
d8e4eac7d59dcdb2941ad4b267b59533bd038cab
|
[
"MIT"
] | 2
|
2018-02-23T18:23:35.000Z
|
2018-04-10T11:30:31.000Z
|
Code/Examples/Example_22.py
|
R6500/SLab
|
d8e4eac7d59dcdb2941ad4b267b59533bd038cab
|
[
"MIT"
] | null | null | null |
Code/Examples/Example_22.py
|
R6500/SLab
|
d8e4eac7d59dcdb2941ad4b267b59533bd038cab
|
[
"MIT"
] | null | null | null |
'''
SLab Example

Example_22.py
Create several waveforms

Connect DAC 1 to ADC 1
'''
# Locate slab in the parent folder
import sys
sys.path.append('..')
sys.path.append('.')

import slab

# Set prefix to locate calibrations
slab.setFilePrefix("../")

# Open serial communication
slab.connect()

# Set sample time to 100us
slab.setSampleTime(0.0001)

# Set storage requirements
slab.setTransientStorage(200,1)

# (A) Creates and measures a square wave
slab.waveSquare(1.0,2.0,100)
slab.wavePlot()

# (B) Creates and measures a triangle wave
slab.waveTriangle(1.0,2.0,100)
slab.wavePlot()

# (C) Creates and measures a sawtooth wave
slab.waveSawtooth(1.0,2.0,100)
slab.wavePlot()

# (D) Creates and measures a sine wave
slab.waveSine(1.0,2.0,100)
slab.wavePlot()

# (E) Creates and measures a 10% duty pulse wave
slab.wavePulse(1.0,2.0,100,90)
slab.wavePlot()

# (F) Creates and measures a staircase waveform
# BUG FIX: the accumulator was named ``list``, shadowing the builtin.
stair_values = []
for i in range(0, 10):
    # each of the 10 levels is held for 10 samples
    stair_values.extend([1.0 + 0.1 * i] * 10)
slab.loadWavetable(stair_values)
slab.wavePlot()

# (G) Creates and measures a cosine wave
slab.waveCosine(1.0,2.0,100)
slab.wavePlot()

# (H) Creates and measures a noise wave
slab.waveNoise(1.5,0.1,100)
t,a1 = slab.wavePlot(1,returnData=True)
# BUG FIX: converted the Python 2 ``print`` statement to the print() function
# (single-argument form works on both Python 2 and 3).
print("Std Dev is " + str(slab.std(a1)) + " V")

# (I) Creates and measures a random wave between 1V and 2V
slab.waveRandom(1,2,100)
slab.wavePlot()

# Close serial communication
slab.disconnect()
| 19.675325
| 59
| 0.678548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 703
| 0.464026
|
cedab3619291fe6be599d6dbcc26bf805226e6ae
| 1,693
|
py
|
Python
|
src/python_module_setup.py
|
Deyht/CIANNA
|
84c2cd94e91af1114aaed1251e36e1a2669e4c82
|
[
"Apache-2.0"
] | 5
|
2020-12-03T14:52:41.000Z
|
2022-01-09T14:12:12.000Z
|
src/python_module_setup.py
|
Deyht/CIANNA
|
84c2cd94e91af1114aaed1251e36e1a2669e4c82
|
[
"Apache-2.0"
] | null | null | null |
src/python_module_setup.py
|
Deyht/CIANNA
|
84c2cd94e91af1114aaed1251e36e1a2669e4c82
|
[
"Apache-2.0"
] | null | null | null |
from distutils.core import setup, Extension
import os

# Optional build features are toggled through environment variables:
#os.environ['USE_CUDA'] = '1'
#os.environ['USE_BLAS'] = '1'
#os.environ['USE_OPENMP'] = '1'

# Defaults: no extra objects / includes / link flags / macros.
# BUG FIX: the previous defaults were ``[(None, None)]``, which injected an
# invalid (None, None) entry into ``define_macros`` whenever the matching
# feature was disabled (distutils expects (name, value) with a string name).
cuda_obj = []
cuda_extra = []
cuda_include = []
cuda_macro = []

blas_obj = []
blas_extra = []
blas_include = []
blas_macro = []

open_mp_extra = []

if os.environ.get('USE_CUDA') is not None:
    print("USE_CUDA")
    cuda_obj = ['cuda/cuda_main.o', 'cuda/cuda_conv_layer.o', 'cuda/cuda_dense_layer.o', 'cuda/cuda_pool_layer.o', 'cuda/cuda_activ_functions.o']
    cuda_include = ['/usr/local/cuda-11.3/include']
    cuda_extra = ['-L/usr/local/cuda-11.3/lib64', '-lcudart', '-lcublas']
    cuda_macro = [('CUDA','1'), ('CUDA_THREADS_PER_BLOCKS', '256')]

if os.environ.get('USE_BLAS') is not None:
    print("USE_BLAS")
    blas_obj = ['blas/blas_dense_layer.o', 'blas/blas_conv_layer.o']
    blas_include = ['/opt/OpenBLAS/include']
    blas_extra = ['-lopenblas', '-L/opt/OpenBLAS/lib']
    blas_macro = [('BLAS', '1')]

if os.environ.get('USE_OPENMP') is not None:
    print("USE_OPENMP")
    open_mp_extra = ['-fopenmp']

#Re-add naiv: 'naiv/naiv_dense_layer.o', 'naiv/naiv_conv_layer.o', 'naiv/naiv_pool_layer.o'
setup(name = 'CIANNA',
    version = '0.9',
    ext_modules = [Extension('CIANNA', ['python_module.c'],
    extra_objects=['conv_layer.o', 'dense_layer.o', 'pool_layer.o', 'activ_functions.o', 'initializers.o', 'vars.o', 'auxil.o', 'naiv/naiv_dense_layer.o', 'naiv/naiv_conv_layer.o', 'naiv/naiv_pool_layer.o'] + cuda_obj + blas_obj,
    include_dirs= cuda_include + blas_include,
    extra_link_args=['-O3 -std=c99'] + cuda_extra + blas_extra + open_mp_extra,
    define_macros=[('MAX_LAYERS_NB', '100'), ('MAX_NETWORKS_NB','10')] + cuda_macro + blas_macro)])
| 36.021277
| 226
| 0.686946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 868
| 0.512699
|
cedb99a67763a6bd3a431eeb9d02931f0c220063
| 4,324
|
py
|
Python
|
potsim/filters.py
|
nicktimko/pots-sim
|
7620c41fab4e7cecf1c0bb99df3fddfdfe208d61
|
[
"MIT"
] | null | null | null |
potsim/filters.py
|
nicktimko/pots-sim
|
7620c41fab4e7cecf1c0bb99df3fddfdfe208d61
|
[
"MIT"
] | null | null | null |
potsim/filters.py
|
nicktimko/pots-sim
|
7620c41fab4e7cecf1c0bb99df3fddfdfe208d61
|
[
"MIT"
] | 1
|
2020-02-02T14:16:17.000Z
|
2020-02-02T14:16:17.000Z
|
from __future__ import absolute_import, division, print_function
import json
import os.path as op
import six
import numpy as np
import scipy.signal as sig
import scipy.io.wavfile as sciwav
MAXINT16 = 2**15 - 1
FS = 44100
COEFF_DIR = op.join(op.dirname(op.abspath(__file__)), 'coeffs')
def normalize(data, maxamp=1):
    """Rescale ``data`` in place so its largest magnitude equals ``maxamp``."""
    peak = max(abs(data))
    data *= maxamp / peak
def load_coeffs(fname):
    """Load a JSON coefficient file from the package's ``coeffs`` directory."""
    path = op.join(COEFF_DIR, fname)
    with open(path) as fh:
        return json.load(fh)
POTS_COEFFS = load_coeffs('pots.json')
def pots(data, snr=30, seed=None):
    """Simulate a plain-old-telephone-service (POTS) line on an audio signal.

    Filters ``data`` through the (b, a) filter cascade loaded from
    coeffs/pots.json, adds band-limited white noise scaled by
    ``10**(-snr/20)`` (snr presumably in dB -- confirm against the
    coefficient derivation), and returns the result as full-scale int16
    samples. ``seed`` makes the noise reproducible.
    """
    if seed is not None:
        np.random.seed(seed)

    # ensure mono (keep first channel only)
    if data.ndim != 1:
        data = data[:,0]

    # convert to float, but simulate 16-bit quantization if needed
    orig_dtype = data.dtype
    data = data.astype('float')
    if orig_dtype != 'int16':
        normalize(data, maxamp=MAXINT16)
        np.around(data, out=data)
    normalize(data)

    # pad start and end
    #leader_len = np.random.randint(0.1 * FS, 0.4 * FS)
    #trailer_len = 0.5 * FS - leader_len
    #data = np.concatenate([np.zeros(leader_len), data, np.zeros(trailer_len)])

    # do filtering: cascade of (b, a) stages from the 'signal' coefficient set
    for b, a in POTS_COEFFS['signal']:
        data = sig.lfilter(b, a, data)

    # add band-limited noise (filtered white noise)
    #np.random.seed(0)
    noise = 10**(-snr/20) * ((np.random.random(size=data.shape) * 2) - 1)
    for b, a in POTS_COEFFS['noiseband']:
        noise = sig.lfilter(b, a, noise)
    data += noise

    # renormalize and convert to 16-bit integers
    normalize(data, maxamp=MAXINT16)
    data = data.astype('int16')

    return data
class DigitalStreamFilter(object):
    """Base class for filters that load/dump sample streams as wav/txt/json.

    Subclasses override ``process()`` to transform ``self.data`` (a numpy
    array of samples) and ``output_suffix`` for the suggested output name.
    """

    # File extension -> MIME type for the supported container formats.
    mimes = {
        'wav': 'audio/vnd.wave',
        'txt': 'text/plain',
        'json': 'application/json',
    }
    # Inserted before the extension in suggested output filenames.
    output_suffix = 'filtered'

    def __init__(self, data=None, stream=None, filename=None, dtype=None):
        # ``dtype`` is the container type ('wav', 'txt', 'json'); inferred
        # from the filename extension when not given explicitly.
        if dtype is None and filename is None:
            try:
                # werkzeug.FileStorage has 'filename', python files have 'name'
                filename = getattr(stream, 'filename', getattr(stream, 'name'))
            except AttributeError:
                raise ValueError("Can't determine type from stream. "
                                 "Provide dtype or filename to infer type.")
        if dtype is None:
            dtype = filename.split('.')[-1]

        self.dtype = dtype
        self.filename = filename
        # NOTE(review): written but never read in this class -- presumably a
        # hook for subclasses to add extra JSON keys; confirm before removing.
        self.json_extra = {}

        # Load precedence: explicit data > open stream > filename on disk.
        if data is not None:
            self.data = np.array(data)
        elif stream is not None:
            self.load(stream)
        else:
            with open(filename, 'rb') as stream:
                self.load(stream)

    def load(self, stream):
        # Dispatch to the loader matching the detected container type.
        dispatcher = {
            'wav': self._load_wave,
            'txt': self._load_text,
            'json': self._load_json,
        }
        try:
            data = dispatcher[self.dtype](stream)
        except KeyError:
            raise TypeError('Unsupported input type: {} (accepts {})'.format(
                self.dtype, ', '.join(dispatcher.keys())))
        self.data = np.array(data)

    def process(self, *args, **kwargs):
        # Subclass responsibility: transform ``self.data`` in place.
        raise NotImplementedError('abstract method')

    def dump(self, stream, dtype=None):
        # Write ``self.data`` to ``stream``; defaults to the input format.
        if dtype is None:
            dtype = self.dtype
        {'wav': self._dump_wave,
         'txt': self._dump_text,
         'json': self._dump_json,
         }[dtype](stream)

    def suggested_name(self):
        # e.g. 'voice.wav' -> 'voice.filtered.wav'
        parts = self.filename.split('.')[:-1]
        parts.extend([self.output_suffix, self.dtype])
        return '.'.join(parts)

    def mimetype(self):
        return self.mimes[self.dtype]

    # --- per-format loaders (sample rate of wav input is discarded) ---
    def _load_wave(self, stream):
        rate, data = sciwav.read(stream)
        return data

    def _load_text(self, stream):
        return np.loadtxt(stream, dtype='int16')

    def _load_json(self, stream):
        return np.array(json.load(stream))

    # --- per-format dumpers (always written at the module-level rate FS) ---
    def _dump_wave(self, stream):
        sciwav.write(stream, FS, self.data)

    def _dump_text(self, stream):
        np.savetxt(stream, self.data, fmt='%d')

    def _dump_json(self, stream):
        json.dump({'data': self.data.tolist(), 'rate': FS}, stream)
class POTSFilter(DigitalStreamFilter):
    """Stream filter that applies the POTS line simulation to its samples."""
    output_suffix = 'pots-filtered'

    def process(self, *args, **kwargs):
        # Extra arguments are forwarded to pots() (snr, seed).
        self.data = pots(self.data, *args, **kwargs)
| 28.077922
| 79
| 0.594357
| 2,725
| 0.630204
| 0
| 0
| 0
| 0
| 0
| 0
| 812
| 0.187789
|
cedc48d01729e8a20c5b2b1eaaa514c17e38fd56
| 5,952
|
py
|
Python
|
scripts/bootstrap_optimize.py
|
adrn/thrift-shop
|
8dcd8d7e242ded1263edb4b1fb7c05f04c05b47c
|
[
"MIT"
] | null | null | null |
scripts/bootstrap_optimize.py
|
adrn/thrift-shop
|
8dcd8d7e242ded1263edb4b1fb7c05f04c05b47c
|
[
"MIT"
] | 11
|
2020-09-29T19:18:19.000Z
|
2020-11-21T21:26:09.000Z
|
scripts/bootstrap_optimize.py
|
adrn/thrift-shop
|
8dcd8d7e242ded1263edb4b1fb7c05f04c05b47c
|
[
"MIT"
] | 1
|
2021-09-07T22:36:06.000Z
|
2021-09-07T22:36:06.000Z
|
# Standard library
import atexit
import os
os.environ["OMP_NUM_THREADS"] = "1"
import sys
import traceback
# Third-party
from astropy.utils import iers
iers.conf.auto_download = False
import astropy.table as at
import numpy as np
# This project
from totoro.config import cache_path
from totoro.data import datasets, elem_names
from totoro.objective import TorusImagingObjective
def worker(task):
    """Optimize one bootstrap resample and write the result to its temp CSV.

    ``task`` is ``(index, objective, x0, tmp_filename)``; returns the temp
    filename. Failures (or non-converged fits) are recorded as NaN rows.
    """
    i, obj, x0, tmp_filename = task

    res = None
    try:
        res = obj.minimize(x0=x0, method="nelder-mead",
                           options=dict(maxiter=250))
        print(f"{i} finished optimizing: {res}")
    except Exception as e:
        print(f"{i} failed: {str(e)}")
        traceback.print_exc()

    if res is not None and res.success:
        best = res.x
    else:
        best = np.nan * np.array(x0)

    row = {
        'zsun': [best[0]],
        'vzsun': [best[1]],
        'mdisk_f': [best[2]],
        'disk_hz': [best[3]],
    }
    at.Table(row).write(tmp_filename, overwrite=True)
    return tmp_filename
def combine_output(all_filename, this_cache_path, elem_name):
    """Merge all per-task temp CSVs for one element into the main results file.

    Appends to any existing results table, then deletes the temp files.
    No-op when there are no temp files.
    """
    import glob

    pattern = str(this_cache_path / f'tmp-*{elem_name}*.csv')

    prev_table = at.Table.read(all_filename) if os.path.exists(all_filename) else None

    # combine the individual worker cache files
    tmp_files = glob.glob(pattern)
    tables = [at.Table.read(fn) for fn in tmp_files]
    if not tables:
        return

    combined = at.vstack(tables)
    if prev_table:
        combined = at.vstack((prev_table, combined))
    combined.write(all_filename, overwrite=True)

    for fn in tmp_files:
        os.unlink(fn)
def main(pool, overwrite=False):
    """Bootstrap-optimize the torus-imaging objective for every dataset/element.

    For each element abundance in each dataset, draws ``bootstrap_K``
    bootstrap resamples, minimizes the objective for each via the worker
    pool, and merges the per-task CSVs into one results file per element.
    Existing result files are skipped unless ``overwrite`` is set.
    """
    tree_K = 32  # MAGIC NUMBER: set heuristically in Objective-function.ipynb
    bootstrap_K = 128  # MAGIC NUMBER

    for data_name, d in datasets.items():
        # TODO: make seed configurable?
        rnd = np.random.default_rng(seed=42)

        # loop over all elements
        tasks = []
        cache_paths = []
        cache_filenames = []
        for elem_name in elem_names[data_name]:
            print(f"Running element: {elem_name}")

            # TODO: if galah in data_name, filter on flag??

            this_cache_path = cache_path / data_name
            this_cache_filename = (this_cache_path /
                                   f'optimize-results-{elem_name}.csv')
            if this_cache_filename.exists() and not overwrite:
                print(f"Cache file exists for {elem_name}: "
                      f"{this_cache_filename}")
                continue

            # Ensure stray per-task CSVs get merged even on early exit.
            atexit.register(combine_output,
                            this_cache_filename,
                            this_cache_path,
                            elem_name)
            cache_paths.append(this_cache_path)
            cache_filenames.append(this_cache_filename)

            # print("Optimizing with full sample to initialize bootstraps...")
            # obj = TorusImagingObjective(d.t, d.c, elem_name, tree_K=tree_K)
            # full_sample_res = obj.minimize(method="nelder-mead",
            #                                options=dict(maxiter=1024))
            # if not full_sample_res.success:
            #     print(f"FAILED TO CONVERGE: optimize for full sample failed "
            #           f"for {elem_name}")
            #     continue
            # print(f"Finished optimizing full sample: {full_sample_res.x}")
            # x0 = full_sample_res.x
            x0 = np.array([20.8, 7.78, 1.1, 0.28])  # HACK: init from fiducial

            for k in range(bootstrap_K):
                # resample the dataset with replacement for this bootstrap
                idx = rnd.choice(len(d), len(d), replace=True)
                obj = TorusImagingObjective(d[idx], elem_name,
                                            tree_K=tree_K)
                tmp_filename = (this_cache_path /
                                f'tmp-optimize-results-{elem_name}-{k}.csv')
                tasks.append((k, obj, x0, tmp_filename))

        print("Done setting up bootstrap samples - running pool.map() on "
              f"{len(tasks)} tasks")
        for _ in pool.map(worker, tasks):
            pass

        # merge results for each element that was (re)computed
        for this_cache_filename, this_cache_path, elem_name in zip(
                cache_filenames,
                cache_paths,
                elem_names[data_name]):
            combine_output(this_cache_filename, this_cache_path, elem_name)

    # NOTE(review): exits the whole process here rather than returning --
    # presumably to terminate MPI worker pools cleanly; confirm.
    sys.exit(0)
if __name__ == '__main__':
    from argparse import ArgumentParser

    # Command-line interface: overwrite flag plus an exclusive choice of
    # serial / multiprocess / MPI execution.
    parser = ArgumentParser()
    parser.add_argument("-o", "--overwrite", dest="overwrite",
                        action="store_true")

    group = parser.add_mutually_exclusive_group()
    group.add_argument("--procs", dest="n_procs", default=1,
                       type=int, help="Number of processes.")
    group.add_argument("--mpi", dest="mpi", default=False,
                       action="store_true", help="Run with MPI.")

    parsed = parser.parse_args()

    # Select the schwimmbad pool implementation from the flags.
    if parsed.mpi:
        from schwimmbad.mpi import MPIPool
        Pool, Pool_kwargs = MPIPool, dict()
    elif parsed.n_procs > 1:
        from schwimmbad import MultiPool
        Pool, Pool_kwargs = MultiPool, dict(processes=parsed.n_procs)
    else:
        from schwimmbad import SerialPool
        Pool, Pool_kwargs = SerialPool, dict()

    with Pool(**Pool_kwargs) as pool:
        main(pool=pool, overwrite=parsed.overwrite)

    sys.exit(0)
| 31.162304
| 79
| 0.586022
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,606
| 0.269825
|
cedc87d4e440dc6b8050ce800298170c9981e927
| 3,209
|
py
|
Python
|
Solutions/346.py
|
ruppysuppy/Daily-Coding-Problem-Solutions
|
37d061215a9af2ce39c51f8816c83039914c0d0b
|
[
"MIT"
] | 70
|
2021-03-18T05:22:40.000Z
|
2022-03-30T05:36:50.000Z
|
Solutions/346.py
|
ungaro/Daily-Coding-Problem-Solutions
|
37d061215a9af2ce39c51f8816c83039914c0d0b
|
[
"MIT"
] | null | null | null |
Solutions/346.py
|
ungaro/Daily-Coding-Problem-Solutions
|
37d061215a9af2ce39c51f8816c83039914c0d0b
|
[
"MIT"
] | 30
|
2021-03-18T05:22:43.000Z
|
2022-03-17T10:25:18.000Z
|
"""
Problem:
You are given a huge list of airline ticket prices between different cities around the
world on a given day. These are all direct flights. Each element in the list has the
format (source_city, destination, price).
Consider a user who is willing to take up to k connections from their origin city A to
their destination B. Find the cheapest fare possible for this journey and print the
itinerary for that journey.
For example, our traveler wants to go from JFK to LAX with up to 3 connections, and our
input flights are as follows:
[
('JFK', 'ATL', 150),
('ATL', 'SFO', 400),
('ORD', 'LAX', 200),
('LAX', 'DFW', 80),
('JFK', 'HKG', 800),
('ATL', 'ORD', 90),
('JFK', 'LAX', 500),
]
Due to some improbably low flight prices, the cheapest itinerary would be
JFK -> ATL -> ORD -> LAX, costing $440.
"""
from sys import maxsize
from typing import Dict, List, Optional, Tuple
from DataStructures.Graph import GraphDirectedWeighted
from DataStructures.PriorityQueue import MinPriorityQueue
def modified_dijkstra(
    graph: GraphDirectedWeighted, start: str, k: int
) -> Tuple[Dict[str, int], Dict[str, Optional[str]]]:
    """Dijkstra's algorithm limited to paths with at most ``k`` intermediate stops.

    Returns ``(dist, parent)``: the cheapest known cost per node and the
    predecessor map from which paths can be reconstructed. Unreachable
    nodes keep ``dist == maxsize`` and ``parent is None``.
    """
    dist = {node: maxsize for node in graph.connections}
    parent = {node: None for node in graph.connections}
    dist[start] = 0

    priority_queue = MinPriorityQueue()
    [priority_queue.push(node, weight) for node, weight in dist.items()]
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        ancestors = 0
        parent_node = parent[node]
        # calculating ancestors (number of hops back to the start)
        while parent_node:
            ancestors += 1
            parent_node = parent[parent_node]
        # limiting distance update till k moves: only relax edges out of
        # nodes reached within k hops, so no path exceeds k connections
        if ancestors <= k:
            for neighbour in graph.connections[node]:
                if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                    dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                    parent[neighbour] = node
                    priority_queue.update_key(neighbour, dist[neighbour])
    return dist, parent
def generate_path(
    flights: List[Tuple[str, str, int]], start: str, dest: str, k: int
) -> Tuple[int, List[str]]:
    """Return ``(cost, path)`` for the cheapest ``start``->``dest`` itinerary
    using at most ``k`` connections.

    Returns an empty list when no such itinerary exists (kept for backward
    compatibility with existing callers).
    """
    # graph generation
    graph = GraphDirectedWeighted()
    # BUG FIX: the loop previously unpacked into ``dest``, clobbering the
    # destination parameter -- the lookups below then used the *last edge's*
    # destination instead of the requested one.
    for src, dst, wt in flights:
        graph.add_edge(src, dst, wt)
    # running dijkstra's algorithm (k-limited variant)
    dist, parent = modified_dijkstra(graph, start, k)
    # no predecessor recorded => dest unreachable within k connections
    if not parent[dest]:
        return []
    # walk the parent chain backwards from dest, then reverse
    path, cost = [dest], dist[dest]
    curr = parent[dest]
    while curr:
        path.append(curr)
        curr = parent[curr]
    return cost, path[::-1]
if __name__ == "__main__":
flights = [
("JFK", "ATL", 150),
("ATL", "SFO", 400),
("ORD", "LAX", 200),
("LAX", "DFW", 80),
("JFK", "HKG", 800),
("ATL", "ORD", 90),
("JFK", "LAX", 500),
]
print(generate_path(flights, "JFK", "LAX", 3))
"""
SPECS:
TIME COMPLEXITY: O(e x v x log(v))
SPACE COMPLEXITY: O(v ^ 2)
[even though Dijkstra's algorithm runs in O(e x log(v)), limiting the search to
a maximum of k moves increases the complexity to O(e x v x log(v))]
"""
| 31.15534
| 87
| 0.631973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,273
| 0.396697
|
cedc891e437dc6a7d998e688a8eceada192b23b2
| 2,518
|
py
|
Python
|
training/pytorch_ddp_nvidia.py
|
gclouduniverse/dlenv-templates
|
27e662c6a5bcea1d828252aa2632bc545d38d082
|
[
"MIT"
] | null | null | null |
training/pytorch_ddp_nvidia.py
|
gclouduniverse/dlenv-templates
|
27e662c6a5bcea1d828252aa2632bc545d38d082
|
[
"MIT"
] | null | null | null |
training/pytorch_ddp_nvidia.py
|
gclouduniverse/dlenv-templates
|
27e662c6a5bcea1d828252aa2632bc545d38d082
|
[
"MIT"
] | null | null | null |
"""PyTorch Distributed Data Parallel example from NVIDIA."""
# https://github.com/NVIDIA/DeepLearningExamples
import argparse
import utils
import virtual_machine
def main():
    """Launch NVIDIA's ConvNets DDP example across several VMs.

    Parses CLI flags, provisions each VM (DALI, apex, dllogger,
    pytorch-quantization, DeepLearningExamples repo), then starts one
    distributed worker per VM training ResNet-50 on synthetic data.
    """
    parser = argparse.ArgumentParser(description='Optional app description')
    parser.add_argument('--vm-name', dest='vm_name', type=str, required=True)
    parser.add_argument('--vm-number', dest='vm_num', type=int, default=2)
    parser.add_argument('--gpu-per-vm', dest='gpu_per_vm', type=int, default=1)
    args = parser.parse_args()
    gpu_per_vm = args.gpu_per_vm
    vm_num = args.vm_num
    vms = []
    # VM instances are named "<base>-0", "<base>-1", ...
    for i in range(args.vm_num):
        vm_name = '{}-{}'.format(args.vm_name, str(i))
        vms.append(virtual_machine.VirtualMachine(vm_name))
    # prepare
    prepare_command = """
# install DALI
pip install --extra-index-url https://developer.download.nvidia.com/compute/redist nvidia-dali-cuda110
# install apex
pip install apex
# install nvidia-dllogger
pip install nvidia-pyindex
pip install nvidia-dllogger
# install pytorch-quantization
git clone https://github.com/NVIDIA/TensorRT.git
cd TensorRT/tools/pytorch-quantization || exit
python setup.py install
cd || exit
# clone main repo
git clone https://github.com/NVIDIA/DeepLearningExamples.git
"""
    utils.run_threads(vms, [prepare_command] * len(vms), login_shell=True)
    # run
    master_ip = vms[0].internal_ip
    master_port = '1234'
    # pylint: disable=implicit-str-concat
    # NOTE: "syntetic" below matches the upstream script's flag value spelling.
    run_command_template = (
        'python '
        './DeepLearningExamples/PyTorch/Classification/ConvNets/multiproc.py '
        '--node_rank {node_rank} '
        '--master_addr {master_ip} '
        '--master_port {master_port} '
        '--nnodes {vm_num} '
        '--nproc_per_node {gpu_per_vm} '
        './DeepLearningExamples/PyTorch/Classification/ConvNets/main.py '
        '~/fake-data-path '
        '--data-backend syntetic '
        '--raport-file raport.json '
        '-j8 -p 100 '
        '--lr 4.096 '
        '--optimizer-batch-size 4096 '
        '--warmup 16 '
        '--arch resnet50 '
        '--label-smoothing 0.1 '
        '--lr-schedule cosine '
        '--mom 0.875 '
        '--wd 3.0517578125e-05 '
        '--no-checkpoints '
        '-b 256 '
        '--amp '
        '--static-loss-scale 128 '
        '--epochs 2'
    )
    commands = []
    # Each VM receives the same command, differing only in its node_rank.
    for i, _ in enumerate(vms):
        run_command = run_command_template.format(
            node_rank=i,
            master_ip=master_ip,
            master_port=master_port,
            vm_num=vm_num,
            gpu_per_vm=gpu_per_vm,
        )
        commands.append(run_command)
    utils.run_threads(vms, commands, login_shell=True)
# Script entry point.
if __name__ == "__main__":
    main()
| 26.505263
| 102
| 0.685068
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,360
| 0.540111
|
cedd3f2eb0e40b696aec07eb8b2518152978c2ab
| 19,880
|
py
|
Python
|
Back/ecoreleve_server/modules/stations/station_resource.py
|
NaturalSolutions/ecoReleve-Data
|
535a6165984544902563eca7cb10d07f1686c963
|
[
"MIT"
] | 15
|
2015-02-15T18:02:54.000Z
|
2021-10-31T00:08:41.000Z
|
Back/ecoreleve_server/modules/stations/station_resource.py
|
NaturalSolutions/ecoReleve-Data
|
535a6165984544902563eca7cb10d07f1686c963
|
[
"MIT"
] | 505
|
2015-03-24T15:16:55.000Z
|
2022-03-21T22:17:11.000Z
|
Back/ecoreleve_server/modules/stations/station_resource.py
|
NaturalSolutions/ecoReleve-Data
|
535a6165984544902563eca7cb10d07f1686c963
|
[
"MIT"
] | 31
|
2015-04-09T10:48:31.000Z
|
2020-12-08T16:32:30.000Z
|
import json
import itertools
from datetime import datetime, timedelta
import pandas as pd
from sqlalchemy import select, and_, join
from sqlalchemy.exc import IntegrityError
import copy
from ecoreleve_server.core import RootCore
from ecoreleve_server.core.base_resource import DynamicObjectResource, DynamicObjectCollectionResource
from .station_model import Station, Station_FieldWorker
from ..monitored_sites.monitored_site_model import MonitoredSite, MonitoredSitePosition
from ..users.user_model import User
from ..field_activities import fieldActivity
from ..observations.observation_resource import ObservationsResource
from .station_collection import StationCollection
from ..permissions import context_permissions
from ..sensors.sensor_data import CamTrap
from ...utils.datetime import parse
class StationResource(DynamicObjectResource):
    """Resource wrapping one Station; exposes its observations as children."""

    model = Station
    children = [('observations', ObservationsResource)]
    __acl__ = context_permissions['stations']

    def delete(self):
        """Delete the wrapped station and return {'id': <deleted id or None>}."""
        deleted_id = None
        if self.objectDB:
            deleted_id = self.objectDB.ID
            DynamicObjectResource.delete(self)
        return {'id': deleted_id}
class StationsResource(DynamicObjectCollectionResource):
    """Collection resource for stations: search, (bulk) insert, delete,
    camera-trap linking, field-worker enrichment and GeoJSON export."""

    Collection = StationCollection
    model = Station
    moduleFormName = 'StationForm'
    moduleGridName = 'StationGrid'
    children = [('{int}', StationResource)]
    __acl__ = context_permissions['stations']

    def __init__(self, ref, parent):
        DynamicObjectCollectionResource.__init__(self, ref, parent)
        self.__acl__ = context_permissions[ref]

    def insertWithCamTrap(self):
        """Insert one station from the JSON body and link it to a CamTrap row."""
        session = self.request.dbsession
        data = {}
        for items, value in self.request.json_body.items():
            data[items] = value
        if data['camtrapId'] is None:
            self.request.response.status_code = 502
            raise KeyError("no camtrapId submitted")
        else:
            idCreated = -1
            camtrapItem = session.query(CamTrap).get(data['camtrapId'])
            self.objectDB.values = data
            try:
                session.begin_nested()
                try:
                    session.add(self.objectDB)
                    session.flush()
                except Exception as e:
                    # error when try inserting station ever on server
                    # hack: handle error raised by business ruler
                    # need to find a cleaner way
                    self.request.response.status_code = 409
                    self.request.response.text = e.value
                    session.rollback()
                    pass
                session.commit()
                # session.refresh(self.objectDB)
                idCreated = self.objectDB.ID
                # Mark the camtrap row as linked/validated (validated == 2).
                camtrapItem.stationId = idCreated
                camtrapItem.validated = 2
                session.add(camtrapItem)
                session.flush()
            except Exception as e:
                # NOTE(review): this overwrites a possible 409 with 502, so the
                # 409 branch below is only reached when commit succeeded.
                self.request.response.status_code = 502
            if self.request.response.status_code == 409 :
                return self.request.response.text
            else:
                return {'ID': idCreated}

    def insertAllWithCamTrap(self):
        """Bulk variant of insertWithCamTrap; returns one {camtrapId: result} per row."""
        session = self.request.dbsession
        session.autoflush = False
        data = self.request.json_body
        result = []
        collectionItem = []
        for row in data:
            try:
                self.newobjectDB = Station()
                self.newobjectDB.values = row
                session.begin_nested()
                try:
                    session.add(self.newobjectDB)
                    session.flush()
                    camtrapItem = session.query(CamTrap).get(row['camtrapId'])
                    if self.newobjectDB.ID:
                        camtrapItem.stationId = self.newobjectDB.ID
                        camtrapItem.validated = 2
                        session.add(camtrapItem)
                        session.flush()
                        result.append({ row['camtrapId'] : self.newobjectDB.ID })
                except Exception as e:
                    # error when try inserting station ever on server
                    # hack: handle error raised by business ruler
                    # need to find a cleaner way
                    result.append({ row['camtrapId'] : e.value })
                    # 202: partial success — some rows failed.
                    self.request.response.status_code = 202
                    self.newobjectDB.ID = None
                    session.rollback()
                    pass
                session.commit()
            except Exception as e:
                self.request.response.status_code = 502
                raise e
        return result

    def deleteStationWithCamTrap(self):
        """Delete stations and unlink their CamTrap rows (stationId -> None)."""
        session = self.request.dbsession
        data = self.request.json_body
        result = []
        for row in data:
            camTrapItem = session.query(CamTrap).get(row['id'])
            stationItem = session.query(self.model).get(row['stationId'])
            try:
                if stationItem:
                    session.delete(stationItem)
                camTrapItem.stationId = None
                session.add(camTrapItem)
                result.append({camTrapItem.pk_id : 'station deleted'})
            except Exception as e:
                self.request.response.status_code = 502
                raise e
        return result

    def insertAll(self) :
        """Insert every row of the JSON body; returns [{name: new ID or None}]."""
        session = self.request.dbsession
        data = self.request.json_body
        result = []
        collectionItem = []
        for row in data:
            self.newobjectDB = Station()
            collectionItem.append(self.newobjectDB)
            row = self.handleDataBeforeInsert(row)
            self.newobjectDB.values = row
            self.session.add(self.newobjectDB)
            self.session.flush()
        for item in collectionItem:
            if item.ID :
                result.append({ ''+str(item.Name)+'' : item.ID})
            else :
                result.append({ ''+str(item.Name)+'' : None})
        return result

    def handleDataBeforeInsert(self, data):
        """Stamp the authenticated user id onto the row as its creator."""
        user_id = self.request.authenticated_userid['iss']
        data['creator'] = user_id
        return data

    def updateMonitoredSite(self):
        """Propagate a station's position/comments to its monitored site."""
        session = self.request.dbsession
        data = self.request.params.mixed()
        if "FK_MonitoredSite" not in data or data['FK_MonitoredSite'] == '':
            return 'Station is not monitored'
        try:
            data['StartDate'] = data['StationDate']
            data['Precision'] = data['precision']
            if data.get('Name', None):
                del data['Name']
            currentMonitoredSite = session.query(MonitoredSite).get(data['FK_MonitoredSite'])
            # Deep copy so the ORM only sees the final, complete value dict.
            tmpVal = copy.deepcopy(currentMonitoredSite.values)
            # tmpVal = currentMonitoredSite.values
            tmpVal['LAT'] = data['LAT']
            tmpVal['LON'] = data['LON']
            tmpVal['ELE'] = data['ELE']
            tmpVal['Comments'] = data['Comments']
            tmpVal['StartDate'] = data['StationDate']
            # Keep creationDate no later than the station date.
            if tmpVal['creationDate'] > parse(data['StationDate'] ) :
                tmpVal['creationDate'] = data['StationDate']
            # print("fetched the monitored site", currentMonitoredSite.values)
            # print("about to apply the values", data)
            currentMonitoredSite.values = tmpVal
            # currentMonitoredSite.updateFromJSON(data)
            return 'Monitored site position was updated'
        except IntegrityError as e:
            session.rollback()
            return 'This location already exists'
        except Exception as e:
            print(e)

    def getFormImportGPX(self):
        """Return the GPX import form definition."""
        return self.getForm(objectType=1, moduleName='ImportFileForm')

    def lastImported(self, obj, params):
        '''
        will add all this criteria if this params is apply
        '''
        user = self.request.authenticated_userid['iss']
        # Window: everything imported since midnight two days ago.
        dateFrom = datetime.today() - timedelta(days=2)
        dateFrom = dateFrom.replace(
            hour=0,
            minute=0,
            second=0,
            microsecond=0
        )
        obj['Operator'] = '='
        obj['Value'] = True
        criteria = [
            {
                'Column': 'creator',
                'Operator': '=',
                'Value': user
            },
            {
                'Column': 'FK_StationType',
                'Operator': '=',
                'Value': 4 # => TypeID of GPX station
            },
            {
                "Column": "creationDate",
                "Operator": ">=",
                "Value": dateFrom.strftime("%Y-%m-%dT%H:%M:%SZ")
            }
        ]
        params['criteria'].extend(criteria)

    def handleCriteria(self, params):
        """Expand pseudo-criteria (LastImported), hide pending stations, add geo filters."""
        if 'criteria' in params:
            lastImported = False
            for obj in params['criteria']:
                if obj['Column'] == 'LastImported':
                    self.lastImported(obj, params)
                    lastImported = True
            if not lastImported:
                # NOTE(review): this map() is never consumed (Py3 lazy) and its
                # result is discarded — dead code; confirm and remove upstream.
                map(lambda x: obj['Column'] != 'FK_StationType', params['criteria'])
                removePending = [
                    {
                        'Column': 'FK_StationType',
                        'Operator': 'Is not',
                        'Value': 6 # => TypeID of pending stations
                    }
                ]
                params['criteria'].extend(removePending)
        if 'geo' in self.request.params.mixed():
            self.getGeoJsonParams(params)
        return params

    def handleResult(self, result):
        """Post-process search results: GeoJSON for geo queries, else field workers."""
        if 'geo' in self.request.params.mixed():
            data = self.getGeoJsonResult(result)
        else:
            data = self.getFieldWorkers(result)
            # data = result
        return data

    def handleCount(self, count, callback, params):
        """Short-circuit geo queries above 50000 rows to an empty result."""
        if 'geo' in self.request.params.mixed() and count > 50000:
            return []
        else:
            return callback(**params)

    def retrieve(self):
        """Run a search; geo exports are unpaged, normal grids are paged."""
        if 'geo' in self.request.params.mixed():
            paging = False
        else:
            paging = True
        return self.search(paging=paging)

    def deleteMany(self):
        """Delete the stations whose IDs are in the JSON body; map ID -> status."""
        error = False
        data = {}
        if len(self.request.json_body) > 0 :
            session = self.request.dbsession
            stas = session.query(Station).filter(Station.ID.in_(self.request.json_body)).all()
            for sta in stas:
                data[str(sta.ID)] = 'not deleted'
                try :
                    session.delete(sta)
                    data[str(sta.ID)] = 'deleted'
                except :
                    self.request.response.status_code = 502
        return data

    def deleteManyWithCamTrap(self):
        """Delete stations and unlink their camtraps; map stationId -> status."""
        error = False
        data = {}
        if len(self.request.json_body) > 0 :
            session = self.request.dbsession
            stas = session.query(Station).filter(Station.ID.in_(self.request.json_body)).all()
            camtraps = session.query(CamTrap).filter(CamTrap.stationId.in_(self.request.json_body)).all()
            if len(camtraps):
                for cam in camtraps:
                    data[str(cam.stationId)] = 'not exist'
                    flagNotFound = True
                    for sta in stas:
                        if sta.ID == cam.stationId:
                            flagNotFound = False
                            data[str(cam.stationId)] = 'not deleted'
                            try:
                                session.delete(sta)
                                cam.stationId = None
                                session.add(cam)
                                # NOTE(review): cam.stationId was just set to
                                # None, so this writes data['None'] instead of
                                # the real station id — confirm and fix upstream.
                                data[str(cam.stationId)] = 'deleted'
                            except:
                                self.request.response.status_code = 502
                    if flagNotFound:
                        try:
                            cam.stationId = None
                            session.add(cam)
                        except:
                            self.request.response.status_code = 502
        return data

    def getFieldActivityList(self):
        """Return [{'label', 'value'}] for all field activities, sorted by label."""
        query = select([fieldActivity.ID.label('value'),
                        fieldActivity.Name.label('label')])
        result = self.session.execute(query).fetchall()
        res = []
        for row in result:
            res.append({'label': row['label'], 'value': row['value']})
        return sorted(res, key=lambda x: x['label'])

    def getFieldWorkers(self, data):
        """Attach the list of field-worker logins to every station row in *data*."""
        params, history, startDate = self.formatParams({}, paging=True)
        # params = {'selectable': ['ID'],
        #           'filters':params.get('criteria', [])#,
        #           #'offset':params.get('offset'),
        #           #'limit':params.get('per_page')#,
        #           #'order_by':params.get('order_by')
        #           }
        params = {
            'selectable': [a.get('Column') for a in params.get('criteria')],
            'filters': params.get('criteria', [])
        }
        # Re-run the filtered station query as a CTE restricted to IDs only.
        queryTmp = self.collection.build_query(**params)
        queryTmp = queryTmp.with_only_columns([getattr(self.model, 'ID')])
        queryCTE = queryTmp.cte()
        # queryCTE = self.collection.build_query(**params).cte()
        joinFW = join(
            Station_FieldWorker,
            User,
            Station_FieldWorker.FK_FieldWorker == User.id
        )
        joinTable = join(
            queryCTE,
            joinFW,
            queryCTE.c['ID'] == Station_FieldWorker.FK_Station
        )
        query = select([
            Station_FieldWorker.FK_Station,
            User.Login
        ]).select_from(joinTable)
        FieldWorkers = self.session.execute(query).fetchall()
        # Group logins by station id.
        list_ = {}
        for x, y in FieldWorkers:
            list_.setdefault(x, []).append(y)
        for row in data[1]:
            try:
                row['FK_FieldWorker_FieldWorkers'] = list_[row['ID']]
            except Exception as e:
                print(e)
                pass
        return data

    def getGeoJsonParams(self, params):
        """Drop ordering and require non-null coordinates for geo exports."""
        params['order_by'] = []
        criteria = [{'Column': 'LAT',
                     'Operator': 'Is not',
                     'Value': None
                     },
                    {'Column': 'LON',
                     'Operator': 'Is not',
                     'Value': None
                     }]
        params['criteria'].extend(criteria)

    def getGeoJsonResult(self, data):
        """Convert (count, rows) into a GeoJSON FeatureCollection (capped at 50000)."""
        geoJson = []
        exceed = True
        countResult = data[0]['total_entries']
        result = data[1]
        if countResult < 50000:
            exceed = False
            for row in result:
                geoJson.append({
                    'type': 'Feature',
                    'properties': {
                        'name': row['Name'],
                        'date': row['StationDate']},
                    'geometry': {
                        'type': 'Point',
                        'coordinates': [row['LAT'], row['LON']]}
                })
        data = {'type': 'FeatureCollection',
                'features': geoJson,
                'exceed': exceed}
        return data

    def insertMany(self):
        ### deprecated ???
        """Dedup-insert GPX waypoints; returns {'exist': n, 'new': m}."""
        session = self.request.dbsession
        data = self.request.json_body
        data_to_insert = []
        format_dt = '%d/%m/%Y %H:%M'
        dateNow = datetime.now()
        model = self.model
        # Rename field and convert date
        # TODO
        for row in data:
            newRow = {}
            newRow['LAT'] = row['latitude']
            newRow['LON'] = row['longitude']
            newRow['ELE'] = row['elevation']
            newRow['precision'] = row['precision']
            newRow['Name'] = row['name']
            newRow['fieldActivityId'] = row['fieldActivity']
            newRow['precision'] = 10 # row['Precision']
            newRow['creationDate'] = dateNow
            newRow['creator'] = self.request.authenticated_userid['iss']
            newRow['FK_StationType'] = 4
            newRow['id'] = row['id']
            newRow['NbFieldWorker'] = row['NbFieldWorker']
            newRow['StationDate'] = datetime.strptime(
                row['waypointTime'], format_dt)
            if 'fieldActivity' in row:
                newRow['fieldActivityId'] = row['fieldActivity']
            if 'NbFieldWorker' in row:
                newRow['NbFieldWorker'] = row['NbFieldWorker']
            data_to_insert.append(newRow)
        # Load date into pandas DataFrame then round LAT,LON into decimal(5)
        DF_to_check = pd.DataFrame(data_to_insert)
        DF_to_check['LAT'] = DF_to_check['LAT'].round(5)
        DF_to_check['LON'] = DF_to_check['LON'].round(5)
        maxDate = DF_to_check['StationDate'].max()
        minDate = DF_to_check['StationDate'].min()
        maxLon = DF_to_check['LON'].max()
        minLon = DF_to_check['LON'].min()
        maxLat = DF_to_check['LAT'].max()
        minLat = DF_to_check['LAT'].min()
        # Retrieve potential duplicated stations from Database
        query = select([model]).where(
            and_(
                model.StationDate.between(minDate, maxDate),
                model.LAT.between(minLat, maxLat)
            )).where(model.LON.between(minLon, maxLon))
        data_to_insert = []
        result_to_check = pd.read_sql_query(query, session.get_bind())
        if result_to_check.shape[0] > 0:
            # IF potential duplicated stations, load them into pandas DataFrame
            result_to_check['LAT'] = result_to_check['LAT'].round(5)
            result_to_check['LON'] = result_to_check['LON'].round(5)
            merge_check = pd.merge(DF_to_check, result_to_check, on=[
                'LAT', 'LON', 'StationDate'])
            # Get only non existing data to insert
            DF_to_insert = DF_to_check[~DF_to_check['id'].isin(merge_check['id'])]
            DF_to_insert = DF_to_insert.drop(['id'], 1)
            data_to_insert = json.loads(DF_to_insert.to_json(
                orient='records', date_format='iso'))
        else:
            data_to_insert = json.loads(DF_to_check.to_json(
                orient='records', date_format='iso'))
        staListID = []
        nbExc = 0
        if len(data_to_insert) != 0:
            for sta in data_to_insert:
                curSta = model(type_id=4)
                curSta.init_on_load()
                curDate = datetime.strptime(
                    sta['StationDate'], "%Y-%m-%dT%H:%M:%S.%fZ")
                curSta.updateFromJSON(sta)
                curSta.StationDate = curDate
                try:
                    session.add(curSta)
                    session.flush()
                    session.commit()
                    staListID.append(curSta.ID)
                except IntegrityError as e:
                    session.rollback()
                    nbExc += 1
                    pass
            result = staListID
            # Insert FieldWorkers
            # NOTE(review): this condition is True even when FieldWorkers is
            # None ("" fails the first clause, None fails the second) — was
            # `and` intended? As written, None would crash the map() below.
            if not data[0]['FieldWorkers'] is None or not data[0]['FieldWorkers'] == "":
                list_ = list(map(lambda b: list(map(lambda a: {
                    'FK_Station': a,
                    'FK_FieldWorker': b},
                    result)),
                    data[0]['FieldWorkers']))
                list_ = list(itertools.chain.from_iterable(list_))
                stmt = Station_FieldWorker.__table__.insert().values(list_)
                session.execute(stmt)
        else:
            result = []
        response = {'exist': len(data) - len(data_to_insert) +
                    nbExc, 'new': len(data_to_insert) - nbExc}
        return response
# Register the stations collection under the API root.
RootCore.children.append(('stations', StationsResource))
| 36.883117
| 105
| 0.523793
| 19,014
| 0.956391
| 0
| 0
| 0
| 0
| 0
| 0
| 3,179
| 0.159901
|
cedd6b663a220db3d928b8923e83432fb4962366
| 6,002
|
py
|
Python
|
t_core/Mutex/HIgnoreRuleNAC0.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 3
|
2017-06-02T19:26:27.000Z
|
2021-06-14T04:25:45.000Z
|
t_core/Mutex/HIgnoreRuleNAC0.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 8
|
2016-08-24T07:04:07.000Z
|
2017-05-26T16:22:47.000Z
|
t_core/Mutex/HIgnoreRuleNAC0.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 1
|
2019-10-31T06:00:23.000Z
|
2019-10-31T06:00:23.000Z
|
from core.himesis import Himesis, HimesisPreConditionPatternNAC
import cPickle as pickle
from uuid import UUID
class HIgnoreRuleNAC0(HimesisPreConditionPatternNAC):
    """Generated NAC (negative application condition) pattern for the
    IgnoreRule transformation (Python 2 era code: cPickle, old-style super)."""

    def __init__(self, LHS):
        """
        Creates the himesis graph representing the AToM3 model HIgnoreRuleNAC0.
        """
        # Create the himesis graph
        # Edges: node 1 -> node 0 -> node 2 (Resource -> held_by -> Process).
        EDGE_LIST = [(1, 0), (0, 2)]
        # Flag this instance as compiled now
        self.is_compiled = True
        super(HIgnoreRuleNAC0, self).__init__(name='HIgnoreRuleNAC0', num_nodes=3, edges=EDGE_LIST, LHS=LHS)
        # Set the graph attributes
        # Pickled list of metamodel names: ['MT_pre__Mutex', 'MoTifRule'].
        self["mm__"] = pickle.loads("""(lp1
S'MT_pre__Mutex'
p2
aS'MoTifRule'
p3
a.""")
        self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the NAC have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True forbids the rule from being applied,
# returning False enables the rule to be applied.
#===============================================================================
return True
"""
        self["name"] = """"""
        self["GUID__"] = UUID('e7b125bc-601d-45cd-bd93-94bf2458403b')
        # Set the node attributes
        # Node 0: the held_by association.
        self.vs[0]["MT_subtypeMatching__"] = False
        self.vs[0]["MT_label__"] = """5"""
        # Pickled empty list (no subtypes).
        self.vs[0]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
        self.vs[0]["mm__"] = """MT_pre__held_by"""
        self.vs[0]["MT_dirty__"] = False
        self.vs[0]["GUID__"] = UUID('b5c13282-57af-43e8-9c0d-b2ba70fa3e80')
        # Node 1: the Resource end of the association.
        self.vs[1]["MT_subtypeMatching__"] = False
        self.vs[1]["MT_label__"] = """4"""
        self.vs[1]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
        self.vs[1]["mm__"] = """MT_pre__Resource"""
        self.vs[1]["MT_dirty__"] = False
        self.vs[1]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access a matched node 'n' by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
        self.vs[1]["GUID__"] = UUID('a2180a79-1b82-4100-ae03-de9961620a9e')
        # Node 2: the Process end of the association.
        self.vs[2]["MT_subtypeMatching__"] = False
        self.vs[2]["MT_label__"] = """1"""
        self.vs[2]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
        self.vs[2]["mm__"] = """MT_pre__Process"""
        self.vs[2]["MT_dirty__"] = False
        self.vs[2]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access a matched node 'n' by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
        self.vs[2]["GUID__"] = UUID('736cb7ae-597e-4f29-8075-133e21668d5a')
        # Load the bridge between this NAC and its LHS
        from HIgnoreRuleNAC0Bridge import HIgnoreRuleNAC0Bridge
        self.bridge = HIgnoreRuleNAC0Bridge()

    def eval_name4(self, attr_value, PreNode, graph):
        """Attribute constraint for node label 4 (always matches)."""
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access a matched node 'n' by: PreNode('n').
        # To access attribute x of node n, use: PreNode('n')['x'].
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return True

    def eval_name1(self, attr_value, PreNode, graph):
        """Attribute constraint for node label 1 (always matches)."""
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access a matched node 'n' by: PreNode('n').
        # To access attribute x of node n, use: PreNode('n')['x'].
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return True

    def constraint(self, PreNode, graph):
        """
        Executable constraint code.
        @param PreNode: Function taking an integer as parameter
                        and returns the node corresponding to that label.
        """
        #===============================================================================
        # This code is executed after the nodes in the NAC have been matched.
        # You can access a matched node labelled n by: PreNode('n').
        # To access attribute x of node n, use: PreNode('n')['x'].
        # The given constraint must evaluate to a boolean expression:
        # returning True forbids the rule from being applied,
        # returning False enables the rule to be applied.
        #===============================================================================
        return True
| 45.12782
| 118
| 0.50933
| 5,870
| 0.978007
| 0
| 0
| 0
| 0
| 0
| 0
| 4,244
| 0.707098
|
0c66ce32672f9f98c60320cbf4c3eb540dfab0e0
| 918
|
py
|
Python
|
codes/dataloader.py
|
UltronMHTM/pytorch_learning
|
840d71cc499a2e87ba2774880f46c1befd5a1658
|
[
"Apache-2.0"
] | null | null | null |
codes/dataloader.py
|
UltronMHTM/pytorch_learning
|
840d71cc499a2e87ba2774880f46c1befd5a1658
|
[
"Apache-2.0"
] | null | null | null |
codes/dataloader.py
|
UltronMHTM/pytorch_learning
|
840d71cc499a2e87ba2774880f46c1befd5a1658
|
[
"Apache-2.0"
] | null | null | null |
from torch.utils.data import Dataset
from skimage import io
import os
import torch
class MnistData(Dataset):
    """Dataset over an MNIST-style directory tree: root_dir/<label>/<image file>.

    Each sample is encoded internally as the string "<label>&<file name>" and
    decoded again in __getitem__.
    """

    def __init__(self, root_dir):
        self.root_dir = root_dir
        samples = []
        # One subdirectory per class label; every file inside it is a sample.
        for label in os.listdir(self.root_dir):
            label_dir = os.path.join(self.root_dir, label)
            for file_name in os.listdir(label_dir):
                samples.append(label + "&" + file_name)
        self.images = samples

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        encoded = self.images[index]
        parts = encoded.split("&")
        img_label, img_name = parts[0], parts[1]
        img_path = os.path.join(self.root_dir, img_label, img_name)
        img = io.imread(img_path)
        return [img, img_label]
| 32.785714
| 72
| 0.602397
| 829
| 0.90305
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 0.009804
|
0c68a10dfccbb91dce925152bacac5157188cf26
| 20,478
|
py
|
Python
|
qunetsim/objects/storage/quantum_storage.py
|
pritamsinha2304/QuNetSim
|
65a7486d532816724b5c98cfdcc0910404bfe0e2
|
[
"MIT"
] | 61
|
2020-02-15T00:59:20.000Z
|
2022-03-08T10:29:23.000Z
|
qunetsim/objects/storage/quantum_storage.py
|
pritamsinha2304/QuNetSim
|
65a7486d532816724b5c98cfdcc0910404bfe0e2
|
[
"MIT"
] | 50
|
2020-01-28T12:18:50.000Z
|
2021-12-16T21:38:19.000Z
|
qunetsim/objects/storage/quantum_storage.py
|
pritamsinha2304/QuNetSim
|
65a7486d532816724b5c98cfdcc0910404bfe0e2
|
[
"MIT"
] | 27
|
2020-01-21T12:59:28.000Z
|
2022-02-21T14:23:00.000Z
|
from qunetsim.backends.rw_lock import RWLock
from qunetsim.objects.logger import Logger
import queue
class QuantumStorage(object):
"""
An object which stores qubits.
"""
STORAGE_LIMIT_ALL = 1
STORAGE_LIMIT_PER_HOST = 2
STORAGE_LIMIT_INDIVIDUALLY_PER_HOST = 3
    def __init__(self):
        """Create an empty storage using the per-host-individual limit mode."""
        # _host_dict stores host_id -> array with qubits of the host.
        self._host_dict = {}
        # _qubit_dict stores qubit_id -> dict Host_id -> Qubit objects with this id.
        self._qubit_dict = {}
        # _purpose_dict stores qubit_id -> dict Host_id -> Purpose belonging to
        # the Qubit with the same Host and ID.
        self._purpose_dict = {}
        self._storage_mode = QuantumStorage.STORAGE_LIMIT_INDIVIDUALLY_PER_HOST
        self._storage_limits_per_host = {}
        self._amount_qubits_stored_per_host = {}
        # -1 presumably means "no limit" — TODO confirm against the counter logic.
        self._default_storage_limit_per_host = -1
        self._storage_limit = -1
        self._amount_qubit_stored = 0
        # read write lock, for threaded access
        self.lock = RWLock()
        self.logger = Logger.get_instance()
        # for tracking pending requests
        # dictionary tracks the request made by a pending request.
        self._pending_request_dict = {}
        # Determines a unique ID for a pending request.
        self._request_id = 0
        # Amount of pending requests
        self._amount_pending_requests = 0
def __str__(self):
out = ""
out += "Quantum storage with the properties:\nstorage mode: %d\nstorage limit: %d\n" % (
self._storage_mode, self._storage_limit)
out += "Host dictionary is:\n"
out += "; ".join([str(key) + ":" + str([v.id for v in value])
for key, value in self._host_dict.items()])
out += "\n"
out += "Qubit dictionary is:\n"
out += "; ".join([str(key) + ":" + str(value)
for key, value in self._qubit_dict.items()])
out += "\n"
out += "Purpose dictionary is:\n"
out += "; ".join([str(key) + ":" + str(value)
for key, value in self._purpose_dict.items()])
out += "\n"
return out
    @property
    def storage_limit(self):
        """Current storage limit; its meaning depends on the storage mode."""
        return self._storage_limit
@storage_limit.setter
def storage_limit(self, new_limit):
"""
Set a new storage limit for the storage. The implementations depends on
the storage mode.
Args:
new_limit (int): The new max amount of qubit.
"""
if self._storage_mode == QuantumStorage.STORAGE_LIMIT_ALL:
self._storage_limit = new_limit
elif self._storage_mode == QuantumStorage.STORAGE_LIMIT_PER_HOST:
self._storage_limit = new_limit
elif self._storage_mode == QuantumStorage.STORAGE_LIMIT_INDIVIDUALLY_PER_HOST:
self._default_storage_limit_per_host = new_limit
for id_ in list(self._storage_limits_per_host):
self._storage_limits_per_host[id_] = new_limit
else:
raise ValueError(
"Internal Value Error, this storage mode does not exist.")
    @property
    def storage_limit_mode(self):
        """One of STORAGE_LIMIT_ALL / _PER_HOST / _INDIVIDUALLY_PER_HOST."""
        return self._storage_mode
    @storage_limit_mode.setter
    def storage_limit_mode(self, new_mode):
        # NOTE(review): the mode is not validated here; an unknown value only
        # surfaces later as a ValueError in storage_limit's setter.
        self._storage_mode = new_mode
    @property
    def amount_qubits_stored(self):
        """Total number of qubits currently stored, across all hosts."""
        return self._amount_qubit_stored
    def amount_qubits_stored_with_host(self, host_id):
        """Return how many qubits received from *host_id* are stored.

        Raises KeyError for hosts that never stored a qubit here.
        """
        return self._amount_qubits_stored_per_host[host_id]
def set_storage_limit_with_host(self, new_limit, host_id):
"""
Set a new storage limit for the storage. The implementations depends on
the storage mode.
Args:
new_limit (int): The new max amount of qubit.
host_id (str): optional, if given, and the storage mode is
STORAGE_LIMIT_INDIVIDUALLY_PER_HOST, the limit is only
set for this specific host.
"""
if self._storage_mode == QuantumStorage.STORAGE_LIMIT_INDIVIDUALLY_PER_HOST:
if host_id is None:
raise ValueError(
"Host ID must be given in this storage mode")
else:
self._storage_limits_per_host[host_id] = new_limit
else:
raise ValueError(
"Internal Value Error, this storage mode does not exist.")
    def reset_storage(self):
        """
        Reset the quantum storage.
        """
        # NOTE(review): reset_qubits_from_host mutates per-host bookkeeping while
        # _host_dict is iterated — confirm it never removes host keys themselves.
        for host in self._host_dict:
            self.reset_qubits_from_host(host)
    def release_storage(self):
        """
        Releases all qubits in this storage. The storage is not
        usable anymore after this function has been called.
        """
        self.lock.acquire_write()
        for q in self._qubit_dict.values():
            for ele in q.values():
                ele.release()
        # do not release write, storage not usable anymore
        # (the write lock is held forever on purpose so any later access blocks)
    def check_qubit_from_host_exists(self, from_host_id, purpose=None):
        """
        Check if a qubit from a host exists in this quantum storage.

        Args:
            from_host_id (str): The host id of the host from which the qubit is from.
            purpose (int): Optional, purpose of the qubit which should exist.

        Returns:
            (bool): True, if such a qubit is in the storage, false if not.
        """
        # NOTE(review): a write lock is taken for a read-only check — an
        # acquire_read would likely suffice; confirm RWLock semantics.
        self.lock.acquire_write()
        if from_host_id not in self._host_dict:
            self.lock.release_write()
            return False
        for q in self._host_dict[from_host_id]:
            if self._check_qubit_in_system(q, from_host_id, purpose):
                self.lock.release_write()
                return True
        self.lock.release_write()
        return False
def get_qubit_by_id(self, q_id):
"""
Return the qubit that has the id *q_id*
Args:
q_id (str): The ID of the qubit
Returns:
(Qubit): The qubit with the id *q_id* or None if it does not exist
"""
if q_id in self._qubit_dict:
return list(self._qubit_dict[q_id].values())[0]
return None
    def change_qubit_id(self, from_host_id, new_id, old_id=None):
        """
        Changes the ID of a qubit. If the ID is not given, a random
        qubit which is from a host is changed to the new id.

        Args:
            from_host_id (str): The ID of the owner
            new_id (str): The ID to change to
            old_id (str): The old ID

        Returns:
            (str): The old ID of the renamed qubit (None if nothing matched).
        """
        new_id = str(new_id)
        self.lock.acquire_write()
        if old_id is not None:
            old_id = str(old_id)
            qubit, purpose = self._pop_qubit_with_id_and_host_from_qubit_dict(
                old_id, from_host_id)
            if qubit is not None:
                # Re-file the qubit under its new id.
                qubit.id = new_id
                self._add_qubit_to_qubit_dict(qubit, purpose, from_host_id)
        else:
            # No old id given: rename the first stored qubit of this host.
            if from_host_id in self._host_dict and self._host_dict[from_host_id]:
                qubit = self._host_dict[from_host_id][0]
                old_id = qubit.id
                _, purpose = self._pop_qubit_with_id_and_host_from_qubit_dict(
                    old_id, from_host_id)
                qubit.id = new_id
                self._add_qubit_to_qubit_dict(qubit, purpose, from_host_id)
        self.lock.release_write()
        return old_id
def add_qubit_from_host(self, qubit, purpose, from_host_id):
"""
Adds a qubit which has been received from a host.
Args:
qubit (Qubit): qubit which should be stored.
from_host_id (str): Id of the Host from whom the qubit has
been received.
purpose (str): Purpose of the Qubit, for example EPR or data.
"""
self.lock.acquire_write()
if self._check_qubit_in_system(qubit, from_host_id, purpose=purpose):
self.logger.log("Qubit with id %s, purpose %s and from host %s"
" already in storage" % (qubit.id, purpose, from_host_id))
raise ValueError("Qubit with these parameters already in storage!")
if from_host_id not in self._host_dict:
self._add_new_host(from_host_id)
if not self._increase_qubit_counter(from_host_id):
qubit.release()
self.lock.release_write()
return
self._host_dict[from_host_id].append(qubit)
self._add_qubit_to_qubit_dict(qubit, purpose, from_host_id)
# Check if a Qubit of one of the callbacks has arrived
self._check_all_requests()
self.lock.release_write()
    def get_all_qubits_from_host(self, from_host_id, purpose=None, remove=False):
        """
        Get all Qubits from a specific host id.
        These qubits are not removed from storage!

        Args:
            from_host_id (str): The host who the qubits are from
            purpose (int): The purpose of the qubits
            remove (bool): Also remove from storage

        Returns:
            (list): The list of qubits
        """
        if from_host_id in self._host_dict:
            out = []
            self.lock.acquire_write()
            flag = False
            for q in self._host_dict[from_host_id]:
                if self._check_qubit_in_system(q, from_host_id, purpose):
                    if not remove:
                        out.append(q)
                    else:
                        flag = True
                    if remove:
                        break
            # NOTE(review): when remove=True and a matching qubit WAS found,
            # nothing is removed and [] is returned, while finding NO match
            # pops every qubit of the host — this looks inverted; confirm
            # against _get_qubit_from_host's semantics (defined elsewhere).
            if not flag and remove:
                num_qubits = len(self._host_dict[from_host_id])
                for _ in range(num_qubits):
                    out.append(self._get_qubit_from_host(from_host_id, purpose=purpose))
            self.lock.release_write()
            return out
        return []
def reset_qubits_from_host(self, from_host_id, purpose=None):
    """
    Remove all stored qubits from the host *from_host_id*.

    Args:
        from_host_id (str): The host who the qubits are from
        purpose (int): Optional purpose filter; only matching qubits
                       are removed.
    """
    self.lock.acquire_write()
    if from_host_id in self._host_dict:
        # Iterate over a snapshot: _get_qubit_from_host() removes entries
        # from self._host_dict[from_host_id], and mutating a list while
        # iterating it silently skips every other element.
        for q in list(self._host_dict[from_host_id]):
            if self._check_qubit_in_system(q, from_host_id, purpose):
                self._get_qubit_from_host(from_host_id, purpose=purpose)
    self.lock.release_write()
def _check_all_requests(self):
    """
    Checks if any of the pending requests is now fulfilled.

    Returns:
        If a request is fulfilled, the request is handled and the function
        returns the qubit of this request.
    """
    # NOTE: _remove_request() mutates the dict we are iterating, but we
    # return immediately afterwards, so iteration never resumes on the
    # modified dict. At most one request is fulfilled per call.
    for req_id, args in self._pending_request_dict.items():
        # args layout: [Queue, from_host_id, q_id, purpose]
        ret = self._get_qubit_from_host(args[1], args[2], args[3])
        if ret is not None:
            # Hand the qubit to the waiting thread via its queue.
            args[0].put(ret)
            self._remove_request(req_id)
            return ret
def _add_request(self, args):
"""
Adds a new request to the quantum storage. If a new qubit arrives, it
is checked if the request for the qubit is satisfied.
Args:
args (list): [Queue, from_host_id, q_id, purpose]
"""
self._pending_request_dict[self._request_id] = args
self._request_id += 1
self._amount_pending_requests += 1
return self._request_id
def _remove_request(self, req_id):
"""
Removes a pending request from the request dict.
Args:
req_id (int): The id of the request to remove.
"""
if req_id in self._pending_request_dict:
del self._pending_request_dict[req_id]
self._amount_pending_requests -= 1
def get_qubit_from_host(self, from_host_id, q_id=None, purpose=None, wait=0):
    """
    Returns next qubit which has been received from a host. If the qubit has
    not been receives yet, the thread is blocked for a maxiumum of the wait time,
    till the qubit arrives (The default is 0). If the id is given, the exact qubit with the id
    is returned, or None if it does not exist.
    The qubit is removed from the quantum storage.

    Args:
        from_host_id (str): Host id from who the qubit has been received.
        q_id (str): Optional Id, to return the exact qubit with the Id.
        purpose (str): Optional, purpose of the Qubit.
        wait (int): Default is 0. The maximum blocking time. -1 if blocking forever.

    Returns:
        (bool): If such a qubit exists, it returns the qubit. Otherwise, None
                is returned.
    """
    # Block forever if wait is -1
    if wait == -1:
        wait = None
    self.lock.acquire_write()
    ret = self._get_qubit_from_host(from_host_id, q_id, purpose)
    if ret is not None or wait == 0:
        # Immediate answer: qubit found, or caller asked not to block.
        self.lock.release_write()
        return ret
    # Register a pending request; an arriving qubit will be delivered
    # through this queue by _check_all_requests().
    q = queue.Queue()
    args = [q, from_host_id, q_id, purpose]
    req_id = self._add_request(args)
    self.lock.release_write()
    ret = None
    try:
        # Block (without holding the lock) until delivery or timeout.
        ret = q.get(timeout=wait)
    except queue.Empty:
        pass
    if ret is None:
        # Timed out: withdraw the request so it cannot fire later.
        self.lock.acquire_write()
        self._remove_request(req_id)
        self.lock.release_write()
    return ret
def _get_qubit_from_host(self, from_host_id, q_id=None, purpose=None):
    # Internal lookup; caller is expected to hold the write lock.
    if q_id is not None:
        qubit = self._pop_qubit_with_id_and_host_from_qubit_dict(
            q_id, from_host_id, purpose=purpose)
        if qubit is not None:
            qubit, purp = qubit
            if from_host_id not in self._host_dict or \
                    qubit not in self._host_dict[from_host_id]:
                # Qubit with the ID exists, but does not belong to the host requested
                self._add_qubit_to_qubit_dict(qubit, purp, from_host_id)
                return None
            self._host_dict[from_host_id].remove(qubit)
            self._decrease_qubit_counter(from_host_id)
            return qubit
        # NOTE(review): when q_id was given but not found, control falls
        # through to the generic purpose-based search below, which can
        # return a qubit with a DIFFERENT id -- confirm this is intended,
        # as the public docstring promises "the exact qubit or None".
    if from_host_id not in self._host_dict:
        return None
    if self._host_dict[from_host_id]:
        # check purposes of all qubits
        for _ in range(len(self._host_dict[from_host_id])):
            # Rotate through the list: pop the head, keep it only if its
            # purpose matches, otherwise re-append and try the next one.
            qubit = self._host_dict[from_host_id].pop(0)
            out = self._pop_qubit_with_id_and_host_from_qubit_dict(
                qubit.id, from_host_id, purpose=purpose)
            if out is not None:
                self._decrease_qubit_counter(from_host_id)
                return out[0]
            self._host_dict[from_host_id].append(qubit)
    return None
def _pop_qubit_with_id_and_host_from_qubit_dict(self, q_id, from_host_id, purpose=None):
    """
    Remove and return (qubit, purpose) for (q_id, from_host_id) from the
    lookup dicts, or None when missing or when *purpose* does not match.
    """
    def _pop_purpose_from_purpose_dict():
        # Remove and return the stored purpose for (q_id, from_host_id);
        # drops the per-id sub-dict once it becomes empty.
        nonlocal q_id, from_host_id
        if q_id not in self._purpose_dict:
            return None
        pur = self._purpose_dict[q_id].pop(from_host_id, None)
        if pur is not None:
            if not self._purpose_dict[q_id]:
                del self._purpose_dict[q_id]
            return pur
        return None

    purp = _pop_purpose_from_purpose_dict()
    if purp is not None:
        if purpose is None or purpose == purp:
            qubit = self._qubit_dict[q_id].pop(from_host_id, None)
            if qubit is not None:
                if not self._qubit_dict[q_id]:
                    del self._qubit_dict[q_id]
                return qubit, purp
        else:
            # Purpose mismatch: restore the purpose entry we popped above
            # so the qubit stays fully registered.
            if q_id not in self._purpose_dict:
                self._purpose_dict[q_id] = {}
            self._purpose_dict[q_id][from_host_id] = purp
    return None
def _add_qubit_to_qubit_dict(self, qubit, purpose, from_host_id):
def _add_purpose_to_purpose_dict(q_id):
nonlocal purpose, from_host_id
if q_id not in self._purpose_dict:
self._purpose_dict[q_id] = {}
self._purpose_dict[q_id][from_host_id] = purpose
if qubit.id not in self._qubit_dict:
self._qubit_dict[qubit.id] = {}
self._qubit_dict[qubit.id][from_host_id] = qubit
_add_purpose_to_purpose_dict(qubit.id)
def _add_new_host(self, host_id):
if host_id not in self._host_dict:
self._host_dict[host_id] = []
if host_id not in self._storage_limits_per_host:
self._storage_limits_per_host[host_id] = self._default_storage_limit_per_host
self._amount_qubits_stored_per_host[host_id] = 0
def _check_qubit_in_system(self, qubit, from_host_id, purpose=None):
"""
True if qubit with same parameters already in the systems
Args:
qubit (Qubit): The qubit in question
from_host_id (str): The ID of the sending host
purpose (int): Qubit's purpose
Returns:
(bool): If the qubit is in the system.
"""
if qubit.id in self._qubit_dict and \
from_host_id in self._qubit_dict[qubit.id]:
if purpose is None or (purpose == self._purpose_dict[qubit.id][from_host_id]):
return True
return False
def _check_memory_limits(self, host_id):
    """
    Checks if another qubit can be added to the storage.

    Args:
        host_id (str): The host_id the qubit should be added to.

    Returns:
        True if no storage limit has been reached, False if a memory
        limit has occurred.
    """
    mode = self._storage_mode
    if mode == QuantumStorage.STORAGE_LIMIT_ALL:
        limit = self._storage_limit
        used = self._amount_qubit_stored
    elif mode == QuantumStorage.STORAGE_LIMIT_PER_HOST:
        limit = self._storage_limit
        used = self._amount_qubits_stored_per_host[host_id]
    elif mode == QuantumStorage.STORAGE_LIMIT_INDIVIDUALLY_PER_HOST:
        limit = self._storage_limits_per_host[host_id]
        used = self._amount_qubits_stored_per_host[host_id]
    else:
        raise ValueError(
            "Internal Value Error, this storage mode does not exist.")
    # A limit of -1 encodes "unlimited".
    return limit == -1 or used < limit
def _increase_qubit_counter(self, host_id):
"""
Checks if the qubit counter can be increased, because of memory limits,
and increases the counter.
Args:
host_id (str): From who the qubit comes from.
Returns:
True, if the counter could be increased, False if not.
"""
if not self._check_memory_limits(host_id):
return False
self._amount_qubits_stored_per_host[host_id] += 1
self._amount_qubit_stored += 1
return True
def _reset_qubit_counter(self, host_id):
"""
Args:
host_id (str):
Returns:
(bool): True, if the counter could be decreased, False if not.
"""
if self._amount_qubits_stored_per_host[host_id] <= 0 or \
self._amount_qubit_stored <= 0:
return False
num_qubits = self._amount_qubits_stored_per_host[host_id]
self._amount_qubits_stored_per_host[host_id] = 0
self._amount_qubit_stored -= num_qubits
def _decrease_qubit_counter(self, host_id):
"""
Checks if the qubit counter can be decreased
and decreases the counter.
Args:
host_id (str): From who the qubit comes from.
Returns:
(bool): True, if the counter could be decreased, False if not.
"""
if self._amount_qubits_stored_per_host[host_id] <= 0 or \
self._amount_qubit_stored <= 0:
return False
self._amount_qubits_stored_per_host[host_id] -= 1
self._amount_qubit_stored -= 1
| 37.505495
| 102
| 0.592587
| 20,374
| 0.994921
| 0
| 0
| 1,227
| 0.059918
| 0
| 0
| 6,557
| 0.320197
|
0c6a40a4bea2c4e73231cb976a84217ada08384c
| 2,098
|
py
|
Python
|
tests/test_utils.py
|
Guillerbr/python-pagseguro
|
279eacf251e99a2f15d665f8193fcad0be6ea0bf
|
[
"MIT"
] | 115
|
2015-02-19T22:17:44.000Z
|
2019-07-24T17:31:30.000Z
|
tests/test_utils.py
|
rubens8848/python-pagseguro
|
08a8aa7f934b16d00948ead17a0e470a88f2479f
|
[
"MIT"
] | 49
|
2015-03-04T00:53:31.000Z
|
2019-07-13T16:41:22.000Z
|
tests/test_utils.py
|
rubens8848/python-pagseguro
|
08a8aa7f934b16d00948ead17a0e470a88f2479f
|
[
"MIT"
] | 53
|
2015-01-12T22:13:33.000Z
|
2019-07-20T01:52:48.000Z
|
# -*- coding: utf-8 -*-
import datetime
from pagseguro.utils import (is_valid_cpf, is_valid_cnpj, is_valid_email,
parse_date)
from pagseguro.exceptions import PagSeguroValidationError
import pytest
from dateutil.tz import tzutc
def test_is_valid_email():
    """is_valid_email returns well-formed addresses (incl. IDN) unchanged
    and raises PagSeguroValidationError for malformed ones."""
    for bad_address in ('@asd.com', 'bad', u'user@росси́я'):
        with pytest.raises(PagSeguroValidationError):
            is_valid_email(bad_address)
    assert is_valid_email('test@email.com') == 'test@email.com'
    assert is_valid_email(u'user@росси́я.ро') == u'user@росси́я.ро'
def test_parse_date():
    """parse_date parses '%Y-%m-%dT%H:%M:%S' strings into UTC-aware datetimes."""
    parsed = parse_date('2016-10-10T10:10:10')
    assert parsed == datetime.datetime(2016, 10, 10, 10, 10, 10, tzinfo=tzutc())
def test_is_valid_cpf():
    """is_valid_cpf accepts formatted and bare CPFs and raises on
    malformed, over-long or checksum-invalid input."""
    for bad_cpf in ('bla///', '1111111111111111111111111', '040.684.826-50'):
        with pytest.raises(PagSeguroValidationError):
            is_valid_cpf(bad_cpf)
    assert is_valid_cpf('041.684.826-50') == '041.684.826-50'
    assert is_valid_cpf('04168482650') == '04168482650'
def test_is_valid_cnpj():
    """is_valid_cnpj normalises valid CNPJs to digits-only and raises on
    malformed, short or checksum-invalid input."""
    for bad_cnpj in ('///', '1111111', '31331052000175'):
        with pytest.raises(PagSeguroValidationError):
            is_valid_cnpj(bad_cnpj)
    assert is_valid_cnpj('31331052000174') == '31331052000174'
    assert is_valid_cnpj('72.168.117/0001-90') == '72168117000190'
| 26.897436
| 78
| 0.674452
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 397
| 0.187
|
0c6b6942a6015b98ac3474b28a14d6d14c8b2df9
| 121
|
py
|
Python
|
G53IDS/run.py
|
jayBana/InventoryMan
|
0826f9c98062fb6600f77a721311cbf27719e528
|
[
"Apache-2.0"
] | null | null | null |
G53IDS/run.py
|
jayBana/InventoryMan
|
0826f9c98062fb6600f77a721311cbf27719e528
|
[
"Apache-2.0"
] | null | null | null |
G53IDS/run.py
|
jayBana/InventoryMan
|
0826f9c98062fb6600f77a721311cbf27719e528
|
[
"Apache-2.0"
] | null | null | null |
from server.controller.app import app as webapp

# import Flask app so that it can be launched with gunicorn
# (presumably via "gunicorn run:app" -- confirm against deployment config)
app = webapp
| 30.25
| 59
| 0.793388
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 59
| 0.487603
|
0c6c60baa3e34ba265cfea8fd4ef73ba5f9cccb2
| 383
|
py
|
Python
|
tests/perf/test-prop-write.py
|
wenq1/duktape
|
5ed3eee19b291f3b3de0b212cc62c0aba0ab4ecb
|
[
"MIT"
] | 4,268
|
2015-01-01T17:33:40.000Z
|
2022-03-31T17:53:31.000Z
|
tests/perf/test-prop-write.py
|
KiraanRK/esp32-duktape
|
1b7fbcb8bd6bfc346d92df30ec099df7f13b03aa
|
[
"MIT"
] | 1,667
|
2015-01-01T22:43:03.000Z
|
2022-02-23T22:27:19.000Z
|
tests/perf/test-prop-write.py
|
KiraanRK/esp32-duktape
|
1b7fbcb8bd6bfc346d92df30ec099df7f13b03aa
|
[
"MIT"
] | 565
|
2015-01-08T14:15:28.000Z
|
2022-03-31T16:29:31.000Z
|
def test():
    # Micro-benchmark: repeatedly overwrite an existing key ('foo') of a
    # dict that also carries a few other keys. The ten identical
    # assignments per iteration are a manually unrolled loop that reduces
    # loop overhead relative to the measured writes -- do not deduplicate.
    obj = { 'xxx1': 1, 'xxx2': 2, 'xxx3': 4, 'xxx4': 4, 'foo': 123 }
    i = 0
    while i < 1e7:
        obj['foo'] = 234
        obj['foo'] = 234
        obj['foo'] = 234
        obj['foo'] = 234
        obj['foo'] = 234
        obj['foo'] = 234
        obj['foo'] = 234
        obj['foo'] = 234
        obj['foo'] = 234
        obj['foo'] = 234
        i += 1

# Run the benchmark at import time (script-style entry point).
test()
| 21.277778
| 68
| 0.373368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 79
| 0.206266
|
0c6c952788acdca410a606d2447a82cf6396e05a
| 3,563
|
py
|
Python
|
clickhouse_driver/numpy/result.py
|
fasttrack-solutions/clickhouse-driver
|
676dfb09f74b8b55bfecaedbe70ddc971e1badd7
|
[
"MIT"
] | 823
|
2017-05-16T15:30:15.000Z
|
2022-03-31T08:39:04.000Z
|
clickhouse_driver/numpy/result.py
|
fasttrack-solutions/clickhouse-driver
|
676dfb09f74b8b55bfecaedbe70ddc971e1badd7
|
[
"MIT"
] | 277
|
2017-07-11T11:35:34.000Z
|
2022-03-08T06:52:09.000Z
|
clickhouse_driver/numpy/result.py
|
fasttrack-solutions/clickhouse-driver
|
676dfb09f74b8b55bfecaedbe70ddc971e1badd7
|
[
"MIT"
] | 175
|
2017-10-11T08:41:12.000Z
|
2022-03-22T03:59:35.000Z
|
from itertools import chain
import numpy as np
import pandas as pd
from pandas.api.types import union_categoricals
from ..progress import Progress
from ..result import QueryResult
class NumpyQueryResult(QueryResult):
    """
    Stores query result from multiple blocks as numpy arrays.
    """

    def store(self, packet):
        # Ignore packets that carry no data block.
        block = getattr(packet, 'block', None)
        if block is None:
            return

        if block.num_rows:
            # Data block: keep columns or rows depending on orientation.
            if self.columnar:
                self.data.append(block.get_columns())
            else:
                self.data.extend(block.get_rows())
        elif not self.columns_with_types:
            # Header block contains no rows. Pick columns from it.
            self.columns_with_types = block.columns_with_types

    def get_result(self):
        """
        :return: stored query result.
        """
        for packet in self.packet_generator:
            self.store(packet)

        if not self.columnar:
            data = self.data
        else:
            def _merge(chunks):
                # Concatenate one column's chunks, preserving its type.
                head = chunks[0]
                if isinstance(head, np.ndarray):
                    return np.concatenate(chunks)
                if isinstance(head, pd.Categorical):
                    return union_categoricals(chunks)
                return tuple(chain.from_iterable(chunks))

            # Transpose block-major storage into a list of whole columns.
            data = [_merge(chunks) for chunks in zip(*self.data)]

        if self.with_column_types:
            return data, self.columns_with_types
        return data
class NumpyProgressQueryResult(NumpyQueryResult):
    """
    Stores query result and progress information from multiple blocks.
    Provides iteration over query progress.
    """

    def __init__(self, *args, **kwargs):
        # Accumulates rows/total_rows across all progress packets.
        self.progress_totals = Progress()
        super(NumpyProgressQueryResult, self).__init__(*args, **kwargs)

    def __iter__(self):
        return self

    def __next__(self):
        # Yield (rows, total_rows) for each progress packet; data blocks
        # encountered in between are stored for the final result.
        # StopIteration propagates from the exhausted packet generator.
        while True:
            packet = next(self.packet_generator)
            progress_packet = getattr(packet, 'progress', None)
            if progress_packet:
                self.progress_totals.increment(progress_packet)
                return (
                    self.progress_totals.rows, self.progress_totals.total_rows
                )
            else:
                self.store(packet)

    def get_result(self):
        # Read all progress packets.
        for _ in self:
            pass

        return super(NumpyProgressQueryResult, self).get_result()
class NumpyIterQueryResult(object):
    """
    Provides iteration over returned data by chunks (streaming by chunks).
    """

    def __init__(
            self, packet_generator,
            with_column_types=False):
        self.packet_generator = packet_generator
        self.with_column_types = with_column_types
        # Column metadata is only prepended once, on the first data block.
        self.first_block = True
        super(NumpyIterQueryResult, self).__init__()

    def __iter__(self):
        return self

    def __next__(self):
        packet = next(self.packet_generator)
        block = getattr(packet, 'block', None)
        if block is None:
            # Non-data packet: yield an empty chunk.
            return []

        rows = block.get_rows()
        if not (self.first_block and self.with_column_types):
            return rows
        # First data block with metadata requested: prepend column info.
        self.first_block = False
        return [block.columns_with_types] + list(rows)
| 28.733871
| 78
| 0.596969
| 3,372
| 0.946393
| 0
| 0
| 0
| 0
| 0
| 0
| 543
| 0.1524
|
0c6dab3b29d248c78a200aec1e3449a5aeb04604
| 33,383
|
py
|
Python
|
sandbox/riskModelsResultsEval.py
|
danbirks/PredictCode
|
b4d7010d13706c771ba57437e9c7589e5c94329b
|
[
"Artistic-2.0"
] | null | null | null |
sandbox/riskModelsResultsEval.py
|
danbirks/PredictCode
|
b4d7010d13706c771ba57437e9c7589e5c94329b
|
[
"Artistic-2.0"
] | null | null | null |
sandbox/riskModelsResultsEval.py
|
danbirks/PredictCode
|
b4d7010d13706c771ba57437e9c7589e5c94329b
|
[
"Artistic-2.0"
] | 2
|
2020-01-28T23:02:54.000Z
|
2020-02-03T16:04:38.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 15 10:22:47 2019
@author: lawdfo
Purpose:
Read in the csv results file generated by (e.g.) riskModelsParamSweep.py
and report back some useful statistics.
"""
# Some fairly standard modules
import os, csv, lzma
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import descartes
from itertools import product
from collections import Counter, defaultdict
import datetime
import csv
import random
import time
from copy import deepcopy
import statistics
# The geopandas module does not come standard with anaconda,
# so you'll need to run the anaconda prompt as an administrator
# and install it via "conda install -c conda-forge geopandas".
# That installation will include pyproj and shapely automatically.
# These are useful modules for plotting geospatial data.
import geopandas as gpd
import pyproj
import shapely.geometry
# These modules are useful for tracking where modules are
# imported from, e.g., to check we're using our local edited
# versions of open_cp scripts.
import sys
import inspect
import importlib
# In order to use our local edited versions of open_cp
# scripts, we insert the parent directory of the current
# file ("..") at the start of our sys.path here.
sys.path.insert(0, os.path.abspath(".."))
# Elements from PredictCode's custom "open_cp" package
import open_cp
"""
import open_cp.geometry
import open_cp.plot
import open_cp.sources.chicago as chicago
import open_cp.retrohotspot as retro
import open_cp.prohotspot as phs
import open_cp.knox
"""
# Load custom functions that make dealing with datetime and timedelta easier
from crimeRiskTimeTools import generateDateRange, \
generateLaterDate, \
generateEarlierDate, \
getTimedPointsInTimeRange, \
getSixDigitDate, \
_day
"""
Expected data format of input CSV file, by column:
Header name Type Typical contents
dataset str Chicago
event_types str BURGLARY
cell_width int 100
eval_date np.datetime64 2016-03-01
train_len str 8W
test_len str 1D
coverage_rate float 0.01/0.02/0.05/0.1
test_events int 3/2/5/etc
hit_count int 1/2/0/etc
hit_pct float 0.33333 etc
model str naivecount/phs/etc
rand_seed int
rhs_bandwidth int
phs_time_unit str 1 weeks
phs_time_band str 4 weeks
phs_dist_unit int 100
phs_dist_band int 400
phs_weight str linear
"""
# Column type casters for one row of the results CSV, in column order.
# Must stay in sync with the header layout documented above:
# dataset, event_types, cell_width, eval_date, train_len, test_len,
# coverage_rate, test_events, hit_count, hit_pct, model, rand_seed,
# rhs_bandwidth, phs_time_unit, phs_time_band, phs_dist_unit,
# phs_dist_band, phs_weight.
csv_data_types = [str, \
                  str, \
                  int, \
                  np.datetime64, \
                  str, \
                  str, \
                  float, \
                  int, \
                  int, \
                  float, \
                  str, \
                  int, \
                  int, \
                  str, \
                  str, \
                  int, \
                  int, \
                  str]
def splitDataByTimespans(datalist, timespan, dateinfoname="eval_date"):
    """
    Group result rows into consecutive buckets of length *timespan*.

    Args:
        datalist (list): result dicts, each carrying a date under
                         *dateinfoname*.
        timespan (str): bucket size (e.g. "1M"), as understood by
                        generateLaterDate/generateDateRange.
        dateinfoname (str): key holding each row's date.

    Returns:
        defaultdict(list): bucket start date -> rows falling in that bucket.
    """
    print("Performing splitDataByTimespans")
    all_dates = sorted({row[dateinfoname] for row in datalist})
    span_starts = generateDateRange(start=all_dates[0],
                                    end=all_dates[-1] + _day,
                                    step=timespan)
    grouped = defaultdict(list)
    for row in datalist:
        when = row[dateinfoname]
        for span_start in span_starts:
            # First bucket whose [start, start+timespan) window contains
            # the row's date wins.
            if span_start <= when < generateLaterDate(span_start, timespan):
                grouped[span_start].append(row)
                break
    print("Ending splitDataByTimespans")
    return grouped
"""
Each element of output should have this info:
earliest test date of range
time band
dist band
avg hit rate
"""
def getPhsSpanStats(datalist, timespan):
    """
    Summarise PHS hit rates per timespan bucket.

    Returns:
        list of tuples (bucket start date, time band, dist band,
        average hit rate).
    """
    print("Performing getPhsSpanStats")
    grouped = splitDataByTimespans(datalist, timespan)
    summary = []
    for span_start, span_rows in grouped.items():
        rates = getPhsHitRates(span_rows)
        summary.extend(
            (span_start, bands[0], bands[1], rates[bands]["avg_hit_rate"])
            for bands in rates
        )
    print("Ending getPhsSpanStats")
    return summary
def getModelSpanStats(datalist, timespan, model):
    """
    Per-timespan statistics for one model type.

    For "phs" this delegates to getPhsSpanStats; for the other models it
    returns a list of (bucket start date, average hit rate) tuples.
    Exits the process for an unrecognised model name.
    """
    print("Performing getModelSpanStats")
    if model not in ("random", "naive", "ideal", "phs"):
        print("model required for getModelSpanStats")
        sys.exit(1)
    if model == "phs":
        return getPhsSpanStats(datalist, timespan)
    stats = [
        (span_start, getAvgHitRates(span_rows))
        for span_start, span_rows in splitDataByTimespans(datalist, timespan).items()
    ]
    print("Ending getModelSpanStats")
    return stats
"""
Each element of output should have this info:
coverage
earliest test date of range
time band
dist band
avg hit rate
"""
def writeModelSummaryCsv(datalists_by_cov, timespan, model, csvname = "temp.csv"):
    """
    Write per-coverage, per-timespan rate summaries for *model* to a CSV.

    Each output row is: coverage rate, followed by the fields of one
    summary entry from getModelSpanStats.

    Args:
        datalists_by_cov (dict): coverage rate -> list of result dicts
        timespan (str): bucket size passed through to getModelSpanStats
        model (str): one of "random"/"naive"/"ideal"/"phs"
        csvname (str): output file path
    """
    print("Performing writeModelSummaryCsv")
    rate_summaries_by_cov = dict()
    for cov, datalist in datalists_by_cov.items():
        rate_summaries_by_cov[cov] = getModelSpanStats(datalist, timespan, model)
    with open(csvname,"w") as csvf:
        writer = csv.writer(csvf, delimiter=",", lineterminator="\n")
        for cov, rate_summary in rate_summaries_by_cov.items():
            for d in rate_summary:
                writer.writerow([cov] + list(d))
    print("Ending writeModelSummaryCsv")
    # NOTE(review): terminates the whole process after writing the CSV --
    # looks like debug scaffolding; confirm callers expect this before
    # reusing the function elsewhere.
    sys.exit(0)
def writePhsVariabilityCsv(datalists_by_cov, timespan, csvname = "temp.csv"):
    """
    Write variability statistics (mean, stdev, variance of hit rates) for
    each (coverage, time band, dist band) trio to a CSV.

    Args:
        datalists_by_cov (dict): coverage rate -> list of PHS result dicts
        timespan (str): bucket size passed through to getPhsSpanStats
        csvname (str): output file path
    """
    print("Performing writePhsVariabilityCsv")
    bp_rate_summaries_by_cov = dict()
    for cov, datalist in datalists_by_cov.items():
        bp_rate_summaries_by_cov[cov] = getPhsSpanStats(datalist, timespan)
    # Collect one hit-rate sample per timespan bucket for each
    # (coverage, time band, dist band) trio.
    rates_by_covtimedist = defaultdict(list)
    for cov, rate_summary in bp_rate_summaries_by_cov.items():
        for entry in rate_summary:
            rates_by_covtimedist[(cov, entry[1], entry[2])].append(entry[3])
    covtimedist_trios = sorted(rates_by_covtimedist)
    num_rates_list = [len(rates_by_covtimedist[x]) for x in covtimedist_trios]
    num_rates = num_rates_list[0]
    # Sanity check: every trio must be sampled over the same buckets.
    if not all([x==num_rates for x in num_rates_list]):
        print("Error! Not all (cov, time, dist) trios have same number of results!")
        print(num_rates_list)
        sys.exit(1)
    ratestats_by_covtimedist = dict()
    for covtimedist in covtimedist_trios:
        ratelist = rates_by_covtimedist[covtimedist]
        rate_avg = sum(ratelist)/num_rates
        rate_std = statistics.stdev(ratelist)
        rate_var = statistics.variance(ratelist)
        ratestats_by_covtimedist[covtimedist] = (rate_avg, rate_std, rate_var)
    with open(csvname,"w") as csvf:
        writer = csv.writer(csvf, delimiter=",", lineterminator="\n")
        for covtimedist, ratestats in ratestats_by_covtimedist.items():
            writer.writerow(list(covtimedist) + list(ratestats))
            print(" ".join([str(x) for x in list(covtimedist) + list(ratestats)]))
    print("Ending writePhsVariabilityCsv")
    # NOTE(review): terminates the whole process after writing -- looks
    # like debug scaffolding; confirm before reuse.
    sys.exit(0)
# datalist = list of results for PHS
# timespan = how frequently to check scores. Do we look at the top n models
# from each day, or averaged over each month, etc
# topnum = how many of the top models we consider successful. Top 10? Top 1?
def checkPhsConsistency(datalist, timespan, topnum):
    """
    Inspect how consistent the best-performing PHS bandwidth pairs are
    across successive timespan buckets.

    Args:
        datalist (list): PHS result dicts (one per CSV row).
        timespan (str): how frequently to check scores, e.g. whether we
                        look at the top models per day, per month, etc.
        topnum (int): how many of the top models count as "successful";
                      entries tied with the last kept one are included too.
    """
    print("Performing checkPhsConsistency")
    data_by_daterange = splitDataByTimespans(datalist, timespan)
    best_avgrate_bps = []
    for daterange in data_by_daterange:
        # BUG FIX: data_by_daterange is a dict and must be indexed, not
        # called -- the previous code raised TypeError here.
        rate_info = getPhsHitRates(data_by_daterange[daterange])
        d_sort_avgrate = sorted(rate_info.items(),
                                key=lambda ri: ri[1]["avg_hit_rate"],
                                reverse=True)
        best_bp_avgrate = d_sort_avgrate[:topnum]
        # Keep any further entries tied with the last kept hit rate.
        for d in d_sort_avgrate[topnum:]:
            if d[1]["avg_hit_rate"] < best_bp_avgrate[-1][1]["avg_hit_rate"]:
                break
            best_bp_avgrate.append(d)
        best_avgrate_bps.append(best_bp_avgrate)
    # Pass a real list: findMinimalPhsBandCovering indexes into this
    # sequence, which a dict keys view does not support.
    findMinimalPhsBandCovering(best_avgrate_bps, list(data_by_daterange.keys()))
def findMinimalPhsBandCovering(best_bps, daterange_list):
    """
    Greedily partition consecutive timespans into maximal runs that share
    at least one common top bandwidth pair, and print the runs found.

    Args:
        best_bps (list): per-timespan lists of (bandwidth pair, stats)
                         tuples, ordered like *daterange_list*.
        daterange_list (sequence): bucket start dates, indexable, aligned
                                   with *best_bps*.
    """
    print("Performing findMinimalPhsBandCovering")
    covered_span_list = []
    covered_span_dates = [[]]
    covering_bps = []
    # Running intersection of the top bandwidth pairs over the current run.
    bp_set = set([x[0] for x in best_bps[0]])
    running_span_count = 0
    for i, bp_info in enumerate(best_bps):
        new_bp_set = bp_set & set(x[0] for x in bp_info)
        # If set is 0, we can no longer cover current time with top choices
        if len(new_bp_set) == 0:
            # Close out the current run and start a new one at this span.
            covered_span_list.append(int(running_span_count+0))
            running_span_count = 1
            covered_span_dates.append([])
            covering_bps.append(deepcopy(bp_set))
            bp_set = set(x[0] for x in bp_info)
        else:
            bp_set = new_bp_set
            running_span_count += 1
        covered_span_dates[-1].append(daterange_list[i])
    # Close out the final run.
    covered_span_list.append(int(running_span_count+0))
    covering_bps.append(deepcopy(bp_set))
    print(covered_span_list)
    print(covered_span_dates)
    print(covering_bps)
    sorted_covered_span_list = sorted(covered_span_list)
    # Shortest and longest run lengths.
    print(sorted_covered_span_list[0])
    print(sorted_covered_span_list[-1])
    pass
"""
getPhsHitRates
Input: "datalist" = list where each entry is a dictionary containing the
information from a line of the csv results file (casted
as the appropriate data type) as well as "param_pair"
which is a tuple of the time and dist bandwidths.
Note: Ideally this datalist is a subset of the full csv data, so that
hit rates ar calculated over smaller timespans, e.g. monthly
Output: "info_by_band_pair" = dict that maps bandwidth pairs ("bp") to:
"bands": same as key; can be useful if just grabbing values
"num_tests": Number of experiments/tests/evaluations performed.
All bp's within a datalist fed into this function should end
up with the same number of tests -- I can't think of a reason
why this wouldn't happen. However, note that this number MAY
change across multiple runs of this function with different
data subsets. For example, maybe you calculate over every
month, but months have different numbers of days.
"total_events": Total number of events (i.e. crimes) in the data.
This is calculated by adding the number for the first time
each date is witnessed. So again, it's important that all bp's
are tested on all the same days.
"total_hits": Total number of hits achieved by the bp's model.
"total_rates": Sum of all daily(?) hit rates. This number is
essentially useless on its own, but used for calculating avg.
"avg_hit_rate": Average of all daily hit rates, calculated as
total_rates/num_tests
("overall_hit_rate"): A different average hit rate, being the total
number of hits divided by the total number of events. This
was removed from use (commented out) once we decided this
metric was less useful than avg_hit_rate, since this could be
swayed by a generally poor model that rarely performs extremely
well.
"""
def getPhsHitRates(datalist):
    """
    Aggregate hit statistics per PHS bandwidth pair.

    Args:
        datalist (list): result dicts, each carrying "param_pair",
                         "test_events" and "hit_count". Ideally a subset
                         of the full CSV data (e.g. one month) so rates
                         are computed over smaller timespans.

    Returns:
        dict: bandwidth pair -> {"bands", "num_tests", "total_events",
              "total_hits", "total_rates", "avg_hit_rate"}, where
              avg_hit_rate = total_rates / num_tests and a test with zero
              events contributes a rate of 0.
    """
    print("Performing getPhsHitRates")
    band_pairs = sorted({entry["param_pair"] for entry in datalist})
    stats = {
        bp: {"bands": bp, "num_tests": 0, "total_events": 0,
             "total_hits": 0, "total_rates": 0.0}
        for bp in band_pairs
    }
    # Running counts for each bandwidth pair.
    for entry in datalist:
        record = stats[entry["param_pair"]]
        record["num_tests"] += 1
        record["total_events"] += entry["test_events"]
        record["total_hits"] += entry["hit_count"]
        if entry["test_events"] > 0:
            record["total_rates"] += entry["hit_count"] / entry["test_events"]
    # All bandwidth pairs must have been evaluated the same number of times.
    test_counts = [stats[bp]["num_tests"] for bp in band_pairs]
    if len(set(test_counts)) != 1:
        print("Error! Some bandwidth pairs have different numbers of tests!")
        print(Counter(test_counts))
        sys.exit(1)
    shared_count = test_counts[0]
    for bp in band_pairs:
        stats[bp]["avg_hit_rate"] = stats[bp]["total_rates"] / shared_count
    return stats
# Note: 0 hits for 0 events gets counted as a hit rate of 0.
# Perhaps it should be discarded instead?
# But then what if the entire span has 0 events?
def getAvgHitRates(datalist):
    """
    Average the per-result hit rates over *datalist*.

    A result with zero test events contributes a rate of 0 but still
    counts toward the denominator. Also prints each result and its
    individual hit rate (debug output).

    Args:
        datalist (list): result dicts with "hit_count" and "test_events".

    Returns:
        float: mean hit rate.
    """
    print("Performing getAvgHitRates")
    num_tests = len(datalist)
    total_rates = 0.0
    for result in datalist:
        if result["test_events"] != 0:
            total_rates += result["hit_count"] / result["test_events"]
    # Debug dump of each result and its individual hit rate.
    for result in datalist:
        print(result)
        cells = [result["hit_count"], result["test_events"]]
        if cells[1] == 0:
            cells.append(0)
        else:
            cells.append(cells[0] / cells[1])
        print("\t".join(str(c) for c in cells))
    print(total_rates/num_tests)
    return total_rates/num_tests
"""
getDataByCovRate
Given a path to csv results from running risk models,
return a dictionary where keys are coverage rates and
values are the rows of info with that coverage from the csv.
"""
def getDataByCovRate(results_full_path,
                     header_types = csv_data_types,
                     earliest_eval_date = None,
                     latest_eval_date = None,
                     ):
    """
    Read a risk-model results CSV and organise its rows by coverage rate
    and model name.

    Args:
        results_full_path (str): path to the results CSV.
        header_types (list): one caster per column (see csv_data_types).
        earliest_eval_date: optional inclusive lower bound on eval_date.
        latest_eval_date: optional inclusive upper bound on eval_date.

    Returns:
        tuple: (coverage rate -> model name -> list of row dicts,
                list of eval dates seen, list of model names seen,
                sorted list of coverage rates).
    """
    # Keep track of total number of events (i.e., crimes)
    total_event_count = 0
    dates_seen = []
    model_param_names = []
    cov_rates = set()
    # Instantiate a mapping from coverage rate to {another mapping of results}.
    # That other mapping will be from model to results.
    # And, those results will be a list of mappings, each entry in the list being
    # a different row from the csv results
    datadicts_by_cov_rate = defaultdict(lambda: defaultdict(list))
    # Open csv output and start reading it
    with open(results_full_path, newline="") as f:
        reader = csv.reader(f)
        # Obtain column names from header in first line
        header = next(reader, None)
        # Read each line of data
        for dataline in reader:
            # Instantiate a map from col name to data, for this line
            dataline_dict = dict()
            # All data is currently in string form.
            # Use header_types to cast the data appropriately.
            for i,d in enumerate(dataline):
                # Default is empty string
                casted_data = ""
                # Things like int("") don't work, so we catch that here
                if d != "":
                    casted_data = header_types[i](d)
                # Transform data into str/int/float/datetime64 before storing it
                dataline_dict[header[i]] = casted_data
            # If date is outside of desired range, skip this row
            dataline_date = dataline_dict["eval_date"]
            if earliest_eval_date != None and dataline_date < earliest_eval_date:
                continue
            if latest_eval_date != None and latest_eval_date < dataline_date:
                continue
            # Count each date's events only once (first row seen for it).
            if dataline_date not in dates_seen:
                total_event_count += dataline_dict["test_events"]
                dates_seen.append(dataline_date)
            # Grab coverage and model, since we'll use those a lot
            dataline_cov = dataline_dict["coverage_rate"]
            if dataline_cov not in cov_rates:
                cov_rates.add(dataline_cov)
            dataline_model = dataline_dict["model"]
            # Grab the bandwidths for PHS results, store them as "param_pair"
            if dataline_model == "phs":
                # phs_time_band looks like e.g. "4 weeks"/"4W"; the trailing
                # unit character is stripped before casting to int.
                time_band = int(dataline_dict["phs_time_band"][:-1])
                dist_band = dataline_dict["phs_dist_band"]
                dataline_dict["param_pair"] = (time_band, dist_band)
            # Build a unique display name per model parameterisation.
            model_param_name = dataline_model
            if dataline_model == "random":
                model_param_name += "-" + str(dataline_dict["rand_seed"])
            elif dataline_model == "phs":
                model_param_name += "-" + "-".join([str(x) for x in dataline_dict["param_pair"]])
            if model_param_name not in model_param_names:
                model_param_names.append(model_param_name)
            # Store dict so they're first sorted by coverage then by model type
            datadicts_by_cov_rate[dataline_cov][model_param_name].append(dataline_dict)
    return datadicts_by_cov_rate, dates_seen, model_param_names, sorted(cov_rates)
def graphHitRatesOverTime(results_full_path):
    """Plot each model's hit rate over time, one figure per coverage rate.

    Args:
        results_full_path: path to the results CSV consumed by
            getDataByCovRate().

    Raises:
        SystemExit: with status 1 if any model does not have exactly one
            result per evaluation date (misaligned input data).
    """
    datadicts_by_cov_rate, exp_dates, model_names, cov_rates = \
        getDataByCovRate(results_full_path)
    # These counts are invariant across coverage rates, so compute them once.
    num_dates = len(exp_dates)
    num_models = len(model_names)
    for cov_rate in cov_rates:
        # One figure per coverage rate, with all models overlaid.
        print("Declaring figure for graphHitRatesOverTime...")
        fig, ax = plt.subplots(figsize=(12, 6))
        names_for_legend = []
        cov_results_all_models = datadicts_by_cov_rate[cov_rate]
        # Sanity check: every model must have one result per evaluation date,
        # otherwise the result matrix below would be misaligned.
        for mn in model_names:
            if len(cov_results_all_models[mn]) != num_dates:
                print("Error!")
                print(f"Model: {mn}")
                print(f"Expected number of experiments: {num_dates}")
                print(f"Found number of experiments: {len(cov_results_all_models[mn])}")
                # Bug fix: exit nonzero on this error path (was sys.exit(0),
                # which signalled success to the shell).
                sys.exit(1)
        # Rows = models, columns = evaluation dates, values = hit percentage.
        result_matrix = np.zeros((num_models, num_dates))
        for mn_index, mn in enumerate(model_names):
            names_for_legend.append(mn)
            model_results = cov_results_all_models[mn]
            for mr_index, mr in enumerate(model_results):
                result_matrix[mn_index, mr_index] = mr["hit_pct"]
        # One line per model against the evaluation dates.
        for row in result_matrix:
            ax.plot(exp_dates, row)
        ax.legend(names_for_legend)
        ax.tick_params(axis='x', rotation=90)
        ax.set_title(f"Hit rates over time, coverage {cov_rate}")
    # Dead commented-out experimentation code that used to live here has
    # been removed.
    return
"""
Copied snippets from riskModelsCompare
Still working out this section...
"""
def graphCoverageVsHitRate(hit_rates_dict, model_runs_list, model_names):
    """Plot hit rate as a function of coverage fraction for every model run.

    Args:
        hit_rates_dict: model name -> list of per-run hit-rate sequences,
            one value per coverage step.
        model_runs_list: model name -> list of run labels for the legend.
        model_names: ordering of models to draw.
    """
    # Pair up each run label with its hit-rate curve; only the count is
    # actually reported below.
    run_curve_pairs = [pair
                       for name in model_names
                       for pair in zip(model_runs_list[name],
                                       hit_rates_dict[name])]
    print(len(run_curve_pairs))
    # Declare the figure.
    print("Declaring figure for graphCoverageVsHitRate...")
    fig, ax = plt.subplots(figsize=(12, 6))
    legend_entries = []
    # The x axis runs from 0 to 1 in as many steps as a curve has points;
    # curve length is taken from the first run of the first model.
    num_points = len(hit_rates_dict[model_names[0]][0])
    coverage_axis = np.linspace(0, 1, num_points)
    print(num_points)
    for name in model_names:
        for curve in hit_rates_dict[name]:
            ax.plot(coverage_axis, curve)
        legend_entries.extend(model_runs_list[name])
    ax.legend(legend_entries)
    return
"""
result_matrix = np.zeros((len(all_exp_results[0]), len(all_exp_results)))
for exp_num, exp in enumerate(all_exp_results):
for model_num, model_result in enumerate(exp):
result_matrix[model_num, exp_num] = model_result[0][coverage_cell_index]
for row_num, row in enumerate(result_matrix):
ax.plot(test_data_dates, row + (results_count_offset * row_num) )
names_for_legend.append(all_exp_results[0][row_num][1])
#ax.legend(names_for_legend)
ax.tick_params(axis='x', rotation=90)
# one of the orig sections from riskModelsCompare
# Declare figure
print("Declaring figure...")
fig, ax = plt.subplots(figsize=(12,6))
names_for_legend = []
result_matrix = np.zeros((len(all_exp_results[0]), len(all_exp_results)))
for exp_num, exp in enumerate(all_exp_results):
for model_num, model_result in enumerate(exp):
result_matrix[model_num, exp_num] = model_result[0][coverage_cell_index]
for row_num, row in enumerate(result_matrix):
ax.plot(test_data_dates, row + (results_count_offset * row_num) )
names_for_legend.append(all_exp_results[0][row_num][1])
#ax.legend(names_for_legend)
ax.tick_params(axis='x', rotation=90)
# Declare figure
print("Declaring figure...")
fig, ax = plt.subplots(figsize=(12,6))
names_for_legend = []
#xcoords = test_data_dates
coverage_rate = 0.10
coverage_cell_index = int(num_cells_region * coverage_rate)-1
print("reg {}".format(num_cells_region))
print("cov {}".format(coverage_rate))
print("cci {}".format(coverage_cell_index))
result_matrix = np.zeros((len(all_exp_results[0]), len(all_exp_results)))
for exp_num, exp in enumerate(all_exp_results):
if test_data_counts[exp_num] == 0:
continue
for model_num, model_result in enumerate(exp):
result_matrix[model_num, exp_num] = \
model_result[0][coverage_cell_index]/test_data_counts[exp_num]
for row_num, row in enumerate(result_matrix):
ax.plot(test_data_dates, row + (results_rate_offset * row_num) )
names_for_legend.append(all_exp_results[0][row_num][1])
#ax.legend(names_for_legend)
ax.tick_params(axis='x', rotation=90)
"""
def main():
    """Load a results CSV and emit summary CSVs / per-model hit-rate tables.

    Control-flow note: each ``create_*`` section ends in ``sys.exit(0)``, so
    only the first section whose flag is True actually runs; the later
    sections and the per-coverage report are reached only when the earlier
    flags are turned off.
    """
    datadir = os.path.join("..", "..", "Data")
    results_fname = "results_190628_Chicago_130101_5Y_1D.csv"
    # Only include results of tests later OR EQUAL to this date.
    # NOTE(review): these two bounds are local variables and are not passed
    # to getDataByCovRate -- confirm the loader actually honors them.
    earliest_eval_date = np.datetime64("2013-01-01")
    # Only include results of tests earlier BUT NOT EQUAL to this date.
    latest_eval_date = None
    results_full_path = os.path.join(datadir, results_fname)
    # Bug fix: getDataByCovRate returns a 4-tuple (data dicts, dates seen,
    # model names, coverage rates). Previously the whole tuple was bound to
    # datadicts_by_cov_rate while a separate empty local set was used for
    # dates_seen, so num_dates was always 0 and sorted(dates_seen)[0] below
    # raised IndexError.
    datadicts_by_cov_rate, dates_seen, model_param_names, cov_rates = \
        getDataByCovRate(results_full_path)
    # Determine the number of evaluation dates in the data.
    # We expect this to equal the number of instances of random/naive/ideal
    # experiments, and also the number of phs experiments when multiplied by
    # the number of phs parameter combinations.
    num_dates = len(dates_seen)
    print(num_dates)
    earliest_date_seen = sorted(dates_seen)[0]
    latest_date_seen = sorted(dates_seen)[-1]
    print(earliest_date_seen)
    print(latest_date_seen)
    # Split out per-coverage result lists for the two models that get their
    # own summary CSVs.
    phsdicts_by_cov_rate = dict([(cov, d["phs"]) for cov, d in datadicts_by_cov_rate.items()])
    naivedicts_by_cov_rate = dict([(cov, d["naive"]) for cov, d in datadicts_by_cov_rate.items()])
    # --- Section 1: naive-model summary CSV (exits when enabled) ---
    create_naive_csv_summary = True
    if create_naive_csv_summary:
        timespan = "1M"
        date_today = datetime.date.today()
        date_today_str = getSixDigitDate(date_today)
        earliest_date_str = getSixDigitDate(earliest_date_seen)
        latest_date_str = getSixDigitDate(latest_date_seen)
        sumcsv_base = f"ratesummary_xsr_nai_{date_today_str}_{earliest_date_str}_{latest_date_str}_{timespan}.csv"
        sumcsvname = os.path.join(datadir, sumcsv_base)
        writeModelSummaryCsv(naivedicts_by_cov_rate, timespan, "naive", csvname=sumcsvname)
        sys.exit(0)
    # --- Section 2: phs-model summary CSV (exits when enabled) ---
    create_phs_csv_summary = False
    if create_phs_csv_summary:
        timespan = "1M"
        date_today = datetime.date.today()
        date_today_str = getSixDigitDate(date_today)
        earliest_date_str = getSixDigitDate(earliest_date_seen)
        latest_date_str = getSixDigitDate(latest_date_seen)
        phssumcsv_base = f"ratesummary_xsr_phs_{date_today_str}_{earliest_date_str}_{latest_date_str}_{timespan}.csv"
        phssumcsvname = os.path.join(datadir, phssumcsv_base)
        writeModelSummaryCsv(phsdicts_by_cov_rate, timespan, "phs", csvname=phssumcsvname)
        sys.exit(0)
    # --- Section 3: phs-model variability CSV (exits when enabled) ---
    create_phs_csv_var = True
    if create_phs_csv_var:
        timespan = "1M"
        date_today = datetime.date.today()
        date_today_str = getSixDigitDate(date_today)
        earliest_date_str = getSixDigitDate(earliest_date_seen)
        latest_date_str = getSixDigitDate(latest_date_seen)
        phssumcsv_base = f"ratevar_{date_today_str}_{earliest_date_str}_{latest_date_str}_{timespan}.csv"
        phssumcsvname = os.path.join(datadir, phssumcsv_base)
        writePhsVariabilityCsv(phsdicts_by_cov_rate, timespan, phssumcsvname)
        sys.exit(0)
    # --- Section 4: per-coverage console report ---
    # NOTE(review): "naivecount" here vs the "naive" key used above -- one of
    # the two spellings is likely stale; confirm against the CSV's model
    # column.
    all_model_names = ["random", "naivecount", "ideal", "rhs", "phs"]
    basic_model_names = all_model_names[:3]
    for cov, datadicts_by_model in datadicts_by_cov_rate.items():
        print(f"Coverage rate: {cov}")
        # Get overall result summaries for basic models.
        for model_name in basic_model_names:
            if model_name in datadicts_by_model:
                # Obtain list of results for this (coverage, model) combo.
                datalist = datadicts_by_model[model_name]
                # Confirm that we have the expected number of results.
                if len(datalist) != num_dates:
                    print("Error! Unexpected number of results!")
                    print(f"Number expected per model: {num_dates}")
                    print(f"Number seen for model {model_name}: {len(datalist)}")
                    sys.exit(1)
                # ("Hit" = event in testing period within model's top cov% cells)
                # Total number of successful "hits".
                total_hit_count = sum([d["hit_count"] for d in datalist])
                # Total possible number of "hits".
                total_hit_poss = sum([d["test_events"] for d in datalist])
                # Overall hit rate.
                total_hit_rate = total_hit_count / total_hit_poss
                # Average of all individual hit rates.
                average_hit_rate = sum(d["hit_pct"] for d in datalist) / num_dates
                print(f"\tModel: {model_name}")
                print(f"\t\tAverage hit rate: {average_hit_rate:6.4f}")
        # Generate a table of results for all PHS bandwidth pairs tested.
        if "phs" in datadicts_by_model:
            phs_list = datadicts_by_model["phs"]
            # Header row: time bandwidths in weeks.
            print("0\t" + "\t".join([str(x) + " weeks" for x in range(1, 9)]))
            best_avg_dist_time = (-1, 0, 0)
            for dist_band in range(100, 1100, 100):
                toprint_list = [str(dist_band)]
                for time_band in range(1, 9):
                    hit_rate_sum = sum(d["hit_pct"] for d in phs_list if d["param_pair"] == (time_band, dist_band))
                    hit_rate_avg = hit_rate_sum / num_dates
                    toprint_list.append(f"{hit_rate_avg:6.4f}")
                    # Track the best-performing (distance, time) bandwidth pair.
                    if hit_rate_avg > best_avg_dist_time[0]:
                        best_avg_dist_time = (hit_rate_avg, dist_band, time_band)
                print("\t".join(toprint_list))
            print(f"Best average hit rate result: {best_avg_dist_time[0]:6.4f} {best_avg_dist_time[1:]}")
    sys.exit(0)

if __name__ == "__main__":
    main()
| 33.550754
| 130
| 0.623371
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 15,565
| 0.466255
|
0c6e32f7e7283b6370a0de49f39a51f43f1b82bb
| 1,280
|
py
|
Python
|
HIV model/others.py
|
omisolaidowu/HIV-story-telling
|
290fbb9549ff0177fb2224553575aa24813fdc6a
|
[
"Apache-2.0"
] | null | null | null |
HIV model/others.py
|
omisolaidowu/HIV-story-telling
|
290fbb9549ff0177fb2224553575aa24813fdc6a
|
[
"Apache-2.0"
] | null | null | null |
HIV model/others.py
|
omisolaidowu/HIV-story-telling
|
290fbb9549ff0177fb2224553575aa24813fdc6a
|
[
"Apache-2.0"
] | null | null | null |
'''
-*- coding: utf-8 -*-
Created on Fri Jan 17 12:34:15 2020
@author: Paul
'''
# Plots HIV-related indicators (2000-2018) from a local Excel workbook.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
# NOTE(review): absolute Windows path makes this script non-portable.
df=pd.read_excel(r'C:\Users\Paul\Desktop\Python projects\HIV_3.xlsx')
print(df)
# Quick data-quality check: missing-value count per column.
print(df.isnull().sum())
ax=plt.figure(figsize=(8, 8))
# X axis: one tick per year, as strings.
years=['2000','2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018']
x=years
print(x)
# Alias for pyplot.plot so two series can be drawn in one call below.
line=plt.plot
# NOTE(review): df[''] selects a column with an EMPTY name -- this raises
# KeyError unless the sheet really has an unnamed column. Judging by the
# legend below this was probably meant to be the youth HIV-prevalence
# column; confirm against HIV_3.xlsx.
y=df['']
z=df['Number_of_neonatal_deaths']
plt.xlabel('Changes over the years')
plt.ylabel('Occurence by population')
plt.xticks(rotation=90)
# Red line = y series, cyan line = z series.
line(x,y, 'r', x, z, 'cyan' )
import matplotlib.patches as mpatches
# Hand-built legend patches (the plot call above does not label its lines).
red_patch = mpatches.Patch(color='red', label='New HIV prevalence among youths')
cyan_patch = mpatches.Patch(color='cyan', label='Neonatal deaths')
#blue_patch = mpatches.Patch(color='yellow', label='sIgM+IgG positive')
#orange_patch = mpatches.Patch(color='orange', label='site4')
#brown_patch = mpatches.Patch(color='brown', label='site5')
#black_patch = mpatches.Patch(color='black', label='site6')
plt.legend(handles=[red_patch, cyan_patch], loc=(0, 1))
plt.show()
| 29.767442
| 158
| 0.683594
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 652
| 0.509375
|
0c71acdaf0692c5c50594e6de8fdef846a2f49a3
| 2,425
|
py
|
Python
|
iproxy/ip_select.py
|
tempoker/third
|
e220c79c8646a68e96a996e0fda1346b5f7de2e3
|
[
"MIT"
] | null | null | null |
iproxy/ip_select.py
|
tempoker/third
|
e220c79c8646a68e96a996e0fda1346b5f7de2e3
|
[
"MIT"
] | null | null | null |
iproxy/ip_select.py
|
tempoker/third
|
e220c79c8646a68e96a996e0fda1346b5f7de2e3
|
[
"MIT"
] | null | null | null |
#-*- conding:utf-8 -*-
#2018-02-02 11:04:54
import time,random,requests
from lxml import etree
from fake_useragent import UserAgent
import urllib.request as ur
from pymongo import MongoClient
from multiprocessing.dummy import Pool as ThreadPool
client = MongoClient('localhost',27017)
db = client['test']
ip_list = db['ip_list']
ua = UserAgent()
headers = {'User-Agent':ua.random}
def get_ip(url):
    """Scrape proxy ip:port pairs from one listing page and append them,
    one per line, to ip_list.txt. Failures are reported, never raised."""
    try:
        page = requests.get(url, headers=headers).text
        tree = etree.HTML(page)
        # The site marks every other table row with class="odd"; columns
        # 2 and 3 hold the address and the port.
        addrs = tree.xpath('//*[@class="odd"]/td[2]/text()')
        ports = tree.xpath('//*[@class="odd"]/td[3]/text()')
        with open('ip_list.txt', 'a+') as out:
            for i in range(len(addrs)):
                out.write(str(addrs[i]) + ':' + str(ports[i]) + '\n')
                print('第{num}条ip记录成功'.format(num=i + 1))
    except Exception as err:
        print('一不意外:{error}'.format(error=err))
def verif_ip(ip_port):
    """Test one proxy by fetching a known URL through it; working proxies
    are stored in MongoDB and appended to ip_userful.txt.

    Args:
        ip_port: one line from ip_list.txt, e.g. '1.2.3.4:8080\\n'.
    """
    # Bug fix: the old code sliced the port with [:-2], which drops the last
    # port digit whenever the line ends with a single '\n'. strip()/partition
    # parses the line robustly instead.
    host, _, port = ip_port.strip().partition(':')
    addr = '%s:%s' % (host, port)
    proxy = {'http': addr}
    print('正在测试的ip是:{ip}'.format(ip=proxy))
    try:
        # Bug fix: requests ignores urllib's install_opener/ProxyHandler, so
        # the old code never actually routed the request through the proxy.
        # Pass proxies= to requests directly; both schemes are mapped since
        # the test URL is https. A timeout prevents dead proxies hanging the
        # worker thread forever.
        test_url = 'https://www.baidu.com/'
        resp = requests.get(test_url, headers=headers,
                            proxies={'http': addr, 'https': addr},
                            timeout=10)
        time.sleep(random.random() * 10)
        if resp.status_code == 200:
            res = resp.text
            print('字节数为:{n}'.format(n=len(res)))
            db.ip_list.insert(proxy)
            with open('ip_userful.txt', 'a+') as fh:
                # Write exactly one newline per proxy (the raw line already
                # carried one, which used to produce blank lines).
                fh.write(ip_port.strip() + '\n')
        else:
            print('there is some problem!')
    except Exception as e:
        print('出了问题:' + str(e))
def main():
    # Orchestrates the scrape: fetch 12 listing pages concurrently, then
    # verify every collected proxy, also with a 12-thread pool.
    url_base = 'http://www.xicidaili.com/nn/{page}'
    urls = [url_base.format(page=i+1) for i in range(12)]
    pool = ThreadPool(12)
    pool.map(get_ip,urls)
    pool.close()
    pool.join()
    # Random pause before hammering the verification target.
    time.sleep(random.random()*10)
    # NOTE(review): ip_list.txt is opened in append mode by get_ip, so
    # repeated runs re-verify proxies collected on earlier runs as well.
    with open ('ip_list.txt','r') as fh:
        try:
            ip_ports = fh.readlines()
            pool = ThreadPool(12)
            pool.map(verif_ip,ip_ports)
            pool.close()
            pool.join()
            # Sequential fallback, kept for debugging:
            #for ip_port in ip_ports:
            #    verif_ip(ip_port)
        except Exception :
            # Best-effort: verification failures are silently ignored.
            pass

if __name__ == '__main__':
    main()
| 33.680556
| 81
| 0.547629
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 481
| 0.194187
|
0c726da14e32932a0096adc8ac65fa50f4a28ab1
| 5,359
|
py
|
Python
|
utils/node_data_snapshot.py
|
nigelbowden/wlanpi-chat-bot
|
cedab0e83d6e33d47f66c1a3be202564f95ac408
|
[
"MIT"
] | null | null | null |
utils/node_data_snapshot.py
|
nigelbowden/wlanpi-chat-bot
|
cedab0e83d6e33d47f66c1a3be202564f95ac408
|
[
"MIT"
] | null | null | null |
utils/node_data_snapshot.py
|
nigelbowden/wlanpi-chat-bot
|
cedab0e83d6e33d47f66c1a3be202564f95ac408
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Read & write snapshot of node status data dumped to local file system.
This data is used to detect things like sytem changes that may need to be reported.
"""
import json
import time
import logging
import os
import socket
import psutil
import netifaces as ni
logging.basicConfig(level=logging.INFO)
class_logger = logging.getLogger('DataSnapshot')
#class_logger.setLevel(logging.DEBUG)
class DataSnapshot(object):
    '''
    Read & write snapshot of status data dumped to file system.

    The snapshot (hostname + interface addresses) is compared across runs of
    node_status() to detect reboots and configuration changes worth
    reporting.
    '''
    def __init__(self, local_file="/tmp/snapshot.json",):
        # Path of the JSON snapshot file on the local file system.
        self.local_file = local_file
        # Last error message from a failed read/write, for callers to inspect.
        self.err_msg = ''
        # In-memory snapshot: {'interfaces': {...}, 'hostname': ...}.
        self.data = {}

    def write_data(self):
        """
        Write current data snapshot to file.

        Returns True on success, False on failure (err_msg is set).
        """
        class_logger.debug("Writing snapshot data to local file...")
        try:
            with open(self.local_file, 'w') as f:
                json.dump(self.data, f, indent=4)
            class_logger.debug("Data written OK.")
            return True
        except Exception as ex:
            self.err_msg = "Issue writing data file: {}".format(ex)
            class_logger.error(self.err_msg)
            return False

    def read_data(self):
        """
        Read data from snapshot file.

        Returns the parsed dict on success, False on failure (err_msg is set).
        """
        class_logger.debug("Reading snapshot data from local file...")
        try:
            with open(self.local_file, 'r') as f:
                data = json.load(f)
            class_logger.debug("Data read OK.")
            return data
        except Exception as ex:
            self.err_msg = "Issue reading data file: {}".format(ex)
            class_logger.error(self.err_msg)
            return False

    def check_snapshot_exists(self):
        """
        Return True if the snapshot file exists on the local file system.

        (Note: despite older docs, this does NOT create the file.)
        """
        if os.path.exists(self.local_file):
            return True
        return False

    def get_hostname(self):
        # Hostname of this node.
        return socket.gethostname()

    def get_uptime(self, format="string"):
        # Seconds since boot; format="raw" returns the float, otherwise a
        # human-readable "D days H:MM:SS" string.
        seconds = time.time() - psutil.boot_time()
        if format == "raw":
            return seconds
        min, sec = divmod(seconds, 60)
        hour, min = divmod(min, 60)
        day, hour = divmod(hour, 24)
        return "%d days %d:%02d:%02d" % (day, hour, min, sec)

    def get_interface_details(self):
        # Map of interface name -> first IPv4 address, skipping loopback and
        # interfaces with no IPv4 address.
        interfaces = ni.interfaces()
        AF_INET = 2
        interface_data = {}
        for interface in interfaces:
            # ignore loopback
            if interface == 'lo':
                continue
            # check if interface has IP address
            interface_details = ni.ifaddresses(interface)
            if AF_INET in interface_details.keys():
                address = interface_details[AF_INET][0]['addr']
                interface_data[interface] = address
        return interface_data

    def init_snapshot(self):
        """
        Build a fresh snapshot dict in memory (does not write it to disk).
        """
        interface_data = self.get_interface_details()
        hostname = self.get_hostname()
        self.data = {
            'interfaces': interface_data,
            'hostname': hostname,
        }
        return self.data

    def node_status(self):
        """
        Compare the stored snapshot with current state.

        Returns False when nothing changed, otherwise a list of status
        message lines (time, hostname, uptime, status, interfaces).
        """
        unit_status = ''
        # check if snapshot exists, create if not
        class_logger.debug("Checking if we already have a status snapshot...")
        if self.check_snapshot_exists():
            class_logger.debug("Snapshot exists, create new one and compare to original (any diff)?")
            # snapshot exists - compare existing snapshot with new snapshot
            if self.read_data() == self.init_snapshot():
                class_logger.debug("Snapshots match, no changes detected.")
                # nothing has changed, nothing to report
                return False
            else:
                # something has changed with the config, set status
                class_logger.debug("Snapshots do not match...config change detected")
                self.write_data()
                unit_status = "Config change"
        else:
            # unit must have freshly booted, create snapshot &
            # set status to rebooted
            class_logger.debug("Boot detected...create snapshot")
            self.init_snapshot()
            self.write_data()
            unit_status = "Rebooted"
            # If the box has actually been up a while, the missing snapshot
            # was not caused by a reboot -- report "Running" instead.
            if self.get_uptime(format="raw") > 120:
                unit_status = "Running"
        class_logger.debug("Create status data...")
        # get hostname
        hostname = self.data['hostname']
        # get uptime
        uptime = self.get_uptime()
        # Figure out the interface addresses:
        interfaces = self.data['interfaces']
        ip_addresses = []
        for name, ip in interfaces.items():
            # ignore loopback interface (already filtered upstream; kept as
            # a belt-and-braces check)
            if name == 'lo':
                continue
            ip_addresses.append(f"    {name}: {ip}")
        # Construct message to send
        now = time.ctime()
        messages = [
            f"Time: {now}",
            f"Hostname: {hostname}",
            f"Uptime: {uptime}",
            f"Unit status: {unit_status}",
            '\nInterfaces: '] + ip_addresses
        return messages
| 28.505319
| 101
| 0.560552
| 4,920
| 0.918082
| 0
| 0
| 0
| 0
| 0
| 0
| 1,751
| 0.32674
|
0c729770e81b2c493f1f39b13a63dd8b4098a0c6
| 104
|
py
|
Python
|
micropsi_core/world/island/__init__.py
|
brucepro/micropsi2
|
84c304d5339f25d112da5565fb2cd98c31524f94
|
[
"Apache-2.0"
] | 119
|
2015-01-23T11:24:58.000Z
|
2022-03-13T08:00:50.000Z
|
micropsi_core/world/island/__init__.py
|
Chediak/micropsi2
|
74a2642d20da9da1d64acc5e4c11aeabee192a27
|
[
"MIT"
] | 9
|
2015-02-18T20:44:58.000Z
|
2021-09-17T14:38:05.000Z
|
micropsi_core/world/island/__init__.py
|
Chediak/micropsi2
|
74a2642d20da9da1d64acc5e4c11aeabee192a27
|
[
"MIT"
] | 34
|
2015-04-01T20:48:49.000Z
|
2022-03-13T08:02:00.000Z
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""
"""
__author__ = 'joscha'
__date__ = '03.08.12'
| 9.454545
| 23
| 0.548077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 72
| 0.692308
|
0c72ac90b8c93f18a7c53036beae18e66ed1f754
| 273
|
py
|
Python
|
settings/DataLogger_HS2_settings.py
|
bopopescu/Lauecollect
|
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
|
[
"MIT"
] | null | null | null |
settings/DataLogger_HS2_settings.py
|
bopopescu/Lauecollect
|
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
|
[
"MIT"
] | 1
|
2019-10-22T21:28:31.000Z
|
2019-10-22T21:39:12.000Z
|
settings/DataLogger_HS2_settings.py
|
bopopescu/Lauecollect
|
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
|
[
"MIT"
] | 2
|
2019-06-06T15:06:46.000Z
|
2020-07-20T02:03:22.000Z
|
# Settings snapshot for the DataLogger application.
# NOTE(review): this file is exec'd/imported by the logger app, which is
# expected to provide `wx` in scope -- there is no import here.
# Counter channel being logged.
counter_name = 'I0_PIN'
# Main window size in pixels (wxPython).
Size = wx.Size(1007, 726)
# Destination log file on the network share.
logfile = '/net/helix/data/anfinrud_1502/Logfiles/I0_PIN-2.log'
# Number of samples averaged per logged point.
average_count = 1
# Y-axis display limits.
max_value = 11
min_value = 0
start_fraction = 0.401
# Outlier rejection is disabled; cutoff in standard deviations when enabled.
reject_outliers = False
outlier_cutoff = 2.5
show_statistics = True
# Plot window width in seconds (172800 s = 48 h).
time_window = 172800
| 22.75
| 63
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 61
| 0.223443
|
0c755c497d1f3421add1deb1d2e25dd35e398b8e
| 1,976
|
py
|
Python
|
register_printer/generators/uvm_generator/print_uvm.py
|
zhangyiant/RegisterPrinter
|
9d26032eeb486c7abc04d11794d07df2fd1dfe4e
|
[
"MIT"
] | null | null | null |
register_printer/generators/uvm_generator/print_uvm.py
|
zhangyiant/RegisterPrinter
|
9d26032eeb486c7abc04d11794d07df2fd1dfe4e
|
[
"MIT"
] | 4
|
2021-01-25T07:05:41.000Z
|
2021-01-25T12:07:37.000Z
|
register_printer/generators/uvm_generator/print_uvm.py
|
zhangyiant/RegisterPrinter
|
9d26032eeb486c7abc04d11794d07df2fd1dfe4e
|
[
"MIT"
] | null | null | null |
import re
import os
import os.path
import logging
from register_printer.template_loader import get_template
LOGGER = logging.getLogger(__name__)
def print_uvm_block(block, out_path):
    """Render the UVM register model for one block into
    <out_path>/<block_type>_reg_model.sv."""
    target = os.path.join(out_path,
                          block.block_type.lower() + "_reg_model.sv")
    # Delete any stale copy first (mirrors the sibling generators).
    if os.path.exists(target):
        os.remove(target)
    rendered = get_template("reg_model.sv").render({"block": block})
    with open(target, "w") as out_file:
        out_file.write(rendered)
    return
def print_uvm_sys(top_sys, out_path):
    """Render the system-level UVM register model into
    <out_path>/<top_sys_name>_reg_model.sv."""
    target = os.path.join(out_path,
                          top_sys.name.lower() + "_reg_model.sv")
    # Delete any stale copy first (mirrors the sibling generators).
    if os.path.exists(target):
        os.remove(target)
    rendered = get_template("sys_model.sv").render({"top_sys": top_sys})
    with open(target, "w") as out_file:
        out_file.write(rendered)
    return
def print_sv_defines(top_sys, out_path):
    """Render the SystemVerilog register-defines header into
    <out_path>/<top_sys_name>_register_defines.svh."""
    target = os.path.join(out_path,
                          top_sys.name.lower() + "_register_defines.svh")
    # Delete any stale copy first (mirrors the sibling generators).
    if os.path.exists(target):
        os.remove(target)
    rendered = get_template("register_defines.svh").render({"top_sys": top_sys})
    with open(target, "w") as out_file:
        out_file.write(rendered)
    return
def print_uvm(top_sys, output_path):
    """Generate the full UVM register model under <output_path>/regmodels.

    Writes one reg_model.sv per block, the system-level model, and the
    register-defines header.

    Args:
        top_sys: top-level system description with a ``blocks`` iterable.
        output_path: directory under which ``regmodels`` is created.
    """
    LOGGER.debug("Generating UVM register model...")
    out_dir = os.path.join(
        output_path,
        "regmodels")
    # Robustness fix: os.mkdir failed when output_path itself did not exist
    # (and raced if two generators ran concurrently); makedirs with
    # exist_ok=True handles both.
    os.makedirs(out_dir, exist_ok=True)
    for block in top_sys.blocks:
        print_uvm_block(block, out_dir)
    print_uvm_sys(top_sys, out_dir)
    print_sv_defines(top_sys, out_dir)
    LOGGER.debug("UVM register model generated in directory %s", out_dir)
    return
| 20.163265
| 73
| 0.624494
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 234
| 0.118421
|
0c76b0962d8629ce997e757dfe7bab0062971c5a
| 5,466
|
py
|
Python
|
django/blog/statistics.py
|
nekrassov01/docker-django-blog
|
0d3d8815e1f82dd356e9aa81e2b5d1d082cb961b
|
[
"MIT"
] | null | null | null |
django/blog/statistics.py
|
nekrassov01/docker-django-blog
|
0d3d8815e1f82dd356e9aa81e2b5d1d082cb961b
|
[
"MIT"
] | null | null | null |
django/blog/statistics.py
|
nekrassov01/docker-django-blog
|
0d3d8815e1f82dd356e9aa81e2b5d1d082cb961b
|
[
"MIT"
] | null | null | null |
"""
開発初期段階では、サイトレポートをVIEWに直書きしていた
パフォーマンス改善のため、バッチ処理に変更
このVIEWは今は使われていないが、一応残しておく
"""
from django.conf import settings
from django.db.models import Q, Count
from django.utils import html
from .models import Post, Category, Tag
from datetime import datetime
from janome.tokenizer import Tokenizer
from janome.analyzer import Analyzer
from janome.charfilter import *
from janome.tokenfilter import *
import itertools
""" データ可視化 """
def statistics(request):
    """Site-report view: aggregates public posts by category, tag and month,
    and builds a word-frequency dict (via janome) for a word cloud.

    Superseded by a batch job for performance (see module docstring); kept
    for reference.

    Bug fixed: ``render`` was used but never imported at module level, so
    calling this view raised NameError. It is imported locally here to keep
    the fix self-contained in this otherwise-retired view.
    """
    from django.shortcuts import render

    label = 'データ可視化'

    # --- Base querysets ---
    # Public posts, ascending by date, with category joined and tags prefetched.
    published_post = Post.objects.select_related('category').prefetch_related('tag').filter(is_public=True).order_by('published_at', 'created_at', 'updated_at')
    # Categories that have at least one public post.
    published_category = Category.objects.filter(post__is_public=True).order_by('index')
    # Tags that have at least one public post.
    published_tag = Tag.objects.filter(post__is_public=True).order_by('name')

    # --- Aggregation keyed on category ---
    category_list = list(published_category.distinct().values_list('name', flat=True))
    # Post count per category.
    category_post_list = list(published_category.annotate(count=Count('post')).values_list('count', flat=True))
    # Distinct public-tag count per category.
    category_tag_list = list(published_category.annotate(count=Count('tag', filter=Q(tag__post__is_public=True), distinct=True)).values_list('count', flat=True))
    category_post_dict = dict(zip(category_list, category_post_list))
    category_tag_dict = dict(zip(category_list, category_tag_list))

    # --- Aggregation keyed on tag ---
    tag_list = list(published_tag.distinct().values_list('name', flat=True))
    tag_post_list = list(published_tag.annotate(count=Count('post')).values_list('count', flat=True))
    # Post count per tag, sorted by count descending.
    tag_post_dict = dict(zip(tag_list, tag_post_list))
    tag_post_dict = sorted(tag_post_dict.items(), reverse=True, key=lambda x: x[1])
    tag_post_dict = dict(tag_post_dict)
    # Collapse the long tail of tags into a single "その他" ("other") bucket
    # so that at most display_length slices are shown.
    tag_post_dict_length = len(tag_post_dict)
    display_length = 20
    enable_length = display_length - 1
    if enable_length < 0:
        enable_length = 0
    if tag_post_dict_length > enable_length:
        dif_length = tag_post_dict_length - enable_length
        # When exactly one tag would be folded away, showing it directly
        # costs the same slot as an "other" bucket, so skip the fold.
        if dif_length != 1:
            key_list = list(tag_post_dict.keys())[:enable_length]
            value_list = list(tag_post_dict.values())[:enable_length]
            others_value = sum(list(tag_post_dict.values())[-dif_length:])
            tag_post_dict = dict(zip(key_list, value_list))
            tag_post_dict['その他'] = others_value

    # --- Aggregation keyed on publication month ---
    month_list = sorted(list(set(map(lambda date: date.strftime('%Y-%m'), list(published_post.values_list('published_at', flat=True))))))
    # WARNING: this nested loop issues one COUNT query per (month, category)
    # pair -- a known N+1 problem and the reason this view was replaced by a
    # batch job.
    list1, list2, list3 = [], [], []
    for date, category in itertools.product(month_list, category_list):
        yyyy, mm = date.split('-')
        month_post_count = published_post.filter(published_at__year=yyyy, published_at__month=mm).filter(category__name=category).annotate(count=Count('pk')).count()
        list1.append(date)
        list2.append(category)
        list3.append(month_post_count)
    # One (month, category, count) triple per combination.
    month_post_list = list(set(zip(list1, list2, list3)))

    # --- Word-cloud preprocessing ---
    # Titles, descriptions and HTML-stripped bodies of the most recent
    # posts, whitespace-normalized into one token string.
    post_count = 30
    post_titles = list(published_post.values_list('title', flat=True))[:post_count]
    post_titles = ' '.join(post_titles).split()
    post_descriptions = list(published_post.values_list('description', flat=True))[:post_count]
    post_descriptions = ' '.join(post_descriptions).split()
    post_texts = list(map(lambda lfc: lfc.replace('\r\n', '\n').replace('\r', '\n').replace('\n', ' '), list(published_post.values_list('text', flat=True))))[:post_count]
    post_texts = html.strip_tags(''.join(post_texts)).split()
    tokens = ' '.join(post_descriptions + post_titles + post_texts)

    # --- Morphological analysis (janome): build the word-cloud dict ---
    # Keep nouns ("名詞") only, lower-case them, and count occurrences.
    udic_path = settings.JANOME_DICTIONARY_PATH
    char_filters = [UnicodeNormalizeCharFilter(), RegexReplaceCharFilter('\,', '')]
    tokenizer = Tokenizer(udic=udic_path, udic_type='simpledic', udic_enc='utf8')
    token_filters = [CompoundNounFilter(), POSKeepFilter(['名詞']), LowerCaseFilter(), TokenCountFilter()]
    analyzer = Analyzer(char_filters, tokenizer, token_filters)
    word_dict = dict(analyzer.analyze(tokens))

    # Render the report page.
    return render(request, 'blog/blog_single_report.html', {
        'label': label,
        'category_list': category_list,
        'category_post_dict': category_post_dict,
        'category_tag_dict': category_tag_dict,
        'tag_post_dict': tag_post_dict,
        'month_list': month_list,
        'month_post_list': month_post_list,
        'word_dict': word_dict,
    })
| 38.492958
| 171
| 0.664654
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,444
| 0.373016
|
0c7833d91939bf6585a47068c0db482d70b2ffe6
| 131
|
py
|
Python
|
config/__init__.py
|
IMBINGO95/FairMOT
|
c496e911a89870a9b6988d93f80e680d01ee8afc
|
[
"MIT"
] | null | null | null |
config/__init__.py
|
IMBINGO95/FairMOT
|
c496e911a89870a9b6988d93f80e680d01ee8afc
|
[
"MIT"
] | null | null | null |
config/__init__.py
|
IMBINGO95/FairMOT
|
c496e911a89870a9b6988d93f80e680d01ee8afc
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2021/1/27 22:33
# @Author : BINGO
# @School: 浙江大学
# @Campany: 竺星
# @FileName: __init__.py.py
| 18.714286
| 28
| 0.572519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 137
| 0.958042
|
0c7993194c4b04f1fd68f17d7709a5f26721129f
| 8,502
|
py
|
Python
|
xnmt/expression_seqs.py
|
esalesky/xnmt-devel
|
90598d7cc8d22a7167acf88e29df81e130fc64fc
|
[
"Apache-2.0"
] | 4
|
2020-07-20T08:53:32.000Z
|
2020-11-25T21:21:23.000Z
|
xnmt/expression_seqs.py
|
esalesky/xnmt-devel
|
90598d7cc8d22a7167acf88e29df81e130fc64fc
|
[
"Apache-2.0"
] | null | null | null |
xnmt/expression_seqs.py
|
esalesky/xnmt-devel
|
90598d7cc8d22a7167acf88e29df81e130fc64fc
|
[
"Apache-2.0"
] | 1
|
2020-09-04T07:19:10.000Z
|
2020-09-04T07:19:10.000Z
|
from typing import Optional, Sequence
import dynet as dy
import numpy as np
from xnmt import batchers
class ExpressionSequence(object):
"""A class to represent a sequence of expressions.
Internal representation is either a list of expressions or a single tensor or both.
If necessary, both forms of representation are created from the other on demand.
"""
  def __init__(self, expr_list: Optional[Sequence[dy.Expression]] = None, expr_tensor: Optional[dy.Expression] = None,
               expr_transposed_tensor: Optional[dy.Expression] = None, mask: Optional['batchers.Mask'] = None) \
          -> None:
    """Constructor.

    Args:
      expr_list: a python list of expressions
      expr_tensor: a tensor where last dimension are the sequence items
      expr_transposed_tensor: a tensor in transposed form (first dimension are sequence items)
      mask: an optional mask object indicating what positions in a batched tensor should be masked

    Raises:
      ValueError: raises an exception if neither expr_list nor expr_tensor are given,
                  or if both have inconsistent length
    """
    # Any subset of the three representations may be supplied; missing ones
    # are created lazily on demand (see as_list / as_tensor below).
    self.expr_list = expr_list
    self.expr_tensor = expr_tensor
    self.expr_transposed_tensor = expr_transposed_tensor
    self.mask = mask
    if not (self.expr_list or self.expr_tensor or self.expr_transposed_tensor):
      raise ValueError("must provide expr_list or expr_tensor")
    # If both list and tensor are given, they must describe the same length.
    if self.expr_list and self.expr_tensor:
      if len(self.expr_list) != self.expr_tensor.dim()[0][-1]:
        raise ValueError("expr_list and expr_tensor must be of same length")
    if expr_list:
      if not isinstance(expr_list,list):
        raise ValueError("expr_list must be list, was:", type(expr_list))
      if not isinstance(expr_list[0],dy.Expression):
        raise ValueError("expr_list must contain dynet expressions, found:", type(expr_list[0]))
      # All items in the list must share the same dimensions.
      for e in expr_list[1:]:
        if e.dim() != expr_list[0].dim():
          raise AssertionError()
    if expr_tensor:
      if not isinstance(expr_tensor,dy.Expression): raise ValueError("expr_tensor must be dynet expression, was:", type(expr_tensor))
    if expr_transposed_tensor:
      if not isinstance(expr_transposed_tensor,dy.Expression): raise ValueError("expr_transposed_tensor must be dynet expression, was:", type(expr_transposed_tensor))
def __len__(self):
"""Return length.
Returns:
length of sequence
"""
if self.expr_list: return len(self.expr_list)
elif self.expr_tensor: return self.expr_tensor.dim()[0][-1]
else: return self.expr_transposed_tensor.dim()[0][0]
def __iter__(self):
"""Return iterator.
Returns:
iterator over the sequence; results in explicit conversion to list
"""
if self.expr_list is None:
self.expr_list = [self[i] for i in range(len(self))]
return iter(self.expr_list)
def __getitem__(self, key):
"""Get a single item.
Returns:
sequence item (expression); does not result in explicit conversion to list
"""
if self.expr_list: return self.expr_list[key]
else:
if key < 0: key += len(self)
if self.expr_tensor:
return dy.pick(self.expr_tensor, key, dim=len(self.expr_tensor.dim()[0])-1)
else:
return dy.pick(self.expr_transposed_tensor, key, dim=0)
def as_list(self):
"""Get a list.
Returns:
the whole sequence as a list with each element one of the embeddings.
"""
if self.expr_list is None:
self.expr_list = [self[i] for i in range(len(self))]
return self.expr_list
def has_list(self):
"""
Returns:
False if as_list() will result in creating additional expressions, True otherwise
"""
return self.expr_list is not None
def as_tensor(self):
"""Get a tensor.
Returns:
the whole sequence as a tensor expression where each column is one of the embeddings.
"""
if self.expr_tensor is None:
self.expr_tensor = dy.concatenate_cols(self.expr_list) if self.expr_list else dy.transpose(self.expr_transposed_tensor)
return self.expr_tensor
def has_tensor(self):
"""
Returns:
False if as_tensor() will result in creating additional expressions, True otherwise
"""
return self.expr_tensor is not None
def as_transposed_tensor(self):
"""Get a tensor.
Returns:
the whole sequence as a tensor expression where each row is one of the embeddings.
"""
if self.expr_transposed_tensor is None:
self.expr_transposed_tensor = dy.transpose(self.as_tensor())
return self.expr_transposed_tensor
def has_transposed_tensor(self):
"""
Returns:
False if as_transposed_tensor() will result in creating additional expressions, True otherwise
"""
return self.expr_transposed_tensor is not None
def dim(self):
"""
Return dimension of the expression sequence
Returns:
result of self.as_tensor().dim(), without explicitly constructing that tensor
"""
if self.has_tensor(): return self.as_tensor().dim()
else:
return tuple(list(self[0].dim()[0]) + [len(self)]), self[0].dim()[1]
class LazyNumpyExpressionSequence(ExpressionSequence):
  """
  This is initialized via numpy arrays, and dynet expressions are only created
  once a consumer requests representation as list or tensor.
  """
  def __init__(self, lazy_data, mask=None):
    """
    Args:
      lazy_data: numpy array, or Batcher.Batch of numpy arrays
      mask: optional mask object indicating masked positions in a batched tensor
    """
    self.lazy_data = lazy_data
    # All dynet representations start out empty and are created on demand.
    # expr_transposed_tensor must be initialized as well: inherited methods such
    # as as_transposed_tensor() / has_transposed_tensor() read it and would
    # otherwise fail with AttributeError.
    self.expr_list, self.expr_tensor = None, None
    self.expr_transposed_tensor = None
    self.mask = mask

  def __len__(self):
    if self.expr_list or self.expr_tensor:
      return super(LazyNumpyExpressionSequence, self).__len__()
    else:
      # Sequence length is the second axis of the underlying numpy array(s).
      if batchers.is_batched(self.lazy_data):
        return self.lazy_data[0].get_array().shape[1]
      else: return self.lazy_data.get_array().shape[1]

  def __iter__(self):
    # Force creation of the expression list, then delegate to the base class.
    if not (self.expr_list or self.expr_tensor):
      self.expr_list = [self[i] for i in range(len(self))]
    return super(LazyNumpyExpressionSequence, self).__iter__()

  def __getitem__(self, key):
    if self.expr_list or self.expr_tensor:
      return super(LazyNumpyExpressionSequence, self).__getitem__(key)
    else:
      # Create only the requested column as a dynet expression.
      if batchers.is_batched(self.lazy_data):
        return dy.inputTensor(
          [self.lazy_data[batch].get_array()[:, key] for batch in range(self.lazy_data.batch_size())], batched=True)
      else:
        return dy.inputTensor(self.lazy_data.get_array()[:,key], batched=False)

  def as_tensor(self):
    if not (self.expr_list or self.expr_tensor):
      if not batchers.is_batched(self.lazy_data):
        raise NotImplementedError()
      # Stack per-batch arrays along a new trailing axis before conversion.
      array = np.concatenate([d.get_array().reshape(d.get_array().shape + (1,)) for d in self.lazy_data], axis=2)
      self.expr_tensor = dy.inputTensor(array, batched=batchers.is_batched(self.lazy_data))
    return super(LazyNumpyExpressionSequence, self).as_tensor()
class ReversedExpressionSequence(ExpressionSequence):
  """
  A reversed expression sequences, where expressions are created in a lazy fashion
  """
  def __init__(self, base_expr_seq):
    # Note: the base-class __init__ is deliberately not called; everything is
    # derived lazily from the wrapped base_expr_seq.
    self.base_expr_seq = base_expr_seq
    self.expr_tensor = None
    self.expr_list = None
    # Reverse the mask alongside the expressions, when one exists.
    if base_expr_seq.mask is None:
      self.mask = None
    else:
      self.mask = base_expr_seq.mask.reversed()

  def __len__(self):
    return len(self.base_expr_seq)

  def __iter__(self):
    if self.expr_list is None:
      self.expr_list = list(reversed(self.base_expr_seq.as_list()))
    return iter(self.expr_list)

  def __getitem__(self, key):
    # Map position key to the mirrored position in the base sequence.
    # NOTE(review): assumes 0 <= key < len(self); a negative key would map to an
    # out-of-range base index -- confirm callers never pass negative indices.
    return self.base_expr_seq[len(self) - key - 1]

  def as_list(self):
    if self.expr_list is None:
      self.expr_list = list(reversed(self.base_expr_seq.as_list()))
    return self.expr_list

  def has_list(self):
    # Reversing a list is cheap, so this mirrors the base sequence's answer.
    return self.base_expr_seq.has_list()

  def as_tensor(self):
    # note: this is quite memory hungry and should be avoided if possible
    if self.expr_tensor is None:
      if self.expr_list is None:
        self.expr_list = list(reversed(self.base_expr_seq.as_list()))
      self.expr_tensor = dy.concatenate_cols(self.expr_list)
    return self.expr_tensor

  def has_tensor(self):
    return self.expr_tensor is not None
class CompoundSeqExpression(object):
  """Container bundling several ExpressionSequence objects into one unit.

  Supports iteration and positional access, delegating to the wrapped list.
  """

  def __init__(self, exprseq_list):
    """Store the list of expression sequences to wrap."""
    self.exprseq_list = exprseq_list

  def __iter__(self):
    """Iterate over the wrapped expression sequences in order."""
    return iter(self.exprseq_list)

  def __getitem__(self, idx):
    """Return the expression sequence at position ``idx``."""
    return self.exprseq_list[idx]
| 35.573222
| 166
| 0.699718
| 8,390
| 0.986827
| 0
| 0
| 0
| 0
| 0
| 0
| 2,639
| 0.310398
|
0c7a5201b457dc27f7c1cc8f8c9c04dfe54b1dc5
| 12,423
|
py
|
Python
|
test/unit/test_localdb.py
|
muchu1983/104_cameo
|
8c7f78de198a5bd8d870589402e3b7e8b59f520a
|
[
"BSD-3-Clause"
] | null | null | null |
test/unit/test_localdb.py
|
muchu1983/104_cameo
|
8c7f78de198a5bd8d870589402e3b7e8b59f520a
|
[
"BSD-3-Clause"
] | null | null | null |
test/unit/test_localdb.py
|
muchu1983/104_cameo
|
8c7f78de198a5bd8d870589402e3b7e8b59f520a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2015, MuChu Hsu
Contributed by Muchu Hsu (muchu1983@gmail.com)
This file is part of BSD license
<https://opensource.org/licenses/BSD-3-Clause>
"""
import unittest
import logging
from cameo.localdb import LocalDbForCurrencyApi
from cameo.localdb import LocalDbForTECHORANGE
from cameo.localdb import LocalDbForBNEXT
from cameo.localdb import LocalDbForPEDAILY
from cameo.localdb import LocalDbForINSIDE
from cameo.localdb import LocalDbForTECHCRUNCH
from cameo.localdb import LocalDbForJD
from cameo.localdb import LocalDbForCROWDCUBE
from cameo.localdb import LocalDbForCRUNCHBASE
"""
測試 本地端資料庫存取
"""
class LocalDbTest(unittest.TestCase):
    """Tests for local database access helpers (cameo.localdb)."""

    # Set up: configure logging before each test.
    def setUp(self):
        logging.basicConfig(level=logging.INFO)

    # Tear down: nothing to clean up.
    def tearDown(self):
        pass

    """
    #測試 幣別轉換API 本地端資料庫存取
    def test_localdb_for_currency_api(self):
        self.db = LocalDbForCurrencyApi()
        logging.info("LocalDbTest.test_getMongoDbClient")
        self.assertIsNotNone(self.db.mongodb)

    #測試 techorange 本地端資料庫存取
    def test_localdb_for_techorange(self):
        logging.info("LocalDbTest.test_localdb_for_techorange")
        db = LocalDbForTECHORANGE()
        db.clearTestData() #清除前次測試資料
        db.insertTagIfNotExists(strTagName="tag_for_unit_test")
        self.assertEquals(db.fetchallNotObtainedTagName()[0], "tag_for_unit_test")
        db.updateTagStatusIsGot(strTagName="tag_for_unit_test")
        self.assertEquals(db.fetchallCompletedObtainedTagName()[0], "tag_for_unit_test")
        db.insertNewsUrlAndNewsTagMappingIfNotExists(strNewsUrl="http://news/for/unit/test", strTagName="tag_for_unit_test")
        self.assertEquals(db.fetchallNewsUrlByTagName(strTagName="tag_for_unit_test")[0], "http://news/for/unit/test")
        self.assertFalse(db.checkNewsIsGot(strNewsUrl="http://news/for/unit/test"))
        db.updateNewsStatusIsGot(strNewsUrl="http://news/for/unit/test")
        self.assertTrue(db.checkNewsIsGot(strNewsUrl="http://news/for/unit/test"))
        db.updateNewsStatusIsNotGot(strNewsUrlPart="/for/unit")
        self.assertFalse(db.checkNewsIsGot(strNewsUrl="http://news/for/unit/test"))
        db.clearTestData() #清除本次測試資料

    #測試 bnext 本地端資料庫存取
    def test_localdb_for_bnext(self):
        logging.info("LocalDbTest.test_localdb_for_bnext")
        db = LocalDbForBNEXT()
        db.clearTestData() #清除前次測試資料
        db.insertTagIfNotExists(strTagName="tag_for_unit_test")
        self.assertEquals(db.fetchallNotObtainedTagName()[0], "tag_for_unit_test")
        db.updateTagStatusIsGot(strTagName="tag_for_unit_test")
        self.assertEquals(db.fetchallCompletedObtainedTagName()[0], "tag_for_unit_test")
        db.insertNewsUrlAndNewsTagMappingIfNotExists(strNewsUrl="http://news/for/unit/test", strTagName="tag_for_unit_test")
        self.assertEquals(db.fetchallNewsUrlByTagName(strTagName="tag_for_unit_test")[0], "http://news/for/unit/test")
        self.assertFalse(db.checkNewsIsGot(strNewsUrl="http://news/for/unit/test"))
        db.updateNewsStatusIsGot(strNewsUrl="http://news/for/unit/test")
        self.assertTrue(db.checkNewsIsGot(strNewsUrl="http://news/for/unit/test"))
        db.clearTestData() #清除本次測試資料

    #測試 pedaily 本地端資料庫存取
    def test_localdb_for_pedaily(self):
        logging.info("LocalDbTest.test_localdb_for_pedaily")
        db = LocalDbForPEDAILY()
        db.clearTestData() #清除前次測試資料
        db.insertCategoryIfNotExists(strCategoryName="category_for_unit_test")
        self.assertEquals(db.fetchallNotObtainedCategoryName()[0], "category_for_unit_test")
        db.updateCategoryStatusIsGot(strCategoryName="category_for_unit_test")
        self.assertEquals(db.fetchallCompletedObtainedCategoryName()[0], "category_for_unit_test")
        db.insertNewsUrlIfNotExists(strNewsUrl="http://news/for/unit/test", strCategoryName="category_for_unit_test")
        self.assertEquals(db.fetchallNewsUrlByCategoryName(strCategoryName="category_for_unit_test")[0], "http://news/for/unit/test")
        self.assertFalse(db.checkNewsIsGot(strNewsUrl="http://news/for/unit/test"))
        db.updateNewsStatusIsGot(strNewsUrl="http://news/for/unit/test")
        self.assertTrue(db.checkNewsIsGot(strNewsUrl="http://news/for/unit/test"))
        self.assertEquals(db.fetchallCompletedObtainedNewsUrl(), ["http://news/for/unit/test"])
        db.updateNewsStatusIsNotGot(strNewsUrl="http://news/for/unit/test")
        self.assertFalse(db.checkNewsIsGot(strNewsUrl="http://news/for/unit/test"))
        db.clearTestData() #清除本次測試資料

    #測試 inside 本地端資料庫存取
    def test_localdb_for_inside(self):
        logging.info("LocalDbTest.test_localdb_for_inside")
        db = LocalDbForINSIDE()
        db.clearTestData() #清除前次測試資料
        db.insertTagIfNotExists(strTagPage1Url="http://tag_for_unit_test/p1")
        self.assertEquals(db.fetchallNotObtainedTagPage1Url()[0], "http://tag_for_unit_test/p1")
        db.updateTagStatusIsGot(strTagPage1Url="http://tag_for_unit_test/p1")
        self.assertEquals(db.fetchallCompletedObtainedTagPage1Url()[0], "http://tag_for_unit_test/p1")
        db.insertNewsUrlAndNewsTagMappingIfNotExists(strNewsUrl="http://news/for/unit/test", strTagPage1Url="http://tag_for_unit_test/p1")
        self.assertEquals(db.fetchallNewsUrlByTagPage1Url(strTagPage1Url="http://tag_for_unit_test/p1")[0], "http://news/for/unit/test")
        self.assertFalse(db.checkNewsIsGot(strNewsUrl="http://news/for/unit/test"))
        db.updateNewsStatusIsGot(strNewsUrl="http://news/for/unit/test")
        self.assertTrue(db.checkNewsIsGot(strNewsUrl="http://news/for/unit/test"))
        db.updateNewsStatusIsNotGot(strNewsUrlPart="/unit/test")
        self.assertFalse(db.checkNewsIsGot(strNewsUrl="http://news/for/unit/test"))
        db.clearTestData() #清除本次測試資料

    #測試 techcrunch 本地端資料庫存取
    def test_localdb_for_techcrunch(self):
        logging.info("LocalDbTest.test_localdb_for_techcrunch")
        db = LocalDbForTECHCRUNCH()
        db.clearTestData() #清除前次測試資料
        db.insertTopicIfNotExists(strTopicPage1Url="http://topic_for_unit_test")
        self.assertEquals(db.fetchallNotObtainedTopicUrl()[0], "http://topic_for_unit_test")
        db.updateTopicStatusIsGot(strTopicPage1Url="http://topic_for_unit_test")
        self.assertEquals(db.fetchallCompletedObtainedTopicUrl()[0], "http://topic_for_unit_test")
        db.insertNewsUrlIfNotExists(strNewsUrl="http://news/for/unit/test", strTopicPage1Url="http://topic_for_unit_test")
        self.assertEquals(db.fetchallNewsUrlByTopicUrl(strTopicPage1Url="http://topic_for_unit_test")[0], "http://news/for/unit/test")
        self.assertFalse(db.checkNewsIsGot(strNewsUrl="http://news/for/unit/test"))
        db.updateNewsStatusIsGot(strNewsUrl="http://news/for/unit/test")
        self.assertTrue(db.checkNewsIsGot(strNewsUrl="http://news/for/unit/test"))
        self.assertEquals(db.fetchallCompletedObtainedNewsUrl(), ["http://news/for/unit/test"])
        db.updateNewsStatusIsNotGot(strNewsUrl="http://news/for/unit/test")
        self.assertFalse(db.checkNewsIsGot(strNewsUrl="http://news/for/unit/test"))
        db.clearTestData() #清除本次測試資料

    #測試 京東眾籌 本地端資料庫存取
    def test_localdb_for_jd(self):
        logging.info("LocalDbTest.test_localdb_for_jd")
        db = LocalDbForJD()
        db.clearTestData() #清除前次測試資料
        db.insertCategoryIfNotExists(strCategoryPage1Url="http://category_for_unit_test", strCategoryName="category_for_unit_test")
        self.assertEquals(db.fetchCategoryNameByUrl(strCategoryPage1Url="http://category_for_unit_test"), "category_for_unit_test")
        self.assertEquals(db.fetchallNotObtainedCategoryUrl()[0], "http://category_for_unit_test")
        db.updateCategoryStatusIsGot(strCategoryPage1Url="http://category_for_unit_test")
        self.assertEquals(db.fetchallCompletedObtainedCategoryUrl()[0], "http://category_for_unit_test")
        db.insertProjectUrlIfNotExists(strProjectUrl="http://project/for/unit/test", strCategoryPage1Url="http://category_for_unit_test")
        db.insertFunderUrlIfNotExists(strFunderUrl="http://funder/for/unit/test", strCategoryPage1Url="http://category_for_unit_test")
        self.assertEquals(db.fetchallProjectUrlByCategoryUrl(strCategoryPage1Url="http://category_for_unit_test")[0], "http://project/for/unit/test")
        self.assertEquals(db.fetchallFunderUrlByCategoryUrl(strCategoryPage1Url="http://category_for_unit_test")[0], "http://funder/for/unit/test")
        self.assertFalse(db.checkProjectIsGot(strProjectUrl="http://project/for/unit/test"))
        self.assertFalse(db.checkFunderIsGot(strFunderUrl="http://funder/for/unit/test"))
        db.updateProjectStatusIsGot(strProjectUrl="http://project/for/unit/test")
        db.updateFunderStatusIsGot(strFunderUrl="http://funder/for/unit/test")
        self.assertTrue(db.checkProjectIsGot(strProjectUrl="http://project/for/unit/test"))
        self.assertTrue(db.checkFunderIsGot(strFunderUrl="http://funder/for/unit/test"))
        self.assertEquals(db.fetchallCompletedObtainedProjectUrl(), ["http://project/for/unit/test"])
        self.assertEquals(db.fetchallCompletedObtainedFunderUrl(), ["http://funder/for/unit/test"])
        db.updateProjectStatusIsNotGot(strProjectUrl="http://project/for/unit/test")
        db.updateFunderStatusIsNotGot(strFunderUrl="http://funder/for/unit/test")
        self.assertFalse(db.checkProjectIsGot(strProjectUrl="http://project/for/unit/test"))
        self.assertFalse(db.checkFunderIsGot(strFunderUrl="http://funder/for/unit/test"))
        db.clearTestData() #清除本次測試資料

    #測試 crowdcube 本地端資料庫存取
    def test_localdb_for_crowdcube(self):
        logging.info("LocalDbTest.test_localdb_for_crowdcube")
        db = LocalDbForCROWDCUBE()
        db.clearTestData() #清除前次測試資料
        db.insertAccountIfNotExists(strEmail="ebucdworc+0100@gmail.com", strPassword="bee520")
        db.insertAccountIfNotExists(strEmail="ebucdworc+0101@gmail.com", strPassword="bee520")
        (strAccountEmail, strAccountPassword) = db.fetchRandomReadyAccount()
        self.assertTrue(strAccountEmail.startswith("ebucdworc"))
        db.insertCompanyUrlIfNotExists(strCompanyUrl="http://company/for/unit/test")
        self.assertEquals(db.fetchallNotObtainedCompanyUrl(), ["http://company/for/unit/test"])
        self.assertFalse(db.checkCompanyIsGot(strCompanyUrl="http://company/for/unit/test"))
        db.updateCompanyStatusIsGot(strCompanyUrl="http://company/for/unit/test")
        self.assertTrue(db.checkCompanyIsGot(strCompanyUrl="http://company/for/unit/test"))
        self.assertEquals(db.fetchallCompletedObtainedCompanyUrl(), ["http://company/for/unit/test"])
        db.updateCompanyStatusIsNotGot(strCompanyUrl="http://company/for/unit/test")
        self.assertFalse(db.checkCompanyIsGot(strCompanyUrl="http://company/for/unit/test"))
        db.clearTestData() #清除本次測試資料
    """

    # Test crunchbase local database access.
    def test_localdb_for_crunchbase(self):
        logging.info("LocalDbTest.test_localdb_for_crunchbase")
        db = LocalDbForCRUNCHBASE()
        db.clearTestData()  # clear data left over from a previous run
        db.insertAccountIfNotExists(strEmail="esabhcnurc+0100@gmail.com", strPassword="bee520")
        db.insertAccountIfNotExists(strEmail="esabhcnurc+0101@gmail.com", strPassword="bee520")
        (strAccountEmail, strAccountPassword) = db.fetchRandomReadyAccount()
        self.assertTrue(strAccountEmail.startswith("esabhcnurc"))
        db.insertOrganizationUrlIfNotExists(strOrganizationUrl="http://organization/for/unit/test")
        # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
        self.assertEqual(db.fetchallNotObtainedOrganizationUrl(), ["http://organization/for/unit/test"])
        self.assertFalse(db.checkOrganizationIsGot(strOrganizationUrl="http://organization/for/unit/test"))
        db.updateOrganizationStatusIsGot(strOrganizationUrl="http://organization/for/unit/test")
        self.assertTrue(db.checkOrganizationIsGot(strOrganizationUrl="http://organization/for/unit/test"))
        self.assertEqual(db.fetchallCompletedObtainedOrganizationUrl(), ["http://organization/for/unit/test"])
        db.updateOrganizationStatusIsNotGot(strOrganizationUrl="http://organization/for/unit/test")
        self.assertFalse(db.checkOrganizationIsGot(strOrganizationUrl="http://organization/for/unit/test"))
        db.clearTestData()  # clear the data created by this test
# Test entry point: run the suite; exit=False keeps the interpreter alive after the run.
if __name__ == "__main__":
    unittest.main(exit=False)
| 62.115
| 149
| 0.738871
| 12,164
| 0.942142
| 0
| 0
| 0
| 0
| 0
| 0
| 11,272
| 0.873054
|
0c7a5957fe9864225fb891e5477469385f447a91
| 3,456
|
py
|
Python
|
complex_venv/lib/python3.7/site-packages/test/test_graph_list_of_file_inputs.py
|
lubianat/complex_bot
|
e0ddabcc0487c52b14fb94950c5a812f0bdb2283
|
[
"MIT"
] | 1
|
2021-10-06T00:21:10.000Z
|
2021-10-06T00:21:10.000Z
|
complex_venv/lib/python3.7/site-packages/test/test_graph_list_of_file_inputs.py
|
lubianat/complex_bot
|
e0ddabcc0487c52b14fb94950c5a812f0bdb2283
|
[
"MIT"
] | 14
|
2021-01-15T21:51:38.000Z
|
2021-11-10T10:08:22.000Z
|
complex_venv/lib/python3.7/site-packages/test/test_graph_list_of_file_inputs.py
|
lubianat/complex_bot
|
e0ddabcc0487c52b14fb94950c5a812f0bdb2283
|
[
"MIT"
] | 1
|
2021-01-18T10:32:56.000Z
|
2021-01-18T10:32:56.000Z
|
import unittest
from shexer.shaper import Shaper
from test.const import G1, BASE_FILES, G1_NT, default_namespaces, BASE_FILES_GENERAL
from test.t_utils import file_vs_str_tunned_comparison
import os.path as pth
from shexer.consts import NT, TURTLE
# Directory holding the fixture graphs split across multiple input files.
_BASE_DIR = BASE_FILES + "graph_list_of_files_input" + pth.sep
class TestGraphListOfFilesInput(unittest.TestCase):
    """Tests for Shaper's ``graph_list_of_files_input`` parameter.

    Covers single-file and multi-file inputs in both Turtle and N-Triples
    serializations; every variant must yield the same ShEx output.
    """

    # Classes whose shapes are extracted in every test of this suite.
    _TARGET_CLASSES = ("http://xmlns.com/foaf/0.1/Person",
                       "http://xmlns.com/foaf/0.1/Document")

    def _check_shaper_output(self, shaper):
        # Shared run-and-compare step: serialize the shapes and compare against
        # the reference ShEx file (previously duplicated in each test).
        str_result = shaper.shex_graph(string_output=True)
        self.assertTrue(file_vs_str_tunned_comparison(
            file_path=BASE_FILES_GENERAL + "g1_all_classes_no_comments.shex",
            str_target=str_result))

    def test_one_turtle(self):
        shaper = Shaper(target_classes=list(self._TARGET_CLASSES),
                        graph_list_of_files_input=[G1],
                        namespaces_dict=default_namespaces(),
                        all_classes_mode=False,
                        input_format=TURTLE,
                        disable_comments=True)
        self._check_shaper_output(shaper)

    def test_one_nt(self):  # input serialization should be nt
        shaper = Shaper(target_classes=list(self._TARGET_CLASSES),
                        graph_list_of_files_input=[G1_NT],
                        namespaces_dict=default_namespaces(),
                        input_format=NT,
                        all_classes_mode=False,
                        disable_comments=True)
        self._check_shaper_output(shaper)

    def test_several_nt(self):  # input serialization should be nt
        shaper = Shaper(target_classes=list(self._TARGET_CLASSES),
                        graph_list_of_files_input=[_BASE_DIR + "g1_p1.nt",
                                                   _BASE_DIR + "g1_p2.nt"],
                        namespaces_dict=default_namespaces(),
                        input_format=NT,
                        all_classes_mode=False,
                        disable_comments=True)
        self._check_shaper_output(shaper)

    def test_several_turtle(self):
        shaper = Shaper(target_classes=list(self._TARGET_CLASSES),
                        graph_list_of_files_input=[_BASE_DIR + "g1_p1.ttl",
                                                   _BASE_DIR + "g1_p2.ttl"],
                        namespaces_dict=default_namespaces(),
                        all_classes_mode=False,
                        input_format=TURTLE,
                        disable_comments=True)
        self._check_shaper_output(shaper)
| 54.857143
| 120
| 0.550926
| 3,132
| 0.90625
| 0
| 0
| 0
| 0
| 0
| 0
| 511
| 0.147859
|
0c7a6a41c0a3a1f235803d1783bd66e37bc01cb9
| 217
|
py
|
Python
|
geoscilabs/gpr/Wiggle.py
|
jcapriot/geosci-labs
|
044a73432b9cb1187924f7761942ab329259d875
|
[
"MIT"
] | 2
|
2019-01-15T03:02:28.000Z
|
2019-03-20T03:41:12.000Z
|
geoscilabs/gpr/Wiggle.py
|
jcapriot/geosci-labs
|
044a73432b9cb1187924f7761942ab329259d875
|
[
"MIT"
] | 1
|
2018-12-30T20:09:25.000Z
|
2018-12-30T20:09:25.000Z
|
geoscilabs/gpr/Wiggle.py
|
jcapriot/geosci-labs
|
044a73432b9cb1187924f7761942ab329259d875
|
[
"MIT"
] | null | null | null |
import numpy as np
def PrimaryWave(x, velocity, tinterp):
    """Arrival time of the direct (primary) wave at offset ``x``.

    The travel-time curve is the straight line t(x) = tinterp + x / velocity.
    Works element-wise on numpy arrays as well as scalars.
    """
    slowness = 1.0 / velocity
    return tinterp + slowness * x
def ReflectedWave(x, velocity, tinterp):
    """Arrival time of the reflected wave at offset ``x``.

    Hyperbolic moveout: t(x) = sqrt((x / velocity)^2 + tinterp^2).
    Works element-wise on numpy arrays as well as scalars.
    """
    squared_time = x ** 2 / velocity ** 2 + tinterp ** 2
    return np.sqrt(squared_time)
| 19.727273
| 57
| 0.645161
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0c7aac81098c986f5d8ddb73b8e89765a14cd635
| 6,487
|
py
|
Python
|
tests/test_mxnet.py
|
haoxintong/byteps
|
495f1372af5f6fd4832393d5e52d4b02b42a7a03
|
[
"Apache-2.0"
] | 1
|
2019-07-01T10:08:00.000Z
|
2019-07-01T10:08:00.000Z
|
tests/test_mxnet.py
|
haoxintong/byteps
|
495f1372af5f6fd4832393d5e52d4b02b42a7a03
|
[
"Apache-2.0"
] | null | null | null |
tests/test_mxnet.py
|
haoxintong/byteps
|
495f1372af5f6fd4832393d5e52d4b02b42a7a03
|
[
"Apache-2.0"
] | 1
|
2021-02-02T02:58:37.000Z
|
2021-02-02T02:58:37.000Z
|
# Copyright 2018 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import byteps.mxnet as bps
import itertools
import mxnet as mx
import os
import unittest
from mxnet.base import MXNetError
from mxnet.test_utils import same
# True when MXNet can see at least one GPU; tests then use per-rank GPU contexts.
has_gpu = mx.context.num_gpus() > 0

# MLSL supports only byte, float and double data types
mlsl_supported_types = set(['float32', 'float64'])
class MXTest:
    """
    Tests for ops in byteps.mxnet.
    """

    def _current_context(self):
        # Use the GPU matching this worker's local rank when available,
        # otherwise whatever context MXNet considers current (CPU).
        if has_gpu:
            return mx.gpu(bps.local_rank())
        else:
            return mx.current_context()

    def filter_supported_types(self, types):
        """Restrict *types* to what the MLSL backend supports, when MLSL is in use."""
        if 'MLSL_ROOT' in os.environ:
            types = [t for t in types if t in mlsl_supported_types]
        return types

    def test_byteps_push_pull(self):
        """Test that the byteps_push_pull correctly sums 1D, 2D, 3D tensors."""
        bps.init()
        dtypes = self.filter_supported_types(['float32'])
        dims = [1]
        ctx = self._current_context()
        count = 0
        shapes = [(), (17,)]
        for dtype, dim in itertools.product(dtypes, dims):
            # MXNet uses gpu_id as part of the seed, so to get identical seeds
            # we must set a context.
            mx.random.seed(10 + 10 * bps.rank(), ctx=ctx)
            tensor = mx.nd.random.uniform(-100, 100, shape=shapes[dim],
                                          ctx=ctx)
            tensor = tensor.astype(dtype)

            print("tensor before push_pull:", tensor)
            bps.byteps_declare_tensor(tensor, "tensor_" + str(count))
            bps.byteps_push_pull(tensor, name="tensor_" + str(count))
            tensor.wait_to_read()
            print("tensor after push_pull:", tensor)
            # Advance the counter so each (dtype, dim) combination declares a
            # unique tensor name (mirrors test_byteps_push_pull_inplace; the
            # original never incremented it).
            count += 1

        bps.shutdown()

    def test_byteps_push_pull_inplace(self):
        """Test that the byteps_push_pull correctly sums 1D, 2D, 3D tensors."""
        bps.init()
        size = bps.size()
        dtypes = self.filter_supported_types(['int32', 'int64',
                                              'float32', 'float64'])
        dims = [1, 2, 3]
        ctx = self._current_context()
        count = 0
        shapes = [(), (17,), (17, 17), (17, 17, 17)]
        for dtype, dim in itertools.product(dtypes, dims):
            mx.random.seed(1234, ctx=ctx)
            tensor = mx.nd.random.uniform(-100, 100, shape=shapes[dim],
                                          ctx=ctx)
            tensor = tensor.astype(dtype)
            # Each worker contributes the same seeded tensor, so the push_pull
            # result should equal tensor * number_of_workers.
            multiplied = tensor * size
            bps.byteps_declare_tensor(tensor, "tensor_" + str(count))
            bps.byteps_push_pull(tensor, name="tensor_" + str(count))
            max_difference = mx.nd.max(mx.nd.subtract(tensor, multiplied))
            count += 1

            # Threshold for floating point equality depends on number of
            # ranks, since we're comparing against precise multiplication.
            if size <= 3 or dtype in ['int32', 'int64']:
                threshold = 0
            elif size < 10:
                threshold = 1e-4
            elif size < 15:
                threshold = 5e-4
            else:
                break

            if max_difference > threshold:
                print("self", count, dtype, dim, max_difference, threshold)
                print("tensor", bps.rank(), tensor)
                print("multiplied", bps.rank(), multiplied)
            assert max_difference <= threshold, 'bps.byteps_push_pull produces \
                incorrect results for self'

    def test_byteps_broadcast(self):
        """Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors."""
        bps.init()
        rank = bps.rank()
        size = bps.size()

        # This test does not apply if there is only one worker.
        if size == 1:
            return

        dtypes = ['int32', 'int64',
                  'float32', 'float64']
        dims = [1, 2, 3]
        ctx = self._current_context()
        count = 0
        shapes = [(), (17,), (17, 17), (17, 17, 17)]
        root_ranks = list(range(size))
        for dtype, dim, root_rank in itertools.product(dtypes, dims,
                                                       root_ranks):
            tensor = mx.nd.ones(shapes[dim], ctx=ctx) * rank
            root_tensor = mx.nd.ones(shapes[dim], ctx=ctx) * root_rank
            tensor = tensor.astype(dtype)
            root_tensor = root_tensor.astype(dtype)

            broadcast_tensor = bps.broadcast(tensor, root_rank=root_rank,
                                             name=str(count))
            # Give every iteration a distinct tensor name (the original reused
            # name "0" for all combinations).
            count += 1
            if rank != root_rank:
                if same(tensor.asnumpy(), root_tensor.asnumpy()):
                    print("broadcast", count, dtype, dim,
                          mx.nd.max(tensor == root_tensor))
                    print("tensor", bps.rank(), tensor)
                    print("root_tensor", bps.rank(), root_tensor)
                    print("comparison", bps.rank(), tensor == root_tensor)
                assert not same(tensor.asnumpy(), root_tensor.asnumpy()), \
                    'bps.broadcast modifies source tensor'
            if not same(broadcast_tensor.asnumpy(), root_tensor.asnumpy()):
                print("broadcast", count, dtype, dim)
                print("broadcast_tensor", bps.rank(), broadcast_tensor)
                print("root_tensor", bps.rank(), root_tensor)
                print("comparison", bps.rank(),
                      broadcast_tensor == root_tensor)
            assert same(broadcast_tensor.asnumpy(), root_tensor.asnumpy()), \
                'bps.broadcast produces incorrect broadcasted tensor'
if __name__ == '__main__':
    # Direct smoke-test entry point (no unittest runner is wired up here):
    # only the basic push_pull test is executed.
    mxtest = MXTest()
    mxtest.test_byteps_push_pull()
| 39.554878
| 80
| 0.564668
| 5,297
| 0.816556
| 0
| 0
| 0
| 0
| 0
| 0
| 1,791
| 0.276091
|
0c7b118e6f6819cf4d1ad8f8d9c8320221d64d32
| 889
|
py
|
Python
|
frux_app_server/migrations/versions/299e2bc265af_favorites.py
|
camidvorkin/frux-app-server
|
21098234a7867908250022e3e1c0580417d1ca35
|
[
"Apache-2.0",
"MIT"
] | 3
|
2021-08-03T21:52:01.000Z
|
2021-09-14T19:39:10.000Z
|
frux_app_server/migrations/versions/299e2bc265af_favorites.py
|
camidvorkin/frux-app-server
|
21098234a7867908250022e3e1c0580417d1ca35
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
frux_app_server/migrations/versions/299e2bc265af_favorites.py
|
camidvorkin/frux-app-server
|
21098234a7867908250022e3e1c0580417d1ca35
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
"""Favorites
Revision ID: 299e2bc265af
Revises: 24f1df018932
Create Date: 2021-06-25 17:24:27.020289
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '299e2bc265af'        # this migration's id
down_revision = '24f1df018932'   # migration applied immediately before this one
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``favorites`` user<->project association table."""
    op.create_table(
        'favorites',
        sa.Column('user_id', sa.Integer(), nullable=False),
        sa.Column('project_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['project_id'], ['project.id']),
        sa.ForeignKeyConstraint(['user_id'], ['user.id']),
        # Composite primary key: one row per (user, project) pair.
        sa.PrimaryKeyConstraint('user_id', 'project_id'),
    )
def downgrade():
    """Drop the ``favorites`` table, reversing :func:`upgrade`."""
    op.drop_table('favorites')
| 25.4
| 65
| 0.68054
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 462
| 0.519685
|
0c7d87c2d7143decedcd6ade00a74af0379219d4
| 902
|
py
|
Python
|
src/lib/libpasteurize/fixes/fix_newstyle.py
|
thonkify/thonkify
|
2cb4493d796746cb46c8519a100ef3ef128a761a
|
[
"MIT"
] | 17
|
2017-08-04T15:41:05.000Z
|
2020-10-16T18:02:41.000Z
|
src/lib/libpasteurize/fixes/fix_newstyle.py
|
thonkify/thonkify
|
2cb4493d796746cb46c8519a100ef3ef128a761a
|
[
"MIT"
] | 3
|
2017-08-04T23:37:37.000Z
|
2017-08-04T23:38:34.000Z
|
src/lib/libpasteurize/fixes/fix_newstyle.py
|
thonkify/thonkify
|
2cb4493d796746cb46c8519a100ef3ef128a761a
|
[
"MIT"
] | 3
|
2017-12-07T16:30:59.000Z
|
2019-06-16T02:48:28.000Z
|
u"""
Fixer for "class Foo: ..." -> "class Foo(object): ..."
"""
from lib2to3 import fixer_base
from lib2to3.fixer_util import LParen, RParen, Name
from libfuturize.fixer_util import touch_import_top
def insert_object(node, idx):
    """Splice the three tokens ``(``, ``object``, ``)`` into *node*'s children,
    starting at position *idx*."""
    for offset, leaf in enumerate((LParen(), Name(u"object"), RParen())):
        node.insert_child(idx + offset, leaf)
class FixNewstyle(fixer_base.BaseFix):
    """Rewrite ``class Foo:`` / ``class Foo():`` as ``class Foo(object):``."""

    # Match:
    # class Blah:
    # and:
    # class Blah():
    PATTERN = u"classdef< 'class' NAME ['(' ')'] colon=':' any >"

    def transform(self, node, results):
        # Locate the ':' of the class header within the node's children.
        colon = results[u"colon"]
        idx = node.children.index(colon)
        # If the class was written with empty parens, drop them first so that
        # insert_object() can put "(object)" in their place.
        if (node.children[idx - 2].value == '(' and
                node.children[idx - 1].value == ')'):
            del node.children[idx - 2:idx]
            idx -= 2
        insert_object(node, idx)
        # Ensure `object` resolves to the new-style builtin on Python 2 as well.
        touch_import_top(u'builtins', 'object', node)
| 26.529412
| 65
| 0.599778
| 548
| 0.607539
| 0
| 0
| 0
| 0
| 0
| 0
| 202
| 0.223947
|
0c7de1d97913a4091e354725bc2562a43f7d9e56
| 14,825
|
py
|
Python
|
pinochle/Game.py
|
Atrus619/DeckOfCards
|
bf0668ea26041e7faab2b88a03d42ba6887d054a
|
[
"MIT"
] | 1
|
2019-06-27T12:14:38.000Z
|
2019-06-27T12:14:38.000Z
|
pinochle/Game.py
|
Atrus619/DeckOfCards
|
bf0668ea26041e7faab2b88a03d42ba6887d054a
|
[
"MIT"
] | 18
|
2019-07-14T17:40:22.000Z
|
2019-11-11T01:54:07.000Z
|
pinochle/Game.py
|
Atrus619/DeckOfCards
|
bf0668ea26041e7faab2b88a03d42ba6887d054a
|
[
"MIT"
] | null | null | null |
from pinochle.State import State
from classes.Deck import Deck
from classes.Hand import Hand
from pinochle.MeldUtil import MeldUtil
from pinochle.Meld import Meld
from pinochle.Trick import Trick
from pinochle.MeldTuple import MeldTuple
from util.Constants import Constants as cs
from util.util import print_divider
from copy import deepcopy
import random
import numpy as np
import util.state_logger as sl
import logging
from config import Config as cfg
import pinochle.card_util as cu
import time
import pandas as pd
from util.Vectors import Vectors as vs
# Configure root logging once at import time, using the project-wide level from config.
logging.basicConfig(format='%(levelname)s:%(message)s', level=cfg.logging_level)

# pinochle rules: https://www.pagat.com/marriage/pin2hand.html
class Game:
    def __init__(self, name, players, run_id="42069", current_cycle=None, human_test=False, config=cfg):
        """Set up a new game: players, deck, empty hands/melds/scores.

        Args:
            name: game variant name; "pinochle" selects the pinochle deck
            players: list of player objects; players[0] deals first
            run_id: identifier for database logging; None disables saving
            current_cycle: training cycle, used to determine the current epsilon
            human_test: when True, extra human-readable state is logged
            config: configuration module/object (defaults to global cfg)
        """
        # Setting run_id = None results in no records being saved to database
        self.run_id = run_id
        self.name = name.upper()
        self.players = players  # This is a list
        self.number_of_players = len(self.players)
        self.dealer = players[0]
        self.trump_card = None  # set later by deal()
        self.trump = None  # suit of trump_card, set by deal()
        self.priority = random.randint(0, 1)  # randomly pick which player acts first
        self.meld_util = None  # created in deal() once trump is known
        self.current_cycle = current_cycle  # To determine the current value of epsilon
        self.human_test = human_test
        self.config = config
        # Experience buffer accumulated during play, later persisted for training.
        self.exp_df = pd.DataFrame(columns=['agent_id', 'opponent_id', 'run_id', 'vector', 'action', 'next_vector',
                                            'reward', 'meld_action'])
        self.last_meld_state = None

        if self.name == cs.PINOCHLE:
            self.deck = Deck("pinochle")
        else:
            self.deck = Deck()

        # Per-player collections, keyed by player object.
        self.hands = {}
        self.melds = {}
        self.scores = {}
        self.meldedCards = {}
        self.discard_pile = Hand()
        self.player_inter_trick_history = {}  # One entry per player, each entry is a tuple containing (prior_state, row_id entry in initial db update)

        for player in self.players:
            self.hands[player] = Hand()
            self.melds[player] = Meld()
            self.scores[player] = [0]
            self.meldedCards[player] = {}
def create_state(self, played_card=None):
return State(self, played_card)
def deal(self):
for i in range(12):
for player in self.players:
self.hands[player].add_cards(self.deck.pull_top_cards(1))
self.trump_card = self.deck.pull_top_cards(1)[0]
self.trump = self.trump_card.suit
self.meld_util = MeldUtil(self.trump)
# Expected card input: VALUE,SUIT. Example: Hindex
# H = hand, M = meld
def collect_trick_cards(self, player, state):
if type(player).__name__ == 'Human':
trick_input = player.get_action(state, msg=player.name + " select card for trick:")
else: # Bot
if self.human_test:
logging.debug("Model hand before action:")
state.convert_to_human_readable_format(player)
trick_index, meld_index = player.get_action(state, self, current_cycle=self.current_cycle, is_trick=True)
trick_input, _ = player.convert_model_output(trick_index=trick_index, meld_index=meld_index, game=self, is_trick=True)
source = trick_input[0]
index = int(trick_input[1:])
if source == "H":
card_input = self.hands[player].cards[index]
card = self.hands[player].pull_card(card_input)
elif source == "M":
mt = self.melds[player].pull_melded_card(self.melds[player].melded_cards[index])
card = mt.card
print_divider()
logging.debug("Player " + player.name + " plays: " + str(card)) # TODO: Fix this later (possible NULL)
return card
def collect_meld_cards(self, player, state, limit=12):
"""
Collecting cards for meld scoring from player who won trick
:param player: Player we are collecting from
:param state: Current state of game
:param limit: Maximum number of cards that can be collected
:return: list of MeldTuples and whether the interaction was valid (boolean)
"""
first_hand_card = True
valid = True
original_hand_cards = deepcopy(self.hands[player])
original_meld_cards = deepcopy(self.melds[player])
collected_hand_cards = []
collected_meld_cards = []
score = 0
meld_class = None
combo_name = None
if type(player).__name__ == 'Human':
while len(collected_hand_cards) + len(collected_meld_cards) < limit:
if first_hand_card:
print_divider()
logging.debug("For meld please select first card from hand.")
user_input = player.get_action(state, msg=player.name + " select card, type 'Y' to exit:")
if user_input == 'Y':
break
source = user_input[0]
index = int(user_input[1:])
if first_hand_card:
if source != "H":
print_divider()
logging.debug("In case of meld, please select first card from hand.")
continue
first_hand_card = False
if source == "H":
card_input = self.hands[player].cards[index]
card = self.hands[player].pull_card(card_input)
collected_hand_cards.append(card)
elif source == "M":
mt = self.melds[player].pull_melded_card(self.melds[player].melded_cards[index])
collected_meld_cards.append(mt)
# Combine collected hand and meld card lists for score calculation
collected_cards = collected_hand_cards + [mt.card for mt in collected_meld_cards]
if len(collected_cards) > 0:
score, meld_class, combo_name = self.meld_util.calculate_score(collected_cards)
if score == 0:
valid = False
else:
for mt in collected_meld_cards:
original_meld_class = mt.meld_class
if original_meld_class == meld_class:
original_meld_score = mt.score
if original_meld_score <= score:
valid = False
break
if not valid:
self.hands[player] = original_hand_cards
self.melds[player] = original_meld_cards
else: # Bot
valid = True
trick_action, meld_action = player.get_action(state, self, current_cycle=self.current_cycle, is_trick=False)
if meld_action == vs.MELD_COMBINATIONS_ONE_HOT_VECTOR.__len__():
# model chose to pass melding
return [], valid
score, meld_class, combo_name, collected_cards = \
player.convert_model_output(trick_index=trick_action, meld_index=meld_action, game=self, is_trick=False)
return [MeldTuple(card, combo_name, meld_class, score) for card in collected_cards], valid
def play_trick(self):
"""
priority: 0 or 1 for index in player list
:return: index of winner (priority for next trick)
"""
print_divider()
logging.debug(f'Phase 1\tTrick #{12 - len(self.deck)//2}\t{len(self.deck)} card{"s" if len(self.deck) > 1 else ""} remaining in deck')
trick_start_state = self.create_state()
trick = Trick(self.players, self.trump)
# Determine which player goes first based on priority arg
""" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# TRICK PLAYER LIST IS NOT ALWAYS THE SAME AS THE GAME PLAYER LIST
# THEY COULD BE IN DIFFERENT ORDER
"""
player_order = list(self.players)
player_1 = player_order.pop(self.priority)
player_2 = player_order[0]
trick_player_list = [player_1, player_2]
# Collect card for trick from each player based on order
card_1 = self.collect_trick_cards(player_1, trick_start_state) # Collect card from first player based on priority
if self.human_test:
time.sleep(cfg.human_test_pause_length)
# Recording the first card that was played
first_move_state = self.create_state(card_1)
if self.human_test and 'get_Qs' in dir(self.players[0].model):
print_divider()
bot_state = trick_start_state if self.players[0] == player_1 else first_move_state
human_state = trick_start_state if self.players[1] == player_1 else first_move_state
logging.debug(self.players[0].model.get_Qs(player=self.players[0], player_state=bot_state, opponent=self.players[1], opponent_state=human_state))
if self.players[0] in self.player_inter_trick_history and self.run_id is not None: # Don't update on first trick of game
p1_update_dict = {'player': player_1, 'state_1': self.player_inter_trick_history[player_1][0],
'state_2': trick_start_state, 'row_id': self.player_inter_trick_history[player_1][1]}
p2_update_dict = {'player': player_2, 'state_1': self.player_inter_trick_history[player_2][0],
'state_2': first_move_state, 'row_id': self.player_inter_trick_history[player_2][1]}
self.exp_df = sl.update_state(df=self.exp_df, p1=p1_update_dict, p2=p2_update_dict, win_reward=self.config.win_reward)
card_2 = self.collect_trick_cards(player_2, first_move_state) # Collect card from second player based on priority
if self.human_test:
time.sleep(cfg.human_test_pause_length)
print_divider()
logging.debug("LETS GET READY TO RUMBLE!!!!!!!!!!!!!!!!!!!!!!!")
logging.debug("Card 1: " + str(card_1))
logging.debug("Card 2: " + str(card_2))
if self.human_test:
time.sleep(cfg.human_test_pause_length)
# Determine winner of trick based on collected cards
result = cu.compare_cards(self.trump, card_1, card_2)
print_divider()
logging.debug("VICTOR : " + str(player_1.name if result == 0 else player_2.name))
if self.human_test:
time.sleep(cfg.human_test_pause_length)
# Separate winner and loser for scoring, melding, and next hand
winner = trick_player_list.pop(result)
loser = trick_player_list[0]
# Winner draws a card from the stock, followed by the loser drawing a card from the stock
# TODO: Come back here and allow winner to choose when down to last 2 cards (optional af)
self.hands[winner].add_cards(self.deck.pull_top_cards(1))
if len(self.deck) == 0:
self.hands[loser].add_cards(self.trump_card)
else:
self.hands[loser].add_cards(self.deck.pull_top_cards(1))
# Winner can now meld if they so choose
print_divider()
logging.debug(winner.name + " select cards for meld:")
# Verify that meld is valid. If meld is invalid, force the user to retry.
self.last_meld_state = self.create_state()
mt_list = []
# no melding in this version
while 1:
mt_list, valid = self.collect_meld_cards(winner, self.last_meld_state)
if valid:
break
else:
print_divider()
logging.debug("Invalid combination submitted, please try again.")
# Update scores
if len(mt_list) == 0: # No cards melded, so score is 0
meld_score = 0
else:
meld_score = mt_list[0].score # Score is the same for all MeldTuples in mt_list
trick_score = trick.calculate_trick_score(card_1, card_2)
total_score = meld_score + trick_score
self.discard_pile.add_cards([card_1, card_2])
# log states and actions, player order = TRICK ORDER
if self.run_id is not None:
p1_dict = {'player': player_1, 'state': trick_start_state, 'card': card_1}
p2_dict = {'player': player_2, 'state': first_move_state, 'card': card_2}
meld_dict = {'player': winner, 'meld': mt_list}
self.exp_df, self.player_inter_trick_history = \
sl.log_state(df=self.exp_df, p1=p1_dict, p2=p2_dict, meld=meld_dict,
run_id=self.run_id, history=self.player_inter_trick_history)
self.scores[winner].append(self.scores[winner][-1] + total_score)
self.scores[loser].append(self.scores[loser][-1])
# Update winner's meld
for mt in mt_list:
self.melds[winner].add_melded_card(mt)
# set new priority
self.priority = self.players.index(winner)
def play(self):
while len(self.deck) > 0:
self.play_trick()
final_scores = [self.scores[player][-1] for player in self.players]
winner_index = np.argmax(final_scores)
if self.run_id is not None:
# GAME ORDER (because it doesn't matter here)
end_game_state = self.create_state()
p1_update_dict = {'player': self.players[0], 'state_1': self.player_inter_trick_history[self.players[0]][0], 'state_2': end_game_state,
'row_id': self.player_inter_trick_history[self.players[0]][1]}
p2_update_dict = {'player': self.players[1], 'state_1': self.player_inter_trick_history[self.players[1]][0], 'state_2': end_game_state,
'row_id': self.player_inter_trick_history[self.players[1]][1]}
self.exp_df = sl.update_state(df=self.exp_df, p1=p1_update_dict, p2=p2_update_dict, winner=self.players[winner_index], win_reward=self.config.win_reward,
final_trick_winner=self.players[self.priority])
self.exp_df = sl.log_final_meld(df=self.exp_df, meld_state=self.last_meld_state, history=self.player_inter_trick_history,
final_trick_winner=self.players[self.priority], end_game_state=end_game_state, run_id=self.run_id,
winner=self.players[winner_index], win_reward=self.config.win_reward)
print_divider()
logging.debug("Winner: " + str(self.players[winner_index]) + "\tScore: " + str(final_scores[winner_index]))
logging.debug(
"Loser: " + str(self.players[1 - winner_index]) + "\tScore: " + str(final_scores[1 - winner_index]))
return winner_index, None if self.run_id is None else self.exp_df
| 44.924242
| 165
| 0.616256
| 14,120
| 0.952445
| 0
| 0
| 0
| 0
| 0
| 0
| 2,994
| 0.201956
|
0c806a0eaf687b2d10c9f68f4752a320058970ab
| 737
|
py
|
Python
|
.eggs/py2app-0.14-py3.6.egg/py2app/recipes/matplotlib.py
|
stfbnc/mtsa_py
|
0dd14f0e51e3251f10b3da781867fbc7173608eb
|
[
"MIT"
] | 17
|
2018-08-28T04:40:07.000Z
|
2021-12-15T06:19:31.000Z
|
.eggs/py2app-0.14-py3.6.egg/py2app/recipes/matplotlib.py
|
stfbnc/mtsa_py
|
0dd14f0e51e3251f10b3da781867fbc7173608eb
|
[
"MIT"
] | 4
|
2019-05-17T09:35:30.000Z
|
2022-03-13T03:50:20.000Z
|
.eggs/py2app-0.14-py3.6.egg/py2app/recipes/matplotlib.py
|
stfbnc/mtsa_py
|
0dd14f0e51e3251f10b3da781867fbc7173608eb
|
[
"MIT"
] | 3
|
2019-01-15T07:13:53.000Z
|
2020-03-29T00:48:39.000Z
|
import os
def check(cmd, mf):
    """py2app recipe hook for matplotlib.

    Returns None when matplotlib is not part of the build. Otherwise
    returns a recipe dict that pulls in the prescript and the ``mpl-data``
    resource directory. When the build command requests explicit backends,
    only those are imported ('*' pulls in all backends, '-' requests none);
    with no explicit request, the whole matplotlib package is bundled.
    """
    node = mf.findNode('matplotlib')
    if node is None or node.filename is None:
        return None

    requested = cmd.matplotlib_backends
    if requested:
        extra = {}
        for name in requested:
            if name == '-':
                # Explicit opt-out: import no backends at all.
                continue
            if name == '*':
                mf.import_hook('matplotlib.backends', node, ['*'])
            else:
                mf.import_hook('matplotlib.backends.backend_%s' % (name,), node)
    else:
        # No backend selection: include the full matplotlib package.
        extra = {'packages': ['matplotlib']}

    recipe = dict(
        prescripts=['py2app.recipes.matplotlib_prescript'],
        resources=[os.path.join(os.path.dirname(node.filename), 'mpl-data')],
    )
    recipe.update(extra)
    return recipe
| 24.566667
| 74
| 0.530529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 143
| 0.19403
|
0c82f844757360c3545bf98aad68a6f84622e3da
| 711
|
py
|
Python
|
app/core/migrations/0009_auto_20210214_2113.py
|
Valentin-Golyonko/FlaskTestRPi
|
b9796a9acb2bb1c122301a3ef192f43c857eb27b
|
[
"Apache-2.0"
] | null | null | null |
app/core/migrations/0009_auto_20210214_2113.py
|
Valentin-Golyonko/FlaskTestRPi
|
b9796a9acb2bb1c122301a3ef192f43c857eb27b
|
[
"Apache-2.0"
] | null | null | null |
app/core/migrations/0009_auto_20210214_2113.py
|
Valentin-Golyonko/FlaskTestRPi
|
b9796a9acb2bb1c122301a3ef192f43c857eb27b
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.6 on 2021-02-14 18:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration.

    Redefines the ``ip_address`` and ``mac_address`` fields of the
    ``device`` model to the field definitions below (both optional,
    with example help text).
    """

    # Must be applied after the previous core migration.
    dependencies = [
        ('core', '0008_auto_20210214_2039'),
    ]

    operations = [
        migrations.AlterField(
            model_name='device',
            name='ip_address',
            field=models.GenericIPAddressField(blank=True, help_text='e.g. 192.168.0.17', null=True, verbose_name='IP address'),
        ),
        migrations.AlterField(
            model_name='device',
            name='mac_address',
            field=models.CharField(blank=True, help_text='e.g. A1:B2:C3:D4:5E:6F', max_length=100, null=True, verbose_name='MAC address'),
        ),
    ]
| 29.625
| 138
| 0.613221
| 618
| 0.869198
| 0
| 0
| 0
| 0
| 0
| 0
| 187
| 0.26301
|
0c83ca38634452225fc5796f360c1d45205cc40e
| 1,552
|
py
|
Python
|
grabber.py
|
serj12342/pstreehunter
|
255b2da9a7110ca37f776baa3071ecbb10fec2f5
|
[
"MIT"
] | 1
|
2022-01-03T13:30:01.000Z
|
2022-01-03T13:30:01.000Z
|
grabber.py
|
serj12342/pstreehunter
|
255b2da9a7110ca37f776baa3071ecbb10fec2f5
|
[
"MIT"
] | null | null | null |
grabber.py
|
serj12342/pstreehunter
|
255b2da9a7110ca37f776baa3071ecbb10fec2f5
|
[
"MIT"
] | null | null | null |
import requests
import re

# Regular expressions for each IOC (indicator of compromise) type extracted
# from the feed artifacts.
# NOTE(review): the anchored patterns (hashes, domain) are used with
# re.findall + re.DOTALL but WITHOUT re.MULTILINE, so they only match when
# the whole artifact string is exactly one value — confirm this is intended.
pattern_hashes = "^[a-f0-9]{64}$"
pattern_url = "https?:\/\/.*"
pattern_ip = "(?:(?:\d|[01]?\d\d|2[0-4]\d|25[0-5])\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d|\d)(?:\/\d{1,2})?"
pattern_domain = "^[a-zA-Z0-9][a-zA-Z0-9-_]{0,61}[a-zA-Z0-9]{0,1}\.([a-zA-Z]{1,6}|[a-zA-Z0-9-]{1,30}\.[a-zA-Z]{2,3})$"

headers = {
    'Accept': 'application/json'
}
# Module-level side effect: fetches the InQuest IOC feed at import time.
r = requests.get('https://labs.inquest.net/api/iocdb/list', headers = headers)
j = r.json()
def ioc_grabber(fileHandle, pattern, data, notUseRegular=False):
    """Extract matches of *pattern* from *data* and append them to *fileHandle*.

    When *notUseRegular* is True the data is written verbatim and *pattern*
    is ignored. Otherwise all regex matches are joined with newlines and
    written, but only if at least one match is a non-empty string.
    """
    if notUseRegular:
        # Raw mode: dump the data as-is, no regex filtering.
        writerHandle(fileHandle, data)
        return

    matches = re.findall(pattern, data, re.DOTALL)
    has_text = False
    for item in matches:
        # findall may yield tuples when the pattern has groups; only
        # non-empty strings count as real content.
        if isinstance(item, str) and item:
            has_text = True
            break
    if has_text:
        writerHandle(fileHandle, "\n".join(matches))
def writerHandle(fileHandle, data):
    """Append *data* to *fileHandle*, terminated by a single newline."""
    fileHandle.write(data + '\n')
# Append each IOC type from the fetched feed to its own output file.
# Files are opened in append mode, so repeated runs accumulate results.
with open("links.txt", "a") as links:
    with open("hashes.txt", "a") as ioc_hash:
        with open("urls.txt", "a") as ioc_url:
            with open("ip.txt", "a") as ioc_ip:
                with open("domains.txt", "a") as ioc_domain:
                    for each in (j['data']):
                        # The reference link is written verbatim (no regex).
                        ioc_grabber(links, None, each['reference_link'], True)
                        ioc_grabber(ioc_hash, pattern_hashes, each['artifact'])
                        ioc_grabber(ioc_url, pattern_url, each['artifact'])
                        ioc_grabber(ioc_ip, pattern_ip, each['artifact'])
                        ioc_grabber(ioc_domain, pattern_domain, each['artifact'])
                    # NOTE(review): the explicit close() calls below are
                    # redundant — each ``with`` block already closes its
                    # handle on exit (close() is a harmless no-op here).
                    ioc_domain.close()
                ioc_ip.close()
            ioc_url.close()
        ioc_hash.close()
    links.close()
| 32.333333
| 118
| 0.607603
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 429
| 0.276418
|
0c844a07b81367c6e40feb94bf39e5508b8f654c
| 1,134
|
py
|
Python
|
prac2_2.py
|
JulianAZW/bookish-lamp
|
24f354d8fa16841f256a2ad40668126604131cef
|
[
"MIT"
] | 1
|
2022-03-26T01:07:57.000Z
|
2022-03-26T01:07:57.000Z
|
prac2_2.py
|
JulianAZW/bookish-lamp
|
24f354d8fa16841f256a2ad40668126604131cef
|
[
"MIT"
] | null | null | null |
prac2_2.py
|
JulianAZW/bookish-lamp
|
24f354d8fa16841f256a2ad40668126604131cef
|
[
"MIT"
] | 1
|
2022-03-27T02:32:39.000Z
|
2022-03-27T02:32:39.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 17 19:08:54 2022
@author: julian
"""
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def heatmap(df):
    """Print the pairwise column correlation matrix of *df* and save a
    seaborn heatmap of it to disk, then clear the figure for reuse."""
    corr = df.corr().round(2)
    print(corr)
    ax = sns.heatmap(data=corr, annot=True)
    ax.figure.savefig("Mapa de calor de correlacion")
    ax.figure.clear()
def plotXY(df):
    """Scatter-plot each of the first 8 feature columns (x-axis) against
    the target column at index 8 (y-axis), saving each plot as
    ``plt<i>_8.png``.

    Bug fix: the original computed and printed the ``.png`` filename but
    never saved the figure (the bare ``plt_disp`` expression was a no-op),
    so no plot files were ever produced.
    """
    for i in range(0, 8):
        plt.scatter(df.iloc[:, i], df.iloc[:, 8])
        plt.xlabel(df.columns[i])
        plt.ylabel(df.columns[8])
        cadena = "plt" + str(i) + "_" + str(8) + ".png"
        print(cadena)
        # Actually write the figure to disk under the computed name.
        plt.savefig(cadena)
        # Clear the current figure so the next iteration starts fresh.
        plt.clf()
if __name__=='__main__':
    # Load the pre-split training features and target, then join them
    # column-wise into one DataFrame (features in cols 0-7, target in col 8).
    df1 = pd.read_csv("data_train.csv", sep=',', engine='python')
    df2 = pd.read_csv("medianHouseValue_train.csv", sep=',', engine='python')
    df = pd.concat([df1,df2], axis=1)
    print("El DataFrame con el 80 por ciento de los datos: \n", df)
    #plt.scatter(df.iloc[:,7],df.iloc[:,8])
    #plt.xlabel(df.columns[7])
    #plt.ylabel(df.columns[8])
    #plt.show()
    # Produce the correlation heatmap and the per-feature scatter plots.
    heatmap(df)
    plotXY(df)
| 26.372093
| 77
| 0.613757
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 357
| 0.314815
|
0c854c215d158012c99078b8d02a0bf8423f9708
| 20,048
|
py
|
Python
|
bookworm/ocr/ocr_dialogs.py
|
xingkong0113/bookworm
|
7214067f48e7a951198806a1f9170e3fd8fc0cce
|
[
"MIT"
] | 36
|
2020-11-15T03:21:39.000Z
|
2022-03-05T01:11:26.000Z
|
bookworm/ocr/ocr_dialogs.py
|
xingkong0113/bookworm
|
7214067f48e7a951198806a1f9170e3fd8fc0cce
|
[
"MIT"
] | 90
|
2020-10-06T14:46:07.000Z
|
2022-03-31T03:03:34.000Z
|
bookworm/ocr/ocr_dialogs.py
|
xingkong0113/bookworm
|
7214067f48e7a951198806a1f9170e3fd8fc0cce
|
[
"MIT"
] | 20
|
2020-09-30T17:40:44.000Z
|
2022-03-17T19:59:53.000Z
|
# coding: utf-8
import wx
import wx.lib.sized_controls as sc
from dataclasses import dataclass
from functools import partial
from wx.adv import CommandLinkButton
from bookworm import app
from bookworm import config
from bookworm import typehints as t
from bookworm.i18n import LocaleInfo
from bookworm.concurrency import threaded_worker
from bookworm.gui.settings import SettingsPanel, ReconciliationStrategies
from bookworm.gui.components import (
make_sized_static_box,
SimpleDialog,
RobustProgressDialog,
SnakDialog,
AsyncSnakDialog,
ImmutableObjectListView,
ColumnDefn,
)
from bookworm.utils import restart_application
from bookworm.logger import logger
from bookworm.platform_services._win32 import tesseract_download
from bookworm.ocr_engines.tesseract_ocr_engine import TesseractOcrEngine
from bookworm.ocr_engines.image_processing_pipelines import (
ImageProcessingPipeline,
DebugProcessingPipeline,
DPIProcessingPipeline,
ThresholdProcessingPipeline,
BlurProcessingPipeline,
TwoInOneScanProcessingPipeline,
DeskewProcessingPipeline,
InvertColourProcessingPipeline,
ErosionProcessingPipeline,
DilationProcessingPipeline,
ConcatImagesProcessingPipeline,
SharpenColourProcessingPipeline,
)
# Module-scoped child logger so OCR dialog messages can be filtered per module.
log = logger.getChild(__name__)
@dataclass
class OcrOptions:
    """User-selected OCR recognition options collected by OCROptionsDialog."""

    # Recognition language chosen by the user.
    language: LocaleInfo
    # Multiplier applied to the supplied image resolution (slider value).
    zoom_factor: float
    # Whether image pre-processing (enhancement) is enabled.
    _ipp_enabled: int
    # Image pre-processing pipeline classes to apply when enhancement is on.
    image_processing_pipelines: t.Tuple[ImageProcessingPipeline]
    # Keep these options for the remainder of the current book session.
    store_options: bool
class OcrPanel(SettingsPanel):
    """Application settings panel for OCR: default engine selection,
    Tesseract engine/language management, and image-enhancement defaults."""

    config_section = "ocr"

    def addControls(self):
        """Build the panel widgets: engine radio box, a context-dependent
        Tesseract button (download engine vs. manage languages), and the
        image-enhancement checkbox bound to ``ocr.enhance_images``."""
        self._service = wx.GetApp().service_handler.get_service("ocr")
        self._engines = self._service._available_ocr_engines
        _engines_display = [_(e.display_name) for e in self._engines]
        # Translators: the label of a group of controls in the reading page
        generalOcrBox = self.make_static_box(_("OCR Options"))
        self.ocrEngine = wx.RadioBox(
            generalOcrBox,
            -1,
            # Translators: the title of a group of radio buttons in the OCR page
            # in the application settings.
            _("Default OCR Engine"),
            majorDimension=1,
            style=wx.RA_SPECIFY_COLS,
            choices=_engines_display,
        )
        # Translators: the label of a group of controls in the OCR page
        # of the settings related to Tesseract OCR engine
        tessBox = self.make_static_box(_("Tesseract OCR Engine"))
        # Offer the engine download when Tesseract is absent, otherwise
        # offer language management.
        if not tesseract_download.is_tesseract_available():
            tessEngineDlBtn = CommandLinkButton(
                tessBox,
                -1,
                _("Download Tesseract OCR Engine"),
                _(
                    "Get a free, high-quality OCR engine that supports over 100 languages."
                ),
            )
            self.Bind(wx.EVT_BUTTON, self.onDownloadTesseractEngine, tessEngineDlBtn)
        else:
            tessLangDlBtn = CommandLinkButton(
                tessBox,
                -1,
                _("Manage Tesseract OCR Languages"),
                _("Add support for new languages, and /or remove installed languages."),
            )
            self.Bind(wx.EVT_BUTTON, self.onDownloadTesseractLanguages, tessLangDlBtn)
        # Translators: the label of a group of controls in the reading page
        # of the settings related to image enhancement
        miscBox = self.make_static_box(_("Image processing"))
        wx.CheckBox(
            miscBox,
            -1,
            # Translators: the label of a checkbox
            _("Enable default image enhancement filters"),
            name="ocr.enhance_images",
        )

    def reconcile(self, strategy=ReconciliationStrategies.load):
        """Load widget state from config, or persist it back, per *strategy*.
        On save, a changed engine selection re-initializes the OCR engine."""
        if strategy is ReconciliationStrategies.load:
            self.ocrEngine.SetSelection(
                self._engines.index(self._service.get_first_available_ocr_engine())
            )
        elif strategy is ReconciliationStrategies.save:
            selected_engine = self._engines[self.ocrEngine.GetSelection()]
            if self.config["engine"] != selected_engine.name:
                self.config["engine"] = selected_engine.name
                self._service._init_ocr_engine()
        super().reconcile(strategy=strategy)
        if strategy is ReconciliationStrategies.save:
            self._service._init_ocr_engine()

    def onDownloadTesseractEngine(self, event):
        """Kick off an async fetch of Tesseract download info; continues in
        ``_on_tesseract_download_info``."""
        AsyncSnakDialog(
            task=tesseract_download.get_tesseract_download_info,
            done_callback=self._on_tesseract_download_info,
            message=_("Retrieving download info, please wait..."),
            parent=self,
        )

    def onDownloadTesseractLanguages(self, event):
        """Open the modal Tesseract language manager dialog."""
        TesseractLanguageManager(
            title=_("Manage Tesseract OCR Engine Languages"), parent=self
        ).ShowModal()

    def _on_tesseract_download_info(self, future):
        """Async callback: with download info resolved, start downloading
        the engine behind a cancellable progress dialog."""
        if (
            info := tesseract_download.get_tesseract_download_info_from_future(
                future, self
            )
        ) is None:
            return
        dl_url = info.get_engine_download_url()
        progress_dlg = RobustProgressDialog(
            self,
            # Translators: title of a progress dialog
            _("Downloading Tesseract OCR Engine"),
            # Translators: message of a progress dialog
            _("Getting download information..."),
            maxvalue=100,
            can_abort=True,
        )
        threaded_worker.submit(
            tesseract_download.download_tesseract_engine, dl_url, progress_dlg
        ).add_done_callback(partial(self._after_tesseract_install, progress_dlg))

    def _after_tesseract_install(self, progress_dlg, future):
        """Worker-thread callback: on successful install, notify the user
        and restart the application to pick up the new engine."""
        progress_dlg.Dismiss()
        if future.result() is True:
            wx.GetApp().mainFrame.notify_user(
                _("Restart Required"),
                _(
                    "Bookworm will now restart to complete the installation of the Tesseract OCR Engine."
                ),
            )
            wx.CallAfter(restart_application)
class OCROptionsDialog(SimpleDialog):
    """OCR options.

    Modal dialog that collects an OcrOptions value (language, zoom factor,
    image pre-processing selection and persistence flag). ``ShowModal``
    returns the OcrOptions, or None when the dialog was dismissed.
    """

    def __init__(
        self, *args, stored_options=None, languages=(), force_save=False, **kwargs
    ):
        # Previously-saved options used to pre-populate the widgets, if any.
        self.stored_options = stored_options
        self.languages = languages
        # When True the options are always persisted and the "save" checkbox
        # is not shown.
        self.force_save = force_save
        self._return_value = None
        # (checkbox, pipeline_class) pairs built in addControls.
        self.image_processing_pipelines = []
        self.stored_ipp = (
            ()
            if self.stored_options is None
            else self.stored_options.image_processing_pipelines
        )
        super().__init__(*args, **kwargs)

    def addControls(self, parent):
        """Build the dialog widgets and pre-populate them from
        ``stored_options`` (or defaults when there are none)."""
        # Translators: the label of a combobox
        label = wx.StaticText(parent, -1, _("Recognition Language:"))
        self.langChoice = wx.Choice(
            parent, -1, choices=[l.description for l in self.languages]
        )
        self.langChoice.SetSizerProps(expand=True)
        wx.StaticText(parent, -1, _("Supplied Image resolution::"))
        self.zoomFactorSlider = wx.Slider(parent, -1, minValue=0, maxValue=10)
        # Translators: the label of a checkbox
        self.should_enhance_images = wx.CheckBox(
            parent, -1, _("Enable image enhancements")
        )
        ippPanel = sc.SizedPanel(parent)
        # Translators: the label of a checkbox
        imgProcBox = make_sized_static_box(
            ippPanel, _("Available image pre-processing filters:")
        )
        # One checkbox per available pipeline; checked state comes from the
        # stored options when present, else from the pipeline's default.
        for (ipp_cls, lbl, should_enable) in self.get_image_processing_pipelines_info():
            chbx = wx.CheckBox(imgProcBox, -1, lbl)
            if self.stored_options is not None:
                chbx.SetValue(ipp_cls in self.stored_ipp)
            else:
                chbx.SetValue(should_enable)
            self.image_processing_pipelines.append((chbx, ipp_cls))
        wx.StaticLine(parent)
        if not self.force_save:
            self.storeOptionsCheckbox = wx.CheckBox(
                parent,
                -1,
                # Translators: the label of a checkbox
                _("&Save these options until I close the current book"),
            )
        self.Bind(wx.EVT_BUTTON, self.onOK, id=wx.ID_OK)
        if self.stored_options is None:
            self.langChoice.SetSelection(0)
            self.zoomFactorSlider.SetValue(2)
            self.should_enhance_images.SetValue(config.conf["ocr"]["enhance_images"])
        else:
            self.langChoice.SetSelection(
                self.languages.index(self.stored_options.language)
            )
            self.zoomFactorSlider.SetValue(self.stored_options.zoom_factor)
            self.should_enhance_images.SetValue(self.stored_options._ipp_enabled)
            if not self.force_save:
                self.storeOptionsCheckbox.SetValue(self.stored_options.store_options)
        # Grey out the pipeline panel whenever enhancement is unchecked.
        enable_or_disable_image_pipelines = lambda: ippPanel.Enable(
            self.should_enhance_images.IsChecked()
        )
        self.Bind(
            wx.EVT_CHECKBOX,
            lambda e: enable_or_disable_image_pipelines(),
            self.should_enhance_images,
        )
        enable_or_disable_image_pipelines()

    def onOK(self, event):
        """Assemble the OcrOptions result from the widgets and close."""
        if not self.should_enhance_images.IsChecked():
            selected_image_pp = []
        else:
            selected_image_pp = [
                ipp_cls
                for c, ipp_cls in self.image_processing_pipelines
                if c.IsChecked()
            ]
        self._return_value = OcrOptions(
            language=self.languages[self.langChoice.GetSelection()],
            # A zoom factor of 0 would be meaningless; fall back to 1.
            zoom_factor=self.zoomFactorSlider.GetValue() or 1,
            _ipp_enabled=self.should_enhance_images.IsChecked(),
            image_processing_pipelines=selected_image_pp,
            store_options=self.force_save or self.storeOptionsCheckbox.IsChecked(),
        )
        self.Close()

    def ShowModal(self):
        """Show the dialog; return the chosen OcrOptions or None."""
        super().ShowModal()
        return self._return_value

    def get_image_processing_pipelines_info(self):
        """Return (pipeline_class, label, enabled_by_default) triples for
        every selectable pre-processing filter; the Debug pipeline is only
        offered in debug builds."""
        ipp = [
            (DPIProcessingPipeline, _("Increase image resolution"), True),
            (ThresholdProcessingPipeline, _("Binarization"), True),
            (
                TwoInOneScanProcessingPipeline,
                _("Split two-in-one scans to individual pages"),
                False,
            ),
            (ConcatImagesProcessingPipeline, _("Combine images"), False),
            (BlurProcessingPipeline, _("Blurring"), False),
            (DeskewProcessingPipeline, _("Deskewing"), False),
            (ErosionProcessingPipeline, _("Erosion"), False),
            (DilationProcessingPipeline, _("Dilation"), False),
            (SharpenColourProcessingPipeline, _("Sharpen image"), False),
            (InvertColourProcessingPipeline, _("Invert colors"), False),
        ]
        if app.debug:
            ipp.append((DebugProcessingPipeline, _("Debug"), False))
        return ipp
class TesseractLanguageManager(SimpleDialog):
    """
    A dialog to manage the languages for the managed
    version of Tesseract OCR Engine on Windows.

    Shows installed and downloadable languages in a list, and lets the
    user download ("best" or "fast" model variants) or remove language
    data files.
    """

    def __init__(self, *args, **kwargs):
        # Populated asynchronously by _on_tesseract_dl_info.
        self.online_languages = ()
        super().__init__(*args, **kwargs)
        self.SetSize((600, -1))
        self.CenterOnScreen()

    def addControls(self, parent):
        """Create the language list plus download/remove buttons, then
        asynchronously fetch the list of languages available online."""
        # Translators: label of a list control containing bookmarks
        wx.StaticText(parent, -1, _("Tesseract Languages"))
        listPanel = sc.SizedPanel(parent)
        listPanel.SetSizerType("horizontal")
        listPanel.SetSizerProps(expand=True, align="center")
        self.tesseractLanguageList = ImmutableObjectListView(
            listPanel, wx.ID_ANY, style=wx.LC_REPORT | wx.SUNKEN_BORDER, size=(500, -1)
        )
        self.btnPanel = btnPanel = sc.SizedPanel(parent, -1)
        btnPanel.SetSizerType("horizontal")
        btnPanel.SetSizerProps(expand=True)
        # Translators: text of a button to add a language to Tesseract OCR Engine (best quality model)
        self.addBestButton = wx.Button(btnPanel, wx.ID_ANY, _("Download &Best Model"))
        # Translators: text of a button to add a language to Tesseract OCR Engine (fastest model)
        self.addFastButton = wx.Button(btnPanel, wx.ID_ANY, _("Download &Fast Model"))
        # Translators: text of a button to remove a language from Tesseract OCR Engine
        self.removeButton = wx.Button(btnPanel, wx.ID_REMOVE, _("&Remove"))
        self.Bind(wx.EVT_BUTTON, self.onAdd, self.addFastButton)
        self.Bind(wx.EVT_BUTTON, self.onAdd, self.addBestButton)
        self.Bind(wx.EVT_BUTTON, self.onRemove, id=wx.ID_REMOVE)
        self.Bind(
            wx.EVT_LIST_ITEM_FOCUSED,
            self.onListFocusChanged,
            self.tesseractLanguageList,
        )
        AsyncSnakDialog(
            task=tesseract_download.get_tesseract_download_info,
            done_callback=self._on_tesseract_dl_info,
            message=_("Getting download information, please wait..."),
            parent=self,
        )

    def getButtons(self, parent):
        """Return the dialog's bottom button sizer (a single Close button)."""
        btnsizer = wx.StdDialogButtonSizer()
        # Translators: the label of a button to close a dialog
        btnsizer.AddButton(wx.Button(self, wx.ID_CANCEL, _("&Close")))
        btnsizer.Realize()
        return btnsizer

    def _on_tesseract_dl_info(self, future):
        """Async callback: record the online language list and (re)populate
        the list view."""
        if (
            info := tesseract_download.get_tesseract_download_info_from_future(
                future, self
            )
        ) is None:
            return
        self.online_languages = info.languages
        self.populate_list()

    def populate_list(self):
        """Rebuild the list from installed languages plus any online-only
        ones; each row is an (installed: bool, LocaleInfo) tuple.
        Disables the action buttons when no languages are available."""
        language_identifiers = set(
            (True, lang.given_locale_name)
            for lang in TesseractOcrEngine.get_recognition_languages()
        )
        _installed_langs = {lang[1].lower() for lang in language_identifiers}
        language_identifiers.update(
            (False, lang)
            for lang in self.online_languages
            if lang.lower() not in _installed_langs
        )
        # Installed languages (True) sort before online-only ones (False)
        # because of reverse=True.
        languages = [
            (
                lang[0],
                LocaleInfo.from_three_letter_code(lang[1]),
            )
            for lang in sorted(language_identifiers, key=lambda l: l, reverse=True)
        ]
        column_defn = [
            ColumnDefn(
                # Translators: the title of a column in the Tesseract language list
                _("Language"),
                "left",
                450,
                lambda lang: lang[1].description,
            ),
            ColumnDefn(
                # Translators: the title of a column in the Tesseract language list
                _("Installed"),
                "center",
                100,
                lambda lang: _("Yes") if lang[0] else _("No"),
            ),
        ]
        self.tesseractLanguageList.set_columns(column_defn)
        self.tesseractLanguageList.set_objects(languages, focus_item=0)
        # Maintain the state of the list
        if not any(languages):
            self.addBestButton.Enable(False)
            self.addFastButton.Enable(False)
            self.removeButton.Enable(False)
            self.btnPanel.Enable(False)

    def onAdd(self, event):
        """Start an async download of the selected language; the firing
        button decides the model variant ("best" vs "fast")."""
        if (selected := self.tesseractLanguageList.get_selected()) is None:
            return
        lang = selected[1]
        variant = "best" if event.GetEventObject() == self.addBestButton else "fast"
        AsyncSnakDialog(
            task=tesseract_download.get_tesseract_download_info,
            done_callback=partial(
                self._on_download_language, lang.given_locale_name, variant
            ),
            message=_("Getting download information, please wait..."),
            parent=self,
        )

    def onRemove(self, event):
        """Delete the selected language's data file after confirmation,
        then refresh the list."""
        if (selected := self.tesseractLanguageList.get_selected()) is None:
            return
        lang = selected[1]
        msg = wx.MessageBox(
            # Translators: content of a messagebox
            _("Are you sure you want to remove language:\n{lang}?").format(
                lang=lang.description
            ),
            # Translators: title of a messagebox
            _("Confirm"),
            style=wx.YES_NO | wx.ICON_WARNING,
        )
        if msg == wx.NO:
            return
        try:
            tesseract_download.get_language_path(lang.given_locale_name).unlink()
            self.populate_list()
        # NOTE(review): bare except swallows everything, including
        # KeyboardInterrupt/SystemExit — narrowing to OSError looks intended.
        except:
            log.exception(f"Could not remove language {lang}", exc_info=True)

    def onListFocusChanged(self, event):
        """Toggle the action buttons to match the focused row's installed
        state (download for online-only rows, remove for installed ones)."""
        if (selected := self.tesseractLanguageList.get_selected()) is not None:
            is_installed = selected[0]
            self.addBestButton.Enable(not is_installed)
            self.addFastButton.Enable(not is_installed)
            self.removeButton.Enable(is_installed)

    def _on_download_language(self, lang_name, variant, future):
        """Async callback: with download info resolved, confirm overwrite
        of any existing model file and start the actual download behind a
        progress dialog."""
        if (
            info := tesseract_download.get_tesseract_download_info_from_future(
                future, self
            )
        ) is None:
            return
        if lang_name not in info.languages:
            log.debug(f"Could not find download info for language {lang_name}")
            return
        target_file = tesseract_download.get_language_path(lang_name)
        if target_file.exists():
            msg = wx.MessageBox(
                # Translators: content of a messagebox
                _(
                    "A version of the selected language model already exists.\n"
                    "Are you sure you want to replace it."
                ),
                # Translators: title of a messagebox
                _("Confirm"),
                style=wx.YES_NO | wx.ICON_WARNING,
                parent=self,
            )
            if msg == wx.NO:
                return
            try:
                target_file.unlink(missing_ok=True)
            except:
                # NOTE(review): bare except; silently aborts when the old
                # file cannot be removed — consider logging/narrowing.
                return
        progress_dlg = RobustProgressDialog(
            wx.GetApp().mainFrame,
            # Translators: title of a progress dialog
            _("Downloading Language"),
            # Translators: content of a progress dialog
            _("Getting download information..."),
            maxvalue=100,
            can_hide=True,
            can_abort=True,
        )
        url = info.get_language_download_url(lang_name, variant=variant)
        threaded_worker.submit(
            tesseract_download.download_language, url, target_file, progress_dlg
        ).add_done_callback(
            lambda future: wx.CallAfter(
                self._after_download_language, progress_dlg, future
            )
        )

    def _after_download_language(self, progress_dlg, future):
        """Main-thread callback after a language download finishes: notify
        the user of success or failure and refresh the list on success."""
        progress_dlg.Dismiss()
        try:
            if future.result():
                wx.GetApp().mainFrame.notify_user(
                    # Translators: title of a messagebox
                    _("Language Added"),
                    _("The Language Model was downloaded successfully."),
                    parent=self,
                )
                self.populate_list()
        except ConnectionError:
            # NOTE(review): not an f-string — "{url}" is logged literally,
            # and `url` is not in scope in this method anyway; confirm the
            # intended message.
            log.exception("Failed to download language data from {url}", exc_info=True)
            wx.GetApp().mainFrame.notify_user(
                # Translators: title of a messagebox
                _("Connection Error"),
                # Translators: content of a messagebox
                _(
                    "Failed to download language data.\nPlease check your internet connection."
                ),
                icon=wx.ICON_ERROR,
            )
        except:
            # NOTE(review): same literal "{url}" placeholder as above; also a
            # bare except — consider `except Exception:`.
            log.exception("Failed to install language data from {url}", exc_info=True)
            wx.GetApp().mainFrame.notify_user(
                # Translators: title of a messagebox
                _("Error"),
                # Translators: content of a messagebox
                _("Failed to install language data.\nPlease try again later."),
                icon=wx.ICON_ERROR,
                parent=self,
            )
| 39.232877
| 105
| 0.610934
| 18,718
| 0.933659
| 0
| 0
| 187
| 0.009328
| 0
| 0
| 3,639
| 0.181514
|
0c85c0c3fc3e88d3d1512b447e6a7c16569279b2
| 1,376
|
py
|
Python
|
nginx_rtmp_wizard/models.py
|
Gerhut/nginx-rtmp-wizard
|
c821c3bb262503ee26408b8b3bf4a252b49a29d6
|
[
"Unlicense"
] | null | null | null |
nginx_rtmp_wizard/models.py
|
Gerhut/nginx-rtmp-wizard
|
c821c3bb262503ee26408b8b3bf4a252b49a29d6
|
[
"Unlicense"
] | 1
|
2021-06-10T20:32:59.000Z
|
2021-06-10T20:32:59.000Z
|
nginx_rtmp_wizard/models.py
|
Gerhut/nginx-rtmp-wizard
|
c821c3bb262503ee26408b8b3bf4a252b49a29d6
|
[
"Unlicense"
] | null | null | null |
from django.conf import settings
from django.core import validators
from django.db import models
DEFAULT_RTMP_PORT = 1935
class Server(models.Model):
    """An nginx-rtmp server block, identified by the TCP port it listens on."""

    # Unique, non-privileged listen port (1024-65535).
    listen = models.PositiveIntegerField(
        default=DEFAULT_RTMP_PORT,
        unique=True,
        validators=[
            validators.MinValueValidator(1024),
            validators.MaxValueValidator(65535)
        ])

    def __str__(self):
        # Spell out the port only when it differs from the RTMP default,
        # so the common case yields a short URL.
        if self.listen != DEFAULT_RTMP_PORT:
            return 'rtmp://{}:{}'.format(settings.RTMP_HOSTNAME, self.listen)
        return 'rtmp://{}'.format(settings.RTMP_HOSTNAME)
class Application(models.Model):
    """An rtmp `application` block nested under a Server."""
    # Owning server; deleting the server cascades to its applications.
    server = models.ForeignKey(Server, on_delete=models.CASCADE)
    # Application name as it appears in the stream URL path.
    name = models.SlugField(default='live')
    # Presumably maps to nginx-rtmp's `live on;` toggle — confirm in the
    # config template that renders these models.
    live = models.BooleanField(default=False)
    def __str__(self):
        """Full address: the parent server URL plus the application name."""
        return '{}/{}'.format(self.server, self.name)
    class Meta:
        # Application names may repeat across servers, but must be unique
        # within one server.
        constraints = [
            models.UniqueConstraint(
                fields=['server', 'name'],
                name='unique_server_application_name')]
class Push(models.Model):
    """A relay target: one `push` directive under an Application."""
    # Application whose stream is relayed; removed with the application.
    application = models.ForeignKey(Application, on_delete=models.CASCADE)
    # Destination URL, restricted to the rtmp:// scheme.
    url = models.CharField(
        max_length=255,
        unique=True,
        validators=[
            validators.URLValidator(schemes=['rtmp'])
        ])
    def __str__(self):
        """Render as the literal nginx-rtmp `push <url>;` directive line."""
        return 'push {};'.format(self.url)
| 27.52
| 77
| 0.634448
| 1,244
| 0.90407
| 0
| 0
| 0
| 0
| 0
| 0
| 100
| 0.072674
|
0c873f69a18440e8e7c7b2463204d9290fbcbf4a
| 6,513
|
py
|
Python
|
fsmonitor/polling.py
|
ljmccarthy/fsmonitor
|
4d84d9817dce7b274cb4586b5c2091dea96982f9
|
[
"MIT"
] | 26
|
2018-03-24T06:38:19.000Z
|
2022-02-18T10:22:51.000Z
|
fsmonitor/polling.py
|
ljmccarthy/fsmonitor
|
4d84d9817dce7b274cb4586b5c2091dea96982f9
|
[
"MIT"
] | 5
|
2018-06-19T21:35:00.000Z
|
2018-06-26T21:11:38.000Z
|
fsmonitor/polling.py
|
ljmccarthy/fsmonitor
|
4d84d9817dce7b274cb4586b5c2091dea96982f9
|
[
"MIT"
] | 9
|
2018-06-19T21:35:53.000Z
|
2022-03-26T17:01:11.000Z
|
# Copyright (c) 2012 Luke McCarthy <luke@iogopro.co.uk>
#
# This is free software released under the MIT license.
# See COPYING file for details, or visit:
# http://www.opensource.org/licenses/mit-license.php
#
# The file is part of FSMonitor, a file-system monitoring library.
# https://github.com/shaurz/fsmonitor
import sys, os, time, threading, errno
from .common import FSEvent, FSMonitorError
def get_dir_contents(path):
    """Return a list of (name, stat_result) pairs for each entry in *path*.

    Entries appear in ``os.listdir`` order; raises OSError if *path* cannot
    be listed or an entry cannot be stat'ed.
    """
    entries = []
    for name in os.listdir(path):
        entries.append((name, os.stat(os.path.join(path, name))))
    return entries
class FSMonitorDirWatch(object):
    """Polling watch over a directory's (name, stat) listing."""

    def __init__(self, path, flags, user):
        self.path = path
        self.flags = flags
        self.user = user
        self.enabled = True
        self._timestamp = time.time()
        try:
            contents = get_dir_contents(path)
        except OSError as e:
            # Missing path: start out marked deleted; other stat errors
            # leave the watch alive with an empty listing.
            self._contents = []
            self._deleted = (e.errno == errno.ENOENT)
        else:
            self._contents = contents
            self._deleted = False

    def __repr__(self):
        return "<FSMonitorDirWatch %r>" % self.path

    @classmethod
    def new_state(cls, path):
        """Take a fresh (name, stat) snapshot of *path*."""
        return get_dir_contents(path)

    def getstate(self):
        return self._contents

    def delstate(self):
        self._contents = []
        self._deleted = True

    def setstate(self, state):
        self._contents = state
        self._deleted = False

    state = property(getstate, setstate, delstate)
class FSMonitorFileWatch(object):
    """Polling watch over a single file, tracking its stat result."""

    def __init__(self, path, flags, user):
        self.path = path
        self.flags = flags
        self.user = user
        self.enabled = True
        self._timestamp = time.time()
        try:
            stat_result = os.stat(path)
        except OSError as e:
            # Missing file: start out marked deleted; other errors leave
            # the watch alive with no cached stat.
            self._stat = None
            self._deleted = (e.errno == errno.ENOENT)
        else:
            self._stat = stat_result
            self._deleted = False

    def __repr__(self):
        return "<FSMonitorFileWatch %r>" % self.path

    @classmethod
    def new_state(cls, path):
        """Take a fresh stat snapshot of *path*."""
        return os.stat(path)

    def getstate(self):
        return self._stat

    def delstate(self):
        self._stat = None
        self._deleted = True

    def setstate(self, state):
        self._stat = state
        self._deleted = False

    state = property(getstate, setstate, delstate)
class FSMonitorWatch(object):
    """Directory watch variant without the `state` property or `new_state`."""

    def __init__(self, path, flags, user):
        self.path = path
        self.flags = flags
        self.user = user
        self.enabled = True
        self._timestamp = time.time()
        try:
            contents = get_dir_contents(path)
        except OSError as e:
            self._contents = []
            self._deleted = (e.errno == errno.ENOENT)
        else:
            self._contents = contents
            self._deleted = False

    def __repr__(self):
        return "<FSMonitorWatch %r>" % self.path
def _compare_contents(watch, new_contents, events_out, before):
    """Diff a directory watch's cached listing against *new_contents*,
    appending Delete/Access/Modify/Create FSEvents to *events_out*."""
    new_by_name = dict(new_contents)
    old_names = frozenset(name for name, _ in watch._contents)
    # First pass over the cached listing: deletions and per-file changes.
    for name, old_stat in watch._contents:
        current = new_by_name.get(name)
        if current is None:
            events_out.append(FSEvent(watch, FSEvent.Delete, name))
        else:
            _compare_stat(watch, current, events_out, before, old_stat, name)
    # Second pass: anything present now but not before is a creation.
    for name, _ in new_contents:
        if name not in old_names:
            events_out.append(FSEvent(watch, FSEvent.Create, name))
def _compare_stat(watch, new_stat, events_out, before, old_stat, filename):
    """Append Access/Modify FSEvents for one file based on stat deltas."""
    atime_changed = new_stat.st_atime != old_stat.st_atime
    # Only report atime changes strictly older than the *before* snapshot
    # timestamp.
    if atime_changed and new_stat.st_atime < before:
        events_out.append(FSEvent(watch, FSEvent.Access, filename))
    if old_stat.st_mtime != new_stat.st_mtime:
        events_out.append(FSEvent(watch, FSEvent.Modify, filename))
def round_fs_resolution(t):
    """Truncate timestamp *t* to the platform's assumed timestamp granularity.

    On Windows timestamps are rounded down to 2-second steps; elsewhere to
    whole seconds.
    """
    if sys.platform != "win32":
        return t // 1
    return t // 2 * 2
class FSMonitor(object):
    """Polling-based monitor owning directory and file watches; produces
    FSEvent objects from read_events()."""
    def __init__(self):
        self.__lock = threading.Lock()  # guards both watch sets
        self.__dir_watches = set()
        self.__file_watches = set()
        self.polling_interval = 0.5  # minimum seconds between polls of one watch
    @property
    def watches(self):
        # Snapshot of all registered watches (directory watches first).
        with self.__lock:
            return list(self.__dir_watches) + list(self.__file_watches)
    def add_dir_watch(self, path, flags=FSEvent.All, user=None):
        """Register and return a directory watch on *path*."""
        watch = FSMonitorDirWatch(path, flags, user)
        with self.__lock:
            self.__dir_watches.add(watch)
        return watch
    def add_file_watch(self, path, flags=FSEvent.All, user=None):
        """Register and return a single-file watch on *path*."""
        watch = FSMonitorFileWatch(path, flags, user)
        with self.__lock:
            self.__file_watches.add(watch)
        return watch
    def remove_watch(self, watch):
        """Unregister *watch*; silently a no-op if it is not registered."""
        with self.__lock:
            if watch in self.__dir_watches:
                self.__dir_watches.discard(watch)
            elif watch in self.__file_watches:
                self.__file_watches.discard(watch)
    def remove_all_watches(self):
        """Drop every registered watch."""
        with self.__lock:
            self.__dir_watches.clear()
            self.__file_watches.clear()
    def enable_watch(self, watch, enable=True):
        """(Re-)enable polling of *watch*."""
        watch.enabled = enable
    def disable_watch(self, watch):
        """Stop polling *watch* without unregistering it."""
        watch.enabled = False
    def read_events(self, timeout=None):
        """Poll every watch once and return the accumulated FSEvent list.

        NOTE(review): the *timeout* parameter is currently unused, and
        *start_time* below is assigned but never read.
        """
        now = start_time = time.time()
        watches = self.watches
        # Visit the most "overdue" watches (largest timestamp distance) first.
        watches.sort(key=lambda watch: abs(now - watch._timestamp), reverse=True)
        events = []
        for watch in watches:
            now = time.time()
            if watch._timestamp < now:
                tdiff = now - watch._timestamp
                if tdiff < self.polling_interval:
                    # Throttle: wait out the remainder of the polling interval.
                    time.sleep(self.polling_interval - tdiff)
                watch._timestamp = now
            if not watch.enabled:
                continue
            # Timestamp taken before snapshotting, truncated to filesystem
            # resolution; _compare_stat uses it to filter atime events.
            before = round_fs_resolution(time.time())
            try:
                new_state = watch.new_state(watch.path)
            except OSError as e:
                if e.errno == errno.ENOENT:
                    if not watch._deleted:
                        # First time the path is seen gone: emit DeleteSelf once.
                        del watch.state
                        events.append(FSEvent(watch, FSEvent.DeleteSelf))
            else:
                if isinstance(watch, FSMonitorDirWatch):
                    _compare_contents(watch, new_state, events, before)
                elif isinstance(watch, FSMonitorFileWatch):
                    _compare_stat(watch, new_state, events, before,
                                  watch.state, watch.path)
                watch.state = new_state
        return events
| 29.876147
| 81
| 0.60172
| 4,923
| 0.755873
| 0
| 0
| 359
| 0.055121
| 0
| 0
| 385
| 0.059113
|
0c88e62584b3021b90b1b02ca77fbe06e08b2b02
| 2,302
|
py
|
Python
|
cogs/commands.py
|
ibx34/logger
|
2b99d579c5313434ef82fe500bd45aef94627696
|
[
"Apache-2.0"
] | null | null | null |
cogs/commands.py
|
ibx34/logger
|
2b99d579c5313434ef82fe500bd45aef94627696
|
[
"Apache-2.0"
] | 1
|
2021-01-21T13:06:17.000Z
|
2021-01-21T14:15:35.000Z
|
cogs/commands.py
|
ibx34/logger
|
2b99d579c5313434ef82fe500bd45aef94627696
|
[
"Apache-2.0"
] | null | null | null |
import math
import random
import string
from datetime import datetime, timedelta
from textwrap import dedent
import config
import discord
from discord.ext import commands
import asyncio
class etc(commands.Cog):
    """Miscellaneous bookkeeping commands for the infraction logger bot."""
    def __init__(self, bot):
        self.bot = bot
    @commands.command(name="clear")
    async def _messages_clear(self, ctx):
        """Purge this bot's own messages from the current channel."""
        def me(m):
            # Keep only messages authored by the bot itself.
            return m.author == self.bot.user
        await ctx.channel.purge(check=me)
        await ctx.message.add_reaction("👌")
    @commands.command(name="recent")
    async def _recent_cases(self, ctx):
        """List every stored infraction for this guild in a single message."""
        async with self.bot.pool.acquire() as conn:
            cases = await conn.fetch("SELECT * FROM infractions WHERE guild = $1",ctx.guild.id)
            case_list = ""
            for x in cases:
                case_list += f"**{x['real_id']}** | Serial: {x['id']} | {x['moderator']} | {x['target']} | {x['guild']} | {x['time_punished']} | {x['reason']}\n"
            # NOTE(review): a guild with many cases can exceed Discord's
            # 2000-character message limit here.
            await ctx.send(case_list)
    @commands.command(name="reason")
    async def _update_reason(self,ctx,case,*,new_reason):
        """Rewrite the stored reason of an existing case.

        Any of the listed shorthand tokens refers to the guild's entry in
        self.bot.cases — presumably the most recent case id; confirm.
        """
        if case.lower() in ['|','^','%','&','/','?','recent','r','~','-']:
            case = self.bot.cases[ctx.guild.id]
        async with self.bot.pool.acquire() as conn:
            fetch_case = await conn.fetchrow("SELECT * FROM infractions WHERE real_id = $1 AND guild = $2",int(case),ctx.guild.id)
            if not fetch_case:
                return await ctx.send(":wood: not a case.")
            try:
                await conn.execute("UPDATE infractions SET reason = $1 WHERE real_id = $2 AND guild = $3",new_reason,int(case),ctx.guild.id)
            except Exception as err:
                return await ctx.send(f"There was an error.\n```{err}```")
            await ctx.send(":ok_hand:")
    @commands.command(name="reset")
    async def _reset_cases(self,ctx):
        """Delete every infraction record for this guild (irreversible)."""
        async with self.bot.pool.acquire() as conn:
            try:
                await conn.execute("DELETE FROM infractions WHERE guild = $1",ctx.guild.id)
                del self.bot.cases[ctx.guild.id]
            except Exception as err:
                return await ctx.send(f"There was an error.\n```{err}```")
            await ctx.send(":ok_hand:")
def setup(bot):
    """Entry point used by discord.py's load_extension to register this cog."""
    cog = etc(bot)
    bot.add_cog(cog)
| 35.415385
| 172
| 0.577324
| 2,072
| 0.898915
| 0
| 0
| 1,971
| 0.855098
| 1,825
| 0.791757
| 534
| 0.23167
|
0c8967029f04eaa76a7e75ae4334c986e1287fc3
| 1,887
|
py
|
Python
|
dataset_generator/learning/imitation/tensorflow/_layers.py
|
rjean/duckie-segmentation
|
5e720e1a96ef61c4560823030549ac1d5d16e2a4
|
[
"Apache-2.0"
] | 1
|
2021-02-03T02:23:34.000Z
|
2021-02-03T02:23:34.000Z
|
dataset_generator/learning/imitation/tensorflow/_layers.py
|
rjean/mobile-segmentation
|
5e720e1a96ef61c4560823030549ac1d5d16e2a4
|
[
"Apache-2.0"
] | null | null | null |
dataset_generator/learning/imitation/tensorflow/_layers.py
|
rjean/mobile-segmentation
|
5e720e1a96ef61c4560823030549ac1d5d16e2a4
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
L2_LAMBDA = 1e-04
def _residual_block(x, size, dropout=False, dropout_prob=0.5, seed=None):
    """Residual branch: two BN -> ReLU -> 3x3 conv stages, the first strided by 2.

    NOTE(review): `dropout_prob` is forwarded as tf.nn.dropout's second
    positional argument, which in TF1 is keep_prob — confirm the intended
    semantics.
    """
    out = tf.layers.batch_normalization(x)  # TODO: check if the defaults in Tf are the same as in Keras
    out = tf.nn.relu(out)
    out = tf.layers.conv2d(
        out,
        filters=size,
        kernel_size=3,
        strides=2,
        padding="same",
        kernel_initializer=tf.keras.initializers.he_normal(seed=seed),
        kernel_regularizer=tf.keras.regularizers.l2(L2_LAMBDA),
    )
    if dropout:
        out = tf.nn.dropout(out, dropout_prob, seed=seed)

    out = tf.layers.batch_normalization(out)
    out = tf.nn.relu(out)
    out = tf.layers.conv2d(
        out,
        filters=size,
        kernel_size=3,
        padding="same",
        kernel_initializer=tf.keras.initializers.he_normal(seed=seed),
        kernel_regularizer=tf.keras.regularizers.l2(L2_LAMBDA),
    )
    if dropout:
        out = tf.nn.dropout(out, dropout_prob, seed=seed)
    return out
def one_residual(x, keep_prob=0.5, seed=None):
    """Stem conv + max-pool followed by one residual block with a 1x1 strided
    projection shortcut; returns the flattened feature map."""
    stem = tf.layers.conv2d(
        x,
        filters=32,
        kernel_size=5,
        strides=2,
        padding="same",
        kernel_initializer=tf.keras.initializers.he_normal(seed=seed),
        kernel_regularizer=tf.keras.regularizers.l2(L2_LAMBDA),
    )
    stem = tf.layers.max_pooling2d(stem, pool_size=3, strides=2)

    # Residual branch (dropout stays at _residual_block's default: disabled).
    branch = _residual_block(stem, 32, dropout_prob=keep_prob, seed=seed)

    # 1x1 strided projection so the shortcut matches the branch's shape.
    shortcut = tf.layers.conv2d(
        stem,
        filters=32,
        kernel_size=1,
        strides=2,
        padding="same",
        kernel_initializer=tf.keras.initializers.he_normal(seed=seed),
        kernel_regularizer=tf.keras.regularizers.l2(L2_LAMBDA),
    )
    merged = tf.keras.layers.add([branch, shortcut])
    return tf.layers.flatten(merged)
| 29.484375
| 109
| 0.650768
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 84
| 0.044515
|
0c8a7a498a0f1b713f01a2cb053d95a45b749576
| 3,071
|
py
|
Python
|
test_API/demo4.py
|
UppASD/aiida-uppasd
|
f3f26d523280cb7484ad24c826ad275a6d329c01
|
[
"MIT"
] | 2
|
2020-12-03T13:29:33.000Z
|
2022-01-03T11:36:24.000Z
|
test_API/demo4.py
|
UppASD/aiida-uppasd
|
f3f26d523280cb7484ad24c826ad275a6d329c01
|
[
"MIT"
] | null | null | null |
test_API/demo4.py
|
UppASD/aiida-uppasd
|
f3f26d523280cb7484ad24c826ad275a6d329c01
|
[
"MIT"
] | 2
|
2021-04-22T07:19:21.000Z
|
2022-03-16T21:49:29.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 23 15:15:05 2021
@author: qichen
"""
from aiida.plugins import DataFactory, CalculationFactory
from aiida.engine import run
from aiida.orm import Code, SinglefileData, Int, Float, Str, Bool, List, Dict, ArrayData, XyData, SinglefileData, FolderData, RemoteData
import numpy as np
import aiida
import os
from aiida.engine import submit
aiida.load_profile()
code = Code.get_from_string('uppasd_dev@uppasd_local')
aiida_uppasd = CalculationFactory('UppASD_core_calculations')
builder = aiida_uppasd.get_builder()
#pre-prepared files
dmdata = SinglefileData(
file=os.path.join(os.getcwd(), "input_files2", 'dmdata'))
jij = SinglefileData(
file=os.path.join(os.getcwd(), "input_files2", 'jij'))
momfile = SinglefileData(
file=os.path.join(os.getcwd(), "input_files2", 'momfile'))
posfile = SinglefileData(
file=os.path.join(os.getcwd(), "input_files2", 'posfile'))
qfile = SinglefileData(
file=os.path.join(os.getcwd(), "input_files2", 'qfile'))
# inpsd.dat file selection
inpsd_dict = {
'simid': Str('SCsurf_T'),
'ncell': Str('128 128 1'),
'BC': Str('P P 0 '),
'cell': Str('''1.00000 0.00000 0.00000
0.00000 1.00000 0.00000
0.00000 0.00000 1.00000'''),
'do_prnstruct': Int(2),
'maptype': Int(2),
'SDEalgh': Int(1),
'Initmag': Int(3),
'ip_mode': Str('Q'),
'qm_svec': Str('1 -1 0 '),
'qm_nvec': Str('0 0 1'),
'mode': Str('S'),
'temp': Float(0.000),
'damping': Float(0.500),
'Nstep': Int(5000),
'timestep': Str('1.000d-15'),
'qpoints': Str('F'),
'plotenergy': Int(1),
'do_avrg': Str('Y'),
}
r_l = List(list=['coord.{}.out'.format(inpsd_dict['simid'].value),
'qm_minima.{}.out'.format(inpsd_dict['simid'].value),
'qm_sweep.{}.out'.format(inpsd_dict['simid'].value),
'qpoints.{}.out'.format(inpsd_dict['simid'].value),
'totenergy.{}.out'.format(inpsd_dict['simid'].value),
'averages.{}.out'.format(inpsd_dict['simid'].value),
'inp.{}.out'.format(inpsd_dict['simid'].value),
'qm_restart.{}.out'.format(inpsd_dict['simid'].value),
'restart.{}.out'.format(inpsd_dict['simid'].value),
'qpoints.out',
'fort.2000'])
# set up calculation
inpsd = Dict(dict=inpsd_dict)
builder.code = code
builder.dmdata = dmdata
builder.jij = jij
builder.momfile = momfile
builder.posfile = posfile
builder.qfile = qfile
builder.inpsd = inpsd
builder.retrieve_list_name = r_l
builder.inpsd_dat_exist = Int(0)
builder.metadata.options.resources = {'num_machines': 1}
builder.metadata.options.max_wallclock_seconds = 120
builder.metadata.options.input_filename = 'inpsd.dat'
builder.metadata.options.parser_name = 'UppASD_core_parsers'
builder.metadata.label = 'Demo4'
builder.metadata.description = 'Test demo4 for UppASD-AiiDA'
job_node = submit(builder)
print('Job submitted, PK: {}'.format(job_node.pk))
| 32.670213
| 136
| 0.643113
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,019
| 0.331814
|
0c8aee4b13af709adea28d410ab48e9fcca43ac4
| 83
|
py
|
Python
|
pyqt_horizontal_selection_square_graphics_view/__init__.py
|
berty-2007/pyqt-horizontal-selection-square-graphics-view
|
29d3d6f63a2d464b0c4b1d64c451439de6f1eded
|
[
"MIT"
] | 1
|
2021-12-23T14:44:07.000Z
|
2021-12-23T14:44:07.000Z
|
pyqt_horizontal_selection_square_graphics_view/__init__.py
|
berty-2007/pyqt-horizontal-selection-square-graphics-view
|
29d3d6f63a2d464b0c4b1d64c451439de6f1eded
|
[
"MIT"
] | null | null | null |
pyqt_horizontal_selection_square_graphics_view/__init__.py
|
berty-2007/pyqt-horizontal-selection-square-graphics-view
|
29d3d6f63a2d464b0c4b1d64c451439de6f1eded
|
[
"MIT"
] | null | null | null |
from .horizontalSelectionSquareGraphicsView import *
from .selectionSquare import *
| 41.5
| 52
| 0.86747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0c8b19dac043c6f1fd044d080e97281c266335ea
| 557
|
py
|
Python
|
src/tests/t_kadm5_hook.py
|
tizenorg/platform.upstream.krb5
|
a98efd0c8f97aba9d71c2130c048f1adc242772e
|
[
"MIT"
] | 372
|
2016-10-28T10:50:35.000Z
|
2022-03-18T19:54:37.000Z
|
src/tests/t_kadm5_hook.py
|
tizenorg/platform.upstream.krb5
|
a98efd0c8f97aba9d71c2130c048f1adc242772e
|
[
"MIT"
] | 317
|
2016-11-02T17:41:48.000Z
|
2021-11-08T20:28:19.000Z
|
src/tests/t_kadm5_hook.py
|
tizenorg/platform.upstream.krb5
|
a98efd0c8f97aba9d71c2130c048f1adc242772e
|
[
"MIT"
] | 107
|
2016-11-03T19:25:16.000Z
|
2022-03-20T21:15:22.000Z
|
#!/usr/bin/python
# Regression test: verify that the kadm5_hook test plugin fires during
# principal creation (its "precommit" stage message must appear in the
# kadmin.local output).
from k5test import *

# Path to the kadm5_hook test plugin built in this source tree.
plugin = os.path.join(buildtop, "plugins", "kadm5_hook", "test",
                      "kadm5_hook_test.so")

# Minimal krb5.conf fragment registering the plugin for all programs.
hook_krb5_conf = {
    'all' : {
        "plugins" : {
            "kadm5_hook" : {
                "module" : "test:" + plugin
            }
        }
    }
}

realm = K5Realm(krb5_conf=hook_krb5_conf, create_user=False, create_host=False)
output = realm.run_kadminl ('addprinc -randkey test')
if "create: stage precommit" not in output:
    fail('kadm5_hook test output not found')
success('kadm5_hook')
| 24.217391
| 79
| 0.59246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 200
| 0.359066
|
0c8d8ebe08a6b4445a731df985a3abbe390f7b84
| 11,465
|
py
|
Python
|
src/visaplan/plone/tools/setup/_get_object.py
|
visaplan/plone.tools
|
080da21af710334033540cd2e7e0c63358d234d2
|
[
"Apache-2.0"
] | 1
|
2021-01-04T01:30:50.000Z
|
2021-01-04T01:30:50.000Z
|
src/visaplan/plone/tools/setup/_get_object.py
|
visaplan/plone.tools
|
080da21af710334033540cd2e7e0c63358d234d2
|
[
"Apache-2.0"
] | null | null | null |
src/visaplan/plone/tools/setup/_get_object.py
|
visaplan/plone.tools
|
080da21af710334033540cd2e7e0c63358d234d2
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*- äöü vim: sw=4 sts=4 et tw=79
"""
Tools für Produkt-Setup (Migrationsschritte, "upgrade steps"): _tree
"""
# Python compatibility:
from __future__ import absolute_import
# Standard library:
from posixpath import normpath
# Zope:
from Products.CMFCore.utils import getToolByName
# Plone:
from plone.uuid.interfaces import IUUID
# Local imports:
from visaplan.plone.tools._have import HAS_SUBPORTALS
from visaplan.plone.tools.setup._args import (
_extract_move_args,
extract_layout_switch,
extract_menu_switch,
)
from visaplan.plone.tools.setup._misc import _traversable_path
from visaplan.plone.tools.setup._o_tools import (
handle_language,
handle_layout,
handle_menu,
handle_title,
make_notes_logger,
)
if HAS_SUBPORTALS:
from visaplan.plone.tools.setup._o_tools import handle_subportal
# Local imports:
from visaplan.plone.tools.setup._reindex import make_reindexer
# Logging / Debugging:
import logging
# Exceptions:
__all__ = [
'make_object_getter',
]
# see also _make_folder.py: make_subfolder_creator
def make_object_getter(context, **kwargs):
    """
    Return a function which finds an object ...
    - by 'id', if a 'parent' is given;
    - by 'uid' (if given);
    - by 'path' (relative to the portal object).
    Getting the object is the first part only; the function is used to make
    sure the object has certain properties as well!
    Further understood options are:
    - title - a title to be checked and/or set
    - language - a language to be checked and/or set
    - canonical - an object which was returned by some .getCanonical method
                  call. Usually requires a language value as well.
    - info - a (usually empty) dict; see `return_tuple` below.
    Options to the factory:
    - keys - the sequence of the keys 'id', 'uid' and 'path',
             specifying the order in which they are tried
    - verbose - log informations about problems
    - logger - a logger to use if verbose
    ... defaults for the function:
    - set_title - set the title (if given and not matching)
    - set_uid - set the UUID (if given and not matching)
    - set_language - set the language (if given and not matching)
    - set_canonical - link the canonical translation (if given)
    - reindex - True: reindex in any case,
                False: ... under no circumstances,
                None: ... if changes were made (default).
    - return_tuple - if True, return a 2-tuple (object, info);
                     by default, only the object (or None) is returned.
    Unless return_tuple=True is specified,
    the returned function will simply return the object or None;
    in this (standard) case, you can deliver the info dictionary yourself,
    which will be changed in-place, to get access to the
    detailed information, including an "updates" subdict.
    The "set_..." options mean, "set the object property", e.g. call
    setLanguage if a language key is given and mismatching;
    the "get_..." options do the opposite: they write the found value to the
    info dictionary, to be more precise: to the info['updates'] dictionary.
    Thus, to make use of the get_... results, an "info" dictionary must be
    provided.
    """
    pop = kwargs.pop
    keys = pop('keys', None) or ['path', 'id', 'uid']
    parent = pop('parent', None)
    portal = getToolByName(context, 'portal_url').getPortalObject()
    reference_catalog = getToolByName(context, 'reference_catalog')
    verbose = pop('verbose', 1)
    if 'logger' in kwargs:
        logger = pop('logger')
    elif verbose:
        logger = logging.getLogger('get_object')
    else:
        # BUGFIX: logger was left undefined when verbose was falsy and no
        # logger was given, causing a NameError below.
        logger = None
    # BUGFIX: set_menu was referenced below without ever being assigned,
    # which made every call of this factory raise NameError.  None keeps
    # the reindexer-building branch active (same intent as `set_menu is
    # None or set_menu`).
    set_menu = pop('set_menu', None)
    reindexer = kwargs.pop('reindexer', None)
    idxs = kwargs.pop('idxs', None)
    if reindexer is None and (set_menu is None or set_menu):
        reindexer = make_reindexer(logger=logger,
                                   context=parent,
                                   idxs=idxs)
    elif reindexer is not None and idxs is not None:
        if logger is not None:
            logger.warn('Ignoring idxs value %(idxs)r', locals())
    reindex = kwargs.pop('reindex', reindexer is not None
                                    or None)
    if reindex and reindexer is None:
        reindexer = make_reindexer(logger=logger,
                                   context=parent)
    set_title = pop('set_title', True)
    set_uid = pop('set_uid', False)
    get_uid = pop('get_uid', None)
    set_language = pop('set_language', True)
    set_canonical = pop('set_canonical', None)
    if set_canonical is None:
        set_canonical = set_language
    set_subportal = pop('set_subportal', None)
    subportal = pop('subportal', None)
    return_tuple = pop('return_tuple', False)

    def _err(msg, notes, logger=logger):
        # Record an error note and log it (if a logger is configured).
        notes.append(('ERROR', msg))
        if logger is not None:
            logger.error(msg)

    def _info(msg, notes, log=True, logger=logger):
        # Record an info note; log it only when *log* is truthy.
        notes.append(('INFO', msg))
        if log and logger is not None:
            logger.info(msg)

    def get_object(
        id=None, uid=None, path=None,
        info=None,  # specify an empty dict to get information!
        parent=parent,
        reindex=reindex,
        reindexer=reindexer,
        set_title=set_title,
        set_uid=set_uid,
        get_uid=get_uid,
        set_language=set_language,
        set_canonical=set_canonical,
        set_subportal=set_subportal,
        subportal=subportal,
        return_tuple=return_tuple,
        **kwargs):
        """
        This function is designed to be called with keyword arguments only.
        In a Python-3-only release of the package, this will be enforced!
        """
        if info is None:
            info = {}
        info.update({
            'found': False,
            'reindexed': False,
            'changes': 0,
            'notes': [],
            'specs': None,  # set to a string below
            'updates': {},
        })
        notes = info['notes']
        # for notes from _o_tools.py:
        lognotes = make_notes_logger(logger, info['notes'])
        updates = info['updates']
        tried_keys = set()
        o = None
        found_by = None
        changes = 0
        specs = []
        # Try each lookup key in the configured order until one yields an
        # object.
        for key in keys:
            if key in tried_keys:
                continue
            tried_keys.add(key)
            if key == 'id':
                if id is not None:
                    specs.append('id=%(id)r' % locals())
                    if parent is not None:
                        o = getattr(parent, id, None)
                        if o is None:
                            _err('%(parent)r.%(id)r not found!' % locals(),
                                 notes)
            elif key == 'uid':
                if uid is not None:
                    specs.append('uid=%(uid)r' % locals())
                    o = reference_catalog.lookupObject(uid)
                    if o is None:
                        _err('UID %(uid)r not found!' % locals(),
                             notes)
            elif key == 'path':
                if path is not None:
                    specs.append('path=%(path)r' % locals())
                    if normpath(path) in ('.', '/'):
                        o = portal
                        _info('path=%(path)r -> using %(portal)r!' % locals(),
                              notes)
                    else:
                        try:
                            # restrictedTraverse dislikes leading slashes, at least:
                            o = portal.restrictedTraverse(_traversable_path(path))
                        except KeyError:
                            o = None
                        if o is None:
                            _err('%(portal)r[%(path)r] not found!' % locals(),
                                 notes)
            else:
                _err('Unknown key: %(key)r' % locals(), notes)
            if found_by is None and o is not None:
                found_by = key
                break
        info['specs'] = ', '.join(specs) or 'no non-empty specifications!'
        if o is None:
            return ((o, info) if return_tuple
                    else o)
        info['found'] = True
        info['found_by'] = found_by
        if found_by == 'uid':
            # BUGFIX: `get_uid >= 2` raised TypeError on Python 3 when
            # get_uid was still None; guard with truthiness first (the
            # result is unchanged on Python 2, where None >= 2 is False).
            if get_uid and get_uid >= 2:
                updates['uid'] = uid
            else:
                if get_uid is None:
                    get_uid = True
        if get_uid or uid is not None:
            found_uid = IUUID(o, None)
            if uid is not None:
                if found_uid != uid:
                    if set_uid:
                        o._setUID(uid)
                        _info('%(o)r: old UID %(found_uid)r --> new UID %(uid)r'
                              % locals(),
                              notes)
                        changes += 1
                        if get_uid:
                            found_uid = uid
                    else:
                        _info('%(o)r: UID %(found_uid)r mismatches %(uid)r'
                              % locals(),
                              notes)
                else:
                    _info('%(o)r: checked UID (%(found_uid)r)' % locals(),
                          notes,
                          verbose > 1)
            if get_uid:
                updates['uid'] = found_uid
        # BUGFIX (all handler sections below): the handler return value used
        # to be bound to the name `notes`, shadowing info['notes'] so that
        # later _info()/_err() calls appended to a throwaway list; the
        # handler notes now use a separate name.
        # ---------- [set_]title:
        kwargs.update(set_title=set_title)
        ch, h_notes = handle_title(o, kwargs, created=False)
        changes += ch
        for tup in h_notes:
            lognotes(tup)
        # ---------- [set_]language, [set_]canonical:
        kwargs.update(set_language=set_language, set_canonical=set_canonical)
        ch, h_notes = handle_language(o, kwargs, created=False)
        changes += ch
        for tup in h_notes:
            lognotes(tup)
        # ---------- [{set,get}_]layout:
        ch, h_notes, upd = handle_layout(o, kwargs, created=False)
        changes += ch
        for tup in h_notes:
            lognotes(tup)
        updates.update(upd)  # might contain a new 'layout' key
        # ---------- [switch_]menu:
        ch, h_notes = handle_menu(o, kwargs, created=False)
        changes += ch
        for tup in h_notes:
            lognotes(tup)
        if HAS_SUBPORTALS:
            # ---------- [set_]subportal:
            kwargs.update(subportal=subportal, set_subportal=set_subportal)
            ch, h_notes = handle_subportal(o, kwargs, created=False)
            changes += ch
            for tup in h_notes:
                lognotes(tup)
        info['changes'] = changes
        if reindex is None:
            # Default: reindex only if something was changed.
            if not changes:
                _info('%(o)r not changed and not reindexed' % locals(),
                      notes)
                return ((o, info) if return_tuple
                        else o)
            reindex = True
        if not reindex:
            if changes:
                _info('%(o)r has %(changes)d changes but reindexing suppressed'
                      % locals(),
                      notes)
            return ((o, info) if return_tuple
                    else o)
        if reindexer is None:
            o.reindexObject()
        else:
            reindexer(o)
        info['reindexed'] = True
        return ((o, info) if return_tuple
                else o)

    return get_object
| 35.495356
| 84
| 0.527955
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,823
| 0.333333
|
0c90583bb1e8038246e08d81681f08ae3de8075d
| 3,738
|
py
|
Python
|
tests/test_remove.py
|
sanjaymsh/Fiona
|
7be39538f7317efec3e60b8dc722af7e8fea6d52
|
[
"BSD-3-Clause"
] | 1
|
2020-03-06T21:13:54.000Z
|
2020-03-06T21:13:54.000Z
|
tests/test_remove.py
|
sanjaymsh/Fiona
|
7be39538f7317efec3e60b8dc722af7e8fea6d52
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_remove.py
|
sanjaymsh/Fiona
|
7be39538f7317efec3e60b8dc722af7e8fea6d52
|
[
"BSD-3-Clause"
] | 1
|
2021-04-12T05:38:58.000Z
|
2021-04-12T05:38:58.000Z
|
import logging
import sys
import os
import itertools
from .conftest import requires_gpkg
import pytest
import fiona
from fiona.errors import DatasetDeleteError
def create_sample_data(filename, driver, **extra_meta):
meta = {
'driver': driver,
'schema': {
'geometry': 'Point',
'properties': {}
}
}
meta.update(extra_meta)
with fiona.open(filename, 'w', **meta) as dst:
dst.write({
'geometry': {
'type': 'Point',
'coordinates': (0, 0),
},
'properties': {},
})
assert(os.path.exists(filename))
drivers = ["ESRI Shapefile", "GeoJSON"]
kinds = ["path", "collection"]
specify_drivers = [True, False]
test_data = itertools.product(drivers, kinds, specify_drivers)
@pytest.mark.parametrize("driver, kind, specify_driver", test_data)
def test_remove(tmpdir, kind, driver, specify_driver):
"""Test various dataset removal operations"""
extension = {"ESRI Shapefile": "shp", "GeoJSON": "json"}[driver]
filename = "delete_me.{extension}".format(extension=extension)
output_filename = str(tmpdir.join(filename))
create_sample_data(output_filename, driver=driver)
if kind == "collection":
to_delete = fiona.open(output_filename, "r")
else:
to_delete = output_filename
assert os.path.exists(output_filename)
if specify_driver:
fiona.remove(to_delete, driver=driver)
else:
fiona.remove(to_delete)
assert not os.path.exists(output_filename)
def test_remove_nonexistent(tmpdir):
"""Attempting to remove a file that does not exist results in an IOError"""
filename = str(tmpdir.join("does_not_exist.shp"))
assert not os.path.exists(filename)
with pytest.raises(IOError):
fiona.remove(filename)
@requires_gpkg
def test_remove_layer(tmpdir):
filename = str(tmpdir.join("a_filename.gpkg"))
create_sample_data(filename, "GPKG", layer="layer1")
create_sample_data(filename, "GPKG", layer="layer2")
create_sample_data(filename, "GPKG", layer="layer3")
create_sample_data(filename, "GPKG", layer="layer4")
assert fiona.listlayers(filename) == ["layer1", "layer2", "layer3", "layer4"]
# remove by index
fiona.remove(filename, layer=2)
assert fiona.listlayers(filename) == ["layer1", "layer2", "layer4"]
# remove by name
fiona.remove(filename, layer="layer2")
assert fiona.listlayers(filename) == ["layer1", "layer4"]
# remove by negative index
fiona.remove(filename, layer=-1)
assert fiona.listlayers(filename) == ["layer1"]
# invalid layer name
with pytest.raises(ValueError):
fiona.remove(filename, layer="invalid_layer_name")
# invalid layer index
with pytest.raises(DatasetDeleteError):
fiona.remove(filename, layer=999)
def test_remove_layer_shapefile(tmpdir):
"""Removal of layer in shapefile actually deletes the datasource"""
filename = str(tmpdir.join("a_filename.shp"))
create_sample_data(filename, "ESRI Shapefile")
fiona.remove(filename, layer=0)
assert not os.path.exists(filename)
def test_remove_layer_geojson(tmpdir):
"""Removal of layers is not supported by GeoJSON driver
The reason for failure is slightly different between GDAL 2.2+ and < 2.2.
With < 2.2 the datasource will fail to open in write mode (IOError), while
with 2.2+ the datasource will open but the removal operation will fail (not
supported).
"""
filename = str(tmpdir.join("a_filename.geojson"))
create_sample_data(filename, "GeoJSON")
with pytest.raises((RuntimeError, IOError)):
fiona.remove(filename, layer=0)
assert os.path.exists(filename)
| 31.677966
| 81
| 0.677368
| 0
| 0
| 0
| 0
| 1,763
| 0.471643
| 0
| 0
| 1,109
| 0.296683
|
0c90a406e4f9fdbea0679ef7fa814f9200555402
| 15,023
|
py
|
Python
|
taylor_based_methods.py
|
PabloAMC/TFermion
|
ed313a7d9cae0c4ca232732bed046f56bc8594a2
|
[
"Apache-2.0"
] | 4
|
2021-12-02T09:13:16.000Z
|
2022-01-25T10:43:50.000Z
|
taylor_based_methods.py
|
PabloAMC/TFermion
|
ed313a7d9cae0c4ca232732bed046f56bc8594a2
|
[
"Apache-2.0"
] | 3
|
2021-12-21T14:22:57.000Z
|
2022-02-05T18:35:16.000Z
|
taylor_based_methods.py
|
PabloAMC/TFermion
|
ed313a7d9cae0c4ca232732bed046f56bc8594a2
|
[
"Apache-2.0"
] | null | null | null |
import math
import sympy
import numpy as np
from scipy.special import binom
import scipy
class Taylor_based_methods:
    def __init__(self, tools):
        # Helper object providing the gate-cost primitives used by the
        # formulas below (arbitrary_state_synthesis, multi_controlled_not,
        # pauli rotation synthesis, ...).
        self.tools = tools
# Taylorization (babbush2016exponential)
# Let us know calcula the cost of performing Phase Estimation.
# 1. We have already mentioned that in this case, controlling the direction of the time evolution adds negligible cost. We will also take the unitary $U$ in Phase estimation to be $U_r$. The number of segments we will have to Hamiltonian simulate in the phase estimation protocol is $r \\approx \\frac{4.7}{\\epsilon_{\\text{PEA}}}$.
# 2. Using oblivious amplitude amplification operator $G$ requires to use $\\mathcal{W}$ three times.
# 3. Each operator $G$ requires to use Prepare$(\\beta)$ twice and Select$(V)$ once.
# 4. The cost of Select$(V)$ is bounded in $8N\\lceil \\log_2\\Gamma + 1\\rceil\\frac{K(K+1)(2K+1)}{3}+ 16N K(K+1)$.
# 5. The cost of Prepare$(\\beta)$ is $(20+24\\log\\epsilon^{-1}_{SS})K$ T gates for the preparation of $\\ket{k}$; and $(10+12\\log\\epsilon^{-1}_{SS})2^{\\lceil \\log \\Gamma \\rceil + 1}K$ T gates for the implementation of the $K$ Prepare$(W)$ circuits. Here notice that $2K$ and $2^{\\lceil \\log \\Gamma \\rceil + 1}K$ rotations up to error $\\epsilon_{SS}$ will be implemented.
# Remember that
# $$ K = O\\left( \\frac{\\log(r/\\epsilon_{HS})}{\\log \\log(r/\\epsilon_{HS})} \\right)$$
# Notice that the $\\lambda$ parameters comes in the algorithm only implicitly,
# since we take the evolution time of a single segment to be $t_1 = \\ln 2/\\lambda$ such that the first segment in Phase estimation has $r = \\frac{\\lambda t_1}{\\ln 2} = 1$ as it should be.
# In general, we will need to implement $r \\approx \\frac{4.7}{\\epsilon_{PEA}}$. However, since $\\epsilon_{PEA}$ makes reference to $H$ and we are instead simulating $H \\ln 2/ \\lambda$,
# we will have to calculate the eigenvalue to precision $\\epsilon \\ln 2/ \\lambda$; so it is equivalently to fixing an initial time $t_1$ and running multiple segments in each of the $U$ operators in Phase Estimation.
def taylor_naive(self, epsilons, p_fail, lambda_value, Gamma, N):
epsilon_QPE = epsilons[0]
epsilon_HS = epsilons[1]
epsilon_S = epsilons[2]
t = np.pi/epsilon_QPE*(1/2+1/(2*p_fail))
r = np.ceil(t*lambda_value / np.log(2)) # Number of time segments
K = np.ceil( -1 + 2* np.log(2*r/epsilon_HS)/np.log(np.log(2*r/epsilon_HS)+1))
arb_state_synt = self.tools.arbitrary_state_synthesis(4*np.ceil(np.log2(N)))
epsilon_SS = epsilon_S /(r*3*2*(K*arb_state_synt + 2*K) ) # 3 from AA, 2 for for Prepare and Prepare^+, then Prepare_beta_1 and Prepare_beta_2, finally r
Select_j = 4*N*self.tools.multi_controlled_not(np.ceil(np.log2(N))+2) + 4*N + N*self.tools.multi_controlled_not(np.ceil(np.log2(N)))
# We use an accumulator that applies C-Z and upon stop applies the X or Y with phase: The 4 comes from values of q, the N from values of j;
# the first term applies the X or Y (and phase); the 4N comes from the Toffolis in the C-Z; the third term deactivates the accumulator
Select_H = 4*Select_j # 4 creation/annihilation operators per H_\gamma
QPE_adaptation = self.tools.multi_controlled_not(np.ceil(K/2) + 1)
Select_V = Select_H * K + QPE_adaptation
crot_synt = self.tools.c_pauli_rotation_synthesis(epsilon_SS)
rot_synt = self.tools.pauli_rotation_synthesis(epsilon_SS)
Prepare_beta_1 = crot_synt*K
Prepare_beta_2 = rot_synt*K*arb_state_synt
Prepare_beta = Prepare_beta_1 + Prepare_beta_2
R = self.tools.multi_controlled_not((K+1)*np.ceil(np.log2(Gamma)) + N) # The prepare qubits and the select qubits (in Jordan-Wigner there are N)
result = r*(3*(2*Prepare_beta + Select_V) + 2*R) # 3 from AA, 2 Prepare_beta for Prepare and Prepare^+
return result
def taylor_on_the_fly(self, epsilons, p_fail, N, Gamma, phi_max, dphi_max, zeta_max_i, J):
epsilon_QPE = epsilons[0]
epsilon_HS = epsilons[1]
epsilon_S = epsilons[2]
epsilon_H = epsilons[3]
eps_tay = epsilons[4]
'''
Error terms
eps_PEA: Phase estimation
eps_HS: the truncation of K
eps_S: gate synthesis
eps_H: discretization of integrals
eps_taylor: Used for arithmetic operations such as taylor series, babylon algorithm for the sqrt and CORDIC algorithm for cos
zeta_max_i: maximum nuclear charge
J: number of atoms
'''
d = 6 # Number of Gaussians per basis function
t = np.pi/epsilon_QPE*(1/2+1/(2*p_fail))
x_max = np.log(N * t/ epsilon_H)* self.tools.config_variables['xmax_mult_factor_taylor'] # eq 68 in the original paper
Vol_max_w_gamma = (2**6*phi_max**4 * x_max**5) # eq 66 in the original article
lambda_value = Gamma*Vol_max_w_gamma # eq 60 in the original article
r = np.ceil(lambda_value* t / np.log(2))
K = np.ceil( -1 + 2* np.log(2*r/epsilon_HS)/np.log(np.log(2*r/epsilon_HS)+1))
# zeta = epsilon_HS /(2*3*K*r*Gamma*Vol); eq 55 in the original article
M = lambda_value* 2*3*K*r/epsilon_H # = 6*K*r*Gamma*Vol_max_w_gamma/epsilon_H; eq 55 in the original article
epsilon_SS = epsilon_S /(r*3*2*(2*K)) # 3 from AA, 2 Prepare_beta for Prepare and Prepare^+, 2K T gates in the initial theta rotations
number_of_taylor_expansions = (((4+2+2)*d*N + (J+1))*K*2*3*r) #4+2+2 = two_body + kinetic + external_potential
eps_tay_s = eps_tay/number_of_taylor_expansions
x = sympy.Symbol('x')
exp_order = self.tools.order_find(lambda x:math.exp(zeta_max_i*(x)**2), e = eps_tay_s, xeval = x_max, function_name = 'exp')
sqrt_order = self.tools.order_find(lambda x:math.sqrt(x), e = eps_tay_s, xeval = x_max, function_name = 'sqrt')
mu = ( r*3*2*K/epsilon_H *2*(4*dphi_max + phi_max/x_max)*phi_max**3 * x_max**6 )**6
n = np.ceil(np.ceil(np.log2(mu))/3) #each coordinate is a third
sum = self.tools.sum_cost(n)
mult = self.tools.multiplication_cost(n)
div = self.tools.divide_cost(n)
tay = exp_order*sum + (exp_order-1)*(mult + div) # For the exp
babylon = sqrt_order*(div + sum) # For the sqrt
Q = N*d*((3*sum) + (3*mult +2*sum) + (mult) + tay + (3*mult)) #In parenthesis each step in the list
Qnabla = Q + N*d*(4*sum+mult+div)
R = 2*mult + sum + babylon
xi = 3*sum
two_body = xi + 4*Q + R + 4*mult
kinetic = Q + Qnabla + mult
external_potential = 2*Q + J*R + J*mult + (J-1)*sum + xi*J
sample = two_body + (kinetic + external_potential + sum)
# Notice the change of n here: it is the size of register |m>
n = np.ceil(np.log2(M))
sum = self.tools.sum_cost(n)
mult = self.tools.multiplication_cost(n)
div = self.tools.divide_cost(n)
comp = self.tools.compare_cost(max(np.ceil(np.log2(M)),np.ceil(np.log2(mu))))
kickback = 2*(mult + 3*sum + comp) #For the comparison operation. The rotation itself is Clifford, as it is a C-R(pi/2)
crot_synt = self.tools.c_pauli_rotation_synthesis(epsilon_SS)
Prepare_beta_1 = crot_synt*K
Prepare_beta_2 = ( 2*sample + kickback )*K
Prepare_beta = Prepare_beta_1 + Prepare_beta_2
Select_j = 4*N*self.tools.multi_controlled_not(np.ceil(np.log2(N))+2) + 4*N + N*self.tools.multi_controlled_not(np.ceil(np.log2(N)))
# The 4 comes from values of q, the N from values of j; the 4N comes from the Toffolis in the C-Z; the third term deactivates the accumulator
Select_H = 4*Select_j
QPE_adaptation = self.tools.multi_controlled_not(np.ceil(K/2) + 1)
Select_V = Select_H * K + QPE_adaptation
R = self.tools.multi_controlled_not((K+1)*np.log2(Gamma) + N) # The prepare qubits and the select qubits (in Jordan-Wigner there are N)
result = r*(3*(2*Prepare_beta + Select_V) + 2*R)
return result
def configuration_interaction(self, epsilons, p_fail, N, eta, alpha, gamma1, gamma2, zeta_max_i, phi_max, J):
epsilon_QPE = epsilons[0]
epsilon_HS = epsilons[1]
epsilon_S = epsilons[2]
epsilon_H = epsilons[3]
eps_tay = epsilons[4]
'''
gamma1, gamma2, alpha are defined in 28, 29 and 30 of the original paper https://iopscience.iop.org/article/10.1088/2058-9565/aa9463/meta
'''
d = 6 ## THIS IS SORT OF AN HYPERPARAMETER: THE NUMBER OF GAUSSIANS PER BASIS FUNCTION
K0 = 26*gamma1/alpha**2 + 8*np.pi*gamma2/alpha**3 + 32*np.sqrt(3)*gamma1*gamma2 # eq 37 in original article
K1 = 8*np.pi**2/alpha**3*(alpha + 2) + 1121*(8*gamma1 + np.sqrt(2)) # eq 41 in original article
K2 = 128*np.pi/alpha**6*(alpha + 2) + 2161*np.pi**2*(20*gamma1 + np.sqrt(2)) # eq 45 in original article
t = np.pi/epsilon_QPE*(1/2+1/(2*p_fail))
x_max = 1 # Default units are Angstroms. See https://en.wikipedia.org/wiki/Atomic_radius and https://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page)
Gamma = binom(eta, 2)*binom(N-eta, 2) + binom(eta,1)*binom(N-eta,1) + 1 # = d
Zq = eta
'''
Warning, we have a circular definition here of delta, mu_M_zeta and r.
In practice we compute the equality value r given by the lemmas in the paper:
r ~= r_bound_calc(r)
'''
def r_bound_calc(r):
K = np.ceil( -1 + 2* np.log(2*r/epsilon_HS)/np.log(np.log(2*r/epsilon_HS)+1))
delta = epsilon_H/(2*3*r*K) # delta is the error in calculating a single integral. There are 2*3K*r of them in the simulation,
# as r segments are simulated, for a total time of t
mu_M_zeta_bound = np.max([
672*np.pi**2/(alpha**3)*phi_max**4*x_max**5*(np.log(K2*phi_max**4*x_max**5/delta))**6,
256*np.pi**2/(alpha**3)*Zq*phi_max**2*x_max**2*(np.log(K1*Zq*phi_max**2*x_max**2/delta))**3,
32*gamma1**2/(alpha**3)*phi_max**2*x_max*(np.log(K0*phi_max**2*x_max/delta))**3
]) #This bound is so because Lemmas 1-3 are bounding aleph_{\gamma,\rho}. Taking the definition of M, it is clear.
r_bound = 2*Gamma*t*mu_M_zeta_bound/np.log(2)
return r_bound
result = scipy.optimize.minimize(fun = lambda logr: (logr - np.log(r_bound_calc(np.exp(logr))))**2, x0 = 25, tol = .05, options = {'maxiter': 5000}, method='COBYLA') # Works with COBYLA, but not with SLSQP (misses the boundaries) or trust-constr (oscillates)
logr = np.ceil(result['x'])
r = np.exp(logr)
#bound = r_bound_calc(r) #This should be close to each r, relatively speaking
#r_alt = r_bound_calc(Gamma*t) #Alternative and less accurate way of computing the result
K = np.ceil( -1 + 2* np.log(2*r/epsilon_HS)/np.log(np.log(2*r/epsilon_HS)+1))
delta = epsilon_H/(2*3*r*K)
mu_M_zeta = np.max([
672*np.pi**2/(alpha**3)*phi_max**4*x_max**5*(np.log(K2*phi_max**4*x_max**5/delta))**6,
256*np.pi**2/(alpha**3)*Zq*phi_max**2*x_max**2*(np.log(K1*Zq*phi_max**2*x_max**2/delta))**3,
32*gamma1**2/(alpha**3)*phi_max**2*x_max*(np.log(K0*phi_max**2*x_max/delta))**3
])
log2mu = np.max([
6*(np.log2(K2*phi_max**4*x_max**5) + np.log2(1/delta) + 7*np.log2(1/alpha*(np.log(K2*phi_max**4*x_max**5)+np.log(1/delta)))),
3*(np.log2(K1*Zq*phi_max**2*x_max**2)+np.log2(1/delta) + 4*np.log2(2/alpha*(np.log(K1*Zq*phi_max**2*x_max**2)+np.log(1/delta)))),
3*(np.log2(K0*phi_max**2*x_max)+np.log2(1/delta) +4*np.log2 (2/alpha*(np.log(K0*phi_max**2*x_max)+np.log(1/delta))))
])
#zeta = epsilon_H/(r*Gamma*mu*3*2*K)
log2M = np.ceil(np.log2(mu_M_zeta)+ np.log2(3*2*K*r*Gamma)+ np.log2(1/epsilon_H)) #M = mu_M_zeta*/(mu*zeta)
epsilon_SS = epsilon_S / (r*3*2*(2*K)) # 3 from AA, 2 Prepare_beta for Prepare and Prepare^+, 2K T gates in the initial theta rotations
crot_synt = self.tools.c_pauli_rotation_synthesis(epsilon_SS)
Prepare_beta = crot_synt*K
#### Qval cost computation
n = np.ceil(log2mu/3) #each coordinate is a third
x = sympy.Symbol('x')
number_of_taylor_expansions = (((2*4+2+2)*d*N + (J+1))*K*2*3*r) #2*4+2+2 = 2*two_body + kinetic + external_potential
eps_tay_s = eps_tay/number_of_taylor_expansions
exp_order = self.tools.order_find(lambda x:math.exp(zeta_max_i*(x)**2), function_name = 'exp', e = eps_tay_s, xeval = x_max)
sqrt_order = self.tools.order_find(lambda x:math.sqrt(x), function_name = 'sqrt', e = eps_tay_s, xeval = x_max)
sum = self.tools.sum_cost(n)
mult = self.tools.multiplication_cost(n)
div = self.tools.divide_cost(n)
tay = exp_order*sum + (exp_order-1)*(mult + div) # For the exp
babylon = sqrt_order*(div + sum) # For the sqrt
Q = N*d*((3*sum) + (3*mult +2*sum) + (mult) + tay + (3*mult)) #In parenthesis each step in the list
Qnabla = Q + N*d*(4*sum+mult+div)
R = 2*mult + sum + babylon
xi = 3*sum
two_body = xi + 4*Q + R + 4*mult
kinetic = Q + Qnabla + mult
external_potential = 2*Q + J*R + J*mult + (J-1)*sum + xi*J
sample_2body = 2*two_body + sum
sample_1body = kinetic + external_potential + sum
comp = self.tools.compare_cost(max(np.ceil(log2M),np.ceil(log2mu)))
kickback = 2*comp
Q_val = 2*(sample_2body + sample_1body) + kickback
### Qcol cost computation
# There will be eta registers with log2(N) qubits each
compare = self.tools.compare_cost(np.ceil(np.log2(N)))
sort = eta*(4 + compare) # 4 for the c-swap and one comparison
check = self.tools.multi_controlled_not(eta*np.ceil(np.log2(N)))
sum = self.tools.sum_cost(np.ceil(np.log2(N)))
find_alphas = 2* eta*(4*sum + check + sort + compare) #The 2 is because if it fails we have to reverse the computation
find_gammas_2y4 = 2*(3*sum + check+ sort+ compare +3*4) + find_alphas # The 3*4 is the final 3 Toffolis; the 2 is is because if it fails we have to reverse the computation
Q_col = 2*find_alphas + 2*find_gammas_2y4
Select_H = Q_val + 2*Q_col # +swaps, but they are Clifford
QPE_adaptation = self.tools.multi_controlled_not(np.ceil(K/2) + 1)
Select_V = K*Select_H + QPE_adaptation
R = self.tools.multi_controlled_not((K+1)*np.ceil(np.log2(Gamma)) + N) # The prepare qubits and the select qubits (in Jordan-Wigner there are N)
result = r*(3*(2*Prepare_beta + Select_V) + 2*R)
return result
| 55.847584
| 387
| 0.628969
| 14,934
| 0.99401
| 0
| 0
| 0
| 0
| 0
| 0
| 5,727
| 0.38119
|
0c914b89127421ea137b1e5268f255c0188586da
| 775
|
py
|
Python
|
contoh_2.py
|
sumarouno/4x4-Matrix-Keypad-Library-for-CHIP
|
42794c3460818714fccc1c5a967e151504ef2ade
|
[
"MIT"
] | null | null | null |
contoh_2.py
|
sumarouno/4x4-Matrix-Keypad-Library-for-CHIP
|
42794c3460818714fccc1c5a967e151504ef2ade
|
[
"MIT"
] | null | null | null |
contoh_2.py
|
sumarouno/4x4-Matrix-Keypad-Library-for-CHIP
|
42794c3460818714fccc1c5a967e151504ef2ade
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
from time import sleep
from sys import exit
import keypad_4x4_lib_sumar
# Hardware interface: 4x4 matrix keypad driver instance.
kp = keypad_4x4_lib_sumar.keypad()

# Rolling buffer of the last four digits entered, the secret code, and a
# count of digits entered since the last reset.
attempt = "0000"
passcode = "1912"
counter = 0

# Main loop: poll the keypad until the passcode is matched.
while True:
    # Block until a key is pressed (getKey() presumably returns None while
    # idle — TODO confirm against the keypad library).
    digit = None
    while digit is None:  # FIX: identity test, not '== None'
        digit = kp.getKey()

    print("Digit Entered: %s" % digit)
    # Shift the new digit into the 4-character attempt window.
    attempt = attempt[1:] + str(digit)
    print("Attempt value: %s" % attempt)

    # Check for passcode match.
    if attempt == passcode:
        print("Your code was correct, goodbye.")
        exit()
    else:
        counter += 1
        print("Entered digit count: %s" % counter)
        # After four wrong digits, warn, pause, and reset the counter.
        if counter >= 4:
            print("Incorrect code!")
            sleep(3)
            print("Try Again")
            sleep(1)
            counter = 0
    sleep(0.5)
| 18.452381
| 43
| 0.658065
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 291
| 0.375484
|
0c914d8fbc8a5b30dab1155628e15079383dc757
| 1,342
|
py
|
Python
|
apps/images/migrations/0001_initial.py
|
coogger/coogger
|
9e5e3ca172d8a14272948284a6822000b119119c
|
[
"MIT"
] | 48
|
2018-04-13T13:00:10.000Z
|
2020-03-17T23:35:23.000Z
|
apps/images/migrations/0001_initial.py
|
coogger/coogger
|
9e5e3ca172d8a14272948284a6822000b119119c
|
[
"MIT"
] | 77
|
2018-03-25T13:17:12.000Z
|
2020-08-11T08:24:49.000Z
|
apps/images/migrations/0001_initial.py
|
coogger/coogger
|
9e5e3ca172d8a14272948284a6822000b119119c
|
[
"MIT"
] | 35
|
2018-03-30T21:43:21.000Z
|
2020-08-11T05:51:46.000Z
|
# Generated by Django 3.0.3 on 2020-02-28 13:21
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``Image`` model (id, title, image file,
    creation timestamp)."""

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name="Image",
            fields=[
                # Auto-increment primary key.
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                # Optional short caption for the image.
                (
                    "title",
                    models.CharField(
                        blank=True,
                        help_text="Title | Optional",
                        max_length=55,
                        null=True,
                        verbose_name="",
                    ),
                ),
                # The uploaded file, stored under MEDIA_ROOT/images/.
                ("image", models.ImageField(upload_to="images/", verbose_name="")),
                # Timestamp, defaulting to upload time.
                (
                    "created",
                    models.DateTimeField(
                        default=django.utils.timezone.now,
                        verbose_name="Created",
                    ),
                ),
            ],
        )
    ]
| 26.84
| 76
| 0.354694
| 1,220
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 125
| 0.093145
|
0c91e1acb55c07461c81aa43572f944b6b04b240
| 4,007
|
py
|
Python
|
models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/caffe/cf_facerec-resnet64_112_96_11G_1.3/code/get_aligned_face/api/detect_api.py
|
guochunhe/Vitis-AI
|
e86b6efae11f8703ee647e4a99004dc980b84989
|
[
"Apache-2.0"
] | 1
|
2020-12-18T14:49:19.000Z
|
2020-12-18T14:49:19.000Z
|
models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/caffe/cf_facerec-resnet64_112_96_11G_1.3/code/get_aligned_face/api/detect_api.py
|
guochunhe/Vitis-AI
|
e86b6efae11f8703ee647e4a99004dc980b84989
|
[
"Apache-2.0"
] | null | null | null |
models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/caffe/cf_facerec-resnet64_112_96_11G_1.3/code/get_aligned_face/api/detect_api.py
|
guochunhe/Vitis-AI
|
e86b6efae11f8703ee647e4a99004dc980b84989
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import scipy.misc
import scipy.io
from matplotlib.patches import Rectangle
import datetime
import cv2
import sys
def nms(dets, thresh):
    """Pure Python non-maximum suppression baseline.

    `dets` is an (n, 5) array of rows [x1, y1, x2, y2, score].  Boxes whose
    overlap ratio with an already-kept, higher-scoring box exceeds `thresh`
    are discarded.  Returns the indices of the kept rows, best score first.
    """
    left, top = dets[:, 0], dets[:, 1]
    right, bottom = dets[:, 2], dets[:, 3]
    confidences = dets[:, 4]

    # Pixel-inclusive box areas, precomputed once.
    box_area = (right - left + 1) * (bottom - top + 1)
    # Candidate indices sorted by descending confidence.
    candidates = confidences.argsort()[::-1]

    selected = []
    while candidates.size > 0:
        best = candidates[0]
        selected.append(best)
        rest = candidates[1:]

        # Intersection of the best box with every remaining candidate.
        overlap_w = np.maximum(
            0.0,
            np.minimum(right[best], right[rest]) - np.maximum(left[best], left[rest]) + 1,
        )
        overlap_h = np.maximum(
            0.0,
            np.minimum(bottom[best], bottom[rest]) - np.maximum(top[best], top[rest]) + 1,
        )
        intersection = overlap_w * overlap_h
        overlap_ratio = intersection / (box_area[best] + box_area[rest] - intersection)

        # Keep only candidates that overlap the chosen box at most `thresh`.
        candidates = rest[np.where(overlap_ratio <= thresh)[0]]
    return selected
class Detect(object):
    """Face detector wrapping a fully-convolutional Caffe model.

    The network is expected to output a per-pixel probability map
    (blob 'pixel-loss') and a bounding-box regression map
    (blob 'bb-output-tiled') at 1/res_stride_ of the input resolution;
    `detect` thresholds the probabilities, decodes the boxes and applies NMS.
    """
    def __init__(self):
        # Default hyper-parameters; adjust before calling detect().
        self.expand_scale_=0.0          # fractional padding applied to each returned box
        self.force_gray_=False          # convert input to grayscale before inference
        self.input_mean_value_=128.0    # subtracted from every pixel
        self.input_scale_=1.0           # multiplied after mean subtraction
        self.pixel_blob_name_='pixel-loss'      # output blob: per-pixel face probability
        self.bb_blob_name_='bb-output-tiled'    # output blob: box regression offsets
        self.res_stride_=4              # output map stride relative to the input image
        self.det_threshold_=0.7         # minimum probability to keep a detection
        self.nms_threshold_=0.3         # overlap threshold passed to nms()
        self.caffe_path_=""             # remembered pycaffe path from model_init
        self.input_channels_=3          # input channel count (set to 1 by force_gray_)
    def model_init(self,caffe_python_path,model_path,def_path):
        """Load the Caffe net in TEST mode.

        caffe_python_path: directory containing the pycaffe package.
        model_path: .caffemodel weights file.
        def_path: .prototxt network definition.
        """
        sys.path.insert(0,caffe_python_path)
        import caffe
        self.caffe_path_=caffe_python_path
        self.net_=caffe.Net(def_path,model_path,caffe.TEST)
    def detect(self,image):
        """Run the detector on an image (HxWxC numpy array, presumably BGR as
        read by cv2 — TODO confirm) and return a list of [x1, y1, x2, y2]
        integer boxes, each expanded by expand_scale_ and clipped to the image.
        """
        #sys.path.insert(0,self.caffe_path_)
        import caffe
        #caffe.set_mode_cpu()
        #caffe.set_device(0)
        # NOTE(review): the transformer is built with the current
        # input_channels_ BEFORE the force_gray_ branch may set it to 1 —
        # on the first grayscale call the transformer shape uses the old
        # channel count; verify this is intended.
        self.transformer_=caffe.io.Transformer({'data': (1,self.input_channels_,image.shape[0],image.shape[1])})
        if self.force_gray_:
            image=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
            self.input_channels_=1
        else:
            # HWC -> CHW only in the color path; a grayscale image is 2-D.
            self.transformer_.set_transpose('data', (2,0,1))
        transformed_image=self.transformer_.preprocess('data',image)
        # Normalize: subtract mean, then scale.
        transformed_image=(transformed_image-self.input_mean_value_)*self.input_scale_
        sz=image.shape
        # Reshape the input blob to this image's size and run a forward pass.
        self.net_.blobs['data'].reshape(1, self.input_channels_, sz[0], sz[1])
        self.net_.blobs['data'].data[0, ...] = transformed_image
        output = self.net_.forward()
        # Channel 1 of the pixel blob holds the face-class probability.
        prob = output[self.pixel_blob_name_][0, 1, ...]
        bb = output[self.bb_blob_name_][0, ...]
        # Build the grid of output-cell anchor coordinates (stride res_stride_),
        # trimmed to the actual output map size.
        gy = np.arange(0, sz[0], self.res_stride_)
        gx = np.arange(0, sz[1], self.res_stride_)
        gy = gy[0 : bb.shape[1]]
        gx = gx[0 : bb.shape[2]]
        [x, y] = np.meshgrid(gx, gy)
        #print bb.shape[1],len(gy),sz[0],sz[1]
        # Convert regressed offsets to absolute image coordinates.
        bb[0, :, :] += x
        bb[2, :, :] += x
        bb[1, :, :] += y
        bb[3, :, :] += y
        # Flatten maps to per-candidate rows and keep confident ones.
        bb = np.reshape(bb, (4, -1)).T
        prob = np.reshape(prob, (-1, 1))
        bb = bb[prob.ravel() > self.det_threshold_, :]
        prob = prob[prob.ravel() > self.det_threshold_, :]
        # [x1, y1, x2, y2, score] rows for non-maximum suppression.
        rects = np.hstack((bb, prob))
        keep = nms(rects, self.nms_threshold_)
        rects = rects[keep, :]
        rects_expand=[]
        for rect in rects:
            # Expand each kept box by expand_scale_ and clip to image bounds.
            rect_expand=[]
            rect_w=rect[2]-rect[0]
            rect_h=rect[3]-rect[1]
            rect_expand.append(int(max(0,rect[0]-rect_w*self.expand_scale_)))
            rect_expand.append(int(max(0,rect[1]-rect_h*self.expand_scale_)))
            rect_expand.append(int(min(sz[1],rect[2]+rect_w*self.expand_scale_)))
            rect_expand.append(int(min(sz[0],rect[3]+rect_h*self.expand_scale_)))
            rects_expand.append(rect_expand)
        return rects_expand
| 32.577236
| 108
| 0.639631
| 2,517
| 0.628151
| 0
| 0
| 0
| 0
| 0
| 0
| 769
| 0.191914
|