| Column | Type | Range / distinct values |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 2–616 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–69 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–118 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | lengths 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nulls (⌀) present |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | lengths 2–10.3M |
| authors | list | lengths 1–1 |
| author_id | string | lengths 0–212 |
blob_id: 244f75e62fee8f8b35e9746f07e2afee7fd1090b | directory_id: 03499bffca838f407e3a22b216df0d1807fe8c97 | content_id: 6ec8bb14312c7cc3ddf3f6986a2bc9beda769819
path: /dive/customserializer.py | detected_licenses: [] | license_type: no_license
repo_name: tsukit2/mypythonlearning | snapshot_id: 54dfe9845937f1e678db91f154ed66f32d995289 | revision_id: a8a85bf25fe91b0fc12c8a43f701552bacd8acbe | branch_name: refs/heads/master
visit_date: 2021-01-10T19:55:32.598001 | revision_date: 2011-03-17T23:46:10 | committer_date: 2011-03-17T23:46:10
github_id: 32,286,507 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 673 | extension: py
content:
import time
def to_json(python_object):
if isinstance(python_object, time.struct_time):
return {'__class__': 'time.asctime',
'__value__': time.asctime(python_object)}
if isinstance(python_object, bytes):
return {'__class__': 'bytes',
'__value__': list(python_object)}
raise TypeError(repr(python_object) + ' is not JSON serializable')
def from_json(json_object):
if '__class__' in json_object:
if json_object['__class__'] == 'time.asctime':
return time.strptime(json_object['__value__'])
if json_object['__class__'] == 'bytes':
return bytes(json_object['__value__'])
return json_object
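# --- Illustrative usage (not part of the original file): a minimal sketch showing
# how the two hooks above plug into the standard json module via `default` and
# `object_hook`; the sample values below are assumptions for demonstration only.
import json
sample = {'stamp': time.localtime(), 'raw': bytes([0, 1, 2])}
encoded = json.dumps(sample, default=to_json)
decoded = json.loads(encoded, object_hook=from_json)  # struct_time and bytes round-trip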
authors: ["tsukit@135c47a1-433c-b587-80eb-c937e1a09c14"] | author_id: tsukit@135c47a1-433c-b587-80eb-c937e1a09c14

blob_id: c6d010f69671e1eab1e3fee2a0b591e19cbad069 | directory_id: ea356efac5279de4e5926b5eeb6ecf477146fa12 | content_id: 93f481efd29562bcaf2b9d1ab6360a57c7c0094a
path: /bookmart/views.py | detected_licenses: [] | license_type: no_license
repo_name: ndneighbor/bookshop | snapshot_id: 75581b72aba9f0bd462249545492789af63e7355 | revision_id: 6a47166dcd441c27c062184016141c7adb1d0fbb | branch_name: refs/heads/master
visit_date: 2020-03-29T04:00:15.924221 | revision_date: 2018-11-22T01:54:35 | committer_date: 2018-11-22T01:54:35
github_id: 149,510,812 | star_events_count: 0 | fork_events_count: 4
gha_license_id: null | gha_event_created_at: 2018-11-22T03:03:44 | gha_created_at: 2018-09-19T20:51:42 | gha_language: JavaScript
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 379 | extension: py
content:
from django.views.generic.list import ListView
from books.models import Book
class HomePageView(ListView):
model = Book
template_name = "index.html"
def get_context_data(self, **kwargs):
context = super(HomePageView, self).get_context_data(**kwargs)
context['latest_books'] = Book.objects.all().order_by('-release_date')[:8]
return context
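# --- Illustrative wiring (not part of the original file): a class-based view like
# HomePageView is typically exposed through a URLconf via .as_view(); the module
# path and URL pattern below are assumptions, shown only as a sketch.
# from django.urls import path
# from bookmart.views import HomePageView
#
# urlpatterns = [
#     path('', HomePageView.as_view(), name='home'),
# ]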
authors: ["asara019@fiu.edu"] | author_id: asara019@fiu.edu

blob_id: ccda4da05cc51efda1da87649564874afc8467cc | directory_id: ef68be7662c2f189e381682b08ec6f41bdf374b3 | content_id: 559f2efe363625bb24040a838736b09ccad2354f
path: /path_finding/scripts/plan_path.py | detected_licenses: [] | license_type: no_license
repo_name: DudekPL/tiago | snapshot_id: 65ef3e8863d27308ea6d45cb8aa1e7f024998426 | revision_id: 5fdea4a34e3fb5b9cc3045ac9c7dafb6936072fa | branch_name: refs/heads/master
visit_date: 2023-02-21T20:20:28.925499 | revision_date: 2021-01-26T22:25:27 | committer_date: 2021-01-26T22:25:27
github_id: 329,358,664 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,146 | extension: py
content:
#!/usr/bin/env python
import rospkg
import sqlite3 as sql
import rospy
from path_finding.srv import Plan_path, Plan_pathResponse
from path_finding.msg import Point
import itertools as itt
import random
start_point = 'spawn'
class Object:
def __init__(self, _name, _expiring, _priority):
self.name = _name
self.expiring = _expiring
self.priority = _priority
def get_objects_from_db(_cur, _objects_names):
_query = "SELECT name, expiring, prority FROM objects WHERE name IN ("+",".join(["?"]*len(_objects_names))+")"
_cur.execute(_query, _objects_names)
_dict = {}
for _row in _cur.fetchall():
_dict[_row['name']] = Object(_row['name'], _row['expiring'], _row['prority'])
return _dict
def get_look_up_points_from_db(_cur, _objects, _world_name):
_query = "SELECT * FROM look_up_points WHERE map=(?) AND object IN ("+",".join(["?"]*len(_objects))+")"
_args = [_world_name]
_args.extend([_o.name for _o in _objects])
_cur.execute(_query, _args)
_list = []
for _row in _cur.fetchall():
_list.append(Point(_row['id'], _row['object'], _row['x'], _row['y'], _row['yaw']))
return _list
def get_distances_dict_from_db(_cur, _points, _spawn):
_all_points = list(_points)
_all_points.append(_spawn)
_query = 'SELECT * FROM distances WHERE start IN (' + ",".join(["?"] * len(_all_points)) + ")"
_query = _query + 'AND finish IN (' + ",".join(["?"] * len(_all_points)) + ")"
_args = [_p.id for _p in _all_points]
_args.extend([_p.id for _p in _all_points])
_cur.execute(_query, _args)
_dict = {_p.id: {} for _p in _all_points}
for _row in _cur.fetchall():
_dict[_row['start']][_row['finish']] = _row['distance']
return _dict
def get_spawn_from_db(_cur, _map):
_query = 'SELECT * FROM look_up_points WHERE object=(?) AND map=(?)'
_cur.execute(_query, [start_point, _map])
_row = _cur.fetchone()
return Point(_row['id'], _row['object'], _row['x'], _row['y'], _row['yaw'])
def get_spawn_distances_from_db(_cur, _spawn_id):
_query = 'SELECT * FROM distances WHERE start=(?)'
_cur.execute(_query, [_spawn_id])
_dict = {}
for _row in _cur.fetchall():
_dict[_row['finish']] = _row['distance']
return _dict
def get_priority(_point, _objects):
return _objects[_point.object].priority
def path_length(_path, _spawn_distances, _distance_dictionary):
prev_p = _path[0]
_dist = _spawn_distances[_path[0].id]
for point in _path:
_dist += _distance_dictionary[prev_p.id][point.id]
prev_p = point
_dist += _spawn_distances[prev_p.id]
return _dist
def choose_random(_list):
return random.choice(_list)
def initialize_GRASP(_sorted_points, _d, _k, _spawn_point, _distance_dictionary, _objects):
_groups = itt.groupby(_sorted_points, key=lambda _p: get_priority(_p, _objects))
_points_to_visit = list(_sorted_points)
_path = []
last_point = _spawn_point
for priority, group in _groups:
d_relaxed = [_p for _p in _points_to_visit if get_priority(_p, _objects) >= priority - _d]
current_group = [_p for _p in d_relaxed if get_priority(_p, _objects) == priority]
while len(current_group) > 0: # not empty
d_relaxed = sorted(d_relaxed, key=lambda _p: _distance_dictionary[last_point.id][_p.id])
restricted_candidates = d_relaxed[:_k]
_random = choose_random(restricted_candidates)
_path.append(_random)
# remove point from lists
if get_priority(_random, _objects) == priority:
current_group.remove(_random)
d_relaxed.remove(_random)
_points_to_visit.remove(_random)
_path.append(_spawn_point)
return _path
def plan_path(_req, _Imax, _d, _k):
to_pickup = _req.objects
world_name = _req.world_name
# connection to database
db_path = rospkg.RosPack().get_path('path_finding')
conn = sql.connect(db_path + "/../test.db")
conn.row_factory = sql.Row
cursor = conn.cursor()
# selects from db
spawn = get_spawn_from_db(cursor, world_name)
objects = get_objects_from_db(cursor, to_pickup)
points = get_look_up_points_from_db(cursor, objects.values(), world_name)
dist_dict = get_distances_dict_from_db(cursor, points, spawn)
spawn_dist = get_spawn_distances_from_db(cursor, spawn.id)
shortest_path = []
shortest_dist = float('inf')
_sorted_points = sorted(points, reverse=True, key=lambda _p: get_priority(_p, objects))
for i in range(0, _Imax):
_path = initialize_GRASP(_sorted_points, _d, _k, spawn, dist_dict, objects)
_dist = path_length(_path, spawn_dist, dist_dict)
if _dist < shortest_dist:
shortest_dist = _dist
shortest_path = list(_path)
conn.close()
response = Plan_pathResponse()
response.path = shortest_path
return response
if __name__ == '__main__':
rospy.init_node('plan_path')
Imax = 5
d = 2
k = 4
rospy.Service('plan_path_srv', Plan_path, lambda _req: plan_path(_req, Imax, d, k))
rospy.spin()
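# --- Hedged reference (not part of the original file): the SQL above implies a
# SQLite layout roughly like the following; column names are taken verbatim from
# the queries (including the 'prority' spelling), while the grouping is an assumption.
#   objects(name, expiring, prority)
#   look_up_points(id, object, x, y, yaw, map)
#   distances(start, finish, distance)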
authors: [""] | author_id:

blob_id: 39c7e75594ee71653f29814148eeebb5b8a2bc8e | directory_id: b4024a1b406361542d3e91e4cae1ad02ec238b7b | content_id: acd8d89d55dbb94f55a61280803ddb4806aa102e
path: /VideoGameMarket/VideoGameMarket/urls.py | detected_licenses: [] | license_type: no_license
repo_name: oscargarza356/UIGameMarket | snapshot_id: 2a52b19597bed3e7788a601ceb52a416b2a8aa33 | revision_id: a5e75be1f9f198e4eeea08e27fde2d0523e019f7 | branch_name: refs/heads/master
visit_date: 2020-04-04T20:57:35.385940 | revision_date: 2018-12-11T21:11:29 | committer_date: 2018-12-11T21:11:29
github_id: 156,266,965 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,104 | extension: py
content:
"""VideoGameMarket URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
import accounts.views
from games import views
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^accounts/', include('accounts.urls')),
url(r'^$', views.home, name="home"),
url(r'^games/', include('games.urls')),
] + static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
authors: ["oscar35689@hotmail.com"] | author_id: oscar35689@hotmail.com

blob_id: 7cbba5c870078f20312a03fa2521edf549d6b736 | directory_id: ade562bdcf0c1c0cc06e0c19984b04edeb29cf0c | content_id: 5fd989663d759471fcb6c3be3a30b699040de390
path: /swarooop_c_h/a_byte_of_python_russian/world/__init__.py | detected_licenses: [] | license_type: no_license
repo_name: FoctoDev/Learning_Python_3 | snapshot_id: f6a0b9bd805f60fa6519c587c7b12c7aaecddc3a | revision_id: 7e4b6e847b1831477bec1b9a88cf8f70c7f198b8 | branch_name: refs/heads/master
visit_date: 2022-11-11T09:18:47.072687 | revision_date: 2020-07-05T08:45:37 | committer_date: 2020-07-05T08:45:37
github_id: 263,550,904 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 70 | extension: py
content:
# The world package contains the sub-packages africa and asia
authors: ["foctodev@gmail.com"] | author_id: foctodev@gmail.com

blob_id: 5cd98bad4ada4798b10dc59ca7971861b1bb8d7b | directory_id: dad3a74ca26e0fa955a8a199f771af07f5465635 | content_id: b154cc770eb0538cd0d5140228a547fda84ec939
path: /train_options.py | detected_licenses: [] | license_type: no_license
repo_name: mujtaba-hike/canny_dlib_semantic_maps | snapshot_id: 92b3d503971c7af9fa602f311dec0b5c480ac612 | revision_id: f87e50a1bc235f0f55c599f1a584be2575bb0c68 | branch_name: refs/heads/master
visit_date: 2021-01-04T08:57:59.016331 | revision_date: 2020-02-14T10:19:27 | committer_date: 2020-02-14T10:19:27
github_id: 240,477,411 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,223 | extension: py
content:
from base_options import BaseOptions
class TrainOptions(BaseOptions):
def initialize(self):
BaseOptions.initialize(self)
self.parser.add_argument('--display_freq', type=int, default=100, help='frequency of showing training results on screen')
self.parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
self.parser.add_argument('--save_latest_freq', type=int, default=1000, help='frequency of saving the latest results')
self.parser.add_argument('--save_epoch_freq', type=int, default=1, help='frequency of saving checkpoints at the end of epochs')
self.parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
self.parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
self.parser.add_argument('--niter', type=int, default=10, help='# of iter at starting learning rate')
self.parser.add_argument('--niter_decay', type=int, default=10, help='# of iter to linearly decay learning rate to zero')
self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
self.parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
self.parser.add_argument('--TTUR', action='store_true', help='Use TTUR training scheme')
self.parser.add_argument('--gan_mode', type=str, default='ls', help='(ls|original|hinge)')
self.parser.add_argument('--pool_size', type=int, default=1, help='the size of image buffer that stores previously generated images')
self.parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
# for discriminators
self.parser.add_argument('--num_D', type=int, default=2, help='number of patch scales in each discriminator')
self.parser.add_argument('--n_layers_D', type=int, default=3, help='number of layers in discriminator')
self.parser.add_argument('--no_vgg', action='store_true', help='do not use VGG feature matching loss')
self.parser.add_argument('--no_ganFeat', action='store_true', help='do not match discriminator features')
self.parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature matching')
self.parser.add_argument('--sparse_D', action='store_true', help='use sparse temporal discriminators to save memory')
# for temporal
self.parser.add_argument('--lambda_T', type=float, default=10.0, help='weight for temporal loss')
self.parser.add_argument('--lambda_F', type=float, default=10.0, help='weight for flow loss')
self.parser.add_argument('--n_frames_D', type=int, default=3, help='number of frames to feed into temporal discriminator')
self.parser.add_argument('--n_scales_temporal', type=int, default=2, help='number of temporal scales in the temporal discriminator')
self.parser.add_argument('--max_frames_per_gpu', type=int, default=1, help='max number of frames to load into one GPU at a time')
self.parser.add_argument('--max_frames_backpropagate', type=int, default=1, help='max number of frames to backpropagate')
self.parser.add_argument('--max_t_step', type=int, default=1, help='max spacing between neighboring sampled frames. If greater than 1, the network may randomly skip frames during training.')
self.parser.add_argument('--n_frames_total', type=int, default=30, help='the overall number of frames in a sequence to train with')
self.parser.add_argument('--niter_step', type=int, default=5, help='how many epochs do we change training batch size again')
self.parser.add_argument('--niter_fix_global', type=int, default=0, help='if specified, only train the finest spatial layer for the given iterations')
self.isTrain = True
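# --- Hedged usage sketch (not part of the original file): pix2pixHD/vid2vid-style
# option classes usually expose a parse() method on BaseOptions; whether that holds
# here depends on base_options.py, which is not shown, so treat this as an assumption.
# opt = TrainOptions().parse()
# print(opt.lr, opt.niter, opt.gan_mode)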
authors: ["noreply@github.com"] | author_id: mujtaba-hike.noreply@github.com

blob_id: b9e099fbd31518d084e13449c5d6d6f2991e681e | directory_id: 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | content_id: af5cfed312b0317f6a062e08f0427637cf188e9d
path: /PyTorch/built-in/cv/classification/Gluon_ResNet50_v1c_for_PyTorch/timm/models/hardcorenas.py
detected_licenses: ["Apache-2.0", "BSD-2-Clause", "MIT", "BSD-3-Clause", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-unknown-license-reference", "GPL-1.0-or-later", "CC-BY-NC-4.0", "LicenseRef-scancode-proprietary-license", "LGPL-2.0-or-later"] | license_type: permissive
repo_name: Ascend/ModelZoo-PyTorch | snapshot_id: 4c89414b9e2582cef9926d4670108a090c839d2d | revision_id: 92acc188d3a0f634de58463b6676e70df83ef808 | branch_name: refs/heads/master
visit_date: 2023-07-19T12:40:00.512853 | revision_date: 2023-07-17T02:48:18 | committer_date: 2023-07-17T02:48:18
github_id: 483,502,469 | star_events_count: 23 | fork_events_count: 6
gha_license_id: Apache-2.0 | gha_event_created_at: 2022-10-15T09:29:12 | gha_created_at: 2022-04-20T04:11:18 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 8,655 | extension: py
content:
# Copyright 2019 Ross Wightman
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .efficientnet_blocks import SqueezeExcite
from .efficientnet_builder import decode_arch_def, resolve_act_layer, resolve_bn_args, round_channels
from .helpers import build_model_with_cfg, default_cfg_for_features
from .layers import get_act_fn
from .mobilenetv3 import MobileNetV3, MobileNetV3Features
from .registry import register_model
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (1, 1),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv_stem', 'classifier': 'classifier',
**kwargs
}
default_cfgs = {
'hardcorenas_a': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_A_Green_38ms_75.9_23474aeb.pth'),
'hardcorenas_b': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_B_Green_40ms_76.5_1f882d1e.pth'),
'hardcorenas_c': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_C_Green_44ms_77.1_d4148c9e.pth'),
'hardcorenas_d': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_D_Green_50ms_77.4_23e3cdde.pth'),
'hardcorenas_e': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_E_Green_55ms_77.9_90f20e8a.pth'),
'hardcorenas_f': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_F_Green_60ms_78.1_2855edf1.pth'),
}
def _gen_hardcorenas(pretrained, variant, arch_def, **kwargs):
"""Creates a hardcorenas model
Ref impl: https://github.com/Alibaba-MIIL/HardCoReNAS
Paper: https://arxiv.org/abs/2102.11646
"""
num_features = 1280
se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels)
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
num_features=num_features,
stem_size=32,
norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'hard_swish'),
se_layer=se_layer,
**kwargs,
)
features_only = False
model_cls = MobileNetV3
kwargs_filter = None
if model_kwargs.pop('features_only', False):
features_only = True
kwargs_filter = ('num_classes', 'num_features', 'global_pool', 'head_conv', 'head_bias', 'global_pool')
model_cls = MobileNetV3Features
model = build_model_with_cfg(
model_cls, variant, pretrained,
default_cfg=default_cfgs[variant],
pretrained_strict=not features_only,
kwargs_filter=kwargs_filter,
**model_kwargs)
if features_only:
model.default_cfg = default_cfg_for_features(model.default_cfg)
return model
@register_model
def hardcorenas_a(pretrained=False, **kwargs):
""" hardcorenas_A """
arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e6_c40_nre_se0.25'],
['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25'],
['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25'],
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']]
model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_a', arch_def=arch_def, **kwargs)
return model
@register_model
def hardcorenas_b(pretrained=False, **kwargs):
""" hardcorenas_B """
arch_def = [['ds_r1_k3_s1_e1_c16_nre'],
['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25', 'ir_r1_k3_s1_e3_c24_nre'],
['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre'],
['ir_r1_k5_s2_e3_c80', 'ir_r1_k5_s1_e3_c80', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'],
['ir_r1_k5_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'],
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'],
['cn_r1_k1_s1_c960']]
model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_b', arch_def=arch_def, **kwargs)
return model
@register_model
def hardcorenas_c(pretrained=False, **kwargs):
""" hardcorenas_C """
arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre',
'ir_r1_k5_s1_e3_c40_nre'],
['ir_r1_k5_s2_e4_c80', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'],
['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'],
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'],
['cn_r1_k1_s1_c960']]
model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_c', arch_def=arch_def, **kwargs)
return model
@register_model
def hardcorenas_d(pretrained=False, **kwargs):
""" hardcorenas_D """
arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
['ir_r1_k5_s2_e3_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k3_s1_e3_c40_nre_se0.25'],
['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25',
'ir_r1_k3_s1_e3_c80_se0.25'],
['ir_r1_k3_s1_e4_c112_se0.25', 'ir_r1_k5_s1_e4_c112_se0.25', 'ir_r1_k3_s1_e3_c112_se0.25',
'ir_r1_k5_s1_e3_c112_se0.25'],
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25',
'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']]
model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_d', arch_def=arch_def, **kwargs)
return model
@register_model
def hardcorenas_e(pretrained=False, **kwargs):
""" hardcorenas_E """
arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25',
'ir_r1_k3_s1_e3_c40_nre_se0.25'], ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e6_c80_se0.25'],
['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25',
'ir_r1_k5_s1_e3_c112_se0.25'],
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25',
'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']]
model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_e', arch_def=arch_def, **kwargs)
return model
@register_model
def hardcorenas_f(pretrained=False, **kwargs):
""" hardcorenas_F """
arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e6_c40_nre_se0.25'],
['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25',
'ir_r1_k3_s1_e3_c80_se0.25'],
['ir_r1_k3_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25',
'ir_r1_k3_s1_e3_c112_se0.25'],
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e6_c192_se0.25',
'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']]
model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_f', arch_def=arch_def, **kwargs)
return model
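# --- Hedged usage sketch (not part of the original file): because the factories
# above are decorated with @register_model, they are normally reachable through
# timm's create_model API; pretrained=False avoids downloading weights.
# import timm
# model = timm.create_model('hardcorenas_a', pretrained=False, num_classes=10)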
authors: ["wangjiangben@huawei.com"] | author_id: wangjiangben@huawei.com

blob_id: d1fcc2af5073643edff19b55c7c399a98d88cac1 | directory_id: e79e28d847df55aa46374888ac20e351c8a58b9b | content_id: 67cbd951c25ddc8f8bfe7b52d2811c8a40662ae5
path: /LongestSubstringNoRepeats.py | detected_licenses: [] | license_type: no_license
repo_name: anderskswanson/lc | snapshot_id: 2adeedaaf99b2ae0ddb1865ec0efb262e1872134 | revision_id: f38e2a1d2680f3e5240e06775707722825ba5e0f | branch_name: refs/heads/master
visit_date: 2020-03-23T04:16:53.363927 | revision_date: 2018-07-25T04:18:06 | committer_date: 2018-07-25T04:18:06
github_id: 141,074,004 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 471 | extension: py
content:
class Solution(object):
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
start = 0
bestlength = 0
chars = dict()
n = len(s)
for i in range(n):
if s[i] in chars and start <= chars[s[i]]:
start = chars[s[i]] + 1
else:
bestlength = max(bestlength, i - start + 1)
chars[s[i]] = i
return bestlength
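# --- Illustrative checks (not part of the original file): quick sanity tests for
# the sliding-window solution above, using the classic LeetCode examples.
# s = Solution()
# assert s.lengthOfLongestSubstring("abcabcbb") == 3  # "abc"
# assert s.lengthOfLongestSubstring("bbbbb") == 1     # "b"
# assert s.lengthOfLongestSubstring("pwwkew") == 3    # "wke"
# assert s.lengthOfLongestSubstring("") == 0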
authors: ["anders@AnderPC.localdomain"] | author_id: anders@AnderPC.localdomain

blob_id: 23801067b7034dc6aae9117c4c155c9e03938021 | directory_id: e59a742c56cb4671dff31b044adbcf93b2341670 | content_id: 274f340938588ce93c3f76a33113a8b3e8e72826
path: /packages/sdk/odahuflow/sdk/models/user_info.py | detected_licenses: ["Apache-2.0"] | license_type: permissive
repo_name: odahu/odahu-flow | snapshot_id: c90f638dfb6a03942d6f1686ec4d95564e073915 | revision_id: 58c3220a266a61bb893cf79c4b994569e3445097 | branch_name: refs/heads/develop
visit_date: 2023-03-06T22:47:21.215830 | revision_date: 2022-04-27T11:14:36 | committer_date: 2022-04-27T11:14:36
github_id: 219,930,786 | star_events_count: 12 | fork_events_count: 4
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-02-25T03:18:51 | gha_created_at: 2019-11-06T06:50:47 | gha_language: Go
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,134 | extension: py
content:
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from odahuflow.sdk.models.base_model_ import Model
from odahuflow.sdk.models import util
class UserInfo(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, email: str=None, username: str=None): # noqa: E501
"""UserInfo - a model defined in Swagger
:param email: The email of this UserInfo. # noqa: E501
:type email: str
:param username: The username of this UserInfo. # noqa: E501
:type username: str
"""
self.swagger_types = {
'email': str,
'username': str
}
self.attribute_map = {
'email': 'email',
'username': 'username'
}
self._email = email
self._username = username
@classmethod
def from_dict(cls, dikt) -> 'UserInfo':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The UserInfo of this UserInfo. # noqa: E501
:rtype: UserInfo
"""
return util.deserialize_model(dikt, cls)
@property
def email(self) -> str:
"""Gets the email of this UserInfo.
:return: The email of this UserInfo.
:rtype: str
"""
return self._email
@email.setter
def email(self, email: str):
"""Sets the email of this UserInfo.
:param email: The email of this UserInfo.
:type email: str
"""
self._email = email
@property
def username(self) -> str:
"""Gets the username of this UserInfo.
:return: The username of this UserInfo.
:rtype: str
"""
return self._username
@username.setter
def username(self, username: str):
"""Sets the username of this UserInfo.
:param username: The username of this UserInfo.
:type username: str
"""
self._username = username
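# --- Illustrative usage (not part of the original file): the model is a plain
# getter/setter wrapper, so direct construction works as sketched below; from_dict()
# additionally depends on odahuflow.sdk.models.util, which is not shown here.
# info = UserInfo(email='user@example.com', username='user')
# print(info.email, info.username)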
authors: ["noreply@github.com"] | author_id: odahu.noreply@github.com

blob_id: 481330b2b92ea8b4d3678661c58325f6c3a0c8a0 | directory_id: 3cf249eb18fe5b60bc581c4570a3de6a4a28c1dc | content_id: ad45fc6f9da0ed8b0564d42a1f1dc8baa683eae9
path: /httpserver.py | detected_licenses: [] | license_type: no_license
repo_name: nandix/BasicPythonServer | snapshot_id: a59e73dfd2a34022f5c5b34afe391bb973a8e14a | revision_id: 53cd684cd05aa7860da0af4c4e5f48509c14bafc | branch_name: refs/heads/master
visit_date: 2021-01-14T12:56:55.300555 | revision_date: 2014-03-05T03:13:51 | committer_date: 2014-03-05T03:13:51
github_id: 17,293,419 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,760 | extension: py
content:
'''
Name: prog1_server.py
Brief: This program acts as a simple python server
and responds to http get requests.
By: Daniel Nix
Date: 2/28/2014
For: Networking (CSC 492)
Description:
This python script acts as a server to
send html files using http protocols.
It sets up a socket at port 9876 and
listens for incoming http requests. Upon
receiving a request the server tries to
find the file requested and serves it to
the client. If the file is not found a
404 Not Found response is sent and an
appropriate 404.html page is presented.
The server runs indefinitely and has no
elegant way of closing. The most effective
method to close the server is using Ctrl-C
from the command line.
'''
#import socket module to use sockets
from socket import *
#import threading to use threads
import threading
#create a socket object for the server to use
serverSocket = socket(AF_INET, SOCK_STREAM)
#Prepare a sever socket
#Set and fix (bind) the port number
serverPort = 9876
serverSocket.bind(('',serverPort))
#Start the server by letting it listen for requests.
serverSocket.listen(1)
def processRequest( connectionSocket, addr ):
'''
Function: processRequest
Arguments:
connectionSocket: socket connection to client
addr: ip address of client
Description:
Process Request gets a client socket and
processes a request incoming from that client.
'''
try:
# Wait for an incoming request
message = connectionSocket.recv(1024)
# Get the name of the file requested
filename = message.split()[1]
# Open the file
f = open(filename[1:])
# Read the file's contents into a string
outputdata = f.read()
#Send one HTTP header line into socket
connectionSocket.send( b"HTTP/1.1 200 OK \r\n" )
#Send the content of the requested file to the client
for i in range(0, len(outputdata)):
connectionSocket.send( str.encode(outputdata[i]) )
#Close the client's socket
connectionSocket.close()
except IOError:
#Send response message for file not found
# Open the 404 file
f = open("404.html")
# Read the 404 file
outputdata = f.read()
# Send the 404 response
connectionSocket.send( b"HTTP/1.1 404 Not Found \r\n")
# Send the 404 file
for i in range(0, len(outputdata)):
connectionSocket.send( str.encode(outputdata[i]))
#Close client socket
connectionSocket.close()
#Infinite loop for server to run forever to process requests
while True:
#Establish the connection
connectionSocket, addr = serverSocket.accept()
#Create a thread to process the request
threading.Thread( target=processRequest, \
args=(connectionSocket, addr), \
).start()
#Close the Server's connection socket
serverSocket.close()
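# --- Illustrative check (not part of the original file): with an index.html placed
# next to this script, the server can be exercised from another shell; the file
# name and the use of curl are assumptions for demonstration only.
#   python httpserver.py
#   curl http://localhost:9876/index.html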
authors: ["dan@localhost.localdomain"] | author_id: dan@localhost.localdomain

blob_id: 83cc7cd16e0edfaed53c3e05ce4cc3b73b988e29 | directory_id: 57074435fcf3b2779376bdd43b937a4d250f6b6e | content_id: a9f1ef988058a365c9c66a1c8844d9c0e76f9a11
path: /2_Vigenere/GUI_Vigenere.py | detected_licenses: [] | license_type: no_license
repo_name: NickLc/Ciphers | snapshot_id: f9854c7b8ba1433e9a59e47048c72b753c19285d | revision_id: 3bf58042327bd4d04dd4d9191c9111f93b79c7f3 | branch_name: refs/heads/master
visit_date: 2022-11-11T20:35:20.165964 | revision_date: 2020-07-04T02:34:33 | committer_date: 2020-07-04T02:34:33
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,694 | extension: py
content:
import tkinter as tk
from tkinter import Spinbox
from tkinter.ttk import Combobox
from tkinter import scrolledtext
from tkinter import Frame
from tkinter import Entry
from tkinter import messagebox
class Vigenere:
def __init__(self, abc="ABCDEFGHIJKLMNOPQRSTUVWXYZÑÁÉÍÓÚ"):
self.abc = abc
def encriptar(self,text,key):
newText = ''
keyIndex = 0
for i in text:
num = self.abc.find(i)
if num != -1:
num += self.abc.find(key[keyIndex])
num %= len(self.abc)
newText += self.abc[num]
keyIndex +=1
if keyIndex == len(key):
keyIndex = 0
else:
newText += i
return newText
def desencriptar(self, text, key):
newText = ''
keyIndex = 0
for i in text:
num = self.abc.find(i)
if num != -1:
num -= self.abc.find(key[keyIndex])
num %= len(self.abc)
newText += self.abc[num]
keyIndex +=1
if keyIndex == len(key):
keyIndex = 0
else:
newText += i
return newText
class App:
def __init__(self):
self.window = tk.Tk()
self.window.title("Cripto Vigenere")
self.window.geometry('430x410')
self.window.config(bg="#FFFFFF")
self.vigenere = Vigenere()
frameHead = Frame(self.window)
lbl_method = tk.Label(frameHead, text='Metodo')
lbl_method.grid(column=0, row=0, sticky='W', padx=5)
lbl_key = tk.Label(frameHead, text='Clave')
lbl_key.grid(column=1, row=0, sticky='W', padx=5)
frameHead.grid(column=0,row=0)
self.frame = Frame(self.window)
self.combo = Combobox(self.frame)
self.combo['values'] = ("Encriptar", "Desencriptar")
self.combo.grid(column=0, row=0, sticky='W',padx=10)
self.keyInput = Entry(self.frame)
self.keyInput.grid(column =1, row=0, sticky='W',padx=10)
buton = tk.Button(self.frame, text='Start', command = self.clicked)
buton.grid(column=2, row=0, sticky='W',padx=10)
self.frame.grid(column=0, row=1)
lbl_txt_init = tk.Label(self.window, text='Input')
lbl_txt_init.grid(column =0, row=3, sticky='W',padx=10)
self.scroll_txt_init = tk.scrolledtext.ScrolledText(self.window,width=50,height=10)
self.scroll_txt_init.grid(column=0,row=4, sticky='W',padx=10)
lbl_txt_end = tk.Label(self.window, text='Output')
lbl_txt_end.grid(column =0, row=5, sticky='W',padx=10)
self.scroll_txt_end = scrolledtext.ScrolledText(self.window,width=50,height=10)
self.scroll_txt_end.grid(column=0,row=6, sticky='W',padx=10)
self.window.mainloop()
def clicked(self):
# Clear the output text widget
self.scroll_txt_end.delete(0.0, tk.END)
textInput = self.scroll_txt_init.get("0.0", tk.END)
key = self.keyInput.get()
typeAction = self.combo.get()
if(len(key)>len(textInput)):
messagebox.showerror('Mensaje Error', 'La longitud de la clave debe ser menor que del input')
else:
# Encrypt or decrypt depending on the selected action
if typeAction == "Encriptar":
textOutput = self.vigenere.encriptar(textInput, key)
else:
textOutput = self.vigenere.desencriptar(textInput, key)
# Put the resulting string into the output scrolled text
self.scroll_txt_end.insert(tk.INSERT, textOutput)
if __name__=='__main__':
# Start App
Apx = App()
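# --- Illustrative usage (not part of the original file): the Vigenere class can be
# exercised without the GUI; text and key must stay within the uppercase alphabet
# defined in __init__, and the sample strings below are assumptions.
# v = Vigenere()
# cifrado = v.encriptar('HOLA MUNDO', 'KEY')
# assert v.desencriptar(cifrado, 'KEY') == 'HOLA MUNDO'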
authors: ["edsonlc_007@hotmail.com"] | author_id: edsonlc_007@hotmail.com

blob_id: 13572e80382af8809f09917252cdbee893598358 | directory_id: 824ddb497551887f04d2f70fe89666b67b4515e0 | content_id: b87f9011f0c244ac813c4a01a5b070943915be9f
path: /02_OldCustLossing_Model/construction_featrues.py | detected_licenses: [] | license_type: no_license
repo_name: luzhonghe999/WorkDemo-MachineLearning | snapshot_id: 75ae40d91210f44b7391457268b46a8ae45c85a3 | revision_id: 0298277e5b66be3259ae0ceb00c4f315609ecc82 | branch_name: refs/heads/master
visit_date: 2021-04-06T10:31:49.443602 | revision_date: 2018-04-09T07:24:31 | committer_date: 2018-04-09T07:24:31
github_id: 125,346,880 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,948 | extension: py
content:
import numpy as np
import pandas as pd
class construction_featrues:
'''
Compute derived (engineered) features and return them as a DataFrame.
'''
def __init__(self,data):
self.data = data
def cal_featrues(self):
df = self.data
df = df[(df['TARGET'] == 1) | (df['TARGET'] == 0)]
#(3.2) asset
df['asset_up1']=df['ASSET_NOW1']-df['ASSET_NOW2']
df['asset_up2']=df['ASSET_NOW2']-df['ASSET_NOW3']
df['asset_up']=df['asset_up1']+df['asset_up2']
df['hold1']= df['STK_NOW1']+ df['PRD_NOW1']
df['hold2']= df['STK_NOW2']+ df['PRD_NOW2']
df['hold3']= df['STK_NOW3']+ df['PRD_NOW3']
df['hold_chg1']=df['hold1']-df['hold2']
df['hold_chg2']=df['hold2']-df['hold3']
df['hold_chg']=df['hold1']-df['hold3']
df['hold_chg_percent']=df['hold_chg']/(np.abs(df['hold3'])+1)
df['hold_stk_cnt_chg']=df['STK_BS_CNT1']-df['STK_BS_CNT2']
df['cash_flow1']= df['OUT_CASH1'] - df['IN_CASH1']
df['cash_flow2']= df['OUT_CASH2'] - df['IN_CASH2']
df['cash_flow']= df['cash_flow1']+df['cash_flow2']
df['cash_cnt_in']= df['IN_CNT1'] - df['IN_CNT2']
df['cash_cnt_out']= df['OUT_CNT1'] - df['OUT_CNT2']
df['stk_flow']= df['STK_SELL_AMT1'] - df['STK_BUY_AMT1']
df['stk_sell_all']=df['STK_SELL_AMT1'] +df['STK_SELL_AMT2']
df['stk_buy_all']=df['STK_BUY_AMT1'] +df['STK_BUY_AMT2']
df['login1'] = df['APP_LOGIN1'] +df['FY_LOGIN1']+df['ZHLC_LOGIN1']
df['login2'] = df['APP_LOGIN2'] +df['FY_LOGIN2']+df['ZHLC_LOGIN2']
df['login_up']= df['login1'] - df['login2']
df['cash_in_per_login1']=df['IN_CNT1']/(df['login1'] +1)
df['cash_out_per_login1']=df['OUT_CNT1']/(df['login1'] +1)
df['cash_in_per_login2']=df['IN_CNT2']/(df['login2'] +1)
df['cash_out_per_login2']=df['OUT_CNT2']/(df['login2'] +1)
df['cash_in_per_login_chg']=df['cash_in_per_login1']-df['cash_in_per_login2']
df['cash_out_per_login_chg']=df['cash_out_per_login1']-df['cash_out_per_login2']
df['asset_top_flow']=df['ASSET_TOP1']-df['ASSET_TOP2']
df['asset_chg_percent1']=(df['ASSET_NOW1']-df['ASSET_NOW2'])/(df['ASSET_NOW2']+1)
df['asset_chg_percent2']=(df['ASSET_NOW2']-df['ASSET_NOW3'])/(df['ASSET_NOW3']+1)
df['asset_chg_percent']=(df['asset_up1']+df['asset_up2'])/(df['ASSET_NOW3']+1)
df['stk_percent1']=(df['STK_NOW1']+1)/(df['ASSET_NOW1']+1)
df['stk_percent2']=(df['STK_NOW2']+1)/(df['ASSET_NOW2']+1)
df['stk_percent3']=(df['STK_NOW3']+1)/(df['ASSET_NOW3']+1)
df['stk_percent_chg1']=df['stk_percent1']-df['stk_percent2']
df['stk_percent_chg2']=df['stk_percent2']-df['stk_percent3']
df['in_out_cnt_times']=(df['IN_CNT1'] + df['IN_CNT2']+1)/(df['OUT_CNT1'] + df['OUT_CNT2']+1)
df['in_cash_aseet_percent1']=(df['IN_CASH1'])/(df['IN_CASH1']+df['OUT_CASH1']+1)
df['out_cash_aseet_percent1']=(df['OUT_CASH1'])/(df['IN_CASH1']+df['OUT_CASH1']+1)
df['in_cash_aseet_percent2']=(df['IN_CASH2'])/(df['IN_CASH2']+df['OUT_CASH2']+1)
df['out_cash_aseet_percent2']=(df['OUT_CASH2'])/(df['IN_CASH2']+df['OUT_CASH2']+1)
df['asset_now_top1']=df['ASSET_NOW1']-df['ASSET_TOP1']
df['asset_now_top2']=df['ASSET_NOW2']-df['ASSET_TOP2']
df['stk_now_top1']=df['STK_NOW1']-df['STK_TOP1']
df['stk_now_top2']=df['STK_NOW2']-df['STK_TOP2']
df['prd_now_top1']=df['PRD_NOW1']-df['PRD_TOP1']
df['prd_now_top2']=df['PRD_NOW2']-df['PRD_TOP2']
df[df['AGE']>100].AGE=100
df[df['AGE']<16].AGE=0
df[df['S_AGE']>30].S_AGE=30
df[df['S_AGE']<0].S_AGE=0
df['out_asset_up1']=df['OUT_ASEET_NOW1']-df['OUT_ASEET_NOW2']
df['out_asset_up2']=df['OUT_ASEET_NOW2']-df['OUT_ASEET_NOW3']
df['out_asset_up']=df['out_asset_up1']+df['out_asset_up2']
df['commi_all']=df['NET_COMMI_GJ1']+df['NET_COMMI_GJ1']
df['commi_rate']=(df['NET_COMMI_GJ1']+df['NET_COMMI_GJ1'])/(df['stk_sell_all']+df['stk_buy_all']+1)
df[df['commi_rate']<=0].commi_rate=0
df['trade_days_dif']=df['TRADE_DAYS1']-df['TRADE_DAYS2']
df['is_oper1']=df['OUT_CASH1']+df['IN_CASH1']+df['STK_BUY_AMT1']+df['STK_SELL_AMT1']
df[df['is_oper1']>0].is_oper1=1
df[df['is_oper1']<=0].is_oper1=0
df['is_oper2']=df['OUT_CASH2']+df['IN_CASH2']+df['STK_BUY_AMT2']+df['STK_SELL_AMT2']
df[df['is_oper2']>0].is_oper2=1
df[df['is_oper2']<=0].is_oper2=0
df['is_oper']=df['is_oper1']+df['is_oper2']
df['profit_asset1']=df['PROFIT1']/(df['ASSET_NOW2']+1)
df['profit_asset2']=df['PROFIT2']/(df['ASSET_NOW3']+1)
df['profit_asset']=(df['PROFIT1']+df['PROFIT2'])/(df['ASSET_NOW3']+1)
# (3.3) false
del df['CUSTOMER_NO']
del df['PROM_TYPE']
print('df.shape',df.shape)
print('Generate Finish')
return df
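# --- Illustrative usage (not part of the original file): the class expects a
# DataFrame that already carries the raw columns referenced above (TARGET,
# ASSET_NOW1..3, STK_*, IN_/OUT_CASH*, ...); the CSV file name is an assumption.
# raw = pd.read_csv('old_customer_raw.csv')
# features = construction_featrues(raw).cal_featrues()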
authors: ["luzhonghe999@gmail.com"] | author_id: luzhonghe999@gmail.com

blob_id: 1c7724ee37d22d1684aed42520fca9c81429947f | directory_id: e70a17e8a37847a961f19b136f3bbe74393fa2af | content_id: 99c7bb3b668de77abd4ff88f2cc163915ae0ae5e
path: /PC/build/turtlebot3_gazebo/catkin_generated/installspace/_setup_util.py | detected_licenses: ["MIT"] | license_type: permissive
repo_name: Mondiegus/ROS-4x4-CAR-AI | snapshot_id: 1413ead6f46a8b16005abeea3e0b215caa45f27e | revision_id: 124efe39168ce96eec13d57e644f4ddb6dfe2364 | branch_name: refs/heads/Master
visit_date: 2023-07-14T23:56:53.519082 | revision_date: 2021-03-27T17:28:45 | committer_date: 2021-03-27T17:28:45
github_id: 334,233,839 | star_events_count: 0 | fork_events_count: 0
gha_license_id: MIT | gha_event_created_at: 2021-02-02T13:00:30 | gha_created_at: 2021-01-29T18:46:16 | gha_language: Makefile
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 13,337 | extension: py
content:
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""This file generates shell code for the setup.SHELL scripts to set environment variables."""
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
PATH_TO_ADD_SUFFIX = ['bin']
if IS_WINDOWS:
# while catkin recommends putting dll's into bin, 3rd party packages often put dll's into lib
# since Windows finds dll's via the PATH variable, prepend it with path to lib
PATH_TO_ADD_SUFFIX.extend([['lib', os.path.join('lib', 'x86_64-linux-gnu')]])
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
'PATH': PATH_TO_ADD_SUFFIX,
'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
'PYTHONPATH': 'lib/python3/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
"""
Generate shell code to reset environment variables.
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
"""
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolders = env_var_subfolders[key]
if not isinstance(subfolders, list):
subfolders = [subfolders]
value = _rollback_env_variable(unmodified_environ, key, subfolders)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
def _rollback_env_variable(environ, name, subfolders):
"""
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
:param subfolders: list of str '' or subfoldername that may start with '/'
:returns: the updated value of the environment variable.
"""
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
for subfolder in subfolders:
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
"""
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
:param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
"""
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
"""Generate shell code to prepend environment variables for the all workspaces."""
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted(key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH'):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolders):
"""
Return the prefix to prepend to the environment variable NAME.
Adding any path in NEW_PATHS_STR without creating duplicate or empty items.
"""
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# skip nonexistent paths
if not os.path.exists(path_tmp):
continue
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
"""Generate shell code with found environment hooks for the all workspaces."""
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
generic_env_hooks_workspace = []
specific_env_hooks = []
specific_env_hooks_workspace = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
generic_env_hooks.pop(i)
generic_env_hooks_workspace.pop(i)
# append env hook
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
generic_env_hooks_workspace.append(workspace)
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
specific_env_hooks.pop(i)
specific_env_hooks_workspace.pop(i)
# append env hook
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
specific_env_hooks_workspace.append(workspace)
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
env_hooks = generic_env_hooks + specific_env_hooks
env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
count = len(env_hooks)
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
for i in range(count):
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
parser.add_argument('--local', action='store_true', help='Only consider this prefix path and ignore other prefix path in the environment')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
if not args.local:
# environment at generation time
CMAKE_PREFIX_PATH = r'/home/mondi/catkin_ws/devel;/opt/ros/noetic'.split(';')
else:
# don't consider any other prefix path than this one
CMAKE_PREFIX_PATH = []
# prepend current workspace if not already part of CPP
base_path = os.path.dirname(__file__)
# CMAKE_PREFIX_PATH uses forward slash on all platforms, but __file__ is platform dependent
# base_path on Windows contains backward slashes, need to be converted to forward slashes before comparison
if os.path.sep != '/':
base_path = base_path.replace(os.path.sep, '/')
if base_path not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, base_path)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
# need to explicitly flush the output
sys.stdout.flush()
except IOError as e:
# and catch potential "broken pipe" if stdout is not writable
# which can happen when piping the output to a file but the disk is full
if e.errno == errno.EPIPE:
print(e, file=sys.stderr)
sys.exit(2)
raise
sys.exit(0)
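# --- Hedged reference (not part of the original file): catkin's generated setup.sh
# evaluates the lines this script prints; they are plain shell assignments built by
# assignment()/prepend() above, for example (paths illustrative):
#   export CMAKE_PREFIX_PATH="/home/mondi/catkin_ws/devel:$CMAKE_PREFIX_PATH"
#   export LD_LIBRARY_PATH="/home/mondi/catkin_ws/devel/lib:$LD_LIBRARY_PATH"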
authors: ["Mondiegus9@gmail.com"] | author_id: Mondiegus9@gmail.com

blob_id: b6587d832b17a5f3e431610d1d4d49eadc8e30f6 | directory_id: a3d9701d8074ba66edf51a54038707acb9e0e9b6 | content_id: bd72fa0536afffa0ebcdbb1da2c4547e6e16b67c
path: /food_express/wsgi.py | detected_licenses: [] | license_type: no_license
repo_name: jdshah98/Food-Express | snapshot_id: 2e6b5cc7e0e6bc26f35a12b1a1bb2e336b756187 | revision_id: 3b152239be47228540556e5de96586f7618d7de5 | branch_name: refs/heads/master
visit_date: 2020-06-19T04:13:04.258793 | revision_date: 2019-09-10T07:36:01 | committer_date: 2019-09-10T07:36:01
github_id: 196,556,614 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 401 | extension: py
content:
"""
WSGI config for food_express project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'food_express.settings')
application = get_wsgi_application()
authors: ["jdshahstudio@gmail.com"] | author_id: jdshahstudio@gmail.com

blob_id: d55910c880d0c3d1a2f34f4cbfb944fd9d8adc4e | directory_id: 06268dbf15a1ef8e1e522977dad6e9f52e04ee40 | content_id: 0dfa9fec1e499484dd3cfaa07660760c09c71594
path: /src/deploy/osp_deployer/sah.py | detected_licenses: ["Apache-2.0", "LicenseRef-scancode-warranty-disclaimer"] | license_type: permissive
repo_name: juliovp01/JetPack | snapshot_id: a909b0d66fa0054bf5074922280c286779129efe | revision_id: 88ab3a50cc5f7e6581b006954b722d113924a96c | branch_name: refs/heads/master
visit_date: 2021-07-22T23:44:07.747074 | revision_date: 2017-11-01T21:17:52 | committer_date: 2017-11-01T21:17:52
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 19,244 | extension: py
content:
#!/usr/bin/env python
# Copyright (c) 2015-2017 Dell Inc. or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from osp_deployer.settings.config import Settings
from infra_host import InfraHost
from auto_common import Scp, Ssh, FileHelper
import logging
import time
import shutil
import os
import subprocess
logger = logging.getLogger("osp_deployer")
exitFlag = 0
class Sah(InfraHost):
def __init__(self):
self.settings = Settings.settings
self.user = "root"
self.ip = self.settings.sah_node.public_api_ip
self.pwd = self.settings.sah_node.root_password
self.root_pwd = self.settings.sah_node.root_password
def update_kickstart_usb(self):
sets = self.settings
shutil.copyfile(sets.sah_kickstart, sets.cloud_repo_dir +
"/../osp-sah.ks")
sets.sah_kickstart = sets.cloud_repo_dir + "/../osp-sah.ks"
FileHelper.replace_expression(sets.sah_kickstart,
'^HostName=.*',
'HostName="' +
sets.sah_node.hostname +
"." + sets.domain + '"')
FileHelper.replace_expression(sets.sah_kickstart,
'^SystemPassword=.*',
'SystemPassword="' +
sets.sah_node.root_password +
'"')
FileHelper.replace_expression(sets.sah_kickstart,
'^SubscriptionManagerUser=.*',
'SubscriptionManagerUser="' +
sets.subscription_manager_user +
'"')
FileHelper.replace_expression(sets.sah_kickstart,
'^SubscriptionManagerPassword=.*',
'SubscriptionManagerPassword="' +
sets.subscription_manager_password +
'"')
FileHelper.replace_expression(sets.sah_kickstart,
'^SubscriptionManagerPool=.*',
'SubscriptionManagerPool="' +
sets.subscription_manager_pool_sah +
'"')
FileHelper.replace_expression(sets.sah_kickstart,
'^Gateway=.*',
'Gateway="' +
sets.public_api_gateway +
'"')
FileHelper.replace_expression(sets.sah_kickstart,
'^NameServers=.*',
'NameServers="' +
sets.name_server +
'"')
FileHelper.replace_expression(sets.sah_kickstart,
'^NTPServers=.*',
'NTPServers="' +
sets.ntp_server +
'"')
FileHelper.replace_expression(sets.sah_kickstart,
'^TimeZone=.*',
'TimeZone="' +
sets.time_zone +
'"')
FileHelper.replace_expression(sets.sah_kickstart,
'^anaconda_interface=.*',
'anaconda_interface="' +
sets.sah_node.anaconda_ip + '/' +
sets.public_api_netmask + ' ' +
sets.sah_node.anaconda_iface +
' no"')
FileHelper.replace_expression(sets.sah_kickstart,
'^anaconda_vlanid=.*',
'anaconda_vlanid="' +
sets.public_api_vlanid +
'"')
FileHelper.replace_expression(sets.sah_kickstart,
'^extern_bond_name=.*',
'extern_bond_name="' +
sets.sah_node.public_bond +
'"')
FileHelper.replace_expression(sets.sah_kickstart,
'^extern_bond_opts=.*',
'extern_bond_opts="mode=' +
sets.sah_bond_opts +
'"')
FileHelper.replace_expression(sets.sah_kickstart,
'^extern_ifaces=.*',
'extern_ifaces="' +
sets.sah_node.public_slaves +
'"')
FileHelper.replace_expression(sets.sah_kickstart,
'^internal_bond_name=.*',
'internal_bond_name="' +
sets.sah_node.private_bond +
'"')
FileHelper.replace_expression(sets.sah_kickstart,
'^internal_bond_opts=.*',
'internal_bond_opts="mode=' +
sets.sah_bond_opts +
'"')
FileHelper.replace_expression(sets.sah_kickstart,
'^internal_ifaces=.*',
'internal_ifaces="' +
sets.sah_node.private_slaves +
'"')
FileHelper.replace_expression(sets.sah_kickstart,
'^prov_bond_name=.*',
'prov_bond_name="bond0.' +
sets.provisioning_vlanid +
'"')
FileHelper.replace_expression(sets.sah_kickstart,
'^stor_bond_name=.*',
'stor_bond_name="bond0.' +
sets.storage_vlanid + '"')
FileHelper.replace_expression(sets.sah_kickstart,
'^mgmt_bond_name=.*',
'mgmt_bond_name="bond0.' +
sets.management_vlanid + '"')
FileHelper.replace_expression(sets.sah_kickstart,
'^pub_api_bond_name=.*',
'pub_api_bond_name="bond1.' +
sets.public_api_vlanid + '"')
FileHelper.replace_expression(sets.sah_kickstart,
'^priv_api_bond_name=.*',
'priv_api_bond_name="bond0.' +
sets.private_api_vlanid + '"')
FileHelper.replace_expression(sets.sah_kickstart,
'^br_prov_boot_opts=.*',
'br_prov_boot_opts="onboot static ' +
sets.sah_node.provisioning_ip + '/' +
sets.provisioning_netmask + '"')
FileHelper.replace_expression(sets.sah_kickstart,
'^br_stor_boot_opts=.*',
'br_stor_boot_opts="onboot static ' +
sets.sah_node.storage_ip + '/' +
sets.storage_netmask + '"')
FileHelper.replace_expression(sets.sah_kickstart,
'^br_mgmt_boot_opts=.*',
'br_mgmt_boot_opts="onboot static ' +
sets.sah_node.management_ip + '/' +
sets.management_netmask + '"')
FileHelper.replace_expression(sets.sah_kickstart,
'^br_pub_api_boot_opts=.*',
'br_pub_api_boot_opts="onboot static ' +
sets.sah_node.public_api_ip + '/' +
sets.public_api_netmask + '"')
FileHelper.replace_expression(sets.sah_kickstart,
'^br_priv_api_boot_opts=.*',
'br_priv_api_boot_opts="onboot static ' +
sets.sah_node.private_api_ip + '/' +
sets.private_api_netmask + '"')
FileHelper.replace_expression(sets.sah_kickstart,
'^prov_network=.*',
'prov_network="' +
sets.provisioning_network.split("/")[0] +
'"')
FileHelper.replace_expression(sets.sah_kickstart,
'^prov_netmask=.*',
'prov_netmask="' +
sets.provisioning_netmask +
'"')
time.sleep(3)
if self.settings.is_fx is True:
cmds = ["sed -i 's/{AnacondaIface_device}/{" +
"AnacondaIface_device}." +
self.settings.public_api_vlanid +
"/' " + sets.sah_kickstart,
"sed -i 's/bootproto=static/vlanid=" +
self.settings.public_api_vlanid +
" --bootproto=static/' " +
sets.sah_kickstart]
for cmd in cmds:
os.system(cmd)
def upload_iso(self):
shutil.copyfile(self.settings.rhel_iso,
"/store/data/iso/RHEL7.iso")
def clear_known_hosts(self):
hosts = [
self.settings.director_node.public_api_ip,
self.settings.rhscon_node.public_api_ip
]
        # Check whether we're running from the SAH node
out = subprocess.check_output("ip addr",
stderr=subprocess.STDOUT,
shell=True)
if self.settings.sah_node.public_api_ip in out:
for host in hosts:
cmd = 'ssh-keygen -R ' + host
self.run(cmd)
else:
for host in hosts:
subprocess.check_output('ssh-keygen -R ' + host,
stderr=subprocess.STDOUT,
shell=True)
def handle_lock_files(self):
files = [
'rhscon_vm.vlock',
'director_vm.vlock',
]
# Delete any staged locking files to prevent accidental reuse
for eachone in files:
staged_file_name = '/root/' + eachone
if os.path.isfile(staged_file_name):
os.remove(staged_file_name)
if self.settings.version_locking_enabled is True:
logger.debug(
"Uploading version locking files for director & rhscon VMs")
for eachone in files:
source_file_name = self.settings.lock_files_dir + "/" + eachone
dest_file_name = '/root/' + eachone
shutil.copyfile(source_file_name, dest_file_name)
def upload_director_scripts(self):
remote_file = "/root/deploy-director-vm.sh"
self.upload_file(self.settings.director_deploy_sh,
remote_file)
self.run("chmod 777 /root/deploy-director-vm.sh")
def create_director_vm(self):
director_conf = "/root/director.cfg"
self.run("rm " + director_conf + " -f")
conf = ("rootpassword " + self.settings.director_node.root_password,
"timezone " + self.settings.time_zone,
"smuser " + self.settings.subscription_manager_user,
"smpassword " + self.settings.subscription_manager_password,
"smpool " + self.settings.subscription_manager_pool_vm_rhel,
"hostname " + self.settings.director_node.hostname + "." +
self.settings.domain,
"gateway " + self.settings.public_api_gateway,
"nameserver " + self.settings.name_server,
"ntpserver " + self.settings.sah_node.provisioning_ip,
"user " + self.settings.director_install_account_user,
"password " + self.settings.director_install_account_pwd,
"# Iface IP NETMASK ",)
conf = conf + ("eth0 " +
self.settings.director_node.public_api_ip +
" " + self.settings.public_api_netmask,)
conf = conf + ("eth1 " +
self.settings.director_node.provisioning_ip +
" " + self.settings.provisioning_netmask,)
conf = conf + ("eth2 " +
self.settings.director_node.management_ip +
" " + self.settings.management_netmask,)
conf = conf + ("eth3 " +
self.settings.director_node.private_api_ip +
" " + self.settings.private_api_netmask,)
for line in conf:
self.run("echo '" +
line +
"' >> " +
director_conf)
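        # For illustration only (hypothetical values), the rendered
        # /root/director.cfg ends up as plain "key value" lines such as:
        #   rootpassword ChangeMe123
        #   hostname director.example.local
        #   # Iface     IP               NETMASK
        #   eth0        192.168.190.12   255.255.255.0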
remote_file = "sh /root/deploy-director-vm.sh " + \
director_conf + " " + \
"/store/data/iso/RHEL7.iso"
re = self.run_tty(remote_file)
startVM = True
for ln in re[0].split("\n"):
if "Restarting guest" in ln:
startVM = False
if startVM:
logger.debug(
"=== wait for the director vm install "
"to be complete")
while "shut off" not in \
self.run("virsh list --all | grep director")[0]:
time.sleep(60)
logger.debug("=== power on the director VM ")
self.run("virsh start director")
logger.debug("=== waiting for the director vm to boot up")
self.wait_for_vm_to_come_up(self.settings.director_node.public_api_ip,
"root",
self.settings.director_node.root_password)
logger.debug("director host is up")
def wait_for_vm_to_come_up(self, target_ip, user, password):
while True:
status = Ssh.execute_command(
target_ip,
user,
password,
"ps")[0]
if status != "host not up":
break
logger.debug("vm is not up. Sleeping...")
time.sleep(10)
def delete_director_vm(self):
while "director" in \
self.run("virsh list --all | grep director")[0]:
self.run("virsh destroy director")
time.sleep(20)
self.run("virsh undefine director")
time.sleep(20)
def create_rhscon_vm(self):
remote_file = "/root/deploy-rhscon-vm.py"
self.upload_file(self.settings.rhscon_deploy_py,
remote_file)
logger.debug("=== create rhscon.cfg")
rhscon_conf = "/root/rhscon.cfg"
self.run("rm " + rhscon_conf + " -f")
conf = ("rootpassword " + self.settings.rhscon_node.root_password,
"timezone " + self.settings.time_zone,
"smuser " + self.settings.subscription_manager_user,
"smpassword " + self.settings.subscription_manager_password,
"smpool " + self.settings.subscription_manager_vm_ceph,
"hostname " + self.settings.rhscon_node.hostname + "." +
self.settings.domain,
"gateway " + self.settings.public_api_gateway,
"nameserver " + self.settings.name_server,
"ntpserver " + self.settings.sah_node.provisioning_ip,
"# Iface IP NETMASK ",)
conf = conf + ("eth0 " +
self.settings.rhscon_node.public_api_ip +
" " + self.settings.public_api_netmask,)
conf = conf + ("eth1 " +
self.settings.rhscon_node.storage_ip +
" " + self.settings.storage_netmask,)
for comd in conf:
self.run("echo '" + comd + "' >> " + rhscon_conf)
logger.debug("=== kick off the Storage Console VM deployment")
re = self.run_tty("python " +
remote_file +
" /root/rhscon.cfg " +
"/store/data/iso/RHEL7.iso")
startVM = True
for ln in re[0].split("\n"):
if "Restarting guest" in ln:
startVM = False
if startVM:
            logger.debug(
                "=== wait for the Storage Console VM install to be complete "
                "& power it on")
while "shut off" \
not in self.run("virsh list --all | grep rhscon")[0]:
time.sleep(60)
logger.debug("=== power on the Storage Console VM ")
self.run("virsh start rhscon")
logger.debug("=== waiting for the Storage Console vm to boot up")
self.wait_for_vm_to_come_up(self.settings.rhscon_node.public_api_ip,
"root",
self.settings.rhscon_node.root_password)
logger.debug("Storage Console VM is up")
def delete_rhscon_vm(self):
# Also delete any leftover "ceph" VM so that it cannot interfere
# with the new "rhscon" VM that replaces it.
for vm in "ceph", "rhscon":
if vm in self.run("virsh list --all | grep {}".format(vm))[0]:
if vm == "ceph":
logger.info("=== deleting deprecated ceph VM")
if "running" in self.run("virsh domstate {}".format(vm))[0]:
self.run("virsh destroy {}".format(vm))
time.sleep(20)
self.run("virsh undefine {}".format(vm))
time.sleep(20)
|
[
"gael01@gmail.com"
] |
gael01@gmail.com
|
2451db96195a6ccf741363e24b3bd80870964b28
|
d317a0828f8aa49d0a29a67251e221a642fd90c0
|
/content/migrations/0002_auto_20160511_0822.py
|
d6d1229dbe2a5def7591a6dc386d99969cfda4df
|
[] |
no_license
|
ntuckovic/simple_cms
|
45136d65703e789656aa7ae44c3bb85c3cae88a3
|
4d484ae4b47ba088c9af04c8df8626548e64dc63
|
refs/heads/master
| 2016-09-12T16:24:53.307081
| 2016-05-20T18:40:59
| 2016-05-20T18:40:59
| 58,266,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 443
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-11 06:22
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('content', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'verbose_name': 'Category', 'verbose_name_plural': 'Categories'},
),
]
|
[
"nikola.tuckovic@styria.hr"
] |
nikola.tuckovic@styria.hr
|
8c493f0ad40467553ef929410b4c2e22d271f3a9
|
07f932c30bec781f383c16ccaa4bb0188bbeb719
|
/class4/exercise1.py
|
f76d5047c5234de093cbcf30c62f73384d4af26e
|
[] |
no_license
|
mrreyes512/pynet_ansible
|
1e4b8381080a393ef74fce35a2cd31c2e7a43011
|
98d773443dd224c580ed60fc089ce9352cae9547
|
refs/heads/master
| 2021-01-11T03:29:13.879845
| 2017-01-19T07:08:17
| 2017-01-19T07:08:17
| 71,002,914
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 811
|
py
|
#!/usr/bin/env python
"""
Use Paramiko to retrieve the entire 'show version' output from pynet-rtr2.
"""
import paramiko
from getpass import getpass
ip_addr = '184.105.247.71'
port = 22
username = 'pyclass'
#password = getpass()
password = '88newclass'
remote_conn_pre=paramiko.SSHClient()
#remote_conn_pre.load_system_host_keys()
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
remote_conn_pre.connect(ip_addr, username=username, password=password, look_for_keys=False, allow_agent=False, port=port)
stdin, stdout, stderr = remote_conn_pre.exec_command('show version\n')
#print stdout.readlines()
for line in stdout.readlines():
print line.strip()
#remote_conn = remote_conn_pre.invoke_shell()
#remote_conn.send("show ip int brief\n")
#outp = remote_conn.recv(5000)
#print outp
|
[
"mark.reyes@twcable.com"
] |
mark.reyes@twcable.com
|
e72041dd1699534744c115c7c5505d836205fc6e
|
9ee4692e9e0c6d9b9ccd865ecb26d97f682d34c7
|
/factorial_restocking.py
|
67abdcd7c48e7ab646fadebcbc0803c28d49139c
|
[] |
no_license
|
yanickdi/clsc_numeric
|
d752f2ff028ea97db307c9978ecd7136c4c75bac
|
e58a2de81ae29949cfc5ebcf8297cb5e61763fb4
|
refs/heads/master
| 2021-01-23T04:45:26.794805
| 2018-03-19T10:53:15
| 2018-03-19T10:53:15
| 92,942,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,028
|
py
|
import sys, os.path, re
from statistics import mean
from jinja2 import Template
from solver import SolverProxy, Parameter, MODEL_2, MODEL_NB, ModelNBSolver
LATEX_SUBS = (
(re.compile(r'\\'), r'\\textbackslash'),
(re.compile(r'([{}_#%&$])'), r'\\\1'),
(re.compile(r'~'), r'\~{}'),
(re.compile(r'\^'), r'\^{}'),
(re.compile(r'"'), r"''"),
(re.compile(r'\.\.\.+'), r'\\ldots'),
)
def escape_tex(value):
newval = value
for pattern, replacement in LATEX_SUBS:
newval = pattern.sub(replacement, newval)
return newval
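# For illustration, escape_tex('50%_a & b') yields '50\%\_a \& b': each LaTeX
# special character picks up a leading backslash per LATEX_SUBS above.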
class Factorial:
tau_lh = lambda : (.15, .5)
s_lh = lambda cn: (0, cn/2)
cr_lh = lambda cn, delta : (.1*cn, .4*cn)
delta_lh = lambda : (.5, .85)
cn_lh = lambda : (.1, .5)
a_lh = lambda : (.001, .01)
LOW, HIGH = 'LOW', 'HIGH'
def __init__(self):
self.__dict = {}
self.__proxy = SolverProxy()
self.__calc__()
self.__fullFactorialTable()
def __key(self, model, tau, a, s, cr, cn, delta):
assert model in (MODEL_2, MODEL_NB)
for val in (tau, a, s, cr, cn): assert val in (Factorial.LOW, Factorial.HIGH)
return '{},tau={},a={},s={},cr={},cn={},delta={}'.format(model, tau, a, s, cr, cn,delta)
def _add(self, key, par, sol):
assert key not in self.__dict
self.__dict[key] = {'par' : par, 'sol': sol}
def _get(self, key):
return self.__dict[key]
def __calc__(self):
p = Factorial
iter = self.__state_iter
proxy = SolverProxy()
num = 0
for tau, tau_state in iter(p.tau_lh()):
for delta, delta_state in iter(p.delta_lh()):
for cn, cn_state in iter(p.cn_lh()):
for cr, cr_state in iter(p.cr_lh(cn, delta)):
for s, s_state in iter(p.s_lh(cn)):
for a, a_state in iter(p.a_lh()):
# calculate model with restocking_fee
par_nb = Parameter(MODEL_NB, tau=tau, a=a, s=s, cn=cn)
sol_nb = proxy.read_or_calc_and_write(par_nb, resolution='middle')
num += 1; print(num)
#sol_nb = ModelNBSolver.solve(par_nb, 'low')
key_n = self.__key(MODEL_NB, tau_state, a_state, s_state, cr_state, cn_state, delta_state)
self._add(key_n, par_nb, sol_nb)
# calculate model two
par_o = Parameter(MODEL_2, tau=tau, a=a, s=s, cr=cr, cn=cn, delta=delta)
sol_o = proxy.calculate(par_o)
key_o = self.__key(MODEL_2, tau_state, a_state, s_state, cr_state, cn_state, delta_state)
self._add(key_o, par_o, sol_o)
proxy.commit()
def __fullFactorialTable(self):
self.__ff_table = [[None for j in range(8)] for i in range(8)]
lohi = (Factorial.LOW, Factorial.HIGH)
for i, (s, cr, a) in enumerate([(s, cr, a) for s in lohi for cr in lohi for a in lohi]):
for j, (cn, delta, tau) in enumerate([(cn, delta, tau) for cn in lohi for delta in lohi for tau in lohi]):
key_nb = self.__key(MODEL_NB, tau, a, s, cr, cn, delta)
key_o = self.__key(MODEL_2, tau, a, s, cr, cn, delta)
val_n = self._get(key_nb)
val_o = self._get(key_o)
self.__ff_table[i][j] = {'o' : val_o, 'n' : val_n}
def __anal_line_parameters(self, table, par):
f = Factorial
pars = []
if table == 'delta':
delta = par
for tau, cn in [(tau,cn) for tau in f.tau_lh() for cn in f.cn_lh()]:
for a, s, cr in [(a,s,cr) for a in f.a_lh() for s in f.s_lh(cn) for cr in f.cr_lh(cn,delta)]:
pars.append([tau, a, s, cr, cn, delta])
if table == 'tau':
tau = par
for delta, cn in [(delta,cn) for delta in f.delta_lh() for cn in f.cn_lh()]:
for a, s, cr in [(a,s,cr) for a in f.a_lh() for s in f.s_lh(cn) for cr in f.cr_lh(cn,delta)]:
pars.append([tau, a, s, cr, cn, delta])
if table == 'cn':
cn = par
for tau, delta in [(tau, delta) for tau in f.tau_lh() for delta in f.delta_lh()]:
for a, s, cr in [(a,s,cr) for a in f.a_lh() for s in f.s_lh(cn) for cr in f.cr_lh(cn,delta)]:
pars.append([tau, a, s, cr, cn, delta])
if table == 'a':
a = par
for tau, delta, cn in [(tau,delta,cn) for tau in f.tau_lh() for delta in f.delta_lh() for cn in f.cn_lh()]:
for s, cr in [(s,cr) for s in f.s_lh(cn) for cr in f.cr_lh(cn,delta)]:
pars.append([tau, a, s, cr, cn, delta])
assert len(pars) == int(2**5)
return pars
def getAnalysisLine(self, table, par):
raise NotImplementedError()
def __both_solutions(self, tau, a, s, cr, cn, delta):
raise NotImplementedError()
def __state_iter(self, iterable):
state = Factorial.LOW
for val in iterable:
yield (val, state)
state = Factorial.HIGH if state == Factorial.LOW else Factorial.LOW
def getTableValue(self, table, i, j):
o_val, n_val = self.__ff_table[i][j]['o'], self.__ff_table[i][j]['n']
o_sol, n_sol = o_val['sol'], n_val['sol']
if table == 'case':
print(n_sol)
return '{}/{}'.format(o_sol.case, n_sol.case)
elif table == 'profits':
prof = (o_val['sol'].profit_man / n_val['sol'].profit_man)*100
return '{:.2f}\\%'.format(prof)
elif table == 'retailerprof':
prof = (o_val['sol'].profit_ret / n_val['sol'].profit_ret)*100
return '{:.2f}\\%'.format(prof)
elif table == 'rho':
rho_dec = (o_val['sol'].dec.rho / n_val['sol'].dec.rho) *100
return '{:.2f}\\%'.format(rho_dec)
elif table == 'price_new':
price_dec = (o_val['sol'].dec.pn / n_val['sol'].dec.pn) * 100
return '{:.2f}\\%'.format(price_dec)
elif table == 'wholesale_price':
wn_dec = (o_val['sol'].dec.wn / n_val['sol'].dec.wn) * 100
#wn_dec = n_val['sol'].dec.wn
return '{:.2f}\\%'.format(wn_dec)
elif table == 'restocking_price':
#b_rel = n_val['sol'].dec.b
b_rel = (n_sol.dec.b - n_val['par'].s) / (n_sol.dec.wn - n_val['par'].s)
#'{:.2f}\\%'.format(b_rel)
return '{:.2f}\\%'.format(b_rel)
elif table.startswith('par'):
par = table.split('_')[1]
if par == 'tau':
return o_val['par'].tau
elif par == 'cn':
return o_val['par'].cn
elif par == 'cr':
return o_val['par'].cr
elif par == 'a':
return o_val['par'].a
elif par == 's':
return o_val['par'].s
elif par == 'delta':
return o_val['par'].delta
def main():
if len(sys.argv) != 2:
print('usage {} template.tex'.format(os.path.basename(sys.argv[0])))
sys.exit()
fullFactorial = Factorial()
tplfile = sys.argv[1]
# read template file
with open(tplfile, 'r') as f:
template = Template(f.read())
# open output file
output_file = 'output.tex' if tplfile.endswith('.tex') else 'output.csv'
with open(output_file, 'w', newline='\n') as f:
renderedString = template.render({
'full': fullFactorial,
'ft' : fullFactorial.getTableValue,
'at' : fullFactorial.getAnalysisLine,
'esc' : escape_tex
})
f.write(renderedString)
if __name__ == '__main__':
main()
|
[
"yanick.dickbauer@uni-graz.at"
] |
yanick.dickbauer@uni-graz.at
|
3cbf8df1e8e863b25afe9f35429a2b205af7ac9c
|
795cb0b80b5f8cf709a07843dbfb93adf2ad8942
|
/ghmirror/app/__init__.py
|
f19c3fdc4744f2cfc0482d7886d6f26bc7ff7dd8
|
[] |
no_license
|
maorfr/github-mirror
|
8ff2db7a8c9a085fe7a599586697def1f0a5c849
|
1a282fe7028aab61e307372edf93f909500c7bbc
|
refs/heads/master
| 2022-11-15T01:16:51.462768
| 2020-02-10T17:05:01
| 2020-02-10T17:15:09
| 239,732,236
| 0
| 0
| null | 2020-02-11T10:14:23
| 2020-02-11T10:14:22
| null |
UTF-8
|
Python
| false
| false
| 4,224
|
py
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2020
# Author: Amador Pahim <apahim@redhat.com>
"""
The GitHub Mirror endpoints
"""
import hashlib
import logging
import requests
import flask
from ghmirror.app.response import MirrorResponse
from ghmirror.data_structures.monostate import RequestsCache
from ghmirror.data_structures.monostate import StatsCache
logging.basicConfig(level=logging.INFO,
format='%(asctime)-15s %(message)s')
LOG = logging.getLogger(__name__)
GH_API = 'https://api.github.com'
APP = flask.Flask(__name__)
@APP.route('/healthz', methods=["GET"])
def healthz():
"""
Health check endpoint for Kubernetes.
"""
return flask.Response('OK')
@APP.route('/stats', methods=["GET"])
def stats():
"""
Cache statistics endpoint.
"""
stats_cache = StatsCache()
return flask.jsonify(
{
'cache_hit': stats_cache.hits,
'cache_miss': stats_cache.misses,
}
)
@APP.route('/', defaults={'path': ''})
@APP.route('/<path:path>', methods=["GET", "POST", "PUT", "PATCH", "DELETE"])
def ghmirror(path):
"""
Default endpoint, matching any url without a specific endpoint.
"""
url = f'{GH_API}/{path}'
if flask.request.args:
url += '?'
for key, value in flask.request.args.items():
url += f'{key}={value}&'
url = url.rstrip('&')
headers = {}
authorization = flask.request.headers.get('Authorization')
auth_sha = None
if authorization is not None:
headers['Authorization'] = authorization
# The authorization token will be used as cache key,
# so let's hash it for additional security.
auth_sha = hashlib.sha1(authorization.encode()).hexdigest()
if flask.request.method != 'GET':
LOG.info('[%s] BYPASS %s', flask.request.method, url)
resp = requests.request(flask.request.method,
url=url,
headers=headers,
data=flask.request.data)
return flask.Response(resp.content,
resp.status_code)
stats_cache = StatsCache()
cache = RequestsCache()
cache_key = (url, auth_sha)
if cache_key in cache:
etag = cache[cache_key].headers.get('ETag')
if etag is not None:
headers['If-None-Match'] = etag
last_mod = cache[cache_key].headers.get('Last-Modified')
if last_mod is not None:
headers['If-Modified-Since'] = last_mod
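    # With the validators set above, the request below may come back as
    # "304 Not Modified", in which case the cached body is served; GitHub does
    # not count 304 responses against the API rate limit.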
resp = requests.request(flask.request.method,
url=url,
headers=headers)
if resp.status_code != 304:
LOG.info('[GET] CACHE_MISS %s', url)
stats_cache.miss()
# Caching only makes sense when at least one
# of those headers is present
if any(['ETag' in resp.headers,
'Last-Modified' in resp.headers]):
cache[cache_key] = resp
mirror_response = MirrorResponse(original_response=resp,
gh_api_url=GH_API,
gh_mirror_url=flask.request.host_url)
else:
LOG.info('[GET] CACHE_HIT %s', url)
stats_cache.hit()
mirror_response = MirrorResponse(original_response=cache[cache_key],
gh_api_url=GH_API,
gh_mirror_url=flask.request.host_url)
return flask.Response(mirror_response.content,
mirror_response.status_code,
mirror_response.headers)
if __name__ == '__main__': # pragma: no cover
APP.run(host='127.0.0.1', debug=True, port='8080')
|
[
"apahim@redhat.com"
] |
apahim@redhat.com
|
9e0ede3a2753d931b34906babaf3125fe709eebd
|
34975bd680962abba1ecd970c7320c8454a3a3b7
|
/05_List/05_List_14.py
|
787d2dc923ee345a234957bf1fcb339d5adfb754
|
[] |
no_license
|
miello/com_prog
|
64df2dfa57f7f65cacef2d737e3639fa9b656fdf
|
074241646f42f05c117649a94651eb2916fb468e
|
refs/heads/master
| 2023-01-07T19:03:31.781243
| 2020-11-15T18:21:24
| 2020-11-15T18:21:24
| 288,693,402
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
inp = [int(e) for e in input().split()]
co = 0
for p in range(1, len(inp) - 1):
if inp[p - 1] < inp[p] and inp[p] > inp[p + 1]:
co += 1
print(co)
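# For example, the input "1 3 2 5 1" has peaks at 3 and 5, so this prints 2.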
|
[
"partearth1212@gmail.com"
] |
partearth1212@gmail.com
|
22793485a0f7007c064110e5272ecbb694f25b5c
|
07b0a4cb98d79295ebd26bb8c8d7a46afdfb083c
|
/解答/14文字列操作_回答例/14_3.py
|
d40d5363c6b17d027c1d528d2b13d0e5e45894d9
|
[] |
no_license
|
kubotaaaaaaaaaa/python
|
a9d023a4ae4c3eee0e96e1f1c38ee2f96dc307ba
|
aa8a4239e56d2101101db6c8ff0d6657c7475877
|
refs/heads/master
| 2023-08-01T13:23:45.927737
| 2021-09-22T02:21:09
| 2021-09-22T02:21:09
| 409,025,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
# If the input string is 11 or more characters long, keep only the first 10 and display the result
# Read a string
in_str = input("Enter a string: ")
# If it is 11 or more characters long, keep only the first 10
if len(in_str) > 10:
    in_str = in_str[0:10]
# Display the resulting string
print("Result string: ", in_str, sep="")
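# For example, a 12-character input such as "abcdefghijkl" is truncated to its
# first 10 characters, "abcdefghij".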
|
[
"ykh2135212@stu.o-hara.ac.jp"
] |
ykh2135212@stu.o-hara.ac.jp
|
c38d87877d10582f005561fd99d0f442d987576d
|
27f13ed7130ac753111beb3025e406b8e6ecee5d
|
/amazon/books.py
|
e03fd1d037ba27a230b5a545df42e37195f3d556
|
[] |
no_license
|
jonzingale/jonzingale.github.io
|
85d4da81a49bf18765c3fbfbcc22f8b6c14b53ea
|
15cb257adb5b9f9b3982c380666a6bd3b5737e37
|
refs/heads/master
| 2023-06-21T15:29:21.552683
| 2023-06-14T04:43:15
| 2023-06-14T04:43:15
| 139,901,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
# import urllib3
# from bs4 import BeautifulSoup
from pdb import set_trace as st
import json
import csv
# urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Parse Amazon purchases, and display books
# in a clever way. Consider using LCC for classification.
class Graph:
def __init__(self):
self.json = {}
self.edges = self.json.values()
self.nodes = self.json.keys()
# subgraph :: {user_stub: [user_stubs]}
def add_subgraph(self, node, edges):
self.json[node] = edges
class Agent:
def __init__(self):
self.http = urllib3.PoolManager()
self.csvData = self.parseCSV()
self.graph = Graph()
self.writeJSON()
def writeJSON(self):
encoder = json.JSONEncoder()
file = open("./opponents_of_%s.json" % self.user, "w")
file.write(encoder.encode(self.graph.json))
file.close()
def parseCSV(self, user): #### Write this correctly.
file = open("./opponents_of_%s.json" % self.user, "r")
page = BeautifulSoup(file.data, features="lxml")
# ordered data rather than csv.reader: data[0]['game_id']
dictCsv = csv.DictReader(page.p.text.splitlines(), delimiter='\t')
return dictCsv
def data_writer(self, file, data):
jsonEnc = json.JSONEncoder()
file.write(jsonEnc.encode(data))
agent = Agent()
|
[
"jonzingale@gmail.com"
] |
jonzingale@gmail.com
|
0036b7e44c99c4b71575861c0070e88fcab5cc22
|
484f4ff9103a0f997eaa9a84efa35a12377d6540
|
/rfc_record.py
|
a65074e6adc0633c9071480e991d973c6cfcd959
|
[] |
no_license
|
ruturajvy/P2P-Distributed-Index
|
85199b62deb658d2bbf472894a2814db0d53a754
|
6e77d3aebdba65d0144f54fb93234bb8de21bdd8
|
refs/heads/master
| 2020-04-12T11:54:19.670334
| 2018-12-19T18:32:47
| 2018-12-19T18:32:47
| 162,475,785
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
class rfc_record:
'''Class that describes each record of RFC index'''
def __init__(self, id, title, hostname, ttl = 7200):
self.id = id
self.title = title
self.hostname = hostname
self.ttl = ttl
def rfc_rec_string(self):
sep ='*'
rfc_rec_str = str(self.id) + sep + self.title + sep + str(self.hostname) + sep + str(self.ttl)
return rfc_rec_str
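# A minimal usage sketch with hypothetical values:
#   rec = rfc_record(1234, 'Example Title', 'host1.example.com')
#   rec.rfc_rec_string()  # -> '1234*Example Title*host1.example.com*7200'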
|
[
"ruturaj8396@gmail.com"
] |
ruturaj8396@gmail.com
|
d22000aa293a9346c4dc46c9a669f81b8433ecd2
|
896717bb0762e5a6c259dd9194726ba2a84e8f42
|
/beg95.py
|
024b6b31cd53191c2b532676d18afffd6b3c118b
|
[] |
no_license
|
sarureddi/floorvalue
|
d0cb81db0f0081091f9cfadf1d85230035f34593
|
27105728d260f236460cce0e59da74bdcd4c5fe6
|
refs/heads/master
| 2020-06-03T19:42:58.872883
| 2019-06-13T06:50:02
| 2019-06-13T06:50:02
| 191,707,126
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 75
|
py
|
Pr1,Ti1,Rt1=map(int,input().split())
i1=Pr1*Ti1*Rt1
si1=i1//100
print(si1)
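# For example, the input "1000 2 5" gives 1000*2*5 = 10000 and 10000 // 100 = 100.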
|
[
"noreply@github.com"
] |
sarureddi.noreply@github.com
|
72e69dc8ecc9b944bfda48844915f1c3a7db0ec0
|
5daee2edd3f676ad0d1cebf223a1ecc6bc251e14
|
/Course4FinalProject/controller2d.py
|
b12979bea5fc2ffc962b39433022c2716bca4e40
|
[] |
no_license
|
Aniket-Gujarathi/Motion-Planning-for-Self-Driving-Cars
|
48d4fc06b7d0afeea5a509d44051eaee6811be3c
|
a446341b71ab5abf3b21b7ca7b31a2ca7fe00ea3
|
refs/heads/master
| 2020-12-14T22:20:51.778698
| 2020-03-30T15:26:08
| 2020-03-30T15:26:08
| 234,890,721
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,393
|
py
|
#!/usr/bin/env python3
"""
2D Controller Class to be used for the CARLA waypoint follower demo.
"""
import cutils
import numpy as np
class Controller2D(object):
def __init__(self, waypoints):
self.vars = cutils.CUtils()
self._lookahead_distance = 2.0
self._current_x = 0
self._current_y = 0
self._current_yaw = 0
self._current_speed = 0
self._desired_speed = 0
self._current_frame = 0
self._current_timestamp = 0
self._start_control_loop = False
self._set_throttle = 0
self._set_brake = 0
self._set_steer = 0
self._waypoints = waypoints
self._conv_rad_to_steer = 180.0 / 70.0 / np.pi
self._pi = np.pi
self._2pi = 2.0 * np.pi
def update_values(self, x, y, yaw, speed, timestamp, frame):
self._current_x = x
self._current_y = y
self._current_yaw = yaw
self._current_speed = speed
self._current_timestamp = timestamp
self._current_frame = frame
if self._current_frame:
self._start_control_loop = True
def get_lookahead_index(self, lookahead_distance):
min_idx = 0
min_dist = float("inf")
for i in range(len(self._waypoints)):
dist = np.linalg.norm(np.array([
self._waypoints[i][0] - self._current_x,
self._waypoints[i][1] - self._current_y]))
if dist < min_dist:
min_dist = dist
min_idx = i
total_dist = min_dist
lookahead_idx = min_idx
for i in range(min_idx + 1, len(self._waypoints)):
if total_dist >= lookahead_distance:
break
total_dist += np.linalg.norm(np.array([
self._waypoints[i][0] - self._waypoints[i-1][0],
self._waypoints[i][1] - self._waypoints[i-1][1]]))
lookahead_idx = i
return lookahead_idx
def update_desired_speed(self):
min_idx = 0
min_dist = float("inf")
desired_speed = 0
for i in range(len(self._waypoints)):
dist = np.linalg.norm(np.array([
self._waypoints[i][0] - self._current_x,
self._waypoints[i][1] - self._current_y]))
if dist < min_dist:
min_dist = dist
min_idx = i
self._desired_speed = self._waypoints[min_idx][2]
def update_waypoints(self, new_waypoints):
self._waypoints = new_waypoints
def get_commands(self):
return self._set_throttle, self._set_steer, self._set_brake
def set_throttle(self, input_throttle):
# Clamp the throttle command to valid bounds
throttle = np.fmax(np.fmin(input_throttle, 1.0), 0.0)
self._set_throttle = throttle
def set_steer(self, input_steer_in_rad):
        # Convert radians to [-1, 1]
input_steer = self._conv_rad_to_steer * input_steer_in_rad
# Clamp the steering command to valid bounds
steer = np.fmax(np.fmin(input_steer, 1.0), -1.0)
self._set_steer = steer
def set_brake(self, input_brake):
        # Clamp the brake command to valid bounds
brake = np.fmax(np.fmin(input_brake, 1.0), 0.0)
self._set_brake = brake
def update_controls(self):
######################################################
# RETRIEVE SIMULATOR FEEDBACK
######################################################
x = self._current_x
y = self._current_y
yaw = self._current_yaw
v = self._current_speed
self.update_desired_speed()
v_desired = self._desired_speed
t = self._current_timestamp
waypoints = self._waypoints
throttle_output = 0
steer_output = 0
brake_output = 0
self.vars.create_var('kp', 0.50)
self.vars.create_var('ki', 0.30)
self.vars.create_var('integrator_min', 0.0)
self.vars.create_var('integrator_max', 10.0)
self.vars.create_var('kd', 0.13)
self.vars.create_var('kp_heading', 8.00)
self.vars.create_var('k_speed_crosstrack', 0.00)
self.vars.create_var('cross_track_deadband', 0.01)
self.vars.create_var('x_prev', 0.0)
self.vars.create_var('y_prev', 0.0)
self.vars.create_var('yaw_prev', 0.0)
self.vars.create_var('v_prev', 0.0)
self.vars.create_var('t_prev', 0.0)
self.vars.create_var('v_error', 0.0)
self.vars.create_var('v_error_prev', 0.0)
self.vars.create_var('v_error_integral', 0.0)
# Skip the first frame to store previous values properly
if self._start_control_loop:
self.vars.v_error = v_desired - v
self.vars.v_error_integral += self.vars.v_error * \
(t - self.vars.t_prev)
v_error_rate_of_change = (self.vars.v_error - self.vars.v_error_prev) /\
(t - self.vars.t_prev)
# cap the integrator sum to a min/max
self.vars.v_error_integral = \
np.fmax(np.fmin(self.vars.v_error_integral,
self.vars.integrator_max),
self.vars.integrator_min)
throttle_output = self.vars.kp * self.vars.v_error +\
self.vars.ki * self.vars.v_error_integral +\
self.vars.kd * v_error_rate_of_change
# Find cross track error (assume point with closest distance)
crosstrack_error = float("inf")
crosstrack_vector = np.array([float("inf"), float("inf")])
ce_idx = self.get_lookahead_index(self._lookahead_distance)
crosstrack_vector = np.array([waypoints[ce_idx][0] - \
x - self._lookahead_distance*np.cos(yaw),
waypoints[ce_idx][1] - \
y - self._lookahead_distance*np.sin(yaw)])
crosstrack_error = np.linalg.norm(crosstrack_vector)
# set deadband to reduce oscillations
print(crosstrack_error)
if crosstrack_error < self.vars.cross_track_deadband:
crosstrack_error = 0.0
# Compute the sign of the crosstrack error
crosstrack_heading = np.arctan2(crosstrack_vector[1],
crosstrack_vector[0])
crosstrack_heading_error = crosstrack_heading - yaw
crosstrack_heading_error = \
(crosstrack_heading_error + self._pi) % \
self._2pi - self._pi
crosstrack_sign = np.sign(crosstrack_heading_error)
# Compute heading relative to trajectory (heading error)
# First ensure that we are not at the last index. If we are,
# flip back to the first index (loop the waypoints)
if ce_idx < len(waypoints)-1:
vect_wp0_to_wp1 = np.array(
[waypoints[ce_idx+1][0] - waypoints[ce_idx][0],
waypoints[ce_idx+1][1] - waypoints[ce_idx][1]])
trajectory_heading = np.arctan2(vect_wp0_to_wp1[1],
vect_wp0_to_wp1[0])
else:
vect_wp0_to_wp1 = np.array(
[waypoints[0][0] - waypoints[-1][0],
waypoints[0][1] - waypoints[-1][1]])
trajectory_heading = np.arctan2(vect_wp0_to_wp1[1],
vect_wp0_to_wp1[0])
heading_error = trajectory_heading - yaw
heading_error = \
(heading_error + self._pi) % self._2pi - self._pi
# Compute steering command based on error
steer_output = heading_error + \
np.arctan(self.vars.kp_heading * \
crosstrack_sign * \
crosstrack_error / \
(v + self.vars.k_speed_crosstrack))
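            # This follows the form of a Stanley-style lateral controller:
            #   steer = heading_error + arctan(kp_heading * signed_crosstrack_error / (v + k_speed_crosstrack))
            # where k_speed_crosstrack is a low-speed softening constant (0.0 here).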
######################################################
# SET CONTROLS OUTPUT
######################################################
self.set_throttle(throttle_output) # in percent (0 to 1)
self.set_steer(steer_output) # in rad (-1.22 to 1.22)
self.set_brake(brake_output) # in percent (0 to 1)
self.vars.x_prev = x
self.vars.y_prev = y
self.vars.yaw_prev = yaw
self.vars.v_prev = v
self.vars.v_error_prev = self.vars.v_error
self.vars.t_prev = t
|
[
"aniketg.vnit@gmail.com"
] |
aniketg.vnit@gmail.com
|
1cd5e48e7a21108e144335c0a8f255ad91eded7d
|
a4f3e7f4f0d28f2c072a6378487760a067e838e6
|
/Webmap_with_folium/webmap_main.py
|
aa5facae500ca7d8e6aaed7a1025d0b1f2b1c919
|
[] |
no_license
|
algebra-det/Python
|
50cacdb517a863ef914dd8ce8b835091f28aa9f6
|
c4506f4df0c2ec103b1bcebcfedca78cbe705541
|
refs/heads/master
| 2020-12-14T15:35:26.727430
| 2020-05-01T10:21:09
| 2020-05-01T10:21:09
| 234,789,421
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,188
|
py
|
from folium import *
import pandas
# For Volcanoes
data = pandas.read_csv("Volcanoes_USA.txt")
lat = list(data["LAT"])
lon = list(data["LON"])
elev = list(data["ELEV"])
name_v = list(data["NAME"])
# For airport
data_airports = pandas.read_csv("airport.txt")
cor = list(data_airports["coordinates"]) # airport.txt stores each coordinate pair as one comma-separated string, unlike Volcanoes_USA.txt which has separate LAT/LON columns
name = list(data_airports["name"])
lati_for_airport = []
long_for_airport = []
for i in cor: # split each coordinate pair and store latitude and longitude in separate lists
j,k = i.split(",")
lati_for_airport.append(j)
long_for_airport.append(k)
def color_producer(elevation):
if elevation<1500:
return "green"
elif 1500<=elevation<2500:
return "grey"
else:
return "cadetblue"
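# For example, color_producer(1000) -> "green", color_producer(2000) -> "grey",
# color_producer(3000) -> "cadetblue".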
# Initializing Map
map = Map(location=[28.339080, 77.319134], zoom_start=7, title="Mapbox Bright")
# INDIA - Defining a Marker
map.add_child(Marker(location=[28.539080, 77.319134], popup="<i>Pochinki</i>", tooltip="click to know more", icon=Icon(color='blue')))
#creating a FeatureGroup() for airport
# AIRPORT Marker
fg_airport = FeatureGroup(name="Airports")
for laa,loa,na in zip(lati_for_airport,long_for_airport,name):
fg_airport.add_child(Marker(location=[laa,loa], popup =na, icon=Icon(color="orange")))
fg_airport.add_to(map)
# Taking a variable for Tooltip
tooltip = "Click Me!"
#creating a FeatureGroup() for volcanoes
# for VOLCANOES circles
fg_volcanoes = FeatureGroup(name="Volcanoes")
# Using loop for more markers
for lt, ln, el, nm in zip(lat, lon, elev, name_v):
fg_volcanoes.add_child(CircleMarker(location=[lt, ln], radius=7, popup=nm,
fill_color=color_producer(el), color="grey", fill_opacity=0.7))
map.add_child(fg_volcanoes) # or fg.add_to(Map)
# using GeoJason() in folium to take .json file - Polygon
fg_polygon = FeatureGroup(name="Polygon Layer")
fg_polygon.add_child(GeoJson(data=open("world.json", "r", encoding="utf-8-sig").read(),
control=False, style_function=lambda x: {"fillColor":"yellow" if x["properties"]["POP2005"] < 1000000
else "orange" if 1000000<=x["properties"]["POP2005"]<2000000 else "red"} ))
fg_polygon.add_to(map)
# INDIA - Another way of marking - here we can enable the whole set with map.add_child(fg2)
# If map.add_child(fg2) is not called, none of the markers in fg2 will show
fg2 = FeatureGroup(name="Pubg In INDIA")
fg2.add_child(Marker(location=[28.739080, 77.319134], popup="<b>Bootcamp</b>", icon=Icon(color="green", icon="info-sign", angle='30'),tooltip=tooltip))
fg2.add_child(Marker(location=[28.939080, 77.319134], popup="<b>Bootcamp</b>", icon=Icon(color="green",icon="info-sign",angle=30),tooltip=tooltip))
fg2.add_child(Marker(location=[28.139080, 77.319134], popup="<b>Bootcamp</b>", icon=Icon(color="green",icon="info-sign",angle=30),tooltip=tooltip))
map.add_child(fg2)
# INDIA - Iterating via for Loop
fg1 = FeatureGroup(name="Pubg2 In INDIA")
for coordinate in [[28.139080, 77.919134],[28.239080, 77.319134]]:
fg1.add_child(Marker(location=coordinate, popup="Something New", icon=Icon(color="lightgray", icon="cloud")))
fg1.add_to(map)
# INDIA - One more way of marking
Marker([28.739080, 77.519134], popup="Camp Alpha", icon=Icon(color="red", icon="cloud"), tooltip="CLICK").add_to(map)
Marker([27.739080, 77.519134], popup="School", icon=Icon(color="cadetblue",icon_color="white"), tooltip="NewOne").add_to(map)
# INDIA - defining click to mark function
map.add_child(ClickForMarker(popup="Waypoint Bro"))
# defining latitude/longitude popovers
#map.add_child(LatLngPopup())
# Creating a custom icon - download any PNG image into the current directory
url = 'http://leafletjs.com/examples/custom-icons/{}'.format
icon = CustomIcon(
icon_image="leaf-green.png",
icon_size=(28,55),
icon_anchor=(22,94),
shadow_image="leaf-shadow.png",
shadow_size=(50,64),
shadow_anchor=(4,62),
popup_anchor=(-3,-76)
)
mark = Marker(location=[27.739080, 76.519134], popup="Ballabgarh", icon=icon)
map.add_child(mark)
map.add_child(LayerControl())
map.save("map.html")
|
[
"noreply@github.com"
] |
algebra-det.noreply@github.com
|
b6ae1fdd6178033aedd235ce4f78cc3d7e1cd15f
|
7477865482948b6206c89df9e6fed64e1b92e879
|
/flowretrieval.py
|
a30932dfafc6b0b1977a75ff84b681ac68979054
|
[] |
no_license
|
mann8352/Multipath-routing-using-SDN
|
fb6250b60181d57d31314e93fd3144b6540e7ae7
|
5811f83c1f64da448947a069d0ae72358403510b
|
refs/heads/master
| 2021-04-15T12:38:51.382381
| 2016-08-02T12:53:13
| 2016-08-02T12:53:13
| 62,341,284
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,567
|
py
|
import json
import httplib2
import traceback
class FlowRetrieval:
	"""Retrieves all details of the flows of each switch"""
def __init__(self):
pass
def retrieveFlow(self, switch):
"""retrieves flows"""
h = httplib2.Http(".cache")
h.add_credentials('admin', 'admin')
base_url='http://localhost:8181/restconf/operational/opendaylight-inventory:nodes/node/'
tail_url='/table/0/'
url=base_url+switch+tail_url
header={'Content-Type':'application/json', 'Accept': 'application/json'}
try:
			resp, content = h.request(url, "GET", headers=header)
response=json.loads(content)
flowlist=response['flow-node-inventory:table'][0]['flow']
except:
print ' No flow exists'
print '-----------------------------------'
return
return flowlist
def getFlowDetails(self, switch):
"""gives the details of each flow of a switch"""
flist=[]
flowlist=self.retrieveFlow(switch)
try:
for f in flowlist:
flowdict={'id':'', 'match':{}, 'instruction':{}}
flowdict['id']=f['id']
flowdict['match']=f['match']
try:
flowdict['instruction']=f['instructions']
except:
flist.append(flowdict)
continue
flist.append(flowdict)
except:
return
return flist
def getFlowIds(self, switch):
"""returns flow-ids of all flows of a switch"""
try:
flowlist=self.getFlowDetails(switch)
flowidlst=[]
for flow in flowlist:
flowidlst.append(str(flow['id']))
return flowidlst
except:
return []
def main():
print FlowRetrieval().getFlowIds('openflow:24')
if __name__ == '__main__':
main()
|
[
"mann8352@gmail.com"
] |
mann8352@gmail.com
|
c366dc8950f6732516f2639a9e4367e64256fa62
|
665a7b06935defb8e7b36b7dd91c3901f997aa22
|
/build/test_python.py
|
750882ee8e40def5dfd4c3a40393a88b833f5c62
|
[] |
no_license
|
SemQ20/CppPython
|
1512a80e432b8e06c5cb0f6971218df4cc3d79b6
|
463c626cf505c30c6fca318e70907839065d708e
|
refs/heads/master
| 2023-03-09T10:26:54.036735
| 2021-02-09T18:33:18
| 2021-02-09T18:33:18
| 260,982,255
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 106
|
py
|
def sum_one(a,b,d):
c = a + b + d
return c
def sum(a,b,d):
c = sum_one(a, b, d)
return c
|
[
"goodsk1llomg@gmail.com"
] |
goodsk1llomg@gmail.com
|
e5f1c102be91be7d06c912ccc3583ef91985a394
|
067078390e2250174f9cf9ce42da7c3770940ef8
|
/src/modules/user_input_handle_block_action.py
|
5f43bfb2d20d6936ad89dc3e34c8b57f4c3c52d9
|
[
"MIT"
] |
permissive
|
AndreasVikke-School/ComputerScience-Final
|
0444e93fb500efe724f59af82b0ebb6f5f239e41
|
52d09a5876bfde661a00736712db6e3d19be877d
|
refs/heads/master
| 2023-02-10T16:38:13.171988
| 2021-01-15T11:21:25
| 2021-01-15T11:21:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,670
|
py
|
"""
Processes user input block action from slack
:license: MIT
"""
import json
from typing import Dict, List
from src.modules.private_metedata import read_metadata
from src.modules.slack_template_parts import (
create_checkbox, create_checkboxes_with_initial_values,
create_select_for_drop_down, create_input_box)
from src.modules.type_enum import Types
from src.modules.user_input_global import UserInputGlobal
class UserInputHandleBlockAction():
'''
User Input Handle Block Action Class
'''
def __init__(self, payload: Dict, test: bool = False):
'''
Init for class
-
:param payload: Payload of the event
:param test: true for test
'''
self.payload = payload
self.metadata = read_metadata(payload)
self.user_input_global = UserInputGlobal(payload, test)
def send_to_sqs(self, value, message_type: Types, action_type = None, consultant_uuid = None):
'''
Send message to SQS
'''
return self.user_input_global.send_to_sqs(
self.payload['actions'][0]['action_id'],
self.payload['actions'][0]['type']\
if action_type is None else action_type,
value,
message_type,
consultant_uuid)
def handle_block_action(self):
'''
Handle block action
'''
if self.payload['actions'][0]['type'] == 'button':
if self.payload['actions'][0]['value'].split(';')[0] == 'start':
response = self.make_customers_modal()
elif self.payload['actions'][0]['value'].split(';')[0] == 'time':
response = self.send_to_sqs(self.payload['actions'][0]['value'].split(';')[1],\
Types.UserInput)
self.update_time_view()
elif self.payload['actions'][0]['value'].split(';')[0] == 'signup':
response = self.make_sign_up_home_tap('signup')
elif self.payload['actions'][0]['type'] == 'checkboxes':
if self.payload['actions'][0]['action_id'] == 'same_day_checkin':
response = self.make_sign_up_home_tap('same_day_checkin')
else:
response = self.send_to_sqs(self.payload['actions'][0]['selected_options'],\
Types.UserInput)
elif self.payload['actions'][0]['type'] == 'radio_buttons':
response = self.send_to_sqs(self.payload['actions'][0]['selected_option'],\
Types.UserInput)
elif self.payload['actions'][0]['type'] == 'static_select':
self.update_customers_modal()
response = self.send_to_sqs(self.payload['actions'][0]['selected_option'],\
Types.UserInput)
elif self.payload['actions'][0]['type'] == 'timepicker':
response = self.make_sign_up_home_tap('time_for_checkin')
elif self.payload['actions'][0]['type'] == 'plain_text_input':
if self.payload['actions'][0]['block_id'] == 'add_new_project_block':
self.update_time_modal()
response = self.send_to_sqs(self.payload['actions'][0]['value'],\
Types.Project_Create)
print("Response: ", response)
print(response)
def make_customers_modal(self):
'''
Publishes the modal to the user
'''
checkin_id = self.payload['actions'][0]['value'].split(';')[1]
checkin = self.user_input_global.checkin_model.get(checkin_id)
predictions = json.loads(checkin.predictions)
customers = sorted(self.user_input_global.customers_model.scan(),\
key=lambda x: x.friendlyName)
# Load UserInput if not none
if checkin.user_input is not None:
user_inputs = json.loads(checkin.user_input)
else:
user_inputs = []
# Load Modal view
body = self.user_input_global.reader(checkin_id, 'modal_customer_template', None, None)
initial_values, new_elements = create_checkboxes_with_initial_values(
predictions, user_inputs, 'activity', 'customers', 4,\
self.user_input_global.customers_model)
# Append selected customers from dropdown to variables
selected_customers = next((x for x in user_inputs if x['action_id'] == 'customers'),\
{}).get('value', [])
for selected in list(filter(lambda x: not x['unchecked'], selected_customers)):
customer = next((x for x in customers if x.uuid == selected['value']), None)
checkbox = create_checkbox(customer.friendlyName, 'activity', selected['value'])
if checkbox not in new_elements:
initial_values.append(checkbox)
new_elements.append(checkbox)
# Append initial_values and new_elements to body
if initial_values is not None:
body['blocks'][1]['elements'][0]['initial_options'] = initial_values
print(new_elements)
print(len(new_elements))
if new_elements is None or len(new_elements) <= 0:
new_elements.append(create_checkbox(customers[0].friendlyName,\
'activity', customers[0].uuid))
body['blocks'][1]['elements'][0]['options'] = new_elements
# Append all customers to drop down
exclude = list(map(lambda x: x['value'].split(';')[1], new_elements))
all_customers_block = get_by_block_or_action_id(body['blocks'], 'all_customers')
if all_customers_block is not None:
all_customers_block['elements'][0]['options'] =\
[create_select_for_drop_down(x.friendlyName,'add_customer', x.uuid)\
for x in list(filter(lambda x: x.uuid not in exclude, customers))]
if len(all_customers_block['elements'][0]['options']) == 0:
body['blocks'].pop(2)
        # Get yesterday's date to set the actual day
body['blocks'][0]['text']['text'] = self.user_input_global.get_yesterday_date(checkin.date)
activities = next((x for x in user_inputs if x['action_id'] == 'activity'), None)
if activities is not None:
initial_values = [create_checkbox(x['value'], 'activity', x['value'])\
for x in activities['value'] if not x['unchecked']]
if len(initial_values) > 0:
next((x for x in body['blocks'] if 'block_id' in x\
and x['block_id'] == 'activities'),\
{})['elements'][0]['initial_options'] = initial_values
print(body)
return self.user_input_global.post('https://slack.com/api/views.open', {
"trigger_id": self.payload['trigger_id'],
"view": body
})
def update_time_view(self):
'''
Updates the current view on a modal
'''
checkin_id = self.metadata['checkin_id']
checkin = self.user_input_global.checkin_model.get(checkin_id)
body = self.user_input_global.reader(checkin_id, 'modal_time_template',\
self.metadata['current_customer'], self.metadata['current_activity'])
body['blocks'] = self.payload['view']['blocks']
value = self.payload['actions'][0]['value']
if value is not None:
for button in body['blocks'][1]['elements']:
if button['value'] == value:
button['style'] = 'primary'
else:
button.pop('style', None)
        # Get yesterday's date to set the actual day
current_text = self.metadata['current_activity'] if self.metadata['current_activity']\
is not None else self.metadata['current_customer']
if self.metadata['current_customer'] is not None:
body['blocks'][0]['text']['text'] =\
self.user_input_global.get_yesterday_date(checkin.date,\
current_text, self.metadata['current_activity'] is not None)
data = {
"view_id": self.payload['view']['id'],
"view": body
}
print('DATA FROM BUTTON PRESSED: ', json.dumps(data))
return self.user_input_global.post('https://slack.com/api/views.update', data)
def update_customers_modal(self) -> Dict:
'''
Update the customer modal
'''
checkin_id = self.metadata['checkin_id']
body = self.user_input_global.reader(checkin_id, 'modal_customer_template', None, None)
body['blocks'] = self.payload['view']['blocks']
selected_option = self.payload['actions'][0]['selected_option']
checkbox = create_checkbox(selected_option['text']['text'], 'activity',\
selected_option['value'].split(';')[1])
if 'initial_options' not in body['blocks'][1]['elements'][0]:
body['blocks'][1]['elements'][0]['initial_options'] = []
body['blocks'][1]['elements'][0]['initial_options'].append(checkbox)
body['blocks'][1]['elements'][0]['options'].append(checkbox)
data = {
"view_id": self.payload['view']['id'],
"view": body
}
print('DATA FROM BUTTON PRESSED: ', json.dumps(data))
return self.user_input_global.post('https://slack.com/api/views.update', data)
def update_time_modal(self):
'''
Update the time modal.
'''
checkin_id = self.metadata['checkin_id']
body = self.user_input_global.reader(checkin_id, 'modal_time_template',\
self.metadata['current_customer'], self.metadata['current_activity'])
body['blocks'] = self.payload['view']['blocks']
index = len(self.payload['view']['blocks']) - 3
print("index length: ", index)
block_builder = {
"type": "input",
"optional": True,
"element": {
"type": "plain_text_input",
"placeholder": {
"type": "plain_text",
"text": "Enter time and description (Format H.M desc)"
}
},
"label": {
"type": "plain_text",
"text": self.payload['actions'][0]['value'],
"emoji": True
}
}
block_builder['element'] = create_input_box(self.metadata['current_customer'],\
'time_desc_input', None, False, project = self.payload['actions'][0]['value'])
body['blocks'].insert(index, block_builder)
print("BUTTON PRESSED!: ", self.payload)
print("METADATA!!!: ", self.metadata)
data = {
"view_id": self.payload['view']['id'],
"view": body
}
print('DATA FROM BUTTON PRESSED: ', json.dumps(data))
return self.user_input_global.post('https://slack.com/api/views.update', data)
def make_sign_up_home_tap(self, action_type: str):
'''
        Makes the new Home Page tab for signed-up users
-
:param action_type: type of home page action
'''
user_id = self.payload['user']['id']
consultant = next(self.user_input_global.consultant_model.slack_id_index.query(user_id),\
None)
if action_type == 'signup':
response = self.send_to_sqs(user_id, Types.Consultant_Update, 'sign_up', "None")
elif action_type == 'time_for_checkin':
response = self.send_to_sqs(self.payload['actions'][0]['selected_time'],\
Types.Consultant_Update, 'checkin_time', consultant.uuid)
elif action_type == 'same_day_checkin':
response = self.send_to_sqs(str(len(self.payload['actions'][0]['selected_options'])\
> 0), Types.Consultant_Update, 'checkin_time', consultant.uuid)
print(response)
return "Saved To Consultant"
def get_by_block_or_action_id(blocks: List, block_action_id: str, return_type = None):
'''
Get block by block id or action id
-
:param blocks: array to look in
:param block_action_id: block or action id
:param return_type: the return type if not found
'''
output = next((x for x in blocks if 'block_id' in x and x['block_id']\
== block_action_id), return_type)
if output is None or not output or output == return_type:
output = next((x for x in blocks if 'action_id' in x and x['action_id']\
== block_action_id), return_type)
return output
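# A minimal usage sketch with hypothetical block data (block_id is matched
# first, then action_id):
#   blocks = [{'block_id': 'activities'}, {'action_id': 'customers'}]
#   get_by_block_or_action_id(blocks, 'customers')  # -> {'action_id': 'customers'}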
|
[
"andreasvikke@gmail.com"
] |
andreasvikke@gmail.com
|
2831a8999135f4fcca2518ad672cebed6c20e6a6
|
08bfc8a1f8e44adc624d1f1c6250a3d9635f99de
|
/SDKs/swig/Examples/python/import_template/runme.py
|
21a876dcc50a33ed51ef5728620bd028545ae124
|
[] |
no_license
|
Personwithhat/CE_SDKs
|
cd998a2181fcbc9e3de8c58c7cc7b2156ca21d02
|
7afbd2f7767c9c5e95912a1af42b37c24d57f0d4
|
refs/heads/master
| 2020-04-09T22:14:56.917176
| 2019-07-04T00:19:11
| 2019-07-04T00:19:11
| 160,623,495
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:6e7ca728db56fe89feafc30fb2c2d46f69a2f7d25eb2e7eb8c294346908c389c
size 1744
|
[
"personwithhats2@Gmail.com"
] |
personwithhats2@Gmail.com
|
6aa52154d3a60b6b0b2211e5fdf177377d5dfcc3
|
f82757475ea13965581c2147ff57123b361c5d62
|
/gi-stubs/repository/ICal/recurrencetype_frequency.py
|
f35061c4dcfda7a3be139e19b0a60512a84f5e30
|
[] |
no_license
|
ttys3/pygobject-stubs
|
9b15d1b473db06f47e5ffba5ad0a31d6d1becb57
|
d0e6e93399212aada4386d2ce80344eb9a31db48
|
refs/heads/master
| 2022-09-23T12:58:44.526554
| 2020-06-06T04:15:00
| 2020-06-06T04:15:00
| 269,693,287
| 8
| 2
| null | 2020-06-05T15:57:54
| 2020-06-05T15:57:54
| null |
UTF-8
|
Python
| false
| false
| 13,990
|
py
|
# encoding: utf-8
# module gi.repository.ICal
# from /usr/lib64/girepository-1.0/ICal-3.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gobject as __gobject
class recurrencetype_frequency(__gobject.GEnum):
# no doc
def as_integer_ratio(self): # real signature unknown; restored from __doc__
"""
Return integer ratio.
Return a pair of integers, whose ratio is exactly equal to the original int
and with a positive denominator.
>>> (10).as_integer_ratio()
(10, 1)
>>> (-10).as_integer_ratio()
(-10, 1)
>>> (0).as_integer_ratio()
(0, 1)
"""
pass
def bit_length(self): # real signature unknown; restored from __doc__
"""
Number of bits necessary to represent self in binary.
>>> bin(37)
'0b100101'
>>> (37).bit_length()
6
"""
pass
def conjugate(self, *args, **kwargs): # real signature unknown
""" Returns self, the complex conjugate of any int. """
pass
def from_bytes(self, *args, **kwargs): # real signature unknown
"""
Return the integer represented by the given array of bytes.
bytes
Holds the array of bytes to convert. The argument must either
support the buffer protocol or be an iterable object producing bytes.
Bytes and bytearray are examples of built-in objects that support the
buffer protocol.
byteorder
The byte order used to represent the integer. If byteorder is 'big',
the most significant byte is at the beginning of the byte array. If
byteorder is 'little', the most significant byte is at the end of the
byte array. To request the native byte order of the host system, use
`sys.byteorder' as the byte order value.
signed
Indicates whether two's complement is used to represent the integer.
"""
pass
def to_bytes(self, *args, **kwargs): # real signature unknown
"""
Return an array of bytes representing an integer.
length
Length of bytes object to use. An OverflowError is raised if the
integer is not representable with the given number of bytes.
byteorder
The byte order used to represent the integer. If byteorder is 'big',
the most significant byte is at the beginning of the byte array. If
byteorder is 'little', the most significant byte is at the end of the
byte array. To request the native byte order of the host system, use
`sys.byteorder' as the byte order value.
signed
Determines whether two's complement is used to represent the integer.
If signed is False and a negative integer is given, an OverflowError
is raised.
"""
pass
def __abs__(self, *args, **kwargs): # real signature unknown
""" abs(self) """
pass
def __add__(self, *args, **kwargs): # real signature unknown
""" Return self+value. """
pass
def __and__(self, *args, **kwargs): # real signature unknown
""" Return self&value. """
pass
def __bool__(self, *args, **kwargs): # real signature unknown
""" self != 0 """
pass
def __ceil__(self, *args, **kwargs): # real signature unknown
""" Ceiling of an Integral returns itself. """
pass
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __divmod__(self, *args, **kwargs): # real signature unknown
""" Return divmod(self, value). """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __float__(self, *args, **kwargs): # real signature unknown
""" float(self) """
pass
def __floordiv__(self, *args, **kwargs): # real signature unknown
""" Return self//value. """
pass
def __floor__(self, *args, **kwargs): # real signature unknown
""" Flooring an Integral returns itself. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __getnewargs__(self, *args, **kwargs): # real signature unknown
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __index__(self, *args, **kwargs): # real signature unknown
""" Return self converted to an integer, if self is suitable for use as an index into a list. """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __int__(self, *args, **kwargs): # real signature unknown
""" int(self) """
pass
def __invert__(self, *args, **kwargs): # real signature unknown
""" ~self """
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lshift__(self, *args, **kwargs): # real signature unknown
""" Return self<<value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
def __mod__(self, *args, **kwargs): # real signature unknown
""" Return self%value. """
pass
def __mul__(self, *args, **kwargs): # real signature unknown
""" Return self*value. """
pass
def __neg__(self, *args, **kwargs): # real signature unknown
""" -self """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __or__(self, *args, **kwargs): # real signature unknown
""" Return self|value. """
pass
def __pos__(self, *args, **kwargs): # real signature unknown
""" +self """
pass
def __pow__(self, *args, **kwargs): # real signature unknown
""" Return pow(self, value, mod). """
pass
def __radd__(self, *args, **kwargs): # real signature unknown
""" Return value+self. """
pass
def __rand__(self, *args, **kwargs): # real signature unknown
""" Return value&self. """
pass
def __rdivmod__(self, *args, **kwargs): # real signature unknown
""" Return divmod(value, self). """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __rfloordiv__(self, *args, **kwargs): # real signature unknown
""" Return value//self. """
pass
def __rlshift__(self, *args, **kwargs): # real signature unknown
""" Return value<<self. """
pass
def __rmod__(self, *args, **kwargs): # real signature unknown
""" Return value%self. """
pass
def __rmul__(self, *args, **kwargs): # real signature unknown
""" Return value*self. """
pass
def __ror__(self, *args, **kwargs): # real signature unknown
""" Return value|self. """
pass
def __round__(self, *args, **kwargs): # real signature unknown
"""
Rounding an Integral returns itself.
Rounding with an ndigits argument also returns an integer.
"""
pass
def __rpow__(self, *args, **kwargs): # real signature unknown
""" Return pow(value, self, mod). """
pass
def __rrshift__(self, *args, **kwargs): # real signature unknown
""" Return value>>self. """
pass
def __rshift__(self, *args, **kwargs): # real signature unknown
""" Return self>>value. """
pass
def __rsub__(self, *args, **kwargs): # real signature unknown
""" Return value-self. """
pass
def __rtruediv__(self, *args, **kwargs): # real signature unknown
""" Return value/self. """
pass
def __rxor__(self, *args, **kwargs): # real signature unknown
""" Return value^self. """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Returns size in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __sub__(self, *args, **kwargs): # real signature unknown
""" Return self-value. """
pass
def __truediv__(self, *args, **kwargs): # real signature unknown
""" Return self/value. """
pass
def __trunc__(self, *args, **kwargs): # real signature unknown
""" Truncating an Integral returns itself. """
pass
def __xor__(self, *args, **kwargs): # real signature unknown
""" Return self^value. """
pass
denominator = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the denominator of a rational number in lowest terms"""
imag = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the imaginary part of a complex number"""
numerator = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the numerator of a rational number in lowest terms"""
real = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the real part of a complex number"""
value_name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
value_nick = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
DAILY_RECURRENCE = 3
HOURLY_RECURRENCE = 2
MINUTELY_RECURRENCE = 1
MONTHLY_RECURRENCE = 5
NO_RECURRENCE = 7
SECONDLY_RECURRENCE = 0
WEEKLY_RECURRENCE = 4
YEARLY_RECURRENCE = 6
__class__ = type
__dict__ = None # (!) real value is "mappingproxy({'__module__': 'gi.repository.ICal', '__dict__': <attribute '__dict__' of 'recurrencetype_frequency' objects>, '__doc__': None, '__gtype__': <GType PyICalrecurrencetype_frequency (94628528832464)>, '__enum_values__': {0: <enum ICAL_SECONDLY_RECURRENCE of type ICal.recurrencetype_frequency>, 1: <enum ICAL_MINUTELY_RECURRENCE of type ICal.recurrencetype_frequency>, 2: <enum ICAL_HOURLY_RECURRENCE of type ICal.recurrencetype_frequency>, 3: <enum ICAL_DAILY_RECURRENCE of type ICal.recurrencetype_frequency>, 4: <enum ICAL_WEEKLY_RECURRENCE of type ICal.recurrencetype_frequency>, 5: <enum ICAL_MONTHLY_RECURRENCE of type ICal.recurrencetype_frequency>, 6: <enum ICAL_YEARLY_RECURRENCE of type ICal.recurrencetype_frequency>, 7: <enum ICAL_NO_RECURRENCE of type ICal.recurrencetype_frequency>}, '__info__': gi.EnumInfo(recurrencetype_frequency), 'SECONDLY_RECURRENCE': <enum ICAL_SECONDLY_RECURRENCE of type ICal.recurrencetype_frequency>, 'MINUTELY_RECURRENCE': <enum ICAL_MINUTELY_RECURRENCE of type ICal.recurrencetype_frequency>, 'HOURLY_RECURRENCE': <enum ICAL_HOURLY_RECURRENCE of type ICal.recurrencetype_frequency>, 'DAILY_RECURRENCE': <enum ICAL_DAILY_RECURRENCE of type ICal.recurrencetype_frequency>, 'WEEKLY_RECURRENCE': <enum ICAL_WEEKLY_RECURRENCE of type ICal.recurrencetype_frequency>, 'MONTHLY_RECURRENCE': <enum ICAL_MONTHLY_RECURRENCE of type ICal.recurrencetype_frequency>, 'YEARLY_RECURRENCE': <enum ICAL_YEARLY_RECURRENCE of type ICal.recurrencetype_frequency>, 'NO_RECURRENCE': <enum ICAL_NO_RECURRENCE of type ICal.recurrencetype_frequency>})"
__enum_values__ = {
0: 0,
1: 1,
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
7: 7,
}
__gtype__ = None # (!) real value is '<GType PyICalrecurrencetype_frequency (94628528832464)>'
__info__ = gi.EnumInfo(recurrencetype_frequency)
|
[
"ttys3@outlook.com"
] |
ttys3@outlook.com
|
fc4f948f429da2c1ee07543726234d782b8fdc93
|
3d55f067e6645cc7d073f57b16318253c34bed75
|
/build_network_for_autoencoder.py
|
ca5a73da0e7bf3099b26eea3905f0b0f5a8ba09e
|
[] |
no_license
|
ibrahimkaya754/modules2import
|
73ebde4db0704bf035d5b369abe6bae676934909
|
746fd65b6b93d1e420168b3b6c17939456988e32
|
refs/heads/master
| 2023-01-01T21:02:24.695663
| 2020-02-24T10:43:52
| 2020-02-24T10:43:52
| 242,708,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,536
|
py
|
from import_modules import *
from helper_functions import *
class prepare_inputs:
def __init__(self, data_splitted, list_tests, list_params,
feature_keys, target_keys,cnn=False):
##########################################################################################################
self.data_splitted = data_splitted
self.list_tests = list_tests
self.list_params = list_params
self.feature_keys = feature_keys
self.target_keys = target_keys
self.dict_x = {}
self.dict_y = {}
self.dict_x_sc = {}
self.dict_y_sc = {}
self.cnn = cnn
self.list_split_through = [key for key in self.data_splitted.dict_x.keys()]
for key in self.list_split_through:
self.dict_x[key] = self.data_splitted.dict_x[key]
self.dict_y[key] = self.data_splitted.dict_y[key]
self.dict_x_sc[key] = self.data_splitted.dict_x_sc[key]
self.dict_y_sc[key] = self.data_splitted.dict_y_sc[key]
        print('Shapes of the %s Sets of Input Features\n' % (self.list_split_through))
for data in self.list_split_through:
print('--------------------------------------------------------------')
for feature in self.feature_keys:
print(data,' input set shape for ',feature,' is: ', self.dict_x_sc[data][feature].shape)
print('\n*******************************************************************************\n')
# Shapes of the Train, Test and Validation Sets of Output Targets
        print('Shapes of the %s Sets of Output Targets\n' % (self.list_split_through))
for data in self.list_split_through:
print('--------------------------------------------------------------')
for target in self.target_keys:
print(data,' target output set shape for ',target,' is: ', self.dict_y_sc[data][target].shape)
print('\n')
##########################################################################################################
self.input_dl = {}
self.output_dl = {}
for key in self.list_split_through:
self.input_dl[key] = {'input_all' : self.dict_x_sc[key][list_params[0]]}
self.output_dl[key] = {'all_targets' : self.dict_y_sc[key][list_params[0]]}
for key in self.list_split_through:
for param in list_params[1:]:
self.input_dl[key]['input_all'] = np.hstack((self.input_dl[key]['input_all'],self.dict_x_sc[key][param]))
self.output_dl[key]['all_targets'] = np.hstack((self.output_dl[key]['all_targets'],self.dict_y_sc[key][param]))
if self.cnn:
self.input_all = Input(shape=(self.input_dl['train']['input_all'].shape[1],1), name='input_all')
for key in self.list_split_through:
self.input_dl[key]['input_all'] = self.input_dl[key]['input_all'].reshape(self.input_dl[key]['input_all'].shape[0],
self.input_dl[key]['input_all'].shape[1],1)
else:
self.input_all = Input(shape=(self.input_dl['train']['input_all'].shape[1],), name='input_all')
##########################################################################################################
# BUILD CUSTOM NETWORK
class model(prepare_inputs):
def __init__(self, data_splitted, list_tests, list_params,
feature_keys, target_keys,cnn,mdl_name, act='tanh',
trainable_layer=True, bottleneck=3, initializer='glorot_normal',
list_nn=[150,100,20],load_weights=True):
super().__init__(data_splitted, list_tests, list_params,
feature_keys, target_keys,cnn=False)
self.model_name = mdl_name
self.act = act
self.trainable_layer = trainable_layer
self.init = initializer
self.opt = Yogi(lr=0.001)
self.list_nn = list_nn
self.bottleneck = bottleneck
self.losses = {}
self.lossWeights = {}
self.scaler_path = {'feature' : None,
'target' : None}
self.regularization_paramater = 0.0
self.dict_scalery = data_splitted.dict_scalery
self.dict_scalerx = data_splitted.dict_scalerx
L1 = Dense(self.list_nn[0], activation=self.act,
kernel_initializer=self.init, trainable = self.trainable_layer,
kernel_regularizer=regularizers.l2(self.regularization_paramater))(self.input_all)
for ii in range(1,len(self.list_nn)):
L1 = Dense(self.list_nn[ii], activation=self.act, trainable = self.trainable_layer,
kernel_initializer=self.init,
kernel_regularizer=regularizers.l2(self.regularization_paramater))(L1)
L1 = Dense(self.bottleneck, activation='linear', name='bottleneck',
kernel_initializer=self.init,
kernel_regularizer=regularizers.l2(self.regularization_paramater))(L1)
for ii in range(0,len(self.list_nn)):
L1 = Dense(self.list_nn[-ii-1], activation=self.act, trainable = self.trainable_layer,
kernel_initializer=self.init,
kernel_regularizer=regularizers.l2(self.regularization_paramater))(L1)
LOut = Dense(len(self.target_keys), activation=self.act, name='all_targets',
kernel_initializer=self.init,
kernel_regularizer=regularizers.l2(self.regularization_paramater))(L1)
self.model = Model(inputs=[self.input_all], outputs=LOut)
self.description = None
self.losses['all_targets'] = huber_loss
self.lossWeights['all_targets'] = 1.0
self.model_path = os.getcwd()+"/" + self.model_name + '.hdf5'
self.learning_rate_decrease_factor = 0.97
self.learning_rate_patience = 5
self.number_of_params = self.model.count_params()
self.reduce_lr = ReduceLROnPlateau(monitor='val_loss',
factor=self.learning_rate_decrease_factor,
patience=self.learning_rate_patience,
min_lr=0.0000001, mode='min', verbose=1)
self.checkpoint = ModelCheckpoint(self.model_path,
monitor='val_loss', verbose=1,
save_best_only=True, period=1,
mode='min',save_weights_only=False)
self.model.compile(optimizer=self.opt, loss=self.losses['all_targets'], metrics=['mse'])
plot_model(self.model,to_file=self.model_name+'.png', show_layer_names=True,show_shapes=True)
print('\n%s with %s params created' % (self.model_name,self.number_of_params))
if os.path.exists(self.model_path):
if load_weights:
print('weights loaded for %s' % (self.model_name))
self.model.load_weights(self.model_path)
# Make the prediction for bottleneck layer
self.bottleneck_layer = Model(self.model.input,self.model.get_layer('bottleneck').output)
self.target_bn = ['dim'+str(ii) for ii in range(self.bottleneck)]
def __describe__(self):
return self.description
def summary(self):
self.model.summary()
print('\nModel Name is: ',self.model_name)
print('\nModel Path is: ',self.model_path)
print('\nActivation Function is: ',self.act)
print('\nLearning Rate Decreases by a factor of %s with patience of %s' % (self.learning_rate_decrease_factor,
self.learning_rate_patience))
if self.description != None:
print('\nModel Description: '+self.__describe__())
def run(self,num_epochs,batch_size):
self.num_epochs = num_epochs
self.batch_size = batch_size
print('Start Running \n')
self.history = self.model.fit(self.input_dl['train'],
self.output_dl['train'],
batch_size=self.batch_size, epochs=self.num_epochs, shuffle=True,
callbacks=[self.checkpoint, self.reduce_lr],
validation_data=(self.input_dl['test'],self.output_dl['test']), verbose=1)
self.val_loss = np.min(self.history.history['val_loss'])
def results(self,load_weights=False):
if load_weights:
self.model.load_weights(self.model_path)
print('Weights Loaded')
self.out_dl_predicted = {}
self.out_dl_predicted_bottleneck = {}
for data in self.list_split_through:
self.out_dl_predicted[data] = {'all_target' : self.model.predict(self.input_dl[data], batch_size=None)}
self.out_dl_predicted_bottleneck[data] = {'all_target' : self.bottleneck_layer.predict(self.input_dl[data], batch_size=None)}
print('Prediction for %s set is completed' % (data))
print('Bottleneck Prediction for %s set is completed' % (data))
for value,key in enumerate(self.list_params):
self.out_dl_predicted[data][key] = {'scaled' : self.out_dl_predicted[data]['all_target'][:,value]}
self.out_dl_predicted[data][key]['inverse'] = self.dict_scalery[key].inverse_transform(self.out_dl_predicted[data][key]['scaled'].reshape(-1,1))
for value,key in enumerate(self.target_bn):
self.out_dl_predicted_bottleneck[data][key] = {'scaled' : self.out_dl_predicted_bottleneck[data]['all_target'][:,value]}
print('-------------------------------------------------------------------------------------\n')
for data in self.list_split_through:
print('\nExplained Variance Calculation for %s set' % (data))
for param in self.list_params:
print("Explained Variance of %s set for %s : %.8f" % (data,param,explained_variance_score(self.dict_y_sc[data][param],
self.out_dl_predicted[data][param]['scaled'].reshape(self.out_dl_predicted[data][param]['scaled'].shape[0],1))))
print('-------------------------------------------------------------------------------------')
def plots(self,pnt_number=250,plot_train=False,plot_test=False,plot_valid=False,plot_flight=False):
self.pnt_number = pnt_number
self.plot_list = {'train' :plot_train,
'test' :plot_test,
'valid' :plot_valid,
'flight' :plot_flight}
for data in self.list_split_through:
if data == 'flight':
self.pnt_number = -1
if self.plot_list[data]:
print('\nPlot for %s set\n' % (data))
for param in self.list_params:
print(param)
plt.figure(figsize=(26,9))
plt.plot(self.out_dl_predicted[data][param]['scaled'][0:self.pnt_number],'--',markersize=1,label='predicted',color='tab:red')
plt.plot(self.dict_y_sc[data][param][0:self.pnt_number],'--', markersize=1, label='actual', color = 'tab:blue')
plt.legend()
plt.xlabel('sample point')
plt.ylabel(param)
                    plt.title('explained variance score for the %s set for %s is: %.6f' % (data, param, explained_variance_score(self.dict_y_sc[data][param], self.out_dl_predicted[data][param]['scaled'].reshape(-1, 1))))
plt.grid()
plt.show()
def scatter_plot_for_bottleneck(self):
for value1,key1 in enumerate(self.target_bn):
if key1 == self.target_bn[-1]:
break
else:
for key2 in self.target_bn[value1+1:]:
if key1 == key2:
continue
else:
fig = plt.figure(figsize=(26,9))
for data in self.list_split_through:
plt.scatter(self.out_dl_predicted_bottleneck[data][key1]['scaled'],self.out_dl_predicted_bottleneck[data][key2]['scaled'],label=data)
plt.legend()
plt.xlabel(key1)
plt.ylabel(key2)
plt.grid()
plt.show()
#fig.savefig('./images/scatterplot_bottleneck_')
def __mae__(self):
self.mae = {}
for data in self.list_split_through:
self.mae[data] = {param:np.zeros((self.out_dl_predicted[data][param]['scaled'].shape[0],1)) for param in self.list_params}
for data in self.list_split_through:
for param in self.list_params:
self.mae[data][param] = self.out_dl_predicted[data][param]['scaled'].reshape(-1,1) - self.dict_y_sc[data][param]
def histogram_mae(self):
self.__mae__()
for param in self.list_params:
print('************ Histogram Plot of Mae for %s set****************\n' % (self.list_split_through))
for value,data in enumerate(self.list_split_through):
fig = plt.figure(figsize=(25,36))
plt.subplot(411+value)
plt.hist(self.mae[data][param],label='%s for %s set' %(param,data),bins=500)
plt.legend()
plt.xlabel(param)
plt.grid()
plt.xlim((-0.50,+0.50))
plt.show()
#fig.savefig('./images/error_hist'+self.target_keys[ii])
print("*******************************************************************************************************************")
print("*******************************************************************************************************************")
def corr(self):
# Pearson Correlation
self.covariance = {}
self.sigma = {}
self.correlation = {}
for data in self.list_split_through:
self.covariance[data] = {}
self.sigma[data] = {}
self.correlation[data] = {}
for data in self.list_split_through:
for dim in self.target_bn:
self.covariance[data][dim] = {}
self.correlation[data][dim] = {}
for data in self.list_split_through:
for dim1 in self.target_bn:
self.sigma[data][dim1] = sigma(self.out_dl_predicted_bottleneck[data][dim1]['scaled'])
for dim2 in self.target_bn:
self.sigma[data][dim2] = sigma(self.out_dl_predicted_bottleneck[data][dim2]['scaled'])
self.covariance[data][dim1][dim2] = covar(self.out_dl_predicted_bottleneck[data][dim1]['scaled'],self.out_dl_predicted_bottleneck[data][dim2]['scaled'])
self.correlation[data][dim1][dim2] = self.covariance[data][dim1][dim2] / (self.sigma[data][dim1] * self.sigma[data][dim2])
# Scaler Plot for the Correlations of Dimensions Obtained in Bottleneck
for data in self.list_split_through:
print('\nCorrelation Coefficient for %s data' % (data))
for dim1 in self.target_bn:
plt.figure(figsize=(26,9))
plt.scatter(np.arange(len(self.correlation[data][dim1])),[self.correlation[data][dim1][dim] for dim in self.target_bn], label= 'correlation for %s' % (dim1))
plt.legend()
plt.xlabel([dim for dim in self.target_bn])
plt.ylabel(dim1)
plt.title('Correlation for %s obtained from the prediction of bottleneck of AutoEncoder' % (dim1))
plt.grid()
plt.show()
def mean_distance(self):
print('Mean Distance for the bottleneck dimensions')
self.shuffling = {}
self.mean_dist = {}
self.mean = {}
self.sigma = {}
for data in self.list_split_through:
self.shuffling[data] = np.random.permutation(np.arange(self.out_dl_predicted_bottleneck[data][self.target_bn[0]]['scaled'].shape[0]))
self.mean_dist[data] = {}
self.mean[data] = {}
self.sigma[data] = {}
for data in self.list_split_through:
for dim in self.target_bn:
self.out_dl_predicted_bottleneck[data][dim]['scaled_shuffled'] = self.out_dl_predicted_bottleneck[data][dim]['scaled'][self.shuffling[data]]
for param in self.list_params:
self.out_dl_predicted[data][param]['scaled_shuffled'] = self.out_dl_predicted[data][param]['scaled'][self.shuffling[data]]
self.out_dl_predicted[data][param]['inverse_shuffled'] = self.out_dl_predicted[data][param]['inverse'][self.shuffling[data]]
self.out_dl_predicted[data][param]['scaled_shuffled_outlier'] = []
self.out_dl_predicted[data][param]['inverse_shuffled_outlier'] = []
for data in self.list_split_through:
for dim in self.target_bn:
self.sigma[data][dim] = {'original' : np.std(self.out_dl_predicted_bottleneck[data][dim]['scaled_shuffled'])}
self.mean[data][dim] = {'original' : np.mean(self.out_dl_predicted_bottleneck[data][dim]['scaled_shuffled'])}
for dim in self.target_bn:
for data1 in self.list_split_through:
self.out_dl_predicted_bottleneck[data1][dim]['ztransformed'] = {data2 : (self.out_dl_predicted_bottleneck[data1][dim]['scaled_shuffled'] - \
self.mean[data2][dim]['original'])/self.sigma[data2][dim]['original'] for data2 in self.list_split_through}
for dim in self.target_bn:
for data1 in self.list_split_through:
self.sigma[data1][dim]['ztransformed'] = {data2 : np.std(self.out_dl_predicted_bottleneck[data1][dim]['ztransformed'][data2]) for data2 in self.list_split_through}
self.mean[data1][dim]['ztransformed'] = {data2 : np.mean(self.out_dl_predicted_bottleneck[data1][dim]['ztransformed'][data2]) for data2 in self.list_split_through}
for data in self.list_split_through:
for dim in self.target_bn:
self.mean_dist[data][dim] = {'all_data' :np.abs(self.out_dl_predicted_bottleneck[data][dim]['ztransformed']['train'] - self.mean['train'][dim]['ztransformed']['train']),
'outlier_indices' :np.where(np.abs(self.out_dl_predicted_bottleneck[data][dim]['ztransformed']['train'] - self.mean['train'][dim]['ztransformed']['train']) > \
(self.mean['train'][dim]['ztransformed']['train']+3*self.sigma['train'][dim]['ztransformed']['train']))}
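                # Added note: the z-transform above uses the training-set statistics, so the
                # train mean is ~0 and its sigma ~1; 'outlier_indices' therefore flags samples
                # lying more than roughly three sigma away from the training mean.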
for data in self.list_split_through:
for dim in self.target_bn:
self.out_dl_predicted_bottleneck[data][dim]['scaled_shuffled_outlier'] = self.out_dl_predicted_bottleneck[data][dim]['scaled_shuffled']\
[self.mean_dist[data][dim]['outlier_indices'][0]]
for data in self.list_split_through:
for param in self.list_params:
for dim in self.target_bn:
for datum in self.out_dl_predicted_bottleneck[data][dim]['scaled_shuffled_outlier']:
self.out_dl_predicted[data][param]['scaled_shuffled_outlier'].append(datum)
# Scatter Plot for the Mean Distance
for data in self.list_split_through:
for dim in self.target_bn:
fig = plt.figure(figsize=(26,9))
plt.scatter(np.arange(self.out_dl_predicted_bottleneck[data][dim]['scaled'].shape[0]),
self.mean_dist[data][dim]['all_data'],
label='%s vs train for %s' % (data,dim))
plt.scatter(np.arange(self.out_dl_predicted_bottleneck[data][dim]['scaled'].shape[0]),
np.ones((self.out_dl_predicted_bottleneck[data][dim]['scaled'].shape[0],1)) * \
(self.mean['train'][dim]['ztransformed']['train']+3*self.sigma['train'][dim]['ztransformed']['train']),
label='3 sigma distance from the mean of %s of the train data' % (dim))
plt.legend()
plt.xlabel('data')
plt.ylabel('distance between ztransformed %s of %s data according to train data' % (dim,data))
plt.grid()
plt.show()
def writeStandartScaler_AsMatFile(self,scaler,fileName,keys):
if os.path.exists('./MatFiles/')==False:
os.makedirs('./MatFiles/')
self.mean = {}
self.variance = {}
self.scale = {}
self.scaler = {}
for key in keys:
self.mean[key] = scaler[key].mean_
self.variance[key] = scaler[key].var_
self.scale[key] = scaler[key].scale_
self.scaler['mean'] = self.mean
self.scaler['variance'] = self.variance
self.scaler['scale'] = self.scale
sio.savemat(fileName, self.scaler)
return self.scaler
def writeMinMaxScaler_AsMatFile(self,scaler,fileName,keys):
if os.path.exists('./MatFiles/')==False:
os.makedirs('./MatFiles/')
self.min = {}
self.max = {}
self.scale = {}
self.data_min = {}
self.data_max = {}
self.scaler = {}
for key in keys:
self.min[key], self.max[key] = scaler[key].feature_range
self.scale[key] = scaler[key].scale_
self.data_min[key] = scaler[key].data_min_
self.data_max[key] = scaler[key].data_max_
self.scaler['min'] = self.min
self.scaler['max'] = self.max
self.scaler['scale'] = self.scale
self.scaler['data_min'] = self.data_min
self.scaler['data_max'] = self.data_max
sio.savemat(fileName, self.scaler)
return self.scaler
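# --- Illustrative usage sketch (added comment; variable names such as
# `splitted_data`, `tests`, `params`, `features` and `targets` are assumptions,
# not defined in this file) ---
# mdl = model(splitted_data, tests, params, features, targets, cnn=False,
#             mdl_name='autoencoder_v1', bottleneck=3, list_nn=[150, 100, 20])
# mdl.summary()
# mdl.run(num_epochs=100, batch_size=256)
# mdl.results(load_weights=True)
# mdl.scatter_plot_for_bottleneck()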
|
[
"noreply@github.com"
] |
ibrahimkaya754.noreply@github.com
|
16ccc3251632fa54c80b6e90631f62c807be6e4a
|
8094d7c7270ebb59a7b63444e429de2fd8e0d4fc
|
/algo/bin/easy_install
|
fec17cc0315dded7dc28bf66ece53d35b9585910
|
[
"MIT"
] |
permissive
|
Ge0f3/Algo_comparision
|
8a347b04db287da1f2a475b5b64d3e9671ee7c4d
|
d55087c3273030f8a8e7005a97c15424ea244f38
|
refs/heads/master
| 2021-08-06T08:11:58.851584
| 2018-11-28T02:19:32
| 2018-11-28T02:19:32
| 145,577,506
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 299
|
#!/Users/geoffreygeo/Documents/workspace/Projects/algorithm_comp/algo/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"geoffrey.geofe@gmail.com"
] |
geoffrey.geofe@gmail.com
|
|
8466595a464939367e4ea3817124ca9719941ac2
|
682449f5dcc61c85eda4e673c6404b1af1ebec45
|
/horses/spiders/horseids_spider.py
|
20e28171c352ffa5ad17112e2f3dc5392d8581cd
|
[] |
no_license
|
anailis/dressage-horses
|
2e712eb9e6984dc7d2ebe6732774d0ec78d67238
|
64cf94a6e47c11a56ef79f761b961e6c5dcc0e36
|
refs/heads/master
| 2022-12-10T04:06:06.039667
| 2020-08-22T15:42:52
| 2020-08-22T15:42:52
| 264,661,423
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 587
|
py
|
import scrapy
class HorseIDSpider(scrapy.Spider):
name = "horseids"
def start_requests(self):
urls = [
'http://webcache.googleusercontent.com/search?q=cache:https://data.fei.org/Ranking/Search.aspx?rankingCode=D_WR',
]
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
links = response.xpath('//*[@id="PlaceHolderBottom_gvcResults"]/tr/td[position()=5]/a/@href').extract()
with open("horselinks.txt", "a") as f:
f.writelines(l + "\n" for l in links)
|
[
"s1533194@ed.ac.uk"
] |
s1533194@ed.ac.uk
|
9b760a64c9099644c16228671bf11e44fb1e847c
|
adf7c2bb565e0fff5086f762fd3e1f078574d9db
|
/test/core/1-main/batch/batch_request_test.py
|
ecbc1c3e97f6ce8a7ed638059576dbc8fbe2723f
|
[
"Apache-2.0"
] |
permissive
|
midnightlynx/py2neo
|
cf7af2a43bf7b4ace8662e683ac18dff42a6ab12
|
3b8a18ad5ac594d16a8fb3be75c7ca64a9aed5bf
|
refs/heads/release/2.0.9
| 2021-01-22T00:24:21.726398
| 2015-10-02T12:56:32
| 2015-10-02T12:56:32
| 44,700,090
| 0
| 0
| null | 2015-10-21T19:45:40
| 2015-10-21T19:45:40
| null |
UTF-8
|
Python
| false
| false
| 1,631
|
py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2014, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py2neo.batch import Job, Target
def test_can_create_batch_request():
method = "POST"
endpoint = "cypher"
target = Target(endpoint)
body = {"query": "CREATE (a) RETURN a"}
request = Job(method, target, body)
assert request.method == method
assert request.target.uri_string == endpoint
assert request.body == body
def test_batch_requests_are_equal_if_same():
method = "POST"
endpoint = "cypher"
target = Target(endpoint)
body = {"query": "CREATE (a) RETURN a"}
request_1 = Job(method, target, body)
request_2 = request_1
assert request_1 == request_2
assert hash(request_1) == hash(request_2)
def test_batch_requests_are_unequal_if_not_same():
method = "POST"
endpoint = "cypher"
target = Target(endpoint)
body = {"query": "CREATE (a) RETURN a"}
request_1 = Job(method, target, body)
request_2 = Job(method, target, body)
assert request_1 != request_2
assert hash(request_1) != hash(request_2)
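# Added note: the test above documents that, in this py2neo version, two Job objects
# built from identical arguments still compare unequal and hash differently, i.e.
# equality here behaves as identity.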
|
[
"nigel@nigelsmall.com"
] |
nigel@nigelsmall.com
|
d94af8a81ede6c35e19c00d438187b27fe6e7346
|
c195fcc28afdb912ba37de325ee0ab740e0964ca
|
/python/tests/test_euler005.py
|
5462c8fa00fe23b5ce0d2a41dd6effc83cc865f8
|
[] |
no_license
|
gfcharles/euler
|
f979ec77692bf40a59e3ce97d94ce9a319c9e8a1
|
08dc8d6fed8806d0ac72b394ea582dd62e50e61b
|
refs/heads/master
| 2023-07-04T01:01:03.335968
| 2023-06-19T16:39:31
| 2023-06-19T16:39:31
| 65,490,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 181
|
py
|
from euler005 import euler005
from tests.test_euler import TestEuler, TestEulerSetup
class TestEuler005(TestEulerSetup, TestEuler):
problem_number = 5
function = euler005
|
[
"gregcharles@javaranch.com"
] |
gregcharles@javaranch.com
|
e1a48dad8fe4cf914dd8dbcd3a3ad21b261b2037
|
91cbf0598b824f3936981e40dcd94b17d9644a79
|
/zclassifiershiftedae/ctrl_gen_model.py
|
1f0125bc49b713d1126c10d7eb76630da19f8a26
|
[
"Apache-2.0"
] |
permissive
|
VAShibaev/text_style_transfer
|
c9bcaa17e199c7d24c0bd26ac7a9ec9c010cc675
|
42a4a653d7c47b5f04fe8c2b043f70a28b924e1f
|
refs/heads/master
| 2021-07-21T08:21:54.010645
| 2020-08-14T11:57:04
| 2020-08-14T11:57:04
| 205,176,809
| 45
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,613
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2019 "Style Transfer for Texts: to Err is Human, but Error Margins Matter" Authors. All Rights Reserved.
#
# It's a modified code from
# Toward Controlled Generation of Text, ICML2017
# Zhiting Hu, Zichao Yang, Xiaodan Liang, Ruslan Salakhutdinov, Eric Xing
# https://github.com/asyml/texar/tree/master/examples/text_style_transfer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
NN Model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=invalid-name, too-many-locals
import tensorflow as tf
import texar as tx
from texar.modules import WordEmbedder, UnidirectionalRNNEncoder, \
MLPTransformConnector, AttentionRNNDecoder, \
GumbelSoftmaxEmbeddingHelper, Conv1DClassifier
from texar.core import get_train_op
from texar.utils import collect_trainable_variables, get_batch_size
class CtrlGenModel(object):
def __init__(self, inputs, vocab, gamma, lambda_g, lambda_z, lambda_z1, lambda_z2, lambda_ae, hparams=None):
self._hparams = tx.HParams(hparams, None)
self._build_model(inputs, vocab, gamma, lambda_g, lambda_z, lambda_z1, lambda_z2, lambda_ae)
def _build_model(self, inputs, vocab, gamma, lambda_g, lambda_z, lambda_z1, lambda_z2, lambda_ae):
embedder = WordEmbedder(
vocab_size=vocab.size,
hparams=self._hparams.embedder)
encoder = UnidirectionalRNNEncoder(hparams=self._hparams.encoder)
enc_text_ids = inputs['text_ids'][:, 1:]
enc_outputs, final_state = encoder(embedder(enc_text_ids),
sequence_length=inputs['length']-1)
z = final_state[:, self._hparams.dim_c:]
# -------------------- CLASSIFIER ---------------------
n_classes = self._hparams.num_classes
z_classifier_l1 = MLPTransformConnector(256, hparams=self._hparams.z_classifier_l1)
z_classifier_l2 = MLPTransformConnector(64, hparams=self._hparams.z_classifier_l2)
z_classifier_out = MLPTransformConnector(n_classes if n_classes > 2 else 1)
z_logits = z_classifier_l1(z)
z_logits = z_classifier_l2(z_logits)
z_logits = z_classifier_out(z_logits)
z_pred = tf.greater(z_logits, 0)
z_logits = tf.reshape(z_logits, [-1])
z_pred = tf.to_int64(tf.reshape(z_pred, [-1]))
loss_z_clas = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.to_float(inputs['labels']), logits=z_logits)
loss_z_clas = tf.reduce_mean(loss_z_clas)
accu_z_clas = tx.evals.accuracy(labels=inputs['labels'], preds=z_pred)
# -------------------________________---------------------
label_connector = MLPTransformConnector(self._hparams.dim_c)
labels = tf.to_float(tf.reshape(inputs['labels'], [-1, 1]))
c = label_connector(labels)
c_ = label_connector(1 - labels)
h = tf.concat([c, z], 1)
h_ = tf.concat([c_, z], 1)
# Teacher-force decoding and the auto-encoding loss for G
decoder = AttentionRNNDecoder(
memory=enc_outputs,
memory_sequence_length=inputs['length']-1,
cell_input_fn=lambda inputs, attention: inputs,
vocab_size=vocab.size,
hparams=self._hparams.decoder)
connector = MLPTransformConnector(decoder.state_size)
g_outputs, _, _ = decoder(
initial_state=connector(h), inputs=inputs['text_ids'],
embedding=embedder, sequence_length=inputs['length']-1)
loss_g_ae = tx.losses.sequence_sparse_softmax_cross_entropy(
labels=inputs['text_ids'][:, 1:],
logits=g_outputs.logits,
sequence_length=inputs['length']-1,
average_across_timesteps=True,
sum_over_timesteps=False)
# Gumbel-softmax decoding, used in training
start_tokens = tf.ones_like(inputs['labels']) * vocab.bos_token_id
end_token = vocab.eos_token_id
gumbel_helper = GumbelSoftmaxEmbeddingHelper(
embedder.embedding, start_tokens, end_token, gamma)
soft_outputs_, _, soft_length_, = decoder(
helper=gumbel_helper, initial_state=connector(h_))
soft_outputs, _, soft_length, = decoder(
helper=gumbel_helper, initial_state=connector(h))
# ---------------------------- SHIFTED LOSS -------------------------------------
_, encoder_final_state_ = encoder(embedder(soft_ids=soft_outputs_.sample_id),
sequence_length=inputs['length'] - 1)
_, encoder_final_state = encoder(embedder(soft_ids=soft_outputs.sample_id),
sequence_length=inputs['length'] - 1)
new_z_ = encoder_final_state_[:, self._hparams.dim_c:]
new_z = encoder_final_state[:, self._hparams.dim_c:]
cos_distance_z_ = tf.abs(
tf.losses.cosine_distance(tf.nn.l2_normalize(z, axis=1), tf.nn.l2_normalize(new_z_, axis=1), axis=1))
cos_distance_z = tf.abs(
tf.losses.cosine_distance(tf.nn.l2_normalize(z, axis=1), tf.nn.l2_normalize(new_z, axis=1), axis=1))
# ----------------------------______________-------------------------------------
# Greedy decoding, used in eval
outputs_, _, length_ = decoder(
decoding_strategy='infer_greedy', initial_state=connector(h_),
embedding=embedder, start_tokens=start_tokens, end_token=end_token)
# Creates classifier
classifier = Conv1DClassifier(hparams=self._hparams.classifier)
clas_embedder = WordEmbedder(vocab_size=vocab.size,
hparams=self._hparams.embedder)
# Classification loss for the classifier
clas_logits, clas_preds = classifier(
inputs=clas_embedder(ids=inputs['text_ids'][:, 1:]),
sequence_length=inputs['length']-1)
loss_d_clas = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.to_float(inputs['labels']), logits=clas_logits)
loss_d_clas = tf.reduce_mean(loss_d_clas)
accu_d = tx.evals.accuracy(labels=inputs['labels'], preds=clas_preds)
# Classification loss for the generator, based on soft samples
soft_logits, soft_preds = classifier(
inputs=clas_embedder(soft_ids=soft_outputs_.sample_id),
sequence_length=soft_length_)
loss_g_clas = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.to_float(1-inputs['labels']), logits=soft_logits)
loss_g_clas = tf.reduce_mean(loss_g_clas)
# Accuracy on soft samples, for training progress monitoring
accu_g = tx.evals.accuracy(labels=1-inputs['labels'], preds=soft_preds)
# Accuracy on greedy-decoded samples, for training progress monitoring
_, gdy_preds = classifier(
inputs=clas_embedder(ids=outputs_.sample_id),
sequence_length=length_)
accu_g_gdy = tx.evals.accuracy(
labels=1-inputs['labels'], preds=gdy_preds)
# Aggregates losses
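        # Added note: the generator objective mixes reconstruction (loss_g_ae), transfer
        # classification (loss_g_clas) and the two shifted-autoencoder cosine terms, while
        # loss_z_clas enters with a negative weight so the generator adversarially removes
        # style information from z; loss_z below trains the z-classifier itself.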
loss_g = lambda_ae * loss_g_ae + \
lambda_g * loss_g_clas + \
lambda_z1 * cos_distance_z + cos_distance_z_ * lambda_z2 \
- lambda_z * loss_z_clas
loss_d = loss_d_clas
loss_z = loss_z_clas
# Creates optimizers
g_vars = collect_trainable_variables(
[embedder, encoder, label_connector, connector, decoder])
d_vars = collect_trainable_variables([clas_embedder, classifier])
z_vars = collect_trainable_variables([z_classifier_l1, z_classifier_l2, z_classifier_out])
train_op_g = get_train_op(
loss_g, g_vars, hparams=self._hparams.opt)
train_op_g_ae = get_train_op(
loss_g_ae, g_vars, hparams=self._hparams.opt)
train_op_d = get_train_op(
loss_d, d_vars, hparams=self._hparams.opt)
train_op_z = get_train_op(
loss_z, z_vars, hparams=self._hparams.opt
)
# Interface tensors
self.losses = {
"loss_g": loss_g,
"loss_g_ae": loss_g_ae,
"loss_g_clas": loss_g_clas,
"loss_d": loss_d_clas,
"loss_z_clas": loss_z_clas,
"loss_cos_": cos_distance_z_,
"loss_cos": cos_distance_z
}
self.metrics = {
"accu_d": accu_d,
"accu_g": accu_g,
"accu_g_gdy": accu_g_gdy,
"accu_z_clas": accu_z_clas
}
self.train_ops = {
"train_op_g": train_op_g,
"train_op_g_ae": train_op_g_ae,
"train_op_d": train_op_d,
"train_op_z": train_op_z
}
self.samples = {
"original": inputs['text_ids'][:, 1:],
"transferred": outputs_.sample_id,
"z_vector": z,
"labels_source": inputs['labels'],
"labels_target": 1 - inputs['labels'],
"labels_predicted": gdy_preds
}
self.fetches_train_g = {
"loss_g": self.train_ops["train_op_g"],
"loss_g_ae": self.losses["loss_g_ae"],
"loss_g_clas": self.losses["loss_g_clas"],
"loss_shifted_ae1": self.losses["loss_cos"],
"loss_shifted_ae2": self.losses["loss_cos_"],
"accu_g": self.metrics["accu_g"],
"accu_g_gdy": self.metrics["accu_g_gdy"],
"accu_z_clas": self.metrics["accu_z_clas"]
}
self.fetches_train_z = {
"loss_z": self.train_ops["train_op_z"],
"accu_z": self.metrics["accu_z_clas"]
}
self.fetches_train_d = {
"loss_d": self.train_ops["train_op_d"],
"accu_d": self.metrics["accu_d"]
}
fetches_eval = {"batch_size": get_batch_size(inputs['text_ids'])}
fetches_eval.update(self.losses)
fetches_eval.update(self.metrics)
fetches_eval.update(self.samples)
self.fetches_eval = fetches_eval
|
[
"noreply@github.com"
] |
VAShibaev.noreply@github.com
|
1457deb196fdf8459944ff741a9f967060cc1345
|
ef54d37f8a3303013ca7469871a320d303957ed7
|
/robo4.2/fusion/FusionLibrary/__init__.py
|
afedb07962f29a037d08a6de6e4e364cf357fca4
|
[] |
no_license
|
richa92/Jenkin_Regression_Testing
|
d18badfcf16bda682dfe7bcbbd66f54a9a27a58d
|
24a74926170cbdfafa47e972644e2fe5b627d8ff
|
refs/heads/master
| 2020-07-12T10:01:59.099137
| 2019-08-27T12:14:53
| 2019-08-27T12:14:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,541
|
py
|
"""
FusionLibrary instance
"""
from dateutil.parser import parse as parse_date
from dateutil import tz
import datetime
from collections import namedtuple
from git import Repo
import socket
from time import sleep
import json
from Queue import Queue
from threading import Thread
import os
import uuid
from SSHLibrary import SSHLibrary
import sys
from robot import version as robot_version
from robot.libraries.BuiltIn import BuiltIn
from RoboGalaxyLibrary.utilitylib import logging as logger
from RoboGalaxyLibrary.ui.common import ui_lib
from FusionLibrary.version import get_version
from FusionLibrary.keywords.fusion_api import FusionAPIKeywords
from FusionLibrary.keywords.hellfire_api import SVMCAPIKeywords
from FusionLibrary.keywords.hellfire_api import HellfireAPIKeywords
from FusionLibrary.keywords.fusion_sanmanager_ui import FusionSanmanagerUIKeywords
from FusionLibrary.keywords.fusion_san_ui import FusionSanUIKeywords
from FusionLibrary.keywords.fusion_ui import FusionUIKeywords
from FusionLibrary.keywords.mantra_ui import MantraUIKeywords
# from FusionLibrary.keywords.hal_api import HalAPIKeywords
from FusionLibrary.keywords.dcs_api import DCSAPIKeywords
from FusionLibrary.keywords.fusion_srm_api import FusionSRMOaApiKeywords
from FusionLibrary.keywords.fusion_srm_api import FusionSRMIloApiKeywords
from FusionLibrary.keywords.fusion_pmsan_ui import FusionPMSanUiKeywords
from FusionLibrary.cli.oa.oa_operation import OACLIKeywords
from FusionLibrary.cli.oneview.fusion_operation import FusionCLIKeywords
from FusionLibrary.cli.traffic.keywords import TrafficLibraryKeywords
from FusionLibrary.keywords.tru import TRUKeywords
from FusionLibrary.libs.utils.elk import ElkQueueWriter
from FusionLibrary.keywords.cpt.payload_generator import CptPayloadGenerator
from FusionLibrary.keywords.network.config_generator import NetworkConfigGenerator
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
execfile(os.path.join(THIS_DIR, 'version.py'))
__version__ = get_version()
# These constants define the endpoint for activity logging
DEFAULT_ACTIVITY_LOGGING_SERVER = 'http://rist-elk.vse.rdlabs.hpecorp.net:9200'
DEFAULT_ACTIVITY_INDEX_NAME = "fusionlibrary-activity"
def _get_host_variable():
"""
find the variable that contains the OV ip
:return:
"""
# typical OneView IP variables
glist = ['${APPLIANCE_IP}', '${IP}', '${FUSION_IP}', '${OV_IP}', '${OV}', '${ONEVIEW}', '${ONEVIEW_IP}']
for k, v in BuiltIn().get_variables().iteritems():
if k.upper() in [i.upper() for i in glist]:
return v
class FusionLibrary(
FusionAPIKeywords,
FusionAPIKeywords.ActiveUserSessionsKeywords,
FusionAPIKeywords.AlertKeywords,
FusionAPIKeywords.AuditLogKeywords,
FusionAPIKeywords.AuthorizationsKeywords,
FusionAPIKeywords.ApplianceDeviceReadCommunityKeywords,
FusionAPIKeywords.ApplianceEulaKeywords,
FusionAPIKeywords.ApplianceFactoryResetKeywords,
FusionAPIKeywords.ApplianceFirmwareKeywords,
FusionAPIKeywords.ApplianceHealthStatusKeywords,
FusionAPIKeywords.ApplianceNetworkInterfacesKeywords,
FusionAPIKeywords.ApplianceNodeInformationKeywords,
FusionAPIKeywords.ApplianceShutdownKeywords,
FusionAPIKeywords.ApplianceStateKeywords,
FusionAPIKeywords.ApplianceSupportDumpKeywords,
FusionAPIKeywords.ApplianceTimeAndLocaleConfigurationKeywords,
FusionAPIKeywords.ApplianceTrapDestinationKeywords,
FusionAPIKeywords.ApplianceSnmpv3TrapDestinationKeywords,
FusionAPIKeywords.ApplianceSnmpv3TrapForwardingUserKeywords,
FusionAPIKeywords.ApplianceUpgrade,
FusionAPIKeywords.BackupKeywords,
FusionAPIKeywords.CertificateAuthorityKeywords,
FusionAPIKeywords.CertificateValidationConfigurationKeywords,
FusionAPIKeywords.CertificateClientRabbitMqKeywords,
FusionAPIKeywords.ClientCertificateKeywords,
FusionAPIKeywords.ConnectionsKeywords,
FusionAPIKeywords.ConnectionTemplateKeywords,
FusionAPIKeywords.DatacenterKeywords,
FusionAPIKeywords.DeviceManagerKeywords,
FusionAPIKeywords.DeploymentManagerKeywords,
FusionAPIKeywords.DriveEnclosureKeywords,
FusionAPIKeywords.DomainsKeywords,
FusionAPIKeywords.EmailNotificationKeywords,
FusionAPIKeywords.EnclosureKeywords,
FusionAPIKeywords.RackManagerKeywords,
FusionAPIKeywords.EnclosureGroupKeywords,
FusionAPIKeywords.EthernetNetworkKeywords,
FusionAPIKeywords.EventKeywords,
FusionAPIKeywords.FabricKeywords,
FusionAPIKeywords.FabricManagerKeywords,
FusionAPIKeywords.FcNetworkKeywords,
FusionAPIKeywords.FcoeNetworkKeywords,
FusionAPIKeywords.FirmwareBundleKeywords,
FusionAPIKeywords.FirmwareDriverKeywords,
FusionAPIKeywords.GlobalSettingsKeywords,
FusionAPIKeywords.HaNodesKeywords,
FusionAPIKeywords.HypervisorManagerKeywords,
FusionAPIKeywords.HypervisorClusterProfileKeywords,
FusionAPIKeywords.HypervisorHostProfileKeywords,
FusionAPIKeywords.HypervisorHostKeywords,
FusionAPIKeywords.HypervisorClustersKeywords,
FusionAPIKeywords.IdPoolKeywords,
FusionAPIKeywords.IdPoolsIpv4RangeKeywords,
FusionAPIKeywords.IdPoolsIpv4SubnetKeywords,
FusionAPIKeywords.IdPoolsVmacRangeKeywords,
FusionAPIKeywords.IdPoolsVsnRangeKeywords,
FusionAPIKeywords.IdPoolsVwwnRangeKeywords,
FusionAPIKeywords.IndexAssociationKeywords,
# FusionAPIKeywords.IndexResourceKeywords,
FusionAPIKeywords.IndexResourceKeywords,
# FusionAPIKeywords.IndexSearchSuggestionKeywords,
# FusionAPIKeywords.IndexTreeKeywords,
FusionAPIKeywords.InterconnectLinkTopologyKeywords,
FusionAPIKeywords.InterconnectKeywords,
FusionAPIKeywords.InterconnectTypesKeywords,
FusionAPIKeywords.InternalLinkSetKeywords,
# FusionAPIKeywords.LabelKeywords,
FusionAPIKeywords.LicensesKeywords,
FusionAPIKeywords.SecurityStandardsKeywords,
FusionAPIKeywords.LoginDetailsKeywords,
FusionAPIKeywords.LoginDomainKeywords,
FusionAPIKeywords.LogicalDownlinkKeywords,
FusionAPIKeywords.LoginDomainsGlobalSettingsKeywords,
FusionAPIKeywords.LoginDomainsLoginCertificatesKeywords,
FusionAPIKeywords.LoginDomainsGroupToRoleMappingKeywords,
FusionAPIKeywords.LoginSessionKeywords,
FusionAPIKeywords.LogicalInterconnectKeywords,
FusionAPIKeywords.LogicalInterconnectGroupKeywords,
FusionAPIKeywords.LogicalSwitchGroupKeywords,
FusionAPIKeywords.LogicalSwitchKeywords,
FusionAPIKeywords.LogicalEnclosureKeywords,
FusionAPIKeywords.ManagedSanKeywords,
FusionAPIKeywords.MetricStreamingKeywords,
FusionAPIKeywords.NetworkSetKeywords,
FusionAPIKeywords.MigratableVcDomainKeywords,
FusionAPIKeywords.PingKeywords,
FusionAPIKeywords.PowerDeviceKeywords,
FusionAPIKeywords.ProviderKeywords,
FusionAPIKeywords.RackKeywords,
FusionAPIKeywords.RemoteSyslogKeywords,
FusionAPIKeywords.RemoteSupportKeywords,
FusionAPIKeywords.ConfigurationKeywords,
FusionAPIKeywords.ReportKeywords,
FusionAPIKeywords.RestoreKeywords,
FusionAPIKeywords.RolesKeywords,
FusionAPIKeywords.SasInterconnectsKeywords,
FusionAPIKeywords.SasInterconnectTypesKeywords,
FusionAPIKeywords.SasLogicalInterconnectGroupKeywords,
FusionAPIKeywords.SasLogicalInterconnectKeywords,
FusionAPIKeywords.ServerHardwareTypesKeywords,
FusionAPIKeywords.ServerHardwareKeywords,
FusionAPIKeywords.ServerProfileKeywords,
FusionAPIKeywords.ServerProfileTemplateKeywords,
FusionAPIKeywords.ServiceAccessKeywords,
FusionAPIKeywords.SessionsKeywords,
FusionAPIKeywords.StartupProgressKeywords,
FusionAPIKeywords.StoragePoolKeywords,
FusionAPIKeywords.StorageSystemKeywords,
FusionAPIKeywords.StorageVolumeKeywords,
FusionAPIKeywords.StorageVolumeTemplateKeywords,
FusionAPIKeywords.StorageVolumeAttachmentKeywords,
FusionAPIKeywords.SwitchKeywords,
FusionAPIKeywords.SwitchTypesKeywords,
FusionAPIKeywords.TaskKeywords,
FusionAPIKeywords.UplinkSetKeywords,
FusionAPIKeywords.UserKeywords,
FusionAPIKeywords.VersionKeywords,
FusionAPIKeywords.SasLogicalJbodsKeywords,
FusionAPIKeywords.SasLogicalJbodAttachmentsKeywords,
FusionAPIKeywords.WebServerCertificateKeywords,
FusionAPIKeywords.HalAPIKeywords,
FusionAPIKeywords.PermAPIKeywords,
FusionAPIKeywords.ProxyServerKeywords,
FusionAPIKeywords.IPKeywords,
FusionAPIKeywords.ScopeKeywords,
FusionAPIKeywords.RepositoryKeywords,
FusionAPIKeywords.SshAccessKeywords,
FusionAPIKeywords.ApplianceCertificateKeywords,
FusionAPIKeywords.RemoteCertificateKeywords,
FusionAPIKeywords.ServerCertificateKeywords,
FusionAPIKeywords.CertificateStatusKeywords,
FusionUIKeywords,
MantraUIKeywords,
FusionSRMOaApiKeywords,
FusionSRMIloApiKeywords,
FusionSanmanagerUIKeywords,
FusionSanUIKeywords,
DCSAPIKeywords,
FusionPMSanUiKeywords,
HellfireAPIKeywords.InfrastructureVmsKeywords,
HellfireAPIKeywords.StoreVirtualVsaClusterKeywords,
OACLIKeywords,
FusionCLIKeywords,
HellfireAPIKeywords,
SVMCAPIKeywords,
TrafficLibraryKeywords,
TrafficLibraryKeywords.VspLibraryKeywords,
TrafficLibraryKeywords.PingTrafficLibraryKeywords,
TrafficLibraryKeywords.IPerfTrafficLibraryKeywords,
TrafficLibraryKeywords.IOMeterLibraryKeywords,
FusionAPIKeywords.OSDeploymentServerKeywords,
TRUKeywords,
CptPayloadGenerator,
NetworkConfigGenerator):
""" Main FusionLibrary keyword class definition """
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
ROBOT_LIBRARY_VERSION = __version__
ROBOT_LISTENER_API_VERSION = 2
FILTER_LIBRARIES = ['BuiltIn', 'Collections', 'Dialogs', 'OperatingSystem', 'SSHLibrary', 'String', 'XML']
MAX_QUEUE = 1000 # max elk data queue size
# Elk data encapsulation object
ElkItem = namedtuple("ElkItem", "obj_type, data")
def _gather_repo_info(self):
"""
Private method to initialize variables pertaining to Test repository
:return: None
"""
try:
# Invalid error would be thrown if path is not source_root
repo = Repo(os.path.dirname(THIS_DIR))
self.repo_commit = str(repo.rev_parse('HEAD'))
self.repo_branch_name = repo.git.rev_parse('--abbrev-ref',
'--symbolic-full-name',
'@{u}')
del repo
except: # noqa
pass
def __init__(self):
self.ROBOT_LIBRARY_LISTENER = self
self.elk_queue_writer = None
self.hostname = socket.gethostname()
self.uuid = str(uuid.uuid1())
self._ov = None
self._ssh = None
self.activity_queue = None
self.log_activity = False
self.log_activity_to_cidebug_log = False
self.queue_writer = None
self.repo_commit = 'Not Found'
self.repo_branch_name = 'Not identified'
self._gather_repo_info()
logger._log_to_console_and_log_file("Fusion library version %s" % __version__)
for base in FusionLibrary.__bases__:
base.__init__(self)
def __logging_activity(self):
"""
This private method handles writing to the ElkWriter thread
if logging is enabled.
Note: log activity with -v LOG_ACTIVITY:True
"""
# initialize and start the activity logging queue
self.log_activity = BuiltIn().get_variable_value("${LOG_ACTIVITY}")
        if not self.log_activity or self.log_activity == 'False':
return False
# initialize queue and queue writer
if not self.activity_queue:
self.activity_queue = Queue(maxsize=self.MAX_QUEUE)
if not self.queue_writer:
host = BuiltIn().get_variable_value("${ACTIVITY_LOGGING_SERVER}")
index = BuiltIn().get_variable_value("${ACTIVITY_INDEX_NAME}")
if not host:
host = DEFAULT_ACTIVITY_LOGGING_SERVER
if not index:
index = DEFAULT_ACTIVITY_INDEX_NAME
self.queue_writer = ElkQueueWriter(host, index, self.activity_queue)
self.queue_writer.start()
return True
def __logging_activity_to_cidebug_log(self):
"""
This private method handles writing to the /ci/logs/ciDebug.log file on the appliance
if logging is enabled.
Note: log activity with -v LOG_ACTIVITY_TO_CIDEBUG:True
"""
# initialize and start the activity logging queue
self.log_activity_to_cidebug_log = BuiltIn().get_variable_value("${LOG_ACTIVITY_TO_CIDEBUG}")
if not self.log_activity_to_cidebug_log:
return False
# get the appliance host and open ssh session
if not self._ov:
self._ov = _get_host_variable()
if not self._ssh:
self.__create_ssh_connection_and_login(self._ov)
return True
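    # Added note (illustrative values only): both logging paths are switched on via
    # Robot Framework variables on the command line, e.g.
    #   robot -v LOG_ACTIVITY:True -v ACTIVITY_LOGGING_SERVER:http://elk.example:9200 suite.robot
    #   robot -v LOG_ACTIVITY_TO_CIDEBUG:True suite.robot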
def __create_ssh_connection_and_login(self, host, username='root', password='hpvse1'):
""" Create a new SSH connection and log in """
try:
self._ssh = SSHLibrary()
self._ssh.set_default_configuration(timeout='15 seconds', term_type='xterm', prompt='#')
self._ssh.open_connection(host)
self._ssh.login(username, password)
except: # noqa
e = sys.exc_info()[0]
logger._log_to_console_and_log_file("unable to connect ssh: {} {}".format(host, e))
self._ssh = None
def __run_ssh_commands(self, cmds):
""" Run an SSH command """
if self._ssh is not None:
if self._ssh.get_connection(host=True) is not None:
try:
self._ssh.write(cmds)
except: # noqa
e = sys.exc_info()[0]
logger._log_to_console_and_log_file("unable to write to ssh: {} {}".format(cmds, e))
self._ssh.close_connection()
self._ssh = None
else:
logger.info("no ssh: session {}".format(cmds))
self._ssh.close_connection()
self._ssh = None
def _write_log(self, ltype, stat, attrs):
""" Write a log entry """
name = None
if 'longname' in attrs:
name = attrs['longname']
elif 'kwname' in attrs:
name = attrs['kwname']
return """date +"%Y-%m-%d %H:%M:%S.%N %Z,INFO,ROBO,{},{},{},{}" >> /ci/logs/ciDebug.log""".format(self.uuid,
ltype.upper(),
name,
stat.upper())
def _add_data_to_attrs(self, name, attrs):
"""
Add additional data to suite/test/keyword attributes for Elk logging.
"""
metadata = BuiltIn().get_variable_value("&{SUITE METADATA}")
if not self._ov:
self._ov = _get_host_variable()
if 'kwname' in attrs:
attrs['name'] = attrs['kwname']
del attrs['kwname']
else:
attrs['name'] = name
        attrs['suiteName'] = BuiltIn().get_variable_value("${SUITE_NAME}")
        attrs['suiteSource'] = BuiltIn().get_variable_value("${SUITE_SOURCE}")
        attrs['testName'] = BuiltIn().get_variable_value("${TEST_NAME}")
attrs['oneViewIp'] = self._ov
attrs['oneViewVersion'] = metadata.get("OneView Version")
if 'starttime' in attrs:
attrs['starttime'] = parse_date(attrs['starttime']).replace(tzinfo=tz.tzlocal()).astimezone(tz.tzutc()).isoformat()
if 'endtime' in attrs:
attrs['endtime'] = parse_date(attrs['endtime']).replace(tzinfo=tz.tzlocal()).astimezone(tz.tzutc()).isoformat()
attrs['@timestamp'] = attrs.get('starttime')
attrs['hostname'] = self.hostname
attrs['runId'] = self.uuid
attrs['gitCommitId'] = self.repo_commit
attrs['gitRemoteBranch'] = self.repo_branch_name
return attrs
def _start_suite(self, name, attrs): # pylint: disable=unused-argument
"""
This listener logs suite start
"""
if self.__logging_activity_to_cidebug_log():
self.__run_ssh_commands(self._write_log('suite', 'started', attrs))
BuiltIn().set_global_variable("${RUN_UUID}", self.uuid)
def _end_suite(self, name, attrs): # pylint: disable=unused-argument
"""
This listener logs suite activity
"""
if self.__logging_activity_to_cidebug_log():
self.__run_ssh_commands(self._write_log('suite', 'ended', attrs))
self._ssh.close_connection()
if self.__logging_activity():
# If the queue is full, don't write anything (since queue.put blocks, it would halt the test).
# Otherwise, write the name and attrs to Elk
if not self.activity_queue.full():
self.activity_queue.put_nowait(self.ElkItem('suite', self._add_data_to_attrs(name, attrs)))
if attrs.get('id') == 's1':
# In order to process all queue items before the test suite exits,
# it's necessary to wait for the queue to become empty or until the timer expires.
# Otherwise, the test will exit before the queue is fully written.
start = datetime.datetime.now()
while not self.activity_queue.empty() and (datetime.datetime.now() - start).total_seconds() < 10.0:
sleep(1)
def _start_test(self, name, attrs): # pylint: disable=unused-argument
"""
This listener logs test activity
"""
if self.__logging_activity_to_cidebug_log():
self.__run_ssh_commands(self._write_log('test case', 'started', attrs))
def _end_test(self, name, attrs):
"""
This listener logs test activity
"""
if self.__logging_activity_to_cidebug_log():
self.__run_ssh_commands(self._write_log('test case', 'ended', attrs))
# If the queue is full, don't write anything (since queue.put blocks, it would halt the test).
# Otherwise, write the name and attrs to Elk
if self.__logging_activity() and not self.activity_queue.full():
self.activity_queue.put_nowait(self.ElkItem('test', self._add_data_to_attrs(name, attrs)))
def _start_keyword(self, name, attrs): # pylint: disable=unused-argument
"""
This listener logs keyword activity
"""
if self.__logging_activity_to_cidebug_log():
# filter out libraries and keyword types we're not interested in
if attrs.get('libname') not in self.FILTER_LIBRARIES and attrs.get('type') not in ['For', 'For Item']:
self.__run_ssh_commands(self._write_log('keyword', 'started', attrs))
def _end_keyword(self, name, attrs):
"""
This listener logs keyword activity
"""
if self.__logging_activity_to_cidebug_log():
# filter out libraries and keyword types we're not interested in
if attrs.get('libname') not in self.FILTER_LIBRARIES and attrs.get('type') not in ['For', 'For Item']:
self.__run_ssh_commands(self._write_log('keyword', 'ended', attrs))
if self.__logging_activity():
# filter out libraries and keyword types we're not interested in
if attrs.get('libname') not in self.FILTER_LIBRARIES and attrs.get('type') not in ['For', 'For Item']:
if not self.activity_queue.full():
self.activity_queue.put_nowait(self.ElkItem('keyword', self._add_data_to_attrs(name, attrs)))
|
[
"akul@SAC0MKUVCQ.asiapacific.hpqcorp.net"
] |
akul@SAC0MKUVCQ.asiapacific.hpqcorp.net
|
6545437c315e183c27122cfafa9f1822e292430d
|
96884ea775ca5dc138e328710cdb7f7a0095530a
|
/django_lesson_5/mysite/article/migrations/0005_article_description_en.py
|
5375de4d3ffecb9ede5ecdd14313ed736a5b2e7b
|
[] |
no_license
|
rudneff13/python-course-alphabet
|
2ec370274ba4bfa46ca36b739854e7f4b3b9bc76
|
d8f50b46919655ec9ac4337551165e1a029b880b
|
refs/heads/master
| 2020-05-09T14:58:22.419700
| 2019-07-20T13:20:42
| 2019-07-20T13:20:42
| 181,216,097
| 1
| 0
| null | 2019-06-20T10:43:20
| 2019-04-13T18:59:17
|
Python
|
UTF-8
|
Python
| false
| false
| 456
|
py
|
# Generated by Django 2.2.2 on 2019-07-02 17:03
import ckeditor_uploader.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('article', '0004_auto_20190628_1720'),
]
operations = [
migrations.AddField(
model_name='article',
name='description_en',
field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True),
),
]
|
[
"ali@echoua.com"
] |
ali@echoua.com
|
804d650c334596fa3f2252bb27787f4cce9f4663
|
0687bdd028d25829ac424a847a6389bdc9159975
|
/client/delivery/migrations/0001_initial.py
|
78027523fa7bf54cf8618c4cc23c1257a742f827
|
[
"Apache-2.0"
] |
permissive
|
daniel-waruo/e-commerse-api
|
a9fbea57348c3295d1092b906e201fe10c6d1b3e
|
6b080039398fb4099a34335317d649dd67783f63
|
refs/heads/master
| 2020-08-04T07:09:41.840976
| 2020-06-19T16:41:13
| 2020-06-19T16:41:13
| 212,050,169
| 6
| 0
|
Apache-2.0
| 2020-03-15T10:17:53
| 2019-10-01T08:45:04
|
Python
|
UTF-8
|
Python
| false
| false
| 977
|
py
|
# Generated by Django 3.0.3 on 2020-02-18 07:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import utils.phone_number_field
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='DeliveryInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('phone_number', utils.phone_number_field.PhoneNumberField(max_length=128, region=None)),
('email', models.EmailField(max_length=254)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Delivery Information',
},
),
]
|
[
"warudaniel@gmail.com"
] |
warudaniel@gmail.com
|
63168dc4fe2a4edf82b769b7d765f861cbb60ccf
|
ab323e14ea148257439dd32ea96dc3c815b1660a
|
/house/__init__.py
|
d9e800339eb9bae0b03dee59ff6323462747fd31
|
[] |
no_license
|
SuperTapood/Discord-Bots
|
b12081180acde2ef1dcb70aaf9329d52e42be096
|
7353766b991ada442cead3ad36dd9f5f1f529ad0
|
refs/heads/main
| 2023-03-24T19:50:35.516065
| 2021-03-24T20:33:11
| 2021-03-24T20:33:11
| 329,584,006
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30
|
py
|
from house.house import House
|
[
"yoav.o252@gmail.com"
] |
yoav.o252@gmail.com
|
b61f84a91f0e0a85dcf189a6d9a80abcab47f3a5
|
a586a86f2e04eea667ceb9ce85ae260bfb4e6cc0
|
/main.py
|
58d75dbbbb70383a5ca0b4319344cbcbf728b1a3
|
[] |
no_license
|
OlafMerkert/avpy
|
87b3dcc3a67c84228d543a80bca662fc0008303f
|
7db08483a2175b529df12101c1673747f341ec54
|
refs/heads/master
| 2021-01-19T08:29:04.600235
| 2011-08-26T16:38:30
| 2011-08-26T16:38:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Olaf Merkert"
import sys
from PyQt4 import QtGui
import ui.data_entry_forms as forms
import data.models
import data.daten as daten
from ui.data_display import AssistentenTabelle, TaetigkeitenTabelle, AssistentEntryUi, TaetigkeitEntryUi
if __name__ == '__main__':
app = QtGui.QApplication([])
daten.load()
# print daten.assistenten
m = AssistentEntryUi()
# m = TaetigkeitEntryUi()
m.show()
sys.exit(app.exec_())
|
[
"olaf@m-merkert.de"
] |
olaf@m-merkert.de
|
8cb048e720abbdf1ceb6f19fe5ccc17db88c7fe4
|
247d3f01f609062410004228297307c42fdf7e52
|
/Stock_Prediction_SM.py
|
2613dba4d925846f65336c2886ca7d987770fc22
|
[] |
no_license
|
SomnathMukherjee88/Apple-Stock-Price-Prediction
|
24dffc8814f02cfeb412b8a136ca89130d12f663
|
0101b9ab585bfe1da027013cfc4d4e8a637ac02e
|
refs/heads/main
| 2023-07-19T12:52:09.171646
| 2021-09-04T10:26:44
| 2021-09-04T10:26:44
| 403,026,981
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,172
|
py
|
import pandas as pd
apple = pd.read_csv("AAPL.csv")
print(apple.head())
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
plt.figure(figsize=(10, 4))
plt.title("Apple's Stock Price")
plt.xlabel("Days")
plt.ylabel("Close Price USD ($)")
plt.plot(apple["Close Price"])
plt.show()
apple = apple[["Close Price"]]
print(apple.head())
futureDays = 25
apple["Prediction"] = apple[["Close Price"]].shift(-futureDays)
print(apple.head())
print(apple.tail())
import numpy as np
x = np.array(apple.drop(["Prediction"], 1))[:-futureDays]
print(x)
y = np.array(apple["Prediction"])[:-futureDays]
print(y)
from sklearn.model_selection import train_test_split
xtrain, xtest, ytrain, ytest = train_test_split(x, y, test_size=0.25)
# Creating the decision tree regressor model
from sklearn.tree import DecisionTreeRegressor
tree = DecisionTreeRegressor().fit(xtrain, ytrain)
# creating the Linear Regression model
from sklearn.linear_model import LinearRegression
linear = LinearRegression().fit(xtrain, ytrain)
xfuture = apple.drop(["Prediction"], 1)[:-futureDays]
xfuture = xfuture.tail(futureDays)
xfuture = np.array(xfuture)
print(xfuture)
treePrediction = tree.predict(xfuture)
print("Decision Tree prediction =",treePrediction)
linearPrediction = linear.predict(xfuture)
print("Linear regression Prediction =",linearPrediction)
predictions = treePrediction
valid = apple[x.shape[0]:]
valid["Predictions"] = predictions
plt.figure(figsize=(10, 6))
plt.title("Apple's Stock Price Prediction Model(Decision Tree Regressor Model)")
plt.xlabel("Days")
plt.ylabel("Close Price USD ($)")
plt.plot(apple["Close Price"])
plt.plot(valid[["Close Price", "Predictions"]])
plt.legend(["Original", "Valid", "Predictions"])
plt.show()
predictions = linearPrediction
valid = apple[x.shape[0]:]
valid["Predictions"] = predictions
plt.figure(figsize=(10, 6))
plt.title("Apple's Stock Price Prediction Model(Linear Regression Model)")
plt.xlabel("Days")
plt.ylabel("Close Price USD ($)")
plt.plot(apple["Close Price"])
plt.plot(valid[["Close Price", "Predictions"]])
plt.legend(["Original", "Valid", "Predictions"])
plt.show()
|
[
"noreply@github.com"
] |
SomnathMukherjee88.noreply@github.com
|
b0fcb097793bbd9511da6ebe8abcf120b17237e0
|
a5cfb7f657c59cae663a20630d6b8c755ab4f48a
|
/root_numpy/tmva/tests.py
|
f3a04f6f4a1044fc5a2fe0eca5e2f8d282bcc82b
|
[
"MIT"
] |
permissive
|
w-c/root_numpy
|
ef916f8ed5ea963c53dc39aea8f10da0502f2c2f
|
42302b41fa81d5a13cff12556468e08a5698a506
|
refs/heads/master
| 2021-01-12T14:43:58.161670
| 2016-03-18T01:23:01
| 2016-03-18T01:23:01
| 54,745,729
| 1
| 0
| null | 2016-03-25T20:27:52
| 2016-03-25T20:27:52
| null |
UTF-8
|
Python
| false
| false
| 10,042
|
py
|
import os
import tempfile
import shutil
from array import array
import atexit
import numpy as np
from numpy.testing import assert_array_equal
from numpy.random import RandomState
import ROOT
from ROOT import TFile, TCut, TMVA
import root_numpy as rnp
from nose.tools import assert_raises, assert_true, assert_equal
ROOT.gErrorIgnoreLevel = ROOT.kFatal
RNG = RandomState(42)
class TMVA_Estimator(object):
def __init__(self, name, n_vars, n_targets=1,
method='BDT', task='Classification'):
self.name = name
self.n_vars = n_vars
self.n_targets = n_targets
self.method = method
self.task = task
self.tmpdir = tempfile.mkdtemp()
self.output = TFile(os.path.join(self.tmpdir, 'tmva_output.root'),
'recreate')
self.factory = TMVA.Factory(name, self.output,
'AnalysisType={0}:Silent'.format(task))
for n in range(n_vars):
self.factory.AddVariable('X_{0}'.format(n), 'F')
if task == 'Regression':
for n in range(n_targets):
self.factory.AddTarget('y_{0}'.format(n), 'F')
def __del__(self):
self.output.Close()
shutil.rmtree(self.tmpdir)
def fit(self, X, y, X_test=None, y_test=None,
weights=None, weights_test=None,
signal_label=None, **kwargs):
# (re)configure settings since deleting a previous Factory resets all
# this. This is poor design, TMVA.
config = TMVA.gConfig()
config.GetIONames().fWeightFileDir = self.tmpdir
config.SetSilent(True)
config.SetDrawProgressBar(False)
self.factory.DeleteAllMethods()
extra_kwargs = dict()
if self.task == 'Regression':
func = rnp.tmva.add_regression_events
else:
func = rnp.tmva.add_classification_events
extra_kwargs['signal_label'] = signal_label
# test exceptions
assert_raises(TypeError, func, object(), X, y)
assert_raises(ValueError, func,
self.factory, X, y[:y.shape[0] / 2])
if weights is not None:
assert_raises(ValueError, func, self.factory, X, y,
weights=weights[:weights.shape[0]/2])
assert_raises(ValueError, func, self.factory, X, y,
weights=weights[:, np.newaxis])
assert_raises(ValueError, func, self.factory, [[[1, 2]]], [1])
assert_raises(ValueError, func, self.factory, [[1, 2]], [[[1]]])
func(self.factory, X, y, weights=weights, **extra_kwargs)
if X_test is None:
X_test = X
y_test = y
weights_test = weights
func(self.factory, X_test, y_test,
weights=weights_test, test=True, **extra_kwargs)
self.factory.PrepareTrainingAndTestTree(
TCut('1'), 'NormMode=EqualNumEvents')
options = []
for param, value in kwargs.items():
if value is True:
options.append(param)
elif value is False:
options.append('!{0}'.format(param))
else:
options.append('{0}={1}'.format(param, value))
options = ':'.join(options)
self.factory.BookMethod(self.method, self.method, options)
self.factory.TrainAllMethods()
def predict(self, X, aux=0.):
reader = TMVA.Reader()
for n in range(self.n_vars):
reader.AddVariable('X_{0}'.format(n), array('f', [0.]))
reader.BookMVA(self.method,
os.path.join(self.tmpdir,
'{0}_{1}.weights.xml'.format(
self.name, self.method)))
assert_raises(TypeError, rnp.tmva.evaluate_reader,
object(), self.method, X)
assert_raises(ValueError, rnp.tmva.evaluate_reader,
reader, 'DoesNotExist', X)
assert_raises(ValueError, rnp.tmva.evaluate_reader,
reader, self.method, [[[1]]])
if self.task != 'Regression':
assert_raises(ValueError, rnp.tmva.evaluate_reader,
reader, self.method, [1, 2, 3])
output = rnp.tmva.evaluate_reader(reader, self.method, X, aux)
if ROOT.gROOT.GetVersionInt() >= 60300:
method = reader.FindMVA(self.method)
assert_raises(TypeError, rnp.tmva.evaluate_method,
object(), X)
assert_raises(ValueError, rnp.tmva.evaluate_method,
method, [[[1]]])
output_method = rnp.tmva.evaluate_method(method, X, aux)
assert_array_equal(output, output_method)
return output
def make_classification(n_features, n_events_per_class, n_classes):
blobs = []
for idx in range(n_classes):
blob = RNG.multivariate_normal(
np.ones(n_features) * idx * 5,
np.diag(np.ones(n_features)),
n_events_per_class)
blobs.append(blob)
X = np.concatenate(blobs)
# class labels
y = np.repeat(np.arange(n_classes), n_events_per_class) * 2 - 1
# event weights
w = RNG.randint(1, 10, n_events_per_class * n_classes)
# shuffle
permute = RNG.permutation(y.shape[0])
X = X[permute]
y = y[permute]
return X, y, w
def test_tmva_methodcuts():
X, y, w = make_classification(2, 300, 2)
est = TMVA_Estimator('Cuts', 2, method='Cuts')
est.fit(X, y,
FitMethod='MC', EffSel=True, SampleSize=100,
VarProp='FSmart')
y_predict_1 = est.predict(X, 0.1)
y_predict_9 = est.predict(X, 0.9)
assert_true((y_predict_1 != y_predict_9).any())
assert_true((y_predict_1 <= y_predict_9).all())
def test_tmva_twoclass():
n_vars = 2
n_events = 1000
X, y, w = make_classification(n_vars, n_events, 2)
X_train, y_train, w_train = X[:n_events], y[:n_events], w[:n_events]
X_test, y_test, w_test = X[n_events:], y[n_events:], w[n_events:]
clf = TMVA_Estimator('unweighted', n_vars)
clf.fit(X_train, y_train, X_test=X_test, y_test=y_test,
nCuts=20, NTrees=10, MaxDepth=3)
y_decision = clf.predict(X_test)
y_predicted = 2 * (y_decision > 0) - 1
assert_true(np.sum(np.abs(y_predicted - y_test)) < 0.1 * y_test.shape[0])
clf = TMVA_Estimator('unweighted_label', n_vars)
clf.fit(X_train, y_train, X_test=X_test, y_test=y_test, signal_label=1,
nCuts=20, NTrees=10, MaxDepth=3)
y_decision_label = clf.predict(X_test)
assert_array_equal(y_decision_label, y_decision)
# train with weights
clf = TMVA_Estimator('weighted', n_vars)
clf.fit(X_train, y_train, X_test=X_test, y_test=y_test,
weights=w_train, weights_test=w_test,
nCuts=20, NTrees=10, MaxDepth=3)
y_decision_weighted = clf.predict(X_test)
assert_true(np.any(y_decision_weighted != y_decision))
# unit weights should not change output
clf = TMVA_Estimator('unit_weights', n_vars)
clf.fit(X_train, y_train, X_test=X_test, y_test=y_test,
weights=np.ones(y_train.shape[0]),
weights_test=np.ones(y_test.shape[0]),
nCuts=20, NTrees=10, MaxDepth=3)
y_decision_unit_weights = clf.predict(X_test)
assert_array_equal(y_decision, y_decision_unit_weights)
# events can be 1D
clf = TMVA_Estimator('onedim_events', 1)
clf.fit(X_train[:, 0], y_train, X_test=X_test[:, 0], y_test=y_test,
nCuts=20, NTrees=10, MaxDepth=3)
def test_tmva_multiclass():
n_vars = 2
n_events = 500
X, y, w = make_classification(n_vars, n_events, 3)
# Split into training and test datasets
X_train, y_train, w_train = X[:n_events], y[:n_events], w[:n_events]
X_test, y_test, w_test = X[n_events:], y[n_events:], w[n_events:]
clf = TMVA_Estimator('unweighted', n_vars, task='Multiclass')
clf.fit(X_train, y_train, X_test=X_test, y_test=y_test,
nCuts=20, NTrees=10, MaxDepth=3,
BoostType='Grad', Shrinkage='0.10')
y_decision = clf.predict(X_test)
# Class probabilities should sum to one
assert_array_equal(np.sum(y_decision, axis=1),
np.ones(y_decision.shape[0]))
def test_tmva_regression():
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + \
np.sin(6 * X).ravel() + \
RNG.normal(0, 0.1, X.shape[0])
w = RNG.randint(1, 10, y.shape[0])
reg = TMVA_Estimator('regressor', 1, task='Regression')
reg.fit(np.ravel(X), y, X_test=X, y_test=y,
nCuts=20, NTrees=10, MaxDepth=3,
boosttype='AdaBoostR2', SeparationType='RegressionVariance')
y_predict = reg.predict(np.ravel(X))
assert_equal(y_predict.ndim, 1)
# train with weights
reg = TMVA_Estimator('regressor_weighted', 1, task='Regression')
reg.fit(X, y, X_test=X, y_test=y, weights=w, weights_test=w,
nCuts=20, NTrees=10, MaxDepth=3,
boosttype='AdaBoostR2', SeparationType='RegressionVariance')
y_predict_weighted = reg.predict(X)
assert_true(np.any(y_predict_weighted != y_predict))
# unit weights should not change output
reg = TMVA_Estimator('regressor_unit_weights', 1, task='Regression')
reg.fit(X, y, X_test=X, y_test=y,
weights=np.ones(y.shape[0]), weights_test=np.ones(y.shape[0]),
nCuts=20, NTrees=10, MaxDepth=3,
boosttype='AdaBoostR2', SeparationType='RegressionVariance')
y_predict_unit_weights = reg.predict(X)
assert_array_equal(y_predict_unit_weights, y_predict)
# Multi-output
y_multi = np.c_[y, 1. - y]
reg = TMVA_Estimator('regressor_multioutput', 1, n_targets=2,
method='KNN', task='Regression')
reg.fit(X, y_multi, X_test=X, y_test=y_multi,
nkNN=20, ScaleFrac=0.8, SigmaFact=1.0, Kernel='Gaus', UseKernel='F',
UseWeight='T')
y_predict = reg.predict(X)
assert_equal(y_predict.ndim, 2)
|
[
"noel.dawe@gmail.com"
] |
noel.dawe@gmail.com
|
ab497be63b96de1653e351973f94ca5afe61d49d
|
6751b8da62c0e862599e7b56f3e4bbc646047e7e
|
/convertImg.py
|
aee175b0607d32db4c8170f6da759b872123a280
|
[] |
no_license
|
jinlongwang/pic2word
|
2b00dc8d28b1ee6066c160f8862d8eeeac663099
|
a92128edcf33f477218870556bb7010364da036d
|
refs/heads/master
| 2020-04-06T06:16:05.276615
| 2015-06-18T03:48:09
| 2015-06-18T03:48:09
| 37,451,733
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,585
|
py
|
# -*- coding=utf-8 -*-
from PIL import Image
def convertImg(src):
res = []
img = Image.open(src)
img = img.convert("L")
img.save("tem.jpg")
pixel = img.load()
#print list(img.getdata())
h = img.size[0]
w = img.size[1]
if h != w:
raise Exception("error")
print "hight: %s, width: %s" %(h,w)
for i in range(h):
row = ""
for j in range(w):
#print i, j
row += toBinary(pixel[j,i])
res.append(row)
return res
def toBinary(value):
print (value+1)%32
res = '1'
if value <= 127:
res = '0'
return res
class Img2ascii:
__chars=[' ', ',', '+', '1', 'n','D','&','M']
def getchar(self,pi):
for i in range(0,8):
if pi< (i+1)*32:
return self.__chars[7-i]
def __init__(self,src,resize=1.0):
img = Image.open(src)
if img.mode=='P' or img.mode =='RGBA':
im=Image.new('RGB',img.size,'white')
im.paste(img.convert('RGBA'),img.convert('RGBA'))
img= im
img= img.convert('L')
w,h =img.size
h/=2
w=int(w*resize)
h=int(h*resize)
img=img.resize((w,h),Image.ANTIALIAS)
#img.save('tmp.jpg')
pixs = img.load()
self.data=[]
for i in range(0,h):
line =''
for j in range(0,w):
line+=self.getchar(pixs[j,i])
self.data.append(line)
a = convertImg("img.jpg")
#output = file('aaa.txt','w')
#for line in a.data:
# print >>output, line
#output.close()
|
[
"jinlongwang89@gmail.com"
] |
jinlongwang89@gmail.com
|
4ec31459287dc5c81a8a93a1536b9bf25e9cd3d0
|
105360f2e39a0d53c95341f8edd3cb2d6de135f3
|
/Assignment 4 plotting and Extension.py
|
9d18142d649b6f24a32e0c277ac28540613b80ea
|
[] |
no_license
|
haicizh/Assignment-4
|
0a56b1326b65b1ac99081c53a126aa9692417c2e
|
a0e4141c907168d74ab9c5a72ee887e05fe3ceac
|
refs/heads/master
| 2020-05-09T18:03:50.244146
| 2019-04-14T15:10:02
| 2019-04-14T15:10:02
| 181,325,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,180
|
py
|
# coding: utf-8
# In[1]:
import os
import pandas as pd
import numpy as np
import random
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
# In[2]:
os.chdir(r"D:\Shiny\Machine Learning\Assignment 4")
# In[3]:
sm_value = pd.read_csv("SM_value.csv")
sm_policy = pd.read_csv("SM_policy.csv")
# In[4]:
lm_value = pd.read_csv("LM_value.csv")
lm_policy = pd.read_csv("LM_policy.csv")
# In[6]:
lm_value.head()
# In[56]:
smv_step = sm_value.iloc[:,1]
smv_reward = sm_value.iloc[:,2]
smv_time = sm_value.iloc[:,3]
smp_step = sm_policy.iloc[:,1]
smp_reward = sm_policy.iloc[:,2]
smp_time = sm_policy.iloc[:,3]
# In[7]:
lmv_step = lm_value.iloc[:,1]
lmv_reward = lm_value.iloc[:,2]
lmv_time = lm_value.iloc[:,3]
lmp_step = lm_policy.iloc[:,1]
lmp_reward = lm_policy.iloc[:,2]
lmp_time = lm_policy.iloc[:,3]
# In[8]:
n = np.size(lmv_step)
# In[9]:
np.size(lmp_step)
# In[10]:
def compare_time(n,x,y,title):
plt.figure()
plt.title("Model Training Times: " + title)
plt.xlabel("Iteration")
plt.ylabel("Time (in milliseconds)")
plt.plot(n, x, '-', color="b", label="Value Iteration")
plt.plot(n, y, '-', color="r", label="Policy Iteration")
plt.legend(loc="best")
plt.show()
def compare_reward(n,x,y, title):
plt.figure()
plt.title("Model Reward: " + title)
plt.xlabel("Iteration")
plt.ylabel("Reward")
plt.plot(n, x, '-', color="b", label="Value Iteration")
plt.plot(n, y, '-', color="r", label="Policy Iteration")
plt.legend(loc="best")
plt.show()
def compare_step(n,x,y, title):
plt.figure()
plt.title("Model Step: " + title)
plt.xlabel("Iteration")
plt.ylabel("Step")
plt.plot(n, x, '-', color="b", label="Value Iteration")
plt.plot(n, y, '-', color="r", label="Policy Iteration")
plt.legend(loc="best")
plt.show()
# In[14]:
k = (np.linspace(.01, 1.0, 200)*n).astype('int')
# In[15]:
k
# In[18]:
compare_time(k,lmv_time, lmp_time,'Large Maze')
# In[17]:
compare_reward(k, lmv_reward, lmp_reward, 'Large Maze')
compare_step(k, lmv_step, lmp_step, 'Large Maze')
# In[65]:
smq_1 = pd.read_csv("SMQ_0.1.csv")
smq_2 = pd.read_csv("SMQ_0.3.csv")
smq_3 = pd.read_csv("SMQ_0.5.csv")
smq_4 = pd.read_csv("SMQ_0.7.csv")
smq_5 = pd.read_csv("SMQ_0.9.csv")
# In[19]:
lm_1 = pd.read_csv("LM_0.1.csv")
lm_2 = pd.read_csv("LM_0.3.csv")
lm_3 = pd.read_csv("LM_0.5.csv")
lm_4 = pd.read_csv("LM_0.7.csv")
lm_5 = pd.read_csv("LM_0.9.csv")
# In[20]:
lm_1.head()
# In[21]:
lm1_step = lm_1.iloc[:,1]
lm1_reward = lm_1.iloc[:,2]
lm1_time = lm_1.iloc[:,3]
lm2_step = lm_2.iloc[:,1]
lm2_reward = lm_2.iloc[:,2]
lm2_time = lm_2.iloc[:,3]
lm3_step = lm_3.iloc[:,1]
lm3_reward = lm_3.iloc[:,2]
lm3_time = lm_3.iloc[:,3]
lm4_step = lm_4.iloc[:,1]
lm4_reward = lm_4.iloc[:,2]
lm4_time = lm_4.iloc[:,3]
lm5_step = lm_5.iloc[:,1]
lm5_reward = lm_5.iloc[:,2]
lm5_time = lm_5.iloc[:,3]
# In[22]:
def compare_time(n,x,y,z,m,l,title):
plt.figure()
plt.title("Model Training Times: " + title)
plt.xlabel("Iteration")
plt.ylabel("Time (in milliseconds)")
plt.plot(n, x, '-', color="k", label="Learning rate = 0.1")
plt.plot(n, y, '-', color="b", label="Learning rate = 0.3")
plt.plot(n, z, '-', color="r", label="Learning rate = 0.5")
plt.plot(n, m, '-', color="g", label="Learning rate = 0.7")
plt.plot(n, l, '-', color="m", label="Learning rate = 0.9")
plt.legend(loc="best")
plt.show()
def compare_reward(n,x,y,z,m,l, title):
plt.figure()
plt.title("Model Reward: " + title)
plt.xlabel("Iteration")
plt.ylabel("Reward")
plt.plot(n, x, '-', color="k", label="Learning rate = 0.1")
plt.plot(n, y, '-', color="b", label="Learning rate = 0.3")
plt.plot(n, z, '-', color="r", label="Learning rate = 0.5")
plt.plot(n, m, '-', color="g", label="Learning rate = 0.7")
plt.plot(n, l, '-', color="m", label="Learning rate = 0.9")
plt.legend(loc="best")
plt.show()
def compare_step(n,x,y,z,m,l, title):
plt.figure()
plt.title("Model Step: " + title)
plt.xlabel("Iteration")
plt.ylabel("Step")
plt.plot(n, x, '-', color="k", label="Learning rate = 0.1")
plt.plot(n, y, '-', color="b", label="Learning rate = 0.3")
plt.plot(n, z, '-', color="r", label="Learning rate = 0.5")
plt.plot(n, m, '-', color="g", label="Learning rate = 0.7")
plt.plot(n, l, '-', color="m", label="Learning rate = 0.9")
plt.legend(loc="best")
plt.show()
# In[23]:
n= np.size(lm1_step )
# In[70]:
n= np.size(smq1_step )
# In[24]:
n
# In[30]:
k = (np.linspace(.001, 1.0, 1000)*n).astype('int')
# In[31]:
k
# In[32]:
compare_time(k,lm1_time, lm2_time,lm3_time,lm4_time,lm5_time,'Large Maze')
compare_reward(k, lm1_reward, lm2_reward,lm3_reward,lm4_reward,lm5_reward, 'Large Maze')
compare_step(k, lm1_step, lm2_step,lm3_step,lm4_step,lm5_step, 'Large Maze')
# In[1]:
from IPython.display import HTML
HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/q2ZOEFAaaI0?showinfo=0" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>')
# In[2]:
import numpy as np
import gym
import random
# In[3]:
from gym.envs.registration import register
register(
id='FrozenLakeNotSlippery-v0',
entry_point='gym.envs.toy_text:FrozenLakeEnv',
kwargs={'map_name' : '4x4', 'is_slippery': False},
max_episode_steps=100,
    reward_threshold=0.8196, # optimum = .8196; changing this seems to have no influence
)
# In[4]:
env = gym.make("FrozenLakeNotSlippery-v0")
# In[5]:
#env = gym.make("FrozenLake-v0")
# In[6]:
action_size = env.action_space.n
state_size = env.observation_space.n
# In[7]:
qtable = np.zeros((state_size, action_size))
# In[34]:
total_episodes = 200 # Total episodes
learning_rate = 0.8 # Learning rate
max_steps = 99 # Max steps per episode
gamma = 0.95 # Discounting rate
# Exploration parameters
epsilon = 0.1 # Exploration rate
max_epsilon = 0.1 # Exploration probability at start
min_epsilon = 0.01 # Minimum exploration probability
decay_rate = 0.001 # Exponential decay rate for exploration prob
#I find that decay_rate=0.001 works much better than 0.01
# In[35]:
rewards = []
# 2 For life or until learning is stopped
for episode in range(total_episodes):
# Reset the environment
state = env.reset()
step = 0
done = False
total_rewards = 0
for step in range(max_steps):
# 3. Choose an action a in the current world state (s)
## First we randomize a number
exp_exp_tradeoff = random.uniform(0, 1)
## If this number > greater than epsilon --> exploitation (taking the biggest Q value for this state)
if exp_exp_tradeoff > epsilon:
action = np.argmax(qtable[state,:])
# Else doing a random choice --> exploration
else:
action = env.action_space.sample()
# Take the action (a) and observe the outcome state(s') and reward (r)
new_state, reward, done, info = env.step(action)
# Update Q(s,a):= Q(s,a) + lr [R(s,a) + gamma * max Q(s',a') - Q(s,a)]
# qtable[new_state,:] : all the actions we can take from new state
qtable[state, action] = qtable[state, action] + learning_rate * (reward + gamma * np.max(qtable[new_state, :]) - qtable[state, action])
total_rewards =total_rewards + reward
# Our new state is state
state = new_state
# If done (if we're dead) : finish episode
if done == True:
break
episode += 1
# Reduce epsilon (because we need less and less exploration)
epsilon = min_epsilon + (max_epsilon - min_epsilon)*np.exp(-decay_rate*episode)
rewards.append(total_rewards)
print ("Score over time: " + str(sum(rewards)/total_episodes))
print(qtable)
print(epsilon)
# In[22]:
env.reset()
env.render()
print(np.argmax(qtable,axis=1).reshape(4,4))
|
[
"noreply@github.com"
] |
haicizh.noreply@github.com
|
e9f46372c63c6378dc0057561505e5256b7b4bc4
|
bd49c7977fb9fa6c1cd3bf040396eaec077b5b22
|
/items.py
|
91c95f18944ef7e3fb343b5fbb8027c280cd5a23
|
[
"MIT"
] |
permissive
|
Jasper2-0/scrape-fontaneljobs
|
d1be5d91f3fa71bb79b4017ba5e64210d3605790
|
f174e42cc2b352302e7432a57a5bad42ea3ee8b0
|
refs/heads/master
| 2020-11-27T05:24:59.322340
| 2019-12-22T22:47:50
| 2019-12-22T22:47:50
| 229,321,207
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 793
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ScrapeFontaneljobsItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
class CompanyItem(scrapy.Item):
url = scrapy.Field()
logo_url = scrapy.Field()
meta_data = scrapy.Field()
name = scrapy.Field()
short_description = scrapy.Field()
long_description = scrapy.Field()
address = scrapy.Field()
postal_code = scrapy.Field()
city = scrapy.Field()
class VacancyItem(scrapy.Item):
title = scrapy.Field()
short_description = scrapy.Field()
company = scrapy.Field()
job_type = scrapy.Field()
date = scrapy.Field()
|
[
"jasper@jasperschelling.com"
] |
jasper@jasperschelling.com
|
a18e8dceb99b923a3e54ae3ca00e5fcb3d665c0b
|
7663f23c3edf3026bbf729adf5a75a6b954fb940
|
/Project_2/test_gilded_rose.py
|
b24b7cefdf1f4148b22d66ab656c7da1ae9948a1
|
[] |
no_license
|
mehraj24158/gilded-rose
|
b330274932609ece1482bdb95ea6b6265c4bad89
|
f4c7c7a8ae6bd232a0aad84a1b2133ad3a1f5f12
|
refs/heads/master
| 2023-01-05T09:00:34.823937
| 2020-11-03T02:44:43
| 2020-11-03T02:44:43
| 299,934,577
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,790
|
py
|
import pytest
from Refactor.gilded_rose import *
"""
Global item list for testing.
"""
items = [
Item(name="+5 Dexterity Vest", sell_in=10, quality=20),
Aged(name="Aged Brie", sell_in=2, quality=0),
Item(name="Elixir of the Mongoose", sell_in=5, quality=7),
LegendaryItem(name="Sulfuras, Hand of Ragnaros", sell_in=0, quality=80),
LegendaryItem(name="Sulfuras, Hand of Ragnaros", sell_in=-1, quality=80),
BackStagePass(name="Backstage passes to a TAFKAL80ETC concert", sell_in=15, quality=20),
BackStagePass(name="Backstage passes to a TAFKAL80ETC concert", sell_in=10, quality=27),
BackStagePass(name="Backstage passes to a TAFKAL80ETC concert", sell_in=5, quality=42),
BackStagePass(name="Backstage passes to a TAFKAL80ETC concert", sell_in=2, quality=49),
ConjuredItem(name="Conjured Mana Cake", sell_in=3, quality=6),
]
@pytest.fixture
def test_load():
print("\n----SETUP----")
gild_rose = Inventory(items)
print(gild_rose)
return gild_rose
def test_equality(test_load):
# Ensures the __eq__ method correctly compares items
for inventory_item, item in zip(test_load.items, items):
assert inventory_item == item
def test_print(test_load):
# Ensures the Inventory __str__ method correctly prints all items within the inventory
assert str(test_load) == str(['+5 Dexterity Vest, 10, 20', 'Aged Brie, 2, 0', 'Elixir of the Mongoose, 5, 7',
'Sulfuras, Hand of Ragnaros, 0, 80', 'Sulfuras, Hand of Ragnaros, -1, 80', 'Backstage passes to a TAFKAL80ETC concert, 15, 20',
'Backstage passes to a TAFKAL80ETC concert, 10, 27', 'Backstage passes to a TAFKAL80ETC concert, 5, 42',
'Backstage passes to a TAFKAL80ETC concert, 2, 49', 'Conjured Mana Cake, 3, 6'])
def test_update(test_load):
# Ensures the update method updates the inventory correctly
test_load.update()
new_items = [
Item(name="+5 Dexterity Vest", sell_in=9, quality=19),
Aged(name="Aged Brie", sell_in=1, quality=1),
Item(name="Elixir of the Mongoose", sell_in=4, quality=6),
LegendaryItem(name="Sulfuras, Hand of Ragnaros", sell_in=0, quality=80),
LegendaryItem(name="Sulfuras, Hand of Ragnaros", sell_in=-1, quality=80),
BackStagePass(name="Backstage passes to a TAFKAL80ETC concert", sell_in=14, quality=21),
BackStagePass(name="Backstage passes to a TAFKAL80ETC concert", sell_in=9, quality=29),
BackStagePass(name="Backstage passes to a TAFKAL80ETC concert", sell_in=4, quality=45),
BackStagePass(name="Backstage passes to a TAFKAL80ETC concert", sell_in=1, quality=50),
ConjuredItem(name="Conjured Mana Cake", sell_in=2, quality=4),
]
assert test_load.items == new_items
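# Usage note (added; not part of the original test module): assuming pytest is
# installed and Refactor/gilded_rose.py is importable from the project root,
# the suite can be run with e.g. `pytest Project_2/test_gilded_rose.py -v`.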
|
[
"mehraj24158@gmail.com"
] |
mehraj24158@gmail.com
|
1c68155f88d608d49cc19d91519520eeaafcb002
|
d88ad64b1dfaba11f55e8da2934541b02f75bbde
|
/arduino.py
|
1adae7dd26a84a3ad9a85a4535c5bddd898f0a21
|
[] |
no_license
|
Chusa007/Face_Recognition
|
cb4838bfb3dc0bc255722002a1a83ab4cf5b0b9f
|
bfd552f026ae895475688a8c820eed68caffaf25
|
refs/heads/master
| 2021-10-03T10:05:32.400469
| 2018-12-02T11:49:15
| 2018-12-02T11:49:15
| 159,300,861
| 1
| 1
| null | 2018-12-01T21:36:35
| 2018-11-27T08:24:53
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 167
|
py
|
import serial
import time
ser = serial.Serial('/dev/ttyACM0')
ser.baudrate = 9600
# ser = serial.Serial('COM3', 9600)
time.sleep(2)
ser.write(str.encode("Unknown#"))
|
[
"chusa007@mail.ru"
] |
chusa007@mail.ru
|
e3dbc85560e90e24e1d68b173e25f658e8bf6a7d
|
eaf67c8d08ac7fb3437e521c90abaeda290488b9
|
/tensorflow_federated/python/core/impl/executors/cardinalities_utils.py
|
023d459538f8cebaf8b83e97183cfce3c388a662
|
[
"Apache-2.0"
] |
permissive
|
mhalawad/federated
|
a89194a4b03205fa272b29931bb56e6d0603aaca
|
d683db15d87eb545e3df6530a698ba1c785ebe79
|
refs/heads/master
| 2022-12-22T00:18:43.005904
| 2020-08-26T20:34:43
| 2020-08-26T20:35:15
| 290,794,371
| 1
| 0
|
Apache-2.0
| 2020-08-27T14:15:56
| 2020-08-27T14:15:55
| null |
UTF-8
|
Python
| false
| false
| 3,380
|
py
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common functions needed across executor classes."""
import collections
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.impl.types import placement_literals
def merge_cardinalities(existing, to_add):
"""Merges dicts `existing` and `to_add`, checking for conflicts."""
py_typecheck.check_type(existing, dict)
py_typecheck.check_type(to_add, dict)
for key, val in existing.items():
py_typecheck.check_type(key, placement_literals.PlacementLiteral)
py_typecheck.check_type(val, int)
if not to_add:
return existing
elif not existing:
return to_add
cardinalities = {}
cardinalities.update(existing)
for key, val in to_add.items():
py_typecheck.check_type(key, placement_literals.PlacementLiteral)
py_typecheck.check_type(val, int)
if key not in cardinalities:
cardinalities[key] = val
elif cardinalities[key] != val:
raise ValueError('Conflicting cardinalities for {}: {} vs {}'.format(
key, val, cardinalities[key]))
return cardinalities
def infer_cardinalities(value, type_spec):
"""Infers cardinalities from Python `value`.
Allows for any Python object to represent a federated value; enforcing
particular representations is not the job of this inference function, but
rather ingestion functions lower in the stack.
Args:
value: Python object from which to infer TFF placement cardinalities.
type_spec: The TFF type spec for `value`, determining the semantics for
inferring cardinalities. That is, we only pull the cardinality off of
federated types.
Returns:
Dict of cardinalities.
Raises:
ValueError: If conflicting cardinalities are inferred from `value`.
TypeError: If the arguments are of the wrong types, or if `type_spec` is
a federated type which is not `all_equal` but the yet-to-be-embedded
`value` is not represented as a Python `list`.
"""
py_typecheck.check_not_none(value)
py_typecheck.check_type(type_spec, computation_types.Type)
if type_spec.is_federated():
if type_spec.all_equal:
return {}
py_typecheck.check_type(value, collections.Sized)
return {type_spec.placement: len(value)}
elif type_spec.is_struct():
structure_value = structure.from_container(value, recursive=False)
cardinality_dict = {}
for idx, (_, elem_type) in enumerate(structure.to_elements(type_spec)):
cardinality_dict = merge_cardinalities(
cardinality_dict, infer_cardinalities(structure_value[idx],
elem_type))
return cardinality_dict
else:
return {}
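# Minimal usage sketch (added for illustration; assumes the CLIENTS and SERVER
# literals defined in placement_literals):
#
#   merge_cardinalities({placement_literals.CLIENTS: 10},
#                       {placement_literals.SERVER: 1})
#   # -> {CLIENTS: 10, SERVER: 1}; conflicting counts for the same placement
#   #    raise ValueError.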
|
[
"tensorflow.copybara@gmail.com"
] |
tensorflow.copybara@gmail.com
|
9f747d70aee6ad7ca96404b2a149820dd89c86a2
|
d38328298ffa320bac87f0ec08df6c04d75cde41
|
/testing_main.py
|
ec8de6cacbf6a6e6b5dc6c76e2ea4f8e7a758897
|
[] |
no_license
|
Wang3960/deep_learning_for_demosaicing
|
0bc5a3886f52d74052318f51b369b614c4945f35
|
6ef83c2763ce06ae2df18c4ac7fb1c26881f352d
|
refs/heads/main
| 2023-04-21T20:34:17.657950
| 2021-05-12T01:12:36
| 2021-05-12T01:12:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,413
|
py
|
import os
import math
from tqdm import tqdm
from PIL import Image
import pickle
import torch
import torch.nn as nn
import numpy as np
import argparse
from testing_model import test_model
from model import Net_Superresolution,REDNet_model,SRCNN,model_with_upsampling,VDSR_Net
from model_utils import str2bool
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if __name__ == "__main__":
# Create the parser
parser = argparse.ArgumentParser()
# Add an argument
parser.add_argument("--trial_number", type=int, required=True,
help="Trial number.")
parser.add_argument("--model", type=str, required=True,
help="The model you want to use.")
parser.add_argument("--psnr_only", type=str2bool, nargs='?',
default=False,
help="Compute psnr only, without saving resulting images.")
# Parse the argument
args = parser.parse_args()
# assign the variables according to the argument values
trialNumber = args.trial_number
psnr_only = args.psnr_only
if args.model == 'deep_residual_network':
model = Net_Superresolution(withRedNet=False,withSRCNN=False)
elif args.model == 'deep_residual_network_rednet':
model = Net_Superresolution(withRedNet=True,withSRCNN=False)
elif args.model == 'deep_residual_network_SRCNN':
model = Net_Superresolution(withRedNet=False,withSRCNN=True)
elif args.model == 'rednet':
model = REDNet_model()
elif args.model == 'SRCNN':
SRCNN_model = SRCNN(num_channels=3)
model = model_with_upsampling(SRCNN_model)
elif args.model == 'VDSR':
VDSR_model = VDSR_Net()
model = model_with_upsampling(VDSR_model)
else:
        # argparse.ArgumentError needs the offending argument as well as a message;
        # parser.error() is the supported way to abort with a usage error here.
        parser.error("Invalid model")
var_save_dir = './data/variables'
var_name = 'dataloaders'+'_trial'+str(trialNumber)+'.pkl'
path = var_save_dir + '/' + var_name
with open(path, 'rb') as file:
# Call load method to deserialze
dataloaders = pickle.load(file)
model_path_retrieve = "./model/"+"trial"+str(trialNumber)+".pth"
model.load_state_dict(torch.load(model_path_retrieve))
model = model.to(device)
image_saving_dir = './data/CUB200_outs'
if not os.path.exists(image_saving_dir):
os.makedirs(image_saving_dir)
if psnr_only:
test_model(model,image_saving_dir,dataloaders,psnr_only=True)
else:
test_model(model,image_saving_dir,dataloaders)
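# Example invocation (added; values are illustrative). The trial's dataloader
# pickle and trained weights must already exist under ./data/variables and
# ./model:
#   python testing_main.py --trial_number 1 --model rednet
# (pass --psnr_only true to skip saving images, assuming str2bool accepts 'true').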
|
[
"lauhoiyangladys@gmail.com"
] |
lauhoiyangladys@gmail.com
|
4331304248d90d998e7b9838a4f4c35eae4055d5
|
a46b064486b703b5424a5e59fb6d567a0c08d480
|
/setup.py
|
30e73a7ef6a7a63f779f7f363b895ca7e75aae22
|
[
"MIT"
] |
permissive
|
nick-youngblut/pushmsg
|
1dd3ca23dbfa8277f92b7261c5eabeb6ea5bd3c6
|
389cd22476077198593bd4b4af3900fd1644da65
|
refs/heads/master
| 2022-07-23T14:52:46.886835
| 2020-05-23T19:21:22
| 2020-05-23T19:21:22
| 71,820,460
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 776
|
py
|
from setuptools import setup, find_packages
import os
import pushmsg
VERSION = pushmsg.__version__
install_reqs = [
'ipython',
'configparser',
'pushbullet.py>=0.10.0'
]
desc = """An IPython magic extension for sending notifications with Pushbullet.
It is useful for setting notifications for the completion of long jobs.
"""
setup(
name='pushmsg',
version=VERSION,
license='MIT',
description=('IPython magic function to send notifications with Pushbullet'),
author='Nicholas Youngblut',
author_email='nyoungb2@gmail.com',
url='https://github.com/nick-youngblut/pushmsg',
packages=find_packages(exclude=[]),
scripts=['scripts/pushmsg',
'scripts/pushmsg_qstat'],
install_requires=install_reqs,
long_description=desc
)
|
[
"nicholas.youngblut@tuebingen.mpg.de"
] |
nicholas.youngblut@tuebingen.mpg.de
|
2d70297ee8e965de82220eaf045dfbba8bb59865
|
17cd34729422f7f22c8b71c715d57954bcd1577e
|
/events.py
|
9472826c1178b99c34fbb733973f83f35f31db79
|
[] |
no_license
|
ehsan108190/shinytouch
|
4de4a0e5cbd4f43b1b26f925bfc641d3cf0d62fa
|
57f7e3e99c2b739192c58119cc5e9b2f8602312e
|
refs/heads/master
| 2021-01-01T20:41:03.132467
| 2010-01-17T02:10:32
| 2010-01-17T02:10:32
| 37,240,090
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,458
|
py
|
import tuio
import math
oxd, oyd = 0, 0
#import mouse
#mousectl = mouse.MouseControl()
#mousedown = False
#lastcoord = (0,0)
#lasttime = 0
def dist(x1, y1, x2, y2):
return math.sqrt((x2-x1)*(x2-x1) + (y2-y1)*(y2-y1))
def handle_touch(x, y):
global width, height
#THE MOUSE CONTROLS ON LINUX ONLY
#THIS MOUSE STUFF IS DEPRECATED. USE TUIOMOUSE OR SIMILAR INSTEAD.
#global mousectl, mousedown, lastcoord, lasttime
#import datetime, math
#if lasttime != 0:
# dist = math.sqrt((xd-lastcoord[0])*(xd-lastcoord[0]) + (yd-lastcoord[1])*(yd-lastcoord[1]))
# timediff = datetime.datetime.now() - lasttime
# if dist/(timediff.microseconds/1000) < 5:
# if mousedown == False:
# mousedown = True
# mousectl.mouse_down(1)
# scr = mousectl.get_screen_resolution()
# mousectl.mouse_warp(int(1600*(xd/width.0)),int(1200*(yd/height.0)))
#lasttime = datetime.datetime.now()
xd = width * x
yd = height * y
draw2.rectangle(((xd-2, yd-2),(xd+2, yd+2)), outline=(100,255,100))
global oxd, oyd
if oxd and oyd and dist(xd, yd, oxd, oyd) < 100:
draw2.line(((oxd, oyd),(xd, yd)), fill=(100,255,100))
oxd=xd
oyd=yd
tuio.alive([1]) #one alive
tuio.fseq()
tuio.move(1, x, y)
def handle_lift():
#global mousectl, mousedown
#if mousedown == True:
# pass
# mousectl.mouse_up(1)
#mousedown = False
global oxd, oyd
oxd = 0.0
oyd = 0.0
tuio.alive([]) #none alive
|
[
"antimatter15@56b13c6e-67d3-11de-a84c-2f1c77ec4fbd"
] |
antimatter15@56b13c6e-67d3-11de-a84c-2f1c77ec4fbd
|
aba58cfea3f786c3c26480ea8a19b55e513c103f
|
0b27f0352a75035a8e9bdabb67d137195d3ec789
|
/conv_network.py
|
235b9d8064716033592d67f4571e95e5a43a6f47
|
[] |
no_license
|
mneis1/caption_net
|
b7ced16396abad724ddcc02d8d1dcd843b5ed36e
|
6c4fb619d78cd80f5f0ea8914f2690965d48598f
|
refs/heads/master
| 2020-05-24T13:31:04.087451
| 2019-05-22T03:59:00
| 2019-05-22T03:59:00
| 187,290,479
| 0
| 0
| null | 2019-05-22T03:56:48
| 2019-05-17T22:54:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,775
|
py
|
from keras.applications import vgg16, inception_v3
from keras.models import Model
from keras.preprocessing import image
from PIL import Image
import numpy as np
import os
import pickle
class conv_network:
def __init__(self, imgnet_model_name):
self.model_name = imgnet_model_name
self.imgnet_model, self.model_params = self.fetch_model(imgnet_model_name) #load_model
#remove softmax layer
self.model = Model(inputs=self.imgnet_model.input, outputs=self.imgnet_model.layers[-2].output)
#self.model_params = fetch_model_params(imgnet_model_name)
def get_img_features(self, image_path):
dim = self.model_params['resize_dims']
img = image.load_img(image_path, target_size=(dim,dim))
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
#img = img.reshape((-1,img.shape[0],img.shape[1],img.shape[2]))
img = self.img_preprocessing(img)
pred = self.model.predict(img, verbose=0)
return pred
def img_preprocessing(self, img):
model_name_ = self.model_name
if model_name_ == 'vgg16':
return vgg16.preprocess_input(img)
elif model_name_ == 'inception_v3':
return inception_v3.preprocess_input(img)
def compile_features(self, data_path):
features_dict = dict()
for image in os.listdir(data_path):
image_path = data_path + '/' + image
img_id, _ = os.path.splitext(image)
features_dict[img_id] = self.get_img_features(image_path)
        # dump features to a file so the program doesn't have to recompute them every time
dump_loc = open('data_features.pkl','wb')
pickle.dump(features_dict, dump_loc)
def fetch_model(self, model_name):
if model_name == 'vgg16':
model = vgg16.VGG16()
model_params = {'resize_dims': 224}
elif model_name == 'inception_v3':
model = inception_v3.InceptionV3()
model_params = {'resize_dims': 299}
#TODO: add error for incorrect model name
return model, model_params
def fetch_model_params(self, model_name):
#get expected resize model expects and other params specific to the pretrained net
if model_name == 'vgg16':
model_params = {'resize_dims': 224}
elif model_name == 'inception_v3':
model_params = {'resize_dims': 299}
return model_params
def load_features(self, dir='data_features.pkl'):
return pickle.load(open(dir, 'rb'))
#returns features that are in a given set of data. data should be a list of data id's to compare
def filter_features(self, features, data):
return {id_: features[id_] for id_ in data}
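# Illustrative usage (added; not in the original module). The folder and image
# names below are hypothetical, and building the network downloads pretrained
# ImageNet weights through Keras:
#
#   net = conv_network('vgg16')               # VGG16 with its softmax layer removed
#   feats = net.get_img_features('cat.jpg')   # feature vector for one image
#   net.compile_features('images')            # cache a folder's features in data_features.pkl
#   cached = net.load_features()              # reload the {image_id: features} dict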
|
[
"marcneisser@Marcs-MacBook-Pro.local"
] |
marcneisser@Marcs-MacBook-Pro.local
|
4a0be4007c24f7b752c819eb6e9f9fa481354984
|
d0d8ca85502ab9f336ca22ee7a6e9fcd0e522ea3
|
/壁纸爬虫/pic.py
|
197e20aed216ff06d5126b77023cc58ec6253ede
|
[] |
no_license
|
0xl2oot/notes
|
7d752a643cde157bce21a50f1a7eb6c35066d809
|
2e0b2ef6a95085cbf0297a837948de0c753c3365
|
refs/heads/master
| 2021-05-09T11:35:30.717905
| 2019-03-20T10:18:57
| 2019-03-20T10:18:57
| 118,992,872
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,216
|
py
|
import urllib.request as u
import os,re
from bs4 import BeautifulSoup
def str_del(string):
	# sanitize the string: strip characters that are not allowed in file names
ban = ['\\','/',':','*','','"','<','>','|']
for i in string:
if i in ban:
string = string.replace(i,'')
return string
def save_pic(pic_urls,title):
title = str_del(title)
	# save the images for this wallpaper set
try:
os.mkdir(title)
os.chdir(title)
for i in range(len(pic_urls)):
			# fetch the page for each individual image
html = url_open(pic_urls[i]).decode('gb2312', 'ignore')
soup = BeautifulSoup(html, "html.parser")
pic = soup.find(id = 'bizhiimg').p.a.img['src']
filename = soup.find(id = 'bizhiimg').p.a.img['alt'] + '.jpg'
filename = str_del(filename)
			# save the image to disk
with open(filename,'wb') as f:
print('已保存:' + filename)
img = url_open(pic)
f.write(img)
os.chdir('..')
except FileExistsError:
pass
def url_open(url):
	# fetch the page and return its raw content
req = u.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36')
req.add_header('Referer', 'http://www.zhuoku.com')
res = u.urlopen(req)
html = res.read()
return html
def main():
	# for now, only download the wallpapers from the first page
url = 'http://www.zhuoku.com/new/index.html'
	# note: this page is encoded in gb2312
html = url_open(url).decode('gb2312', 'ignore')
soup = BeautifulSoup(html, "html.parser")
	divs = soup.find_all(id = "xinbizhi")# get the list of result divs
for div in divs:
		# use BeautifulSoup to extract the information we need
title = div.a['title']
num = re.findall('共有.{1,3}张', str(div))[0][2:-1]
urls = 'http://www.zhuoku.com' + div.a['href']
pic_urls = []
for i in range(int(num)):
pic_url = urls.replace('.htm', '(%d).htm'%(i+1))
pic_urls.append(pic_url)
save_pic(pic_urls,title)
if __name__ == '__main__':
try:
os.mkdir('壁纸')
except FileExistsError:
pass
os.chdir('壁纸')
main()
|
[
"yonghongwang@163.com"
] |
yonghongwang@163.com
|
30eaa5f3c004cf9d6014266444ad0822593e74ca
|
3d6dcf456d17aaae71e650f90c7d70e52699403f
|
/mnist/plot.py
|
77fa5b960cd3afbbd069c96a0cd11e038207df8f
|
[] |
no_license
|
CNLHC/ai_accelerator_2020
|
bbcf82593e3f9ed470f620b0037e06d33af2f57a
|
298c97ce83bf4830e3e8c07f62bc5fdc42661420
|
refs/heads/master
| 2023-01-05T05:03:53.294053
| 2020-11-02T09:28:37
| 2020-11-02T09:28:37
| 301,689,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 504
|
py
|
import matplotlib.pyplot as plt
import matplotlib
def main():
def toFloat(x):
try:
return float(x)
except:
return None
li = [toFloat(x) for x in open("./assets/loss",'r').readlines()]
li = [x for x in li if x is not None]
plt.figure(figsize=(35,20))
matplotlib.rcParams.update({'font.size': 32})
plt.ylabel("Cross Entropy Loss")
plt.xlabel("Samples")
plt.plot(li[::5],linewidth=3,color="k")
plt.savefig("loss.eps")
main()
|
[
"cn_lhc@qq.com"
] |
cn_lhc@qq.com
|
e45daa193b93ee9c1ee5767173a49870d4d8145a
|
b20fde97e70b20c8301d844ad0b07facb70af9b3
|
/hello-flask/main.py
|
0c56546b1e0802e1d5aa5ca3e8e96a03a39dab80
|
[] |
no_license
|
krgirard33/lc101_2018
|
a42ea56cba9119d097c7e8884d90b07db235adfc
|
2603fd0e13e4e537a847a389ef2287764b6d582f
|
refs/heads/master
| 2021-05-01T08:48:44.962435
| 2018-04-24T00:38:50
| 2018-04-24T00:38:50
| 121,175,788
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 591
|
py
|
from flask import Flask, request, redirect, render_template
import cgi
import os
import jinja2
import tasklist  # was "import tasklist.py", which fails: Python would look for a submodule named "py"
template_dir = os.path.join(os.path.dirname(__file__),
'templates')
jinja_env = jinja2.Environment(
loader = jinja2.FileSystemLoader(template_dir), autoescape=True
)
app = Flask(__name__)
app.config['DEBUG'] = True
@app.route("/")
def index():
return render_template('hello_form.html')
@app.route('/hello', methods=['POST'])
def hello():
first_name = request.form['first_name']
return render_template('hello_greeting.html', name=first_name)
app.run()
|
[
"ken.girard@gmail.com"
] |
ken.girard@gmail.com
|
5006858f56019a73a6dd2128541e817171e202e9
|
0d7308b8fe87b61a8294688f61fd3100c3be5bd4
|
/src/star_sampler1.py
|
5b95b7744697cb6c280b646b8798dc68e0232421
|
[] |
no_license
|
mlm2299/StoneMallernee
|
2ff07091dbdfcd1d0c475ed2e48669aad2eb5252
|
80d738f8ad7824db04fb08365afac033a84bad49
|
refs/heads/master
| 2021-05-08T00:39:02.532420
| 2017-10-20T15:43:40
| 2017-10-20T15:43:40
| 107,691,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,689
|
py
|
'''
Created on June 16, 2015
@author: Maggie Mallernee
To sample stellar orbits assuming isotropy and spherical distribution, with angles in DEGREES
'''
#from scipy import constants as c
import random
from math import cos, asin, degrees
import anomaly_convert as ac
#Function that calculates the radius of influence (of a black hole), in parsecs
#Need to verify units & empirics (r_infl ~ 0.76 ( M_bh / 10^6 M_s)^(0.58)
def get_r_infl(mass):
return (mass / (10**6 * 1.98855 * 10**30)) ** 0.5
#def get_r_max
def star_sampler(mass_bh, num_stars):
sample_array = []
#name = ""
for x in range(1, num_stars+1):
#name = 'S' + str(x)
#randomly sample omega, w, M (mean anomaly) **in DEGREES
omega = random.uniform(0.0, 360.)
w = random.uniform(0.0, 360.)
M = random.uniform(0.0, 360.)
#randomly sample i
sin_i = random.uniform(0.0, 2.0) - 1.0
i = degrees( asin(sin_i) )
#randomly sample e using its cdf F(X) = e**2
#thermal distribution of e
is_ok = False
while(is_ok != True):
X = random.random()
e = X ** 0.5
if(e != (1.0 / cos(1.0) )):
is_ok = True
#get f from M
E = ac.newton(e, 1.0, M) #need method to choose the initial guess
f = ac.get_f(e, E)
#randomly sample a using the Bahcall-Wolf Cusp distribution
X = random.random()
#a = X ** (4./5.) * 1240 #4:2 outer bound?
#for initial GR computational testing -- bound to the 3:1 resonance
#(48.0750 - for 100 AU binary separation)
#(14.4225 - for 30 AU binary separation)
a = X ** (4./5.) * (48.0750)
#radius of pericentre -- for sorting purposes only
r_p = a * (1 - e)
sample_array.append([a, e, i, omega, w, f, r_p])
return sample_array
#sorts by pericentre, ascending
def sort_sample_by_peri(sample):
sample.sort(key=lambda x: x[6])
return sample
def star_sample_file_writer(file_name, mass_bh, num_stars_to_sample, num_to_include):
sample = sort_sample_by_peri(star_sampler(mass_bh, num_stars_to_sample))
get_file(file_name, sample, num_to_include)
#to_include ought to be a list of different numbers to sample
def sample_multiple(mass_bh, num_stars, to_include):
sample = sort_sample_by_peri(star_sampler(mass_bh, num_stars))
for n in range(len(to_include)):
file_name = "small" + str(to_include[n]) + ".in"
get_file(file_name, sample, to_include[n])
#returns a file with the smallest num_included number of stars with pericentres greater than twice the tidal radius
#r_t currently set to 1.0016247683 AU
def get_file(file_name, sample, num_included):
with open(file_name, 'w') as file:
file.write(')O+_06 Small-body initial data (WARNING: Do not delete this line!!) \n')
file.write(") Lines beginning with `)' are ignored. \n")
file.write(")--------------------------------------------------------------------- \n")
file.write(" style (Cartesian, Asteroidal, Cometary) = Ast \n")
file.write(")--------------------------------------------------------------------- \n")
name = ""
r_t = 1.0016247683
n = 0
i = 1
while i <= num_included:
if sample[n][6] > (2 * r_t):
name = "S" + str(i)
i += 1
#write to the file
#TEST file.write("Test - radius of pericentre: %s \n" % (sample[n][6]))
file.write(' %s ep=2450400.5 \n' % (name))
if i > num_included:
file.write(" %s %s %s %s %s %s 0 0 0\n\n" % (sample[n][0], sample[n][1], sample[n][2], sample[n][3], sample[n][4], sample[n][5]))
else:
file.write(" %s %s %s %s %s %s 0 0 0\n" % (sample[n][0], sample[n][1], sample[n][2], sample[n][3], sample[n][4], sample[n][5]))
n += 1
#name = "S" + str(num_included)
#file.write(' %s ep=2450400.5 \n' % (name))
#file.write(" %s %s %s %s %s %s 0 0 0 " % (sample[n][0], sample[n][1], sample[n][2], sample[n][3], sample[n][4], sample[n][5]))
return file
#MAIN
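# Illustrative driver (added; not part of the original module). The black-hole
# mass and star counts are assumptions; the solar-mass constant matches the one
# used in get_r_infl above.
if __name__ == '__main__':
    mass_bh = 4.0e6 * 1.98855e30   # roughly four million solar masses, in kg
    # sample 1000 stars and write the 100 innermost ones with pericentre > 2*r_t
    star_sample_file_writer('small100.in', mass_bh, 1000, 100)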
|
[
"mlm2299@columbia.edu"
] |
mlm2299@columbia.edu
|
929a7e390ef29badc40568612ddcfd1a26849d40
|
f7c482bd2a5af72d56f7506219fd3ae81ea0eb69
|
/venv/Scripts/rst2man.py
|
7d48736a4a76bbf70945812b4d7034e56a46d001
|
[] |
no_license
|
Shakikhanli/Repo-Finder
|
d80ab19f8a5c5e4b421b1eaf376ffa9fce195fc8
|
f922ab491d77dd7eacb574bdfd2f4117d1c62aa0
|
refs/heads/master
| 2022-11-12T14:22:33.165194
| 2020-07-07T10:10:42
| 2020-07-07T10:10:42
| 277,782,498
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 661
|
py
|
#!C:\MPF\Programming\Python Projects\Github Repo Finder\venv\Scripts\python.exe
# Author:
# Contact: grubert@users.sf.net
# Copyright: This module has been placed in the public domain.
"""
man.py
======
This module provides a simple command line interface that uses the
man page writer to output from ReStructuredText source.
"""
import locale
try:
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
from docutils.writers import manpage
description = ("Generates plain unix manual documents. " + default_description)
publish_cmdline(writer=manpage.Writer(), description=description)
|
[
"shekikhanli@gmail.com"
] |
shekikhanli@gmail.com
|
94f6e0c71843c2dce709f89a98354f01226fc707
|
07e69d44a874884678297a668632c040b7df8eef
|
/dataAnalysis.py
|
ba5677b5601721850dc94f13953a5cefd82d9ffa
|
[] |
no_license
|
maigahurairah15/Hurairah-GWC-2018
|
178e10e719ec8803fe4bd46a12450265c02aaa70
|
b55f32fd887c5ab149aab129a2ff3589ee04dbc5
|
refs/heads/master
| 2020-03-26T16:48:50.181435
| 2018-08-17T13:50:08
| 2018-08-17T13:50:08
| 145,124,583
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
import school_scores
list_of_records = school_scores.get_all()
print(list_of_records[0])
for i in list_of_records:
print(i["State"]["Name"],i["Gender"]["Female"],i["Score Ranges"]["Between 600 to 700"]["Math"])
|
[
"noreply@github.com"
] |
maigahurairah15.noreply@github.com
|
ccd19b0b85e78710f0ee8289121587e93d8b0e6f
|
ee96b1349c9a8926836e8a00ecd99af9ae3fd9d3
|
/fetch_scrapy/fetch/pipelines/__init__.py
|
75d7ec22cde0a75d82b15426f61ed511b6fe07be
|
[] |
no_license
|
aiportal/zb123
|
f201e701e6dff4e4e957410f9df0cadb8feb9d51
|
7316880e2444a8af02e2f44af38dd7ae708ccbb6
|
refs/heads/master
| 2023-08-06T13:23:47.421260
| 2018-08-14T03:03:49
| 2018-08-14T03:03:49
| 170,792,729
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
from .mysql import MysqlPipeline
from .sqlite import SQLitePipeline
from .zip import ZipPackagePipeline
from .csv import CsvFilePipeline
|
[
"bfbd@163.com"
] |
bfbd@163.com
|
0f4121425d642124405d6605b83f29e697901a2e
|
84de064b2994516f14950cd31e815a410bce1433
|
/week5/FileDownload1.py
|
042f9d092bd6be38f952a7e61a93e80a5fe366c3
|
[] |
no_license
|
stefancoe/uw_python
|
442697d6d815a6b3a3f8ec87c8bbff54b480a243
|
1bd7cd1a15fd994c7690f04e7b55984df68bd173
|
refs/heads/master
| 2020-05-29T13:42:15.806345
| 2012-02-29T03:02:55
| 2012-02-29T03:02:55
| 3,099,659
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 776
|
py
|
import urllib2
from BeautifulSoup import BeautifulSoup
from urlparse import *
from urllib import urlretrieve
import os
import sys
site = 'http://jon-jacky.github.com/uw_python/winter_2012/index.html'
page = urllib2.urlopen(site).read()
soup = BeautifulSoup(page)
anchors = soup.findAll('a')
o = urlparse(site)
print type (anchors)
for item in anchors:
myUrl = str(item)
if myUrl.rfind('.py<') != -1:
myUrl = myUrl[myUrl.find("=")+2:]
myUrl = myUrl[:0] + myUrl[:myUrl.find(">")-1]
myUrl = urljoin(site, myUrl)
fileName = myUrl.split("/")
fileName = fileName[-1]
print fileName
folder = 'C:/Temp/'
folder = folder + "/" + fileName
urlretrieve(myUrl, folder)
#print soup.prettify()
|
[
"coestefan@gmail.com"
] |
coestefan@gmail.com
|
acfb84dcfe41a00d92a140f5dee85df17a52d2a9
|
afa377c993ac69c03d8cfd9c587409a5616ce9b9
|
/recursividad.py
|
e0c9e2d7918751e016c174c6f5a97371ee872b9c
|
[] |
no_license
|
ealexisaraujo/basicoPython
|
45b01237ad184535d51ce745dc11dc3284e930f7
|
8419595e2884569224207deabd24006d4fa922f6
|
refs/heads/master
| 2023-05-29T01:53:43.954711
| 2020-03-15T14:46:23
| 2020-03-15T14:46:23
| 245,044,203
| 0
| 0
| null | 2023-05-22T21:38:57
| 2020-03-05T01:47:04
|
Python
|
UTF-8
|
Python
| false
| false
| 362
|
py
|
# -*- coding: utf-8 -*-
def sumrec(number):
if number == 1:
return 1
return number + sumrec(number-1)
def run():
number = int(input('Ingresa el número a sumar: '))
res = sumrec(number)
print('El resultado de la suma recursiva, tomando como base el {} es :{}'.format(
number, res))
if __name__ == '__main__':
run()
|
[
"ala1289@gmail.com"
] |
ala1289@gmail.com
|
aff53125e814dfb37ed5e60ec9904b09180fa0fe
|
86beb084541d582c7547ca8fabafd326a150a8a9
|
/ReviewPython/linting/better_hello.py
|
5b490b9eef19a012b485c709e8a7fbd044ec6126
|
[] |
no_license
|
Computational-Physics-Research/CompPhys
|
ea8e806a0f79560c21aad7969337b0a1e97f2fbd
|
7ae65dc1ba754c8b6addd8fbde3f7beff4f00523
|
refs/heads/main
| 2023-04-19T00:14:41.369953
| 2021-04-02T17:35:10
| 2021-04-05T19:16:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 113
|
py
|
"""
This is the hello world module
"""
def hello():
""" My Hello World program """
print("Hello World")
|
[
"thomay@buffalo.edu"
] |
thomay@buffalo.edu
|
d1155513c9c21f51d0829ee7664d1e093062b6cf
|
e40933249f51eaf43b85ea183bb4ee12d4c32828
|
/pipeline/binaries/evaluation.py
|
b21b4b9e6d8dc710e5aff1bf223860375cb0b2ed
|
[] |
no_license
|
stage-comprection/pipeline
|
3b34447b4f043fcdee9f078197014848ad89a8db
|
1dcf55bffdc3bb8a0aeee0117eba66cb847c9e76
|
refs/heads/master
| 2021-01-10T03:25:58.911767
| 2016-03-23T16:33:07
| 2016-03-23T16:33:07
| 51,996,124
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,499
|
py
|
"""
Functions to evaluate the results of correction using the evaluation pipeline.
Parameters are given using a .ini file instead of command-line arguments.
"""
from ..settings import *
import os
def generate_settings_file(settings):
"""
Generates an .ini file used by the evaluation pipeline to initialize
parameter values.
"""
pathToSettingsFile = (settings[GENERAL][OUTPUT_PATH] +
settings[EVALUATION][SETTINGS_FILE])
with open(pathToSettingsFile, 'w') as o:
o.write("outputFolderPath=" +
settings[GENERAL][OUTPUT_PATH] + "\n")
o.write("readsFolderPath=" +
settings[DATA][READS_PATH] + "\n")
o.write("referenceFolderPath=" +
settings[DATA][REF_PATH] + "\n")
o.write("readSet=" +
settings[DATA][READS_FILE] + "\n")
o.write("reference=" +
settings[DATA][REF_FILE] + "\n")
o.write("nThreads=" +
str(settings[GENERAL][N_THREADS]) + "\n")
o.write("nTempFiles=" +
str(settings[EVALUATION][N_TEMP_FILES]) + "\n")
def evaluation(settings):
"""Evaluates the results of correction using the evaluation pipeline """
# Generates the settings file used by the evaluation pipeline
generate_settings_file(settings)
# Runs evaluation_correction binary
os.system(settings[EVALUATION][PATH] + 'evaluation_correction ' +
settings[GENERAL][OUTPUT_PATH])
|
[
"romain.feron@evobio.eu"
] |
romain.feron@evobio.eu
|
97a01674c332bb5bad4dc14ea5e57e111626b8d6
|
d7704c107f9cb0cdb92a4c864863ff087fcfda35
|
/etl.py
|
b4bda19b21d5a6ddc698677133fee1276b02070c
|
[] |
no_license
|
maullaina/Data_Modeling_with_Postgres
|
0c8d21d1f5e7fae7b51e73ebf3ebef6ddbc85f3f
|
4111743da6f66456890126f5adc77c67a38ea3bb
|
refs/heads/main
| 2023-08-19T12:21:43.530703
| 2021-10-20T10:41:22
| 2021-10-20T10:41:22
| 419,286,067
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,133
|
py
|
import os
import glob
import psycopg2
import pandas as pd
from sql_queries import *
from datetime import datetime
def process_song_file(cur, filepath):
""" Get information about songs and artists from raw data and introduce it
in the SQL tables.
Arguments:
cur {object}: element that will represent a dataset determined by a T-SQL query.
Cursors allow to traverse row by row, read and eventually modify this result set.
filepath {str}: a string with the file path for each document.
Returns:
None
"""
# open song file
df = pd.read_json(filepath, lines=True)
# insert song record
song_data=[]
song_data.append(df.song_id.values[0])
song_data.append(df.title.values[0])
song_data.append(df.artist_id.values[0])
song_data.append(int(df.year.values[0]))
song_data.append(df.duration.values[0])
cur.execute(song_table_insert, song_data)
# insert artist record
artist_data=[]
artist_data.append(df.artist_id.values[0])
artist_data.append(df.artist_name.values[0])
artist_data.append(df.artist_location.values[0])
artist_data.append(df.artist_latitude.values[0])
artist_data.append(df.artist_longitude.values[0])
cur.execute(artist_table_insert, artist_data)
def get_fields_time(x):
""" Get further information from datetime format
Arguments:
x {dataframe}: each of the different rows per each log_data file
Return:
A list with all the values to insert into the time table
"""
hour = x.hour
day = x.day
week = x.week
month = x.month
year = x.year
weekday= x.dayofweek
return [x,hour, day, week, month, year, weekday]
def process_log_file(cur, filepath):
""" Get information about time and users from raw data and introduce it
in the SQL tables and create the fact table songplays.
Arguments:
cur {object}: element that will represent a dataset determined by a T-SQL query.
Cursors allow to traverse row by row, read and eventually modify this result set.
filepath {str}: a string with the file path for each document.
Returns:
None
"""
# open log file
df = pd.read_json(filepath, lines=True)
# filter by NextSong action
df_nxt = df.page=='NextSong'
df = df[df_nxt]
# convert timestamp column to datetime
df.ts = df.ts/1000
t = df.ts.map(datetime.fromtimestamp)
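    # Worked example (hypothetical log value): a raw ts of 1541105830796 is in
    # milliseconds; dividing by 1000 gives 1541105830.796 seconds, which
    # datetime.fromtimestamp turns into a datetime in early November 2018
    # (the exact wall-clock value depends on the local timezone).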
# insert time data records
time_data = [get_fields_time(x) for x in t]
column_labels = [ 'timestamp', 'hour', 'day', 'week', 'month', 'year', 'weekday']
time_df = pd.DataFrame(time_data, columns=column_labels)
for i, row in time_df.iterrows():
cur.execute(time_table_insert, list(row))
# load user table
user_data=[]
for index, row in df.iterrows():
user_id=row["userId"]
first_name= row["firstName"]
last_name= row["lastName"]
gendre=row["gender"]
level=row["level"]
list_user=[user_id, first_name, last_name, gendre, level]
if list_user not in user_data:
user_data.append(list_user)
column_labels = ['user_id', 'first_name', 'last_name', 'gender', 'level']
user_df = pd.DataFrame(user_data, columns=column_labels)
# insert user records
for i, row in user_df.iterrows():
cur.execute(user_table_insert, tuple(row))
# insert songplay records
df.ts=df.ts.map(datetime.fromtimestamp)
for index, row in df.iterrows():
# get songid and artistid from song and artist tables
cur.execute(song_select, (row.song, row.artist, row.length))
results = cur.fetchone()
if results:
songid, artistid = results
else:
songid, artistid = None, None
# insert songplay record
songplay_data = []
songplay_data.append(row.ts)
songplay_data.append(row.userId)
songplay_data.append(row.level)
songplay_data.append(songid)
songplay_data.append(artistid)
songplay_data.append(row.sessionId)
songplay_data.append(row.location)
songplay_data.append(row.userAgent)
cur.execute(songplay_table_insert, songplay_data)
def process_data(cur, conn, filepath, func):
""" given a created DB space, this function gets the filepath and give it as
an arguments to th etwo functions that will process tha raw data.
Arguments:
cur {object}: element that will represent a dataset determined by a T-SQL query.
Cursors allow to traverse row by row, read and eventually modify this result set.
conn {object}: The connector allows you to connect to the following databases and perform
multiple database actions
filepath {str}: a string with the file path for each document.
func {function}: a specific function name to execute
Return:
None
"""
# get all files matching extension from directory
all_files = []
for root, dirs, files in os.walk(filepath):
files = glob.glob(os.path.join(root,'*.json'))
for f in files :
all_files.append(os.path.abspath(f))
# get total number of files found
num_files = len(all_files)
print('{} files found in {}'.format(num_files, filepath))
# iterate over files and process
for i, datafile in enumerate(all_files, 1):
func(cur, datafile)
conn.commit()
print('{}/{} files processed.'.format(i, num_files))
def main():
conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=student password=student")
cur = conn.cursor()
process_data(cur, conn, filepath='data/song_data', func=process_song_file)
process_data(cur, conn, filepath='data/log_data', func=process_log_file)
conn.close()
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
maullaina.noreply@github.com
|
71443d75a70b60067dca5151d9f4b254511d4d0a
|
1cba599fd19adf5c83dcaa68394f47f78dd15039
|
/store/migrations/0002_rename_price_product_unit_price.py
|
212285c394a257f2157a6935639367c08124a7b6
|
[] |
no_license
|
I-am-vishalmaurya/Django_Store_Backend
|
f0b957d79fbad90d1d258b65934797f32911e7df
|
db910d606cdd751c467a4c0ddc6a5f02c1aa1ec6
|
refs/heads/master
| 2023-08-11T16:09:59.677089
| 2021-10-15T03:10:00
| 2021-10-15T03:10:00
| 393,366,931
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
# Generated by Django 3.2.5 on 2021-08-01 10:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('store', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='product',
old_name='price',
new_name='unit_price',
),
]
|
[
"vishalmaurya3112@gmail.com"
] |
vishalmaurya3112@gmail.com
|
515d67d4076f5ef6779953138fa64a6a4359a46f
|
3b6e75ae5390abf9a47c22867fc967683e257301
|
/opnet_aa/baselines/tracking_utils.py
|
b55119fea4c22594c47cd133e1acfed477768fbe
|
[] |
no_license
|
GakkiChen/OPNet-AA
|
fd24c591128559856c0c66104401fafee1708168
|
ca764439f24d7e1249a5ff6609b1e3dcb2343e88
|
refs/heads/main
| 2023-07-05T21:28:39.624750
| 2021-08-11T03:18:32
| 2021-08-11T03:18:32
| 390,297,756
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,953
|
py
|
import json
from pathlib import Path
from typing import List, Dict, Tuple
import pandas as pd
import numpy as np
import cv2
class VideoHandling(object):
def __init__(self, vid_path: str, output_path: str = None):
self.vid_path = vid_path
self.output_path = output_path
self.cap: cv2.VideoCapture = None
self.vid_writer: cv2.VideoWriter = None
self.current_frame: np.ndarray = None
self.current_frame_index: int = -1
self.video_still_active: bool = False
self.num_valid_frames = None
self._init_video_cap()
def _init_video_cap(self) -> None:
self.cap = cv2.VideoCapture(self.vid_path)
if not self.cap.isOpened():
            raise IOError('Unable to open video {}'.format(self.vid_path))
# for some reason cap always returns extra frame
# our labels are aligned with the 300 first frames
# thus, we will omit the last frame that cv2 reader returns
self.num_valid_frames = self.cap.get(cv2.CAP_PROP_FRAME_COUNT) - 1
def _init_video_writer(self, w_frame, h_frame) -> None:
self.vid_writer = cv2.VideoWriter(self.output_path,
cv2.VideoWriter_fourcc('m', 'p', '4', 'v'),
30,
(w_frame, h_frame))
def get_current_frame(self) -> np.ndarray:
return self.current_frame
def get_current_frame_index(self) -> int:
return self.current_frame_index
def check_video_still_active(self) -> bool:
return self.current_frame_index < self.num_valid_frames
def write_debug_frame(self) -> None:
frame = self.current_frame
h_frame, w_frame, _ = frame.shape
# init video writer if not initiated yet
if self.vid_writer is None:
self._init_video_writer(w_frame, h_frame)
self.vid_writer.write(frame)
def read_next_frame(self) -> None:
_, frame = self.cap.read()
# update current frame and status
self.current_frame = frame
self.current_frame_index += 1
def write_bb_to_frame(self, bbox: List[int], color: Tuple[int, int, int]) -> None:
frame = self.current_frame
cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 3)
def complete_video_writing(self) -> None:
self.cap.release()
self.vid_writer.release()
class DataHelper(object):
@staticmethod
def parse_obj_gt_bb(bb_gt: Dict[str, List[List[int]]], object_name: str = "small_gold_spl_metal_Spl_0") -> List[List[int]]:
object_bb = bb_gt[object_name]
# transform from xy_wh bb format to xy_xy format
object_bb = [[x, y, x + w, y + h] for x, y, w, h in object_bb]
return object_bb
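    # Worked example (hypothetical box, not from the dataset): an xy_wh entry
    # [10, 20, 30, 40] (x=10, y=20, w=30, h=40) becomes the xy_xy box
    # [10, 20, 40, 60] after the conversion above.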
@staticmethod
def read_obj_gt_bb(vid_path: str, bb_dir_path: str) -> List[List[int]]:
video_name = Path(vid_path).stem
video_bb_file_suffix = video_name + "_bb.json"
video_bb_full_path = Path(bb_dir_path) / video_bb_file_suffix
with open(video_bb_full_path, "rb") as f:
bb_objects_gt: Dict[str, List[List[int]]] = json.load(f)
snitch_gt_bb = DataHelper.parse_obj_gt_bb(bb_objects_gt)
return snitch_gt_bb
@staticmethod
def write_bb_predictions_to_file(video_path: str, predictions_dir: str, snitch_bb_prediction: List[List[int]]) -> None:
video_name = Path(video_path).stem
prediction_file_name = video_name + "_bb.json"
predictions_path = Path(predictions_dir) / prediction_file_name
snitch_bb_prediction = [[int(x1), int(y1), int(x2), int(y2)] for [x1, y1, x2, y2] in snitch_bb_prediction]
with open(predictions_path, 'w') as f:
json.dump(snitch_bb_prediction, f, indent=2)
class ResultsAnalyzer(object):
@staticmethod
def compute_iou_for_frame(box_1: List[int], box_2: List[int]) -> float:
"""
computes intersection over union of two bounding boxes arrays
        it is a symmetric measure, so it does not matter which box is the
        prediction and which is the ground truth
"""
# obtain the X,Y of the intersection rectangle
intersection_x_1: int = max(box_1[0], box_2[0])
intersection_y_1: int = max(box_1[1], box_2[1])
intersection_x_2: int = min(box_1[2], box_2[2])
intersection_y_2: int = min(box_1[3], box_2[3])
# calc the area of intersection rectangle
intersection_area: int = max(0, intersection_x_2 - intersection_x_1 + 1) * max(0, intersection_y_2 - intersection_y_1 + 1)
# compute the area of both the prediction and ground-truth rectangles
box_1_area: int = (box_1[2] - box_1[0] + 1) * (box_1[3] - box_1[1] + 1)
box_2_area: int = (box_2[2] - box_2[0] + 1) * (box_2[3] - box_2[1] + 1)
# Subtracting interArea because we sum it twice
union_area: int = box_1_area + box_2_area - intersection_area
# compute intersection over union
iou: float = intersection_area / float(union_area)
return iou
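    # Worked example (hypothetical boxes): for box_1 = [0, 0, 9, 9] and
    # box_2 = [5, 5, 14, 14] the intersection covers 5 * 5 = 25 pixels and each
    # box covers 10 * 10 = 100 pixels, so the union is 100 + 100 - 25 = 175 and
    # compute_iou_for_frame returns 25 / 175 ~= 0.143.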
@staticmethod
def compute_vectorized_iou_for_video(boxes_1: np.ndarray, boxes_2: np.ndarray) -> np.ndarray:
# divide the boxes to coordinates
x11, y11, x12, y12 = np.split(boxes_1, 4, axis=1)
x21, y21, x22, y22 = np.split(boxes_2, 4, axis=1)
# obtain XY of intersection rectangle area
xA: np.ndarray = np.maximum(x11, x21)
yA: np.ndarray = np.maximum(y11, y21)
xB: np.ndarray = np.minimum(x12, x22)
yB: np.ndarray = np.minimum(y12, y22)
# compute intersection area
interArea: np.ndarray = np.maximum((xB - xA + 1), 0) * np.maximum((yB - yA + 1), 0)
# compute each one of the boxes area
boxAArea: np.ndarray = (x12 - x11 + 1) * (y12 - y11 + 1)
boxBArea: np.ndarray = (x22 - x21 + 1) * (y22 - y21 + 1)
iou: np.ndarray = interArea / (boxAArea + boxBArea - interArea)
iou = iou.flatten()
return iou
@classmethod
def init_from_files(cls, bb_prediction_dir: str, bb_gt_dir: str, iou_thresh: float = None):
video_files: List[str] = []
video_bb_predictions: Dict[str, List[List[int]]] = {}
video_bb_gt: Dict[str, List[List[int]]] = {}
# parse predictions
predictions_files = Path(bb_prediction_dir).glob("*.json")
for f_predict in predictions_files:
# extract video name
video_name = f_predict.stem[:-3] # remove _bb suffix
video_files.append(video_name)
# read and parse json file
with open(f_predict, "rb") as f:
snitch_predictions_locations: List[List[int]] = json.load(f)
# fix bug in current ground truth locations, remove first frame annotations
# snitch_predictions_locations = snitch_predictions_locations[1:]
video_bb_predictions[video_name] = snitch_predictions_locations
# parse ground truth locations
gt_files = Path(bb_gt_dir).glob("*.json")
for f_gt in gt_files:
# extract video name
video_name = f_gt.stem[:-3] # remove _bb suffix
# skip videos we don't have predictions for
if video_name in video_files:
# read and parse json file
with open(f_gt, "rb") as f:
all_objects_locations: Dict[str, List[List[int]]] = json.load(f)
snitch_gt_locations: List[List[int]] = DataHelper.parse_obj_gt_bb(all_objects_locations)
video_bb_gt[video_name] = snitch_gt_locations
# sort data to make sure predictions and ground truth are aligned to same video
sorted_bb_predictions = sorted(video_bb_predictions.items(), key=lambda x: x[0])
sorted_bb_gt = sorted(video_bb_gt.items(), key=lambda x: x[0])
video_files = sorted(video_files)
bb_predictions: List[List[List[int]]] = [bb_pred for video_name, bb_pred in sorted_bb_predictions]
bb_gt: List[List[List[int]]] = [bb_gt for video_name, bb_gt in sorted_bb_gt]
return cls(video_files, bb_predictions, bb_gt, iou_thresh)
def __init__(self, videos_files: List[str], bb_predictions: List[List[List[int]]], bb_gt: List[List[List[int]]], iou_thresh: List[float] = None):
assert len(videos_files) == len(bb_predictions) == len(bb_gt)
self.videos_names: List[str] = []
self.videos_num_frames: Dict[str, int] = {}
self.bb_predictions: Dict[str, List[List[int]]] = {}
self.bb_gt: Dict[str, List[List[int]]] = {}
self.iou_results: Dict[str, List[float]] = {}
self.map_results: Dict[str, Dict[str, List[bool]]] = {thresh: {} for thresh in iou_thresh} if iou_thresh else {}
self.videos_metrics: Dict[str, Dict[str, float]] = {}
self.iou_thresh: List[float] = iou_thresh
self._init_videos_data(videos_files, bb_predictions, bb_gt)
self._compute_iou_results()
self._compute_bool_overlap(self.iou_thresh)
def _init_videos_data(self, videos_files: List[str], bb_predictions: List[List[List[int]]], bb_gt: List[List[List[int]]]) -> None:
all_videos_names = list(map(lambda x: Path(x).stem, videos_files))
num_potential_videos = len(all_videos_names)
for i in range(num_potential_videos):
current_video = all_videos_names[i]
current_predictions = bb_predictions[i]
current_gt = bb_gt[i]
current_video_length = len(current_gt)
if -100 in current_predictions:
                continue # skip defective videos
self.bb_predictions[current_video] = current_predictions
self.bb_gt[current_video] = current_gt
self.videos_num_frames[current_video] = current_video_length
self.videos_names.append(current_video)
def _compute_iou_results(self) -> None:
for video_name in self.videos_names:
video_predictions: np.ndarray = np.array(self.bb_predictions[video_name])
video_gt: np.ndarray = np.array(self.bb_gt[video_name])
batch_video_iou_results = self.compute_vectorized_iou_for_video(video_predictions, video_gt)
self.iou_results[video_name] = batch_video_iou_results
def _compute_bool_overlap(self, iou_thresh) -> None:
if iou_thresh is not None:
for threshold in iou_thresh:
for video_name, frame_iou in self.iou_results.items():
self.map_results[threshold][video_name] = frame_iou > threshold
def get_frames_mask(self, occlusion_frames_file: str) -> Dict[str, np.ndarray]:
occlusion_masks = {}
with open(occlusion_frames_file, "r") as f:
for line in f:
line = line[:-1]
video_name, occlusion_frames_str = line.split("\t")
if video_name not in self.bb_gt:
continue
if occlusion_frames_str == "":
occlusion_frames = []
else:
occlusion_frames = np.array(occlusion_frames_str.split(","), dtype=np.int)
video_length = self.videos_num_frames[video_name]
mask = np.zeros(video_length, dtype=np.bool)
mask[occlusion_frames] = True
occlusion_masks[video_name] = mask
return occlusion_masks
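    # Sketch of the occlusion file format expected by get_frames_mask (names and
    # values are hypothetical): each line is "<video_name>\t<comma-separated
    # frame indices>", e.g. "video_001\t12,13,14"; for a 300-frame video this
    # produces a boolean mask of length 300 that is True only at frames 12-14.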
def compute_aggregated_metric(self, aggregations_name: str, aggregation_function, metric: str = "iou") -> None:
if metric == "iou":
video_mean_results = {}
for video_name in self.videos_names:
video_metric_results = np.array(self.iou_results[video_name])
video_mean_metric = float(aggregation_function(video_metric_results))
video_mean_results[video_name] = video_mean_metric
self.videos_metrics[f"{aggregations_name}_{metric}"] = video_mean_results
elif metric == "map":
for iou_threshold, video_overlap_dict in self.map_results.items():
video_mean_results = {}
for video_name in self.videos_names:
video_metric_results = np.array(video_overlap_dict[video_name])
video_mean_metric = float(aggregation_function(video_metric_results))
video_mean_results[video_name] = video_mean_metric
self.videos_metrics[f"{aggregations_name}_{metric}_{iou_threshold}"] = video_mean_results
else:
raise NotImplementedError("This metric is not supported")
def get_videos_names(self):
return self.videos_names
def compute_metric_mask(self, occlusions_mask: Dict[str, np.ndarray], video_metric_results: np.ndarray, aggregation_function, video_name: str):
video_mean_results = {}
video_mask: List[bool] = occlusions_mask[video_name]
num_mask_frames = np.sum(video_mask)
if num_mask_frames == 0:
video_mean_metric = np.nan
else:
video_metric_mask_frames = video_metric_results[video_mask]
video_mean_metric = float(aggregation_function(video_metric_mask_frames))
video_mean_results[video_name] = video_mean_metric
return video_mean_results
def compute_aggregated_metric_masking_frames(self, aggregation_name: str, aggregation_function, occlusions_mask: Dict[str, np.ndarray], metric: str = "iou") -> None:
ratio_column_name = f"{aggregation_name}_ratio"
video_mask_ratio = {}
if metric == "iou":
video_metric = {}
metric_column_name = f"{aggregation_name}_mean_{metric}"
for video_name in self.videos_names:
video_metric_results = np.array(self.iou_results[video_name])
video_metric.update(self.compute_metric_mask(occlusions_mask, video_metric_results,
aggregation_function, video_name))
video_mask = occlusions_mask[video_name]
num_mask_frames = np.sum(video_mask)
if num_mask_frames == 0:
mask_ratio = 0.0
else:
mask_ratio = num_mask_frames / len(video_mask)
video_mask_ratio[video_name] = mask_ratio
self.videos_metrics[metric_column_name] = video_metric
self.videos_metrics[ratio_column_name] = video_mask_ratio
elif metric == "map":
video_metric = {key: {} for key in self.map_results.keys()}
for video_name in self.videos_names:
for threshold, video_overlap in self.map_results.items():
video_metric_results = np.array(video_overlap[video_name])
video_metric[threshold].update(self.compute_metric_mask(occlusions_mask, video_metric_results,
aggregation_function, video_name))
for threshold, map_value in video_metric.items():
metric_column_name = f"{aggregation_name}_mean_{metric}_{threshold}"
self.videos_metrics[metric_column_name] = map_value
else:
raise NotImplementedError("This metric is not supported")
def compute_precision_data(self, thresholds: List[float] = None, occlusions_mask: Dict[str, np.ndarray] = None):
if thresholds is None:
thresholds = [i / 20 for i in range(20)] # 0 - 0.95 with 0.05 step
with_occlusions = False
if occlusions_mask is not None:
with_occlusions = True
for t in thresholds:
def t_agg_func(x):
return np.sum(x > t) / x.shape[0]
if with_occlusions:
aggregation_name = f"occ_precision_{t}"
                self.compute_aggregated_metric_masking_frames(aggregation_name, t_agg_func, occlusions_mask, metric="iou")
else:
aggregation_name = f"precision_{t}"
                self.compute_aggregated_metric(aggregation_name, t_agg_func, metric="iou")
def get_analysis_df(self) -> pd.DataFrame:
video_data = {
"videos_names": sorted(self.videos_names)
}
for metric_name, metric_data_dict in self.videos_metrics.items():
sorted_metric_data = sorted(metric_data_dict.items(), key=lambda x: x[0])
metric_values = list(map(lambda x: x[1], sorted_metric_data))
video_data[metric_name] = metric_values
results_df = pd.DataFrame.from_dict(video_data)
return results_df
def write_results(self, results_filepath: str) -> None:
# create a dictionary later be converted to DataFrame
results_df = self.get_analysis_df()
results_df = results_df.round(3)
results_df.to_csv(results_filepath, index=None)
|
[
"noreply@github.com"
] |
GakkiChen.noreply@github.com
|
7622759d9e9e7476c8de41b06c5d38bac544425b
|
782a3a5dfd619edf015e3000f9bcd6607dd9a027
|
/MISC/mainDNN.py
|
636f6ef4c420a586628bc13819a24f63f7096198
|
[] |
no_license
|
yujianyuanhaha/BerPredict
|
b0ee5d0ad9b3f7e90f49bab0954d0ba903db89a4
|
57f3d15e04399ac114fd0188c35670de791e30fe
|
refs/heads/master
| 2022-04-21T20:34:09.211610
| 2020-04-20T14:12:31
| 2020-04-20T14:12:31
| 214,231,021
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,849
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 12 11:09:17 2019
@author: Jet
1. TODO, lack of early stop, likely overfit
"""
import time
import os
import math
os.environ["KMP_DUPLICATE_LIB_OK"] = "1" # sometime make "1" for Mac
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
import numpy as np
from keras.utils import to_categorical
import hdf5storage
from keras import callbacks
from keras.callbacks import EarlyStopping
import matplotlib.pyplot as plt
early_stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1, mode='auto')
# ---------------- settings---------------------------------
epochs = 20
ID = 0 # 0 for umit, 1 for mit
MAXdB = 40
batch_size = 4096
# ---------------- load data ---------------------------------
t = hdf5storage.loadmat('./DATA/X_200k.mat')
X = t['X']
t = hdf5storage.loadmat('./DATA/Y_200k.mat')
U = t['Y']
Ndat = U.shape[0]
U = - 10*(np.log(U/1000)/np.log(10)); # log value
for i in range(0,Ndat):
for j in range(0,2):
if np.isinf(U[i,j]) or np.isnan(U[i,j]) or U[i,j] > MAXdB:
U[i,j] = MAXdB
U[i,j] = int(U[i,j]/2.50)
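# Worked example of the binning above (hypothetical raw value): a raw entry of
# 10 gives -10*log10(10/1000) = 20 dB, which is below MAXdB and maps to class
# int(20/2.50) = 8; values that are inf, NaN or above 40 dB are first clamped
# to 40 dB and therefore end up in class int(40/2.50) = 16.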
plt.figure(2)
plt.hist(U[:,1],bins=64)
plt.ylabel('Number of occurence')
plt.xlabel('BER (-10log)')
plt.grid(True)
plt.title('histogram of BER')
plt.savefig('./hist_BER_umit_float.png')
Y = to_categorical(U[:,ID] )
numClass = Y.shape[1]
Xtest = X[ int(X.shape[0]*0.8):,:]
Ytest = Y[ int(X.shape[0]*0.8):]
Xtrain = X[ :int(X.shape[0]*0.8),:]
Ytrain = Y[ :int(X.shape[0]*0.8)]
# ---------------- construct NN ---------------------------------
tic = time.time()
model = Sequential()
model.add(Dense(256, activation='relu'))
#model.add(Dropout(0.2))
model.add(Dense(256, activation='relu'))
#model.add(Dropout(0.2))
model.add(Dense(256, activation='relu'))
#model.add(Dropout(0.2))
model.add(Dense(numClass, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, \
nesterov=True)
model.compile(loss='mse',
optimizer='Adam',
metrics=['accuracy'])
model.fit(Xtrain, Ytrain,
epochs=epochs,
batch_size=batch_size,
shuffle = True,
validation_split=0.1,
callbacks=[early_stop])
model.summary()
toc = time.time()
timeCost = toc - tic
print( "--- Totally %s seconds ---" %(timeCost))
# ---------------- eval NN ---------------------------------
score = model.evaluate(Xtest, Ytest, batch_size=128)
y_pred = model.predict(Xtest)
y_pred = np.argmax(y_pred,axis=1)
y_test = np.argmax(Ytest,axis=1)
acc = np.sum(y_pred==y_test)*1.0/len(y_test)
print( "--- acc %s ---" %(acc))
import scipy.io as sio
sio.savemat('DNN_Ypred.mat', {'y_pred':y_pred});
sio.savemat('DNN_Ytest.mat',{'y_test': y_test});
|
[
"cherry.figh0918@gmail.com"
] |
cherry.figh0918@gmail.com
|
bcd167bec77fb09db4e886c1b90754f3a3fb8aaf
|
c150eb515e39f81b7e324bb27b1a506ecd2e35a0
|
/project-base/album/migrations/0004_auto_20180401_1945.py
|
d885ade1352a1494f55c5cfa04449bc4da93e9d2
|
[] |
no_license
|
anwar03/PhotoShare
|
194a7ecc7663e14ab64355f9a203ae946f6239a5
|
6851945948be08176ca863f9a722f4168fc917a2
|
refs/heads/master
| 2020-03-07T09:41:37.218387
| 2018-04-05T13:38:30
| 2018-04-05T13:38:30
| 127,413,902
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,995
|
py
|
# Generated by Django 2.0.3 on 2018-04-01 19:45
import album.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('album', '0003_auto_20180331_1027'),
]
operations = [
migrations.RenameField(
model_name='album',
old_name='uploaded_at',
new_name='created_at',
),
migrations.RemoveField(
model_name='album',
name='album_name',
),
migrations.AddField(
model_name='album',
name='name',
field=models.CharField(blank=True, max_length=100),
),
migrations.AddField(
model_name='album',
name='password',
field=models.CharField(default=album.models.password_generator, max_length=6, unique=True),
),
migrations.AddField(
model_name='album',
name='slug',
field=models.SlugField(default=album.models.slug_generator, max_length=10, unique=True),
),
migrations.AddField(
model_name='image',
name='publisher',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, related_name='images', to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AddField(
model_name='image',
name='uploaded_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AlterField(
model_name='album',
name='publisher',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='album', to=settings.AUTH_USER_MODEL),
),
]
|
[
"anwarazim03@gmail.com"
] |
anwarazim03@gmail.com
|
1f5383f1a0601d5509be673f0da83b08ffd1c253
|
8a025f191ccfee9ffa05af4b0fa03110ab09d777
|
/tests.py
|
0f8fa65a9c15bc266631310491598ab31f919992
|
[
"MIT"
] |
permissive
|
quentinguidee/todo-cli
|
3c68451794d8d24e228ee2f8a8b5d97b2190650d
|
7e91579a9046ef6ce7720835550410ef068bbfbb
|
refs/heads/main
| 2023-06-01T23:57:53.959091
| 2021-06-20T13:01:47
| 2021-06-20T13:04:35
| 376,773,183
| 5
| 2
|
MIT
| 2021-06-19T08:51:10
| 2021-06-14T09:42:57
|
Python
|
UTF-8
|
Python
| false
| false
| 949
|
py
|
import sys
import unittest
from tests.e2e.test_e2e import TestE2E
from tests.unit_tests.test_storage import TestStorage
from tests.unit_tests.test_task import TestTask
from tests.unit_tests.test_task import TestTaskStatus
from tests.unit_tests.test_event import TestEvent
from tests.unit_tests.test_time import TestTime
def tests_suite():
suite = unittest.TestSuite([
unittest.TestLoader().loadTestsFromTestCase(TestTaskStatus),
unittest.TestLoader().loadTestsFromTestCase(TestTask),
unittest.TestLoader().loadTestsFromTestCase(TestTime),
unittest.TestLoader().loadTestsFromTestCase(TestEvent),
unittest.TestLoader().loadTestsFromTestCase(TestStorage),
unittest.TestLoader().loadTestsFromTestCase(TestE2E),
])
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
success = runner.run(tests_suite()).wasSuccessful()
sys.exit(1 if not success else 0)
|
[
"quentin.guidee@gmail.com"
] |
quentin.guidee@gmail.com
|
10d13a7fa211f2e897ffa0cba4545edf6a108753
|
97ae9eb19037ad15943baa5298a12a5dabcd86b5
|
/gridworld.py
|
12e26c19db0a00d1a6153ffb1e2b35108d923ed7
|
[] |
no_license
|
shunzh/Keepaway
|
2f6bf608a67e9163b6b8544761fdbfd63f120090
|
056ba40c7ad5e0c99f705b02e06cd4423f2f15d0
|
refs/heads/master
| 2021-01-21T13:17:52.518043
| 2016-04-18T23:42:32
| 2016-04-18T23:42:32
| 54,331,895
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,924
|
py
|
# gridworld.py
# ------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
import random
import sys
import mdp
import environment
import util
import optparse
import pickle
class Gridworld(mdp.MarkovDecisionProcess):
"""
Gridworld
"""
def __init__(self, grid, transition = None):
"""
        A gridworld might have a biased transition function.
        If so, indicate that with the transition argument.
        Otherwise, that argument is None and the default noise-based transition model is used.
"""
# layout
if type(grid) == type([]): grid = makeGrid(grid)
self.grid = grid
self.transition = transition
# parameters
self.livingReward = 0.0
self.noise = 0.2
def setLivingReward(self, reward):
"""
The (negative) reward for exiting "normal" states.
Note that in the R+N text, this reward is on entering
a state and therefore is not clearly part of the state's
future rewards.
"""
self.livingReward = reward
def setNoise(self, noise):
"""
The probability of moving in an unintended direction.
"""
self.noise = noise
def getPossibleActions(self, state):
"""
Returns list of valid actions for 'state'.
Note that you can request moves into walls and
that "exit" states transition to the terminal
state under the special action "done".
"""
if state == self.grid.terminalState:
return ()
x,y = state
if type(self.grid[x][y]) == int:
return ('exit',)
return ('north','west','south','east')
def getStates(self):
"""
Return list of all states.
"""
# The true terminal state.
states = [self.grid.terminalState]
for x in range(self.grid.width):
for y in range(self.grid.height):
if self.grid[x][y] != '#':
state = (x,y)
states.append(state)
return states
def getReward(self, state, action, nextState):
"""
Get reward for state, action, nextState transition.
Note that the reward depends only on the state being
departed (as in the R+N book examples, which more or
less use this convention).
"""
if state == self.grid.terminalState:
return 0.0
x, y = state
cell = self.grid[x][y]
if type(cell) == int or type(cell) == float:
return cell
return self.livingReward
def getStartState(self):
"""
Start state should be marked as 'S'
        If there are multiple start states, choose one randomly
"""
startStateSet = []
for x in range(self.grid.width):
for y in range(self.grid.height):
if self.grid[x][y] == 'S':
startStateSet.append((x, y))
if startStateSet == []:
            raise Exception('Grid has no start state')
else:
return random.choice(startStateSet)
def isTerminal(self, state):
"""
Only the TERMINAL_STATE state is *actually* a terminal state.
The other "exit" states are technically non-terminals with
a single action "exit" which leads to the true terminal state.
This convention is to make the grids line up with the examples
in the R+N textbook.
"""
return state == self.grid.terminalState
def getTransitionStatesAndProbs(self, state, action):
"""
Returns list of (nextState, prob) pairs
representing the states reachable
from 'state' by taking 'action' along
with their transition probabilities.
"""
if action not in self.getPossibleActions(state):
raise "Illegal action!"
if self.isTerminal(state):
return []
if self.transition != None and type(self.transition[state, action]) == type([]):
# when this transition mapping is initialized
# the transition is in util.Counter type. 0 for uninitialized entries.
return self.transition[state, action]
x, y = state
if type(self.grid[x][y]) == int or type(self.grid[x][y]) == float:
termState = self.grid.terminalState
return [(termState, 1.0)]
successors = []
northState = (self.__isAllowed(y+1,x) and (x,y+1)) or state
westState = (self.__isAllowed(y,x-1) and (x-1,y)) or state
southState = (self.__isAllowed(y-1,x) and (x,y-1)) or state
eastState = (self.__isAllowed(y,x+1) and (x+1,y)) or state
if action == 'north' or action == 'south':
if action == 'north':
successors.append((northState,1-self.noise))
else:
successors.append((southState,1-self.noise))
massLeft = self.noise
successors.append((westState,massLeft/2.0))
successors.append((eastState,massLeft/2.0))
if action == 'west' or action == 'east':
if action == 'west':
successors.append((westState,1-self.noise))
else:
successors.append((eastState,1-self.noise))
massLeft = self.noise
successors.append((northState,massLeft/2.0))
successors.append((southState,massLeft/2.0))
successors = self.__aggregate(successors)
return successors
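    # Worked example (hypothetical open cell): with the default noise of 0.2,
    # taking action 'north' from a state whose neighbours are all reachable
    # yields the successor list [(north, 0.8), (west, 0.1), (east, 0.1)], i.e.
    # the intended move with probability 1 - noise and each orthogonal slip
    # with probability noise / 2.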
def __aggregate(self, statesAndProbs):
counter = util.Counter()
for state, prob in statesAndProbs:
counter[state] += prob
newStatesAndProbs = []
for state, prob in counter.items():
newStatesAndProbs.append((state, prob))
return newStatesAndProbs
def __isAllowed(self, y, x):
if y < 0 or y >= self.grid.height: return False
if x < 0 or x >= self.grid.width: return False
return self.grid[x][y] != '#'
class BairdsGridworld(Gridworld):
"""
(0,1), (1,1), (2,1), (3,1), (4,1)
\ \ | / /
> > \ / < <
(2,0)->(3,0)
Rewards are all 0.
The world of Baird's Counter-example.
Its transition model is different from a normal gridworld.
So it's defined separately.
"""
def getPossibleActions(self, state):
"V1 to V5 can only go down to V6. V6 can stay or go right."
if self.isTerminal(state):
return []
elif state == (3, 0):
return ['exit']
else:
return ['south']
class GridworldEnvironment(environment.Environment):
def __init__(self, gridWorld):
self.gridWorld = gridWorld
self.reset()
def getCurrentState(self):
return self.state
def getPossibleActions(self, state):
return self.gridWorld.getPossibleActions(state)
def doAction(self, action):
successors = self.gridWorld.getTransitionStatesAndProbs(self.state, action)
sum = 0.0
rand = random.random()
state = self.getCurrentState()
for nextState, prob in successors:
sum += prob
if sum > 1.0:
                raise Exception('Total transition probability more than one; sample failure.')
if rand < sum:
reward = self.gridWorld.getReward(state, action, nextState)
self.state = nextState
return (nextState, reward)
        raise Exception('Total transition probability less than one; sample failure.')
def reset(self):
self.state = self.gridWorld.getStartState()
class Grid:
"""
A 2-dimensional array of immutables backed by a list of lists. Data is accessed
via grid[x][y] where (x,y) are cartesian coordinates with x horizontal,
y vertical and the origin (0,0) in the bottom left corner.
The __str__ method constructs an output that is oriented appropriately.
"""
def __init__(self, width, height, initialValue=' '):
self.width = width
self.height = height
self.data = [[initialValue for y in range(height)] for x in range(width)]
self.terminalState = 'TERMINAL_STATE'
def __getitem__(self, i):
return self.data[i]
def __setitem__(self, key, item):
self.data[key] = item
def __eq__(self, other):
if other == None: return False
return self.data == other.data
def __hash__(self):
return hash(self.data)
def copy(self):
g = Grid(self.width, self.height)
g.data = [x[:] for x in self.data]
return g
def deepCopy(self):
return self.copy()
def shallowCopy(self):
g = Grid(self.width, self.height)
g.data = self.data
return g
def _getLegacyText(self):
t = [[self.data[x][y] for x in range(self.width)] for y in range(self.height)]
t.reverse()
return t
def __str__(self):
return str(self._getLegacyText())
def makeGrid(gridString):
width, height = len(gridString[0]), len(gridString)
grid = Grid(width, height)
for ybar, line in enumerate(gridString):
y = height - ybar - 1
for x, el in enumerate(line):
grid[x][y] = el
return grid
def getCliffGrid():
grid = [[' ',' ',' ',' ',' '],
['S',' ',' ',' ',10],
[-100,-100, -100, -100, -100]]
return Gridworld(makeGrid(grid))
def getCliffGrid2():
grid = [[' ',' ',' ',' ',' '],
[8,'S',' ',' ',10],
[-100,-100, -100, -100, -100]]
return Gridworld(grid)
def getDiscountGrid():
grid = [[' ',' ',' ',' ',' '],
[' ','#',' ',' ',' '],
[' ','#', 1,'#', 10],
['S',' ',' ',' ',' '],
[-10,-10, -10, -10, -10]]
return Gridworld(grid)
def getBridgeGrid():
grid = [[ '#',-100, -100, -100, -100, -100, '#'],
[ 1, 'S', ' ', ' ', ' ', ' ', 10],
[ '#',-100, -100, -100, -100, -100, '#']]
return Gridworld(grid)
def getBookGrid():
grid = [[' ',' ',' ',+1],
[' ','#',' ',' '],
['S',' ',' ',' ']]
return Gridworld(grid)
def getMazeGrid():
grid = [[' ',' ',' ',+1],
['#','#',' ','#'],
[' ','#',' ',' '],
[' ','#','#',' '],
['S',' ',' ',' ']]
return Gridworld(grid)
def getRandomWalk():
grid = [[0,' ',' ','S',' ',' ',1]]
return Gridworld(grid)
def getFourRoomGrid():
grid = [['S','S','S','S','S','#',' ',' ',' ',' ',' '],
['S','S','S','S','S','#',' ',' ',' ',' ',' '],
['S','S','S','S','S',' ',' ',' ',' ',' ',' '],
['S','S','S','S','S','#',' ',' ',' ',' ',' '],
['S','S','S','S','S','#',' ',' ',' ',' ',' '],
['#',' ','#','#','#','#','#','#',' ','#','#'],
[' ',' ',' ',' ',' ','#',' ',' ',' ',' ',' '],
[' ',' ',' ',' ',' ','#',' ',' ',' ',' ',' '],
[' ',' ',' ',' ',' ','#',' ',' ',' ',' ',' '],
[' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',1],
[' ',' ',' ',' ',' ','#',' ',' ',' ',' ',' ']]
return Gridworld(grid)
def getBairdsGrid():
"""
(0,1), (1,1), (2,1), (3,1), (4,1)
\ \ | / /
> > \ / < <
(2,0)->(3,0)
"""
grid = [['S','S','S','S','S'],
['#','#',' ','#','#']]
transition = util.Counter()
# first row
for i in range(0, 5):
transition[(i, 1), 'south'] = [((2, 0), 1)]
# second row
transition[(2, 0), 'south'] = [((2, 0), .99), ('TERMINAL_STATE', .01)]
return BairdsGridworld(grid, transition)
def getUserAction(state, actionFunction):
"""
Get an action from the user (rather than the agent).
Used for debugging and lecture demos.
"""
import graphicsUtils
action = None
while True:
keys = graphicsUtils.wait_for_keys()
if 'Up' in keys: action = 'north'
if 'Down' in keys: action = 'south'
if 'Left' in keys: action = 'west'
if 'Right' in keys: action = 'east'
if 'q' in keys: sys.exit(0)
if action == None: continue
break
actions = actionFunction(state)
if action not in actions:
action = actions[0]
return action
def printString(x): print x
def runEpisode(agent, environment, discount, decision, display, message, pause, episode):
returns = 0
totalDiscount = 1.0
environment.reset()
if 'startEpisode' in dir(agent): agent.startEpisode()
message("BEGINNING EPISODE: "+str(episode)+"\n")
while True:
# DISPLAY CURRENT STATE
state = environment.getCurrentState()
display(state)
pause()
# END IF IN A TERMINAL STATE
actions = environment.getPossibleActions(state)
if len(actions) == 0:
message("EPISODE "+str(episode)+" COMPLETE: RETURN WAS "+str(returns)+"\n")
return returns
# GET ACTION (USUALLY FROM AGENT)
action = decision(state)
if action == None:
            raise Exception('Error: Agent returned None action')
# EXECUTE ACTION
nextState, reward = environment.doAction(action)
message("Started in state: "+str(state)+
"\nTook action: "+str(action)+
"\nEnded in state: "+str(nextState)+
"\nGot reward: "+str(reward)+"\n")
# UPDATE LEARNER
if 'observeTransition' in dir(agent):
agent.observeTransition(state, action, nextState, reward)
returns += reward * totalDiscount
totalDiscount *= discount
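# Worked example of the discounted return accumulated above (hypothetical
# rewards): with discount 0.9 and per-step rewards 0, 0, 1 the episode return
# is 0*1.0 + 0*0.9 + 1*0.81 = 0.81, because totalDiscount is multiplied by the
# discount only after each reward has been added.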
def parseOptions():
optParser = optparse.OptionParser()
optParser.add_option('-d', '--discount',action='store',
type='float',dest='discount',default=0.9,
help='Discount on future (default %default)')
optParser.add_option('-r', '--livingReward',action='store',
type='float',dest='livingReward',default=0.0,
metavar="R", help='Reward for living for a time step (default %default)')
optParser.add_option('-n', '--noise',action='store',
type='float',dest='noise',default=0.2,
metavar="P", help='How often action results in ' +
'unintended direction (default %default)' )
optParser.add_option('-e', '--epsilon',action='store',
type='float',dest='epsilon',default=0.3,
metavar="E", help='Chance of taking a random action in q-learning (default %default)')
optParser.add_option('-x', '--lambda',action='store',
type='float',dest='lambdaValue',default=0.9,
help='Lambda for SARSA(lambda) (default %default)')
optParser.add_option('-l', '--learningRate',action='store',
type='float',dest='learningRate',default=0.1,
metavar="P", help='TD learning rate (default %default)' )
optParser.add_option('-i', '--iterations',action='store',
type='int',dest='iters',default=10,
metavar="K", help='Number of rounds of value iteration (default %default)')
optParser.add_option('-k', '--episodes',action='store',
type='int',dest='episodes',default=1,
metavar="K", help='Number of epsiodes of the MDP to run (default %default)')
optParser.add_option('-g', '--grid',action='store',
metavar="G", type='string',dest='grid',default="BookGrid",
help='Grid to use (case sensitive; options are BookGrid, BridgeGrid, CliffGrid, MazeGrid, default %default)' )
optParser.add_option('-w', '--windowSize', metavar="X", type='int',dest='gridSize',default=150,
help='Request a window width of X pixels *per grid cell* (default %default)')
optParser.add_option('-a', '--agent',action='store', metavar="A",
type='string',dest='agent',default="random",
help='Agent type (options are \'random\', \'value\' and \'q\', default %default)')
optParser.add_option('-f', '--feature',action='store', metavar="A",
type='string',dest='extractor',default="IdentityExtractor",
help='Type of extractor if function approximation is applied.')
optParser.add_option('-t', '--text',action='store_true',
dest='textDisplay',default=False,
help='Use text-only ASCII display')
optParser.add_option('-p', '--pause',action='store_true',
dest='pause',default=False,
help='Pause GUI after each time step when running the MDP')
optParser.add_option('-q', '--quiet',action='store_true',
dest='quiet',default=False,
help='Skip display of any learning episodes')
optParser.add_option('-y', '--replace',action='store_true',
dest='replace',default=False,
help='Replacing trace applied')
optParser.add_option('-z', '--fileoutput',action='store_true',
dest='fileoutput',default=False,
help='File output')
optParser.add_option('-s', '--speed',action='store', metavar="S", type=float,
dest='speed',default=1.0,
help='Speed of animation, S > 1.0 is faster, 0.0 < S < 1.0 is slower (default %default)')
optParser.add_option('-m', '--manual',action='store_true',
dest='manual',default=False,
help='Manually control agent')
optParser.add_option('-v', '--valueSteps',action='store_true' ,default=False,
help='Display each step of value iteration')
opts, args = optParser.parse_args()
if opts.manual and opts.agent != 'q':
print '## Disabling Agents in Manual Mode (-m) ##'
opts.agent = None
# MANAGE CONFLICTS
if opts.textDisplay or opts.quiet:
# if opts.quiet:
opts.pause = False
# opts.manual = False
if opts.manual:
opts.pause = True
return opts
def checkPolicy(agent):
"""
FIXME should be generalized!
the difference between optimal policy and this policy
"""
p = agent.getPolicy
consistence = [p((0, 0)) == 'north', p((0, 1)) == 'north', p((0, 2)) == 'east', p((1, 2)) == 'east', p((2, 2)) == 'east']
return consistence.count(True)
if __name__ == '__main__':
opts = parseOptions()
###########################
# GET THE GRIDWORLD
###########################
import gridworld
mdpFunction = getattr(gridworld, "get"+opts.grid)
mdp = mdpFunction()
mdp.setLivingReward(opts.livingReward)
mdp.setNoise(opts.noise)
env = gridworld.GridworldEnvironment(mdp)
###########################
# GET THE DISPLAY ADAPTER
###########################
import textGridworldDisplay
display = textGridworldDisplay.TextGridworldDisplay(mdp)
if not opts.textDisplay:
import graphicsGridworldDisplay
display = graphicsGridworldDisplay.GraphicsGridworldDisplay(mdp, opts.gridSize, opts.speed)
display.start()
###########################
# GET THE AGENT
###########################
import valueIterationAgents, qlearningAgents, sarsaLambdaAgents
a = None
if opts.agent == 'value':
a = valueIterationAgents.ValueIterationAgent(mdp, opts.discount, opts.iters)
elif opts.agent == 'valueApproximate':
actionFn = lambda state: mdp.getPossibleActions(state)
qLearnOpts = {'gamma': opts.discount,
'iterations': opts.iters,
'mdp': mdp,
'alpha': opts.learningRate,
'epsilon': opts.epsilon,
'extractor': opts.extractor,
'actionFn': actionFn}
a = valueIterationAgents.ApproximateValueIterAgent(**qLearnOpts)
elif opts.agent == 'q':
#env.getPossibleActions, opts.discount, opts.learningRate, opts.epsilon
#simulationFn = lambda agent, state: simulation.GridworldSimulation(agent,state,mdp)
actionFn = lambda state: mdp.getPossibleActions(state)
qLearnOpts = {'gamma': opts.discount,
'alpha': opts.learningRate,
'epsilon': opts.epsilon,
'actionFn': actionFn}
a = qlearningAgents.QLearningAgent(**qLearnOpts)
elif opts.agent == 'qApproximate':
actionFn = lambda state: mdp.getPossibleActions(state)
qLearnOpts = {'gamma': opts.discount,
'alpha': opts.learningRate,
'epsilon': opts.epsilon,
'extractor': opts.extractor,
'actionFn': actionFn}
a = qlearningAgents.ApproximateQAgent(**qLearnOpts)
elif opts.agent == 'sarsa':
actionFn = lambda state: mdp.getPossibleActions(state)
qLearnOpts = {'gamma': opts.discount,
'alpha': opts.learningRate,
'epsilon': opts.epsilon,
'lambdaValue' : opts.lambdaValue,
'replace' : opts.replace,
'actionFn': actionFn}
a = sarsaLambdaAgents.SarsaLambdaAgent(**qLearnOpts)
elif opts.agent == 'sarsaApproximate':
actionFn = lambda state: mdp.getPossibleActions(state)
qLearnOpts = {'gamma': opts.discount,
'alpha': opts.learningRate,
'epsilon': opts.epsilon,
'extractor': opts.extractor,
'lambdaValue' : opts.lambdaValue,
'replace' : opts.replace,
'actionFn': actionFn}
a = sarsaLambdaAgents.ApproximateSarsaAgent(**qLearnOpts)
elif opts.agent == 'random':
# # No reason to use the random agent without episodes
if opts.episodes == 0:
opts.episodes = 10
class RandomAgent:
def getAction(self, state):
return random.choice(mdp.getPossibleActions(state))
def getValue(self, state):
return 0.0
def getQValue(self, state, action):
return 0.0
def getPolicy(self, state):
"NOTE: 'random' is a special policy value; don't use it in your code."
return 'random'
def update(self, state, action, nextState, reward):
pass
a = RandomAgent()
else:
        if not opts.manual: raise Exception('Unknown agent type: ' + opts.agent)
###########################
# RUN EPISODES
###########################
# DISPLAY Q/V VALUES BEFORE SIMULATION OF EPISODES
if not opts.manual and opts.agent == 'value':
if opts.valueSteps:
for i in range(opts.iters):
tempAgent = valueIterationAgents.ValueIterationAgent(mdp, opts.discount, i)
display.displayValues(tempAgent, message = "VALUES AFTER "+str(i)+" ITERATIONS")
display.pause()
display.displayValues(a, message = "VALUES AFTER "+str(opts.iters)+" ITERATIONS")
display.pause()
display.displayQValues(a, message = "Q-VALUES AFTER "+str(opts.iters)+" ITERATIONS")
display.pause()
# FIGURE OUT WHAT TO DISPLAY EACH TIME STEP (IF ANYTHING)
displayCallback = lambda x: None
if not opts.quiet:
if opts.manual and opts.agent == None:
displayCallback = lambda state: display.displayNullValues(state)
else:
if opts.agent == 'random': displayCallback = lambda state: display.displayValues(a, state, "CURRENT VALUES")
if opts.agent == 'value': displayCallback = lambda state: display.displayValues(a, state, "CURRENT VALUES")
if opts.agent == 'q': displayCallback = lambda state: display.displayQValues(a, state, "CURRENT Q-VALUES")
if opts.agent == 'qApproximate': displayCallback = lambda state: display.displayQValues(a, state, "CURRENT Q-VALUES")
if opts.agent == 'sarsa': displayCallback = lambda state: display.displayQValues(a, state, "CURRENT Q-VALUES")
if opts.agent == 'sarsaApproximate': displayCallback = lambda state: display.displayQValues(a, state, "CURRENT Q-VALUES")
messageCallback = lambda x: printString(x)
if opts.quiet:
messageCallback = lambda x: None
# FIGURE OUT WHETHER TO WAIT FOR A KEY PRESS AFTER EACH TIME STEP
pauseCallback = lambda : None
if opts.pause:
pauseCallback = lambda : display.pause()
# FIGURE OUT WHETHER THE USER WANTS MANUAL CONTROL (FOR DEBUGGING AND DEMOS)
if opts.manual:
decisionCallback = lambda state : getUserAction(state, mdp.getPossibleActions)
else:
decisionCallback = a.getAction
# RUN EPISODES
if opts.episodes > 0:
print
print "RUNNING", opts.episodes, "EPISODES"
print
returns = 0
#policyFile = open('policy' + opts.agent + str(opts.lambdaValue), 'a')
#policyFile.write(str(opts.iters) + ' ' + str(checkPolicy(a)) + '\n')
for episode in range(1, opts.episodes+1):
thisReturn = runEpisode(a, env, opts.discount, decisionCallback, displayCallback, messageCallback, pauseCallback, episode)
a.final(None)#fixme
"""
if opts.agent == 'qApproximate' or opts.agent == 'sarsaApproximate':
#a.final('TERMINAL_STATE')
f = open("lambda" + str(a.lambdaValue), 'a')
# get values from value iteration
values = a.getValues(mdp.getStates())
valueIter = pickle.load(open("valueIterAnswer"))
# calculate rms and outoput
f.write(str(episode) + ' ' + str(values.rms(valueIter)) + '\n')
f.close()
"""
returns += thisReturn
if opts.episodes > 0:
print
print "AVERAGE RETURNS FROM START STATE: "+str((returns+0.0) / opts.episodes)
print
print
# DISPLAY POST-LEARNING VALUES / Q-VALUES
if opts.agent != 'random' and not opts.manual:
if not opts.fileoutput:
# original output by gridworld
display.displayQValues(a, message = "Q-VALUES AFTER "+str(opts.episodes)+" EPISODES")
display.pause()
display.displayValues(a, message = "VALUES AFTER "+str(opts.episodes)+" EPISODES")
display.pause()
if (opts.agent == 'value') and not opts.manual:
if opts.fileoutput:
values = a.getValues(mdp.getStates())
f = open("valueIterAnswer", "w")
pickle.dump(values, f)
|
[
"menie482@gmail.com"
] |
menie482@gmail.com
|
4de8d566f66a8afe643d08ee77b4a4f754c26a21
|
a6fe3f180d4c7d418579e4b8186a80ba569d999d
|
/django/blog/blogproject/views.py
|
ef19f4980221b15f591a86fb7dfead53e3e217e9
|
[] |
no_license
|
15871687941/PyCode
|
0f773038ee713b6a16ab9ab7ae7b6823b9b85b3e
|
61f8c0a7aa41574e31765155142d8c16e5759582
|
refs/heads/master
| 2020-04-02T09:24:58.258959
| 2018-10-23T08:12:31
| 2018-10-23T08:12:31
| 154,288,569
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from .models import Post
def index(request):
post_list = Post.objects.all()
return render(request, 'blog/index.html', context={'title': '博客', 'post_list': post_list})
|
[
"1806521378@qq.com"
] |
1806521378@qq.com
|
093225e40403e92a439ffb6fbb4fd88116bf79bf
|
ddfc0775cbd90f8fff2ee11c7e6b17e72f26b273
|
/crawler/zip_5o_miles.py
|
e4a4aef966869a9c64286e477746cce3e4e3645b
|
[] |
no_license
|
akshaytanmane150294/Dealer_scraper
|
b73389a8e34a6564b176eced9056bdae8494b44d
|
82bbf2f2e2a82c934e14616aa2462ee8a5cfc40d
|
refs/heads/master
| 2020-07-16T13:38:15.977340
| 2019-09-02T07:39:52
| 2019-09-02T07:39:52
| 205,798,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,926
|
py
|
from _dbus_bindings import String
def _zip():
arr = ["17087","95666","53936","83330","17006","42157","57562","28349","85743","38471","81212","82058","84721","62469","97635","87943","58561","95421","24211","47930","64761","56025","49635","28376","69128","84766","30454","59230","32350","12969","31312","12455","65263","01550","48894","88434","36374","35570","97741","68936","74370","69165","75844","59901","99731","73044","81239","14747","55749","88435","71452","32352","00602","55725","89301","00802","75119","89445","97904","68636","50632","30189","53801","61440","79849","85533","83286","48462","36875","61721","84315","66549","02050","54835","93675","93622","70589","40815","72102","15055","14877","59255","69301","45340","72522","01222","50447","82435","72346","82701","04957","53572","12170","52621","71837","38488","49733","03435","85242","70453","25113","95988","87420","82229","16373","78063","59486","27886","47557","96727","49286","38647","57476","65074","67867","70036","78383","04764","68047","87551","96137","20677","57365","39345","78639","41310","96756","36003","95960","68753","55336","03777","18938","39464","00957","35058","74464","23038","71999","72025","84034","83631","86047","73658","34120","76854","05442","77483","98801","97492","71663","35960","95618","32455","29440","76234","99572","81425","38382","76372","35466","29831","54529","97065","83209","82844","63336","49011","37128","75692","36545","97640","57748","49913","98814","42048","24555","46741","71841","54107","89419","58458","85603","59022","80447","32456","84072","40444","94015","03833","57315","39641","10925","43011","48759","24981","23970","80116","97467","59524","68632","78829","58718","58564","80424","67059","37865","76943","77974","61414","67445","13031","84532","84069","93451","72760","14072","71373","77830","97484","89027","85322","82514","82332","99775","58009","79714","97365","67669","49782","46501","59347","26386","97877","74039","21234","63556","60958","83450","81089","55751","85929","75457","47840","16411","89502","99686","32583","29045","80644","79082","58801","95345","42701","39058","97848","58647","59344","89017","85324","96022","85640","98381","57223","36748","57658","88345","95385","57770","27617","50645","56245","76845","49460","17754","76525","64489","42634","28754","78638","58634","31087","81154","76682","13669","67025","96793","54454","97722","68464","79015","97107","48740","73005","97024","95546","59521","33786","29685","59337","98582","56685","87544","74501","44889","02554","37340","77655","67879","73717","73673","67464","59069","79561","69129","72938","54669","67520","39769","91007","38645","12883","29340","32776","59405","35043","04462","29666","23851","59722","27265","95437","54742","56762","67753","59865","13650","56566","13331","79548","58321","74878","34990","87014","28681","41189","79014","57620","73945","82646","58523","14485","87750","29471","72442","78828","23161","83544","56379","68930","88045","84533","79029","18347","92280","81045","98239","48632","31599","28571","96006","08302","83624","61053","79511","52305","82240","77664","38924","97826","50116","63775","81084","97885","99138","45601","33960","34229","21540","37140","28906","56381","27925","59343","65470","65656","06516","32694","93256","88220","93203","21659","88230","45050","45729","66020","34448","45872","79355","68305","44288","97360","92347","59739","88118","23418","68873","85643","58455","78012","53153","38675","99012","16836","95012","74004","78560","62269","70514","97712","31512","72645","39426","83466","66835","57022","74523","64622","99347","49849","30223","33060","30633","878
21","97414","32046","65637","13361","47338","76934","51501","36582","24920","98548","62533","69040","54855","76049","63943","70652","79734","29571","83860","49883","93043","84635","59059","47234","81638","63473","42204","73933","54119","25443","99330","92332","24549","54006","56455","82710","25638","67741","58313","32908","69366","73855","42303","89406","13827","75937","71232","63068","59867","57262","65785","81610","57301","04024","54021","65566","61325","97534","73447","81320","76437","55976","87044","67839","67122","87325","82052","96017","49127","49925","57564","88318","81630","83226","74646","58018","75758","76384","59339","04743","66775","86444","97627","86336","57481","92305","51033","28422","89010","04622","73548","92257","76578","98952","45623","33839","60135","66064","56567","89420","38572","83120","59751","93434","57780","88005","31063","71644","56626","53049","22743","68860","36456","98631","49715","83302","61847","03579","82932","97913","24343","59078","71268","79356","84528","32359","49449","69211","56137","44629","51442","36278","62899","99159","88414","04554","78938","57724","86022","59875","89161","82725","63867","92055","77401","83610","59451","28174","47167","62917","04945","31721","75424","95542","79234","15945","83414","40359","50212","50858","88410","71730","57001"]
return arr
|
[
"noreply@github.com"
] |
akshaytanmane150294.noreply@github.com
|
35fa9008f4968f017ed8097fed6ae73e343e2a9b
|
bdbb6000355a717ce55cabd45525700643ab5b5f
|
/Version 2a 050321/mycopi_default_programme.py
|
329bbc638e1aaf77898a5d63f82b2b7cf190a139
|
[] |
no_license
|
Trober-Llemchit/MyCoPi
|
abf8c4817f00541d029e9d695728b5176cd7549a
|
71913d64e9b136417c525243bfba2732bf63fa79
|
refs/heads/main
| 2023-03-15T15:46:18.515042
| 2021-03-05T12:54:24
| 2021-03-05T12:54:24
| 344,796,221
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,649
|
py
|
#mycopi_default_programme.py
import mycopi_convert
def address_plus_1(address):
dec_address=int(address,16)
dec_address+=1
hex_address=hex(dec_address)
address=hex_address[2:].upper()
return address
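# Hedged usage sketch: address_plus_1('0F') returns '10' and address_plus_1('9')
# returns 'A', i.e. the hexadecimal address is incremented by one and returned
# in upper case without the '0x' prefix.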
def details():
print("1110: ex.13: count")
print("1101: ex.3: AD/PWM")
print("1011: ex.4: random")
print("0111: ex.16: stop on S1")
print("0011: ex.33: start/stop")
print("else: ex. 2 LED blink")
def starts():
print("20: ex.1: 2 LED blink")
print("25: ex.13: count")
print("2A: ex.3: AD/PWM")
print("30: ex.4: random")
print("34: ex.16: stop on S1")
print("40: ex.40: start/stop")
print("80: ex.32: morse")
print("A0: ex.36: combination lock(continues at B0)")
def make_dict():
short_code_0=['64','51','4E','80','C3','98','82','95','4D','80','C3','9E','82','9A','4B','81']
short_code_1=['C3','94','83','90','47','81','C3','9A','83','94','43','82','C3','90','84','90']
short_code_2=['11','28','18','28','34','71','54','59','26','34','69','54','59','26','34','FF']
short_code_3=['54','CE','71','33','22','CC','32','40','22','71','54','CE','34','39','FF','FF']
short_code_4=['86','D0','40','71','54','23','CD','34','D8','40','54','3B','FF','FF','FF','FF']
short_code_5=['4F','93','45','53','19','11','21','19','11','21','19','11','20','B4','10','E0']
short_code_6=['23','CE','32','23','CC','31','E0','FF','23','CF','32','23','CD','31','E0','FF']
short_code_7=['CC','31','40','54','23','CE','32','CF','E0','CC','33','71','23','CC','31','3C']
short_code_8=['8C','D2','26','D0','26','D0','26','D0','26','28','D2','26','D0','26','D0','26']#Morse: SR call changed from 85 to 8C, BK changed to JP
short_code_9=['D2','30','FF','FF','FF','FF','FF','FF','FF','FF','FF','FF','FF','FF','FF','FF']#continuation of code_8
short_code_A=['87','43','51','D0','C3','30','11','45','51','D0','C3','33','10','42','51','D0']#comb. lock
short_code_B=['C3','30','17','4F','59','30','FF','FF','FF','FF','FF','FF','FF','FF','FF','FF']#continuation of code_A
short_code_C=['4F','93','45','53','11','25','B4','10','E0','FF','FF','FF','FF','FF','FF','FF']#new version of Morse sub-routine
code_list=[short_code_0,short_code_1,short_code_2,short_code_3,short_code_4,short_code_5,short_code_6,short_code_7,short_code_8,short_code_9,short_code_A,short_code_B,short_code_C]
address='00'
def_prog={}
for i in code_list:
code=i
page=address[0]
sub_address=address[1]
for i in code:
inst=i[0]
data=i[1]
def_prog[page+sub_address]=[inst,data]
sub_address=address_plus_1(sub_address)
page=mycopi_convert.hex_to_decimal(page)
page+=1
page=mycopi_convert.hex_from_decimal(page)
address=page+'0'
return def_prog
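# Hedged sketch of the resulting mapping: the first code list lands on page '0',
# so def_prog['00'] == ['6', '4'] and def_prog['01'] == ['5', '1'];
# short_code_1 lands on page '1' starting with def_prog['10'] == ['C', '3'],
# i.e. each two-character hex address maps to an [instruction nibble,
# data nibble] pair.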
|
[
"noreply@github.com"
] |
Trober-Llemchit.noreply@github.com
|
6acd05799d3e1f656cf1508204ec1ca8480faba4
|
a33d41d895912c7c15f3430f2fee9ff638234e84
|
/ReportCreatorGui/XmlAnalysis.py
|
d4427787fcd86aed36ab53028f55750169cf503a
|
[] |
no_license
|
qbvn/PyCharm-Projects
|
9047f6ba0582c801fc41eba23108189a723d9c84
|
da1e00fa478f641732e86bf71342abbbeba975c7
|
refs/heads/master
| 2021-07-23T20:23:16.522792
| 2021-05-06T11:14:05
| 2021-05-06T11:14:05
| 64,391,355
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,663
|
py
|
# -*- coding: utf-8 -*-
import os.path
import sys
# import wx
from datetime import datetime
from docx import Document, opc
from docx.shared import Pt, RGBColor
from NiXml import StimulusProfile, Testrun, ErrorElements
import XmlParser
class ReportGenerate:
def __init__(self, xml_file, ms_word_file):
"""
        creates an MS Word report from an XML file.
        :param xml_file: path to the XML result file to analyse
        :param ms_word_file: MS Word template name (currently unused; "Report_Template.docx" is hard-coded)
"""
self.xml_file = xml_file
self.ms_word_file_template = "Report_Template.docx"
self.xml_parser = XmlParser.XmlParser(self.xml_file)
self.testrun_l = []
self.createTestruns()
self.errors_l = []
## statistic variables ##
self.nTestruns = len(self.testrun_l)
if self.nTestruns == 0:
# TODO: Outputdialog
print("Keine Testruns in XML File gefunden.")
print("Programm wird beendet.")
#sys.exit()
self.count_ok_testruns = 0
self.count_nok_testruns = 0
self.count_ntestruns = 0
self.runtime_testruns = 0
self.analyse_testruns()
self.StimulusProfileInfo = self.createStimulusProfile()
self.analyse_errors()
# self.create_ms_word_report()
def analyse_errors(self):
"""
create error elements
:return:
"""
for error_el in self.xml_parser.get_error_elements():
self.errors_l.append(ErrorElements(error_el))
def analyse_testruns(self):
"""
count ok/nok in testruns and summarise runningtime of each testrun
:return: None
"""
for test in self.testrun_l:
if "Passed" in test.result:
self.count_ok_testruns = self.count_ok_testruns + 1
else:
self.count_nok_testruns = self.count_nok_testruns + 1
self.count_ntestruns = self.count_ntestruns + 1
self.runtime_testruns = self.runtime_testruns + test.runningTime_seconds
def createTestruns(self):
"""
create Testrun xml tree objects
:return: None
"""
for el in self.xml_parser.get_testruns():
self.testrun_l.append(Testrun(el))
def createStimulusProfile(self):
"""
create StimulusProfile xml tree objects
:return: None
"""
return StimulusProfile(self.xml_parser.get_root_tree())
def create_ms_word_report(self, outputDir):
"""
Creates the microsoft word report from template
:return: None
"""
try:
document = Document(self.ms_word_file_template)
paragraph = document.add_paragraph()
# print sections to document
self.print_section_StimulusProfile(paragraph)
self.print_section_testrun(paragraph)
self.print_section_statistics(paragraph)
if len(self.errors_l):
self.print_section_error(paragraph)
date = datetime.now().isoformat()
date = date.replace(":", "_")
document.save(os.path.join(outputDir, "Report_Date_" + date + ".docx"))
except opc.exceptions.PackageNotFoundError:
# TODO: call dialog
print("Word Template not found" + self.ms_word_file)
raise
except IOError:
# TODO: call dialog
print("Report konnte nicht erstellt werden.")
raise
        except Exception:
print("Report Erstellung fehlgeschlagen")
raise
def print_section_StimulusProfile(self, paragraph):
"""
print stimulus profile information
:param paragraph: python docx paragraph
:return:
"""
self.add_formated_run(paragraph, "Stimulus Profile:\n\n", 11, True)
self.add_formated_run(paragraph, "Name:\t", 9.5, True)
self.add_formated_run(paragraph, self.StimulusProfileInfo.name + "\n", 9.5)
self.add_formated_run(paragraph, "Pfad:\t", 9.5, True)
self.add_formated_run(paragraph, self.StimulusProfileInfo.path + "\n", 9.5)
        # If a description is present
if self.StimulusProfileInfo.description:
self.add_formated_run(paragraph, "Beschreibung:\t", 9.5, True)
self.add_formated_run(paragraph, self.StimulusProfileInfo.description + "\n\n", 9.5)
else:
self.add_newline(paragraph, 2)
def print_section_testrun(self, paragraph):
"""
print all Testruns to document
:param paragraph: python docx paragraph
:return:
"""
self.add_formated_run(paragraph, "Testrun:\n", 11, True)
self.draw_hline(paragraph, 95)
self.add_newline(paragraph, 1)
for i, testruns in enumerate(self.testrun_l):
i = i + 1
self.add_formated_run(paragraph, "Nr." + str(i) + "\n", 11, True)
self.add_formated_run(paragraph, "Name:\t", 11, True)
self.add_formated_run(paragraph, testruns.name + "\n", 9.5)
self.add_formated_run(paragraph, "Pfad:\t", 11, True)
self.add_formated_run(paragraph, testruns.path + "\n", 9.5)
            # If a description is present
if testruns.description:
self.add_formated_run(paragraph, "Beschreibung:\t", 9.5, True)
self.add_formated_run(paragraph, testruns.description + "\n\n", 9.5)
else:
self.add_newline(paragraph, 2)
self.add_formated_run(paragraph, "Startzeit:\t", 11, True)
self.add_formated_run(paragraph, testruns.starttime.strftime("Datum %d.%m.%Y %H:%M:%S") + "\n", 9.5,
color=[165, 42, 42])
self.add_formated_run(paragraph, "Endzeit:\t", 11, True)
self.add_formated_run(paragraph, testruns.endtime.strftime("Datum %d.%m.%Y %H:%M:%S") + "\n", 9.5,
color=[165, 42, 42])
self.add_formated_run(paragraph, "Laufzeit:\t", 11, True)
self.add_formated_run(paragraph, str(testruns.timedelta.total_seconds()) + " Sekunden\n", 9.5,
color=[165, 42, 42])
self.add_formated_run(paragraph, "Ergebnis:\t", 11, True)
if "Passed" in testruns.result:
self.add_formated_run(paragraph, "OK", 9.5, color=[0, 176, 80])
else:
self.add_formated_run(paragraph, "NOK", 9.5, color=[255, 0, 0])
self.add_newline(paragraph, 1)
self.draw_hline(paragraph, 95)
self.add_newline(paragraph, 2)
def print_section_error(self, paragraph):
"""
print all errors
        :param paragraph: python docx paragraph
:return: None
"""
self.add_newline(paragraph, 5)
self.add_formated_run(paragraph, "Error:\n", 11, True)
self.draw_hline(paragraph, 95)
self.add_newline(paragraph, 1)
for i, errors in enumerate(self.errors_l):
i = i + 1
self.add_formated_run(paragraph, "Nr." + str(i) + "\n", 11, True)
self.add_formated_run(paragraph, "Name:\t", 11, True)
self.add_formated_run(paragraph, errors.name + "\n", 9.5)
self.add_formated_run(paragraph, "ID:\t", 11, True)
self.add_formated_run(paragraph, errors.ID + "\n", 9.5)
self.add_formated_run(paragraph, "Beschreibung:\t", 11, True)
self.add_formated_run(paragraph, errors.ErrorDescription + "\n", 9.5)
self.draw_hline(paragraph, 95)
def print_section_statistics(self, paragraph):
"""
:param paragraph:
:return:
"""
self.add_formated_run(paragraph, "Zusammenfassung:\n", 11, True)
self.add_newline(paragraph, 1)
self.add_formated_run(paragraph, "Testruns ", 11, bold_flag=True)
self.add_formated_run(paragraph, "OK", 9.5, bold_flag=True, color=[0, 176, 80])
self.add_formated_run(paragraph,
" :\t\t" + str(self.count_ok_testruns) + "/" + str(self.count_ntestruns) + "\n", 9.5)
self.add_formated_run(paragraph, "Testruns ", 11, bold_flag=True)
self.add_formated_run(paragraph, "NOK", 9.5, bold_flag=True, color=[255, 0, 0])
self.add_formated_run(paragraph, " :\t" + str(self.count_nok_testruns) + "/" + str(self.count_ntestruns) + "\n",
9.5)
self.add_formated_run(paragraph, "Gesamtlaufzeit:\t", 11, True)
self.add_formated_run(paragraph, str(self.runtime_testruns), 9.5)
def add_formated_run(self, paragraph, text, size=10, bold_flag=False, color=None):
"""
wrapper function for formatted printing.
Print at the current cursor position
:param paragraph: paragraph object
:param text: text which will be printed to document
:param size: fontsize
:param bold_flag: True or False
:param color: rgb colorcode as pyhton list [r,g,b]
:return:
"""
run = paragraph.add_run(text)
run.font.size = Pt(size)
run.font.bold = bold_flag
if color:
run.font.color.rgb = RGBColor(*color)
def draw_hline(self, paragraph, length):
"""
draw a horizontal lines from underscores at the current cursor position
:param paragraph: paragraph object
:param length: length of summarised underscores
:return:
"""
underline = "_"
for uline in range(length):
underline = underline + "_"
run = paragraph.add_run(underline)
def add_newline(self, paragraph, n):
"""
add a newline at the current cursor position
:param paragraph: paragraph object
:param n: number of newlines
:return:
"""
newlines = ""
for el in range(n):
newlines = newlines + "\n"
run = paragraph.add_run(newlines)
# def Warn(parent, message, caption='Warning!'):
# dlg = wx.MessageDialog(parent, message, caption, wx.OK | wx.ICON_EXCLAMATION)
# dlg.ShowModal()
# dlg.Destroy()
def XmlAnalysis(xmlFile, outputDir):
if not os.path.isfile(xmlFile):
print("FEHLER: Konnte XML Datei nicht finden->" + xmlFile)
print("Programm ende")
sys.exit(1)
if not os.path.isdir(outputDir):
print("FEHLER: Ziel Ordner für Report nicht gefunden -> " + outputDir)
print("Programm ende")
sys.exit(1)
RG = ReportGenerate(xmlFile, "Report_Template.docx")
RG.create_ms_word_report(outputDir)
    # sys.exit(0)  # success
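# Example invocation (hypothetical paths, shown for illustration only):
#   XmlAnalysis("C:/results/testrun.xml", "C:/reports")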
|
[
"quocbinhtrinh@gmail.com"
] |
quocbinhtrinh@gmail.com
|
8cc9ca36a5d4db817f08b76168f2d46d98b3a192
|
220058941fd9810a580b66198c0f921ec95c1650
|
/walmart/pipelines.py
|
c57e3846edf0ce00e167808ada5f5b0ea5b70f13
|
[] |
no_license
|
githubmaster1987/walmart_scrapy
|
d6b3ce9afacc785258bd66f64f8aafcc7e729c26
|
5801aedf0374f0de247f6c8f0cdba92cb103826b
|
refs/heads/master
| 2020-05-20T19:32:24.142306
| 2017-03-10T03:15:13
| 2017-03-10T03:15:13
| 84,512,762
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,918
|
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import spiders.config_scrapy as config
import MySQLdb as mdb
import numbers
import random
import json
from datetime import datetime
from items import WalmartItem
import logging
from time import sleep
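# spiders/config_scrapy.py is expected to define the MySQL settings used below; the attribute
# names are taken from this file, the values are illustrative only:
#   db_host = "localhost"; db_user = "scraper"; db_pwd = "secret"; db_database = "walmart"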
con = mdb.connect(config.db_host, config.db_user, config.db_pwd, config.db_database)
class WalmartPipeline(object):
save_item_cnt = 0
def insert_many(self,sql, sqldata):
with con:
cur = con.cursor()
cur.executemany(sql, sqldata)
con.commit()
def update_sql(self,sql, sqldata):
with con:
cur = con.cursor()
cur.execute(sql, sqldata)
con.commit()
def process_item(self, item, spider):
if isinstance(item, WalmartItem):
self.insert_products(item)
return item
def retrieve_data(self,sql):
with con:
cur = con.cursor()
cur.execute(sql)
rows = cur.fetchall()
return rows
    def execute_sql(self, sql):
        # small helper for statements that take no bind parameters
        with con:
            cur = con.cursor()
            cur.execute(sql)
            con.commit()

    def delete_product(self):
        sql = "DELETE FROM product"
        self.execute_sql(sql)
        sql = "DELETE FROM product_picture"
        self.execute_sql(sql)
        sql = "DELETE FROM product_options"
        self.execute_sql(sql)
        sql = "DELETE FROM product_detail"
        self.execute_sql(sql)
def insert_products(self, item):
table = item["select_table"]
if table == "upload":
return
# product_url = scrapy.Field()
# product_name = scrapy.Field()
# unique_id = scrapy.Field()
# cost = scrapy.Field()
# delivery_time = scrapy.Field()
# pictures_url = scrapy.Field()
# bullet_points = scrapy.Field()
# details = scrapy.Field()
# review_numbers = scrapy.Field()
# category = scrapy.Field()
# inventory = scrapy.Field()
# options = scrapy.Field()
# seller = scrapy.Field()
# product_type = scrapy.Field()
product_data_list = []
#Product product Part
product_data = (
item["product_id"], #product_id
item["product_type"], #product_type
item["delivery_time"], #delivery_time
item["product_name"], #product_name
item["product_url"], #product_url
item["review_numbers"], #review_numbers
item["cost"], #cost
item["bullet_points"], #bullet_points
item["seller"], #seller
"", #options
item["category"], #category
item["inventory"], #inventory
item["special_offer"], # speciall offer
str(item["fulfillment"]), #shipping info
item["create_time"], #scraping date
str(item["details"]), #details
item["usItemId"], #USItemId
item["unique_id"], #SKU
"")
product_data_list.append(product_data)
product_sql = """INSERT INTO """ + table + """ (product_id, product_type, delivery_time, product_name, product_url, \
review_numbers, cost, bullet_points, seller, options, category, inventory, create_date, special_offer, fulfillment, scraping_date, details, unique_id, usItemId, picture_url) VALUES (\
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
Now(),
%s,
%s,
%s,
%s,
%s,
%s,
%s)"""
picture_sql = """INSERT INTO """ + table + "_pictures " + """ (product_id, picture_url) Values(%s, %s)"""
picture_data_list = []
for picture in item["pictures_url"]:
picture_data = (item["product_id"], picture)
picture_data_list.append(picture_data)
detail_sql = """INSERT INTO """ + table + "_details " + """ (product_id, detail_content) Values(%s, %s)"""
detail_data_list = []
for detail in item["options"]:
detail_data = (item["product_id"], str(detail))
detail_data_list.append(detail_data)
if self.save_item_cnt > random.randrange(0, 100):
logging.info(str(self.save_item_cnt) + " items saved")
self.save_item_cnt = 0
try:
sleep(0.02)
self.insert_many(product_sql, product_data_list)
sleep(0.02)
self.insert_many(picture_sql, picture_data_list)
sleep(0.02)
self.insert_many(detail_sql, detail_data_list)
self.save_item_cnt += 1
except mdb.IntegrityError as err:
#logging.info("Error: {}".format(err))
return
#logging.info(item)
except mdb.OperationalError as err:
logging.info("Error: {}".format(err))
# logging.info(detail_data_list)
# logging.info(picture_data_list)
# logging.info(product_data_list)
con = mdb.connect(config.db_host, config.db_user, config.db_pwd, config.db_database)
return
except mdb.DataError as err:
logging.info("Error: {}".format(err))
return
|
[
"silverstart1987913@yahoo.com"
] |
silverstart1987913@yahoo.com
|
252bc4abf7927c6d07ff920723e8ad236a229b65
|
ee49b6c2dbe0f163737d79ffd88264541d304951
|
/eventwarning/views.py
|
a1238402443440cd0d6136ae5c60672e75ac2fff
|
[] |
no_license
|
duplico/event-traffic-warning
|
4b2b19a76a399f6fd85e8ea2fbfbfdaba3c320c9
|
b2533170d72fccc2abbc010163503d188f84a4e6
|
refs/heads/master
| 2023-05-12T09:40:40.682219
| 2014-01-30T04:59:29
| 2014-01-30T04:59:29
| 277,707,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,978
|
py
|
from datetime import datetime
from flask import request, make_response, render_template, url_for, flash
from flask import redirect
import flask
import danger_backend as danger
#danger.VERBOSE = False
from eventwarning import app
from eventwarning import models
from eventwarning import executor, running_futures
DEFAULT_ZIP = "74103"
# /
# /zip/<zip>/d/<date>
#
#
@app.route('/', methods=['GET',])
def landing():
return redirect(url_for('danger_zip', zip='74103',
date=datetime.now().strftime('%Y-%m-%d')))
@app.route('/d/<date>/')
def default_zip_danger(date):
return danger_zip(DEFAULT_ZIP, date)
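# Flow sketch for danger_zip below: the first GET for a (zip, date) pair with no stored
# DangerEntry submits models.get_or_create_danger_entry to the background executor, keeps the
# Future in running_futures and answers HTTP 202; subsequent requests return 202 while the job
# runs and render the full page once the Future has completed (or once the entry is persisted).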
@app.route('/zip/<zip>/d/<date>/', methods=['GET',])
def danger_zip(zip, date):
day_obj = datetime.strptime(date, '%Y-%m-%d').date()
id = '/dangers/zip/%s/d/%s' % (zip, day_obj.strftime('%Y-%m-%d'))
danger_record = models.DangerEntry.load(id)
if not danger_record:
if id in running_futures:
if running_futures[id].done():
danger_record = running_futures[id].result()
else:
pass # In progress
else:
running_futures[id] = executor.submit(
models.get_or_create_danger_entry,
day_obj,
zip,
db=flask.g.couch
)
# In progress
if not danger_record:
return make_response('In progress\n', 202)
return render_template(
'events.html',
zip=zip,
day=day_obj,
events=None,
)
tweet = danger_record.get_tweet()
events_prioritized = danger_record.prioritized()
region_capacity = danger.get_zip_capacity('74103')
percent = int((100.0 * danger_record.total() / region_capacity))
percent_str = '%d%%' % percent
return render_template(
'events.html',
zip=zip,
events=events_prioritized['useful'],
other_events=events_prioritized['useless'],
total=danger_record.total(),
day=day_obj,
region_capacity=region_capacity,
percent_str=percent_str,
percent=percent,
tweet=tweet,
)
@app.route('/q/z/<zip>/d/<date>/', methods=['GET',])
def query_danger_status(zip, date):
day_obj = datetime.strptime(date, '%Y-%m-%d').date()
events = models.get_or_create_danger_entry(day_obj, zip)
events_prioritized = events.prioritized()
region_capacity = danger.get_zip_capacity('74103')
percent = int((100.0 * events.total() / region_capacity))
percent_str = '%d%%' % percent
print events.get_tweet()
return render_template(
'events.html',
zip=zip,
events=events_prioritized['useful'],
other_events=events_prioritized['useless'],
total=events.total(),
day=day_obj,
region_capacity=region_capacity,
percent_str=percent_str,
percent=percent
)
@app.route('/locations/index_zips/', methods=['POST',])
def index_zips():
# curl -X POST http://localhost:5000/locations/index_zips/?key=ADMIN_SECRET
if request.args.get('key', None) == app.config.get('ADMIN_SECRET'):
if 'index_zips' in running_futures:
print 'in'
if running_futures['index_zips'].done():
return make_response('/locations/\n', 200)
else:
return make_response('In progress\n', 202)
else:
task = executor.submit(models.store_all_zips,
app.config.get('ZIP_DB'),
db=flask.g.couch)
running_futures['index_zips'] = task
return make_response('Job started\n', 202)
else:
return make_response('INVALID SECRET\n', 403)
@app.route('/p<shortcut>', methods=['GET',])
def shortcut_entry(shortcut):
pass
|
[
"duplico@dupli.co"
] |
duplico@dupli.co
|
8d729913131bff94532ce64ca9ca5e50f25f2cc7
|
dbf7d177d2fa11314b81806ed1e0187133b8da9d
|
/play_50.py
|
76c186df7f8764e60091c1750c0e6353097b427b
|
[] |
no_license
|
sushmithanat/player_guvi
|
79ed4aab807715f4bf8d975225242d0a72dbfd8d
|
56e8a09d695b63b6b318b515ecfb9138164371ca
|
refs/heads/master
| 2020-03-28T09:36:47.491591
| 2018-06-12T05:56:24
| 2018-06-12T05:56:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 121
|
py
|
def play_50():
    # read an integer and report whether it has a divisor between 2 and n//2 (inclusive)
    n = int(input('Enter n:'))
    for i in range(2, n//2 + 1):
        if n % i == 0:
            return "yes"
    return "no"
play_50()
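# Worked example: entering 9 returns "yes" (9 % 3 == 0); entering 7 returns "no" because
# 7 has no divisor in 2..n//2.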
|
[
"noreply@github.com"
] |
sushmithanat.noreply@github.com
|
eebc60e0ac20e673cf5883a22b54ac1bd9e9efb0
|
4c1ca80e3897e0af4ff3f16771e6e54eb52dfe9f
|
/restaurants/api/serializers.py
|
a410e69b5f71211260f9607b0e96cc13524638f1
|
[] |
no_license
|
vpatel85/gas-up
|
7a7185a29aaf013cfa8fc7099d2edc2731428b58
|
973d41da4e8f2dff81f0d3705fa0331d6b1d0419
|
refs/heads/master
| 2021-01-13T02:11:27.725433
| 2014-11-10T22:24:33
| 2014-11-10T22:24:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,314
|
py
|
from rest_framework import serializers
from django.contrib.auth.models import User
from restaurants.models import UserProfile, Restaurant, Comment, SubComment
class SubCommentSerializer(serializers.ModelSerializer):
class Meta:
model = SubComment
fields = ('parent', 'comment')
class CommentSerializer(serializers.ModelSerializer):
subcomment_set = SubCommentSerializer(many=True)
class Meta:
model = Comment
fields = ('id', 'subcomment_set', 'restaurant', 'comment')
class UserSerializer(serializers.ModelSerializer):
comment_set = CommentSerializer()
class Meta:
model = User
fields = ('comment_set', 'username', 'first_name', 'last_name', 'email')
class UserProfileSerializer(serializers.HyperlinkedModelSerializer):
user = UserSerializer()
visited = serializers.RelatedField(many=True)
dislike = serializers.RelatedField(many=True)
class Meta:
model = UserProfile
fields = ('url', 'user', 'visited', 'dislike')
class RestaurantSerializer(serializers.HyperlinkedModelSerializer):
comment_set = CommentSerializer(many=True)
class Meta:
model = Restaurant
fields = ('url', 'name', 'google_rating', 'up_vote', 'down_vote', 'price_level', 'icon', 'created', 'updated', 'comment_set')
|
[
"vpatel@visual-a.com"
] |
vpatel@visual-a.com
|
a41d7cf025c623d35ced20217816991bbf758202
|
1566af780ae7342760609d194bc50512777c9a44
|
/pyNastran/op2/op2_interface/op2_scalar.py
|
771c8fafb29cac8410e40224e97fe3f460b0bf70
|
[] |
no_license
|
JamesdeanP/pyNastran
|
61bb43127601a2f1ba09166bfca1783e989afbe3
|
c7a9a72c6b4d1778ef2f72c5f6f67e5df4b0233e
|
refs/heads/master
| 2020-09-24T04:50:33.559676
| 2019-11-29T22:27:23
| 2019-11-29T22:27:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 79,315
|
py
|
#pylint: disable=R0913
"""
Defines the sub-OP2 class. This should never be called outside of the OP2 class.
- OP2_Scalar(debug=False, log=None, debug_file=None)
**Methods**
- set_subcases(subcases=None)
- set_transient_times(times)
- read_op2(op2_filename=None, combine=False)
- set_additional_generalized_tables_to_read(tables)
- set_additional_result_tables_to_read(tables)
- set_additional_matrices_to_read(matrices)
**Attributes**
- total_effective_mass_matrix
- effective_mass_matrix
- rigid_body_mass_matrix
- modal_effective_mass_fraction
- modal_participation_factors
- modal_effective_mass
- modal_effective_weight
- set_as_msc()
- set_as_optistruct()
**Private Methods**
- _get_table_mapper()
- _not_available(data, ndata)
- _table_crasher(data, ndata)
- _table_passer(data, ndata)
- _validate_op2_filename(op2_filename)
- _create_binary_debug()
- _make_tables()
- _read_tables(table_name)
- _skip_table(table_name)
- _read_table_name(rewind=False, stop_on_failure=True)
- _update_generalized_tables(tables)
- _read_cmodext()
- _read_cmodext_helper(marker_orig, debug=False)
- _read_geom_table()
- _finish()
"""
import os
from struct import Struct, unpack
from collections import defaultdict
from typing import List, Tuple, Dict, Union, Any
from numpy import array
import numpy as np
from cpylog import get_logger
from pyNastran import is_release, __version__
from pyNastran.f06.errors import FatalError
from pyNastran.op2.op2_interface.op2_reader import OP2Reader
from pyNastran.bdf.cards.params import PARAM
#============================
from pyNastran.op2.op2_interface.msc_tables import MSC_RESULT_TABLES, MSC_MATRIX_TABLES, MSC_GEOM_TABLES
from pyNastran.op2.op2_interface.nx_tables import NX_RESULT_TABLES, NX_MATRIX_TABLES, NX_GEOM_TABLES
from pyNastran.op2.tables.lama_eigenvalues.lama import LAMA
from pyNastran.op2.tables.oee_energy.onr import ONR
from pyNastran.op2.tables.ogf_gridPointForces.ogpf import OGPF
from pyNastran.op2.tables.oef_forces.oef import OEF
from pyNastran.op2.tables.oes_stressStrain.oes import OES
#from pyNastran.op2.tables.oes_stressStrain.oesm import OESM
from pyNastran.op2.tables.ogs_grid_point_stresses.ogs import OGS
from pyNastran.op2.tables.opg_appliedLoads.opg import OPG
from pyNastran.op2.tables.oqg_constraintForces.oqg import OQG
from pyNastran.op2.tables.oug.oug import OUG
from pyNastran.op2.tables.ogpwg import OGPWG
from pyNastran.op2.fortran_format import FortranFormat
from pyNastran.utils import is_binary_file
"""
ftp://161.24.15.247/Nastran2011/seminar/SEC04-DMAP_MODULES.pdf
Datablock Type Description
EFMFSMS Matrix 6 x 1 Total Effective mass matrix
EFMASSS Matrix 6 x 6 Effective mass matrix
RBMASS Matrix 6 x 6 Rigid body mass matrix
EFMFACS Matrix 6 X N Modal effective mass fraction matrix
MPFACS Matrix 6 x N Modal participation factor matrix
MEFMASS Matrix 6 x N Modal effective mass matrix
MEFWTS Matrix 6 x N Modal effective weight matrix
RAFGEN Matrix N x M Generalized force matrix
RADEFMP Matrix N X U2 Effective inertia loads
BHH Matrix N x N Viscous damping matrix
K4HH Matrix N x N Structural damping matrix
RADAMPZ Matrix N x N equivalent viscous damping ratios
RADAMPG Matrix N X N equivalent structural damping ratio
LAMA LAMA Eigenvalue summary table
OGPWG OGPWG Mass properties output
OQMG1 OQMG Modal MPC forces
RANCONS ORGY1 Constraint mode element strain energy table
RANEATC ORGY1 Attachment mode element strain energy table
RAGCONS OGPFB Constraint mode grid point force table
RAGEATC OGPFB Attachment mode grid point force table
RAPCONS OES Constraint mode ply stress table
RAPEATC OES Attachment mode ply stress table
RASCONS OES Constraint mode element stress table
RAECONS OES Constraint mode element strain table
RASEATC OES Attachment mode element stress table
RAEEATC OES Attachment mode element strain table
OES1C OES Modal Element Stress Table
OES1X OES Modal Element Stress Table
OSTR1C OES Modal Element Strain Table
OSTR1X OSTR Modal Element Strain Table
RAQCONS OUG Constraint mode MPC force table
RADCONS OUG Constraint mode displacement table
RADEFFM OUG Effective inertia displacement table
RAQEATC OUG Attachment mode MPC force table
RADEATC OUG Attachment mode displacement table
OUGV1 OUG Eigenvector Table
RAFCONS OEF Constraint mode element force table
RAFEATC OEF Attachment mode element force table
OEF1X OEF Modal Element Force Table
OGPFB1 OGPFB Modal Grid Point Force Table
ONRGY1 ONRGY1 Modal Element Strain Energy Table
ONRGY2 ONRGY1
#--------------------
RADCONS - Displacement Constraint Mode
RADDATC - Displacement Distributed Attachment Mode
RADNATC - Displacement Nodal Attachment Mode
RADEATC - Displacement Equivalent Inertia Attachment Mode
RADEFFM - Displacement Effective Inertia Mode
RAECONS - Strain Constraint Mode
RAEDATC - Strain Distributed Attachment Mode
RAENATC - Strain Nodal Attachment Mode
RAEEATC - Strain Equivalent Inertia Attachment Mode
RAFCONS - Element Force Constraint Mode
RAFDATC - Element Force Distributed Attachment Mode
RAFNATC - Element Force Nodal Attachment Mode
RAFEATC - Element Force Equivalent Inertia Attachment Mode
RALDATC - Load Vector Used to Compute the Distributed Attachment M
RANCONS - Strain Energy Constraint Mode
RANDATC - Strain Energy Distributed Attachment Mode
RANNATC - Strain Energy Nodal Attachment Mode
RANEATC - Strain Energy Equivalent Inertia Attachment Mode
RAQCONS - Ply Strains Constraint Mode
RAQDATC - Ply Strains Distributed Attachment Mode
RAQNATC - Ply Strains Nodal Attachment Mode
RAQEATC - Ply Strains Equivalent Inertia Attachment Mode
RARCONS - Reaction Force Constraint Mode
RARDATC - Reaction Force Distributed Attachment Mode
RARNATC - Reaction Force Nodal Attachment Mode
RAREATC - Reaction Force Equivalent Inertia Attachment Mode
RASCONS - Stress Constraint Mode
RASDATC - Stress Distributed Attachment Mode
RASNATC - Stress Nodal Attachment Mode
RASEATC - Stress Equivalent Inertia Attachment Mode
RAPCONS - Ply Stresses Constraint Mode
RAPDATC - Ply Stresses Distributed Attachment Mode
RAPNATC - Ply Stresses Nodal Attachment Mode
RAPEATC - Ply Stresses Equivalent Inertia Attachment Mode
RAGCONS - Grid Point Forces Constraint Mode
RAGDATC - Grid Point Forces Distributed Attachment Mode
RAGNATC - Grid Point Forces Nodal Attachment Mode
RAGEATC - Grid Point Forces Equivalent Inertia Attachment Mode
RADEFMP - Displacement PHA^T * Effective Inertia Mode
RADAMPZ - Viscous Damping Ratio Matrix
RADAMPG - Structural Damping Ratio Matrix
RAFGEN - Generalized Forces
BHH - Modal Viscous Damping Matrix
K4HH - Modal Structural Damping Matrix
"""
GEOM_TABLES = MSC_GEOM_TABLES + NX_GEOM_TABLES
AUTODESK_MATRIX_TABLES = [
#b'BELM',
b'KELM',
#b'MELM',
] # type: List[bytes]
# this will be split later
TEST_MATRIX_TABLES = [b'ATB', b'BTA', b'MYDOF']
RESULT_TABLES = NX_RESULT_TABLES + MSC_RESULT_TABLES
MATRIX_TABLES = NX_MATRIX_TABLES + MSC_MATRIX_TABLES + AUTODESK_MATRIX_TABLES + TEST_MATRIX_TABLES + [b'MEFF']
#GEOM_TABLES = MSC_GEOM_TABLES
#RESULT_TABLES = MSC_RESULT_TABLES
#MATRIX_TABLES = MSC_MATRIX_TABLES
# TODO: these are weird...
# RPOSTS1, MAXRATI, RESCOMP, PDRMSG
INT_PARAMS_1 = [
b'POST', b'OPPHIPA', b'OPPHIPB', b'GRDPNT', b'RPOSTS1', b'BAILOUT',
b'COUPMASS', b'CURV', b'INREL', b'MAXRATI', b'OG',
b'S1AM', b'S1M', b'DDRMM', b'MAXIT', b'PLTMSG', b'LGDISP', b'NLDISP',
b'OUNIT2K', b'OUNIT2M', b'RESCOMP', b'PDRMSG', b'LMODES', b'USETPRT',
b'NOCOMPS', b'OPTEXIT', b'RSOPT', b'GUSTAERO', b'MPTUNIT',
b'USETSEL', b'NASPRT', b'DESPCH', b'DESPCH1', b'COMPARE', b'DBNBLKS', b'NEWSEQ', b'OLDSEQ',
b'METHCMRS', b'NOFISR', b'KGGCPCH', b'ERROR', b'DBCDIAG', b'GPECT', b'LSTRN',
b'DBDROPT', b'SEOP2CV', b'IRES', b'SNORMPRT', b'DBDRNL', b'VMOPT',
b'OSWPPT', b'KDAMP', b'KDAMPFL', b'MATNL', b'MPCX', b'GEOMPLT', b'NOELOP',
b'NOGPF', b'PROUT', b'SUPER', b'LGDIS', b'EST', b'SEP1XOVR',
b'FRSEID', b'HRSEID', b'LRSEID', b'MODACC', b'XFLAG', b'TSTATIC',
b'NASPDV', b'RMXCRT', b'RMXTRN', b'DBCLEAN', b'LANGLE', b'SEMAPPRT',
b'FIXEDB', b'AMGOK', b'ASING', b'CNSTRT', b'CURVPLOT', b'CYCIO',
b'CYCSEQ', b'DBDICT', b'DBINIT', b'DBSET1', b'DBSET2', b'DBSET3', b'DBSET4',
b'DBSORT', b'DOPT', b'FACTOR', b'ALTSHAPE', b'MODTRK', b'IFTM', b'INRLM',
b'KINDEX', b'KMIN', b'KMAX', b'LARGEDB', b'LOADINC', b'LOADING', b'LOOP',
b'LOOPID', b'MODEL', b'MOREK', b'NEWDYN', b'NFECI', b'NINTPTS',
b'NLAYERS', b'NOELOF', b'NOMSGSTR', b'NONCUP', b'NUMOUT', b'NUMOUT1', b'NUMOUT2',
b'OPGTKG', b'OPPHIB', b'OUTOPT', b'PKRSP', b'RSPECTRA', b'RSPRINT',
b'S1G', b'SCRSPEC', b'SEMAPOPT', b'SEQOUT', b'SESEF', b'SKPAMG', b'SKPAMP',
b'SLOOPID', b'SOLID', b'SPCGEN', b'SRTELTYP', b'SRTOPT', b'START', b'SUBID',
b'SUBSKP', b'TABID', b'TESTNEG', b'BDMNCON',
# not defined in qrg...
b'NT', b'PNCHDB', b'DLOAD', b'NLOAD', b'NOAP', b'NOCMPFLD', b'NODATA',
b'NODJE', b'NOMECH', b'NOSDR1', b'NOSHADE', b'NOSORT1', b'NOTRED',
b'NSEGS', b'OLDELM', b'OPADOF', b'OUTPUT', b'P1', b'P2', b'P3', b'PCHRESP',
b'PLOT', b'PLOTSUP', b'PRTPCH', b'RADLIN', b'RESDUAL', b'S1', b'SDATA',
b'SEFINAL', b'SEMAP1', b'SKPLOAD', b'SKPMTRX', b'SOLID1', b'SSG3',
b'PEDGEP', b'ACMSPROC', b'ACMSSEID', b'ACOUS', b'ACOUSTIC', b'ADJFLG',
b'ADJLDF', b'AEDBCP', b'AESRNDM', b'ARCSIGNS', b'ATVUSE', b'BADMESH', b'BCHNG',
b'BCTABLE', b'ROTCSV', b'ROTGPF',
]
FLOAT_PARAMS_1 = [
b'K6ROT', b'WTMASS', b'SNORM', b'PATVER', b'MAXRATIO', b'EPSHT',
b'SIGMA', b'TABS', b'EPPRT', b'AUNITS', b'BOLTFACT', b'LMSCAL',
    b'G', b'GFL', b'LFREQ', b'HFREQ', b'ADPCON',
b'W3', b'W4', b'W3FL', b'W4FL', b'PREFDB',
b'EPZERO', b'DSZERO', b'TINY', b'TOLRSC',
b'FRSPD', b'HRSPD', b'LRSPD', b'MTRFMAX', b'ROTCMRF', b'MTRRMAX',
b'LAMLIM', b'BIGER', b'BIGER1', b'BIGER2', b'CLOSE',
b'EPSBIG', b'EPSMALC', b'EPSMALU', b'HIRES', b'KDIAG', b'MACH', b'VREF',
b'STIME', b'TESTSE', b'LFREQFL', b'Q', b'ADPCONS', b'AFNORM', b'AFZERO',
# not defined
b'PRPA', b'PRPHIVZ', b'PRPJ', b'PRRULV', b'RMAX', b'ADJFRQ', b'ARF',
b'ARS',
]
FLOAT_PARAMS_2 = [
b'BETA', b'CB1', b'CB2', b'CK1', b'CK2', b'CK3', b'CK41', b'CK42',
b'CM1', b'CM2',
b'G2', b'G4', b'G5', b'G6', b'G7', b'G8', b'G9', b'G10', b'G12', b'G13',
b'ALPHA1', b'ALPHA2', b'APPF',
]
INT_PARAMS_2 = [b'APPI',]
DOUBLE_PARAMS_1 = [] # b'Q'
STR_PARAMS_1 = [
b'POSTEXT', b'PRTMAXIM', b'AUTOSPC', b'OGEOM', b'PRGPST',
b'RESVEC', b'RESVINER', b'ALTRED', b'OGPS', b'OIBULK', b'OMACHPR',
b'UNITSYS', b'F56', b'OUGCORD', b'OGEM', b'EXTSEOUT',
b'CDIF', b'SUPAERO', b'RSCON', b'AUTOMPC', b'DBCCONV',
b'AUTOSPRT', b'PBRPROP', b'OMID', b'HEATSTAT', b'SECOMB', b'ELEMITER',
b'ELITASPC', b'DBCONV', b'SHLDAMP', b'COMPMATT', b'SPCSTR', b'ASCOUP',
b'PRTRESLT', b'SRCOMPS', b'CHECKOUT', b'SEMAP', b'AESMETH', b'RESVALT',
b'ROTSYNC', b'SYNCDAMP', b'PRGPOST', b'WMODAL', b'SDAMPUP',
b'COLPHEXA', b'CHKOUT', b'CTYPE', b'DBNAME', b'VUHEXA', b'VUPENTA', b'VUTETRA',
b'MESH', b'OPTION', b'PRINT', b'SENAME', b'MECHFIX', b'RMXTRAN', b'FLEXINV',
b'ADSTAT', b'ACOUT', b'ACSYM', b'ACTYPE', b'ADBX', b'AUTOSEEL',
b'RDSPARSE',
# part of param, checkout
b'PRTBGPDT', b'PRTCSTM', b'PRTEQXIN', b'PRTGPDT',
b'PRTGPL', b'PRTGPTT', b'PRTMGG', b'PRTPG',
# superelements
b'EXTOUT', b'SESDAMP',
# TODO: remove these as they're in the matrix test and are user
# defined PARAMs; arguably all official examples should just work
# TODO: add an option for custom PARAMs
b'ADB', b'AEDB', b'MREDUC', b'OUTDRM', b'OUTFORM', b'REDMETH', b'DEBUG',
b'AEDBX', b'AERO', b'AUTOSUP0', b'AXIOPT',
]
class OP2_Scalar(LAMA, ONR, OGPF,
OEF, OES, OGS, OPG, OQG, OUG, OGPWG, FortranFormat):
"""
Defines an interface for the Nastran OP2 file.
"""
@property
def total_effective_mass_matrix(self):
"""6x6 matrix"""
return self.matrices['EFMFSMS']
@property
def effective_mass_matrix(self):
"""6x6 matrix"""
return self.matrices['EFMASSS']
@property
def rigid_body_mass_matrix(self):
"""6x6 matrix"""
return self.matrices['RBMASS']
@property
def modal_effective_mass_fraction(self):
"""6xnmodes matrix"""
return self.matrices['EFMFACS']#.dataframe
@property
def modal_participation_factors(self):
"""6xnmodes matrix"""
return self.matrices['MPFACS']#.dataframe
@property
def modal_effective_mass(self):
"""6xnmodes matrix"""
return self.matrices['MEFMASS']#.dataframe
@property
def modal_effective_weight(self):
"""6xnmodes matrix"""
return self.matrices['MEFWTS']#.dataframe
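    # Example (sketch): once an OP2 requesting these DMAP outputs has been read, the raw
    # matrices live in self.matrices and are exposed through the properties above, e.g.
    #   mpfacs = model.modal_participation_factors   # 6 x nmodes MPFACS matrix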
@property
def matrix_tables(self):
return MATRIX_TABLES
def set_as_nx(self):
self.is_nx = True
self.is_msc = False
self.is_autodesk = False
self.is_optistruct = False
self._nastran_format = 'nx'
def set_as_msc(self):
self.is_nx = False
self.is_msc = True
self.is_autodesk = False
self.is_optistruct = False
self._nastran_format = 'msc'
def set_as_autodesk(self):
self.is_nx = False
self.is_msc = False
self.is_autodesk = True
self.is_optistruct = False
self._nastran_format = 'autodesk'
def set_as_optistruct(self):
self.is_nx = False
self.is_msc = False
self.is_autodesk = False
self.is_optistruct = True
self._nastran_format = 'optistruct'
def __init__(self, debug=False, log=None, debug_file=None):
"""
Initializes the OP2_Scalar object
Parameters
----------
debug : bool; default=False
enables the debug log and sets the debug in the logger
log : Log()
a logging object to write debug messages to
(.. seealso:: import logging)
debug_file : str; default=None (No debug)
sets the filename that will be written to
"""
assert isinstance(debug, bool), 'debug=%r' % debug
self.log = get_logger(log, 'debug' if debug else 'info')
self._count = 0
self.op2_filename = None
self.bdf_filename = None
self.f06_filename = None
self.des_filename = None
self.h5_filename = None
self._encoding = 'utf8'
#: should a MATPOOL "symmetric" matrix be stored as symmetric
#: it takes double the RAM, but is easier to use
self.apply_symmetry = True
LAMA.__init__(self)
ONR.__init__(self)
OGPF.__init__(self)
OEF.__init__(self)
OES.__init__(self)
#OESM.__init__(self)
OGS.__init__(self)
OPG.__init__(self)
OQG.__init__(self)
OUG.__init__(self)
OGPWG.__init__(self)
FortranFormat.__init__(self)
self.is_vectorized = False
self._close_op2 = True
self.result_names = set()
self.grid_point_weight = {}
self.words = []
self.debug = debug
self._last_comment = None
#self.debug = True
#self.debug = False
#debug_file = None
if debug_file is None:
self.debug_file = None
else:
assert isinstance(debug_file, str), debug_file
self.debug_file = debug_file
self.op2_reader = OP2Reader(self)
def set_subcases(self, subcases=None):
"""
Allows you to read only the subcases in the list of isubcases
Parameters
----------
subcases : List[int, ...] / int; default=None->all subcases
list of [subcase1_ID,subcase2_ID]
"""
#: stores the set of all subcases that are in the OP2
#self.subcases = set()
if subcases is None or subcases == []:
#: stores if the user entered [] for isubcases
self.is_all_subcases = True
self.valid_subcases = []
else:
#: should all the subcases be read (default=True)
self.is_all_subcases = False
if isinstance(subcases, int):
subcases = [subcases]
#: the set of valid subcases -> set([1,2,3])
self.valid_subcases = set(subcases)
self.log.debug("set_subcases - subcases = %s" % self.valid_subcases)
def set_transient_times(self, times): # TODO this name sucks...
"""
Takes a dictionary of list of times in a transient case and
gets the output closest to those times.
Examples
--------
>>> times = {subcase_id_1: [time1, time2],
subcase_id_2: [time3, time4]}
.. warning:: I'm not sure this still works...
"""
expected_times = {}
for (isubcase, etimes) in times.items():
            etimes = list(etimes)
etimes.sort()
expected_times[isubcase] = array(etimes)
self.expected_times = expected_times
def _get_table_mapper(self):
"""gets the dictionary of function3 / function4"""
# MSC table mapper
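        # Each key is a raw OP2 table name (bytes); each value is [read_table_3, read_table_4],
        # i.e. the handler for the table's "3" (header/metadata) record and the handler for its
        # "4" (data) record. self._table_passer is used where a table is recognised but skipped.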
table_mapper = {
# per NX
b'OESVM1' : [self._read_oes1_3, self._read_oes1_4], # isat_random
b'OESVM1C' : [self._read_oes1_3, self._read_oes1_4], # isat_random
b'OSTRVM1' : [self._read_oes1_3, self._read_ostr1_4], # isat_random
b'OSTRVM1C' : [self._read_oes1_3, self._read_ostr1_4], # isat_random
b'OSTRVM2' : [self._read_oes2_3, self._read_ostr2_4],
b'OESVM2' : [self._read_oes2_3, self._read_oes2_4], # big random
b'OES2C' : [self._read_oes2_3, self._read_oes2_4],
b'OSTR2' : [self._read_oes2_3, self._read_ostr2_4], # TODO: disable
b'OSTR2C' : [self._read_oes2_3, self._read_ostr2_4],
#b'OES2C' : [self._table_passer, self._table_passer], # stress
#b'OSTR2' : [self._table_passer, self._table_passer], # TODO: enable
#b'OSTR2C' : [self._table_passer, self._table_passer],
b'OTEMP1' : [self._read_otemp1_3, self._read_otemp1_4],
# --------------------------------------------------------------------------
# MSC TABLES
# common tables
# unorganized
b'RADCONS': [self._read_oug1_3, self._read_oug_4], # Displacement Constraint Mode (OUG)
b'RADEFFM': [self._read_oug1_3, self._read_oug_4], # Displacement Effective Inertia Mode (OUG)
b'RADEATC': [self._read_oug1_3, self._read_oug_4], # Displacement Equivalent Inertia Attachment mode (OUG)
# broken - isat_launch_100hz.op2 - wrong numwide
# spc forces
b'RAQCONS': [self._read_oqg1_3, self._read_oqg_4], # Constraint mode MPC force table (OQG)
b'RAQEATC': [self._read_oqg1_3, self._read_oqg_4], # Attachment mode MPC force table (OQG)
#b'RAQCONS': [self._table_passer, self._table_passer], # temporary
#b'RAQEATC': [self._table_passer, self._table_passer], # temporary
# element forces
b'RAFCONS': [self._read_oef1_3, self._read_oef1_4], # Element Force Constraint Mode (OEF)
b'RAFEATC': [self._read_oef1_3, self._read_oef1_4], # Element Force Equivalent Inertia Attachment mode (OEF)
#b'RAFCONS': [self._table_passer, self._table_passer], # temporary
#b'RAFEATC': [self._table_passer, self._table_passer], # temporary
# grid point forces
b'RAGCONS': [self._read_ogpf1_3, self._read_ogpf1_4], # Grid Point Forces Constraint Mode (OGPFB)
b'RAGEATC': [self._read_ogpf1_3, self._read_ogpf1_4], # Grid Point Forces Equivalent Inertia Attachment mode (OEF)
#b'RAGCONS': [self._table_passer, self._table_passer], # Grid Point Forces Constraint Mode (OGPFB)
#b'RAGEATC': [self._table_passer, self._table_passer], # Grid Point Forces Equivalent Inertia Attachment mode (OEF)
# stress
b'RAPCONS': [self._read_oes1_3, self._read_oes1_4], # Constraint mode ply stress table (OES)
b'RAPEATC': [self._read_oes1_3, self._read_oes1_4], # Attachment mode ply stress table (OES)
#b'RAPCONS': [self._table_passer, self._table_passer], # Constraint mode ply stress table (OES)
#b'RAPEATC': [self._table_passer, self._table_passer], # Attachment mode ply stress table (OES)
# stress
b'RASCONS': [self._read_oes1_3, self._read_oes1_4], # Stress Constraint Mode (OES)
b'RASEATC': [self._read_oes1_3, self._read_oes1_4], # Stress Equivalent Inertia Attachment mode (OES)
#b'RASCONS': [self._table_passer, self._table_passer], # temporary
#b'RASEATC': [self._table_passer, self._table_passer], # temporary
# strain
b'RAEEATC': [self._read_oes1_3, self._read_ostr1_4], # Strain Equivalent Inertia Attachment mode (OES)
b'RAECONS': [self._read_oes1_3, self._read_ostr1_4], # Strain Constraint Mode (OSTR)
#b'RAEEATC': [self._table_passer, self._table_passer], # temporary
#b'RAECONS': [self._table_passer, self._table_passer], # temporary
# strain energy
b'RANEATC' : [self._read_onr1_3, self._read_onr1_4], # Strain Energy Equivalent Inertia Attachment mode (ORGY1)
b'RANCONS': [self._read_onr1_3, self._read_onr1_4], # Constraint mode element strain energy table (ORGY1)
#b'RANEATC': [self._table_passer, self._table_passer], # Strain Energy Equivalent Inertia Attachment mode (ORGY1)
#b'RANCONS': [self._table_passer, self._table_passer], # Constraint mode element strain energy table (ORGY1)
b'R1TABRG': [self._table_passer, self.op2_reader.read_r1tabrg],
#b'TOL': [self._table_passer, self._table_passer],
b'MATPOOL': [self._table_passer, self._table_passer], # DMIG bulk data entries
# this comment may refer to CSTM?
#F:\work\pyNastran\examples\Dropbox\pyNastran\bdf\cards\test\test_mass_01.op2
#F:\work\pyNastran\examples\matpool\gpsc1.op2
b'AXIC': [self._table_passer, self._table_passer],
b'RSOUGV1': [self._table_passer, self._table_passer],
b'RESOES1': [self._table_passer, self._table_passer],
b'RESEF1' : [self._table_passer, self._table_passer],
b'DESCYC' : [self._table_passer, self._table_passer],
#b'AEMONPT' : [self._read_aemonpt_3, self._read_aemonpt_4],
#=======================
# OEF
# element forces
#b'OEFITSTN' : [self._table_passer, self._table_passer], # works
b'OEFITSTN' : [self._read_oef1_3, self._read_oef1_4],
b'OEFIT' : [self._read_oef1_3, self._read_oef1_4], # failure indices
b'OEF1X' : [self._read_oef1_3, self._read_oef1_4], # element forces at intermediate stations
b'OEF1' : [self._read_oef1_3, self._read_oef1_4], # element forces or heat flux
b'HOEF1' : [self._read_oef1_3, self._read_oef1_4], # element heat flux
b'DOEF1' : [self._read_oef1_3, self._read_oef1_4], # scaled response spectra - forces
# off force
b'OEF2' : [self._read_oef2_3, self._read_oef2_4], # element forces or heat flux
#=======================
# OQG
# spc forces
# OQG1/OQGV1 - spc forces in the nodal frame
# OQP1 - scaled response spectra - spc-forces
b'OQG1' : [self._read_oqg1_3, self._read_oqg_4],
b'OQG2' : [self._read_oqg2_3, self._read_oqg_4],
b'OQGV1' : [self._read_oqg1_3, self._read_oqg_4],
b'OQGV2' : [self._read_oqg2_3, self._read_oqg_4],
b'OQP1' : [self._read_oqg1_3, self._read_oqg_4],
b'OQP2' : [self._read_oqg2_3, self._read_oqg_4],
# SPC/MPC tables depending on table_code
# SPC - NX/MSC
# MPC - MSC
b'OQGATO1' : [self._read_oqg1_3, self._read_oqg_4],
b'OQGCRM1' : [self._read_oqg1_3, self._read_oqg_4],
b'OQGPSD1' : [self._read_oqg1_3, self._read_oqg_4],
b'OQGRMS1' : [self._read_oqg1_3, self._read_oqg_4],
b'OQGNO1' : [self._read_oqg1_3, self._read_oqg_4],
b'OQGATO2' : [self._read_oqg2_3, self._read_oqg_4],
b'OQGCRM2' : [self._read_oqg2_3, self._read_oqg_4],
b'OQGPSD2' : [self._read_oqg2_3, self._read_oqg_4],
b'OQGRMS2' : [self._table_passer, self._table_passer], # buggy on isat random
b'OQGNO2' : [self._table_passer, self._table_passer], # buggy on isat random
#b'OQGRMS2' : [self._read_oqg2_3, self._read_oqg_4], # buggy on isat random
#b'OQGNO2' : [self._read_oqg2_3, self._read_oqg_4], # buggy on isat random
#=======================
# MPC Forces
# these are NX tables
# OQGM1 - mpc forces in the nodal frame
b'OQMG1' : [self._read_oqg1_3, self._read_oqg_mpc_forces],
b'OQMATO1' : [self._read_oqg1_3, self._read_oqg_mpc_ato],
b'OQMCRM1' : [self._read_oqg1_3, self._read_oqg_mpc_crm],
b'OQMPSD1' : [self._read_oqg1_3, self._read_oqg_mpc_psd],
b'OQMRMS1' : [self._read_oqg1_3, self._read_oqg_mpc_rms],
b'OQMNO1' : [self._read_oqg1_3, self._read_oqg_mpc_no],
b'OQMG2' : [self._read_oqg2_3, self._read_oqg_mpc_forces], # big random
b'OQMATO2' : [self._read_oqg2_3, self._read_oqg_mpc_ato],
b'OQMCRM2' : [self._read_oqg2_3, self._read_oqg_mpc_crm],
b'OQMPSD2' : [self._read_oqg2_3, self._read_oqg_mpc_psd],
b'OQMRMS2' : [self._table_passer, self._table_passer], # buggy on isat random
b'OQMNO2' : [self._table_passer, self._table_passer], # buggy on isat random
#b'OQMRMS2' : [self._read_oqg2_3, self._read_oqg_mpc_rms], # buggy on isat random
#b'OQMNO2' : [self._read_oqg2_3, self._read_oqg_mpc_no], # buggy on isat random
#=======================
# OPG
# applied loads
b'OPG1' : [self._read_opg1_3, self._read_opg1_4], # applied loads in the nodal frame
b'OPGV1' : [self._read_opg1_3, self._read_opg1_4], # solution set applied loads?
b'OPNL1' : [self._read_opg1_3, self._read_opg1_4], # nonlinear loads
b'OCRPG' : [self._read_opg1_3, self._read_opg1_4], # post-buckling loads
b'OPG2' : [self._read_opg2_3, self._read_opg1_4], # applied loads in the nodal frame
b'OPNL2' : [self._read_opg2_3, self._read_opg1_4], # nonlinear loads
b'OPGATO1' : [self._read_opg1_3, self._read_opg1_4],
b'OPGCRM1' : [self._read_opg1_3, self._read_opg1_4],
b'OPGPSD1' : [self._read_opg1_3, self._read_opg1_4],
b'OPGRMS1' : [self._read_opg1_3, self._read_opg1_4],
b'OPGNO1' : [self._read_opg1_3, self._read_opg1_4],
b'OPGATO2' : [self._read_opg2_3, self._read_opg1_4],
b'OPGCRM2' : [self._read_opg2_3, self._read_opg1_4],
b'OPGPSD2' : [self._read_opg2_3, self._read_opg1_4],
#b'OPGRMS2' : [self._table_passer, self._table_passer],
#b'OPGNO2' : [self._table_passer, self._table_passer],
b'OPGRMS2' : [self._read_opg2_3, self._read_opg1_4],
b'OPGNO2' : [self._read_opg2_3, self._read_opg1_4],
#=======================
# OGPFB1
# grid point forces
b'OGPFB1' : [self._read_ogpf1_3, self._read_ogpf1_4], # grid point forces
#=======================
# ONR/OEE
# strain energy density
b'ONRGY' : [self._read_onr1_3, self._read_onr1_4],
b'ONRGY1' : [self._read_onr1_3, self._read_onr1_4], # strain energy density
b'ONRGY2': [self._read_onr2_3, self._read_onr1_4],
#b'ONRGY2': [self._table_passer, self._table_passer],
#===========================================================
# OES
# stress
# OES1C - Table of composite element stresses or strains in SORT1 format
# OESRT - Table of composite element ply strength ratio. Output by SDRCOMP
b'OES1X1' : [self._read_oes1_3, self._read_oes1_4], # stress - nonlinear elements
b'OES1' : [self._read_oes1_3, self._read_oes1_4], # stress - linear only
b'OES1X' : [self._read_oes1_3, self._read_oes1_4], # element stresses at intermediate stations & nonlinear stresses
b'OES1C' : [self._read_oes1_3, self._read_oes1_4], # stress - composite
b'OESCP' : [self._read_oes1_3, self._read_oes1_4], # stress - nonlinear???
b'OESRT' : [self._read_oes1_3, self._read_oes1_4], # ply strength ratio
# strain
b'OSTR1' : [self._read_oes1_3, self._read_ostr1_4], # strain - autodesk/9zk6b5uuo.op2
b'OSTR1X' : [self._read_oes1_3, self._read_ostr1_4], # strain - isotropic
b'OSTR1C' : [self._read_oes1_3, self._read_ostr1_4], # strain - composite
b'OESTRCP' : [self._read_oes1_3, self._read_ostr1_4],
# special nonlinear tables
# OESNLBR - Slideline stresses
# OESNLXD - Nonlinear transient stresses
# OESNLXR - Nonlinear stress
# Table of nonlinear element stresses in SORT1 format and appended for all subcases
b'OESNLXR' : [self._read_oes1_3, self._read_oes1_4], # nonlinear stresses
b'OESNLXD' : [self._read_oes1_3, self._read_oes1_4], # nonlinear transient stresses
b'OESNLBR' : [self._read_oes1_3, self._read_oes1_4],
b'OESNL1X' : [self._read_oes1_3, self._read_oes1_4],
b'OESNL2' : [self._read_oes2_3, self._read_oes2_4],
b'OESNLXR2' : [self._read_oes2_3, self._read_oes2_4],
b'OESNLBR2' : [self._read_oes2_3, self._read_oes2_4],
#b'OESNLXR2' : [self._table_passer, self._table_passer],
#b'OESNLBR2' : [self._table_passer, self._table_passer],
# off stress
b'OES2' : [self._read_oes2_3, self._read_oes2_4], # stress - linear only - disabled; need better tests
#b'OES2' : [self._table_passer, self._table_passer], # stress - linear only - disabled; need better tests
#b'OESPSD2C' : [self._table_passer, self._table_passer],
#b'OSTPSD2C' : [self._table_passer, self._table_passer],
#=======================
# off strain
b'OSTRATO1' : [self._read_oes1_3, self._read_ostr1_4],
b'OSTRCRM1' : [self._read_oes1_3, self._read_ostr1_4],
b'OSTRPSD1' : [self._read_oes1_3, self._read_ostr1_4],
b'OSTRRMS1' : [self._read_oes1_3, self._read_ostr1_4], # isat_random
b'OSTRNO1' : [self._read_oes1_3, self._read_ostr1_4], # isat_random
b'OSTRATO2' : [self._read_oes2_3, self._read_ostr2_4],
b'OSTRCRM2' : [self._read_oes2_3, self._read_ostr2_4],
b'OSTRPSD2' : [self._read_oes2_3, self._read_ostr2_4],
b'OSTRRMS2' : [self._table_passer, self._table_passer], # buggy on isat random
b'OSTRNO2' : [self._table_passer, self._table_passer], # buggy on isat random
#b'OSTRRMS2' : [self._read_oes2_3, self._read_ostr2_4], # buggy on isat random
#b'OSTRNO2' : [self._read_oes2_3, self._read_ostr2_4], # buggy on isat random
b'OSTRMS1C' : [self._read_oes1_3, self._read_ostr1_4], # isat_random
b'OSTNO1C' : [self._read_oes1_3, self._read_ostr1_4], # isat_random
#=======================
# OUG
# displacement/velocity/acceleration/eigenvector/temperature
b'OUG1' : [self._read_oug1_3, self._read_oug_4], # displacements in nodal frame
# OVG1?
b'OAG1' : [self._read_oug1_3, self._read_oug_4], # accelerations in nodal frame
b'OUGV1' : [self._read_oug1_3, self._read_oug_4], # displacements in nodal frame
b'BOUGV1' : [self._read_oug1_3, self._read_oug_4], # OUG1 on the boundary???
b'OUGV1PAT': [self._read_oug1_3, self._read_oug_4], # OUG1 + coord ID
b'OUPV1' : [self._read_oug1_3, self._read_oug_4], # scaled response spectra - displacement
b'TOUGV1' : [self._read_oug1_3, self._read_oug_4], # grid point temperature
b'ROUGV1' : [self._read_oug1_3, self._read_oug_4], # relative OUG
b'OPHSA' : [self._read_oug1_3, self._read_oug_4], # Displacement output table in SORT1
b'OUXY1' : [self._read_oug1_3, self._read_oug_4], # Displacements in SORT1 format for h-set or d-set.
b'OUGV2' : [self._read_oug2_3, self._read_oug_4], # displacements in nodal frame
b'ROUGV2' : [self._read_oug2_3, self._read_oug_4], # relative OUG
b'OUXY2' : [self._read_oug2_3, self._read_oug_4], # Displacements in SORT2 format for h-set or d-set.
#F:\work\pyNastran\examples\Dropbox\move_tpl\sbuckl2a.op2
b'OCRUG' : [self._read_oug1_3, self._read_oug_4], # post-buckling displacement
b'OPHIG' : [self._read_oug1_3, self._read_oug_4], # eigenvectors in basic coordinate system
b'BOPHIG' : [self._read_oug1_3, self._read_oug_4], # eigenvectors in basic coordinate system
b'BOPHIGF' : [self._read_oug1_3, self._read_oug_4], # Eigenvectors in the basic coordinate system for the fluid portion of the model.
b'BOPHIGS' : [self._read_oug1_3, self._read_oug_4], # Eigenvectors in the basic coordinate system for the structural portion of the model.
b'BOPG1' : [self._read_opg1_3, self._read_opg1_4], # applied loads in basic coordinate system
b'OUGATO1' : [self._read_oug1_3, self._read_oug_ato],
b'OUGCRM1' : [self._read_oug1_3, self._read_oug_crm],
b'OUGPSD1' : [self._read_oug1_3, self._read_oug_psd],
b'OUGRMS1' : [self._read_oug1_3, self._read_oug_rms],
b'OUGNO1' : [self._read_oug1_3, self._read_oug_no],
b'OUGATO2' : [self._read_oug2_3, self._read_oug_ato],
b'OUGCRM2' : [self._read_oug2_3, self._read_oug_crm],
b'OUGPSD2' : [self._read_oug2_3, self._read_oug_psd],
b'OUGRMS2' : [self._table_passer, self._table_passer], # buggy on isat random
b'OUGNO2' : [self._table_passer, self._table_passer], # buggy on isat random
#b'OUGRMS2' : [self._read_oug2_3, self._read_oug_rms], # buggy on isat random
#b'OUGNO2' : [self._read_oug2_3, self._read_oug_no], # buggy on isat random
#=======================
# extreme values of the respective table
b'OUGV1MX' : [self._table_passer, self._table_passer],
b'OEF1MX' : [self._table_passer, self._table_passer],
b'OES1MX' : [self._table_passer, self._table_passer],
#=======================
# contact
b'OQGCF1' : [self._table_passer, self._table_passer], # Contact force at grid point.
b'OQGCF2' : [self._table_passer, self._table_passer], # Contact force at grid point.
b'OSPDS1' : [self._table_passer, self._table_passer], # Final separation distance.
b'OSPDS2' : [self._table_passer, self._table_passer],
b'OSPDSI1' : [self._table_passer, self._table_passer], # Initial separation distance.
b'OSPDSI2' : [self._table_passer, self._table_passer], # Output contact separation distance results.
b'OBC1' : [self._table_passer, self._table_passer],
b'OBC2' : [self._table_passer, self._table_passer], # Contact pressures and tractions at grid points.
# Glue normal and tangential tractions at grid point in basic coordinate system
b'OBG1' : [self._table_passer, self._table_passer],
b'OBG2' : [self._table_passer, self._table_passer],
b'OQGGF1' : [self._table_passer, self._table_passer], # Glue forces at grid point in basic coordinate system
b'OQGGF2' : [self._table_passer, self._table_passer],
#=======================
# OGPWG
# grid point weight
b'OGPWG' : [self._read_ogpwg_3, self._read_ogpwg_4], # grid point weight
b'OGPWGM' : [self._read_ogpwg_3, self._read_ogpwg_4], # modal? grid point weight
#=======================
# OGS
# grid point stresses
b'OGS1' : [self._read_ogs1_3, self._read_ogs1_4], # grid point stresses
#b'OGS2' : [self._read_ogs1_3, self._read_ogs1_4], # grid point stresses
#=======================
# eigenvalues
b'BLAMA' : [self._read_buckling_eigenvalue_3, self._read_buckling_eigenvalue_4], # buckling eigenvalues
b'CLAMA' : [self._read_complex_eigenvalue_3, self._read_complex_eigenvalue_4], # complex eigenvalues
b'LAMA' : [self._read_real_eigenvalue_3, self._read_real_eigenvalue_4], # eigenvalues
b'LAMAS' : [self._read_real_eigenvalue_3, self._read_real_eigenvalue_4], # eigenvalues-structure
b'LAMAF' : [self._read_real_eigenvalue_3, self._read_real_eigenvalue_4], # eigenvalues-fluid
# ===========================geom passers===========================
# geometry
b'GEOM1' : [self._table_passer, self._table_passer], # GEOM1-Geometry-related bulk data
b'GEOM2' : [self._table_passer, self._table_passer], # GEOM2-element connectivity and SPOINT-related data
b'GEOM3' : [self._table_passer, self._table_passer], # GEOM3-Static and thermal loads
b'GEOM4' : [self._table_passer, self._table_passer], # GEOM4-constraints, DOF membership entries, MPC, and R-type element data
# superelements
b'GEOM1S' : [self._table_passer, self._table_passer], # GEOMx + superelement
b'GEOM2S' : [self._table_passer, self._table_passer],
b'GEOM3S' : [self._table_passer, self._table_passer],
b'GEOM4S' : [self._table_passer, self._table_passer],
b'GEOM1VU' : [self._table_passer, self._table_passer],
b'GEOM2VU' : [self._table_passer, self._table_passer],
b'BGPDTVU' : [self._table_passer, self._table_passer],
b'GEOM1N' : [self._table_passer, self._table_passer],
b'GEOM2N' : [self._table_passer, self._table_passer],
b'GEOM3N' : [self._table_passer, self._table_passer],
b'GEOM4N' : [self._table_passer, self._table_passer],
b'GEOM1OLD' : [self._table_passer, self._table_passer],
b'GEOM2OLD' : [self._table_passer, self._table_passer],
b'GEOM3OLD' : [self._table_passer, self._table_passer],
b'GEOM4OLD' : [self._table_passer, self._table_passer],
b'EPT' : [self._table_passer, self._table_passer], # elements
b'EPTS' : [self._table_passer, self._table_passer], # elements - superelements
b'EPTOLD' : [self._table_passer, self._table_passer],
b'MPT' : [self._table_passer, self._table_passer], # materials
b'MPTS' : [self._table_passer, self._table_passer], # materials - superelements
b'DYNAMIC' : [self._table_passer, self._table_passer],
b'DYNAMICS' : [self._table_passer, self._table_passer],
b'DIT' : [self._table_passer, self._table_passer],
b'DITS' : [self._table_passer, self._table_passer],
b'AXIC' : [self._table_passer, self._table_passer],
# =========================end geom passers=========================
# ===passers===
#b'EQEXIN': [self._table_passer, self._table_passer],
#b'EQEXINS': [self._table_passer, self._table_passer],
b'GPDT' : [self._table_passer, self._table_passer], # grid points?
            b'BGPDT' : [self._table_passer, self._table_passer], # basic grid point definition table
b'BGPDTS' : [self._table_passer, self._table_passer],
b'BGPDTOLD' : [self._table_passer, self._table_passer],
b'PVT' : [self._read_pvto_3, self._read_pvto_4], # PVT - Parameter Variable Table
b'PVTS' : [self._read_pvto_3, self._read_pvto_4], # ???
b'PVT0' : [self._read_pvto_3, self._read_pvto_4], # user parameter value table
b'TOLD' : [self._table_passer, self._table_passer],
b'CASECC' : [self._table_passer, self._table_passer], # case control deck
b'STDISP' : [self._table_passer, self._table_passer], # matrix?
b'AEDISP' : [self._table_passer, self._table_passer], # matrix?
#b'TOLB2' : [self._table_passer, self._table_passer], # matrix?
# EDT - element deformation, aerodynamics, p-element, divergence analysis,
# and iterative solver input (includes SET1 entries)
b'EDT' : [self._table_passer, self._table_passer],
b'EDTS' : [self._table_passer, self._table_passer],
b'FOL' : [self._table_passer, self._table_passer],
b'PERF' : [self._table_passer, self._table_passer],
b'VIEWTB' : [self._table_passer, self._table_passer], # view elements
# DSCMCOL - Correlation table for normalized design sensitivity coefficient matrix.
# Output by DSTAP2.
# DBCOPT - Design optimization history table for
b'CONTACT' : [self._table_passer, self._table_passer],
b'CONTACTS' : [self._table_passer, self._table_passer],
b'OEKE1' : [self._table_passer, self._table_passer],
#b'DSCMCOL' : [self._table_passer, self._table_passer],
#b'DBCOPT' : [self._table_passer, self._table_passer],
#b'FRL0': [self._table_passer, self._table_passer], # frequency response list
#==================================
# modal participation factors
# OFMPF2M Table of fluid mode participation factors by normal mode.
b'OFMPF2M' : [self._read_mpf_3, self._read_mpf_4],
# OLMPF2M Load mode participation factors by normal mode.
b'OLMPF2M' : [self._read_mpf_3, self._read_mpf_4],
# OPMPF2M Panel mode participation factors by normal mode.
b'OPMPF2M' : [self._read_mpf_3, self._read_mpf_4],
# OPMPF2M Panel mode participation factors by normal mode.
b'OSMPF2M' : [self._read_mpf_3, self._read_mpf_4],
# OGMPF2M Grid mode participation factors by normal mode.
b'OGPMPF2M' : [self._read_mpf_3, self._read_mpf_4],
#OFMPF2E Table of fluid mode participation factors by excitation frequencies.
#OSMPF2E Table of structure mode participation factors by excitation frequencies.
#OPMPF2E Table of panel mode participation factors by excitation frequencies.
#OLMPF2E Table of load mode participation factors by excitation frequencies.
#OGMPF2E Table of grid mode participation factors by excitation frequencies.
# velocity
b'OVGATO1' : [self._read_oug1_3, self._read_oug_ato],
b'OVGCRM1' : [self._read_oug1_3, self._read_oug_crm],
b'OVGPSD1' : [self._read_oug1_3, self._read_oug_psd],
b'OVGRMS1' : [self._read_oug1_3, self._read_oug_rms],
b'OVGNO1' : [self._read_oug1_3, self._read_oug_no],
b'OVGATO2' : [self._read_oug2_3, self._read_oug_ato],
b'OVGCRM2' : [self._read_oug2_3, self._read_oug_crm],
b'OVGPSD2' : [self._read_oug2_3, self._read_oug_psd],
#b'OVGRMS2' : [self._table_passer, self._table_passer],
#b'OVGNO2' : [self._table_passer, self._table_passer],
b'OVGRMS2' : [self._read_oug2_3, self._read_oug_rms],
b'OVGNO2' : [self._read_oug2_3, self._read_oug_no],
#==================================
#b'GPL': [self._table_passer, self._table_passer],
#b'OMM2' : [self._table_passer, self._table_passer], # max/min table - kinda useless
b'ERRORN' : [self._table_passer, self._table_passer], # p-element error summary table
#==================================
b'EDOM' : [self._table_passer, self._table_passer],
b'OUG2T' : [self._table_passer, self._table_passer],
# acceleration
b'OAGATO1' : [self._read_oug1_3, self._read_oug_ato],
b'OAGCRM1' : [self._read_oug1_3, self._read_oug_crm],
b'OAGPSD1' : [self._read_oug1_3, self._read_oug_psd],
b'OAGRMS1' : [self._read_oug1_3, self._read_oug_rms],
b'OAGNO1' : [self._read_oug1_3, self._read_oug_no],
b'OAGATO2' : [self._read_oug2_3, self._read_oug_ato],
b'OAGCRM2' : [self._read_oug2_3, self._read_oug_crm],
b'OAGPSD2' : [self._read_oug2_3, self._read_oug_psd],
#b'OAGRMS2' : [self._table_passer, self._table_passer],
#b'OAGNO2' : [self._table_passer, self._table_passer],
b'OAGRMS2' : [self._read_oug2_3, self._read_oug_rms],
b'OAGNO2' : [self._read_oug2_3, self._read_oug_no],
# stress
b'OESATO1' : [self._read_oes1_3, self._read_oes1_4],
b'OESCRM1' : [self._read_oes1_3, self._read_oes1_4],
b'OESPSD1' : [self._read_oes1_3, self._read_oes1_4],
b'OESRMS1' : [self._read_oes1_3, self._read_oes1_4],
b'OESNO1' : [self._read_oes1_3, self._read_oes1_4],
# OESXRM1C : Composite element RMS stresses in SORT1 format for random analysis that includes von Mises stress output.
b'OESXRMS1' : [self._read_oes1_3, self._read_oes1_4],
b'OESXRM1C' : [self._read_oes1_3, self._read_oes1_4],
b'OESXNO1' : [self._read_oes1_3, self._read_oes1_4],
b'OESXNO1C' : [self._read_oes1_3, self._read_oes1_4],
b'OESATO2' : [self._read_oes2_3, self._read_oes2_4],
b'OESCRM2' : [self._read_oes2_3, self._read_oes2_4],
b'OESPSD2' : [self._read_oes2_3, self._read_oes2_4],
#b'OESRMS2' : [self._read_oes1_3, self._read_oes1_4], # buggy on isat random
#b'OESNO2' : [self._read_oes1_3, self._read_oes1_4], # buggy on isat random
b'OESRMS2' : [self._table_passer, self._table_passer], # buggy on isat random
b'OESNO2' : [self._table_passer, self._table_passer], # buggy on isat random
# force
b'OEFATO1' : [self._read_oef1_3, self._read_oef1_4],
b'OEFCRM1' : [self._read_oef1_3, self._read_oef1_4],
b'OEFPSD1' : [self._read_oef1_3, self._read_oef1_4],
b'OEFRMS1' : [self._read_oef1_3, self._read_oef1_4],
b'OEFNO1' : [self._read_oef1_3, self._read_oef1_4],
b'OEFATO2' : [self._read_oef2_3, self._read_oef2_4],
b'OEFCRM2' : [self._read_oef2_3, self._read_oef2_4],
b'OEFPSD2' : [self._read_oef2_3, self._read_oef2_4],
#b'OEFRMS2' : [self._read_oef2_3, self._read_oef2_4], # buggy on isat random
}
if self.is_nx and 0:
table_mapper2 = {
#b'OUGRMS2' : [self._table_passer, self._table_passer], # buggy on isat random
#b'OUGNO2' : [self._table_passer, self._table_passer], # buggy on isat random
b'OUGRMS2' : [self._read_oug2_3, self._read_oug_rms], # buggy on isat random
b'OUGNO2' : [self._read_oug2_3, self._read_oug_no], # buggy on isat random
#b'OQMRMS2' : [self._table_passer, self._table_passer], # buggy on isat random
#b'OQMNO2' : [self._table_passer, self._table_passer], # buggy on isat random
b'OQMRMS2' : [self._read_oqg2_3, self._read_oqg_mpc_rms], # buggy on isat random
b'OQMNO2' : [self._read_oqg2_3, self._read_oqg_mpc_no], # buggy on isat random
#b'OSTRRMS2' : [self._table_passer, self._table_passer], # buggy on isat random
#b'OSTRNO2' : [self._table_passer, self._table_passer], # buggy on isat random
b'OSTRRMS2' : [self._read_oes2_3, self._read_ostr2_4], # buggy on isat random
b'OSTRNO2' : [self._read_oes2_3, self._read_ostr2_4], # buggy on isat random
b'OESRMS2' : [self._read_oes1_3, self._read_oes1_4], # buggy on isat random
b'OESNO2' : [self._read_oes1_3, self._read_oes1_4], # buggy on isat random
#b'OESRMS2' : [self._table_passer, self._table_passer], # buggy on isat random
#b'OESNO2' : [self._table_passer, self._table_passer], # buggy on isat random
b'OEFNO2' : [self._read_oef2_3, self._read_oef2_4],
#b'OEFNO2' : [self._table_passer, self._table_passer], # buggy on isat_random_steve2.op2
}
for key, value in table_mapper2.items():
table_mapper[key] = value
#table_mapper.update(table_mapper2)
return table_mapper
def _read_mpf_3(self, data, ndata: int) -> int:
"""reads table 3 (the header table)
OFMPF2E Table of fluid mode participation factors by excitation frequencies.
OFMPF2M Table of fluid mode participation factors by normal mode.
OSMPF2E Table of structure mode participation factors by excitation frequencies.
OSMPF2M Table of structure mode participation factors by normal mode.
OPMPF2E Table of panel mode participation factors by excitation frequencies.
OPMPF2M Table of panel mode participation factors by normal mode.
OLMPF2E Table of load mode participation factors by excitation frequencies.
OLMPF2M Table of load mode participation factors by normal mode.
OGMPF2E Table of grid mode participation factors by excitation frequencies.
OGMPF2M Table of grid mode participation factors by normal mode.
"""
#self._set_times_dtype()
self.nonlinear_factor = np.nan
self.is_table_1 = True
self.is_table_2 = False
unused_three = self.parse_approach_code(data)
self.words = [
'approach_code', 'table_code', '???', 'isubcase',
'???', '???', '???', 'random_code',
'format_code', 'num_wide', '???', '???',
'acoustic_flag', '???', '???', '???',
'???', '???', '???', '???',
'???', '???', 'thermal', '???',
'???', 'Title', 'subtitle', 'label']
## random code
self.random_code = self.add_data_parameter(data, 'random_code', b'i', 8, False)
## format code
self.format_code = self.add_data_parameter(data, 'format_code', b'i', 9, False)
## number of words per entry in record
self.num_wide = self.add_data_parameter(data, 'num_wide', b'i', 10, False)
## acoustic pressure flag
self.acoustic_flag = self.add_data_parameter(data, 'acoustic_flag', b'i', 13, False)
## thermal flag; 1 for heat transfer, 0 otherwise
self.thermal = self.add_data_parameter(data, 'thermal', b'i', 23, False)
#if self.analysis_code == 1: # statics / displacement / heat flux
## load set number
#self.lsdvmn = self.add_data_parameter(data, 'lsdvmn', b'i', 5, False)
#self.data_names = self.apply_data_code_value('data_names', ['lsdvmn'])
#self.setNullNonlinearFactor()
#elif self.analysis_code == 2: # real eigenvalues
## mode number
#self.mode = self.add_data_parameter(data, 'mode', b'i', 5)
## eigenvalue
#self.eign = self.add_data_parameter(data, 'eign', b'f', 6, False)
## mode or cycle .. todo:: confused on the type - F1???
#self.mode_cycle = self.add_data_parameter(data, 'mode_cycle', b'i', 7, False)
#self.update_mode_cycle('mode_cycle')
#self.data_names = self.apply_data_code_value('data_names', ['mode', 'eign', 'mode_cycle'])
#elif self.analysis_code == 3: # differential stiffness
#self.lsdvmn = self.get_values(data, b'i', 5) ## load set number
#self.data_code['lsdvmn'] = self.lsdvmn
#elif self.analysis_code == 4: # differential stiffness
#self.lsdvmn = self.get_values(data, b'i', 5) ## load set number
if self.analysis_code == 5: # frequency
# frequency
self.node_id = self.add_data_parameter(data, 'node_id', b'i', 5, fix_device_code=True)
self.data_names = self.apply_data_code_value('data_names', ['node_id'])
#self.freq = self.add_data_parameter(data, 'freq', b'f', 5)
#self.data_names = self.apply_data_code_value('data_names', ['freq'])
#elif self.analysis_code == 6: # transient
## time step
#self.dt = self.add_data_parameter(data, 'dt', b'f', 5)
#self.data_names = self.apply_data_code_value('data_names', ['dt'])
#elif self.analysis_code == 7: # pre-buckling
## load set number
#self.lsdvmn = self.add_data_parameter(data, 'lsdvmn', b'i', 5)
#self.data_names = self.apply_data_code_value('data_names', ['lsdvmn'])
#elif self.analysis_code == 8: # post-buckling
## load set number
#self.lsdvmn = self.add_data_parameter(data, 'lsdvmn', b'i', 5)
## real eigenvalue
#self.eigr = self.add_data_parameter(data, 'eigr', b'f', 6, False)
#self.data_names = self.apply_data_code_value('data_names', ['lsdvmn', 'eigr'])
#elif self.analysis_code == 9: # complex eigenvalues
## mode number
#self.mode = self.add_data_parameter(data, 'mode', b'i', 5)
## real eigenvalue
#self.eigr = self.add_data_parameter(data, 'eigr', b'f', 6, False)
## imaginary eigenvalue
#self.eigi = self.add_data_parameter(data, 'eigi', b'f', 7, False)
#self.data_names = self.apply_data_code_value('data_names', ['mode', 'eigr', 'eigi'])
#elif self.analysis_code == 10: # nonlinear statics
## load step
#self.lftsfq = self.add_data_parameter(data, 'lftsfq', b'f', 5)
#self.data_names = self.apply_data_code_value('data_names', ['lftsfq'])
#elif self.analysis_code == 11: # old geometric nonlinear statics
## load set number
#self.lsdvmn = self.add_data_parameter(data, 'lsdvmn', b'i', 5)
#self.data_names = self.apply_data_code_value('data_names', ['lsdvmn'])
#elif self.analysis_code == 12: # contran ? (may appear as aCode=6) --> straight from DMAP...grrr...
## load set number
#self.lsdvmn = self.add_data_parameter(data, 'lsdvmn', b'i', 5)
#self.data_names = self.apply_data_code_value('data_names', ['lsdvmn'])
else:
msg = f'invalid analysis_code...analysis_code={self.analysis_code}\ndata={self.data_code}'
raise RuntimeError(msg)
#print self.code_information()
#
self.fix_format_code()
if self.num_wide == 8:
self.format_code = 1
self.data_code['format_code'] = 1
else:
#self.fix_format_code()
if self.format_code == 1:
self.format_code = 2
self.data_code['format_code'] = 2
assert self.format_code in [2, 3], self.code_information()
self._parse_thermal_code()
if self.is_debug_file:
self.binary_debug.write(' approach_code = %r\n' % self.approach_code)
self.binary_debug.write(' tCode = %r\n' % self.tCode)
self.binary_debug.write(' isubcase = %r\n' % self.isubcase)
self._read_title(data)
self._write_debug_bits()
def _read_mpf_4(self, data, ndata):
"""unused"""
if self.read_mode == 1: # or self.table_name_str not in ['OFMPF2M']:
return ndata
#print(self.table_name_str, ndata, self.num_wide) # 176
#self.show_ndata(100, types='ifs')
structi = Struct('fiff')
nelements = ndata // 16
ndev = ndata % 16
assert ndev == 0, ndev
for i in range(nelements):
datai = data[i*16 : (i+1)*16]
freq, dunno_int, mag, phase = structi.unpack(datai)
            assert dunno_int == 2, str((self.node_id, freq, dunno_int, mag, phase))
#print(self.node_id, freq, dunno_int, mag, phase)
#print()
if self.isubtable == -4:
self.log.warning('%s results were read, but not saved' % self.table_name_str)
return ndata
def _read_pvto_3(self, data, ndata):
"""unused"""
raise RuntimeError(self.read_mode)
def _read_pvto_4(self, data, ndata):
"""reads PARAM cards"""
if self.read_mode == 1:
return ndata
iloc = self.f.tell()
try:
ndata2 = self._read_pvto_4_helper(data, ndata)
except Exception as e:
self.log.error(str(e))
if 'dev' in __version__:
raise # only for testing
self.f.seek(iloc)
ndata2 = ndata
return ndata2
def _read_pvto_4_helper(self, data, ndata):
"""reads PARAM cards"""
nvalues = ndata // 4
assert ndata % 4 == 0, ndata
structs8 = Struct(b'8s')
#struct2s8 = Struct(b'4s8s')
struct2i = Struct(b'ii')
struct2f = Struct(b'ff')
struct2d = Struct(b'dd')
i = 0
#print('---------------------------')
while i < nvalues:
#print('*i=%s nvalues=%s' % (i, nvalues))
word = data[i*4:(i+2)*4].rstrip()
#print('word=%r' % word)
#word = s8.unpack(word)[0]#.decode(self._encoding)
# the first two entries are typically trash, then we can get values
if word in INT_PARAMS_1:
slot = data[(i+2)*4:(i+4)*4]
value = struct2i.unpack(slot)[1]
i += 4
elif word in FLOAT_PARAMS_1:
slot = data[(i+2)*4:(i+4)*4]
value = struct2f.unpack(slot)[1]
i += 4
elif word in FLOAT_PARAMS_2:
slot = data[(i+3)*4:(i+5)*4]
value = struct2f.unpack(slot)
i += 5
elif word in INT_PARAMS_2:
slot = data[(i+3)*4:(i+5)*4]
value = struct2i.unpack(slot)
i += 5
elif word in DOUBLE_PARAMS_1:
slot = data[(i+1)*4:(i+8)*4]
try:
value = struct2d.unpack(slot)[1]
except:
print(word)
raise
i += 8
#elif word in [b'VUHEXA']:
#self.show_data(data[i*4:(i+5)*4], types='ifs', endian=None)
#aaa
elif word in STR_PARAMS_1:
i += 3
slot = data[i*4:(i+2)*4]
value = structs8.unpack(slot)[0].decode('latin1').rstrip()
i += 2
else:
self.show_data(data[i*4:(i+4)*4], types='ifsd')
self.show_data(data[i*4+4:i*4+i*4+12], types='ifsd')
raise NotImplementedError('%r is not a supported PARAM' % word)
key = word.decode('latin1')
#print(key, value)
self.params[key] = PARAM(key, [value], comment='')
return nvalues
def _not_available(self, data, ndata):
"""testing function"""
if ndata > 0:
raise RuntimeError('this should never be called...'
'table_name=%r len(data)=%s' % (self.table_name, ndata))
def _table_crasher(self, data, ndata):
"""auto-table crasher"""
if self.is_debug_file:
self.binary_debug.write(' crashing table = %s\n' % self.table_name)
raise NotImplementedError(self.table_name)
return ndata
def _table_passer(self, data, ndata):
"""auto-table skipper"""
if self.is_debug_file:
self.binary_debug.write(' skipping table = %s\n' % self.table_name)
if self.table_name not in GEOM_TABLES and self.isubtable > -4:
self.log.warning(' skipping table: %s' % self.table_name_str)
if not is_release and self.isubtable > -4:
if self.table_name in GEOM_TABLES and not self.make_geom:
pass
else:
print('dont skip table %r' % self.table_name_str)
raise RuntimeError('dont skip table %r' % self.table_name_str)
return ndata
def _validate_op2_filename(self, op2_filename):
"""
Pops a GUI if the op2_filename hasn't been set.
Parameters
----------
op2_filename : str
the filename to check (None -> gui)
Returns
-------
op2_filename : str
a valid file string
"""
if op2_filename is None:
from pyNastran.utils.gui_io import load_file_dialog
wildcard_wx = "Nastran OP2 (*.op2)|*.op2|" \
"All files (*.*)|*.*"
wildcard_qt = "Nastran OP2 (*.op2);;All files (*)"
title = 'Please select a OP2 to load'
op2_filename, unused_wildcard_level = load_file_dialog(
title, wildcard_wx, wildcard_qt, dirname='')
assert op2_filename is not None, op2_filename
return op2_filename
def _create_binary_debug(self):
"""Instatiates the ``self.binary_debug`` variable/file"""
if hasattr(self, 'binary_debug') and self.binary_debug is not None:
self.binary_debug.close()
del self.binary_debug
self.is_debug_file, self.binary_debug = create_binary_debug(
self.op2_filename, self.debug_file, self.log)
def read_op2(self, op2_filename=None, combine=False, load_as_h5=False, h5_file=None, mode=None):
"""
Starts the OP2 file reading
Parameters
----------
op2_filename : str
the op2 file
combine : bool; default=True
True : objects are isubcase based
False : objects are (isubcase, subtitle) based;
will be used for superelements regardless of the option
load_as_h5 : default=None
False : don't setup the h5_file
True : loads the op2 as an h5 file to save memory
stores the result.element/data attributes in h5 format
h5_file : h5File; default=None
None : ???
h5File : ???
+--------------+-----------------------+
| op2_filename | Description |
+--------------+-----------------------+
| None | a dialog is popped up |
+--------------+-----------------------+
| string | the path is used |
+--------------+-----------------------+
"""
fname = os.path.splitext(op2_filename)[0]
self.op2_filename = op2_filename
self.bdf_filename = fname + '.bdf'
self.f06_filename = fname + '.f06'
self.des_filename = fname + '.des'
self.h5_filename = fname + '.h5'
self.op2_reader.load_as_h5 = load_as_h5
if load_as_h5:
h5_file = None
import h5py
self.h5_file = h5py.File(self.h5_filename, 'w')
self.op2_reader.h5_file = self.h5_file
self._count = 0
if self.read_mode == 1:
#sr = list(self._results.saved)
#sr.sort()
#self.log.debug('_results.saved = %s' % str(sr))
#self.log.info('_results.saved = %s' % str(sr))
pass
if self.read_mode != 2:
op2_filename = self._validate_op2_filename(op2_filename)
self.log.info('op2_filename = %r' % op2_filename)
if not is_binary_file(op2_filename):
if os.path.getsize(op2_filename) == 0:
raise IOError('op2_filename=%r is empty.' % op2_filename)
raise IOError('op2_filename=%r is not a binary OP2.' % op2_filename)
self._create_binary_debug()
self._setup_op2()
self.op2_reader.read_nastran_version(mode)
_op2 = self.op2_reader.op2
data = _op2.f.read(4)
_op2.f.seek(_op2.n)
if len(data) == 0:
raise FatalError('There was a Nastran FATAL Error. Check the F06.\n'
'No tables exist...check for a license issue')
#=================
table_name = self.op2_reader._read_table_name(rewind=True, stop_on_failure=False)
if table_name is None:
raise FatalError('There was a Nastran FATAL Error. Check the F06.\n'
'No tables exist...check for a license issue')
self._make_tables()
table_names = self._read_tables(table_name)
self.close_op2(force=False)
#self.remove_unpickable_data()
return table_names
def close_op2(self, force=True):
"""closes the OP2 and debug file"""
if self.is_debug_file:
self.binary_debug.write('-' * 80 + '\n')
self.binary_debug.write('f.tell()=%s\ndone...\n' % self.f.tell())
self.binary_debug.close()
if self._close_op2 or force:
if self.f is not None:
# can happen if:
# - is ascii file
self.f.close()
del self.binary_debug
del self.f
self._cleanup_data_members()
self._cleanup_words()
#self.op2_reader.h5_file.close()
def _cleanup_words(self):
"""
Remove internal parameters that are not useful and just clutter
the object attributes.
"""
words = [
'isubcase', 'int3', '_table4_count', 'nonlinear_factor',
'is_start_of_subtable', 'superelement_adaptivity_index',
'thermal_bits', 'is_vectorized', 'pval_step', #'_frequencies',
'_analysis_code_fmt', 'isubtable', '_data_factor', 'sort_method',
'acoustic_flag', 'approach_code', 'format_code_original',
'element_name', 'sort_bits', 'code', 'n', 'use_vector', 'ask',
'stress_bits', 'expected_times', 'table_code', 'sort_code',
'is_all_subcases', 'num_wide', '_table_mapper', 'label',
'apply_symmetry',
'words', 'device_code', 'table_name', '_count', 'additional_matrices',
# 350
'data_names', '_close_op2',
'op2_reader',
# 74
'generalized_tables',
# 124
'is_table_1', 'is_table_2', 'ntotal', 'element_mapper',
'is_debug_file', 'debug_file',
'_results', 'skip_undefined_matrices',
# 140
#---------------------------------------------------------
# dont remove...
# make_geom, title, read_mode
# result_names, op2_results
]
for word in words:
if hasattr(self, word):
delattr(self, word)
def _setup_op2(self):
"""
Does preliminary op2 tasks like:
- open the file
- set the endian
- preallocate some struct objects
"""
#: file index
self.n = 0
self.table_name = None
if not hasattr(self, 'f') or self.f is None:
#: the OP2 file object
self.f = open(self.op2_filename, 'rb')
#: the endian in bytes
self._endian = None
#: the endian in unicode
self._uendian = None
flag_data = self.f.read(20)
self.f.seek(0)
if unpack(b'>5i', flag_data)[0] == 4:
self._uendian = '>'
self._endian = b'>'
elif unpack(b'<5i', flag_data)[0] == 4:
self._uendian = '<'
self._endian = b'<'
#elif unpack(b'<ii', flag_data)[0] == 4:
#self._endian = b'<'
else:
# Matrices from test show
# (24, 10, 10, 6, 2) before the Matrix Name...
#self.show_data(flag_data, types='iqlfsld', endian='<')
#print('----------')
#self.show_data(flag_data, types='iqlfsld', endian='>')
raise FatalError('cannot determine endian')
else:
self.op2_reader._goto(self.n)
if self.read_mode == 1:
self._set_structs()
def _make_tables(self):
return
#global RESULT_TABLES, NX_RESULT_TABLES, MSC_RESULT_TABLES
#table_mapper = self._get_table_mapper()
#RESULT_TABLES = table_mapper.keys()
def _read_tables(self, table_name: bytes) -> List[bytes]:
"""
Reads all the geometry/result tables.
The OP2 header is not read by this function.
Parameters
----------
table_name : bytes str
the first table's name
Returns
-------
table_names : List[bytes str]
the table names that were read
"""
op2_reader = self.op2_reader
table_names = []
self.table_count = defaultdict(int)
while table_name is not None:
self.table_count[table_name] += 1
table_names.append(table_name)
if self.is_debug_file:
self.binary_debug.write('-' * 80 + '\n')
self.binary_debug.write('table_name = %r\n' % (table_name))
if is_release:
self.log.debug(' table_name=%r' % table_name)
self.table_name = table_name
#if 0:
#op2_reader._skip_table(table_name)
#else:
#print(table_name, table_name in op2_reader.mapped_tables)
if table_name in self.generalized_tables:
t0 = self.f.tell()
self.generalized_tables[table_name](self)
assert self.f.tell() != t0, 'the position was unchanged...'
elif table_name in op2_reader.mapped_tables:
t0 = self.f.tell()
op2_reader.mapped_tables[table_name]()
assert self.f.tell() != t0, 'the position was unchanged...'
elif table_name in GEOM_TABLES:
op2_reader.read_geom_table() # DIT (agard)
elif table_name in MATRIX_TABLES:
op2_reader.read_matrix(table_name)
elif table_name in RESULT_TABLES:
op2_reader.read_results_table()
elif self.skip_undefined_matrices:
op2_reader.read_matrix(table_name)
elif table_name.strip() in self.additional_matrices:
op2_reader.read_matrix(table_name)
else:
msg = (
'Invalid Table = %r\n\n'
'If you have matrices that you want to read, see:\n'
' model.set_additional_matrices_to_read(matrices)'
' matrices = {\n'
" b'BHH' : True,\n"
" b'KHH' : False,\n"
' } # you want to read some matrices, but not others\n'
" matrices = [b'BHH', b'KHH'] # assumes True\n\n"
                'If the table is a geom/result table, see:\n'
' model.set_additional_result_tables_to_read(methods_dict)\n'
" methods_dict = {\n"
" b'OUGV1' : [method3, method4],\n"
" b'GEOM4SX' : [method3, method4],\n"
" b'OES1X1' : False,\n"
' }\n\n'
'If you want to take control of the OP2 reader (mainly useful '
'for obscure tables), see:\n'
" methods_dict = {\n"
" b'OUGV1' : [method],\n"
' }\n'
' model.set_additional_generalized_tables_to_read(methods_dict)\n' % (
table_name)
)
raise NotImplementedError(msg)
table_name = op2_reader._read_table_name(rewind=True, stop_on_failure=False)
return table_names
def set_additional_generalized_tables_to_read(self, tables):
"""
Adds methods to call a generalized table.
Everything is left to the user.
::
def read_some_table(self):
# read the data from self.f
pass
# let's overwrite the existing OP2 table
model2 = OP2Geom(debug=True)
generalized_tables = {
b'GEOM1S' : read_some_table,
}
model.set_additional_generalized_tables_to_read(generalized_tables)
"""
self._update_generalized_tables(tables)
self.generalized_tables = tables
def set_additional_result_tables_to_read(self, tables):
"""
Adds methods to read additional result tables.
This is expected to really only be used for skipping
unsupported tables or disabling enabled tables that are
buggy (e.g., OUGV1).
Parameters
----------
tables : Dict[bytes] = varies
a dictionary of key=name, value=list[method3, method4]/False,
False : skips a table
applies self._table_passer to method3 and method4
method3 : function
function to read table 3 results (e.g., metadata)
method4 : function
function to read table 4 results (e.g., the actual results)
"""
self._update_generalized_tables(tables)
table_mapper = self._get_table_mapper()
#is_added = False
def func():
"""overloaded version of _get_table_mapper"""
#if is_added:
#return table_mapper
for _key, methods in tables.items():
if methods is False:
table_mapper[_key] = [self._table_passer, self._table_passer]
else:
assert len(methods) == 2, methods
table_mapper[_key] = methods
#is_added = True
return table_mapper
self._get_table_mapper = func
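    # Example usage (illustrative; OUGV1 is the sample table named in the docstring):
    #   model.set_additional_result_tables_to_read({b'OUGV1': False})               # skip OUGV1
    #   model.set_additional_result_tables_to_read({b'OUGV1': [method3, method4]})  # custom readers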
def _update_generalized_tables(self, tables):
"""
helper function for:
- set_additional_generalized_tables_to_read
- set_additional_result_tables_to_read
"""
global NX_RESULT_TABLES
global MSC_RESULT_TABLES
global RESULT_TABLES
failed_keys = []
keys = list(tables.keys())
for _key in keys:
if not isinstance(_key, bytes):
failed_keys.append(_key)
if hasattr(self, 'is_nx') and self.is_nx:
NX_RESULT_TABLES.append(_key)
else:
MSC_RESULT_TABLES.append(_key)
if failed_keys:
failed_keys_str = [str(_key) for _key in failed_keys]
            raise TypeError('[%s] must be bytes' % ', '.join(failed_keys_str))
RESULT_TABLES = NX_RESULT_TABLES + MSC_RESULT_TABLES
#RESULT_TABLES.sort()
#assert 'OESXRMS1' in RESULT_TABLES, RESULT_TABLES
def set_additional_matrices_to_read(self, matrices: Union[List[str], Dict[str, bool]]):
"""
Matrices (e.g., KHH) can be sparse or dense.
Parameters
----------
matrices : List[str]; Dict[str] = bool
List[str]:
simplified method to add matrices; value will be True
Dict[str] = bool:
a dictionary of key=name, value=True/False,
where True/False indicates the matrix should be read
.. note:: If you use an already defined table (e.g. KHH), it
will be ignored. If the table you requested doesn't
exist, there will be no effect.
.. note:: Do not use this for result tables like OUGV1, which
store results like displacement. Those are not matrices.
Matrices are things like DMIGs.
"""
if isinstance(matrices, list):
matrices2 = {}
for matrix in matrices:
assert isinstance(matrix, str), 'matrix=%r' % str(matrix)
matrices2[matrix] = True
matrices = matrices2
self.additional_matrices = matrices
self.additional_matrices = {}
for matrix_name, matrix in matrices.items():
if isinstance(matrix_name, bytes):
self.additional_matrices[matrix_name] = matrix
else:
self.additional_matrices[matrix_name.encode('latin1')] = matrix
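    # Example usage (illustrative; BHH/KHH are the sample matrix names used elsewhere in this file):
    #   model.set_additional_matrices_to_read(['BHH', 'KHH'])               # read both
    #   model.set_additional_matrices_to_read({'BHH': True, 'KHH': False})  # read BHH, skip KHH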
def _finish(self):
"""
Clears out the data members contained within the self.words variable.
This prevents mixups when working on the next table, but otherwise
has no effect.
"""
for word in self.words:
if word != '???' and hasattr(self, word):
if word not in ['Title', 'reference_point']:
delattr(self, word)
self.obj = None
if hasattr(self, 'subtable_name'):
del self.subtable_name
def main(): # pragma: no cover
"""testing pickling"""
from pickle import dump, load
txt_filename = 'solid_shell_bar.txt'
pickle_file = open(txt_filename, 'wb')
op2_filename = 'solid_shell_bar.op2'
op2 = OP2_Scalar()
op2.read_op2(op2_filename)
print(op2.displacements[1])
dump(op2, pickle_file)
pickle_file.close()
    pickle_file = open(txt_filename, 'rb')
op2 = load(pickle_file)
pickle_file.close()
print(op2.displacements[1])
#import sys
#op2_filename = sys.argv[1]
#o = OP2_Scalar()
#o.read_op2(op2_filename)
#(model, ext) = os.path.splitext(op2_filename)
#f06_outname = model + '.test_op2.f06'
#o.write_f06(f06_outname)
def create_binary_debug(op2_filename: str, debug_file: str, log) -> Tuple[bool, Any]:
"""helper method"""
binary_debug = None
if debug_file is not None:
#: an ASCII version of the op2 (creates lots of output)
log.debug('debug_file = %s' % debug_file)
binary_debug = open(debug_file, 'w')
binary_debug.write(op2_filename + '\n')
is_debug_file = True
else:
is_debug_file = False
return is_debug_file, binary_debug
if __name__ == '__main__': # pragma: no cover
main()
|
[
"mesheb82@gmail.com"
] |
mesheb82@gmail.com
|
231c5670a7b9101a97de1e024359e17b16341b00
|
26f6313772161851b3b28b32a4f8d255499b3974
|
/Python/480_SlidingWindowMedian.py
|
2cd4bf46c6091ce083662f732c6a2cd957c5e801
|
[] |
no_license
|
here0009/LeetCode
|
693e634a3096d929e5c842c5c5b989fa388e0fcd
|
f96a2273c6831a8035e1adacfa452f73c599ae16
|
refs/heads/master
| 2023-06-30T19:07:23.645941
| 2021-07-31T03:38:51
| 2021-07-31T03:38:51
| 266,287,834
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,531
|
py
|
"""
Median is the middle value in an ordered integer list. If the size of the list is even, there is no middle value. So the median is the mean of the two middle value.
Examples:
[2,3,4] , the median is 3
[2,3], the median is (2 + 3) / 2 = 2.5
Given an array nums, there is a sliding window of size k which is moving from the very left of the array to the very right. You can only see the k numbers in the window. Each time the sliding window moves right by one position. Your job is to output the median array for each window in the original array.
For example,
Given nums = [1,3,-1,-3,5,3,6,7], and k = 3.
Window position Median
--------------- -----
[1 3 -1] -3 5 3 6 7 1
1 [3 -1 -3] 5 3 6 7 -1
1 3 [-1 -3 5] 3 6 7 -1
1 3 -1 [-3 5 3] 6 7 3
1 3 -1 -3 [5 3 6] 7 5
1 3 -1 -3 5 [3 6 7] 6
Therefore, return the median sliding window as [1,-1,-1,3,5,6].
Note:
You may assume k is always valid, ie: k is always smaller than input array's size for non-empty array.
Answers within 10^-5 of the actual value will be accepted as correct.
"""
from typing import List
class Solution:
    def medianSlidingWindow(self, nums: List[int], k: int) -> List[float]:
        """
        use a fenwick tree to store sorted index based on the value of nums
        update the fenwick tree when the window slides
        query k//2 + 1 (and k // 2 if k is even) for median
        """
        sorted_vals = sorted(set(nums))
        rank = {v: i + 1 for i, v in enumerate(sorted_vals)}  # 1-based rank of each value
        m = len(sorted_vals)
        counts = [0] * (m + 1)  # fenwick tree over value ranks
        def update(i, v):
            while i <= m:
                counts[i] += v
                i += i & -i
        def query(j):
            # value of the j-th smallest element currently in the window
            pos, total = 0, 0
            for p in range(m.bit_length(), -1, -1):
                nxt = pos + (1 << p)
                if nxt <= m and total + counts[nxt] < j:
                    pos, total = nxt, total + counts[nxt]
            return sorted_vals[pos]  # pos + 1 is the rank of the answer
        res = []
        half_k, rmd = divmod(k, 2)
        for i, v in enumerate(nums):
            update(rank[v], 1)
            if i >= k:
                update(rank[nums[i - k]], -1)
            if i >= k - 1:
                upper = query(half_k + 1)
                res.append(upper if rmd else (upper + query(half_k)) / 2)
        return res
from bisect import insort
class Solution:
def medianSlidingWindow(self, nums: List[int], k: int) -> List[float]:
res = []
lst = sorted(nums[:k - 1])
len_n = len(nums)
half_k, rmd = divmod(k, 2)
for i in range(k - 1, len_n):
if i >= k:
lst.remove(nums[i - k])
insort(lst, nums[i])
# print(i, nums[i], lst)
if rmd == 1:
res.append(lst[half_k])
else:
res.append((lst[half_k] + lst[half_k - 1]) / 2)
return res
from sortedcontainers import SortedList
class Solution:
def medianSlidingWindow(self, nums: List[int], k: int) -> List[float]:
window = SortedList(nums[:k])
res = []
half_k, rmd = divmod(k, 2)
tmp = window[half_k] if rmd == 1 else (window[half_k] + window[half_k - 1]) / 2
res.append(tmp)
for i in range(k, len(nums)):
window.remove(nums[i - k])
window.add(nums[i])
tmp = window[half_k] if rmd == 1 else (window[half_k] + window[half_k - 1]) / 2
res.append(tmp)
return res
# https://leetcode.com/problems/sliding-window-median/discuss/333240/Python-O(NlogN)-using-heap
import heapq
class Solution:
def medianSlidingWindow(self, nums: List[int], k: int) -> List[float]:
"""
use a upper heap and lower heap, keep the max and min part of the window respectively
keep the length of the lower heap len_k//2 and the length of the upper heap len_k - len_k//2.
iterate over the nums, keep the valid (in window) nums in upper and lower heap always to be len_k - len_k//2 and len_k//2.
only delete the invalid nums if they are at the top of the heap
"""
def poppush(h1, h2):
"""
pop an element from h1, push it to h2, convert the sign
"""
v, i = heapq.heappop(h1)
heapq.heappush(h2, (-v, i))
lower, upper = [], []
res = []
half_k, rmd = divmod(k, 2)
len_n = len(nums)
for i, v in enumerate(nums[:k]):
heapq.heappush(upper, (v, i))
for _ in range(half_k):
poppush(upper, lower)
res.append(upper[0][0] if rmd else (upper[0][0] - lower[0][0]) / 2)
# print(lower, upper)
for i in range(k, len_n):
v = nums[i]
j = i - k
pre_v = nums[j]
if v >= upper[0][0]:
heapq.heappush(upper, (v, i))
if pre_v <= upper[0][0]: # if pre_v > upper[0][0] then it is in upper, so the valid nums in lower and upper are the same, we do nothing. but if the pre_v is in lower, we need to transfer an element to lower to keep the balance.
# if (pre_v, j) is upper[0], we can transfer it to lower for removal, no loss for lower
# if (pre_v, j) is not upper[0], it can compensate the loss of lower
poppush(upper, lower)
else:
heapq.heappush(lower, (-v, i))
if pre_v >= upper[0][0]:
poppush(lower, upper)
while lower and lower[0][1] <= j:
heapq.heappop(lower)
while upper and upper[0][1] <= j:
heapq.heappop(upper)
# print(lower, upper)
res.append(upper[0][0] if rmd else (upper[0][0] - lower[0][0]) / 2)
return res
S = Solution()
nums = [1,3,-1,-3,5,3,6,7]
k = 3
print(S.medianSlidingWindow(nums, k))
nums = [1,4,2,3]
k = 4
print(S.medianSlidingWindow(nums, k))
|
[
"here0009@163.com"
] |
here0009@163.com
|
1d80cf91b20248fcd2018eb5e08868d720f55b3e
|
336dfb6b67520a04e71e2ef58fff1ab5794c63d9
|
/Tweet_streaming_crawler_20170519J.py
|
7d268714aa83a22fc5ce62902bf33ecdca4b3d77
|
[] |
no_license
|
pil0706/Tweet_Crawler_with_Jin
|
9e61230625f7c6e9d9af606d9d2b04c59403c987
|
8543bbb1fa6b258e2dcbd2545d9c6108ecaf8fe3
|
refs/heads/master
| 2021-01-21T19:02:05.586954
| 2017-05-23T05:52:56
| 2017-05-23T05:52:56
| 92,110,600
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,613
|
py
|
#-*- coding: utf-8 -*-
from tweepy import StreamListener
from tweepy import Stream
import json
import tweepy
import sys
import jsonpickle
import datetime
#
consumer_key = 'TcSyTjO8ycNRXWUIxxP0rbBFG'
consumer_secret = 'm6uNwnPkMwJkMqWwB4DPqRNlpmfdnEHyJcgiQUsI0tYjpiHaBi'
access_token = '142620900-cOv68WHjBU6WPxPRTGVcxbIaICeSweNGnsxgDNXR'
access_secret = 'QBrzkiCDx8auR5CtjcB8KYdsqnwUXrDFB5LGi51pjgpRQ'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
if (not api):
print ("Problem connecting to API")
sys.exit(-1)
#start
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
fName = 'stream_tweets' + timestamp +'.json'
file = open(fName, mode='a', encoding="utf-8")
class CustomStreamListener(StreamListener):
""" A listener handles tweets that are received from the stream.
This is a basic listener that just prints received tweets to stdout.
"""
non_bmp_map = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)
def on_status(self, status):
print(status)
def on_data(self, data):
parsed = json.loads(data)
        text = parsed['text'].translate(self.non_bmp_map)
print(text)
file.write(text)
return True
def on_error(self, status):
print(status)
return True
def on_timeout(self):
        print('Timeout...', file=sys.stderr)
return True
stream = Stream(auth, CustomStreamListener())
stream.filter(track=[u'문재인', u'심상정', u'안철수'])
|
[
"pil0706@gmail.com"
] |
pil0706@gmail.com
|
ca84b99a622c454d4e778aa201ee5b822cb49cff
|
c63aeed37436715b06a0e0c98c05dbf426a51817
|
/CODEUP/recursion/ex1901.py
|
3a7554c83944e12a932fd64405cff8371290b47b
|
[
"MIT"
] |
permissive
|
FridayAlgorithm/taesong_study
|
8fca9fa6a14b7dfadf984ce2efcb00038ae6ad07
|
50c07ee6ead0fb5bb80e0decb03b801cbbbabf9c
|
refs/heads/main
| 2023-05-14T21:06:17.595287
| 2021-06-06T15:00:34
| 2021-06-06T15:00:34
| 323,922,637
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
n = int(input())  # read the integer n
def integer_print(a):  # recursive function
    if a == 0:  # base case: stop the recursion
        return
    else:
        print(n-a+1)  # print the numbers in increasing order
        integer_print(a-1)  # recursive call
integer_print(n)  # start the recursion
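# Example (illustrative): for the input 3, the recursion prints 1, 2 and 3 on separate lines.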
|
[
"taesongweb@gmail.com"
] |
taesongweb@gmail.com
|
24d3e0b3de437c4336060cee7f12b0c827c09418
|
6533df70738b94e44e98c36f1fb1b087598054c1
|
/roosterize/ml/onmt/MultiSourceInputFeedRNNDecoder.py
|
a2eb3f1ccddb665eb05840eda5238f8b581f55d0
|
[
"MIT"
] |
permissive
|
Desperatesonic/roosterize
|
579989a838a591289f7ed6f67d017f538260d4b9
|
2990f7bdef8889045a26f3e9aaaca96d9c92e0bc
|
refs/heads/master
| 2023-01-22T03:24:31.595188
| 2020-11-23T16:18:33
| 2020-11-23T16:18:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,379
|
py
|
from typing import *
from roosterize.ml.onmt.MultiSourceGlobalAttention import MultiSourceGlobalAttention
from onmt.decoders.decoder import DecoderBase
from onmt.models.stacked_rnn import StackedLSTM, StackedGRU
from onmt.modules import context_gate_factory, GlobalAttention
from onmt.utils.misc import aeq
import torch
import torch.nn as nn
from seutil import LoggingUtils
class MultiSourceInputFeedRNNDecoder(DecoderBase):
logger = LoggingUtils.get_logger(__name__)
def __init__(self, rnn_type, bidirectional_encoder, num_layers,
hidden_size, attn_type="general", attn_func="softmax",
coverage_attn=False, context_gate=None,
copy_attn=False, dropout=0.0, embeddings=None,
reuse_copy_attn=False, copy_attn_type="general",
num_srcs: int = 1,
):
super().__init__(attentional=attn_type != "none" and attn_type is not None)
self.bidirectional_encoder = bidirectional_encoder
self.num_layers = num_layers
self.hidden_size = hidden_size
self.embeddings = embeddings
self.dropout = nn.Dropout(dropout)
self.num_srcs = num_srcs
# Decoder state
self.state = {}
# Build the RNN.
self.rnn = self._build_rnn(rnn_type,
input_size=self._input_size,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=dropout)
# Hidden state merging
self.hidden_merge_0 = nn.Linear(
in_features=self.hidden_size * self.num_srcs,
out_features=self.hidden_size,
)
if rnn_type == "LSTM":
self.hidden_merge_1 = nn.Linear(
in_features=self.hidden_size * self.num_srcs,
out_features=self.hidden_size,
)
# end if
# Set up the context gate.
self.context_gate = None
if context_gate is not None:
self.context_gate = context_gate_factory(
context_gate, self._input_size,
hidden_size, hidden_size, hidden_size
)
# end if
# Set up the standard attention.
assert not coverage_attn, "Coverage attention is not supported"
self._coverage: bool = coverage_attn
if not self.attentional:
if self._coverage: raise ValueError("Cannot use coverage term with no attention.")
self.ms_attn = None
else:
self.ms_attn = MultiSourceGlobalAttention(
hidden_size, coverage=coverage_attn,
attn_type=attn_type, attn_func=attn_func
)
# end if
# Copy attention
if copy_attn and not reuse_copy_attn:
if copy_attn_type == "none" or copy_attn_type is None: raise ValueError("Cannot use copy_attn with copy_attn_type none")
self.copy_ms_attn = MultiSourceGlobalAttention(
hidden_size, attn_type=copy_attn_type, attn_func=attn_func
)
else:
self.copy_ms_attn = None
# end if
self._reuse_copy_attn = reuse_copy_attn and copy_attn
if self._reuse_copy_attn and not self.attentional: raise ValueError("Cannot reuse copy attention with no attention.")
return
@classmethod
def from_opt(cls, opt, embeddings):
"""Alternate constructor."""
return cls(
opt.rnn_type,
opt.brnn,
opt.dec_layers,
opt.dec_rnn_size,
opt.global_attention,
opt.global_attention_function,
opt.coverage_attn,
opt.context_gate,
opt.copy_attn,
opt.dropout[0] if type(opt.dropout) is list
else opt.dropout,
embeddings,
opt.reuse_copy_attn,
opt.copy_attn_type,
opt.num_srcs,
)
def init_state(self,
src_list: List,
memory_bank_list: List,
encoder_final_list: List, # [srcs] x [layers*directions, batch, dim]
) -> NoReturn:
"""Initialize decoder state with last state of the encoder."""
def _fix_enc_hidden(hidden):
# The encoder hidden is (layers*directions) x batch x dim.
# We need to convert it to layers x batch x (directions*dim).
if self.bidirectional_encoder:
hidden = torch.cat([hidden[0:hidden.size(0):2],
hidden[1:hidden.size(0):2]], 2)
return hidden
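        # Shape illustration (assumed example): for a 2-layer bidirectional encoder with
        # hidden size H, each encoder_final tensor is [4, batch, H] and the interleaving
        # above produces [2, batch, 2*H].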
if isinstance(encoder_final_list[0], tuple): # LSTM
self.state["hidden"] = (
self.hidden_merge_0(torch.cat([_fix_enc_hidden(encoder_final[0]) for encoder_final in encoder_final_list], dim=2)),
self.hidden_merge_1(torch.cat([_fix_enc_hidden(encoder_final[1]) for encoder_final in encoder_final_list], dim=2))
)
else: # GRU
self.state["hidden"] = (
                self.hidden_merge_0(torch.cat([_fix_enc_hidden(encoder_final) for encoder_final in encoder_final_list], dim=2)),
)
# end if
# Init the input feed.
batch_size = self.state["hidden"][0].size(1)
h_size = (batch_size, self.hidden_size)
self.state["input_feed"] = self.state["hidden"][0].data.new(*h_size).zero_().unsqueeze(0)
self.state["coverage"] = None
return
def map_state(self, fn):
self.state["hidden"] = tuple(fn(h, 1) for h in self.state["hidden"])
self.state["input_feed"] = fn(self.state["input_feed"], 1)
if self._coverage and self.state["coverage"] is not None:
self.state["coverage"] = fn(self.state["coverage"], 1)
def detach_state(self):
self.state["hidden"] = tuple(h.detach() for h in self.state["hidden"])
self.state["input_feed"] = self.state["input_feed"].detach()
def forward(self,
tgt: torch.LongTensor, # [tgt_len, batch, nfeats]
memory_bank_list: List[torch.FloatTensor], # [srcs] x [src_len, batch, hidden]
memory_lengths_list: List[torch.LongTensor] = None, # [srcs] x [batch]
step=None
) -> Tuple[List[torch.FloatTensor], Dict[str, List[torch.FloatTensor]]]:
# dec_outs: [tgt_len, batch, hidden]
# attns: Dict[.., [tgt_len, batch, src_len]]
dec_state, dec_outs, attns = self._run_forward_pass(tgt, memory_bank_list, memory_lengths_list=memory_lengths_list)
# Update the state with the result.
if not isinstance(dec_state, tuple): dec_state = (dec_state,)
self.state["hidden"] = dec_state
self.state["input_feed"] = dec_outs[-1].unsqueeze(0)
self.state["coverage"] = None
if "coverage" in attns:
self.state["coverage"] = attns["coverage"][-1].unsqueeze(0)
# end if
# Concatenates sequence of tensors along a new dimension.
# NOTE: v0.3 to 0.4: dec_outs / attns[*] may not be list
# (in particular in case of SRU) it was not raising error in 0.3
# since stack(Variable) was allowed.
        # In 0.4, SRU returns a tensor that shouldn't be stacked
if type(dec_outs) == list:
dec_outs = torch.stack(dec_outs)
for k in attns:
if type(attns[k]) == list: attns[k] = torch.stack(attns[k])
# end for
# end if
return dec_outs, attns
def _run_forward_pass(self,
tgt: torch.LongTensor, # [tgt_len, batch, nfeats]
memory_bank_list: List[torch.FloatTensor], # [srcs] x [src_len, batch, hidden]
memory_lengths_list: List[torch.LongTensor] = None, # [srcs] x [batch]
) -> Tuple[torch.Tensor, List[torch.FloatTensor], Dict[str, List[torch.FloatTensor]]]:
# dec_state
# dec_outs
# attns
batch_size = tgt.size()[1]
# Additional args check.
input_feed = self.state["input_feed"].squeeze(0)
input_feed_batch, _ = input_feed.size()
_, tgt_batch, _ = tgt.size()
aeq(tgt_batch, input_feed_batch)
# END Additional args check.
dec_outs: List[torch.FloatTensor] = []
attns: Dict[str, List[torch.FloatTensor]] = {}
if self.ms_attn is not None: attns["std"] = []
if self.copy_ms_attn is not None or self._reuse_copy_attn: attns["copy"] = []
if self._coverage: attns["coverage"] = []
emb = self.embeddings(tgt)
assert emb.dim() == 3 # len x batch x embedding_dim
dec_state = self.state["hidden"]
coverage = self.state["coverage"].squeeze(0) if self.state["coverage"] is not None else None
# Input feed concatenates hidden state with
# input at every time step.
for emb_t in emb.split(1):
decoder_input = torch.cat([emb_t.squeeze(0), input_feed], 1)
rnn_output, dec_state = self.rnn(decoder_input, dec_state) # [batch, tgt_len, dim]
if self.attentional:
decoder_output, align_vectors = self.ms_attn(
rnn_output,
[mb.transpose(0, 1) for mb in memory_bank_list],
memory_lengths_list,
) # [tgt_len, batch, dim], [tgt_len, batch, num_srcs*src_len]
attns["std"].append(align_vectors)
else:
decoder_output = rnn_output
# end if
if self.context_gate is not None:
# TODO: context gate should be employed
# instead of second RNN transform.
decoder_output = self.context_gate(
decoder_input, rnn_output, decoder_output
)
decoder_output = self.dropout(decoder_output)
input_feed = decoder_output
dec_outs += [decoder_output]
# Update the coverage attention.
# PN: disabled coverage attention for now
# if self._coverage:
# coverage = p_attn if coverage is None else p_attn + coverage
# attns["coverage"] += [coverage]
if self.copy_ms_attn is not None:
_, copy_attn = self.copy_ms_attn(
decoder_output,
[mb.transpose(0, 1) for mb in memory_bank_list],
memory_lengths_list,
)
attns["copy"] += [copy_attn]
elif self._reuse_copy_attn:
for enc_i in range(self.num_srcs):
attns["copy"] = attns["std"]
# end for
# end if
return dec_state, dec_outs, attns
def _build_rnn(self, rnn_type, input_size,
hidden_size, num_layers, dropout):
assert rnn_type != "SRU", "SRU doesn't support input feed! " \
"Please set -input_feed 0!"
stacked_cell = StackedLSTM if rnn_type == "LSTM" else StackedGRU
return stacked_cell(num_layers, input_size, hidden_size, dropout)
@property
def _input_size(self):
"""Using input feed by concatenating input with attention vectors."""
return self.embeddings.embedding_size + self.hidden_size
def update_dropout(self, dropout):
self.dropout.p = dropout
self.embeddings.update_dropout(dropout)
|
[
"prodigy.sov@gmail.com"
] |
prodigy.sov@gmail.com
|
034340941526fdd10366317b3ece8cc1ffa4281b
|
7ec9ed419217eb3b313659c44dbda90eb2ddcb87
|
/util/visual.py
|
0913a2e8238055856b53c2a78c7a8ca83d90a172
|
[] |
no_license
|
adityapurohit1996/6DOF_arm
|
823db8bd7c3236aeb980149f348104180f387169
|
9193f9efeb92d9b2bf2004820ae9d653c59f5521
|
refs/heads/master
| 2021-01-14T04:03:32.207942
| 2019-10-07T02:14:41
| 2019-10-07T02:14:41
| 242,593,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,605
|
py
|
import time
import numpy as np
import tp_test
import matplotlib.pyplot as plt
tp = tp_test.TrajectoryPlanner()
waypoints = np.array([[ 0.0, 0.0, 0.0, 0.0, 0.0],
[ 1.0, 0.8, 1.0, 0.5, 1.0],
[-1.0,-0.8,-1.0,-0.5, -1.0],
[-1.0, 0.8, 1.0, 0.5, 1.0],
[1.0, -0.8,-1.0,-0.5, -1.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0]])
max_speed = 0.4 # in radius/s
Time = []
Q = []
V = []
t_last = 0
for i in range(len(waypoints) - 1):
tp.set_initial_wp(waypoints[i])
tp.set_final_wp(waypoints[i+1])
T = tp.calc_time_from_waypoints(max_speed)
[t, plan] = tp.generate_cubic_spline(T)
plan_q = plan[0]
plan_v = plan[1]
plan_q = plan_q.transpose()
plan_v = plan_v.transpose()
if i == 0:
Time = t
Q = plan_q
V = plan_v
else:
Time = np.concatenate((Time, t+t_last))
Q = np.concatenate((Q,plan_q), axis=1)
V = np.concatenate((V,plan_v), axis=1)
t_last = t_last + t[-1]
inte_q = np.zeros((5,1))
for qi in Q.T:
    inte_q += qi.reshape((5, 1))  # running sum of the joint angles (assumed intent; not used by the plots below)
ax1 = plt.subplot2grid(shape=(2,6), loc=(0,0), colspan=2)
ax2 = plt.subplot2grid((2,6), (0,2), colspan=2)
ax3 = plt.subplot2grid((2,6), (0,4), colspan=2)
ax4 = plt.subplot2grid((2,6), (1,1), colspan=2)
ax5 = plt.subplot2grid((2,6), (1,3), colspan=2)
axs = [ax1, ax2, ax3, ax4, ax5]
for i, [Qi, Vi] in enumerate(zip(Q, V)):
plt.suptitle(['joint #', i])
axs[i].plot(Time, Qi, 'r--', Time, Vi, 'b')
plt.show()
# print(plan[0][-1])
# tp.execute_plan(plan, 10)
# self.set_next_state("idle")
|
[
"shihchil@umich.edu"
] |
shihchil@umich.edu
|
431801fd48391f0e3cf3900712086036292262cd
|
07b043cd032ac348b44220823cb2ac3cf0d7f5a7
|
/DJANGO_ADMIN_CSV/urls.py
|
393dd8d1a3d5111b58d9b14db437da9febc2e326
|
[] |
no_license
|
muhtomakin/DJANGO_ADMIN_CSV
|
73439a5822805fb15d5fc6b863f5b0f388bd80e8
|
5ad210afeb7e636a7c6add67e42aeeee3b587424
|
refs/heads/main
| 2023-08-23T02:32:48.482946
| 2021-11-05T19:28:16
| 2021-11-05T19:28:16
| 425,062,226
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 758
|
py
|
"""DJANGO_ADMIN_CSV URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
|
[
"muhtomakin@gmail.com"
] |
muhtomakin@gmail.com
|
68c981f552ca351da797b3fb8d65913ca2acff6c
|
ac0d651b78ded1de9f0bf5c8aa8350627deea063
|
/tests/box_sim.py
|
87ea6b3dd669d265e072d2b8c1e631962dc9c526
|
[] |
no_license
|
cbott/PiWeatherBox
|
5d67e677028e314c0c720c3b5c904646fd680e5c
|
04b0056dd4711f3be73aeebe63fa09c99cc5156c
|
refs/heads/master
| 2023-04-07T06:02:16.747034
| 2023-03-25T21:02:30
| 2023-03-25T21:02:30
| 92,607,849
| 0
| 0
| null | 2021-03-07T01:43:33
| 2017-05-27T15:39:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,938
|
py
|
import tkinter as tk
import tkinter.font
from collections import namedtuple
from typing import Callable
# Duplicated from hardware.LED to avoid needing to import
Color = namedtuple('Color', ['Red', 'Green', 'Blue'])
def duty_cycle_to_intensity(duty_cycle: float) -> int:
"""
Converts a 0-100 duty cycle to a 0-255 color intensity
Inverse of intensity_to_duty_cycle() from hardware.led
"""
return int(duty_cycle * 255.0 / 100.0)
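# Example (illustrative): duty_cycle_to_intensity(50.0) -> 127, i.e. a 50% PWM duty
# cycle maps to roughly half of the 0-255 color range.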
class BoxWindow(tk.Frame):
"""
Tk window with a "button" and "led" acting as a stand-in for the real hardware
"""
# Map "hardware" pin numbers to colors
RED_PIN = 16
GREEN_PIN = 20
BLUE_PIN = 21
def __init__(self, button_callback: Callable):
# Store current RGB color values of the Fake LED (0-255)
self.current_color = {
self.RED_PIN: 0,
self.GREEN_PIN: 0,
self.BLUE_PIN: 0
}
self.master = tk.Tk()
tk.Frame.__init__(self, self.master)
self.button_callback = button_callback
self.status_text = tk.Label(text='off')
self.status_text.pack()
self.led = tk.Label(text='⬤', foreground='#000000', font=tkinter.font.Font(size=64))
self.led.pack()
self.button = tk.Button(text='', width=4, height=2, background='#AA0000')
self.button.bind("<ButtonPress>", self._wrap_button_callback)
# TODO: bind <ButtonRelease> for button hold shutdown condition
self.button.pack()
self.master.wm_title("PiBox Sim")
self.master.geometry("250x200")
self._update_color()
def _wrap_button_callback(self, event: tk.Event):
self.button_callback()
def set_led_channel_duty_cycle(self, pin: int, duty_cycle: float):
self.current_color[pin] = duty_cycle_to_intensity(duty_cycle)
def _update_color(self):
"""
Refresh the display every 50ms
If we try to call this every time the color updates it slows things down way too much
"""
r = self.current_color[self.RED_PIN]
g = self.current_color[self.GREEN_PIN]
b = self.current_color[self.BLUE_PIN]
color_string = f'#{r:02x}{g:02x}{b:02x}'
self.led['foreground'] = color_string
self.status_text['text'] = color_string
self.master.after(50, self._update_color)
def set_callback(self, new_callback: Callable):
self.button_callback = new_callback
class FakePWM:
""" Stand-in for RPi.GPIO.pwm to pass the relevant commands to the BoxWindow sim """
def __init__(self, window: BoxWindow, pin: int, freq: int):
self.pin = pin
self.window = window
def start(self, dutycycle: float):
self.window.set_led_channel_duty_cycle(self.pin, dutycycle)
def ChangeDutyCycle(self, dutycycle: float):
self.window.set_led_channel_duty_cycle(self.pin, dutycycle)
def stop(self):
pass
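# Example wiring (illustrative sketch; the callback is a placeholder):
#   window = BoxWindow(button_callback=lambda: print("button pressed"))
#   red = FakePWM(window, BoxWindow.RED_PIN, freq=100)
#   red.start(100.0)   # drive the simulated red channel at full intensity
#   window.mainloop()  # tkinter main loop (inherited from tk.Frame)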
|
[
"cbott6@gmail.com"
] |
cbott6@gmail.com
|
a4d60ee0f64911922d9ab22391fb998f01caa2d6
|
34998c0baf6922a5dfd11ad239077ab6c06fb0b0
|
/FetchMaker.py
|
67bfb11e1a2636e7208d4fa130db1404c212b5b2
|
[] |
no_license
|
amanm063/codecademy-practice
|
ac449b31235c872caba38945044fa4e80b27cdce
|
6260b6447e3997c662b976b16936607c50cc997b
|
refs/heads/main
| 2023-03-22T05:37:31.169092
| 2021-03-12T05:21:19
| 2021-03-12T05:21:19
| 330,562,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,565
|
py
|
"""Congratulations! You’ve just started working at the hottest new tech startup, FetchMaker. FetchMaker’s mission is to match up prospective dog owners with their perfect pet. FetchMaker has been collecting data on their adoptable dogs, and it’s your job to analyze some of that data."""
# Import libraries
import numpy as np
import pandas as pd
import codecademylib3
from scipy.stats import binom_test
from scipy.stats import f_oneway
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from scipy.stats import chi2_contingency
# Import data
dogs = pd.read_csv('dog_data.csv')
# Subset to just whippets, terriers, and pitbulls
dogs_wtp = dogs[dogs.breed.isin(['whippet', 'terrier', 'pitbull'])]
# Subset to just poodles and shihtzus
dogs_ps = dogs[dogs.breed.isin(['poodle', 'shihtzu'])]
print(dogs.head())
whippet_rescue = dogs.is_rescue[dogs.breed == "whippet"]
print(whippet_rescue.head())
num_whippet_rescues = np.sum(whippet_rescue == 1)
print(num_whippet_rescues)
num_whippets = len(whippet_rescue)
print(num_whippets)
pval = binom_test(x=num_whippet_rescues,n=num_whippets,p=0.08)
print(pval)
wt_whippets = dogs.weight[dogs.breed == "whippet"]
wt_terriers = dogs.weight[dogs.breed == "terrier"]
wt_pitbulls = dogs.weight[dogs.breed == "pitbull"]
stat,p = f_oneway(wt_whippets,wt_terriers,wt_pitbulls)
print(p)
results = pairwise_tukeyhsd(endog=dogs_wtp.weight,groups = dogs_wtp.breed)
print(results)
print(dogs_ps.head())
xtab = pd.crosstab(dogs_ps.breed,dogs_ps.color)
print(xtab)
chi2,p2,dof,exp = chi2_contingency(xtab)
print(p2)
|
[
"65498402+amanm063@users.noreply.github.com"
] |
65498402+amanm063@users.noreply.github.com
|
db0413d8e67dc999e25725427036dbe9057a3f23
|
93e2b70ed42261423e8cabd482b10e0087f00832
|
/ClassicalField/Laplace/LaplacePy/LaplaceExample.py
|
4218854d0ecc9d8b19a2d4a68c5137af499cae4e
|
[] |
no_license
|
JessicaJor/examples
|
b4cb68250cdb6b976a7aedbfb19f70791e3b57b2
|
146b3cab0c88760e9687b99a5bdc2325615be39e
|
refs/heads/master
| 2021-01-18T09:40:24.095806
| 2011-12-09T10:13:28
| 2011-12-09T10:13:28
| 2,258,279
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,651
|
py
|
#!/usr/bin/env python
# Add Python bindings directory to PATH
import sys, os
sys.path.append(os.sep.join((os.environ['OPENCMISS_ROOT'],'cm','bindings','python')))
# Intialise OpenCMISS
from opencmiss import CMISS
# Set problem parameters
height = 1.0
width = 2.0
length = 3.0
(coordinateSystemUserNumber,
regionUserNumber,
basisUserNumber,
generatedMeshUserNumber,
meshUserNumber,
decompositionUserNumber,
geometricFieldUserNumber,
equationsSetFieldUserNumber,
dependentFieldUserNumber,
equationsSetUserNumber,
problemUserNumber) = range(1,12)
numberGlobalXElements = 5
numberGlobalYElements = 5
numberGlobalZElements = 5
CMISS.DiagnosticsSetOn(CMISS.DiagnosticTypes.In,[1,2,3,4,5],"Diagnostics",["DOMAIN_MAPPINGS_LOCAL_FROM_GLOBAL_CALCULATE"])
# Get the computational nodes information
numberOfComputationalNodes = CMISS.ComputationalNumberOfNodesGet()
computationalNodeNumber = CMISS.ComputationalNodeNumberGet()
# Creation a RC coordinate system
coordinateSystem = CMISS.CoordinateSystem()
coordinateSystem.CreateStart(coordinateSystemUserNumber)
coordinateSystem.dimension = 3
coordinateSystem.CreateFinish()
# Create a region
region = CMISS.Region()
region.CreateStart(regionUserNumber,CMISS.WorldRegion)
region.label = "LaplaceRegion"
region.coordinateSystem = coordinateSystem
region.CreateFinish()
# Create a tri-linear lagrange basis
basis = CMISS.Basis()
basis.CreateStart(basisUserNumber)
basis.type = CMISS.BasisTypes.LagrangeHermiteTP
basis.numberOfXi = 3
basis.interpolationXi = [CMISS.BasisInterpolationSpecifications.LinearLagrange]*3
basis.quadratureNumberOfGaussXi = [2]*3
basis.CreateFinish()
# Create a generated mesh
generatedMesh = CMISS.GeneratedMesh()
generatedMesh.CreateStart(generatedMeshUserNumber,region)
generatedMesh.type = CMISS.GeneratedMeshTypes.Regular
generatedMesh.basis = [basis]
generatedMesh.extent = [width,height,length]
generatedMesh.numberOfElements = [numberGlobalXElements,numberGlobalYElements,numberGlobalZElements]
mesh = CMISS.Mesh()
generatedMesh.CreateFinish(meshUserNumber,mesh)
# Create a decomposition for the mesh
decomposition = CMISS.Decomposition()
decomposition.CreateStart(decompositionUserNumber,mesh)
decomposition.type = CMISS.DecompositionTypes.Calculated
decomposition.numberOfDomains = numberOfComputationalNodes
decomposition.CreateFinish()
# Create a field for the geometry
geometricField = CMISS.Field()
geometricField.CreateStart(geometricFieldUserNumber,region)
geometricField.meshDecomposition = decomposition
geometricField.ComponentMeshComponentSet(CMISS.FieldVariableTypes.U,1,1)
geometricField.ComponentMeshComponentSet(CMISS.FieldVariableTypes.U,2,1)
geometricField.ComponentMeshComponentSet(CMISS.FieldVariableTypes.U,3,1)
geometricField.CreateFinish()
# Set geometry from the generated mesh
CMISS.GeneratedMeshGeometricParametersCalculate(geometricField,generatedMesh)
# Create standard Laplace equations set
equationsSetField = CMISS.Field()
equationsSet = CMISS.EquationsSet()
equationsSet.CreateStart(equationsSetUserNumber,region,geometricField, \
CMISS.EquationsSetClasses.ClassicalField,
CMISS.EquationsSetTypes.LaplaceEquation, \
CMISS.EquationsSetSubtypes.StandardLaplace, \
equationsSetFieldUserNumber, equationsSetField)
equationsSet.CreateFinish()
# Create dependent field
dependentField = CMISS.Field()
equationsSet.DependentCreateStart(dependentFieldUserNumber,dependentField)
dependentField.DOFOrderTypeSet(CMISS.FieldVariableTypes.U,CMISS.FieldDOFOrderTypes.Separated)
dependentField.DOFOrderTypeSet(CMISS.FieldVariableTypes.DelUDelN,CMISS.FieldDOFOrderTypes.Separated)
equationsSet.DependentCreateFinish()
# Initialise dependent field
dependentField.ComponentValuesInitialiseDP(CMISS.FieldVariableTypes.U,CMISS.FieldParameterSetTypes.FieldValues,1,0.5)
# Create equations
equations = CMISS.Equations()
equationsSet.EquationsCreateStart(equations)
equations.sparsityType = CMISS.EquationsSparsityTypes.Sparse
equations.outputType = CMISS.EquationsOutputTypes.NONE
equationsSet.EquationsCreateFinish()
# Create Laplace problem
problem = CMISS.Problem()
problem.CreateStart(problemUserNumber)
problem.SpecificationSet(CMISS.ProblemClasses.ClassicalField, \
CMISS.ProblemTypes.LaplaceEquation, \
CMISS.ProblemSubTypes.StandardLaplace)
problem.CreateFinish()
# Create control loops
problem.ControlLoopCreateStart()
problem.ControlLoopCreateFinish()
# Create problem solver
solver = CMISS.Solver()
problem.SolversCreateStart()
problem.SolverGet([CMISS.ControlLoopIdentifiers.Node],1,solver)
solver.outputType = CMISS.SolverOutputTypes.Solver
solver.linearType = CMISS.LinearSolverTypes.Iterative
solver.linearIterativeAbsoluteTolerance = 1.0E-12
solver.linearIterativeRelativeTolerance = 1.0E-12
problem.SolversCreateFinish()
# Create solver equations and add equations set to solver equations
solver = CMISS.Solver()
solverEquations = CMISS.SolverEquations()
problem.SolverEquationsCreateStart()
problem.SolverGet([CMISS.ControlLoopIdentifiers.Node],1,solver)
solver.SolverEquationsGet(solverEquations)
solverEquations.sparsityType = CMISS.SolverEquationsSparsityTypes.Sparse
equationsSetIndex = solverEquations.EquationsSetAdd(equationsSet)
problem.SolverEquationsCreateFinish()
# Create boundary conditions and set first and last nodes to 0.0 and 1.0
boundaryConditions = CMISS.BoundaryConditions()
solverEquations.BoundaryConditionsCreateStart(boundaryConditions)
firstNodeNumber=1
nodes = CMISS.Nodes()
region.NodesGet(nodes)
lastNodeNumber = nodes.numberOfNodes
firstNodeDomain = decomposition.NodeDomainGet(firstNodeNumber,1)
lastNodeDomain = decomposition.NodeDomainGet(lastNodeNumber,1)
if firstNodeDomain == computationalNodeNumber:
boundaryConditions.SetNode(dependentField,CMISS.FieldVariableTypes.U,1,1,firstNodeNumber,1,CMISS.BoundaryConditionsTypes.Fixed,0.0)
if lastNodeDomain == computationalNodeNumber:
boundaryConditions.SetNode(dependentField,CMISS.FieldVariableTypes.U,1,1,lastNodeNumber,1,CMISS.BoundaryConditionsTypes.Fixed,1.0)
solverEquations.BoundaryConditionsCreateFinish()
# Solve the problem
problem.Solve()
# Export results
baseName = "laplace"
dataFormat = "PLAIN_TEXT"
fml = CMISS.FieldMLIO()
fml.OutputCreate(mesh, "", baseName, dataFormat)
fml.OutputAddFieldNoType(baseName+".geometric", dataFormat, geometricField,
CMISS.FieldVariableTypes.U, CMISS.FieldParameterSetTypes.FieldValues)
fml.OutputAddFieldNoType(baseName+".phi", dataFormat, dependentField,
CMISS.FieldVariableTypes.U, CMISS.FieldParameterSetTypes.FieldValues)
fml.OutputWrite("LaplaceExample.xml")
fml.Finalise()
CMISS.Finalise()
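# Summary note (added for clarity; a paraphrase of what the example above sets up, not
# part of the original script): the program assembles and solves the standard Laplace
# problem, div(grad(phi)) = 0, on a regular width x height x length block meshed with
# trilinear Lagrange elements, applies Dirichlet values phi = 0.0 at the first node and
# phi = 1.0 at the last node, and writes the geometry and solution to FieldML as
# "LaplaceExample.xml".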
|
[
"aree035@aucklanduni.ac.nz"
] |
aree035@aucklanduni.ac.nz
|
27b5cc635f28293b0be825c39684f54d3fae813c
|
99b7f63fc1fc3a69177ac4f8f3f6ea510f67c7d2
|
/top100/python/medium/309_Best_Time_to_Buy_and_Sell_Stock_with_Cooldown.py
|
d4e0c9e575c980a181e5aa2c6822332bd4f6d9b2
|
[] |
no_license
|
zbw0034/coding-practise
|
5e030e61faaf6addedf0896be7355dfae1faa564
|
9a749ea81b353ed41993bd03dae4bceb46d795a2
|
refs/heads/master
| 2022-04-26T22:17:32.458145
| 2020-04-29T12:47:40
| 2020-04-29T12:47:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 786
|
py
|
"""
Jesse@FDU-VTS-MIA
created by 2019/12/23
"""
from typing import List
class Solution:
def maxProfit(self, prices: List[int]) -> int:
        dp = [[0 for _ in range(2)] for _ in range(len(prices)+1)]  # initialise the state table: dp[i][0] = best profit not holding, dp[i][1] = best profit holding
        dp[0][1] = float('-inf')  # holding a stock before day 1 is impossible, so mark it with negative infinity
        for i in range(1, len(prices)+1):
            dp[i][0] = max(dp[i-1][1]+prices[i-1], dp[i-1][0])  # not holding today: either sell the stock held yesterday, or stay empty-handed
            dp_pre_0 = dp[i-2][0]-prices[i-1] if i >= 2 else -prices[i-1]
            dp[i][1] = max(dp_pre_0, dp[i-1][1])  # holding today: either buy today (transition from two days ago because of the one-day cooldown) or keep yesterday's stock
return dp[len(prices)][0]
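# A minimal self-check (added for illustration, not part of the original submission):
# on the classic cooldown example [1, 2, 3, 0, 2] the expected answer is 3
# (buy at 1, sell at 2, cool down, buy at 0, sell at 2).
if __name__ == "__main__":
    assert Solution().maxProfit([1, 2, 3, 0, 2]) == 3
    assert Solution().maxProfit([1]) == 0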
|
[
"jesse_ecnu@126.com"
] |
jesse_ecnu@126.com
|
c216e210a3cd4f60ddcd94220107bcf6a2f2ad58
|
290fa984448c3350fa4059fa8852f8a1321109ab
|
/services/users/src/tests/test_admin.py
|
c5485a60d9b67dc9cad9af5833edad25ca6b1003
|
[] |
no_license
|
testdrivenio/flask-react-aws
|
673a612ae3368e7a9dcd7ddb50c0ea03e3221928
|
365f0771d5234b0b4dfe05d59bab29a03845af4f
|
refs/heads/master
| 2023-07-19T19:45:14.042103
| 2022-05-04T16:08:52
| 2022-05-04T16:08:52
| 198,724,692
| 29
| 21
| null | 2023-07-19T14:39:18
| 2019-07-24T23:54:07
|
Python
|
UTF-8
|
Python
| false
| false
| 984
|
py
|
import os
from src import create_app, db
def test_admin_view_dev():
os.environ["FLASK_ENV"] = "development"
assert os.getenv("FLASK_ENV") == "development"
app = create_app()
app.config.from_object("src.config.TestingConfig")
with app.app_context():
db.session.remove()
db.drop_all()
db.create_all()
client = app.test_client()
resp = client.get("/admin/user/")
assert resp.status_code == 200
assert os.getenv("FLASK_ENV") == "development"
def test_admin_view_prod():
os.environ["FLASK_ENV"] = "production"
assert os.getenv("FLASK_ENV") == "production"
app = create_app()
app.config.from_object("src.config.TestingConfig")
with app.app_context():
db.session.remove()
db.drop_all()
db.create_all()
client = app.test_client()
resp = client.get("/admin/user/")
assert resp.status_code == 404
assert os.getenv("FLASK_ENV") == "production"
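# Note (added for illustration): these tests are typically collected and run with pytest,
# e.g. `pytest src/tests/test_admin.py`; the exact invocation depends on the project's
# tooling and is not defined in this file.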
|
[
"hermanmu@gmail.com"
] |
hermanmu@gmail.com
|
05c74d4ba9f7b0c9cc53d7982072cb98b41192d7
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4146/codes/1585_1571.py
|
3c1bfbd4a36765c09fbb267862444daf795d8032
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
# Test your code little by little.
# Do not test everything only at the end, as that makes it harder to spot errors.
# Do not be intimidated by the error messages. They help you fix your code.
texto = input("Digite a string: ")
print(texto.upper())
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
6471dcb5fcce92aba50f34df3aa21e8ac3868163
|
ae03a82147b154e037ba088174b342acd334262e
|
/external_teachers/manage.py
|
f01a9f6c2ff5b678e45804eb6cfa7e8f5963236b
|
[] |
no_license
|
samfcmc/manage_external_teachers
|
d0f23e55495510a9c6243f9b7080b30bd7a09c63
|
fc63d5b152e636a85ffe0a4a6c7a5894b5bdc732
|
refs/heads/master
| 2021-01-10T19:03:26.153753
| 2015-02-06T15:08:38
| 2015-02-06T15:08:38
| 16,146,008
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "external_teachers.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"samuelfcmc@gmail.com"
] |
samuelfcmc@gmail.com
|
975f7015b7ddf2009c5828c36b1d817a6d76d931
|
508661f0dbb012eecd6f6c1aaeac0a7cc86aa667
|
/Praktikum 6/DemoQComboBox/DemoQComboBox.py
|
1761d23c17dc4d7f85185687532da71bd9ee622c
|
[] |
no_license
|
anggeralmasih/Pemrograman-dan-Praktikum-GUI_19104073_Anggeralmasih-WR_S1SE03B
|
e060fb99dc4d6b0c2d94f27ebb6b82621f2370f2
|
edc865581ecbe8136fea50f8d73219231089663d
|
refs/heads/main
| 2023-06-29T08:03:54.501644
| 2021-07-11T08:42:54
| 2021-07-11T08:42:54
| 356,132,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,000
|
py
|
import sys
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
class MainForm(QWidget):
def __init__(self):
super().__init__()
self.setupUi()
def setupUi(self):
self.resize(300, 100)
self.move(300, 300)
self.setWindowTitle('Demo QComboBox')
self.combo = QComboBox()
for i in range(1,11):
self.combo.addItem('Item ke-%d' % i)
self.getTextButton = QPushButton('Ambil Teks')
layout = QVBoxLayout()
layout.addWidget(self.combo)
layout.addWidget(self.getTextButton)
layout.addStretch()
self.setLayout(layout)
self.getTextButton.clicked.connect(self.getTextButtonClick)
def getTextButtonClick(self):
QMessageBox.information(self, 'Informasi', 'Anda memilih: ' + self.combo.currentText())
if __name__ == '__main__':
a = QApplication(sys.argv)
form = MainForm()
form.show()
a.exec_()
|
[
"noreply@github.com"
] |
anggeralmasih.noreply@github.com
|
49610d80d561c60ef23817e11e2e344424d7bc74
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/dev/nlp/FairSeq_Transformer_ID0496_for_PyTorch/examples/noisychannel/rerank_utils.py
|
3056f54bfef9a854ae562221701e982f9ad82220
|
[
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 29,170
|
py
|
#
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
import subprocess
import os
import re
from fairseq import options
import eval_lm
import preprocess
from contextlib import redirect_stdout
import math
def reprocess(fle):
    # takes in a file of generate.py translation output
    # returns a source dict and hypothesis dict, where keys are the ID num (as an int)
    # and values are the corresponding source and translation. There may be several translations
    # per source, so the values for hypothesis_dict are lists.
# parses output of generate.py
with open(fle, 'r') as f:
txt = f.read()
"""reprocess generate.py output"""
p = re.compile(r"[STHP][-]\d+\s*")
hp = re.compile(r"(\s*[-]?\d+[.]?\d+\s*)|(\s*(-inf)\s*)")
source_dict = {}
hypothesis_dict = {}
score_dict = {}
target_dict = {}
pos_score_dict = {}
lines = txt.split("\n")
for line in lines:
line += "\n"
prefix = re.search(p, line)
if prefix is not None:
assert len(prefix.group()) > 2, "prefix id not found"
_, j = prefix.span()
id_num = prefix.group()[2:]
id_num = int(id_num)
line_type = prefix.group()[0]
if line_type == "H":
h_txt = line[j:]
hypo = re.search(hp, h_txt)
assert hypo is not None, ("regular expression failed to find the hypothesis scoring")
_, i = hypo.span()
score = hypo.group()
if id_num in hypothesis_dict:
hypothesis_dict[id_num].append(h_txt[i:])
score_dict[id_num].append(float(score))
else:
hypothesis_dict[id_num] = [h_txt[i:]]
score_dict[id_num] = [float(score)]
elif line_type == "S":
source_dict[id_num] = (line[j:])
elif line_type == "T":
target_dict[id_num] = (line[j:])
elif line_type == "P":
pos_scores = (line[j:]).split()
pos_scores = [float(x) for x in pos_scores]
if id_num in pos_score_dict:
pos_score_dict[id_num].append(pos_scores)
else:
pos_score_dict[id_num] = [pos_scores]
return source_dict, hypothesis_dict, score_dict, target_dict, pos_score_dict
def reprocess_nbest(fle):
"""reprocess interactive.py output"""
with open(fle, 'r') as f:
txt = f.read()
source_dict = {}
hypothesis_dict = {}
score_dict = {}
target_dict = {}
pos_score_dict = {}
lines = txt.split("\n")
hp = re.compile(r'[-]?\d+[.]?\d+')
j = -1
for _i, line in enumerate(lines):
line += "\n"
line_type = line[0]
if line_type == "H":
hypo = re.search(hp, line)
_, start_index = hypo.span()
score = hypo.group()
if j in score_dict:
score_dict[j].append(float(score))
hypothesis_dict[j].append(line[start_index:].strip("\t"))
else:
score_dict[j] = [float(score)]
hypothesis_dict[j] = [line[start_index:].strip("\t")]
elif line_type == "O":
j += 1
source_dict[j] = line[2:]
# we don't have the targets for interactive.py
target_dict[j] = "filler"
elif line_type == "P":
pos_scores = [float(pos_score) for pos_score in line.split()[1:]]
if j in pos_score_dict:
pos_score_dict[j].append(pos_scores)
else:
pos_score_dict[j] = [pos_scores]
assert source_dict.keys() == hypothesis_dict.keys()
assert source_dict.keys() == pos_score_dict.keys()
assert source_dict.keys() == score_dict.keys()
return source_dict, hypothesis_dict, score_dict, target_dict, pos_score_dict
def write_reprocessed(sources, hypos, targets, source_outfile,
hypo_outfile, target_outfile, right_to_left=False,
prefix_len=None, bpe_symbol=None,
target_prefix_frac=None, source_prefix_frac=None):
"""writes nbest hypothesis for rescoring"""
assert not (prefix_len is not None and target_prefix_frac is not None), \
"in writing reprocessed, only one type of prefix may be used"
assert not (prefix_len is not None and source_prefix_frac is not None), \
"in writing reprocessed, only one type of prefix may be used"
assert not (target_prefix_frac is not None and source_prefix_frac is not None), \
"in writing reprocessed, only one type of prefix may be used"
with open(source_outfile, 'w') as source_file, \
open(hypo_outfile, 'w') as hypo_file, \
open(target_outfile, 'w') as target_file:
assert len(sources) == len(hypos), "sources and hypos list length mismatch"
if right_to_left:
for i in range(len(sources)):
for j in range(len(hypos[i])):
if prefix_len is None:
hypo_file.write(make_right_to_left(hypos[i][j])+"\n")
else:
raise NotImplementedError()
source_file.write(make_right_to_left(sources[i])+"\n")
target_file.write(make_right_to_left(targets[i])+"\n")
else:
for i in sorted(sources.keys()):
for j in range(len(hypos[i])):
if prefix_len is not None:
shortened = get_prefix_no_bpe(hypos[i][j], bpe_symbol, prefix_len)+"\n"
hypo_file.write(shortened)
source_file.write(sources[i])
target_file.write(targets[i])
elif target_prefix_frac is not None:
num_words, shortened, num_bpe_tokens = \
calc_length_from_frac(hypos[i][j], target_prefix_frac, bpe_symbol)
shortened += "\n"
hypo_file.write(shortened)
source_file.write(sources[i])
target_file.write(targets[i])
elif source_prefix_frac is not None:
                        num_words, shortened, num_bpe_tokens = \
calc_length_from_frac(sources[i], source_prefix_frac, bpe_symbol)
shortened += "\n"
hypo_file.write(hypos[i][j])
source_file.write(shortened)
target_file.write(targets[i])
else:
hypo_file.write(hypos[i][j])
source_file.write(sources[i])
target_file.write(targets[i])
def calc_length_from_frac(bpe_sentence, prefix_frac, bpe_symbol):
# return number of words, (not bpe tokens) that we want
no_bpe_sen = remove_bpe(bpe_sentence, bpe_symbol)
len_sen = len(no_bpe_sen.split())
num_words = math.ceil(len_sen * prefix_frac)
prefix = get_prefix_no_bpe(bpe_sentence, bpe_symbol, num_words)
num_bpe_tokens = len(prefix.split())
return num_words, prefix, num_bpe_tokens
def get_prefix(sentence, prefix_len):
"""assuming no bpe, gets the prefix of the sentence with prefix_len words"""
tokens = sentence.strip("\n").split()
if prefix_len >= len(tokens):
return sentence.strip("\n")
else:
return " ".join(tokens[:prefix_len])
def get_prefix_no_bpe(sentence, bpe_symbol, prefix_len):
if bpe_symbol is None:
return get_prefix(sentence, prefix_len)
else:
return " ".join(get_prefix_from_len(sentence.split(), bpe_symbol, prefix_len))
def get_prefix_from_len(sentence, bpe_symbol, prefix_len):
"""get the prefix of sentence with bpe, with prefix len in terms of words, not bpe tokens"""
bpe_count = sum([bpe_symbol.strip(" ") in t for t in sentence[:prefix_len]])
if bpe_count == 0:
return sentence[:prefix_len]
else:
return sentence[:prefix_len]+get_prefix_from_len(sentence[prefix_len:], bpe_symbol, bpe_count)
def get_num_bpe_tokens_from_len(sentence, bpe_symbol, prefix_len):
"""given a prefix length in terms of words, return the number of bpe tokens"""
prefix = get_prefix_no_bpe(sentence, bpe_symbol, prefix_len)
assert len(remove_bpe(prefix, bpe_symbol).split()) <= prefix_len
return len(prefix.split(" "))
def make_right_to_left(line):
tokens = line.split()
tokens.reverse()
new_line = " ".join(tokens)
return new_line
def remove_bpe(line, bpe_symbol):
line = line.replace("\n", '')
line = (line + ' ').replace(bpe_symbol, '').rstrip()
return line+("\n")
def remove_bpe_dict(pred_dict, bpe_symbol):
new_dict = {}
for i in pred_dict:
if type(pred_dict[i]) == list:
new_list = [remove_bpe(elem, bpe_symbol) for elem in pred_dict[i]]
new_dict[i] = new_list
else:
new_dict[i] = remove_bpe(pred_dict[i], bpe_symbol)
return new_dict
def parse_bleu_scoring(line):
p = re.compile(r'(BLEU4 = )\d+[.]\d+')
res = re.search(p, line)
assert res is not None, line
return float(res.group()[8:])
def get_full_from_prefix(hypo_prefix, hypos):
"""given a hypo prefix, recover the first hypo from the list of complete hypos beginning with that prefix"""
for hypo in hypos:
hypo_prefix = hypo_prefix.strip("\n")
len_prefix = len(hypo_prefix)
if hypo[:len_prefix] == hypo_prefix:
return hypo
# no match found
raise Exception()
def get_score(a, b, c, target_len, bitext_score1, bitext_score2=None, lm_score=None,
lenpen=None, src_len=None, tgt_len=None, bitext1_backwards=False,
bitext2_backwards=False, normalize=False):
if bitext1_backwards:
bitext1_norm = src_len
else:
bitext1_norm = tgt_len
if bitext_score2 is not None:
if bitext2_backwards:
bitext2_norm = src_len
else:
bitext2_norm = tgt_len
else:
bitext2_norm = 1
bitext_score2 = 0
if normalize:
score = a*bitext_score1/bitext1_norm + b*bitext_score2/bitext2_norm+c*lm_score/src_len
else:
score = a*bitext_score1 + b*bitext_score2+c*lm_score
if lenpen is not None:
score /= (target_len) ** float(lenpen)
return score
class BitextOutput(object):
def __init__(self, output_file, backwards, right_to_left, bpe_symbol,
prefix_len=None, target_prefix_frac=None, source_prefix_frac=None):
"""process output from rescoring"""
source, hypo, score, target, pos_score = reprocess(output_file)
if backwards:
self.hypo_fracs = source_prefix_frac
else:
self.hypo_fracs = target_prefix_frac
# remove length penalty so we can use raw scores
score, num_bpe_tokens = get_score_from_pos(pos_score, prefix_len, hypo, bpe_symbol, self.hypo_fracs, backwards)
source_lengths = {}
target_lengths = {}
assert hypo.keys() == source.keys(), "key mismatch"
if backwards:
tmp = hypo
hypo = source
source = tmp
for i in source:
# since we are reranking, there should only be one hypo per source sentence
if backwards:
len_src = len(source[i][0].split())
# record length without <eos>
if len_src == num_bpe_tokens[i][0] - 1:
source_lengths[i] = num_bpe_tokens[i][0] - 1
else:
source_lengths[i] = num_bpe_tokens[i][0]
target_lengths[i] = len(hypo[i].split())
source[i] = remove_bpe(source[i][0], bpe_symbol)
target[i] = remove_bpe(target[i], bpe_symbol)
hypo[i] = remove_bpe(hypo[i], bpe_symbol)
score[i] = float(score[i][0])
pos_score[i] = pos_score[i][0]
else:
len_tgt = len(hypo[i][0].split())
# record length without <eos>
if len_tgt == num_bpe_tokens[i][0] - 1:
target_lengths[i] = num_bpe_tokens[i][0] - 1
else:
target_lengths[i] = num_bpe_tokens[i][0]
source_lengths[i] = len(source[i].split())
if right_to_left:
source[i] = remove_bpe(make_right_to_left(source[i]), bpe_symbol)
target[i] = remove_bpe(make_right_to_left(target[i]), bpe_symbol)
hypo[i] = remove_bpe(make_right_to_left(hypo[i][0]), bpe_symbol)
score[i] = float(score[i][0])
pos_score[i] = pos_score[i][0]
else:
assert len(hypo[i]) == 1, "expected only one hypothesis per source sentence"
source[i] = remove_bpe(source[i], bpe_symbol)
target[i] = remove_bpe(target[i], bpe_symbol)
hypo[i] = remove_bpe(hypo[i][0], bpe_symbol)
score[i] = float(score[i][0])
pos_score[i] = pos_score[i][0]
self.rescore_source = source
self.rescore_hypo = hypo
self.rescore_score = score
self.rescore_target = target
self.rescore_pos_score = pos_score
self.backwards = backwards
self.right_to_left = right_to_left
self.target_lengths = target_lengths
self.source_lengths = source_lengths
class BitextOutputFromGen(object):
def __init__(self, predictions_bpe_file, bpe_symbol=None, nbest=False, prefix_len=None, target_prefix_frac=None):
if nbest:
pred_source, pred_hypo, pred_score, pred_target, pred_pos_score = reprocess_nbest(predictions_bpe_file)
else:
pred_source, pred_hypo, pred_score, pred_target, pred_pos_score = reprocess(predictions_bpe_file)
assert len(pred_source) == len(pred_hypo)
assert len(pred_source) == len(pred_score)
assert len(pred_source) == len(pred_target)
assert len(pred_source) == len(pred_pos_score)
# remove length penalty so we can use raw scores
pred_score, num_bpe_tokens = get_score_from_pos(pred_pos_score, prefix_len, pred_hypo,
bpe_symbol, target_prefix_frac, False)
self.source = pred_source
self.target = pred_target
self.score = pred_score
self.pos_score = pred_pos_score
self.hypo = pred_hypo
self.target_lengths = {}
self.source_lengths = {}
self.no_bpe_source = remove_bpe_dict(pred_source.copy(), bpe_symbol)
self.no_bpe_hypo = remove_bpe_dict(pred_hypo.copy(), bpe_symbol)
self.no_bpe_target = remove_bpe_dict(pred_target.copy(), bpe_symbol)
# indexes to match those from the rescoring models
self.rescore_source = {}
self.rescore_target = {}
self.rescore_pos_score = {}
self.rescore_hypo = {}
self.rescore_score = {}
self.num_hypos = {}
self.backwards = False
self.right_to_left = False
index = 0
for i in sorted(pred_source.keys()):
for j in range(len(pred_hypo[i])):
self.target_lengths[index] = len(self.hypo[i][j].split())
self.source_lengths[index] = len(self.source[i].split())
self.rescore_source[index] = self.no_bpe_source[i]
self.rescore_target[index] = self.no_bpe_target[i]
self.rescore_hypo[index] = self.no_bpe_hypo[i][j]
self.rescore_score[index] = float(pred_score[i][j])
self.rescore_pos_score[index] = pred_pos_score[i][j]
self.num_hypos[index] = len(pred_hypo[i])
index += 1
def get_score_from_pos(pos_score_dict, prefix_len, hypo_dict, bpe_symbol, hypo_frac, backwards):
score_dict = {}
num_bpe_tokens_dict = {}
assert prefix_len is None or hypo_frac is None
for key in pos_score_dict:
score_dict[key] = []
num_bpe_tokens_dict[key] = []
for i in range(len(pos_score_dict[key])):
if prefix_len is not None and not backwards:
num_bpe_tokens = get_num_bpe_tokens_from_len(hypo_dict[key][i], bpe_symbol, prefix_len)
score_dict[key].append(sum(pos_score_dict[key][i][:num_bpe_tokens]))
num_bpe_tokens_dict[key].append(num_bpe_tokens)
elif hypo_frac is not None:
num_words, shortened, hypo_prefix_len = calc_length_from_frac(hypo_dict[key][i], hypo_frac, bpe_symbol)
score_dict[key].append(sum(pos_score_dict[key][i][:hypo_prefix_len]))
num_bpe_tokens_dict[key].append(hypo_prefix_len)
else:
score_dict[key].append(sum(pos_score_dict[key][i]))
num_bpe_tokens_dict[key].append(len(pos_score_dict[key][i]))
return score_dict, num_bpe_tokens_dict
class LMOutput(object):
def __init__(self, lm_score_file, lm_dict=None, prefix_len=None, bpe_symbol=None, target_prefix_frac=None):
lm_sentences, lm_sen_scores, lm_sen_pos_scores, lm_no_bpe_sentences, lm_bpe_tokens = \
parse_lm(lm_score_file, prefix_len=prefix_len,
bpe_symbol=bpe_symbol, target_prefix_frac=target_prefix_frac)
self.sentences = lm_sentences
self.score = lm_sen_scores
self.pos_score = lm_sen_pos_scores
self.lm_dict = lm_dict
self.no_bpe_sentences = lm_no_bpe_sentences
self.bpe_tokens = lm_bpe_tokens
def parse_lm(input_file, prefix_len=None, bpe_symbol=None, target_prefix_frac=None):
"""parse output of eval_lm"""
with open(input_file, 'r') as f:
text = f.readlines()
text = text[7:]
cleaned_text = text[:-2]
sentences = {}
sen_scores = {}
sen_pos_scores = {}
no_bpe_sentences = {}
num_bpe_tokens_dict = {}
for _i, line in enumerate(cleaned_text):
tokens = line.split()
if tokens[0].isdigit():
line_id = int(tokens[0])
scores = [float(x[1:-1]) for x in tokens[2::2]]
sentences[line_id] = " ".join(tokens[1::2][:-1])+"\n"
if bpe_symbol is not None:
# exclude <eos> symbol to match output from generate.py
bpe_sen = " ".join(tokens[1::2][:-1])+"\n"
no_bpe_sen = remove_bpe(bpe_sen, bpe_symbol)
no_bpe_sentences[line_id] = no_bpe_sen
if prefix_len is not None:
num_bpe_tokens = get_num_bpe_tokens_from_len(bpe_sen, bpe_symbol, prefix_len)
sen_scores[line_id] = sum(scores[:num_bpe_tokens])
num_bpe_tokens_dict[line_id] = num_bpe_tokens
elif target_prefix_frac is not None:
num_words, shortened, target_prefix_len = calc_length_from_frac(bpe_sen, target_prefix_frac,
bpe_symbol)
sen_scores[line_id] = sum(scores[:target_prefix_len])
num_bpe_tokens_dict[line_id] = target_prefix_len
else:
sen_scores[line_id] = sum(scores)
num_bpe_tokens_dict[line_id] = len(scores)
sen_pos_scores[line_id] = scores
return sentences, sen_scores, sen_pos_scores, no_bpe_sentences, num_bpe_tokens_dict
def get_directories(data_dir_name, num_rescore, gen_subset,
fw_name, shard_id, num_shards,
sampling=False, prefix_len=None,
target_prefix_frac=None, source_prefix_frac=None):
nbest_file_id = "nbest_" + str(num_rescore) + \
"_subset_" + gen_subset + \
"_fw_name_" + fw_name + \
"_shard_" + str(shard_id) + \
"_of_" + str(num_shards)
if sampling:
nbest_file_id += "_sampling"
# the directory containing all information for this nbest list
pre_gen = os.path.join(os.path.dirname(__file__))+"/rerank_data/"+data_dir_name+"/"+nbest_file_id
# the directory to store the preprocessed nbest list, for left to right rescoring
left_to_right_preprocessed_dir = pre_gen+"/left_to_right_preprocessed"
if source_prefix_frac is not None:
left_to_right_preprocessed_dir = left_to_right_preprocessed_dir + "/prefix_frac" + str(source_prefix_frac)
# the directory to store the preprocessed nbest list, for right to left rescoring
right_to_left_preprocessed_dir = pre_gen+"/right_to_left_preprocessed"
# the directory to store the preprocessed nbest list, for backwards rescoring
backwards_preprocessed_dir = pre_gen+"/backwards"
if target_prefix_frac is not None:
backwards_preprocessed_dir = backwards_preprocessed_dir+"/prefix_frac"+str(target_prefix_frac)
elif prefix_len is not None:
backwards_preprocessed_dir = backwards_preprocessed_dir+"/prefix_"+str(prefix_len)
# the directory to store the preprocessed nbest list, for rescoring with P(T)
lm_preprocessed_dir = pre_gen+"/lm_preprocessed"
return pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, \
backwards_preprocessed_dir, lm_preprocessed_dir
def lm_scoring(preprocess_directory, bpe_status, gen_output, pre_gen,
cur_lm_dict, cur_lm_name, cur_language_model, cur_lm_bpe_code,
batch_size, lm_score_file, target_lang, source_lang, prefix_len=None):
if prefix_len is not None:
assert bpe_status == "different", "bpe status must be different to use prefix len"
if bpe_status == "no bpe":
# run lm on output without bpe
write_reprocessed(gen_output.no_bpe_source, gen_output.no_bpe_hypo,
gen_output.no_bpe_target, pre_gen+"/rescore_data_no_bpe.de",
pre_gen+"/rescore_data_no_bpe.en", pre_gen+"/reference_file_no_bpe")
preprocess_lm_param = ["--only-source",
"--trainpref", pre_gen+"/rescore_data_no_bpe."+target_lang,
"--srcdict", cur_lm_dict,
"--destdir", preprocess_directory]
preprocess_parser = options.get_preprocessing_parser()
input_args = preprocess_parser.parse_args(preprocess_lm_param)
preprocess.main(input_args)
eval_lm_param = [preprocess_directory,
"--path", cur_language_model,
"--output-word-probs",
"--batch-size", str(batch_size),
"--max-tokens", "1024",
"--sample-break-mode", "eos",
"--gen-subset", "train"]
eval_lm_parser = options.get_eval_lm_parser()
input_args = options.parse_args_and_arch(eval_lm_parser, eval_lm_param)
with open(lm_score_file, 'w') as f:
with redirect_stdout(f):
eval_lm.main(input_args)
elif bpe_status == "shared":
preprocess_lm_param = ["--only-source",
"--trainpref", pre_gen+"/rescore_data."+target_lang,
"--srcdict", cur_lm_dict,
"--destdir", preprocess_directory]
preprocess_parser = options.get_preprocessing_parser()
input_args = preprocess_parser.parse_args(preprocess_lm_param)
preprocess.main(input_args)
eval_lm_param = [preprocess_directory,
"--path", cur_language_model,
"--output-word-probs",
"--batch-size", str(batch_size),
"--sample-break-mode", "eos",
"--gen-subset", "train"]
eval_lm_parser = options.get_eval_lm_parser()
input_args = options.parse_args_and_arch(eval_lm_parser, eval_lm_param)
with open(lm_score_file, 'w') as f:
with redirect_stdout(f):
eval_lm.main(input_args)
elif bpe_status == "different":
rescore_file = pre_gen+"/rescore_data_no_bpe"
rescore_bpe = pre_gen+"/rescore_data_new_bpe"
rescore_file += "."
rescore_bpe += "."
write_reprocessed(gen_output.no_bpe_source, gen_output.no_bpe_hypo,
gen_output.no_bpe_target, rescore_file+source_lang,
rescore_file+target_lang, pre_gen+"/reference_file_no_bpe",
bpe_symbol=None)
# apply LM bpe to nbest list
bpe_src_param = ["-c", cur_lm_bpe_code,
"--input", rescore_file+target_lang,
"--output", rescore_bpe+target_lang]
subprocess.call(["python",
os.path.join(os.path.dirname(__file__),
"subword-nmt/subword_nmt/apply_bpe.py")] + bpe_src_param,
shell=False)
# uncomment to use fastbpe instead of subword-nmt bpe
# bpe_src_param = [rescore_bpe+target_lang, rescore_file+target_lang, cur_lm_bpe_code]
# subprocess.call(["/private/home/edunov/fastBPE/fast", "applybpe"] + bpe_src_param, shell=False)
preprocess_dir = preprocess_directory
preprocess_lm_param = ["--only-source",
"--trainpref", rescore_bpe+target_lang,
"--srcdict", cur_lm_dict,
"--destdir", preprocess_dir]
preprocess_parser = options.get_preprocessing_parser()
input_args = preprocess_parser.parse_args(preprocess_lm_param)
preprocess.main(input_args)
eval_lm_param = [preprocess_dir,
"--path", cur_language_model,
"--output-word-probs",
"--batch-size", str(batch_size),
"--max-tokens", "1024",
"--sample-break-mode", "eos",
"--gen-subset", "train"]
eval_lm_parser = options.get_eval_lm_parser()
input_args = options.parse_args_and_arch(eval_lm_parser, eval_lm_param)
with open(lm_score_file, 'w') as f:
with redirect_stdout(f):
eval_lm.main(input_args)
def rescore_file_name(nbest_dir, prefix_len, scorer_name, lm_file=False,
target_prefix_frac=None, source_prefix_frac=None, backwards=None):
if lm_file:
score_file = nbest_dir+"/lm_score_translations_model_"+scorer_name+".txt"
else:
score_file = nbest_dir+"/"+scorer_name+"_score_translations.txt"
if backwards:
if prefix_len is not None:
score_file += "prefix_len"+str(prefix_len)
elif target_prefix_frac is not None:
score_file += "target_prefix_frac"+str(target_prefix_frac)
else:
if source_prefix_frac is not None:
score_file += "source_prefix_frac"+str(source_prefix_frac)
return score_file
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
ea6c1da884dcbb87f7983256016e00195b4d66b2
|
dd2d29b326d9adb5743f6588d4248f8358b82d8d
|
/torchaudio/backend/utils.py
|
cb53b3e02ff24dfc1f4e55dc13b198af41d13bba
|
[
"BSD-2-Clause"
] |
permissive
|
mmxgn/audio
|
0e6bc06a738db3b353e88d70e1fed3964ee70a58
|
24741fdb97b9257868ae1caf593207aa7e581b6d
|
refs/heads/master
| 2022-11-21T23:55:12.584975
| 2020-07-18T10:16:32
| 2020-07-18T10:16:32
| 267,539,108
| 0
| 0
|
BSD-2-Clause
| 2020-05-28T08:46:43
| 2020-05-28T08:46:42
| null |
UTF-8
|
Python
| false
| false
| 2,265
|
py
|
"""Defines utilities for switching audio backends"""
import warnings
from typing import Optional, List
import torchaudio
from torchaudio._internal.module_utils import is_module_available
from . import (
no_backend,
sox_backend,
sox_io_backend,
soundfile_backend,
)
__all__ = [
'list_audio_backends',
'get_audio_backend',
'set_audio_backend',
]
def list_audio_backends() -> List[str]:
"""List available backends"""
backends = []
if is_module_available('soundfile'):
backends.append('soundfile')
if is_module_available('torchaudio._torchaudio'):
backends.append('sox')
backends.append('sox_io')
return backends
def set_audio_backend(backend: Optional[str]) -> None:
"""Set the backend for I/O operation
Args:
        backend (str): Name of the backend. One of "sox", "sox_io" or "soundfile",
            based on availability of the system, or None to deselect any backend.
"""
if backend is not None and backend not in list_audio_backends():
raise RuntimeError(
f'Backend "{backend}" is not one of '
f'available backends: {list_audio_backends()}.')
if backend is None:
module = no_backend
elif backend == 'sox':
module = sox_backend
elif backend == 'sox_io':
module = sox_io_backend
elif backend == 'soundfile':
module = soundfile_backend
else:
raise NotImplementedError(f'Unexpected backend "{backend}"')
for func in ['save', 'load', 'load_wav', 'info']:
setattr(torchaudio, func, getattr(module, func))
def _init_audio_backend():
backends = list_audio_backends()
if 'sox' in backends:
set_audio_backend('sox')
elif 'soundfile' in backends:
set_audio_backend('soundfile')
else:
warnings.warn('No audio backend is available.')
set_audio_backend(None)
def get_audio_backend() -> Optional[str]:
"""Get the name of the current backend"""
if torchaudio.load == no_backend.load:
return None
if torchaudio.load == sox_backend.load:
return 'sox'
if torchaudio.load == sox_io_backend.load:
return 'sox_io'
if torchaudio.load == soundfile_backend.load:
return 'soundfile'
raise ValueError('Unknown backend.')
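# A minimal usage sketch (illustrative, not part of the original module). It assumes
# torchaudio is installed with at least one backend available and that 'example.wav'
# is a placeholder path:
#
#   import torchaudio
#   print(torchaudio.list_audio_backends())   # e.g. ['soundfile']
#   torchaudio.set_audio_backend('soundfile')
#   waveform, sample_rate = torchaudio.load('example.wav')
#   print(torchaudio.get_audio_backend())     # 'soundfile'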
|
[
"noreply@github.com"
] |
mmxgn.noreply@github.com
|
7c2f3314655e6df4b85ee81a43b4a4f4a3751002
|
d8edd97f8f8dea3f9f02da6c40d331682bb43113
|
/train_wgangp_augment2_weightinit_noxy_singlegp.py
|
addd4121f7df27acdf60083943957ec45428d914
|
[] |
no_license
|
mdubouch/noise-gan
|
bdd5b2fff3aff70d5f464150443d51c2192eeafd
|
639859ec4a2aa809d17eb6998a5a7d217559888a
|
refs/heads/master
| 2023-07-15T09:37:57.631656
| 2021-08-27T11:02:45
| 2021-08-27T11:02:45
| 284,072,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,790
|
py
|
#!/usr/bin/python3
#$ -P P_comet
#$ -j y
#$ -cwd
#$ -M m.dubouchet18@imperial.ac.uk
#$ -m be
#$ -q mc_gpu_long
#$ -pe multicores_gpu 4
#$ -l sps=1,GPU=1,GPUtype=V100
import os
import sys
sys.path.append(os.getcwd())
import argparse
import logging
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=UserWarning)
parser = argparse.ArgumentParser('Train CDC GAN')
parser.add_argument('--n-epochs', type=int, default=1)
parser.add_argument('--ngf', type=int, default=16)
parser.add_argument('--ndf', type=int, default=16)
parser.add_argument('--latent-dims', type=int, default=256)
parser.add_argument('--sequence-length', type=int, default=2048)
parser.add_argument('--net-version', type=int)
parser.add_argument('--dataset', type=str, default='dataset')
parser.add_argument('--batch-size', type=int, default=2)
parser.add_argument('--enc-dim', type=int, default=4)
parser.add_argument('--log', type=str, default='info')
parser.add_argument('--gfx', type=bool, default=False)
parser.add_argument('--seed', type=int, default=1337)
parser.add_argument('--save-every', type=int, default=1)
parser.add_argument('--continue-from-epoch', '--cfe', type=int)
parser.add_argument('--continue-from-job', '--cfj', type=int)
args = parser.parse_args()
job_id = int(os.getenv('JOB_ID', default='0'))
output_dir = 'output_%d/' % (job_id)
print('Outputting to %s' % (output_dir))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
logging.basicConfig(filename=output_dir+'output.log', level=getattr(logging, args.log.upper()), format='%(asctime)s %(message)s')
n_epochs = args.n_epochs
ngf = args.ngf
ndf = args.ndf
logging.info('ndf=%d' % (ndf))
logging.info('ngf=%d' % (ngf))
latent_dims = args.latent_dims
seq_len = args.sequence_length
encoded_dim = args.enc_dim
torch.manual_seed(args.seed)
np.random.seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(args.seed)
logging.info('Running on GPU: %s' % (torch.cuda.get_device_name()))
else:
logging.info('Running on CPU')
def to_device(x):
if torch.cuda.is_available():
return x.cuda()
else:
return x
import importlib
print('Importing dataset...')
print('Importing dataset from %s.py' % (args.dataset))
dataset = importlib.import_module(args.dataset)
#smplr = dataset.SequenceSampler(np.arange(1000) // 3, 128)
#for s in smplr:
# #print(s)
# pass
#exit(0)
data = dataset.Data()
data.load()
logging.info('pot %d bunches %d', data.n_pot, data.n_bunches)
logging.info('dtypes {0}'.format(data.data.dtype))
logging.info('shape {0}'.format(data.data.shape))
import geom_util
gu = geom_util.GeomUtil(data.get_cdc_tree())
gu.validate_wire_pos()
print('Import networks version %d' % (args.net_version))
logging.info('networks=%d' % (args.net_version))
networks = importlib.import_module('networks%d' % (args.net_version))
print('Importing networks from "%s"...' % (networks.__name__))
gen = to_device(networks.Gen(ngf=ngf, latent_dims=latent_dims, seq_len=seq_len,
encoded_dim=encoded_dim, n_wires=gu.n_wires))
logging.info(gen)
disc = to_device(networks.Disc(ndf=ndf, seq_len=seq_len, encoded_dim=encoded_dim, n_wires=gu.n_wires))
logging.info(disc)
print('Generator params: {:,}'.format(networks.get_n_params(gen)))
print('Discriminator params: {:,}'.format(networks.get_n_params(disc)))
logging.info('generator params: %d' % (networks.get_n_params(gen)))
logging.info('discriminator params: %d' % (networks.get_n_params(disc)))
logging.info('%s %s' % (data.get_cdc_tree().shape, data.get_cdc_tree().dtype))
import matplotlib
if args.gfx:
matplotlib.use('TkAgg')
else:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.rcParams['savefig.bbox'] = 'tight'
plt.rcParams['savefig.transparent'] = False
plt.rcParams['axes.labelsize'] = 'large'
plt.rcParams['axes.titlesize'] = 'x-large'
plt.rcParams['savefig.facecolor'] = 'white'
plt.figure(figsize=(6,6))
plt.scatter(gu.wire_x, gu.wire_y, s=1, c=gu.layer)
plt.xlabel('x [mm]')
plt.ylabel('y [mm]')
plt.savefig(output_dir+'wire_position.png', dpi=120)
plt.clf()
print('Pre-processing...')
train_minmax = data.preprocess()
data.diagnostic_plots(output_dir)
train_loader, train_dataset, n_chunks = data.chunk(seq_len, batch_size=args.batch_size)
logging.info('%s' % (train_dataset[0:4][0].shape,))
geom_dim=2
wire_sphere = torch.zeros((geom_dim, gu.n_wires), device='cuda')
wire_sphere[0] = torch.from_numpy(gu.wire_x)
wire_sphere[1] = torch.from_numpy(gu.wire_y)
logging.info('{}'.format(wire_sphere.norm(dim=0)))
#data_std = data.train_dataset.tensors[0].permute(1,0,2).flatten(1,2).std(dim=1)
#logging.info('DATA STD {}'.format(data_std))
logging.info('VAR {}'.format(wire_sphere.std(dim=1)))
logging.info('NORM {}'.format(wire_sphere.norm(dim=0)))
logging.info('MAX {}'.format(wire_sphere.argmax(dim=1)))
#wire_sphere = wire_sphere / wire_sphere.std(dim=1, keepdim=True) - wire_sphere.mean(dim=1, keepdim=True)
#wire_sphere[2] += torch.randn_like(wire_sphere[2]) * 0.01
#wire_sphere[3] += torch.randn_like(wire_sphere[2]) * 0.01
# STANDARDIZATION
#wire_sphere = (wire_sphere - wire_sphere.mean(dim=1, keepdim=True)) / wire_sphere.std(dim=1, keepdim=True)
smax = wire_sphere.max(dim=1, keepdim=True)[0]
smin = wire_sphere.min(dim=1, keepdim=True)[0]
wire_sphere = (wire_sphere - smin) / (smax - smin + 1e-8)
logging.info('DISTANCE BETWEEN WIRES: {}'.format(torch.norm(wire_sphere[:,1] - wire_sphere[:,0])))
plt.figure()
plt.plot(wire_sphere[0].detach().cpu())
plt.savefig(output_dir+'geom_tensor.png')
plt.close()
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, projection='3d')
x = wire_sphere[0].cpu()
y = wire_sphere[1].cpu()
z = np.zeros_like(x)
ax.scatter(x, y, z, s=1, c=gu.layer)
#ax.set_zlim(-1, 1)
#for i in range(gu.n_layers):
# idx = gu.cum_n_wires[i] - gu.n_wires_per_layer[i]
# ax.text(x[idx], y[idx], z[idx], str(i))
plt.savefig(output_dir+'wire_3d.png', dpi=120)
plt.close()
plt.figure(figsize=(8,8))
plt.plot(wire_sphere.norm(dim=0).cpu(), marker='.', linewidth=0)
plt.savefig(output_dir+'wire_sphere_norm.png', dpi=60)
plt.close()
def sample_fake(batch_size, tau):
noise = to_device(torch.randn((batch_size, latent_dims), requires_grad=True))
sample = gen(noise)
return sample
#_p, _w = sample_real(2)
#print(_p.shape, _w.shape)
__f = sample_fake(2, 1.0)
#print(__f.shape)
# Initialisation of saved variables and lists
tau = 2/3
discriminator_losses = []
generator_losses = []
gradient_pen_hist = []
start_epoch = 0
validation_losses = []
optimizer_gen = torch.optim.Adam(list(gen.parameters()),
lr=1e-4, betas=(0.9, 0.999))
optimizer_disc = torch.optim.Adam(list(disc.parameters()),
lr=1e-4, betas=(0.9, 0.999))
def weight_init_relu(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.normal_(m.weight, 0.0, 0.02)
if m.bias is not None:
nn.init.zeros_(m.bias)
#elif classname.find('Norm') != -1:
# nn.init.ones_(m.weight)
# nn.init.zeros_(m.bias)
#elif classname.find('Linear') != -1:
# nn.init.normal_(m.weight, 0.0, 0.02)
# if m.bias is not None:
# nn.init.zeros_(m.bias)
def weight_init_leakyrelu(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
#nn.init.kaiming_normal_(m.weight, a=0.2, mode='fan_in', nonlinearity='leaky_relu')
nn.init.normal_(m.weight, 0.0, 0.02)
if m.bias is not None:
nn.init.zeros_(m.bias)
if classname.find('Linear') != -1:
nn.init.normal_(m.weight, 0.0, 0.02)
if m.bias is not None:
nn.init.zeros_(m.bias)
if args.continue_from_epoch is not None:
path = ''
if args.continue_from_job is not None:
path = 'output_%d/states_%d.pt' % (args.continue_from_job, args.continue_from_epoch)
else:
path = output_dir+'states_%d.pt' % (args.continue_from_epoch)
print('Loading GAN states from %s...' % (path))
device = torch.device('cpu')
if torch.cuda.is_available():
device = torch.device('cuda')
states = torch.load(path, map_location=device)
disc.load_state_dict(states['disc'])
optimizer_disc.load_state_dict(states['d_opt'])
discriminator_losses = states['d_loss']
gen.load_state_dict(states['gen'])
optimizer_gen.load_state_dict(states['g_opt'])
generator_losses = states['g_loss']
tau = states['tau']
start_epoch = states['n_epochs']
print('Starting from', start_epoch)
#data.qt = states['qt']
#data.minmax = states['minmax']
if 'validation_loss' in states:
validation_losses = states['validation_loss']
if 'gradient_penalty' in states:
gradient_pen_hist = states['gradient_penalty']
print('OK')
else:
pass
disc.apply(weight_init_leakyrelu)
gen.apply(weight_init_relu)
print('Training begin')
import time
import torch.autograd as autograd
def save_states(epoch):
states = { 'disc': disc.state_dict(), 'd_opt': optimizer_disc.state_dict(),
'd_loss': discriminator_losses, 'gen': gen.state_dict(),
'g_opt': optimizer_gen.state_dict(), 'g_loss': generator_losses,
'tau': tau, 'n_epochs': epoch, 'qt': data.qt, 'minmax': data.minmax,
'gradient_penalty': gradient_pen_hist, 'validation_loss': validation_losses}
torch.save(states, output_dir + 'states_%d.pt' % (epoch))
print("Saved after epoch %d (%d gen its) to" % (epoch, len(generator_losses)), output_dir + '/states_%d.pt' % (epoch))
def wire_hook(grad):
print('%.2e' % (grad.abs().mean().item()))
return grad
# Implement "Gradient Penalty" for WGAN-GP (https://arxiv.org/pdf/1704.00028.pdf)
def gradient_penalty(disc, interp):
interp.requires_grad_()
d_interpolates = disc(interp).squeeze()
grad_outputs = torch.ones(d_interpolates.shape, requires_grad=False, device='cuda')
gradients = autograd.grad(outputs=d_interpolates,
inputs=interp,
grad_outputs=grad_outputs,
create_graph=True, # IMPORTANT! Allows to compute gradient with respect to gradient
only_inputs=False
)[0]
gradients_pen = (gradients.pow(2).flatten(1, 2).sum(dim=1).pow(0.5) - 1)**2
gradient_pen = torch.mean(gradients_pen)
return gradient_pen
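# Reference note (descriptive comment, not in the original script): gradient_penalty
# computes E[(||grad_xhat D(xhat)||_2 - 1)^2] from the WGAN-GP paper, where xhat is a
# random interpolation between a real and a generated sample; the 2-norm is taken over
# all channel and time-step entries of each sample via flatten(1, 2), and the result is
# scaled by lambda_gp when added to the critic loss in the training loop below.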
def get_wire_weights():
wire_counts = np.bincount(data.wire, minlength=gu.n_wires)
logging.info('{}'.format(wire_counts.shape))
return torch.tensor(1 / (wire_counts + 1e-1), device='cuda', dtype=torch.float)
wire_weights = get_wire_weights()
logging.info('{}'.format(wire_weights))
logging.info('{}'.format(wire_weights[1600]))
logging.info('{}'.format(wire_weights.shape))
gen.train()
disc.train()
lambda_gp = 10
n_critic = 5
critic_count = 0
for e in range(start_epoch, start_epoch + n_epochs):
logging.info('Epoch %d, generator iterations %d' % (e, len(generator_losses)))
print('Epoch %d, generator iterations %d' % (e, len(generator_losses)))
for i, (real_p, real_w) in enumerate(train_loader):
if i % 10 == 0:
print("it %d" % (i))
# real_p (batch, n_features, seq_len)
# real_w (batch, 1, seq_len)
real_p = real_p.cuda().permute(0,2,1)
real_w_ohe = F.one_hot(real_w.cuda(),
num_classes=gu.cum_n_wires[-1]).squeeze(2).permute(0, 2, 1).float()
# real_w_ohe (batch, n_wires, seq_len)
gen.requires_grad_(False)
#disc.requires_grad_(True)
# Critic optimization step
optimizer_disc.zero_grad()
# Weight clipping
#for p in disc.parameters():
# p.data.clamp_(-0.01, 0.01)
#real_xy = wire_sphere[:,real_w].squeeze(2).permute(1, 0, 2)
#print(real_xy[0,:,0])
#real_xy = torch.tensordot(real_w_ohe, wire_sphere, dims=[[1], [1]]).permute(0,2,1)
#real_xy = real_xy + torch.randn_like(real_xy) * 1.1
#real_x = torch.cat([real_p, real_w_ohe], dim=1).requires_grad_()
out_real = disc(torch.cat([real_p, real_w_ohe], dim=1))
#D_loss_real = F.binary_cross_entropy_with_logits(out_real, torch.ones_like(out_real))
#print('out real %.2e' % out_real.mean())
#with torch.no_grad():
fake_p, fake_wg = sample_fake(real_p.shape[0], tau)
#fake_wg.register_hook(wire_hook)
#fake_xy = fake_xy + torch.randn_like(fake_xy) * 1.1
#print(torch.randn_like(fake_xy) * 500.1)
#print('fk',fake_xy[0,:,0])
#fake_x = torch.cat([fake_p, fake_wg], dim=1)
out_fake = disc(torch.cat([fake_p, fake_wg], dim=1))
#print('out fake %.2e' % out_fake.mean())
#D_loss_fake = F.binary_cross_entropy_with_logits(out_fake, torch.zeros_like(out_fake))
#D_loss_fake.backward()
#print('D loss fake', D_loss_fake.item())
eps = torch.rand((real_p.shape[0], 1, 1), device='cuda')
#seq_stop = (eps[0] * seq_len).long()
##interpolates_p = (eps * real_p + (1-eps) * fake_p).requires_grad_(True)
##interpolates_x = torch.cat([real_x[:,:,:seq_stop], fake_x[:,:,seq_stop:]], dim=2)
#reverse = torch.rand(1)
#if (reverse > 0.5):
# p1 = real_p[:,:,:seq_stop]
# p2 = fake_p[:,:,seq_stop:].detach()
# wg1 = real_w_ohe[:,:,:seq_stop]
# wg2 = fake_wg[:,:,seq_stop:].detach()
#else:
# p1 = fake_p[:,:,:seq_stop].detach()
# p2 = real_p[:,:,seq_stop:]
# wg1 = fake_wg[:,:,:seq_stop].detach()
# wg2 = real_w_ohe[:,:,seq_stop:]
#interpolates_p = torch.cat([p1, p2], dim=2).requires_grad_(True)
#interpolates_w = torch.cat([wg1, wg2], dim=2)
interpolates_p = eps * real_p + (1-eps) * fake_p
interpolates_w = eps * real_w_ohe + (1-eps) * fake_wg
#interpolates_x = torch.cat([interpolates_p, interpolates_w], dim=1)
gp = gradient_penalty(disc, torch.cat([interpolates_p, interpolates_w], dim=1))
gradient_pen_hist.append(gp.item())
D_loss = -out_real.mean() + out_fake.mean() + lambda_gp * gp
D_loss.backward()
discriminator_losses.append(D_loss.item())
optimizer_disc.step()
critic_count += 1
if (critic_count % n_critic == 0):
critic_count = 0
gen.requires_grad_(True)
#disc.requires_grad_(False)
# Generator update
optimizer_gen.zero_grad()
fake_p, fake_wg = sample_fake(real_p.shape[0], tau)
# Figure out what the avg gradient is in inner and outer layers
#fake_xy = wireproj(fake_wg)
#fake_xy = fake_xy + torch.randn_like(fake_xy) * 1.1
#print('fk',fake_xy[0,:,0])
#fake_x = torch.cat([fake_p, fake_wg], dim=1)
out_fake = disc(torch.cat([fake_p, fake_wg], dim=1))
G_loss = -out_fake.mean()
generator_losses.append(G_loss.item())
#print(fake_wg.grad, fake_xy.grad, fake_p.grad)
G_loss.backward()
optimizer_gen.step()
optimizer_disc.zero_grad()
    # Calculate validation loss once every 100 epochs
if (e+1) % 100 == 0:
_val_loss_values = []
for val_p, val_w in data.test_loader:
print('test')
val_p = val_p.cuda().permute(0,2,1)
val_w_ohe = F.one_hot(val_w.cuda(),
num_classes=gu.cum_n_wires[-1]).squeeze(2).permute(0, 2, 1).float()
#val_xy = torch.tensordot(val_w_ohe, wire_sphere, dims=[[1], [1]]).permute(0,2,1)
out_real = disc(torch.cat([val_p, val_w_ohe], dim=1))
            fake_p, fake_wg = sample_fake(val_p.shape[0], tau)
            # evaluate the critic on the freshly sampled fake batch so the validation
            # loss uses this batch rather than a stale score from the training loop
            out_fake = disc(torch.cat([fake_p, fake_wg], dim=1))
            eps = torch.rand((val_p.shape[0], 1, 1), device='cuda')
            interpolates_p = eps * val_p + (1-eps) * fake_p
            interpolates_w = eps * val_w_ohe + (1-eps) * fake_wg
            gp = gradient_penalty(disc, torch.cat([interpolates_p, interpolates_w], dim=1))
            D_loss = -out_real.mean() + out_fake.mean() + lambda_gp * gp
_val_loss_values.append(D_loss.item())
validation_losses.append(np.mean(_val_loss_values))
if ((e+1) % args.save_every) == 0:
save_states(e+1)
print('Done')
print('Saving models...')
plt.figure()
plt.plot(np.linspace(0, n_epochs, num=len(discriminator_losses)), discriminator_losses, alpha=0.7)
plt.plot(np.linspace(0, n_epochs, num=len(validation_losses)), validation_losses, alpha=0.7)
plt.savefig(output_dir + 'val_loss.png')
plt.close()
print(start_epoch + n_epochs)
save_states(start_epoch + n_epochs)
|
[
"m.dubouchet18@imperial.ac.uk"
] |
m.dubouchet18@imperial.ac.uk
|
6a4bdafbf51409445c87afc8b35917d30df7031b
|
c167c5a7cd73dc4296520544dac20833f4b61ffd
|
/src/GridSearchCV.py
|
5cdbc58085608fefae38b87dff7218c1ea8383a8
|
[] |
no_license
|
gvisani/enzyme_promiscuity
|
b63f7a89f9000e57dca01292a658b078ca8a02ad
|
fd27a4e515b5af96fed4bbed2568b12ed10b74fa
|
refs/heads/master
| 2021-05-23T11:39:11.515508
| 2020-07-30T19:27:34
| 2020-07-30T19:27:34
| 253,266,683
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,547
|
py
|
import numpy as np
import sklearn
class GridSearchCV(object):
def __init__(self,
estimator,
param_grid,
cv=5,
proba=False):
self.estimator = estimator
self.num_folds = cv
self.param_grid = param_grid
self.proba = proba
def prepare_folds(self, x_NF, y_N, y_prev, y_binary_orig, sample_weight):
x_pos_NF = x_NF[y_binary_orig == 1.0]
y_pos_N = y_N[y_binary_orig == 1.0]
y_pos_prev = y_prev[y_binary_orig == 1.0]
y_pos_binary_orig = y_binary_orig[y_binary_orig == 1.0]
sample_weight_pos = sample_weight[y_binary_orig == 1.0]
x_neg_NF = x_NF[y_binary_orig == 0.0]
y_neg_N = y_N[y_binary_orig == 0.0]
y_neg_prev = y_prev[y_binary_orig == 0.0]
y_neg_binary_orig = y_binary_orig[y_binary_orig == 0.0]
sample_weight_neg = sample_weight[y_binary_orig == 0.0]
x_tr_NF_list_pos, y_tr_N_list_pos, w_tr_N_list_pos, x_va_NF_list_pos, y_va_N_list_pos, y_va_prev_list_pos, y_va_orig_list_pos, w_va_N_list_pos = self.prepare_singleclass_folds(x_pos_NF, y_pos_N, y_pos_prev, y_pos_binary_orig, sample_weight_pos)
x_tr_NF_list_neg, y_tr_N_list_neg, w_tr_N_list_neg, x_va_NF_list_neg, y_va_N_list_neg, y_va_prev_list_neg, y_va_orig_list_neg, w_va_N_list_neg = self.prepare_singleclass_folds(x_neg_NF, y_neg_N, y_neg_prev, y_neg_binary_orig, sample_weight_neg)
x_tr_NF_list = []
y_tr_N_list = []
w_tr_N_list = []
x_va_NF_list = []
y_va_N_list = []
y_va_prev_list = []
y_va_orig_list = []
w_va_N_list = []
for i in range(self.num_folds):
x_tr_NF_list.append(np.vstack((x_tr_NF_list_pos[i], x_tr_NF_list_neg[i])))
y_tr_N_list.append(np.hstack((y_tr_N_list_pos[i], y_tr_N_list_neg[i])))
w_tr_N_list.append(np.hstack((w_tr_N_list_pos[i], w_tr_N_list_neg[i])))
x_va_NF_list.append(np.vstack((x_va_NF_list_pos[i], x_va_NF_list_neg[i])))
y_va_N_list.append(np.hstack((y_va_N_list_pos[i], y_va_N_list_neg[i])))
y_va_prev_list.append(np.hstack((y_va_prev_list_pos[i], y_va_prev_list_neg[i])))
y_va_orig_list.append(np.hstack((y_va_orig_list_pos[i], y_va_orig_list_neg[i])))
w_va_N_list.append(np.hstack((w_va_N_list_pos[i], w_va_N_list_neg[i])))
return x_tr_NF_list, y_tr_N_list, w_tr_N_list, x_va_NF_list, y_va_N_list, y_va_prev_list, y_va_orig_list, w_va_N_list
def prepare_singleclass_folds(self, x_NF, y_N, y_prev, y_binary_orig, sample_weight):
N = y_N.size
n_rows_per_fold = int(np.ceil(N / float(self.num_folds))) * np.ones(self.num_folds, dtype=np.int32)
n_surplus = np.sum(n_rows_per_fold) - N
if n_surplus > 0:
n_rows_per_fold[-n_surplus:] -= 1
assert np.allclose(np.sum(n_rows_per_fold), N)
fold_boundaries = np.hstack([0, np.cumsum(n_rows_per_fold)])
start_per_fold = fold_boundaries[:-1]
stop_per_fold = fold_boundaries[1:]
x_tr_NF_list = []
y_tr_N_list = []
w_tr_N_list = []
x_va_NF_list = []
y_va_N_list = []
y_va_prev_list = []
y_va_orig_list = []
w_va_N_list = []
## Loop over folds from 1, 2, ... K=num_folds
for fold_id in range(1, self.num_folds + 1):
fold_start = start_per_fold[fold_id-1]
fold_stop = stop_per_fold[fold_id-1]
# Training data is everything that's not current validation fold
x_tr_NF = np.vstack([x_NF[:fold_start], x_NF[fold_stop:]])
y_tr_N = np.hstack([y_N[:fold_start], y_N[fold_stop:]])
w_tr_N = np.hstack([sample_weight[:fold_start], sample_weight[fold_stop:]])
x_va_NF = x_NF[fold_start:fold_stop].copy()
y_va_N = y_N[fold_start:fold_stop].copy()
y_va_prev = y_prev[fold_start:fold_stop].copy()
y_va_orig = y_binary_orig[fold_start:fold_stop].copy()
w_va_N = sample_weight[fold_start:fold_stop].copy()
x_tr_NF_list.append(x_tr_NF)
y_tr_N_list.append(y_tr_N)
w_tr_N_list.append(w_tr_N)
x_va_NF_list.append(x_va_NF)
y_va_N_list.append(y_va_N)
y_va_prev_list.append(y_va_prev)
y_va_orig_list.append(y_va_orig)
w_va_N_list.append(w_va_N)
return x_tr_NF_list, y_tr_N_list, w_tr_N_list, x_va_NF_list, y_va_N_list, y_va_prev_list, y_va_orig_list, w_va_N_list
def fit(self, x_NF, y_N, y_prev, y_binary_orig, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones(y_N.size)
x_tr_NF_list, y_tr_N_list, w_tr_N_list, x_va_NF_list, y_va_N_list, y_prev_list, y_va_orig_list, w_va_N_list = self.prepare_folds(x_NF, y_N, y_prev, y_binary_orig, sample_weight)
scores = []
param_combinations = generate_argument_dicts(self.param_grid)
for params in param_combinations:
param_scores = []
for fold in range(self.num_folds):
self.estimator.set_params(**params)
self.estimator.fit(x_tr_NF_list[fold], y_tr_N_list[fold], sample_weight=w_tr_N_list[fold])
s = self.average_precision_hierarchy(self.estimator, x_va_NF_list[fold], y_prev_list[fold], y_va_orig_list[fold], w_va_N_list[fold])
param_scores.append(s)
scores.append(np.mean(param_scores))
best_params = param_combinations[np.argmax(np.array(scores))]
self.estimator.set_params(**best_params)
self.estimator.fit(x_NF, y_N)
self.best_estimator_ = self.estimator
self.best_params_ = best_params
self.best_score_ = np.max(np.array(scores))
def average_precision_hierarchy(self, estimator, x, y_prev, y_binary_orig, sample_weight):
if self.proba:
y_hat = estimator.predict_proba(x)[:,1]
else:
y_hat = estimator.predict(x)
y_hat += y_prev
# y_hat[y_hat >= 0.5] = 1.0
# y_hat[y_hat < 0.5] = 0.0
score = sklearn.metrics.average_precision_score(y_binary_orig, y_hat, sample_weight=sample_weight)
return score
def balanced_accuracy_hierarchy(estimator, x, y_prev, y_binary_orig, sample_weight):
y_hat = estimator.predict(x) + y_prev
y_hat[y_hat >= 0.5] = 1.0
y_hat[y_hat < 0.5] = 0.0
# print(y_hat)
# print(y_binary_orig)
score = sklearn.metrics.balanced_accuracy_score(y_binary_orig, y_hat, sample_weight=sample_weight)
return score
def generate_argument_dicts(param_grid):
params = []
values_list = []
for param in param_grid:
params.append(param)
values_list.append(param_grid[param])
list_of_permutations = [list(x) for x in np.array(np.meshgrid(*values_list)).T.reshape(-1,len(values_list))]
dicts = []
for perms in list_of_permutations:
adict = {}
for i, value in enumerate(perms):
if value != 'sqrt':
value = int(value)
adict[params[i]] = value
dicts.append(adict)
return dicts
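# A minimal usage sketch (illustrative only): the classifier, the synthetic data and the
# parameter grid below are assumptions, and the hierarchy correction y_prev is set to
# zero so the search reduces to a plain average-precision grid search.
if __name__ == '__main__':
    import sklearn.metrics  # ensure the metrics submodule used above is loaded
    from sklearn.ensemble import RandomForestClassifier
    rng = np.random.RandomState(0)
    x = rng.randn(200, 5)
    y = (x[:, 0] + 0.5 * rng.randn(200) > 0).astype(float)
    search = GridSearchCV(RandomForestClassifier(random_state=0),
                          param_grid={'n_estimators': [10, 50], 'max_depth': [2, 4]},
                          cv=5, proba=True)
    search.fit(x, y, y_prev=np.zeros(200), y_binary_orig=y)
    print(search.best_params_, search.best_score_)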
|
[
"gian_marco.visani@tufts.edu"
] |
gian_marco.visani@tufts.edu
|
ed8eeaa3cf57ec9beead485077c332fd71a176d6
|
dd2b4d26d44774b0dd4b5bb52af4923300398d43
|
/src/bvexp201007/first_order/first_order_sensels.py
|
017eab59c0fe8a5f82096cf02dbe31ebfa8448a4
|
[] |
no_license
|
AndreaCensi/bvexp201007
|
7fe6e4c428c7face83383bbdc91d14c286029b08
|
0030d19f2914329f1d93c560b83f711b07ed0313
|
refs/heads/master
| 2021-01-21T00:45:23.108676
| 2011-05-29T17:53:58
| 2011-05-29T17:53:58
| 916,496
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
from numpy import zeros
from pybv.utils import weighted_average, outer
class FirstorderSensels:
def __init__(self, config):
n = config.num_sensels
k = config.num_commands
self.T = zeros((k, n, n))
self.num_samples = 0
def process_data(self, data):
y = data.sensels
y_dot = data.sensels_dot
u = data.commands
T = outer(u, outer(y, y_dot))
self.T = weighted_average(self.T, self.num_samples, T)
self.num_samples += 1
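# A minimal usage sketch (illustrative; it assumes pybv is importable and that the
# config/data arguments may be any objects exposing the attributes read above):
#
#   from types import SimpleNamespace
#   import numpy as np
#   config = SimpleNamespace(num_sensels=10, num_commands=2)
#   learner = FirstorderSensels(config)
#   data = SimpleNamespace(sensels=np.random.rand(10),
#                          sensels_dot=np.random.rand(10),
#                          commands=np.random.rand(2))
#   learner.process_data(data)  # learner.T holds the running average of the outer products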
|
[
"andrea@cds.caltech.edu"
] |
andrea@cds.caltech.edu
|