hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d720d5649b3f6f61d2b618a481884218c21dc51a | 4,894 | py | Python | ocean_drifters_data/buoy_data.py | nglaze00/trajectory-analysis | 35cf6bfa75eaf3cae5635729ccdf15d17a6177c7 | [
"MIT"
] | 4 | 2021-02-23T14:52:53.000Z | 2022-03-31T14:14:37.000Z | ocean_drifters_data/buoy_data.py | nglaze00/trajectory-analysis | 35cf6bfa75eaf3cae5635729ccdf15d17a6177c7 | [
"MIT"
] | 2 | 2022-01-11T18:44:50.000Z | 2022-01-15T05:42:48.000Z | ocean_drifters_data/buoy_data.py | nglaze00/trajectory-analysis | 35cf6bfa75eaf3cae5635729ccdf15d17a6177c7 | [
"MIT"
] | 1 | 2022-03-08T02:20:37.000Z | 2022-03-08T02:20:37.000Z | """
Author: Nicholas Glaze, Rice ECE (nkg2 at rice.edu)
Code for converting ocean drifter data from Schaub's format to ours.
"""
import h5py
from trajectory_analysis.synthetic_data_gen import *

# Name suffix used for the output dataset folders.
dataset_folder = 'buoy'

# Open the source data (JLD2 is HDF5-compatible, so h5py can read it).
f = h5py.File('dataBuoys.jld2', 'r')
print(f.keys())

### Load arrays from file

## Graph
# elist (edge list)
edge_list = f['elist'][:] - 1  # 1-index -> 0-index
# tlist (triangle list)
face_list = f['tlist'][:] - 1
# NodeToHex (map node id <-> hex coords) # nodes are 1-indexed in data source
node_hex_map = [tuple(f[x][()]) for x in f['NodeToHex'][:]]
hex_node_map = {tuple(hex_coords): node for node, hex_coords in enumerate(node_hex_map)}

## trajectories
# coords
hex_coords = np.array([tuple(x) for x in f['HexcentersXY'][()]])
# nodes (each trajectory is a list of node ids, converted to 0-indexing)
traj_nodes = [[f[x][()] - 1 for x in f[ref][()]] for ref in f['TrajectoriesNodes'][:]]

#### Convert to SCoNe dataset
# generate graph + faces
G = nx.Graph()
G.add_edges_from([(edge_list[0][i], edge_list[1][i]) for i in range(len(edge_list[0]))])
# V and E are sorted so that edge_to_idx below gives a stable ordering.
V, E = np.array(sorted(G.nodes)), np.array([sorted(x) for x in sorted(G.edges)])
faces = np.array(sorted([[face_list[j][i] for j in range(3)] for i in range(len(face_list[0]))]))
edge_to_idx = {tuple(e): i for i, e in enumerate(E)}
coords = hex_coords
valid_idxs = np.arange(len(coords))

# B1, B2 (node-edge and edge-face incidence matrices)
B1, B2 = incidence_matrices(G, V, E, faces, edge_to_idx)

# Trajectories: keep only the last 10 hops of each path of length >= 5.
G_undir = G.to_undirected()
stripped_paths = strip_paths(traj_nodes)
paths = [path[-10:] for path in stripped_paths if len(path) >= 5]

# Print graph info (average degree, then sizes).
print(np.mean([len(G[i]) for i in V]))
print('# nodes: {}, # edges: {}, # faces: {}'.format(*B1.shape, B2.shape[1]))
print('# paths: {}, # paths with prefix length >= 3: {}'.format(len(traj_nodes), len(paths)))

# Reversed copies of every path, used for the "backward" half of the dataset.
rev_paths = [path[::-1] for path in paths]

# Save graph image to file (a few sample paths are drawn on top).
color_faces(G, V, coords, faces_from_B2(B2, E), filename='madagascar_graph_faces_paths.pdf', paths=[paths[1], paths[48], paths[125]])

# train / test masks (80/20 split, shuffled with a fixed seed for reproducibility)
np.random.seed(1)
train_mask = np.asarray([1] * round(len(paths) * 0.8) + [0] * round(len(paths) * 0.2))
np.random.shuffle(train_mask)
test_mask = 1 - train_mask

max_degree = np.max([deg for n, deg in G_undir.degree()])

## Consolidate dataset
# forward
prefix_flows_1hop, targets_1hop, last_nodes_1hop, suffixes_1hop, \
    prefix_flows_2hop, targets_2hop, last_nodes_2hop, suffixes_2hop = path_dataset(G_undir, E, edge_to_idx,
                                                                                   paths, max_degree, include_2hop=True,
                                                                                   truncate_paths=False)
# reversed
rev_prefix_flows_1hop, rev_targets_1hop, rev_last_nodes_1hop, rev_suffixes_1hop, \
    rev_prefix_flows_2hop, rev_targets_2hop, rev_last_nodes_2hop, rev_suffixes_2hop = path_dataset(G_undir, E, edge_to_idx,
                                                                                                  rev_paths, max_degree,
                                                                                                  include_2hop=True,
                                                                                                  truncate_paths=False)

# Bundle everything in the order matched by ``filenames`` below.
dataset_1hop = [prefix_flows_1hop, B1, B2, targets_1hop, train_mask, test_mask, G_undir, last_nodes_1hop,
                suffixes_1hop, rev_prefix_flows_1hop, rev_targets_1hop, rev_last_nodes_1hop, rev_suffixes_1hop]
dataset_2hop = [prefix_flows_2hop, B1, B2, targets_2hop, train_mask, test_mask, G_undir, last_nodes_2hop,
                suffixes_2hop, rev_prefix_flows_2hop, rev_targets_2hop, rev_last_nodes_2hop, rev_suffixes_2hop]

print('Train samples:', sum(train_mask))
print('Test samples:', sum(test_mask))

### Save datasets
folder_1hop = '../trajectory_analysis/trajectory_data_1hop_' + dataset_folder
folder_2hop = '../trajectory_analysis/trajectory_data_2hop_' + dataset_folder

# Create the output folders; ignore errors if they already exist.
try:
    os.mkdir(folder_1hop)
except:
    pass
try:
    os.mkdir(folder_2hop)
except:
    pass

# Save files (one .npy per array; the graph itself is pickled).
filenames = (
    'flows_in', 'B1', 'B2', 'targets', 'train_mask', 'test_mask', 'G_undir', 'last_nodes', 'target_nodes', 'rev_flows_in',
    'rev_targets', 'rev_last_nodes', 'rev_target_nodes')
for arr_1hop, arr_2hop, filename in zip(dataset_1hop, dataset_2hop, filenames):
    if filename == 'G_undir':
        nx.readwrite.gpickle.write_gpickle(G_undir, os.path.join(folder_1hop, filename + '.pkl'))
        nx.readwrite.gpickle.write_gpickle(G_undir, os.path.join(folder_2hop, filename + '.pkl'))
    else:
        np.save(os.path.join(folder_1hop, filename + '.npy'), arr_1hop)
        np.save(os.path.join(folder_2hop, filename + '.npy'), arr_2hop)

# Save prefixes file
# NOTE(review): ``edge_set`` is built but never used below — presumably
# left over from an earlier version; verify before removing.
edge_set = set()
for path in paths:
    for i in range(1, len(path)):
        edge = tuple(sorted(path[i-1:i+1]))
        edge_set.add(edge)

np.save(folder_1hop + '/prefixes.npy', [path[:-2] for path in paths])
| 35.722628 | 133 | 0.643645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,083 | 0.221291 |
d720f208a3df6385750ad1742504776a36069d68 | 6,151 | py | Python | Latest/venv/Lib/site-packages/apptools/permissions/default/user_manager.py | adamcvj/SatelliteTracker | 49a8f26804422fdad6f330a5548e9f283d84a55d | [
"Apache-2.0"
] | 1 | 2022-01-09T20:04:31.000Z | 2022-01-09T20:04:31.000Z | Latest/venv/Lib/site-packages/apptools/permissions/default/user_manager.py | adamcvj/SatelliteTracker | 49a8f26804422fdad6f330a5548e9f283d84a55d | [
"Apache-2.0"
] | 1 | 2022-02-15T12:01:57.000Z | 2022-03-24T19:48:47.000Z | Latest/venv/Lib/site-packages/apptools/permissions/default/user_manager.py | adamcvj/SatelliteTracker | 49a8f26804422fdad6f330a5548e9f283d84a55d | [
"Apache-2.0"
] | null | null | null | #------------------------------------------------------------------------------
# Copyright (c) 2008, Riverbank Computing Limited
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Riverbank Computing Limited
# Description: <Enthought permissions package component>
#------------------------------------------------------------------------------
# Enthought library imports.
from pyface.action.api import Action
from traits.api import Bool, Event, HasTraits, provides, \
Instance, List, Unicode
# Local imports.
from apptools.permissions.i_user import IUser
from apptools.permissions.i_user_manager import IUserManager
from apptools.permissions.package_globals import get_permissions_manager
from apptools.permissions.permission import ManageUsersPermission
from .i_user_database import IUserDatabase
@provides(IUserManager)
class UserManager(HasTraits):
    """The default user manager implementation."""

    #### 'IUserManager' interface #############################################

    # Actions (e.g. menu items) for administering the user database.
    management_actions = List(Instance(Action))

    # The current user.
    user = Instance(IUser)

    # Actions the current (non-admin) user may perform on their own account.
    user_actions = List(Instance(Action))

    # Event fired with the user when they authenticate (None on logout).
    user_authenticated = Event(IUser)

    #### 'UserManager' interface ##############################################

    # The user database.
    user_db = Instance(IUserDatabase)

    ###########################################################################
    # 'IUserManager' interface.
    ###########################################################################

    def bootstrapping(self):
        """Return True if we are bootstrapping, ie. no users have been defined.
        """

        return self.user_db.bootstrapping()

    def authenticate_user(self):
        """Authenticate the user."""

        if self.user_db.authenticate_user(self.user):
            self.user.authenticated = True

            # Tell the policy manager before everybody else.
            get_permissions_manager().policy_manager.load_policy(self.user)

            self.user_authenticated = self.user

    def unauthenticate_user(self):
        """Unauthenticate the user."""

        if self.user.authenticated and self.user_db.unauthenticate_user(self.user):
            self.user.authenticated = False

            # Tell the policy manager before everybody else.
            get_permissions_manager().policy_manager.load_policy(None)

            self.user_authenticated = None

    def matching_user(self, name):
        """Select a user."""

        return self.user_db.matching_user(name)

    ###########################################################################
    # Trait handlers.
    ###########################################################################

    def _management_actions_default(self):
        """Return the list of management actions.

        Only the actions the underlying database supports are included, and
        each is wrapped in a SecureProxy so it is hidden unless the current
        user holds the manage-users permission.
        """

        # Imported here to avoid a circular import at module load time.
        from apptools.permissions.secure_proxy import SecureProxy

        user_db = self.user_db
        actions = []
        perm = ManageUsersPermission()

        if user_db.can_add_user:
            act = Action(name="&Add a User...", on_perform=user_db.add_user)
            actions.append(SecureProxy(act, permissions=[perm], show=False))

        if user_db.can_modify_user:
            act = Action(name="&Modify a User...",
                         on_perform=user_db.modify_user)
            actions.append(SecureProxy(act, permissions=[perm], show=False))

        if user_db.can_delete_user:
            act = Action(name="&Delete a User...",
                         on_perform=user_db.delete_user)
            actions.append(SecureProxy(act, permissions=[perm], show=False))

        return actions

    def _user_actions_default(self):
        """Return the list of user actions."""

        actions = []

        if self.user_db.can_change_password:
            actions.append(_ChangePasswordAction())

        return actions

    def _user_default(self):
        """Return the default current user."""

        return self.user_db.user_factory()

    def _user_db_default(self):
        """Return the default user database."""

        # Defer to an external user database if there is one.
        try:
            from apptools.permissions.external.user_database import UserDatabase
        except ImportError:
            from apptools.permissions.default.user_database import UserDatabase

        return UserDatabase()
class _ChangePasswordAction(Action):
    """An action that allows the current user to change their password.  It
    isn't exported through actions/api.py because it is specific to this user
    manager implementation."""

    #### 'Action' interface ###################################################

    # Disabled until a user has actually authenticated (see _refresh_enabled).
    enabled = Bool(False)

    name = Unicode("&Change Password...")

    ###########################################################################
    # 'object' interface.
    ###########################################################################

    def __init__(self, **traits):
        """Initialise the object."""

        super(_ChangePasswordAction, self).__init__(**traits)

        # Re-evaluate ``enabled`` whenever the authentication state of the
        # current user changes.
        get_permissions_manager().user_manager.on_trait_event(self._refresh_enabled, 'user_authenticated')

    ###########################################################################
    # 'Action' interface.
    ###########################################################################

    def perform(self, event):
        """Perform the action."""

        um = get_permissions_manager().user_manager
        um.user_db.change_password(um.user)

    ###########################################################################
    # Private interface.
    ###########################################################################

    def _refresh_enabled(self, user):
        """Invoked whenever the current user's authorisation state changes."""

        # ``user`` is the payload of the ``user_authenticated`` event:
        # a user object on login, None on logout.
        self.enabled = user is not None
| 33.612022 | 106 | 0.558121 | 4,986 | 0.8106 | 0 | 0 | 3,514 | 0.571289 | 0 | 0 | 2,714 | 0.441229 |
d72176db25b837eec39504c5b4f9c46b726440a8 | 97 | py | Python | libraries/exhibitions/apps.py | chris-lawton/libraries_wagtail | 60c2484b137bb33763da2e49b191b1a380f3d56f | [
"ECL-2.0"
] | 9 | 2017-12-19T21:15:23.000Z | 2022-03-23T08:11:52.000Z | libraries/exhibitions/apps.py | chris-lawton/libraries_wagtail | 60c2484b137bb33763da2e49b191b1a380f3d56f | [
"ECL-2.0"
] | 140 | 2017-07-12T00:09:53.000Z | 2021-11-02T17:30:43.000Z | libraries/exhibitions/apps.py | chris-lawton/libraries_wagtail | 60c2484b137bb33763da2e49b191b1a380f3d56f | [
"ECL-2.0"
] | 3 | 2017-08-21T10:47:34.000Z | 2020-08-20T14:04:18.000Z | from django.apps import AppConfig
class ExhibitionsConfig(AppConfig):
    """Django application configuration for the ``exhibitions`` app."""
    name = 'exhibitions'
| 16.166667 | 35 | 0.773196 | 60 | 0.618557 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.134021 |
d722d3dbdd851da32d3a6d5f6fb12b40e48dbb16 | 227 | py | Python | 2-1_factorial_Q1_recursive.py | Soooyeon-Kim/Algorithm | 28a191d7382d9c3bb6d9afb19f4cff642c3aec03 | [
"MIT"
] | null | null | null | 2-1_factorial_Q1_recursive.py | Soooyeon-Kim/Algorithm | 28a191d7382d9c3bb6d9afb19f4cff642c3aec03 | [
"MIT"
] | null | null | null | 2-1_factorial_Q1_recursive.py | Soooyeon-Kim/Algorithm | 28a191d7382d9c3bb6d9afb19f4cff642c3aec03 | [
"MIT"
] | null | null | null | def factorial(num):
# 재귀함수를 세울 때는 탈출 조건부터 찾는다.
if num <= 1:
return 1
return factorial(num - 1) * num
def main():
    # Entry point: demonstrate factorial() on a sample value.
    print(factorial(5))
    # return 120
if __name__ == "__main__":
main() | 17.461538 | 36 | 0.53304 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.326996 |
d724654c41c2d3df788e13f69c9b62305d208d29 | 1,210 | py | Python | PyPoll/main.py | adekted/python-challenge | a38442e0e0996a17397d15395e1b53ff4cc52ce4 | [
"MIT"
] | null | null | null | PyPoll/main.py | adekted/python-challenge | a38442e0e0996a17397d15395e1b53ff4cc52ce4 | [
"MIT"
] | null | null | null | PyPoll/main.py | adekted/python-challenge | a38442e0e0996a17397d15395e1b53ff4cc52ce4 | [
"MIT"
] | null | null | null | import os
import csv
pollresults = os.path.join(".","raw_data","election_data_1.csv")
output = os.path.join(".","results.txt")
with open(pollresults, newline = '') as polldata:
pollreader = csv.reader(polldata, delimiter = ",")
firstline = polldata.readline()
votes = 0
poll_results = {}
for row in pollreader:
votes = votes + 1
if row[2] in poll_results.keys():
poll_results[row[2]] = poll_results[row[2]] + 1
else:
poll_results[row[2]] = 1
vote_results = []
for i, k in poll_results.items():
vote_results.append((i, k, (round((float(k/votes)*100),2))))
max_votes = 0
winner = ''
for j in vote_results:
if j[1] >= max_votes:
max_votes = j[1]
winner = j[0]
with open(output,'w') as resultsfile:
resultsfile.write("Election Results\n")
resultsfile.write("----------------\n")
for result in vote_results:
resultsfile.writelines(result[0] + ": " + str(result[2]) + "% (" + str(result[1]) + ")\n")
resultsfile.write("----------------\n")
resultsfile.write("Winner: " + winner)
with open(output, 'r') as readresults:
print(readresults.read()) | 28.809524 | 98 | 0.575207 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 147 | 0.121488 |
d72479d0e051c4a22354d598e1cbb9a5f7561db5 | 1,907 | py | Python | ReinforcementLearning/ExperienceReplay.py | Suryavf/SelfDrivingCar | 362ac830516366b1c31ef01ea0456eb99f0d9722 | [
"MIT"
] | 11 | 2019-08-14T18:55:13.000Z | 2021-09-10T05:54:49.000Z | ReinforcementLearning/ExperienceReplay.py | Suryavf/SelfDrivingCar | 362ac830516366b1c31ef01ea0456eb99f0d9722 | [
"MIT"
] | 3 | 2020-05-05T15:20:20.000Z | 2021-06-22T07:47:26.000Z | ReinforcementLearning/ExperienceReplay.py | Suryavf/SelfDrivingCar | 362ac830516366b1c31ef01ea0456eb99f0d9722 | [
"MIT"
] | 1 | 2020-12-18T15:46:09.000Z | 2020-12-18T15:46:09.000Z | import numpy as np
from common.prioritized import PrioritizedExperienceReplay
class ReplayMemory(object):
    """Fixed-capacity experience buffer with prioritized sampling.

    Transitions (state, action, reward, next state) are kept in
    pre-allocated numpy arrays; once the buffer is full, the oldest
    entries are overwritten in circular order.
    """

    def __init__(self, n_buffer, len_state, len_action):
        # Capacity and per-field dimensions
        self.n_buffer = n_buffer
        self.len_state = len_state
        self.len_action = len_action
        # Number of stored transitions (<= n_buffer) and the next
        # slot to overwrite once the buffer has filled up.
        self.n_experiences = 0
        self.pointer = 0
        # Pre-allocated transition storage
        self._allocate_storage()
        # Priority bookkeeping used by getBatch()
        self.priority = PrioritizedExperienceReplay(n_buffer)

    def _allocate_storage(self):
        """(Re)create zeroed storage arrays for every transition field."""
        state_shape = (self.n_buffer, self.len_state)
        action_shape = (self.n_buffer, self.len_action)
        self.state = np.zeros(state_shape, dtype=float)
        self.action = np.zeros(action_shape, dtype=float)
        self.reward = np.zeros(self.n_buffer, dtype=float)
        self.new_state = np.zeros(state_shape, dtype=float)

    def getBatch(self):
        """Sample a prioritized batch: ((s, a, r, s'), importance weights)."""
        idx, weight = self.priority.sample()
        batch = (self.state[idx], self.action[idx],
                 self.reward[idx], self.new_state[idx])
        return batch, weight

    def size(self):
        """Total capacity of the buffer."""
        return self.n_buffer

    def count(self):
        """Number of experiences currently stored."""
        return self.n_experiences

    def add(self, state, action, reward, new_state, done):
        """Store one transition, overwriting the oldest entry once full.

        ``done`` is accepted for API compatibility but is not stored.
        """
        if self.n_experiences < self.n_buffer:
            # Still filling up: append at the next free slot.
            slot = self.n_experiences
            self.n_experiences += 1
        else:
            # Full: overwrite circularly, advancing the wrap-around pointer.
            slot = self.pointer
            self.pointer = (self.pointer + 1) % self.n_buffer
        self.state[slot] = state
        self.action[slot] = action
        self.reward[slot] = reward
        self.new_state[slot] = new_state

    def erase(self):
        """Reset the buffer to its freshly-constructed (empty) condition."""
        self._allocate_storage()
        self.n_experiences = 0
        self.pointer = 0
d724fe813f619adf66dd7a68f8f674a9c901f1fa | 9,707 | py | Python | domdf_python_tools/dates.py | domdfcoding/domdf_python_tools | eb15c744eac3005e13fae8d8a5582e955137cf22 | [
"MIT"
] | null | null | null | domdf_python_tools/dates.py | domdfcoding/domdf_python_tools | eb15c744eac3005e13fae8d8a5582e955137cf22 | [
"MIT"
] | 57 | 2020-06-16T15:48:20.000Z | 2022-03-31T08:35:09.000Z | domdf_python_tools/dates.py | domdfcoding/domdf_python_tools | eb15c744eac3005e13fae8d8a5582e955137cf22 | [
"MIT"
] | null | null | null | # !/usr/bin/env python
#
# dates.py
"""
Utilities for working with dates and times.
.. extras-require:: dates
:pyproject:
**Data:**
.. autosummary::
~domdf_python_tools.dates.months
~domdf_python_tools.dates.month_full_names
~domdf_python_tools.dates.month_short_names
"""
#
# Copyright © 2020 Dominic Davis-Foster <dominic@davis-foster.co.uk>
#
# Parts of the docstrings based on the Python 3.8.2 Documentation
# Licensed under the Python Software Foundation License Version 2.
# Copyright © 2001-2020 Python Software Foundation. All rights reserved.
# Copyright © 2000 BeOpen.com. All rights reserved.
# Copyright © 1995-2000 Corporation for National Research Initiatives. All rights reserved.
# Copyright © 1991-1995 Stichting Mathematisch Centrum. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
# calc_easter from https://code.activestate.com/recipes/576517-calculate-easter-western-given-a-year/
# Copyright © 2008 Martin Diers
# Licensed under the MIT License
#
# stdlib
import datetime
import sys
import typing
from collections import OrderedDict
from types import ModuleType
from typing import Optional, Union
__all__ = [
"current_tzinfo",
"set_timezone",
"utc_timestamp_to_datetime",
"months",
"parse_month",
"get_month_number",
"check_date",
"calc_easter",
"month_short_names",
"month_full_names",
]
def current_tzinfo() -> Optional[datetime.tzinfo]:
    """
    Returns a tzinfo object for the current timezone.
    """

    # ``astimezone()`` with no argument attaches the platform's local timezone.
    return datetime.datetime.now().astimezone().tzinfo  # pragma: no cover (hard to test)
#
# def datetime_to_utc_timestamp(datetime, current_tzinfo=None):
# """
# Convert a :class:`datetime.datetime` object to seconds since UNIX epoch, in UTC time
#
# :param datetime:
# :type datetime: :class:`datetime.datetime`
# :param current_tzinfo: A tzinfo object representing the current timezone.
# If None it will be inferred.
# :type current_tzinfo: :class:`datetime.tzinfo`
#
# :return: Timestamp in UTC timezone
# :rtype: float
# """
#
# return datetime.astimezone(current_tzinfo).timestamp()
#
def set_timezone(obj: datetime.datetime, tzinfo: datetime.tzinfo) -> datetime.datetime:
    """
    Attach *tzinfo* to *obj* without converting the wall-clock time.

    Unlike :meth:`datetime.datetime.astimezone`, the hour/minute/second
    fields are left untouched; only the timezone attribute is replaced.

    :param obj: The datetime whose timezone should be set.
    :param tzinfo: The timezone to attach.
    """
    rezoned = obj.replace(tzinfo=tzinfo)
    return rezoned
def utc_timestamp_to_datetime(
        utc_timestamp: Union[float, int],
        output_tz: Optional[datetime.tzinfo] = None,
        ) -> datetime.datetime:
    """
    Convert a UTC timestamp (seconds since the UNIX epoch) to a
    :class:`datetime.datetime` object.

    When ``output_tz`` is :py:obj:`None` the result is expressed in the
    platform's local date and time, with the local timezone attached.
    Otherwise ``output_tz`` must be an instance of a
    :class:`datetime.tzinfo` subclass, and the result is expressed in
    that timezone.

    :param utc_timestamp: The timestamp to convert to a datetime object
    :param output_tz: The timezone to output the datetime object for.
        If :py:obj:`None` it will be inferred.

    :return: The timestamp as a datetime object.

    :raises OverflowError: if the timestamp is out of the range of values
        supported by the platform C ``localtime()`` or ``gmtime()``
        functions. It's common for this to be restricted to years in
        1970 through 2038.
    """
    converted = datetime.datetime.fromtimestamp(utc_timestamp, output_tz)
    # ``astimezone(None)`` attaches the inferred local timezone when no
    # explicit ``output_tz`` was supplied; otherwise it is a no-op conversion.
    return converted.astimezone(output_tz)
# ``typing.OrderedDict`` can only be subscripted on Python >= 3.7.2,
# so fall back to the plain class on older interpreters.
if sys.version_info <= (3, 7, 2):  # pragma: no cover (py37+)
    MonthsType = OrderedDict
else:  # pragma: no cover (<py37)
    MonthsType = typing.OrderedDict[str, str]  # type: ignore # noqa: TYP006

#: Mapping of 3-character shortcodes to full month names.
months: MonthsType = OrderedDict(
    Jan="January",
    Feb="February",
    Mar="March",
    Apr="April",
    May="May",
    Jun="June",
    Jul="July",
    Aug="August",
    Sep="September",
    Oct="October",
    Nov="November",
    Dec="December",
    )

month_short_names = tuple(months.keys())
"""
List of the short names for months in the Gregorian calendar.

.. versionadded:: 2.0.0
"""

month_full_names = tuple(months.values())
"""
List of the full names for months in the Gregorian calendar.

.. versionadded:: 2.0.0
"""
def parse_month(month: Union[str, int]) -> str:
    """
    Convert an integer (1-12) or shorthand month into the full month name.

    :param month: The month number or shorthand name

    :return: The full name of the month

    :raises ValueError: If the month is not recognised.
    """
    error_text = f"The given month ({month!r}) is not recognised."

    try:
        as_number = int(month)
    except ValueError:
        # Not numeric: treat it as a (possibly abbreviated) month name and
        # look up its three-letter shortcode.
        try:
            return months[month.capitalize()[:3]]  # type: ignore
        except KeyError:
            raise ValueError(error_text)

    if not 0 < as_number <= 12:
        raise ValueError(error_text)

    return list(months.values())[as_number - 1]
def get_month_number(month: Union[str, int]) -> int:
    """
    Return the number (1-12) of the given month.

    If ``month`` is already a number between 1 and 12 it is returned
    immediately.

    :param month: The month to convert to a number

    :return: The number of the month

    :raises ValueError: If the month is not recognised.
    """
    if isinstance(month, int):
        if not 0 < month <= 12:
            raise ValueError(f"The given month ({month!r}) is not recognised.")
        return month

    # Resolve any shorthand/abbreviated name to the canonical full name,
    # then locate its 1-based position in the month mapping.
    full_name = parse_month(month)
    return list(months.values()).index(full_name) + 1
def check_date(month: Union[str, int], day: int, leap_year: bool = True) -> bool:
    """
    Return whether *day* is a valid day number for the given *month*.

    .. note::

        This function will return :py:obj:`True` for the 29th Feb.
        If you don't want this behaviour set ``leap_year`` to :py:obj:`False`.

    :param month: The month to test.
    :param day: The day number to test.
    :param leap_year: Whether to return :py:obj:`True` for 29th Feb.
    """
    # Coerce/validate the inputs up front (errors here propagate to the
    # caller, just as an unrecognised month does).
    day_number = int(day)
    month_number = get_month_number(month)

    # A known leap (2020) / non-leap (2019) year stands in for "any" year.
    reference_year = 2020 if leap_year else 2019

    try:
        datetime.date(reference_year, month_number, day_number)
    except ValueError:
        return False
    else:
        return True
def calc_easter(year: int) -> datetime.date:
    """
    Return the date of (Western) Easter in the given year.

    .. versionadded:: 1.4.0

    :param year:
    """
    # Anonymous Gregorian computus.
    golden = year % 19
    century, remainder = divmod(year, 100)
    shifted_epact = (19 * golden + century - century // 4
                     - ((century - (century + 8) // 25 + 1) // 3) + 15) % 30
    weekday_offset = (32 + 2 * (century % 4) + 2 * (remainder // 4)
                      - shifted_epact - (remainder % 4)) % 7
    # Offset such that // 31 and % 31 yield the month and day directly.
    day_of_march = (shifted_epact + weekday_offset
                    - 7 * ((golden + 11 * shifted_epact + 22 * weekday_offset) // 451)
                    + 114)
    return datetime.date(year, day_of_march // 31, day_of_march % 31 + 1)
def get_utc_offset(
        tz: Union[datetime.tzinfo, str],
        date: Optional[datetime.datetime] = None,
        ) -> Optional[datetime.timedelta]:
    """
    Returns the offset between UTC and the requested timezone on the given date.

    If ``date`` is :py:obj:`None` then the current date is used.

    :param tz: ``pytz.timezone`` or a string representing the timezone
    :param date: The date to obtain the UTC offset for
    """

    if date is None:
        date = datetime.datetime.utcnow()

    timezone: Optional[datetime.tzinfo]

    if isinstance(tz, str):
        # Resolve the timezone name for this specific date
        # (the offset can differ with DST).
        timezone = get_timezone(tz, date)
    else:
        timezone = tz  # pragma: no cover (hard to test)

    # Pin the date to UTC, convert to the target zone, and read the offset.
    return date.replace(tzinfo=pytz.utc).astimezone(timezone).utcoffset()
def get_timezone(tz: str, date: Optional[datetime.datetime] = None) -> Optional[datetime.tzinfo]:
    """
    Returns a localized ``pytz.timezone`` object for the given date.

    If ``date`` is :py:obj:`None` then the current date is used.

    .. latex:vspace:: -10px

    :param tz: A string representing a pytz timezone
    :param date: The date to obtain the timezone for
    """

    if date is None:  # pragma: no cover (hard to test)
        date = datetime.datetime.utcnow()

    # Localise a naive copy of the date so pytz can pick the correct
    # UTC offset (e.g. DST vs non-DST) for that moment in time.
    d = date.replace(tzinfo=None)

    return pytz.timezone(tz).localize(d).tzinfo
# Names that only work when pytz is installed.
_pytz_functions = ["get_utc_offset", "get_timezone"]

try:
    # 3rd party
    import pytz

    __all__.extend(_pytz_functions)
except ImportError as e:
    # pytz is unavailable: warn immediately when run as a script, otherwise
    # defer the failure until one of the pytz-backed functions is accessed.
    if __name__ == "__main__":
        # stdlib
        import warnings

        # this package
        from domdf_python_tools.words import word_join

        warnings.warn(
                f"""\
'{word_join(_pytz_functions)}' require pytz (https://pypi.org/project/pytz/), but it could not be imported.
The error was: {e}.
"""
                )
    else:
        _actual_module = sys.modules[__name__]

        class SelfWrapper(ModuleType):
            # Module proxy: raises ImportError lazily for the pytz-backed
            # names, delegating every other attribute to the real module.

            def __getattr__(self, name):
                if name in _pytz_functions:
                    raise ImportError(
                            f"{name!r} requires pytz (https://pypi.org/project/pytz/), but it could not be imported."
                            )
                else:
                    return getattr(_actual_module, name)

        sys.modules[__name__] = SelfWrapper(__name__)
d72583632536ec0cced7fa350c14b691453b491f | 5,812 | py | Python | admix/tasks/check_transfers.py | XENONnT/admix | 3abcd40d08dc6a403bc535a5b0f13f624a5d766e | [
"BSD-3-Clause"
] | 2 | 2021-01-23T21:43:48.000Z | 2021-01-23T21:43:57.000Z | admix/tasks/check_transfers.py | XENONnT/admix | 3abcd40d08dc6a403bc535a5b0f13f624a5d766e | [
"BSD-3-Clause"
] | 17 | 2019-09-30T12:54:26.000Z | 2021-12-09T21:17:37.000Z | admix/tasks/check_transfers.py | XENONnT/admix | 3abcd40d08dc6a403bc535a5b0f13f624a5d766e | [
"BSD-3-Clause"
] | 2 | 2020-03-28T14:06:05.000Z | 2021-03-18T21:50:48.000Z | # -*- coding: utf-8 -*-
import json
import os
from admix.helper import helper
import time
import shutil
from admix.interfaces.database import ConnectMongoDB
from admix.helper.decorator import Collector
#get Rucio imports done:
from admix.interfaces.rucio_dataformat import ConfigRucioDataFormat
from admix.interfaces.rucio_summoner import RucioSummoner
from admix.utils import make_did
@Collector
class CheckTransfers():
"""
Using the runDB, it searches for all runs and all data types for which a Rucio rule is ongoing
(identified by both status specific data.status equal to "transferring".
For each of them, it checks, by using Rucio API commands, if those rules have been
succesfully completed. If so, the corresponding data.status is updated as
"transferred". If all data types are flagged as transferred, then the run itself
is flagged as "transferred".
"""
def __init__(self):
pass
def init(self):
helper.global_dictionary['logger'].Info(f'Init task {self.__class__.__name__}')
#Take all data types categories
self.NORECORDS_DTYPES = helper.get_hostconfig()['norecords_types']
self.RAW_RECORDS_DTYPES = helper.get_hostconfig()['raw_records_types']
self.LIGHT_RAW_RECORDS_DTYPES = helper.get_hostconfig()['light_raw_records_types']
self.RECORDS_DTYPES = helper.get_hostconfig()['records_types']
# Choose which RSE you want upload to
self.UPLOAD_TO = helper.get_hostconfig()['upload_to']
#Choose which data type you want to treat
self.DTYPES = self.NORECORDS_DTYPES + self.RECORDS_DTYPES + self.RAW_RECORDS_DTYPES + self.LIGHT_RAW_RECORDS_DTYPES
#Define the waiting time (seconds)
self.waitfor = 60*5
#Init the runDB
self.db = ConnectMongoDB()
#Init Rucio for later uploads and handling:
self.rc = RucioSummoner(helper.get_hostconfig("rucio_backend"))
self.rc.SetRucioAccount(helper.get_hostconfig('rucio_account'))
self.rc.SetConfigPath(helper.get_hostconfig("rucio_cli"))
self.rc.SetProxyTicket(helper.get_hostconfig('rucio_x509'))
self.rc.SetHost(helper.get_hostconfig('host'))
self.rc.ConfigHost()
self.rc.SetProxyTicket("rucio_x509")
    def check_transfers(self):
        """Poll Rucio for every run marked 'transferring' and sync MongoDB.

        For each such run this checks the Rucio replication rule of every
        'rucio-catalogue' data entry that is transferring/error/stuck, updates
        that entry's status in the run document, and — once all rules report
        'transferred' and the originating Event Builder has nothing left to
        upload — promotes the whole run to 'transferred'.
        """
        # Fetch only the fields we need for runs currently transferring.
        cursor = self.db.db.find(
            {'status': 'transferring'},
            # {'number': 11924},
            {'number': 1, 'data': 1, 'bootstrax': 1})
        cursor = list(cursor)
        helper.global_dictionary['logger'].Info('Check transfers : checking status of {0} runs'.format(len(cursor)))
        for run in list(cursor):
            # Extracts the correct Event Builder machine who processed this run
            # (short hostname, e.g. 'eb1' from 'eb1.example.org').
            bootstrax = run['bootstrax']
            eb = bootstrax['host'].split('.')[0]
            # for each run, check the status of all REPLICATING rules
            rucio_stati = []            # per-datatype rucio statuses seen for this run
            eb_still_to_be_uploaded = []  # datatypes the EB has not finished uploading
            for d in run['data']:
                if d['host'] == 'rucio-catalogue':
                    # if run['number']==7695 and d['status'] == 'stuck':
                    #     self.db.db.find_one_and_update({'_id': run['_id'],'data': {'$elemMatch': d}},
                    #                                    {'$set': {'data.$.status': 'transferring'}}
                    #     )
                    # NOTE(review): assumes every rucio-catalogue entry carries a
                    # 'status' key (unlike EB entries, checked below) — confirm.
                    if d['status'] in ['transferring','error','stuck']:
                        did = d['did']
                        status = self.rc.CheckRule(did, d['location'])
                        if status == 'REPLICATING':
                            rucio_stati.append('transferring')
                        elif status == 'OK':
                            # update database: mark this data entry as done
                            helper.global_dictionary['logger'].Info('Check transfers : Run {0}, data type {1}, location {2}: transferred'.format(run['number'], d['type'],d['location']))
                            self.db.db.find_one_and_update({'_id': run['_id'],'data': {'$elemMatch': d}},
                                                           {'$set': {'data.$.status': 'transferred'}}
                                                           )
                            rucio_stati.append('transferred')
                        elif status == 'STUCK':
                            self.db.db.find_one_and_update({'_id': run['_id'], 'data': {'$elemMatch': d}},
                                                           {'$set': {'data.$.status': 'stuck'}}
                                                           )
                            rucio_stati.append('stuck')
                    else:
                        # Any other status (e.g. already 'transferred') is kept as-is.
                        rucio_stati.append(d['status'])
                # print(d['did'])
                # search if dtype still has to be uploaded by the Event Builder
                if eb in d['host'] and d['type'] in self.DTYPES:
                    if 'status' not in d:
                        eb_still_to_be_uploaded.append(d['type'])
                    else:
                        if d['status'] != "transferred":
                            eb_still_to_be_uploaded.append(d['type'])
            # are there any other rucio rules transferring?
            #print(run['number'],eb_still_to_be_uploaded,rucio_stati)
            # Promote the run only when at least one rule exists, all rules are
            # done, and the EB has uploaded everything.
            if len(rucio_stati) > 0 and all([s == 'transferred' for s in rucio_stati]) and len(eb_still_to_be_uploaded)==0:
                self.db.SetStatus(run['number'], 'transferred')
                helper.global_dictionary['logger'].Info('Check transfers : Run {0} fully transferred'.format(run['number']))
def run(self,*args, **kwargs):
helper.global_dictionary['logger'].Info(f'Run task {self.__class__.__name__}')
# Check transfers
self.check_transfers()
return 0
    def __del__(self):
        # No resources to release; defined explicitly so finalization is a no-op.
        pass
| 41.514286 | 185 | 0.562285 | 5,411 | 0.931005 | 0 | 0 | 5,422 | 0.932897 | 0 | 0 | 2,326 | 0.400206 |
d726c821bd4d42ffbe3ad183aeab04cc58a7918f | 4,113 | py | Python | fantastico/mvc/models/tests/test_module_filter_compound_or.py | bopopescu/fantastico | 7c95f244f0cf0239ac5408146612dd72f88d35ea | [
"MIT"
] | 2 | 2016-12-18T02:42:25.000Z | 2018-01-30T16:32:29.000Z | fantastico/mvc/models/tests/test_module_filter_compound_or.py | bopopescu/fantastico | 7c95f244f0cf0239ac5408146612dd72f88d35ea | [
"MIT"
] | 1 | 2020-07-28T07:16:35.000Z | 2020-07-28T07:16:35.000Z | fantastico/mvc/models/tests/test_module_filter_compound_or.py | bopopescu/fantastico | 7c95f244f0cf0239ac5408146612dd72f88d35ea | [
"MIT"
] | 1 | 2020-07-24T05:55:28.000Z | 2020-07-24T05:55:28.000Z | '''
Copyright 2013 Cosnita Radu Viorel
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
.. codeauthor:: Radu Viorel Cosnita <radu.cosnita@gmail.com>
.. py:module:fantastico.mvc.models.tests.test_model_filter_compound
'''
from fantastico.exceptions import FantasticoNotSupportedError, FantasticoError
from fantastico.mvc.models.model_filter import ModelFilter
from fantastico.mvc.models.model_filter_compound import ModelFilterAnd, \
ModelFilterOr
from fantastico.tests.base_case import FantasticoUnitTestsCase
from mock import Mock
from sqlalchemy.schema import Column
from sqlalchemy.types import Integer
class ModelFilterOrTests(FantasticoUnitTestsCase):
    '''This class provides the test suite for the compound *or* model filter.'''

    def init(self):
        # Fresh mocked model with a single integer ``id`` column for each test.
        self._model = Mock()
        self._model.id = Column("id", Integer)

    def test_modelfilteror_noargs(self):
        '''This test case ensures compound **or** filter can not be built without arguments.'''

        with self.assertRaises(FantasticoNotSupportedError):
            ModelFilterOr()

    def test_modelfilteror_notenoughargs(self):
        '''This test case ensures compound **or** filter can not be built with a single argument.'''

        with self.assertRaises(FantasticoNotSupportedError):
            ModelFilterOr(ModelFilter(self._model.id, 1, ModelFilter.EQ))

    def test_modelfilteror_wrongargtype(self):
        '''This test case ensures compound **or** filter works only with ModelFilter arguments.'''

        with self.assertRaises(FantasticoNotSupportedError):
            # Bug fix: this suite must exercise ModelFilterOr — it previously
            # instantiated ModelFilterAnd by mistake, leaving the Or class
            # untested for this case.
            ModelFilterOr(Mock(), Mock())

    def test_modelfilteror_ok(self):
        '''This test case ensures compound **or** filter correctly transform the filter into sql alchemy or_ statement.'''

        self._or_invoked = False

        model_filter = ModelFilterOr(ModelFilter(self._model.id, 1, ModelFilter.EQ),
                                     ModelFilter(self._model.id, 1, ModelFilter.EQ),
                                     ModelFilter(self._model.id, 1, ModelFilter.EQ))

        query = Mock()

        def filter_fn(expr):
            # Record that the compound expression actually reached filter().
            self._or_invoked = True
            return Mock()

        self._model.id.table = Mock()
        query._primary_entity = query
        query.selectable = self._model.id.table
        query.filter = filter_fn

        query_new = model_filter.build(query)

        self.assertTrue(self._or_invoked)
        self.assertIsInstance(query_new, Mock)

    def test_modelfiteror_unhandled_exception(self):
        '''This test case ensures unhandled exceptions raised from ModelFilter are gracefully handled by ModelFilterOr build.'''

        model_filter = ModelFilter(self._model.id, 1, ModelFilter.EQ)
        model_filter.get_expression = Mock(side_effect=Exception("Unhandled exception"))

        model_filter_or = ModelFilterOr(model_filter, model_filter, model_filter)

        with self.assertRaises(FantasticoError):
            model_filter_or.build(Mock())
d726fb4760e4aa5a235cc0ba701a2901f20f2e31 | 7,182 | py | Python | main.py | AlexanderBrandborg/TextGameEngine | a00997b6300ba91a8981539b1cb2f36416c384ef | [
"MIT"
] | null | null | null | main.py | AlexanderBrandborg/TextGameEngine | a00997b6300ba91a8981539b1cb2f36416c384ef | [
"MIT"
] | null | null | null | main.py | AlexanderBrandborg/TextGameEngine | a00997b6300ba91a8981539b1cb2f36416c384ef | [
"MIT"
] | null | null | null | import json
import os
class Notifyer():
    """Minimal publish/subscribe hub: fans a trigger id out to all subscribers."""

    def __init__(self):
        # Subscribers are any objects exposing a ``notify(trigger_id)`` method.
        self.subscribers = []

    def add_subscriber(self, subscriber):
        """Register *subscriber* to receive future notifications."""
        self.subscribers += [subscriber]

    def notify(self, triggerId):
        """Forward *triggerId* to every registered subscriber, in order."""
        for listener in self.subscribers:
            listener.notify(triggerId)
# Module-level registries shared by every game object.
global_characters = []  # every Character built from game.json
global_items = []  # every Item declared in game.json
global_notifyer = Notifyer()  # event bus linking items, portals and state machines
class State():
    """One node of a character's dialogue/behaviour state machine."""

    def __init__(self, id, desc, entry_condition, reactive_items):
        """Store the node's id, text, entry trigger and item reactions."""
        self.id = id
        self.desc = desc
        self.entry_condition = entry_condition
        self.reactive_items = reactive_items
        # Reachable states; populated later via add_neighbour().
        self.neighbours = []

    def add_neighbour(self, neighbour):
        """Link *neighbour* as a state reachable from this one."""
        self.neighbours = self.neighbours + [neighbour]
class StateMachine():
    """Dialogue state machine built from a list of state DTOs.

    Subscribes itself to the global notifyer so that triggers fired anywhere
    can advance the current state.
    """

    def __init__(self, state_dto):
        """Build State nodes from *state_dto* and wire up their neighbours.

        The first DTO in the list becomes the initial state.  A neighbour id
        that matches no state raises StopIteration (malformed game file).
        """
        self.states = [State(s["id"], "\n".join(s["lines"]), s["entry_condition"], s["reactive_items"]) for s in state_dto]
        for dto, state in zip(state_dto, self.states):
            for n in dto["neighbours"]:
                state.add_neighbour(next(x for x in self.states if x.id == n))
        self.current_state = self.states[0]
        global_notifyer.add_subscriber(self)

    def get_desc(self):
        """Return the text of the current state."""
        return self.current_state.desc

    def trigger(self, item_id):
        """Fire the trigger that the current state maps *item_id* to.

        Raises KeyError if the current state does not react to that item.
        """
        trigger_id = self.current_state.reactive_items[str(item_id)]
        global_notifyer.notify(trigger_id)

    def notify(self, trigger_id):
        """Advance to the neighbour whose entry condition is *trigger_id*.

        Bug fix: ``next`` is now given a ``None`` default — previously a
        trigger with no matching neighbour raised StopIteration, making the
        guard below unreachable.  Unmatched triggers are simply ignored.
        """
        new_state = next((x for x in self.current_state.neighbours if x.entry_condition == trigger_id), None)
        if new_state is not None:
            self.current_state = new_state
class Item():
    """A collectible game object, identified by id and referenced by name."""

    def __init__(self, id, item_name, desc):
        """Record the item's numeric id, display name and description."""
        self.id = id
        self.desc = desc
        self.name = item_name
class Portal():
    """A door between rooms that a specific trigger can unlock."""

    def __init__(self, name, room_id, trigger_id, open):
        """Store the portal's identity and subscribe it to global triggers."""
        self.name = name
        self.room_id = room_id
        self.trigger_id = trigger_id
        self.open = open
        global_notifyer.add_subscriber(self)

    def notify(self, trigger_id):
        """Open this portal when its unlocking trigger fires; ignore others."""
        if trigger_id != self.trigger_id:
            return
        self.open = True
class Character():
    """A non-player character the player can look at, talk to and use items on."""

    def __init__(self, id, name, desc, states):
        self.id = id
        self.name = name
        self.desc = desc
        # Dialogue/behaviour is driven by a state machine built from the DTOs.
        self.state_machine = StateMachine(states)

    def look(self):
        """Return the character's static description."""
        return self.desc

    def talk(self):
        """Return the dialogue text of the character's current state."""
        return self.state_machine.get_desc()

    def use(self, player, item_name):
        """Use the named item from *player*'s inventory on this character.

        Does nothing if the player does not carry an item with that name.
        """
        item_id = next((x.id for x in global_items
                        if x.name == item_name and x.id in player.inventory), None)
        # Idiom fix: compare with 'is not None' — an item id of 0 is falsy
        # but perfectly valid.
        if item_id is not None:
            self.state_machine.trigger(item_id)
class Room():
    """A game location holding items, characters and portals to neighbours."""

    def __init__(self, id, entryText, desc, neighbours, items_ids, characters_ids):
        """Build the room; each neighbour DTO becomes a self-registering Portal."""
        self.id = id
        self.entryText = entryText
        self.desc = desc
        self.items_ids = items_ids
        self.characters_ids = characters_ids
        self.neighbours = [
            Portal(n["name"], n["roomId"], n["triggerId"], n["open"])
            for n in neighbours
        ]

    def enter_room(self):
        """Return the text shown when the player walks in."""
        return self.entryText

    def get_character(self, name):
        """Return the named character if present in this room, else raise."""
        character = None
        for candidate in global_characters:
            if candidate.name == name:
                character = candidate
                break
        if character and character.id in self.characters_ids:
            return character
        raise Exception("Character doesn't exists in this room")

    def take_item(self, item_name):
        """Remove the named item from the room; tell the caller if it worked."""
        item_id = next((x.id for x in global_items if x.name == item_name), None)
        if item_id not in self.items_ids:
            print("You can't find a {} in this room".format(item_name))
            return False
        self.items_ids.remove(item_id)
        return True

    def enter(self, portal_name):
        """Return the destination room id if the named portal is open, else None."""
        portal = next(x for x in self.neighbours if x.name == portal_name)
        return portal.room_id if portal.open else None
class Player():
    """The player: tracks the item ids currently carried in the inventory."""

    def __init__(self):
        self.inventory = []

    def add_to_inventory(self, item_name):
        """Add the named item's id to the inventory and announce it."""
        item_id = next((x.id for x in global_items if x.name == item_name), None)
        # 'is not None' (not truthiness): an item id of 0 is valid.
        if item_id is not None:
            self.inventory.append(item_id)
            # Fixed typo in the user-facing message ("aquired" -> "acquired").
            print("You have acquired a {}".format(item_name))

    def remove_from_inventory(self, item_name):
        """Drop the named item's id from the inventory, if present.

        Bug fix: the old truthiness test (``if item_id:``) silently skipped
        removal whenever the matched item had id 0.
        """
        item_id = next((x.id for x in global_items if x.name == item_name), None)
        if item_id is not None:
            self.inventory.remove(item_id)
def main():
    """Run the text-adventure loop described by ``game.json``.

    Loads rooms, characters and items from the layout file, then reads
    commands (look / talk / take / use / go) from stdin forever.
    """
    with open("game.json") as layout_file:
        layout = json.load(layout_file)

    print(layout["title"])
    print("Press any key to start")
    input()

    # Build every room from its DTO; the first room in the file is the start.
    rooms = []
    for roomDto in layout["rooms"]:
        rooms.append(Room(roomDto["id"],
                          roomDto["entryText"],
                          roomDto["description"],
                          roomDto["neighbours"],
                          roomDto["items"],
                          roomDto["characters"]))
    room = rooms[0]

    characters = [Character(c["id"], c["name"], c["description"], c["states"])
                  for c in layout["characters"]]
    global_characters.extend(characters)
    # Characters the player can currently interact with (those in this room).
    active_characters = [x for x in characters if x.id in room.characters_ids]

    entering = True
    player = Player()
    global_items.extend([Item(x["id"], x["name"], x["description"]) for x in layout["items"]])
    os.system('cls')  # NOTE(review): Windows-only; 'clear' on POSIX — confirm target OS.

    while True:
        if entering:
            entering = False
            print(room.enter_room())
        inpt = input()
        verbs = ["look", "talk", "take", "use", "go"]
        comps = [x.lower() for x in inpt.split()]
        if not comps:
            # Robustness fix: an empty line used to crash on comps[0].
            continue
        if comps[0] in verbs:
            if comps[0] == "look":
                if len(comps) == 1:
                    print(room.desc)
                elif len(comps) == 2:
                    char_name = comps[1]
                    char = next((x for x in active_characters if x.name == char_name), None)
                    if not char:
                        print("Character {} doesn't exist".format(char_name))
                    else:
                        print(char.look())
            elif comps[0] == "talk":
                char_name = comps[1]
                char = next((x for x in active_characters if x.name == char_name), None)
                if not char:
                    print("Character {} doesn't exist".format(char_name))
                else:
                    print(char.talk())
            elif comps[0] == "take":
                item_name = comps[1]
                if room.take_item(item_name):
                    player.add_to_inventory(item_name)
            elif comps[0] == "use":
                # NOTE(review): 'use X' with a missing target still raises
                # IndexError / get_character's Exception — left as-is.
                item_name = comps[1]
                target = comps[2]
                character = room.get_character(target)
                character.use(player, item_name)
            elif comps[0] == "go":
                portal_name = comps[1]
                room_id = room.enter(portal_name)
                if room_id is not None:
                    r = next((x for x in rooms if x.id == room_id), None)
                    if r:
                        room = r
                        entering = True
                        # Bug fix: refresh the interactable characters when the
                        # player changes rooms (the list was previously stale,
                        # so talk/look kept using the first room's cast).
                        active_characters = [x for x in global_characters
                                             if x.id in room.characters_ids]
            else:
                print("I don't understand")
        else:
            print("I don't understand")


if __name__ == "__main__":
    main()
d728c55b4f2bb7d8f35b667f6a550393bb16970f | 16,046 | py | Python | sdks/python/appcenter_sdk/models/InternalHockeyAppCompatibilityResponse.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | null | null | null | sdks/python/appcenter_sdk/models/InternalHockeyAppCompatibilityResponse.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 6 | 2019-10-23T06:38:53.000Z | 2022-01-22T07:57:58.000Z | sdks/python/appcenter_sdk/models/InternalHockeyAppCompatibilityResponse.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 2 | 2019-10-23T06:31:05.000Z | 2021-08-21T17:32:47.000Z | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class InternalHockeyAppCompatibilityResponse(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.

    Bug fix applied: the generator emitted ``allowed_values = [undefined, ...]``
    inside several property setters; ``undefined`` is not a Python name, so any
    assignment to those properties (including from ``__init__``) raised
    NameError.  Those dead validation lines have been removed.
    """
    """
    allowed enum values
    """
    slack = "slack"
    teams = "teams"
    generic = "generic"

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'owner_type': 'string',
        'os': 'string',
        'platform': 'string',
        'has_crashes': 'boolean',
        'has_feedback': 'boolean',
        'has_metrics': 'boolean',
        'has_external_builds': 'boolean',
        'has_specified_build_server_url': 'boolean',
        'has_distribution_groups_outside_of_ownership': 'boolean',
        'owner_has_distribution_groups': 'boolean',
        'bugtracker_type': 'string',
        'webhook_types': 'array'
    }

    attribute_map = {
        'owner_type': 'owner_type',
        'os': 'os',
        'platform': 'platform',
        'has_crashes': 'has_crashes',
        'has_feedback': 'has_feedback',
        'has_metrics': 'has_metrics',
        'has_external_builds': 'has_external_builds',
        'has_specified_build_server_url': 'has_specified_build_server_url',
        'has_distribution_groups_outside_of_ownership': 'has_distribution_groups_outside_of_ownership',
        'owner_has_distribution_groups': 'owner_has_distribution_groups',
        'bugtracker_type': 'bugtracker_type',
        'webhook_types': 'webhook_types'
    }

    def __init__(self, owner_type=None, os=None, platform=None, has_crashes=None, has_feedback=None, has_metrics=None, has_external_builds=None, has_specified_build_server_url=None, has_distribution_groups_outside_of_ownership=None, owner_has_distribution_groups=None, bugtracker_type=None, webhook_types=None):  # noqa: E501
        """InternalHockeyAppCompatibilityResponse - a model defined in Swagger"""  # noqa: E501
        self._owner_type = None
        self._os = None
        self._platform = None
        self._has_crashes = None
        self._has_feedback = None
        self._has_metrics = None
        self._has_external_builds = None
        self._has_specified_build_server_url = None
        self._has_distribution_groups_outside_of_ownership = None
        self._owner_has_distribution_groups = None
        self._bugtracker_type = None
        self._webhook_types = None
        self.discriminator = None
        if owner_type is not None:
            self.owner_type = owner_type
        if os is not None:
            self.os = os
        if platform is not None:
            self.platform = platform
        if has_crashes is not None:
            self.has_crashes = has_crashes
        if has_feedback is not None:
            self.has_feedback = has_feedback
        if has_metrics is not None:
            self.has_metrics = has_metrics
        if has_external_builds is not None:
            self.has_external_builds = has_external_builds
        if has_specified_build_server_url is not None:
            self.has_specified_build_server_url = has_specified_build_server_url
        if has_distribution_groups_outside_of_ownership is not None:
            self.has_distribution_groups_outside_of_ownership = has_distribution_groups_outside_of_ownership
        if owner_has_distribution_groups is not None:
            self.owner_has_distribution_groups = owner_has_distribution_groups
        if bugtracker_type is not None:
            self.bugtracker_type = bugtracker_type
        if webhook_types is not None:
            self.webhook_types = webhook_types

    @property
    def owner_type(self):
        """Gets the owner_type of this InternalHockeyAppCompatibilityResponse.  # noqa: E501

        The owner type of the app  # noqa: E501

        :return: The owner_type of this InternalHockeyAppCompatibilityResponse.  # noqa: E501
        :rtype: string
        """
        return self._owner_type

    @owner_type.setter
    def owner_type(self, owner_type):
        """Sets the owner_type of this InternalHockeyAppCompatibilityResponse.

        The owner type of the app  # noqa: E501

        :param owner_type: The owner_type of this InternalHockeyAppCompatibilityResponse.  # noqa: E501
        :type: string
        """
        # Bug fix: removed generated `allowed_values = [undefined, ...]` (NameError).
        self._owner_type = owner_type

    @property
    def os(self):
        """Gets the os of this InternalHockeyAppCompatibilityResponse.  # noqa: E501

        The OS of the app  # noqa: E501

        :return: The os of this InternalHockeyAppCompatibilityResponse.  # noqa: E501
        :rtype: string
        """
        return self._os

    @os.setter
    def os(self, os):
        """Sets the os of this InternalHockeyAppCompatibilityResponse.

        The OS of the app  # noqa: E501

        :param os: The os of this InternalHockeyAppCompatibilityResponse.  # noqa: E501
        :type: string
        """
        # Bug fix: removed generated `allowed_values = [undefined, ...]` (NameError).
        self._os = os

    @property
    def platform(self):
        """Gets the platform of this InternalHockeyAppCompatibilityResponse.  # noqa: E501

        The OS of the app  # noqa: E501

        :return: The platform of this InternalHockeyAppCompatibilityResponse.  # noqa: E501
        :rtype: string
        """
        return self._platform

    @platform.setter
    def platform(self, platform):
        """Sets the platform of this InternalHockeyAppCompatibilityResponse.

        The OS of the app  # noqa: E501

        :param platform: The platform of this InternalHockeyAppCompatibilityResponse.  # noqa: E501
        :type: string
        """
        # Bug fix: removed generated `allowed_values = [undefined, ...]` (NameError).
        self._platform = platform

    @property
    def has_crashes(self):
        """Gets the has_crashes of this InternalHockeyAppCompatibilityResponse.  # noqa: E501

        Does the HockeyApp app have crashes from within the last 90 days?  # noqa: E501

        :return: The has_crashes of this InternalHockeyAppCompatibilityResponse.  # noqa: E501
        :rtype: boolean
        """
        return self._has_crashes

    @has_crashes.setter
    def has_crashes(self, has_crashes):
        """Sets the has_crashes of this InternalHockeyAppCompatibilityResponse.

        Does the HockeyApp app have crashes from within the last 90 days?  # noqa: E501

        :param has_crashes: The has_crashes of this InternalHockeyAppCompatibilityResponse.  # noqa: E501
        :type: boolean
        """
        self._has_crashes = has_crashes

    @property
    def has_feedback(self):
        """Gets the has_feedback of this InternalHockeyAppCompatibilityResponse.  # noqa: E501

        Does the HockeyApp app have feedback from within the last 90 days?  # noqa: E501

        :return: The has_feedback of this InternalHockeyAppCompatibilityResponse.  # noqa: E501
        :rtype: boolean
        """
        return self._has_feedback

    @has_feedback.setter
    def has_feedback(self, has_feedback):
        """Sets the has_feedback of this InternalHockeyAppCompatibilityResponse.

        Does the HockeyApp app have feedback from within the last 90 days?  # noqa: E501

        :param has_feedback: The has_feedback of this InternalHockeyAppCompatibilityResponse.  # noqa: E501
        :type: boolean
        """
        self._has_feedback = has_feedback

    @property
    def has_metrics(self):
        """Gets the has_metrics of this InternalHockeyAppCompatibilityResponse.  # noqa: E501

        Does the HockeyApp app have metrics from within the last 30 days?  # noqa: E501

        :return: The has_metrics of this InternalHockeyAppCompatibilityResponse.  # noqa: E501
        :rtype: boolean
        """
        return self._has_metrics

    @has_metrics.setter
    def has_metrics(self, has_metrics):
        """Sets the has_metrics of this InternalHockeyAppCompatibilityResponse.

        Does the HockeyApp app have metrics from within the last 30 days?  # noqa: E501

        :param has_metrics: The has_metrics of this InternalHockeyAppCompatibilityResponse.  # noqa: E501
        :type: boolean
        """
        self._has_metrics = has_metrics

    @property
    def has_external_builds(self):
        """Gets the has_external_builds of this InternalHockeyAppCompatibilityResponse.  # noqa: E501

        Does the HockeyApp app have any external builds?  # noqa: E501

        :return: The has_external_builds of this InternalHockeyAppCompatibilityResponse.  # noqa: E501
        :rtype: boolean
        """
        return self._has_external_builds

    @has_external_builds.setter
    def has_external_builds(self, has_external_builds):
        """Sets the has_external_builds of this InternalHockeyAppCompatibilityResponse.

        Does the HockeyApp app have any external builds?  # noqa: E501

        :param has_external_builds: The has_external_builds of this InternalHockeyAppCompatibilityResponse.  # noqa: E501
        :type: boolean
        """
        self._has_external_builds = has_external_builds

    @property
    def has_specified_build_server_url(self):
        """Gets the has_specified_build_server_url of this InternalHockeyAppCompatibilityResponse.  # noqa: E501

        Does the HockeyApp app have any build server URLs specified?  # noqa: E501

        :return: The has_specified_build_server_url of this InternalHockeyAppCompatibilityResponse.  # noqa: E501
        :rtype: boolean
        """
        return self._has_specified_build_server_url

    @has_specified_build_server_url.setter
    def has_specified_build_server_url(self, has_specified_build_server_url):
        """Sets the has_specified_build_server_url of this InternalHockeyAppCompatibilityResponse.

        Does the HockeyApp app have any build server URLs specified?  # noqa: E501

        :param has_specified_build_server_url: The has_specified_build_server_url of this InternalHockeyAppCompatibilityResponse.  # noqa: E501
        :type: boolean
        """
        self._has_specified_build_server_url = has_specified_build_server_url

    @property
    def has_distribution_groups_outside_of_ownership(self):
        """Gets the has_distribution_groups_outside_of_ownership of this InternalHockeyAppCompatibilityResponse.  # noqa: E501

        Does the HockeyApp app have an associated Distribution Group that is owned by a different owner?  # noqa: E501

        :return: The has_distribution_groups_outside_of_ownership of this InternalHockeyAppCompatibilityResponse.  # noqa: E501
        :rtype: boolean
        """
        return self._has_distribution_groups_outside_of_ownership

    @has_distribution_groups_outside_of_ownership.setter
    def has_distribution_groups_outside_of_ownership(self, has_distribution_groups_outside_of_ownership):
        """Sets the has_distribution_groups_outside_of_ownership of this InternalHockeyAppCompatibilityResponse.

        Does the HockeyApp app have an associated Distribution Group that is owned by a different owner?  # noqa: E501

        :param has_distribution_groups_outside_of_ownership: The has_distribution_groups_outside_of_ownership of this InternalHockeyAppCompatibilityResponse.  # noqa: E501
        :type: boolean
        """
        self._has_distribution_groups_outside_of_ownership = has_distribution_groups_outside_of_ownership

    @property
    def owner_has_distribution_groups(self):
        """Gets the owner_has_distribution_groups of this InternalHockeyAppCompatibilityResponse.  # noqa: E501

        Does the HockeyApp app's owner own any Distribution Groups?  # noqa: E501

        :return: The owner_has_distribution_groups of this InternalHockeyAppCompatibilityResponse.  # noqa: E501
        :rtype: boolean
        """
        return self._owner_has_distribution_groups

    @owner_has_distribution_groups.setter
    def owner_has_distribution_groups(self, owner_has_distribution_groups):
        """Sets the owner_has_distribution_groups of this InternalHockeyAppCompatibilityResponse.

        Does the HockeyApp app's owner own any Distribution Groups?  # noqa: E501

        :param owner_has_distribution_groups: The owner_has_distribution_groups of this InternalHockeyAppCompatibilityResponse.  # noqa: E501
        :type: boolean
        """
        self._owner_has_distribution_groups = owner_has_distribution_groups

    @property
    def bugtracker_type(self):
        """Gets the bugtracker_type of this InternalHockeyAppCompatibilityResponse.  # noqa: E501

        Does the HockeyApp app have any bugtracker configured? Which type?  # noqa: E501

        :return: The bugtracker_type of this InternalHockeyAppCompatibilityResponse.  # noqa: E501
        :rtype: string
        """
        return self._bugtracker_type

    @bugtracker_type.setter
    def bugtracker_type(self, bugtracker_type):
        """Sets the bugtracker_type of this InternalHockeyAppCompatibilityResponse.

        Does the HockeyApp app have any bugtracker configured? Which type?  # noqa: E501

        :param bugtracker_type: The bugtracker_type of this InternalHockeyAppCompatibilityResponse.  # noqa: E501
        :type: string
        """
        # Bug fix: removed generated `allowed_values = [undefined, ...]` (NameError).
        self._bugtracker_type = bugtracker_type

    @property
    def webhook_types(self):
        """Gets the webhook_types of this InternalHockeyAppCompatibilityResponse.  # noqa: E501

        Does the HockeyApp app have any webhooks configured? Which types?  # noqa: E501

        :return: The webhook_types of this InternalHockeyAppCompatibilityResponse.  # noqa: E501
        :rtype: array
        """
        return self._webhook_types

    @webhook_types.setter
    def webhook_types(self, webhook_types):
        """Sets the webhook_types of this InternalHockeyAppCompatibilityResponse.

        Does the HockeyApp app have any webhooks configured? Which types?  # noqa: E501

        :param webhook_types: The webhook_types of this InternalHockeyAppCompatibilityResponse.  # noqa: E501
        :type: array
        """
        self._webhook_types = webhook_types

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, InternalHockeyAppCompatibilityResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 37.316279 | 325 | 0.680294 | 15,745 | 0.981241 | 0 | 0 | 10,430 | 0.650006 | 0 | 0 | 9,258 | 0.576966 |
d72946ce37dfd233bdc1990a40d96dc9871865f7 | 3,356 | py | Python | contrib/experiments/interpretation/penobscot/local/default.py | elmajdma/seismic-deeplearning | bc084abe153509c40b45f8bf0f80dfda1049d7dc | [
"MIT"
] | 270 | 2019-12-17T13:40:51.000Z | 2022-03-20T10:02:11.000Z | contrib/experiments/interpretation/penobscot/local/default.py | elmajdma/seismic-deeplearning | bc084abe153509c40b45f8bf0f80dfda1049d7dc | [
"MIT"
] | 233 | 2019-12-18T17:59:36.000Z | 2021-08-03T13:43:49.000Z | contrib/experiments/interpretation/penobscot/local/default.py | elmajdma/seismic-deeplearning | bc084abe153509c40b45f8bf0f80dfda1049d7dc | [
"MIT"
] | 118 | 2019-12-17T13:41:43.000Z | 2022-03-29T02:06:36.000Z | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from yacs.config import CfgNode as CN
# Root configuration node; cloned/merged by update_config() below.
_C = CN()

# --- general runtime settings ---
_C.OUTPUT_DIR = "output"  # This will be the base directory for all output, such as logs and saved models
_C.LOG_DIR = ""  # This will be a subdirectory inside OUTPUT_DIR
_C.GPUS = (0,)
_C.WORKERS = 4
_C.PRINT_FREQ = 20
_C.AUTO_RESUME = False
_C.PIN_MEMORY = True
_C.LOG_CONFIG = "logging.conf"
_C.SEED = 42
_C.OPENCV_BORDER_CONSTANT = 0

# size of voxel cube: WINDOW_SIZE x WINDOW_SIZE x WINDOW_SIZE; used for 3D models only
_C.WINDOW_SIZE = 65

# Cudnn related params
_C.CUDNN = CN()
_C.CUDNN.BENCHMARK = True
_C.CUDNN.DETERMINISTIC = False
_C.CUDNN.ENABLED = True

# DATASET related params
_C.DATASET = CN()
_C.DATASET.ROOT = ""
_C.DATASET.NUM_CLASSES = 7
# Per-class loss weights (7 entries, one per class).
_C.DATASET.CLASS_WEIGHTS = [
    0.02630481,
    0.05448931,
    0.0811898,
    0.01866496,
    0.15868563,
    0.0875993,
    0.5730662,
]
_C.DATASET.INLINE_HEIGHT = 1501
_C.DATASET.INLINE_WIDTH = 481

# common params for NETWORK
_C.MODEL = CN()
_C.MODEL.NAME = "resnet_unet"
_C.MODEL.IN_CHANNELS = 1
_C.MODEL.PRETRAINED = ""
_C.MODEL.EXTRA = CN(new_allowed=True)

# training
_C.TRAIN = CN()
_C.TRAIN.COMPLETE_PATCHES_ONLY = True
_C.TRAIN.MIN_LR = 0.001
_C.TRAIN.MAX_LR = 0.01
_C.TRAIN.MOMENTUM = 0.9
_C.TRAIN.BEGIN_EPOCH = 0
_C.TRAIN.END_EPOCH = 300
_C.TRAIN.BATCH_SIZE_PER_GPU = 32
_C.TRAIN.WEIGHT_DECAY = 0.0001
_C.TRAIN.SNAPSHOTS = 5
_C.TRAIN.MODEL_DIR = "models"  # This will be a subdirectory inside OUTPUT_DIR
_C.TRAIN.AUGMENTATION = True
_C.TRAIN.STRIDE = 64
_C.TRAIN.PATCH_SIZE = 128
# Per-channel normalization statistics for the input patches.
_C.TRAIN.MEAN = [-0.0001777, 0.49, -0.0000688]  # 0.0009996710808862074
_C.TRAIN.STD = [0.14076, 0.2717, 0.06286]  # 0.20976548783479299
_C.TRAIN.MAX = 1
_C.TRAIN.DEPTH = "patch"  # Options are none, patch, and section
# None adds no depth information and the num of channels remains at 1
# Patch adds depth per patch so is simply the height of that patch from 0 to 1, channels=3
# Section adds depth per section so contains depth information for the whole section, channels=3
_C.TRAIN.AUGMENTATIONS = CN()
_C.TRAIN.AUGMENTATIONS.RESIZE = CN()
_C.TRAIN.AUGMENTATIONS.RESIZE.HEIGHT = 256
_C.TRAIN.AUGMENTATIONS.RESIZE.WIDTH = 256
_C.TRAIN.AUGMENTATIONS.PAD = CN()
_C.TRAIN.AUGMENTATIONS.PAD.HEIGHT = 256
_C.TRAIN.AUGMENTATIONS.PAD.WIDTH = 256

# validation
_C.VALIDATION = CN()
_C.VALIDATION.BATCH_SIZE_PER_GPU = 32
_C.VALIDATION.COMPLETE_PATCHES_ONLY = True

# TEST
_C.TEST = CN()
_C.TEST.MODEL_PATH = ""
_C.TEST.COMPLETE_PATCHES_ONLY = True
_C.TEST.AUGMENTATIONS = CN()
_C.TEST.AUGMENTATIONS.RESIZE = CN()
_C.TEST.AUGMENTATIONS.RESIZE.HEIGHT = 256
_C.TEST.AUGMENTATIONS.RESIZE.WIDTH = 256
_C.TEST.AUGMENTATIONS.PAD = CN()
_C.TEST.AUGMENTATIONS.PAD.HEIGHT = 256
_C.TEST.AUGMENTATIONS.PAD.WIDTH = 256
def update_config(cfg, options=None, config_file=None):
    """Unfreeze *cfg*, overlay the YAML file and then the CLI options, refreeze.

    File values are applied first so command-line options take precedence.
    """
    merges = (
        (config_file, cfg.merge_from_file),
        (options, cfg.merge_from_list),
    )
    cfg.defrost()
    for payload, merge in merges:
        if payload:
            merge(payload)
    cfg.freeze()
if __name__ == "__main__":
import sys
with open(sys.argv[1], "w") as f:
print(_C, file=f)
| 27.284553 | 105 | 0.700834 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 986 | 0.293802 |
d72956bd1ede9e6b4c33e5348df8b0b68299b20e | 743 | py | Python | evaluation/dwf_power.py | TrustedThings/litepuf | 8b0f93af626775e7971d25643081af1c2829bf3d | [
"BSD-2-Clause"
] | null | null | null | evaluation/dwf_power.py | TrustedThings/litepuf | 8b0f93af626775e7971d25643081af1c2829bf3d | [
"BSD-2-Clause"
] | null | null | null | evaluation/dwf_power.py | TrustedThings/litepuf | 8b0f93af626775e7971d25643081af1c2829bf3d | [
"BSD-2-Clause"
] | null | null | null | from ctypes import *
from dwfconstants import *
dwf = cdll.LoadLibrary("libdwf.so")
hdwf = c_int()
dwf.FDwfParamSet(DwfParamOnClose, c_int(0)) # 0 = run, 1 = stop, 2 = shutdown
print("Opening first device")
dwf.FDwfDeviceOpen(c_int(-1), byref(hdwf))
if hdwf.value == hdwfNone.value:
print("failed to open device")
quit()
print(f'{hdwf=}')
dwf.FDwfDeviceAutoConfigureSet(hdwf, c_int(0))
# set up analog IO channel nodes
# enable positive supply
dwf.FDwfAnalogIOChannelNodeSet(hdwf, c_int(0), c_int(0), c_double(True))
# set voltage to 1.2 V
dwf.FDwfAnalogIOChannelNodeSet(hdwf, c_int(0), c_int(1), c_double(1.2))
# master enable
dwf.FDwfAnalogIOEnableSet(hdwf, c_int(True))
dwf.FDwfAnalogIOConfigure(hdwf)
dwf.FDwfDeviceClose(hdwf) | 28.576923 | 77 | 0.74428 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 192 | 0.258412 |
d729c41c45e0ad91c39b5482430d352eba2c6c9e | 699 | py | Python | src/backend/common/sitevars/flask_secrets.py | ofekashery/the-blue-alliance | df0e47d054161fe742ac6198a6684247d0713279 | [
"MIT"
] | 266 | 2015-01-04T00:10:48.000Z | 2022-03-28T18:42:05.000Z | src/backend/common/sitevars/flask_secrets.py | ofekashery/the-blue-alliance | df0e47d054161fe742ac6198a6684247d0713279 | [
"MIT"
] | 2,673 | 2015-01-01T20:14:33.000Z | 2022-03-31T18:17:16.000Z | src/backend/common/sitevars/flask_secrets.py | ofekashery/the-blue-alliance | df0e47d054161fe742ac6198a6684247d0713279 | [
"MIT"
] | 230 | 2015-01-04T00:10:48.000Z | 2022-03-26T18:12:04.000Z | from typing import TypedDict
from backend.common.sitevars.sitevar import Sitevar
class ContentType(TypedDict):
    # Shape of this sitevar's JSON payload: a single Flask session secret key.
    secret_key: str
class FlaskSecrets(Sitevar[ContentType]):
    """Sitevar holding the Flask session secret key, with a safe fallback."""

    DEFAULT_SECRET_KEY: str = "thebluealliance"

    @staticmethod
    def key() -> str:
        return "flask.secrets"

    @staticmethod
    def description() -> str:
        return "Secret key for Flask session"

    @staticmethod
    def default_value() -> ContentType:
        return ContentType(secret_key=FlaskSecrets.DEFAULT_SECRET_KEY)

    @classmethod
    def secret_key(cls) -> str:
        """Return the stored secret key, or the default when unset/empty."""
        stored = cls.get().get("secret_key")
        return stored or FlaskSecrets.DEFAULT_SECRET_KEY
| 24.103448 | 76 | 0.706724 | 611 | 0.874106 | 0 | 0 | 449 | 0.642346 | 0 | 0 | 74 | 0.105866 |
d729d478b0bd147f173fcca0692b2e9d89ab472f | 1,782 | py | Python | cornflow-server/migrations/versions/f3bee20314a2_.py | ggsdc/corn | 4c17c46a70f95b8882bcb6a55ef7daa1f69e0456 | [
"MIT"
] | 2 | 2020-07-09T20:58:47.000Z | 2020-07-20T20:40:46.000Z | cornflow-server/migrations/versions/f3bee20314a2_.py | baobabsoluciones/cornflow | bd7cae22107e5fe148704d5f41d4f58f9c410b40 | [
"Apache-2.0"
] | 2 | 2022-03-31T08:42:10.000Z | 2022-03-31T12:05:23.000Z | cornflow-server/migrations/versions/f3bee20314a2_.py | ggsdc/corn | 4c17c46a70f95b8882bcb6a55ef7daa1f69e0456 | [
"MIT"
] | null | null | null | """
Added DAG master table and DAG permissions table
Revision ID: f3bee20314a2
Revises: ca449af8034c
Create Date: 2021-12-14 14:41:16.096297
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "f3bee20314a2"
down_revision = "ca449af8034c"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"deployed_dags",
sa.Column("created_at", sa.DateTime(), nullable=False),
sa.Column("updated_at", sa.DateTime(), nullable=False),
sa.Column("deleted_at", sa.DateTime(), nullable=True),
sa.Column("id", sa.String(length=128), nullable=False),
sa.Column("description", sa.TEXT(), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"permission_dag",
sa.Column("created_at", sa.DateTime(), nullable=False),
sa.Column("updated_at", sa.DateTime(), nullable=False),
sa.Column("deleted_at", sa.DateTime(), nullable=True),
sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
sa.Column("dag_id", sa.String(length=128), nullable=False),
sa.Column("user_id", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["dag_id"],
["deployed_dags.id"],
),
sa.ForeignKeyConstraint(
["user_id"],
["users.id"],
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("dag_id", "user_id"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("permission_dag")
op.drop_table("deployed_dags")
# ### end Alembic commands ###
| 30.20339 | 74 | 0.626263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 638 | 0.358025 |
d72a11a2c8bf6250447fd4fa6eb0f797bb01fbab | 6,184 | py | Python | autoPyTorch/core/autonet_classes/autonet_feature_data.py | thomascherickal/Auto-PyTorch | 9e25a3bdef8e836e63979229eef77830cd64bb53 | [
"BSD-3-Clause"
] | 1 | 2019-09-02T00:37:52.000Z | 2019-09-02T00:37:52.000Z | autoPyTorch/core/autonet_classes/autonet_feature_data.py | thomascherickal/Auto-PyTorch | 9e25a3bdef8e836e63979229eef77830cd64bb53 | [
"BSD-3-Clause"
] | null | null | null | autoPyTorch/core/autonet_classes/autonet_feature_data.py | thomascherickal/Auto-PyTorch | 9e25a3bdef8e836e63979229eef77830cd64bb53 | [
"BSD-3-Clause"
] | 1 | 2019-09-02T00:40:30.000Z | 2019-09-02T00:40:30.000Z |
__author__ = "Max Dippel, Michael Burkart and Matthias Urban"
__version__ = "0.0.1"
__license__ = "BSD"
from autoPyTorch.core.api import AutoNet
class AutoNetFeatureData(AutoNet):
@classmethod
def get_default_pipeline(cls):
from autoPyTorch.pipeline.base.pipeline import Pipeline
from autoPyTorch.pipeline.nodes.autonet_settings import AutoNetSettings
from autoPyTorch.pipeline.nodes.optimization_algorithm import OptimizationAlgorithm
from autoPyTorch.pipeline.nodes.cross_validation import CrossValidation
from autoPyTorch.pipeline.nodes.imputation import Imputation
from autoPyTorch.pipeline.nodes.normalization_strategy_selector import NormalizationStrategySelector
from autoPyTorch.pipeline.nodes.one_hot_encoding import OneHotEncoding
from autoPyTorch.pipeline.nodes.preprocessor_selector import PreprocessorSelector
from autoPyTorch.pipeline.nodes.resampling_strategy_selector import ResamplingStrategySelector
from autoPyTorch.pipeline.nodes.embedding_selector import EmbeddingSelector
from autoPyTorch.pipeline.nodes.network_selector import NetworkSelector
from autoPyTorch.pipeline.nodes.optimizer_selector import OptimizerSelector
from autoPyTorch.pipeline.nodes.lr_scheduler_selector import LearningrateSchedulerSelector
from autoPyTorch.pipeline.nodes.log_functions_selector import LogFunctionsSelector
from autoPyTorch.pipeline.nodes.metric_selector import MetricSelector
from autoPyTorch.pipeline.nodes.loss_module_selector import LossModuleSelector
from autoPyTorch.pipeline.nodes.train_node import TrainNode
# build the pipeline
pipeline = Pipeline([
AutoNetSettings(),
OptimizationAlgorithm([
CrossValidation([
Imputation(),
NormalizationStrategySelector(),
OneHotEncoding(),
PreprocessorSelector(),
ResamplingStrategySelector(),
EmbeddingSelector(),
NetworkSelector(),
OptimizerSelector(),
LearningrateSchedulerSelector(),
LogFunctionsSelector(),
MetricSelector(),
LossModuleSelector(),
TrainNode()
])
])
])
cls._apply_default_pipeline_settings(pipeline)
return pipeline
@staticmethod
def _apply_default_pipeline_settings(pipeline):
from autoPyTorch.pipeline.nodes.normalization_strategy_selector import NormalizationStrategySelector
from autoPyTorch.pipeline.nodes.preprocessor_selector import PreprocessorSelector
from autoPyTorch.pipeline.nodes.embedding_selector import EmbeddingSelector
from autoPyTorch.pipeline.nodes.network_selector import NetworkSelector
from autoPyTorch.pipeline.nodes.optimizer_selector import OptimizerSelector
from autoPyTorch.pipeline.nodes.lr_scheduler_selector import LearningrateSchedulerSelector
from autoPyTorch.pipeline.nodes.train_node import TrainNode
from autoPyTorch.components.networks.feature import MlpNet, ResNet, ShapedMlpNet, ShapedResNet
from autoPyTorch.components.optimizer.optimizer import AdamOptimizer, SgdOptimizer
from autoPyTorch.components.lr_scheduler.lr_schedulers import SchedulerCosineAnnealingWithRestartsLR, SchedulerNone, \
SchedulerCyclicLR, SchedulerExponentialLR, SchedulerReduceLROnPlateau, SchedulerReduceLROnPlateau, SchedulerStepLR
from autoPyTorch.components.networks.feature import LearnedEntityEmbedding
from sklearn.preprocessing import MinMaxScaler, StandardScaler, MaxAbsScaler
from autoPyTorch.components.preprocessing.feature_preprocessing import \
TruncatedSVD, FastICA, RandomKitchenSinks, KernelPCA, Nystroem
from autoPyTorch.training.early_stopping import EarlyStopping
from autoPyTorch.training.mixup import Mixup
pre_selector = pipeline[PreprocessorSelector.get_name()]
pre_selector.add_preprocessor('truncated_svd', TruncatedSVD)
pre_selector.add_preprocessor('fast_ica', FastICA)
pre_selector.add_preprocessor('kitchen_sinks', RandomKitchenSinks)
pre_selector.add_preprocessor('kernel_pca', KernelPCA)
pre_selector.add_preprocessor('nystroem', Nystroem)
norm_selector = pipeline[NormalizationStrategySelector.get_name()]
norm_selector.add_normalization_strategy('minmax', MinMaxScaler)
norm_selector.add_normalization_strategy('standardize', StandardScaler)
norm_selector.add_normalization_strategy('maxabs', MaxAbsScaler)
emb_selector = pipeline[EmbeddingSelector.get_name()]
emb_selector.add_embedding_module('learned', LearnedEntityEmbedding)
net_selector = pipeline[NetworkSelector.get_name()]
net_selector.add_network('mlpnet', MlpNet)
net_selector.add_network('shapedmlpnet', ShapedMlpNet)
net_selector.add_network('resnet', ResNet)
net_selector.add_network('shapedresnet', ShapedResNet)
opt_selector = pipeline[OptimizerSelector.get_name()]
opt_selector.add_optimizer('adam', AdamOptimizer)
opt_selector.add_optimizer('sgd', SgdOptimizer)
lr_selector = pipeline[LearningrateSchedulerSelector.get_name()]
lr_selector.add_lr_scheduler('cosine_annealing', SchedulerCosineAnnealingWithRestartsLR)
lr_selector.add_lr_scheduler('cyclic', SchedulerCyclicLR)
lr_selector.add_lr_scheduler('exponential', SchedulerExponentialLR)
lr_selector.add_lr_scheduler('step', SchedulerStepLR)
lr_selector.add_lr_scheduler('plateau', SchedulerReduceLROnPlateau)
lr_selector.add_lr_scheduler('none', SchedulerNone)
train_node = pipeline[TrainNode.get_name()]
train_node.add_training_technique("early_stopping", EarlyStopping)
train_node.add_batch_loss_computation_technique("mixup", Mixup)
| 52.854701 | 126 | 0.733991 | 6,035 | 0.975906 | 0 | 0 | 5,984 | 0.967658 | 0 | 0 | 318 | 0.051423 |
d72b1da8920d5a9113865f0b4d64b0fa86860726 | 2,421 | py | Python | src/dispatch/incident_priority/service.py | mclueppers/dispatch | b9e524ca10e5b2e95490b388db61c58e79e975e2 | [
"Apache-2.0"
] | 1 | 2022-02-23T02:42:10.000Z | 2022-02-23T02:42:10.000Z | src/dispatch/incident_priority/service.py | mclueppers/dispatch | b9e524ca10e5b2e95490b388db61c58e79e975e2 | [
"Apache-2.0"
] | 1 | 2021-04-30T21:36:14.000Z | 2021-04-30T21:36:14.000Z | src/dispatch/incident_priority/service.py | AlexaKelley/dispatch | b46d8416a0e4ec9badb76f6f3d1765c6093203f8 | [
"Apache-2.0"
] | 1 | 2021-04-08T10:02:57.000Z | 2021-04-08T10:02:57.000Z | from typing import List, Optional
from fastapi.encoders import jsonable_encoder
from sqlalchemy.sql.expression import true
from .models import IncidentPriority, IncidentPriorityCreate, IncidentPriorityUpdate
def get(*, db_session, incident_priority_id: int) -> Optional[IncidentPriority]:
"""Returns an incident priority based on the given priority id."""
return (
db_session.query(IncidentPriority)
.filter(IncidentPriority.id == incident_priority_id)
.one_or_none()
)
def get_default(*, db_session):
"""Returns the current default incident_priority."""
return (
db_session.query(IncidentPriority).filter(IncidentPriority.default == true()).one_or_none()
)
def get_by_name(*, db_session, name: str) -> Optional[IncidentPriority]:
"""Returns an incident priority based on the given priority name."""
return db_session.query(IncidentPriority).filter(IncidentPriority.name == name).one_or_none()
def get_by_slug(*, db_session, slug: str) -> Optional[IncidentPriority]:
"""Returns an incident priority based on the given type slug."""
return db_session.query(IncidentPriority).filter(IncidentPriority.slug == slug).one_or_none()
def get_all(*, db_session) -> List[Optional[IncidentPriority]]:
"""Returns all incident priorities."""
return db_session.query(IncidentPriority)
def create(*, db_session, incident_priority_in: IncidentPriorityCreate) -> IncidentPriority:
"""Creates an incident priority."""
incident_priority = IncidentPriority(**incident_priority_in.dict())
db_session.add(incident_priority)
db_session.commit()
return incident_priority
def update(
*, db_session, incident_priority: IncidentPriority, incident_priority_in: IncidentPriorityUpdate
) -> IncidentPriority:
"""Updates an incident priority."""
incident_priority_data = jsonable_encoder(incident_priority)
update_data = incident_priority_in.dict(skip_defaults=True)
for field in incident_priority_data:
if field in update_data:
setattr(incident_priority, field, update_data[field])
db_session.add(incident_priority)
db_session.commit()
return incident_priority
def delete(*, db_session, incident_priority_id: int):
"""Deletes an incident priority."""
db_session.query(IncidentPriority).filter(IncidentPriority.id == incident_priority_id).delete()
db_session.commit()
| 35.602941 | 100 | 0.748451 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 393 | 0.16233 |
d72c0a38e98d75d19247ea6d9280edc5b014f693 | 1,143 | py | Python | Ex036.py | andrade-lcs/ex_curso_em_video_python | f2d029efe7a20cdf0fcb5b602f9992e27d37c263 | [
"MIT"
] | null | null | null | Ex036.py | andrade-lcs/ex_curso_em_video_python | f2d029efe7a20cdf0fcb5b602f9992e27d37c263 | [
"MIT"
] | null | null | null | Ex036.py | andrade-lcs/ex_curso_em_video_python | f2d029efe7a20cdf0fcb5b602f9992e27d37c263 | [
"MIT"
] | null | null | null | from time import sleep
aa = 0
print('\033[2;31;40m-=\033[m'*40)
while aa == 0:
print('Este software ira calcular seu financiamento')
sleep(1)
a = float(input('Qual é a sua renda mensal? R$'))
sleep(1)
b = float(input('Qual é o valor do imóvel? R$'))
sleep(1)
c = float(input('Em quantos anos deseja efetuar o pagamento? '))
sleep(1)
print('\33[2;31;40m-=\33[m'*40,'\nAguarde os calsulos')
p = b/(c*12)
if p < a*0.3:
sleep(1)
print('\33[2;31;40m-=\33[m'*40,)
print('\nPARABÉNS!!!\nVocê teve seu financiamento aprovado!\nO imóvel de valor R${:.2f} será financiado em {:.0f} anos com prestações de R${:.2f}.\n\n\n'.format(b, c, p))
aa = 1
sleep(2)
else:
sleep(1)
print('\33[2;31;40m-=\33[m' * 40, '\nAguarde os calsulos')
print('Me desculpe, mas o seu fianciamento não foi altorizado.\nTente diminuir o valor fianciado ou aumentar o prazo de financiamento.\n')
sleep(2)
print('\33[2;31;40m-=\33[m'*40,)
aa = int(input('Digige 0 para tentar novamente ou 1 para sair\n'))
print('\33[2;31;40m-=\33[m'*40,'\nFIM') | 39.413793 | 178 | 0.591426 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 671 | 0.58196 |
d72d5a4417022f67b70e7c3aa53322cdcc3be526 | 1,295 | py | Python | neuralnetworksanddeeplearning_michael_nielsen/chapter_1/0020_single_perceptron_as_NAND.py | researcherben/learn_machine_learning | db117703786b46d7b3eb5c20b7240d006c01b90d | [
"Apache-2.0"
] | null | null | null | neuralnetworksanddeeplearning_michael_nielsen/chapter_1/0020_single_perceptron_as_NAND.py | researcherben/learn_machine_learning | db117703786b46d7b3eb5c20b7240d006c01b90d | [
"Apache-2.0"
] | null | null | null | neuralnetworksanddeeplearning_michael_nielsen/chapter_1/0020_single_perceptron_as_NAND.py | researcherben/learn_machine_learning | db117703786b46d7b3eb5c20b7240d006c01b90d | [
"Apache-2.0"
] | 1 | 2019-03-03T16:26:16.000Z | 2019-03-03T16:26:16.000Z | '''
Single perceptron can replicate a NAND gate
https://en.wikipedia.org/wiki/NAND_logic
inputs | output
0 0 1
0 1 1
1 0 1
1 1 0
'''
def dot_product(vec1,vec2):
if (len(vec1) != len(vec2)):
print("input vector lengths are not equal")
print(len(vec1))
print(len(vec2))
reslt=0
for indx in range(len(vec1)):
reslt=reslt+vec1[indx]*vec2[indx]
return reslt
def perceptron(input_binary_vector,weight_vector,bias):
reslt = dot_product(input_binary_vector,weight_vector)
if ( reslt + bias <= 0 ): # aka reslt <= threshold
output=0
else: # reslt > threshold, aka reslt + bias > 0
output=1
return output
def nand(input_binary_vector):
if (len(input_binary_vector) != 2):
print("input vector length is not 2; this is an NAND gate!")
return int(not (input_binary_vector[0] and input_binary_vector[1]))
# weight. Higher value means more important
w = [-2, -2]
bias = 3
for indx in range(4):
# input decision factors; value 0 or 1
if (indx == 0): x = [ 0, 0]
elif (indx == 1): x = [ 1, 0]
elif (indx == 2): x = [ 0, 1]
elif (indx == 3): x = [ 1, 1]
else: print("error in indx")
print("input: "+str(x[0])+", "+str(x[1]))
print("preceptron: "+str(perceptron(x,w,bias)))
print("NAND: "+str(nand(x)))
| 23.981481 | 69 | 0.626255 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 445 | 0.343629 |
d72d81ba3f6fdaef1e59e65c0b11f8060df26e53 | 5,158 | py | Python | job-search/Indeed_Scraper.py | oscarevolves/JobSearch_WebScraper | 776d137e7cdd636774e6c731894dc6c2a9a0a03b | [
"Apache-2.0"
] | 2 | 2019-11-25T20:16:11.000Z | 2019-12-13T19:39:33.000Z | job-search/Indeed_Scraper.py | oscarevolves/JobSearch_WebScraper | 776d137e7cdd636774e6c731894dc6c2a9a0a03b | [
"Apache-2.0"
] | 3 | 2019-11-27T20:40:10.000Z | 2019-12-13T09:36:15.000Z | job-search/Indeed_Scraper.py | oscarevolves/JobSearch_WebScraper | 776d137e7cdd636774e6c731894dc6c2a9a0a03b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Author: Oscar Villagrana
from bs4 import BeautifulSoup
import re
import pandas as pd
import requests
#-------------------------------------------------
# Making the Soup
#-------------------------------------------------
def format_page():
print("First we will format the job search with rearch keywords")
sub0 = input("Enter first Job Search keyword: ")
sub1 = input("Enter second Job Search keyword: ")
page = "https://www.indeed.com/jobs?as_and={0}+{1}&as_phr=&as_any=&as_not=&as_ttl=&as_cmp=&jt=all&st=&as_src=&salary=&radius=0&l=San+Francisco&fromage=any&limit=50&sort=&psf=advsrch"
a = page.format(sub0, sub1)
return a
page = "https://www.indeed.com/q-software-developer-l-San-Francisco-jobs.html"
# page = format_page()
headers = {'User-Agent':'Mozilla/5.0'}
def get_soup():
session = requests.Session()
pageTree = session.get(page, headers=headers)
return BeautifulSoup(pageTree.content, 'html.parser')
pageSoup = get_soup()
# print(pageSoup)
#----------------------------------------------------
# Getters
#----------------------------------------------------
def print_company_names():
companyName = pageSoup.find_all('span', class_='company')
for span in companyName:
print(span.text)
# print_company_names()
def print_job_titles():
jobTitle = pageSoup.find_all('div', class_='title')
for span in jobTitle:
print(span.text)
# print_job_titles()
# Prints company and job information
def print_company_and_jobs():
companyName = pageSoup.find_all('span', class_='company')
jobTitle = pageSoup.find_all('div', class_='title')
for span in jobTitle:
for x in companyName:
print(x.text,span.text)
# print_company_and_jobs()
# Makes a list with company and job information
def get_company_and_jobs():
comps_and_jobs = []
companyName = pageSoup.find_all('span', class_='company')
jobTitle = pageSoup.find_all('div', class_='title')
for span in jobTitle:
for x in companyName:
comps_and_jobs.append(str(x.text))
comps_and_jobs.append(str(span.text))
return comps_and_jobs
# get_company_and_jobs()
def get_company_names():
comp_names = []
companyName = pageSoup.find_all('span', class_='company')
for span in companyName:
comp_names.append(str(span.text))
return comp_names
def get_job_titles():
jobs = []
jobTitle = pageSoup.find_all('div', class_='title')
for span in jobTitle:
jobs.append(str(span.text))
return jobs
#-----------------------------------------------------
# TODO: Get links from Soup and add them to df
#-----------------------------------------------------
# Here I am trying to translate this get_column_titles function
# example into one that keeps the links of jobs and company
# example: data visualisation with python and javascript p.152
def get_column_titles(table):
""" Get the Nobel categories from the table header """
cols = []
for th in table.find('tr').find_all('th')[1:]:
link = th.find('a')
# Store the category name and any Wikipedia link it has
if link:
cols.append({'title':link.text,\
'href':link.attrs['href']})
else:
cols.append({'title':th.text, 'href':None})
return cols
# my version so far:
def get_job_titles():
jobs = []
jobTitle = pageSoup.find_all('div', class_='title')
for span in jobTitle:
link = span.find('href')
if link:
jobs.append({'title':link.text,
'href':link.attrs['href']})
else:
jobs.append({'title':span.text, 'href':None})
return jobs
#not working
def print_links():
jobLink = pageSoup.find_all('div', class_='title')
for div in jobLink:
print(div.find('a')['href'])
#not working
def print_links():
jobLink = [div.a for div in pageSoup.find_all('div', class_='title')]
for div in jobLink:
print(div['href'])
#-----------------------------------------------------
# TODO: Make table
#-----------------------------------------------------
# AttributeError when trying to append the output
# from get_company_and_jobs
def make_table():
company_name = []
job_title = []
company_name.append(companyName.replace("\n",""))
job_title.append(jobTitle.text)
df = pd.DataFrame({"company_name":company_name,"job_title":job_title})
return df
#-----------------------------------------------------
# TODO: Remove Duplicates
# Python for data analysis.pg.194.e379
#-----------------------------------------------------
def remove_duplicates():
data = get_company_and_jobs()
# returns boolean indicating duplicate row
data.duplicated()
# returns a df where the duplicated array is True
data.drop_duplicates(['column1'])
# take_last will return the last observed value combination and default keeps the first
data.drop_duplicates(['column1','column2'], take_last=True)
# print_company_names()
# print_job_titles()
# print_company_and_jobs()
# get_company_and_jobs()
| 28.977528 | 186 | 0.597325 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,336 | 0.452889 |
d72da0462a860ef207ae387369b10c547914c8ab | 24 | py | Python | tests/acknowledge/acknowledge.py | orenyodfat/CWR-DataApi | f3b6ba8308c901b6ab87073c155c08e30692333c | [
"MIT"
] | 37 | 2015-04-21T15:33:53.000Z | 2022-02-07T00:02:29.000Z | tests/acknowledge/acknowledge.py | orenyodfat/CWR-DataApi | f3b6ba8308c901b6ab87073c155c08e30692333c | [
"MIT"
] | 86 | 2015-02-01T22:26:02.000Z | 2021-07-09T08:49:36.000Z | tests/acknowledge/acknowledge.py | orenyodfat/CWR-DataApi | f3b6ba8308c901b6ab87073c155c08e30692333c | [
"MIT"
] | 27 | 2015-01-26T16:01:09.000Z | 2021-11-08T23:53:55.000Z | __author__ = 'yaroslav'
| 12 | 23 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.416667 |
d72ddd086f69dbbd5f9fa1618ec9586c8a562c0f | 591 | py | Python | mangopi/tests/site/test_mangaFox.py | BFTeck/mangopi | 8598c8b35c38f9bf2a0880c93af5c1d3ae5728be | [
"MIT"
] | 24 | 2015-01-03T00:47:06.000Z | 2020-11-27T14:58:32.000Z | mangopi/tests/site/test_mangaFox.py | BFTeck/mangopi | 8598c8b35c38f9bf2a0880c93af5c1d3ae5728be | [
"MIT"
] | 4 | 2015-03-14T14:00:21.000Z | 2020-12-30T07:15:20.000Z | mangopi/tests/site/test_mangaFox.py | BFTeck/mangopi | 8598c8b35c38f9bf2a0880c93af5c1d3ae5728be | [
"MIT"
] | 5 | 2015-02-04T00:44:08.000Z | 2018-08-13T21:59:47.000Z | from unittest import TestCase
from mangopi.site.mangafox import MangaFox
class TestMangaFox(TestCase):
SERIES = MangaFox.series('gantz')
CHAPTERS = SERIES.chapters
def test_chapter_count(self):
self.assertEqual(len(TestMangaFox.CHAPTERS), 386)
def test_chapter_title(self):
self.assertEqual(TestMangaFox.CHAPTERS[-2].title, 'Lightning Counterstrike')
def test_chapter_pages(self):
self.assertEqual(len(TestMangaFox.CHAPTERS[0].pages), 43)
def test_for_image_url(self):
self.assertIsNone(TestMangaFox.CHAPTERS[0].pages[0].image)
| 28.142857 | 84 | 0.732657 | 514 | 0.869712 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.054146 |
d72e69e7855a795df76657ade7f8ca50ed7c1338 | 3,562 | py | Python | src/core/service/extractor.py | Mahe1980/btb | 29f65acb0dc5fe0f6ae03140cff87126a78e4aae | [
"Apache-2.0"
] | null | null | null | src/core/service/extractor.py | Mahe1980/btb | 29f65acb0dc5fe0f6ae03140cff87126a78e4aae | [
"Apache-2.0"
] | null | null | null | src/core/service/extractor.py | Mahe1980/btb | 29f65acb0dc5fe0f6ae03140cff87126a78e4aae | [
"Apache-2.0"
] | null | null | null | import json
import pandas as pd
import re
from pathlib import Path
from src.core.connectors.connectors import get_nz_conn
from src.settings.envs import NZ_TO_DATASET_DTYPE_MAPPING
from src.settings import envs
from src.settings import log_config
import logging
logger = logging.getLogger(__name__)
class Extractor(object):
def __init__(self, source, cutoff_ts):
self.source = source
self.cutoff_ts = cutoff_ts
self.conn = get_nz_conn()
def run(self, output_path):
"""
This extracts data from NZ and generates json output
:param output_path:
"""
sql = self.prepare_extract_query()
logger.info("sql query to extract data from NZ '{}'".format(sql))
chunk_size = envs.DEFAULT_CHUNK if self.source.chunk_size is None else self.source.chunk_size
with open(output_path, mode="a") as fw:
df_chunks = pd.read_sql_query(sql=sql, con=self.conn, chunksize=chunk_size)
for df in df_chunks:
df_c = self.convert_types(df)
df_c.to_json(fw, orient="records", date_format='iso', date_unit='s', lines=True)
fw.write('\n')
logger.info("Extracted json output file '{}'".format(Path(output_path).name))
def convert_types(self, df):
"""
Converts Float64 to Int64 if the actual dtype is Int64 and there are NaN (pandas bug)
:param df:
:return: df
"""
manifest_file = "{}/{}/{}.json".format(envs.PROJECT_ROOT, "src/core/manifest", self.source.source_name)
with open(manifest_file) as f:
man_fields = json.load(f)["fields"]
for man_field in man_fields:
man_field_name, man_field_type = man_field["name"], man_field["dtype"]
df_dtpye = df[man_field_name].dtype.name
man_dtype = NZ_TO_DATASET_DTYPE_MAPPING.get(man_field_type)
if df_dtpye != "object" and not re.search(df_dtpye[:10], man_dtype):
if df_dtpye == "float64" and man_dtype == "int64":
logger.info(("Casting {} to Int64 for {}".format(df_dtpye, man_field_type)))
df[man_field_name] = df[man_field_name].astype("Int64")
return df
def prepare_extract_query(self):
"""
Prepares extract sql query for extraction
:return: sql (string)
"""
table = self.source.source_name
wm_columns = self.source.watermark_columns
wm_value = self.source.value
fields = self.source.select_fields
where_filter = "" if self.source.where_filter is None else " AND " + self.source.where_filter
sql = "SELECT {fields} FROM {table} WHERE ".format(fields=fields, table=table)
watermark = self.prepare_watermark(wm_columns, wm_value)
sql += "{wm}{wf}".format(wm=watermark, wf=where_filter)
return sql
def prepare_watermark(self, hwm_columns, hwm_value):
"""
Prepares WM part for the sql query
:param hwm_columns:
:param hwm_value:
:return: hwm_clause (string)
"""
cols = json.loads(hwm_columns)["cols"]
from_ts = json.loads(hwm_value)["HWM1"]
hwm_clause = ''
for col in cols:
hwm_clause += "({col} >= '{from_ts}' AND {col} < '{to_ts}') OR ".format(col=col, from_ts=from_ts,
to_ts=self.cutoff_ts)
hwm_clause = hwm_clause[:-4]
return hwm_clause
| 39.142857 | 111 | 0.607805 | 3,260 | 0.915216 | 0 | 0 | 0 | 0 | 0 | 0 | 827 | 0.232173 |
d72eba65f9d051669771438ac61513bf982abaad | 3,925 | py | Python | mmdet/ops/nms_rotated/nms_rotated_wrapper.py | vpeopleonatank/OBBDetection | 86fb098d8d2ff3fc3cc447714d89a44c0939614a | [
"Apache-2.0"
] | 274 | 2021-04-06T15:46:06.000Z | 2022-03-31T02:00:10.000Z | mmdet/ops/nms_rotated/nms_rotated_wrapper.py | vpeopleonatank/OBBDetection | 86fb098d8d2ff3fc3cc447714d89a44c0939614a | [
"Apache-2.0"
] | 136 | 2021-07-11T11:26:54.000Z | 2022-03-31T02:45:34.000Z | mmdet/ops/nms_rotated/nms_rotated_wrapper.py | vpeopleonatank/OBBDetection | 86fb098d8d2ff3fc3cc447714d89a44c0939614a | [
"Apache-2.0"
] | 84 | 2021-05-29T06:58:14.000Z | 2022-03-31T07:44:10.000Z | import BboxToolkit as bt
import numpy as np
import torch
from . import nms_rotated_ext
def obb2hbb(obboxes):
center, w, h, theta = torch.split(obboxes, [2, 1, 1, 1], dim=1)
Cos, Sin = torch.cos(theta), torch.sin(theta)
x_bias = torch.abs(w/2 * Cos) + torch.abs(h/2 * Sin)
y_bias = torch.abs(w/2 * Sin) + torch.abs(h/2 * Cos)
bias = torch.cat([x_bias, y_bias], dim=1)
return torch.cat([center-bias, center+bias], dim=1)
def obb_nms(dets, iou_thr, device_id=None):
if isinstance(dets, torch.Tensor):
is_numpy = False
dets_th = dets
elif isinstance(dets, np.ndarray):
is_numpy = True
device = 'cpu' if device_id is None else f'cuda:{device_id}'
dets_th = torch.from_numpy(dets).to(device)
else:
raise TypeError('dets must be eithr a Tensor or numpy array, '
f'but got {type(dets)}')
if dets_th.numel() == 0:
inds = dets_th.new_zeros(0, dtype=torch.int64)
else:
# same bug will happen when bboxes is too small
too_small = dets_th[:, [2, 3]].min(1)[0] < 0.001
if too_small.all():
inds = dets_th.new_zeros(0, dtype=torch.int64)
else:
ori_inds = torch.arange(dets_th.size(0))
ori_inds = ori_inds[~too_small]
dets_th = dets_th[~too_small]
bboxes, scores = dets_th[:, :5], dets_th[:, 5]
inds = nms_rotated_ext.nms_rotated(bboxes, scores, iou_thr)
inds = ori_inds[inds]
if is_numpy:
inds = inds.cpu().numpy()
return dets[inds, :], inds
def poly_nms(dets, iou_thr, device_id=None):
if isinstance(dets, torch.Tensor):
is_numpy = False
dets_th = dets
elif isinstance(dets, np.ndarray):
is_numpy = True
device = 'cpu' if device_id is None else f'cuda:{device_id}'
dets_th = torch.from_numpy(dets).to(device)
else:
raise TypeError('dets must be eithr a Tensor or numpy array, '
f'but got {type(dets)}')
if dets_th.device == torch.device('cpu'):
raise NotImplementedError
inds = nms_rotated_ext.nms_poly(dets_th.float(), iou_thr)
if is_numpy:
inds = inds.cpu().numpy()
return dets[inds, :], inds
def BT_nms(dets, iou_thr, device_id=None):
if isinstance(dets, torch.Tensor):
is_tensor = True
device = dets.device
dets_np = dets.cpu().numpy()
elif isinstance(dets, np.ndarray):
is_tensor = False
dets_np = dets
else:
raise TypeError('dets must be eithr a Tensor or numpy array, '
f'but got {type(dets)}')
bboxes, scores = dets_np[:, :-1], dets_np[:, -1]
inds = bt.bbox_nms(bboxes, scores, iou_thr=iou_thr, score_thr=0)
if is_tensor:
inds = torch.from_numpy(inds).to(device)
return dets[inds, :], inds
def arb_batched_nms(bboxes, scores, inds, nms_cfg, class_agnostic=False):
nms_cfg_ = nms_cfg.copy()
class_agnostic = nms_cfg_.pop('class_agnostic', class_agnostic)
if class_agnostic:
bboxes_for_nms = bboxes
else:
hbboxes = obb2hbb(bboxes) if bboxes.size(-1) == 5 else bboxes
max_coordinate = hbboxes.max() - hbboxes.min()
offsets = inds.to(bboxes) * (max_coordinate + 1)
if bboxes.size(-1) == 5:
bboxes_for_nms = bboxes.clone()
bboxes_for_nms[:, :2] = bboxes_for_nms[:, :2] + offsets[:, None]
else:
bboxes_for_nms = bboxes + offsets[:, None]
nms_type = nms_cfg_.pop('type', 'BT_nms')
try:
nms_op = eval(nms_type)
except NameError:
from ..nms import nms_wrapper
nms_op = getattr(nms_wrapper, nms_type)
dets, keep = nms_op(
torch.cat([bboxes_for_nms, scores[:, None]], -1), **nms_cfg_)
bboxes = bboxes[keep]
scores = dets[:, -1]
return torch.cat([bboxes, scores[:, None]], -1), keep
| 32.983193 | 76 | 0.601783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 337 | 0.08586 |
d7315024b9feb9895ac394720425de68d6266009 | 12,033 | py | Python | StreamPy/StreamPy-UI/src/root/nested/temp/MakeNetwork(1).py | AnomalyInc/StreamPy | 94abca276b2857de48259f4f42ef95efbdf5f6d1 | [
"Apache-2.0"
] | 2 | 2017-04-27T11:04:27.000Z | 2019-02-07T21:03:32.000Z | StreamPy/StreamPy-UI/src/root/nested/temp/MakeNetwork(1).py | StreamPy/StreamPy | 94abca276b2857de48259f4f42ef95efbdf5f6d1 | [
"Apache-2.0"
] | null | null | null | StreamPy/StreamPy-UI/src/root/nested/temp/MakeNetwork(1).py | StreamPy/StreamPy | 94abca276b2857de48259f4f42ef95efbdf5f6d1 | [
"Apache-2.0"
] | null | null | null | from Stream import Stream
from Stream import _no_value, _multivalue
from Agent import Agent
from root.nested.OperatorsTestNew import stream_agent
def make_network(stream_names_tuple, agent_descriptor_dict):
""" This function makes a network of agents given the names
of the streams in the network and a description of the
agents in the network.
Parameters
----------
stream_names_tuple: tuple of str
A tuple consisting of names of streams in the network.
Each stream in the network must have a unique name.
agent_descriptor_dict: dict of tuples
The key is an agent name
The value is a tuple:
in_list, out_list, f, f_type, f_args, state
where:
in_list: list of input stream names
out_list: list of output stream names
f: function associated with the agent
f_type: 'element', 'list', 'window', etc
f_args: tuple of arguments for functions f
state: the state associated with this agent.
Local Variables
---------------
stream_dict: dict
key: stream name
value: Stream
agent_dict: dict
key: agent name
value: agent with the specified description:
in_list, out_list, f, f_type, f_args, state,
call_streams=[timer_stream]
where one timer stream is associated with
each agent.
agent_timer_dict: dict
key: agent_name
value: Stream
The value is the timer stream associated with the
agent. When the timer stream has a message, the
agent is made to execute a step.
"""
# Create streams and insert streams into stream_dict.
stream_dict = dict()
for stream_name in stream_names_tuple:
stream_dict[stream_name] = Stream(stream_name)
## # Only for debugging
## for key, value in stream_dict.items():
## print 'stream_name: ', key
## print 'stream:', value
agent_dict = dict()
agent_timer_dict = dict()
# Create agents with the specified description
# and put the agents into agent_dict.
for agent_name in agent_descriptor_dict.keys():
# print 'agent_name:', agent_name
in_list, out_list, f, f_type, f_args, state = \
agent_descriptor_dict[agent_name]
## # Only for debugging
## print 'in_list', in_list
## print 'out_list', out_list
## print 'f', f
## print 'f_args', f_args
## print 'f_type', f_type
## print 'state', state
# Replace a list consisting of a single input stream
# by the stream itself.
if len(in_list) == 1:
single_input_stream_name = in_list[0]
inputs = stream_dict[single_input_stream_name]
else:
inputs = list()
for input_stream_name in in_list:
inputs.append(stream_dict[input_stream_name])
# Replace a list consisting of a single output stream
# by the stream itself.
if len(out_list) == 1:
single_output_stream_name = out_list[0]
outputs = stream_dict[single_output_stream_name]
else:
outputs = list()
for output_stream_name in out_list:
outputs.append(stream_dict[output_stream_name])
# Create timer streams and insert them into agent_timer_dict
agent_timer_dict[agent_name] = Stream(
agent_name + ':timer')
# Create agents and insert them into agent_dict
agent_dict[agent_name] = stream_agent(
inputs, outputs, f_type, f, f_args, state,
call_streams=[agent_timer_dict[agent_name]])
# Set the name for this agent.
agent_dict[agent_name].name = agent_name
return (stream_dict, agent_dict, agent_timer_dict)
def network_data_structures(stream_names_tuple, agent_descriptor_dict):
    """Builds data structures that improve the efficiency
    of driving networks for animation or command-line
    execution of network nodes.

    Parameters
    ----------
    Same as for make_network.

    Return Values
    -------------
    (stream_to_agent_list_dict,
     agent_to_stream_dict,
     agent_to_agent_list_dict,
     agent_from_agent_list_dict)

    stream_to_agent_list_dict
       key: stream_name
       value: list of agent_name.
       The stream is an input stream of each agent
       with the agent name in the list.
    agent_to_stream_dict
       key: stream_name
       value: str. A single agent_name.
       The stream is the output stream of this agent.
    agent_to_agent_list_dict
       key: agent_name
       value: list of agent names
       The agent with name in key has an output stream to
       each agent whose name is in value.
    agent_from_agent_list_dict
       key: agent_name
       value: list of agent names
       The agent with name in key has an input stream from
       each agent whose name is in value.
    """
    # Map each stream to the list of agents that read it.
    stream_to_agent_list_dict = dict()
    for stream_name in stream_names_tuple:
        stream_to_agent_list_dict[stream_name] = list()
    agent_to_stream_dict = dict()
    # Construct stream_to_agent_list_dict and agent_to_stream_dict
    # from agent_descriptor_dict.
    # NOTE: .items() (valid on both Python 2 and 3) replaces the
    # Python-2-only .iteritems() so this function also runs on Python 3.
    for agent_name, descriptor in agent_descriptor_dict.items():
        input_stream_list = descriptor[0]
        output_stream_list = descriptor[1]
        for stream_name in input_stream_list:
            stream_to_agent_list_dict[stream_name].append(agent_name)
        for stream_name in output_stream_list:
            # A stream must have exactly one writing agent.
            if stream_name in agent_to_stream_dict:
                raise Exception(
                    stream_name + ' output by ' +
                    agent_to_stream_dict[stream_name] + ' and ' + agent_name)
            agent_to_stream_dict[stream_name] = agent_name
    # Construct agent_to_agent_list_dict from
    # agent_descriptor_dict, stream_to_agent_list_dict, and
    # agent_to_stream_dict.
    agent_to_agent_list_dict = dict()
    # Initialize agent_to_agent_list_dict
    for agent_name in agent_descriptor_dict.keys():
        agent_to_agent_list_dict[agent_name] = list()
    # Compute agent_to_agent_list_dict
    # If a stream is output of agent x and input to agents y, z
    # then agent x outputs to [y, z]
    for stream_name, agent_name in agent_to_stream_dict.items():
        agent_to_agent_list_dict[agent_name].extend(
            stream_to_agent_list_dict[stream_name])
    # Construct agent_from_agent_list_dict from
    # agent_descriptor_dict, stream_to_agent_list_dict, and
    # agent_to_stream_dict.
    agent_from_agent_list_dict = dict()
    # Initialize agent_from_agent_list_dict
    for agent_name in agent_descriptor_dict.keys():
        agent_from_agent_list_dict[agent_name] = list()
    # Compute agent_from_agent_list_dict
    # If a stream is an input of agent x and is an output of agents y, z
    # then agents [y, z] output to agent x.
    for stream_name, agent_name_list in stream_to_agent_list_dict.items():
        for receiving_agent_name in agent_name_list:
            agent_from_agent_list_dict[receiving_agent_name].append(
                agent_to_stream_dict[stream_name])
    return (stream_to_agent_list_dict, agent_to_stream_dict,
            agent_to_agent_list_dict, agent_from_agent_list_dict)
def main():
    # Demo driver: builds a 5-agent network that splits a random integer
    # stream into multiples / non-multiples of 2, then drives it for five
    # timer ticks.  Python 2 module (print statements).
    # STEP 1
    # PROVIDE CODE OR IMPORT PURE (NON-STREAM) FUNCTIONS
    from random import randint

    def rand(f_args):
        # Source function: draw a random int in [0, max_integer].
        max_integer = f_args[0]
        return randint(0, max_integer)

    def split(m, f_args):
        # Route m to the second output when not divisible by divisor,
        # otherwise to the first; _no_value suppresses the unused port.
        divisor = f_args[0]
        return [_no_value, m] if m%divisor else [m, _no_value]

    def print_value(v, index):
        # Sink function: print the value; the returned index is the
        # agent's state (count of values seen so far).
        print '[' , index , '] = ', v
        return (index+1)

    # STEP 2
    # SPECIFY THE NETWORK.
    # Specify names of all the streams.
    stream_names_tuple = ('random_stream', 'multiples_stream', 'non_multiples_stream')
    # Specify the agents:
    #     key: agent name
    #     value: list of input streams, list of output streams, function, function type,
    #            tuple of arguments, state
    agent_descriptor_dict = {
        'generate_random': [
            [], ['random_stream'], rand, 'element', (100,), None],
        'split': [
            ['random_stream'], ['multiples_stream', 'non_multiples_stream'],
            split, 'element', (2,), None],
        'print_random': [
            ['random_stream'], [], print_value, 'element', None, 0],
        'print_multiples': [['multiples_stream'], [], print_value, 'element', None, 0],
        'print_non_multiples': [['non_multiples_stream'], [], print_value, 'element', None, 0]
        }
    # STEP 3: MAKE THE NETWORK
    stream_dict, agent_dict, agent_timer_dict = make_network(
        stream_names_tuple, agent_descriptor_dict)
    (stream_to_agent_list_dict, agent_to_stream_dict,
     agent_to_agent_list_dict, agent_from_agent_list_dict) = \
        network_data_structures(stream_names_tuple, agent_descriptor_dict)
    # STEP 4: DRIVE THE NETWORK BY APPENDING
    # VALUES TO TIMER STREAMS
    for t in range(5):
        print
        print '--------- time step: ', t
        # Append t to each of the timer streams; each append makes the
        # corresponding agent execute a single step.
        for agent_name, timer_stream in agent_timer_dict.iteritems():
            print
            print 'Execute single step of agent with name', agent_name
            timer_stream.append(t)
            # Show the messages in transit from this agent to each of its
            # downstream agents.
            for receiving_agent_name in agent_to_agent_list_dict[agent_name]:
                descriptor = agent_descriptor_dict[receiving_agent_name]
                receiving_agent = agent_dict[receiving_agent_name]
                input_stream_list = descriptor[0]
                for stream_name in input_stream_list:
                    stream = stream_dict[stream_name]
                    print 'from', agent_name, 'on', stream_name, 'to', receiving_agent_name,
                    print stream.recent[stream.start[receiving_agent]:stream.stop]
            # Show the messages still unread on this agent's own inputs.
            descriptor = agent_descriptor_dict[agent_name]
            agent = agent_dict[agent_name]
            input_stream_list = descriptor[0]
            for stream_name in input_stream_list:
                stream = stream_dict[stream_name]
                sending_agent_name = agent_to_stream_dict[stream_name]
                print 'from', sending_agent_name, 'on', stream_name, 'to', agent_name,
                print stream.recent[stream.start[agent]:stream.stop]
if __name__ == '__main__':
main()
| 37.25387 | 95 | 0.634588 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,031 | 0.501205 |
d7343b09e03a726772cf411ddcf471ddbd4605d5 | 10,627 | py | Python | pyfuntofem/base.py | anilyil/funtofem | 332c9fcc90f2b2763b54b9b35f40527de3534a08 | [
"Apache-2.0"
] | null | null | null | pyfuntofem/base.py | anilyil/funtofem | 332c9fcc90f2b2763b54b9b35f40527de3534a08 | [
"Apache-2.0"
] | null | null | null | pyfuntofem/base.py | anilyil/funtofem | 332c9fcc90f2b2763b54b9b35f40527de3534a08 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# This file is part of the package FUNtoFEM for coupled aeroelastic simulation
# and design optimization.
# Copyright (C) 2015 Georgia Tech Research Corporation.
# Additional copyright (C) 2015 Kevin Jacobson, Jan Kiviaho and Graeme Kennedy.
# All rights reserved.
# FUNtoFEM is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this software except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
class Base(object):
    """
    Base class for FUNtoFEM bodies and scenarios.

    Provides the shared bookkeeping for design variables and function
    derivatives that the body and scenario classes rely on.
    """

    def __init__(self, name, id=0, group=None):
        """
        Parameters
        ----------
        name: str
            name of the body or scenario
        id: int
            id number in list of bodies or scenarios in the model
        group: int
            group number for the body or scenario. Coupled variables defined
            in the body/scenario will be coupled with bodies/scenarios in
            the same group

        See Also
        --------
        :mod:`body`,:mod:`scenario` : subclass the Base class
        """
        self.name = name
        self.id = id
        # A falsy group (None or 0) means "no group"; -1 is the sentinel.
        self.group = group if group else -1
        self.group_master = False

        # key: variable type (str); value: list of Variable objects
        self.variables = {}
        # key: variable type; value: list over functions of lists of
        # derivative values (one entry per variable of that type)
        self.derivatives = {}

    @staticmethod
    def _is_active(var):
        """Predicate: the variable participates in the problem (active is a bool flag)."""
        return var.active

    @staticmethod
    def _is_active_and_uncoupled(var):
        """Predicate: the variable is active and unique to this object."""
        return var.active and not var.coupled

    def add_variable(self, vartype, var):
        """
        Add a new variable to the body's or scenario's variable dictionary

        Parameters
        ----------
        vartype: str
            type of variable
        var: Variable object
            variable to be added
        """
        # First variable of this type: create the bookkeeping lists.
        if vartype not in self.variables:
            self.variables[vartype] = []
            self.derivatives[vartype] = []

        # Assign identifying properties to the variable, then store it.
        var.id = len(self.variables[vartype]) + 1
        var.analysis_type = vartype
        self.variables[vartype].append(var)

    def set_variable(self, vartype, name=None, index=None, value=None,
                     lower=None, upper=None, scaling=None, active=True,
                     coupled=None):
        """
        Set one or more properties of a variable given the vartype and
        either the variable name or a list of id's

        Parameters
        ----------
        vartype: str
            type of variable
        name: str
            name of the variable
        index: int or list of ints
            list of id numbers for the variables to modify
        value: float or complex
            value of the variable
        lower: float
            lower bound for the variable
        upper: float
            upper bound for the variable
        scaling: float
            scaling of the variable for an optimizer
        active: bool
            whether or not the variable is active
        coupled: bool
            whether or not the variable is coupled

        Examples
        --------
        base.set_variable('aerodynamic', name='AOA', value=3.0)
        base.set_variable('structural', index=2, active=False)
        base.set_variable('structural', index=[0, 1, 2, 3, 4], active=False)
        """
        if name is not None:
            # Update the first (and only) variable with a matching name.
            for variable in self.variables[vartype]:
                if variable.name == name:
                    variable.assign(value=value, upper=upper, lower=lower,
                                    scaling=scaling, active=active,
                                    coupled=coupled)
                    break
        elif index is not None:
            # Accept either a single index or a list of indices.
            if type(index) == list:
                for ndx in index:
                    self.variables[vartype][ndx].assign(
                        value=value, upper=upper, lower=lower,
                        scaling=scaling, active=active, coupled=coupled)
            elif type(index) == int:
                self.variables[vartype][index].assign(
                    value=value, upper=upper, lower=lower,
                    scaling=scaling, active=active, coupled=coupled)
            else:
                print("Warning unknown type for index. Variable not set")
        else:
            print("Warning no valid name or index given. Variable not set")

    def count_active_variables(self):
        """
        Counts the number of active variables in this body or scenario

        Returns
        -------
        count: int
            number of active variables in the variable dictionary
        """
        return sum(1 for vlist in self.variables.values()
                   for var in vlist if self._is_active(var))

    def count_uncoupled_variables(self):
        """
        Counts the number of variables in this body or scenario that are
        both uncoupled and active, i.e. the number of variables unique to
        this object.

        Returns
        -------
        count: int
            number of uncoupled, active variables in the variable dictionary
        """
        return sum(1 for vlist in self.variables.values()
                   for var in vlist if self._is_active_and_uncoupled(var))

    def active_variables(self):
        """
        Get the list of active variables in body or scenario

        Returns
        -------
        active_list: list of variables
            list of active variables
        """
        return [var for vlist in self.variables.values()
                for var in vlist if self._is_active(var)]

    def uncoupled_variables(self):
        """
        Get the list of uncoupled, active variables in body or scenario

        Returns
        -------
        active_list: list of variables
            list of uncoupled, active variables
        """
        return [var for vlist in self.variables.values()
                for var in vlist if self._is_active_and_uncoupled(var)]

    def couple_variables(self, base):
        """
        **[model call]**
        Updates coupled variables in the body or scenario based on the
        input's variables

        Parameters
        ----------
        base: body or scenario object
            body or scenario to copy coupled variables from
        """
        for vartype in base.variables:
            if vartype in self.variables:
                # For coupled variables take base's copy; otherwise keep ours.
                self.variables[vartype] = [
                    v1 if v1.coupled else v2
                    for v1, v2 in zip(base.variables[vartype],
                                      self.variables[vartype])]

    def update_id(self, id):
        """
        **[model call]**
        Update the id number of the body or scenario

        Parameters
        ----------
        id: int
            id number of the scenario
        """
        self.id = id

    def _merge_coupled_derivatives(self, base, accumulate):
        """Copy (accumulate=False) or add (accumulate=True) base's
        derivatives into ours for every coupled variable."""
        for vartype in base.variables:
            if vartype not in self.variables:
                continue
            for i, var in enumerate(base.variables[vartype]):
                if not var.coupled:
                    continue
                for func in range(len(self.derivatives[vartype])):
                    if accumulate:
                        self.derivatives[vartype][func][i] += \
                            base.derivatives[vartype][func][i]
                    else:
                        self.derivatives[vartype][func][i] = \
                            base.derivatives[vartype][func][i]

    def add_coupled_derivatives(self, base):
        """
        **[model call]**
        Adds coupled derivatives in the body or scenario based on the
        input's derivatives

        Parameters
        ----------
        base: body or scenario object
            body or scenario to copy coupled variables from
        """
        self._merge_coupled_derivatives(base, accumulate=True)

    def set_coupled_derivatives(self, base):
        """
        **[model call]**
        Updates coupled derivatives in the body or scenario based on the
        input's derivatives

        Parameters
        ----------
        base: body or scenario object
            body or scenario to copy coupled variables from
        """
        self._merge_coupled_derivatives(base, accumulate=False)

    def add_function_derivatives(self):
        """
        **[model call]**
        For each variable, add a new derivative value for a single new function
        """
        for vartype in self.derivatives:
            self.derivatives[vartype].append([0.0] * len(self.variables[vartype]))

    def active_derivatives(self, n):
        """
        **[model call]**
        Get the derivatives of a function, n, with respect to all the
        active variables in this body or scenario

        Parameters
        ----------
        n: int
            the function number

        Returns
        -------
        active_list: list of float
            list of derivative values for each active variable in this object
        """
        full_list = []
        for vartype in self.derivatives:
            # Keep only the entries belonging to active variables,
            # preserving their original order.
            full_list.extend(
                deriv for deriv, var in zip(self.derivatives[vartype][n],
                                            self.variables[vartype])
                if self._is_active(var))
        return full_list

    def uncoupled_derivatives(self, n):
        """
        **[model call]**
        Get the derivatives of a function, n, with respect to all the
        uncoupled, active variables in this body or scenario

        Parameters
        ----------
        n: int
            the function number

        Returns
        -------
        active_list: list of float
            list of derivative values for each uncoupled, active variable
            in this object
        """
        full_list = []
        for vartype in self.derivatives:
            full_list.extend(
                deriv for deriv, var in zip(self.derivatives[vartype][n],
                                            self.variables[vartype])
                if self._is_active_and_uncoupled(var))
        return full_list
| 32.59816 | 138 | 0.57655 | 9,739 | 0.916439 | 0 | 0 | 0 | 0 | 0 | 0 | 5,637 | 0.530441 |
d735642e9d558eb3bf9fdd07fb6538d0fa899e7b | 987 | py | Python | shell/core/backdoors.py | theralfbrown/shellsploit-framework | 93b66ab9361872697eafda2125b37005f49116be | [
"MIT"
] | 3 | 2020-03-21T04:37:50.000Z | 2021-08-14T07:31:13.000Z | shell/core/backdoors.py | security-geeks/shellsploit-framework | 93b66ab9361872697eafda2125b37005f49116be | [
"MIT"
] | null | null | null | shell/core/backdoors.py | security-geeks/shellsploit-framework | 93b66ab9361872697eafda2125b37005f49116be | [
"MIT"
] | 3 | 2020-02-04T13:28:47.000Z | 2020-06-10T01:34:19.000Z | from color import *
#Will be add command line params ..
def backdoorlist(require=False):
    """List the available backdoor payloads.

    If *require* is anything other than ``False``, the raw list of payload
    identifiers is returned; otherwise a colourised menu is printed and
    nothing is returned.
    """
    if require != False:
        return [
            "linux/x86/reverse_tcp",
            "linux/x64/reverse_tcp",
            "osx/x86/reverse_tcp",
            "osx/x64/reverse_tcp",
            "windows/x86/reverse_tcp",
            "php/reverse_tcp",
            "asp/reverse_tcp",
            "jsp/reverse_tcp",
            "war/reverse_tcp",
            "unix/python/reverse_tcp",
            "unix/perl/reverse_tcp",
            "unix/bash/reverse_tcp",
            "unix/ruby/reverse_tcp",
        ]
    # Interactive mode: render the payload menu in green.
    print (bcolors.GREEN+"""
Binaries
==========
linux/x86/reverse_tcp
linux/x64/reverse_tcp
osx/x86/reverse_tcp
windows/x86/reverse_tcp - [Passive]
windows/x64/reverse_tcp - [Passive]
Web Payloads
=============
php/reverse_tcp - [Passive]
asp/reverse_tcp - [Passive]
jsp/reverse_tcp - [Passive]
war/reverse_tcp - [Passive]
Scripting Payloads
===================
unix/python/reverse_tcp
unix/perl/reverse_tcp
unix/bash/reverse_tcp
unix/ruby/reverse_tcp
""" + bcolors.ENDC)
| 17.625 | 37 | 0.672746 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 777 | 0.787234 |
d736949b59153dcc0e4c4d3472e58423cffa5c71 | 754 | py | Python | circuit_mapper/gate_1_qubit.py | quantumgenetics/quantumgenetics | 630ae5a47c887ecf7a3b4ad62de0d58dc944a42d | [
"Apache-2.0"
] | 6 | 2019-11-09T16:59:29.000Z | 2021-03-27T03:20:24.000Z | circuit_mapper/gate_1_qubit.py | quantumgenetics/quantumgenetics | 630ae5a47c887ecf7a3b4ad62de0d58dc944a42d | [
"Apache-2.0"
] | null | null | null | circuit_mapper/gate_1_qubit.py | quantumgenetics/quantumgenetics | 630ae5a47c887ecf7a3b4ad62de0d58dc944a42d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
from functools import partial
def combine(qubit_count, gates):
    """Bind every gate to every qubit index (gate-major order).

    Returns a flat list of partials, one per (gate, qubit) pair, each
    taking only the remaining gate arguments (e.g. the circuit).
    """
    bound = []
    for gate in gates:
        for qubit in range(qubit_count):
            bound.append(partial(gate, qubit))
    return bound
def repeat_none(index, count):
    """Return *count* copies of a no-op gate application bound to *index*."""
    noop = partial(apply_none, index)
    return [noop] * count
def apply_none(index, circuit):
    """Identity gate: intentionally leaves *circuit* unchanged."""
    pass
def apply_not(index, circuit):
    """Apply a NOT (Pauli-X) gate to qubit *index* of *circuit* in place."""
    # Gates act on the circuit's first quantum register.
    qr = circuit.qregs[0]
    circuit.x(qr[index])
def apply_phase_flip(index, circuit):
    """Apply a phase-flip (Pauli-Z) gate to qubit *index* of *circuit* in place."""
    qr = circuit.qregs[0]
    circuit.z(qr[index])
def apply_hadamard(index, circuit):
    """Apply a Hadamard gate to qubit *index* of *circuit* in place."""
    qr = circuit.qregs[0]
    circuit.h(qr[index])
def apply_y_rotation(theta, index, circuit):
    """Rotate qubit *index* of *circuit* by *theta* about the Y axis, in place."""
    qr = circuit.qregs[0]
    circuit.ry(theta, qr[index])
def apply_z_rotation(phi, index, circuit):
    """Rotate qubit *index* of *circuit* by *phi* about the Z axis, in place."""
    qr = circuit.qregs[0]
    circuit.rz(phi, qr[index])
| 18.85 | 69 | 0.68435 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.029178 |
d736eacec6de3c37305776ffe4de524fa9c56eb4 | 739 | py | Python | stronghold/tests/testmixins.py | davitovmasyan/django-stronghold | 73021742e9aad5c57509623c1152fcf7aa6ed232 | [
"MIT"
] | 252 | 2015-01-07T20:03:27.000Z | 2022-03-22T18:17:28.000Z | stronghold/tests/testmixins.py | davitovmasyan/django-stronghold | 73021742e9aad5c57509623c1152fcf7aa6ed232 | [
"MIT"
] | 35 | 2015-02-16T02:29:22.000Z | 2021-10-04T09:14:11.000Z | stronghold/tests/testmixins.py | davitovmasyan/django-stronghold | 73021742e9aad5c57509623c1152fcf7aa6ed232 | [
"MIT"
] | 48 | 2015-02-15T17:56:52.000Z | 2021-10-04T12:33:27.000Z | from stronghold.views import StrongholdPublicMixin
import django
from django.views.generic import View
from django.views.generic.base import TemplateResponseMixin
if django.VERSION[:2] < (1, 9):
from django.utils import unittest
else:
import unittest
class StrongholdMixinsTests(unittest.TestCase):
    """Checks that StrongholdPublicMixin marks a view's dispatch as public."""

    def test_public_mixin_sets_attr(self):
        # The mixin should tag dispatch with STRONGHOLD_IS_PUBLIC so the
        # stronghold middleware exempts this view from login.
        class TestView(StrongholdPublicMixin, View):
            pass

        self.assertTrue(TestView.dispatch.STRONGHOLD_IS_PUBLIC)

    def test_public_mixin_sets_attr_with_multiple_mixins(self):
        # The marker must survive when other mixins sit in the MRO.
        class TestView(StrongholdPublicMixin, TemplateResponseMixin, View):
            template_name = 'dummy.html'

        self.assertTrue(TestView.dispatch.STRONGHOLD_IS_PUBLIC)
| 26.392857 | 75 | 0.760487 | 475 | 0.64276 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.016238 |
d73791489c7b8c6e1b619f76a8345efe1855108d | 3,664 | py | Python | app/utils/mapping.py | bbc/connected-data-pseudocone | 452479cd44fa7d32ecb3d54b801da9024d6984ce | [
"MIT"
] | null | null | null | app/utils/mapping.py | bbc/connected-data-pseudocone | 452479cd44fa7d32ecb3d54b801da9024d6984ce | [
"MIT"
] | 5 | 2018-07-30T09:01:03.000Z | 2019-01-16T11:16:39.000Z | app/utils/mapping.py | bbc/connected-data-pseudocone | 452479cd44fa7d32ecb3d54b801da9024d6984ce | [
"MIT"
] | null | null | null | import datetime
import logging
import isodate
from app import pseudocone_pb2
from app.settings import SERVICE_NAME
logger = logging.getLogger(SERVICE_NAME)
def action_context_to_iso8601_duration(action_context):
    """Process the actionContext string to ISO 8601 duration format.

    The fragment after ``#`` is interpreted as an offset in seconds and
    converted with ``isodate.duration_isoformat``.

    Example:
        "urn:bbc:tv:version_offset:p05xxtvp#150" -> "PT150"

    NOTE(review): the example output looks abbreviated; isodate emits a
    full designator (e.g. "PT150S") -- confirm against a real call.
    """
    try:
        duration = action_context.split('#')[-1]
        return isodate.duration_isoformat(datetime.timedelta(seconds=float(duration)))
    except Exception as e:
        # Best effort: malformed contexts are logged and None is returned.
        logger.exception(e)
def pid2uri(pid):
    """Map a programme PID to its Datalab URI, or None for a missing PID."""
    if pid is None:
        return None
    return "programmes:bbc.co.uk,2018/FIXME/%s" % (pid,)
def convert_json_list_to_pseudocone_response(data):
    """Group raw interaction rows by user and build the protobuf response.

    For every distinct ``anon_id`` the rows are grouped per ``resourceid``
    and only the latest interaction per resource is kept (see
    ``extract_latest_interaction``).
    """
    unique_users_ids = get_unique_vals_for_property(data, "anon_id")
    user_interaction_items = []
    for user_id in unique_users_ids:
        user_data = get_data_matching_property(data, "anon_id", user_id)
        unique_item_ids = get_unique_vals_for_property(user_data, "resourceid")
        user_items = []
        for item_id in unique_item_ids:
            try:
                user_item_interactions = get_data_matching_property(user_data, "resourceid", item_id)
                interaction = extract_latest_interaction(user_item_interactions)
                user_items.append(convert_db_object_to_interaction_item(interaction))
            except Exception as e:
                # Skip items that fail conversion; keep the user's other data.
                logger.exception(e)
        user = pseudocone_pb2.UserParam(id=user_id, cookie=None)
        user_interaction_item = pseudocone_pb2.TestDataUser(user=user, interactions=user_items)
        user_interaction_items.append(user_interaction_item)
    return pseudocone_pb2.ListTestDataUsersResponse(items=user_interaction_items)
def convert_single_user_interactions_to_proto_response(data):
    """Build a ListInteractionsResponse from one user's interaction rows.

    Only the latest interaction per ``resourceid`` is kept.
    """
    unique_item_ids = get_unique_vals_for_property(data, "resourceid")
    user_items = []
    for item_id in unique_item_ids:
        user_item_interactions = get_data_matching_property(data, "resourceid", item_id)
        interaction = extract_latest_interaction(user_item_interactions)  # would not be necessary with the new dump
        user_items.append(convert_db_object_to_interaction_item(interaction))
    list_interactions_response = pseudocone_pb2.ListInteractionsResponse(interactions=user_items)
    return list_interactions_response
def extract_latest_interaction(interactions):
    """Return the interaction with the most recent ``anon_activitytime``.

    NOTE: sorts *interactions* in place (newest first) as a side effect.
    Raises IndexError when *interactions* is empty.
    """
    interactions.sort(key=extract_time, reverse=True)
    return interactions[0]
def extract_time(json):
    """Sort key: parse ``anon_activitytime`` into a naive datetime.

    The timezone is dropped so naive and aware timestamps compare safely.
    """
    return isodate.parse_datetime(json["anon_activitytime"]).replace(tzinfo=None)
def get_data_matching_property(data, property, value):
    """Return the interactions whose *property* field equals *value*."""
    matching = []
    for interaction in data:
        if interaction[property] == value:
            matching.append(interaction)
    return matching
def get_unique_vals_for_property(interactions_data, property):
    """Return the distinct values of *property* (order not guaranteed)."""
    distinct = {interaction[property] for interaction in interactions_data}
    return list(distinct)
def convert_db_object_to_interaction_item(obj):
    """Map one raw interaction row (dict) onto a protobuf InteractionItem.

    ``completion`` is derived from the offset encoded in ``actioncontext``
    (see ``action_context_to_iso8601_duration``); ``uri`` is derived from
    the resource PID via ``pid2uri``.
    """
    interaction_item = pseudocone_pb2.InteractionItem(action=obj["action"],
                                                      activity_time=obj["anon_activitytime"],
                                                      activity_type=obj["activitytype"],
                                                      completion=action_context_to_iso8601_duration(
                                                          obj["actioncontext"]),
                                                      pid=obj["resourceid"],
                                                      uri=pid2uri(obj["resourceid"]))
    return interaction_item
| 35.230769 | 116 | 0.684771 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 456 | 0.124454 |
d738962bacfe335ff849bf213922ab2ef2a2a20b | 445 | py | Python | paper_iv/centrality_example.py | wiheto/phd_code | 432cae1aa26f1758e5970fd11361af0e4a130b9d | [
"MIT"
] | 2 | 2017-09-20T18:02:38.000Z | 2017-09-22T09:52:17.000Z | paper_iv/centrality_example.py | wiheto/phd_code | 432cae1aa26f1758e5970fd11361af0e4a130b9d | [
"MIT"
] | null | null | null | paper_iv/centrality_example.py | wiheto/phd_code | 432cae1aa26f1758e5970fd11361af0e4a130b9d | [
"MIT"
] | null | null | null |
import numpy as np
import teneto
import matplotlib.pyplot as plt
plt.rcParams['image.cmap'] = 'gist_gray'
A=np.zeros((3,3,20))
A[0,2,0:4]=1
A[0,1,0]=1
A[0,1,5]=1
A[0,1,10]=1
A[0,1,15]=1
fig,ax = plt.subplots(1)
ax = teneto.plot.slice_plot(A,ax,vlabs=range(1,4),dlabs=range(1,21))
ax.set_ylabel('nodes')
ax.set_xlabel('time')
ax.set_ylim(-0.25,2.25)
fig.tight_layout()
fig.show()
fig.savefig('./examples/figures/centrality_examples.pdf')
| 16.481481 | 68 | 0.692135 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.179775 |
d738bec2f9d2cad3aaccf4ee4535837028c7758a | 401 | py | Python | examples/hsd_struct/src/hsd_struct_beh.py | hnikolov/pihdf | 9a0d2add059db1ee90805e2124beff1fb5185fae | [
"MIT"
] | 2 | 2016-09-25T00:08:47.000Z | 2016-10-09T10:09:55.000Z | examples/hsd_struct/src/hsd_struct_beh.py | hnikolov/pihdf | 9a0d2add059db1ee90805e2124beff1fb5185fae | [
"MIT"
] | null | null | null | examples/hsd_struct/src/hsd_struct_beh.py | hnikolov/pihdf | 9a0d2add059db1ee90805e2124beff1fb5185fae | [
"MIT"
] | null | null | null | def hsd_struct_beh(mode_1, mode_2, LEDs, LED_rdy_en, LED_rdy_buff, LED_rdy_out, DELAY_BITS, BUFFER_SIZE):
'''|
| Specify the behavior, describe data processing; there is no notion
| of clock. Access the in/out interfaces via get() and append()
| methods. The "hsd_struct_beh" function does not return values.
|________'''
print "Warning: Behavior model not implemented yet!"
| 40.1 | 105 | 0.720698 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 277 | 0.690773 |
d73ac6fd3661cf82f0a73ee994c217dc326ae563 | 1,187 | py | Python | tests/unit/test_non-lib_utils.py | DiwakerJha/acconeer-python-exploration | 90cc2931c3f7336c1b75882f63e2de081fc72dbf | [
"BSD-3-Clause-Clear"
] | null | null | null | tests/unit/test_non-lib_utils.py | DiwakerJha/acconeer-python-exploration | 90cc2931c3f7336c1b75882f63e2de081fc72dbf | [
"BSD-3-Clause-Clear"
] | null | null | null | tests/unit/test_non-lib_utils.py | DiwakerJha/acconeer-python-exploration | 90cc2931c3f7336c1b75882f63e2de081fc72dbf | [
"BSD-3-Clause-Clear"
] | null | null | null | import sys
from itertools import chain
from pathlib import Path
import pytest
import acconeer.exptool as et
HERE = Path(__file__).parent
path = (HERE / ".." / ".." / "utils").resolve()
sys.path.append(path.as_posix())
from convert_to_csv import record_to_csv # noqa: E402
@pytest.mark.parametrize("test_file", chain(HERE.glob("**/*.h5"), HERE.glob("**/*.npz")))
def test_csv_conversion_is_exact(test_file):
    # The idea is to test the csv conversion corresponds exactly to the data file.
    # Aimed to catch rounding errors and flipped cols/rows.
    record = et.recording.load(test_file)

    if record.mode == et.Mode.SPARSE:
        pytest.skip("CSV-ifying of sparse data is not supported at this moment.")

    data = record.data.squeeze()
    # Only 2-D records are handled by the element-wise comparison below.
    assert data.ndim == 2

    csv_table = record_to_csv(record)
    csv_table_sac = record_to_csv(record, sweep_as_column=True)

    assert data.shape == csv_table.shape
    assert data.T.shape == csv_table_sac.shape

    # Compare every cell in both orientations; complex() parses the CSV
    # cell text back into a number so the equality check is exact.
    for row in range(data.shape[0]):
        for col in range(data.shape[1]):
            assert data[row, col] == complex(csv_table[row, col])
            assert data[row, col] == complex(csv_table_sac[col, row])
| 31.236842 | 89 | 0.690817 | 0 | 0 | 0 | 0 | 906 | 0.763269 | 0 | 0 | 250 | 0.210615 |
d73b450d21d773f70afe8701a469d62e69a7223b | 720 | py | Python | pydis_site/apps/api/migrations/0055_reminder_mentions.py | Numerlor/site | e4cec0aeb2a791e622be8edd94fb4e82d150deab | [
"MIT"
] | 700 | 2018-11-17T15:56:51.000Z | 2022-03-30T22:53:17.000Z | pydis_site/apps/api/migrations/0055_reminder_mentions.py | Numerlor/site | e4cec0aeb2a791e622be8edd94fb4e82d150deab | [
"MIT"
] | 542 | 2018-11-17T13:39:42.000Z | 2022-03-31T11:24:00.000Z | pydis_site/apps/api/migrations/0055_reminder_mentions.py | Numerlor/site | e4cec0aeb2a791e622be8edd94fb4e82d150deab | [
"MIT"
] | 178 | 2018-11-21T09:06:56.000Z | 2022-03-31T07:43:28.000Z | # Generated by Django 2.2.14 on 2020-07-15 07:37
import django.contrib.postgres.fields
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the ``mentions`` column to the
    # Reminder model.

    dependencies = [
        ('api', '0054_user_invalidate_unknown_role'),
    ]

    operations = [
        migrations.AddField(
            model_name='reminder',
            name='mentions',
            # Postgres ArrayField of non-negative BigIntegers (role/user
            # IDs); optional, defaulting to an empty list.
            field=django.contrib.postgres.fields.ArrayField(base_field=models.BigIntegerField(validators=[django.core.validators.MinValueValidator(limit_value=0, message='Mention IDs cannot be negative.')]), blank=True, default=list, help_text='IDs of roles or users to ping with the reminder.', size=None),
        ),
    ]
| 34.285714 | 307 | 0.704167 | 558 | 0.775 | 0 | 0 | 0 | 0 | 0 | 0 | 191 | 0.265278 |
d73cd16bb625739a1d37a63ac0f909083ac38bb7 | 1,951 | py | Python | kantanoidc/views.py | mmiyajima2/django-kantanoidc | eb0540e61c27b301f4cb53bb29e6a853e90ee879 | [
"MIT"
] | null | null | null | kantanoidc/views.py | mmiyajima2/django-kantanoidc | eb0540e61c27b301f4cb53bb29e6a853e90ee879 | [
"MIT"
] | 17 | 2019-02-18T02:55:42.000Z | 2022-02-10T08:01:23.000Z | kantanoidc/views.py | mmiyajima2/django-kantanoidc | eb0540e61c27b301f4cb53bb29e6a853e90ee879 | [
"MIT"
] | null | null | null | from logging import getLogger
from django.http import HttpResponseRedirect
from django.views.generic.base import View
from django.contrib.auth import login
from django.contrib.auth import get_user_model
from django.urls import reverse
from .client import client
from .errors import IllegalStateError
import string
import random
logger = getLogger(__name__)
UserModel = get_user_model()
class Start(View):
    """Kicks off the OpenID Connect flow: generates nonce/state tokens,
    stores them in the session and redirects to the provider."""

    http_method_names = ['get']

    def get(self, request, *args, **kwargs):
        chars = string.ascii_letters + string.digits
        # Security fix: the OIDC nonce/state tokens must be unpredictable.
        # random.choice() uses the non-cryptographic Mersenne Twister, so a
        # CSPRNG (random.SystemRandom, backed by os.urandom) is used instead.
        rng = random.SystemRandom()
        stored_nonce = ''.join([rng.choice(chars) for i in range(32)])
        stored_state = ''.join([rng.choice(chars) for i in range(32)])
        # Stash both tokens so Callback can validate the provider's response.
        request.session['stored_nonce'] = stored_nonce
        request.session['stored_state'] = stored_state
        client.prepare(request)
        redirect_uri = \
            request.build_absolute_uri(reverse('kantanoidc:callback'))
        request.session['redirect_uri'] = redirect_uri
        return HttpResponseRedirect(
            client.build_starturl(redirect_uri, stored_nonce, stored_state)
        )
class Callback(View):
    """Handles the OIDC redirect: validates state, resolves the subject
    and logs the matching local user in."""

    http_method_names = ['get']

    def get(self, request, *args, **kwargs):
        state = request.GET.get('state')
        # CSRF protection: the state echoed back by the provider must match
        # the one generated in Start and stored in the session.
        if state != request.session['stored_state']:
            raise IllegalStateError('state <> stored_state')
        code = request.GET.get('code')
        stored_nonce = request.session['stored_nonce']
        redirect_uri = request.session['redirect_uri']
        # Exchange the authorization code for the subject identifier (sub).
        sub = client.get_sub(redirect_uri, code, stored_nonce)
        logger.info('%s coming at CallbackView', sub)
        try:
            user = UserModel.objects.get_by_natural_key(sub)
        except UserModel.DoesNotExist as e:
            # No local account matches the OIDC subject; log and propagate.
            logger.error('username=%s, does not exists', sub)
            raise e
        login(request, user)
        nexturl = client.build_nexturl(request)
        return HttpResponseRedirect(nexturl)
| 34.22807 | 75 | 0.679651 | 1,557 | 0.798052 | 0 | 0 | 0 | 0 | 0 | 0 | 212 | 0.108662 |
d7400f4c43c756b05040a9e3afa28d7e32be9f5a | 970 | py | Python | hata/ext/extension_loader/client_extension.py | Multiface24111/hata | cd28f9ef158e347363669cc8d1d49db0ff41aba0 | [
"0BSD"
] | 173 | 2019-06-14T20:25:00.000Z | 2022-03-21T19:36:10.000Z | hata/ext/extension_loader/client_extension.py | Tari-dev/hata | a5c3199c845858f997af3b0b2c18770fdc691897 | [
"0BSD"
] | 52 | 2020-01-03T17:05:14.000Z | 2022-03-31T11:39:50.000Z | hata/ext/extension_loader/client_extension.py | Tari-dev/hata | a5c3199c845858f997af3b0b2c18770fdc691897 | [
"0BSD"
] | 47 | 2019-11-09T08:46:45.000Z | 2022-03-31T14:33:34.000Z | __all__ = ()
from ...backend.utils import KeepType
from ...discord.client import Client
from .extension import EXTENSIONS, EXTENSION_STATE_LOADED
@KeepType(Client)
class Client:
    @property
    def extensions(self):
        """
        Returns a list of extensions added to the client. Added by the `extension_loader` extension.

        Returns
        -------
        extensions : `list` of ``Extension``
        """
        extensions = []
        for extension in EXTENSIONS.values():
            # Only consider extensions that finished loading.
            if extension._state == EXTENSION_STATE_LOADED:
                snapshot_difference = extension._snapshot_difference
                if (snapshot_difference is not None):
                    for client, client_snapshot_difference in snapshot_difference:
                        # The extension counts for this client only when its
                        # snapshot recorded a non-empty difference for it.
                        if (self is client) and client_snapshot_difference:
                            extensions.append(extension)
                            break

        return extensions
| 31.290323 | 100 | 0.593814 | 802 | 0.826804 | 0 | 0 | 820 | 0.845361 | 0 | 0 | 202 | 0.208247 |
d74152884389e5fc3f13b32b2c1f5a681bd2affe | 211 | py | Python | org/miggy/setup.py | DarkSession/fd-api | e319000dd12ca88c3ddd736b739d904279f69b8b | [
"CC-BY-4.0"
] | 20 | 2017-10-03T21:47:39.000Z | 2022-01-27T21:06:53.000Z | org/miggy/setup.py | DarkSession/fd-api | e319000dd12ca88c3ddd736b739d904279f69b8b | [
"CC-BY-4.0"
] | 1 | 2021-10-13T15:49:57.000Z | 2021-11-05T18:58:21.000Z | org/miggy/setup.py | DarkSession/fd-api | e319000dd12ca88c3ddd736b739d904279f69b8b | [
"CC-BY-4.0"
] | 7 | 2020-02-07T13:43:29.000Z | 2022-03-26T13:13:55.000Z | # vim: textwidth=0 wrapmargin=0 tabstop=2 shiftwidth=2 softtabstop=2 smartindent smarttab
from setuptools import setup, find_namespace_packages
# Register the ``org.miggy`` namespace package.  Sub-packages are discovered
# automatically by find_namespace_packages(), so new ones need no change here.
setup(
name="org.miggy",
packages=find_namespace_packages()
)
| 26.375 | 89 | 0.805687 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.473934 |
d742dd18740c02a1839bc5886575861c03728ccd | 462 | py | Python | day-04/part-1/badouralix.py | evqna/adventofcode-2020 | 526bb9c87057d02bda4de9647932a0e25bdb3a5b | [
"MIT"
] | 12 | 2020-11-30T19:22:18.000Z | 2021-06-21T05:55:58.000Z | day-04/part-1/badouralix.py | evqna/adventofcode-2020 | 526bb9c87057d02bda4de9647932a0e25bdb3a5b | [
"MIT"
] | 13 | 2020-11-30T17:27:22.000Z | 2020-12-22T17:43:13.000Z | day-04/part-1/badouralix.py | evqna/adventofcode-2020 | 526bb9c87057d02bda4de9647932a0e25bdb3a5b | [
"MIT"
] | 3 | 2020-12-01T08:49:40.000Z | 2022-03-26T21:47:38.000Z | from tool.runners.python import SubmissionPy
class BadouralixSubmission(SubmissionPy):
    def run(self, s):
        """
        :param s: input in string format
        :return: solution flag
        """
        # Fields every valid passport must contain ("cid" is optional).
        required = {"byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"}
        valid_count = 0
        # Passports are separated by blank lines; fields by whitespace.
        for passport in s.split("\n\n"):
            present = {field.split(":")[0] for field in passport.split()}
            if required.issubset(present):
                valid_count += 1
        return valid_count
| 25.666667 | 73 | 0.519481 | 414 | 0.896104 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.28355 |
d74303560e27d9dbcb4f0ce7fc9475cbf09c4f3a | 6,845 | py | Python | s2e_env/manage.py | michaelbrownuc/s2e-env | 4bd6a45bf1ec9456ed5acf5047b6aac3fcd19683 | [
"BSD-3-Clause"
] | null | null | null | s2e_env/manage.py | michaelbrownuc/s2e-env | 4bd6a45bf1ec9456ed5acf5047b6aac3fcd19683 | [
"BSD-3-Clause"
] | null | null | null | s2e_env/manage.py | michaelbrownuc/s2e-env | 4bd6a45bf1ec9456ed5acf5047b6aac3fcd19683 | [
"BSD-3-Clause"
] | null | null | null | """
Copyright (c) Django Software Foundation and individual contributors.
Copyright (c) Dependable Systems Laboratory, EPFL
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Django nor the names of its contributors may be used
to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import pkgutil
import importlib
import sys
from s2e_env.command import BaseCommand, CommandError, CommandParser
from s2e_env.utils import log
COMMANDS_DIR = os.path.join(os.path.dirname(__file__), 'commands')
def find_commands():
    """
    Given the path to the management commands directory, returns a list of
    all the command names that are available.

    Returns an empty list if no commands are defined.
    """
    commands = []
    for _, module_name, is_pkg in pkgutil.iter_modules([COMMANDS_DIR]):
        # Sub-packages and private modules (leading underscore) are not commands.
        if is_pkg or module_name.startswith('_'):
            continue
        commands.append(module_name)
    return commands
def load_command_class(name):
    """
    Given a command name, returns the Command class instance. All errors raised
    by the import process (ImportError, AttributeError) are allowed to
    propagate.
    """
    module_path = f's2e_env.commands.{name}'
    return importlib.import_module(module_path).Command()
def call_command(command_name, *args, **options):
    """
    Call the given command, with the given options and args/kwargs.

    This is the primary API you should use for calling specific commands.

    `command_name` may be a string or a command object. Using a string is
    preferred unless the command object is required for further processing
    or testing.
    """
    if isinstance(command_name, BaseCommand):
        # Command object passed in; derive its name from its module path
        command = command_name
        command_name = command.__class__.__module__.split('.')[-1]
    else:
        # Load the command object by name
        command = load_command_class(command_name)

    # Simulate argument parsing to get the option defaults
    parser = command.create_parser('', command_name)
    # Map each option's shortest command-line spelling (dashes converted to
    # underscores) to the `dest` name argparse stores it under.
    # pylint: disable=protected-access
    opt_mapping = {
        min(s_opt.option_strings).lstrip('-').replace('-', '_'): s_opt.dest
        for s_opt in parser._actions if s_opt.option_strings
    }
    # Translate caller-supplied keyword options to their `dest` names.
    arg_options = {opt_mapping.get(key, key): value for
                   key, value in options.items()}
    defaults = parser.parse_args(args=args)

    # Keyword options passed to this function override parsed defaults.
    # pylint: disable=protected-access
    defaults = dict(defaults._get_kwargs(), **arg_options)

    # Move positional args out of options to mimic legacy optparse
    args = defaults.pop('args', ())

    return command.execute(*args, **defaults)
class CommandManager:
    """
    Manages and executes commands.
    """
    def __init__(self, argv):
        # We must do a copy by value of the arguments, because the original sys.argv
        # may be sometimes changed arbitrarily by a call to import_module.
        self._argv = argv[:]
        self._prog_name = os.path.basename(self._argv[0])

    def main_help_text(self, commands_only=False):
        """
        Returns the main help text, as a string.
        """
        if commands_only:
            usage = sorted(find_commands())
        else:
            usage = [
                '',
                f'Type \'{self._prog_name} help <subcommand>\' for help on a specific '
                'subcommand.',
                '',
                'Available subcommands:',
            ]
            for command in find_commands():
                usage.append(f'  {command}')

        return '\n'.join(usage)

    def fetch_command(self, subcommand):
        """
        Tries to fetch the given subcommand, printing a message with the
        appropriate command called from the command line if it can't be found.
        Exits the process with status 1 on an unknown subcommand.
        """
        commands = find_commands()
        if subcommand not in commands:
            sys.stderr.write(f'Unknown command - {subcommand}. Type \'{self._prog_name} help\' for usage\n')
            sys.exit(1)

        return load_command_class(subcommand)

    def execute(self):
        """
        Given the command-line arguments, this figures out which subcommand is
        being run, creates a parser appropriate to that command, and runs it.
        """
        try:
            subcommand = self._argv[1]
        except IndexError:
            # Display help if no arguments were given
            subcommand = 'help'

        parser = CommandParser(None,
                               usage='%(prog)s subcommand [options] [args]',
                               add_help=False)
        parser.add_argument('args', nargs='*')  # catch-all
        try:
            options, args = parser.parse_known_args(self._argv[2:])
        except CommandError:
            # Ignore any option errors at this point.
            # NOTE(review): if this branch fires, `options` and `args` stay
            # unbound and the uses below raise NameError -- same pattern as
            # Django's ManagementUtility; confirm CommandParser can actually
            # raise here given add_help=False and the catch-all argument.
            pass

        if subcommand == 'help':
            if '--commands' in args:
                sys.stdout.write(f'{self.main_help_text(commands_only=True)}\n')
            elif len(options.args) < 1:
                sys.stdout.write(f'{self.main_help_text()}\n')
            else:
                self.fetch_command(options.args[0]).print_help(self._prog_name, options.args[0])
        elif self._argv[1:] in (['--help'], ['-h']):
            sys.stdout.write(f'{self.main_help_text()}\n')
        else:
            self.fetch_command(subcommand).run_from_argv(self._argv)
def main():
    """
    The main function.

    Configure logging, then use the command manager to execute a command.
    """
    log.configure_logging()
    CommandManager(sys.argv).execute()
# Script entry point: delegate to main() so importing the module has no side effects.
if __name__ == '__main__':
    main()
| 35.466321 | 108 | 0.659752 | 2,717 | 0.396932 | 0 | 0 | 0 | 0 | 0 | 0 | 3,794 | 0.554273 |
d74324110a48059e49cab1877c123daa1cf09eba | 1,658 | py | Python | src/interface/vib_main.py | stembl/vibproc | 2588ad7fad5309a0a56fe5ea3d0a0f4affd10911 | [
"MIT"
] | null | null | null | src/interface/vib_main.py | stembl/vibproc | 2588ad7fad5309a0a56fe5ea3d0a0f4affd10911 | [
"MIT"
] | null | null | null | src/interface/vib_main.py | stembl/vibproc | 2588ad7fad5309a0a56fe5ea3d0a0f4affd10911 | [
"MIT"
] | null | null | null | ## Main Program for Vibration Analysis with Pandas
import sys, os
import matplotlib.pyplot as plt
#sys.path.append(os.path.join(os.path.dirname(__file__), "../tools/"))
sys.path.append("../tools/")
sys.path.append("../../data/")
from open_file_folder import *
from import_vib_data import *
from data_features import *
## Required Information
# Input Profile
# This can either be an industry standard or a .csv file.
#input_profile_label = 'profiles/lattice_20170307-test2_logger0.csv'
input_profile_label = 'ista air ride'
# Title of Report
title = 'Javelin Transportation 08/25/2017, Server, Logger #2'
save_title = 'javenlin_20170825-server'
save_doc_title = save_title + '.docx'
save_csv = True # True / False
save_csv_title = save_title + '.csv'
# Locate the file or folder
path = get_path()
print("\n")
print("File or folder selected: \n")
print(path)
print("\n")
# Import and clean vibration data
# Returns data, broken up into segments of X seconds.
# It is quicker to FFT a series of data and average the results
data = path2data(path, eventtime = 60)
# Import data as a single file for visualization along the time axis.
print("\n")
print("Pulling data from path... \n")
dataS = csv2data(path)
print("\n")
print("Printing overview... \n")
dataS = toWindow(dataS)
# Calculate dataset features
th = 0.5 # Threshhold, [G]
peaks, mean = vib_peaks(data, th)
sig3_max, sig3_min = sigma_calc(peaks, 3)
# Input Profile Label defined at the top
input_profile = vib_profiles(input_profile_label)
avg_psd, max_psd, min_psd = psd_avg_data(data)
plot_psd(data_psd, cols, input_profile)
input_profile = vib_profiles(input_profile_label)
| 25.121212 | 70 | 0.740651 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 877 | 0.528951 |
d74383d9925c9e5501ce517e56c8dbd310c32f9d | 399 | py | Python | Lesson 02 - Arrays/OddOccurrencesInArray_3.py | kourouklides/codility-python | 0c193a3af3addd3d17a2b0eed6cd29ba6b396e9e | [
"Apache-2.0"
] | 11 | 2018-06-05T03:04:22.000Z | 2022-02-03T05:01:07.000Z | Lesson 02 - Arrays/OddOccurrencesInArray_3.py | kourouklides/codility-python | 0c193a3af3addd3d17a2b0eed6cd29ba6b396e9e | [
"Apache-2.0"
] | null | null | null | Lesson 02 - Arrays/OddOccurrencesInArray_3.py | kourouklides/codility-python | 0c193a3af3addd3d17a2b0eed6cd29ba6b396e9e | [
"Apache-2.0"
] | 6 | 2020-04-23T07:18:22.000Z | 2021-12-05T05:23:26.000Z | # you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(A):
    """Return the value of A that occurs an odd number of times.

    Codility's OddOccurrencesInArray guarantees every value is paired except
    exactly one.  collections.Counter keeps the original dict-based semantics
    (first insertion-ordered odd count wins, None for an empty array) while
    replacing the manual counting loop and the non-idiomatic `== None`
    comparison.  Runs in O(N) time, O(N) space.
    """
    from collections import Counter

    for value, count in Counter(A).items():
        if count % 2 == 1:
            return value
    return None
| 22.166667 | 54 | 0.591479 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.298246 |
d7442cc25fd608f9e2b752fac226f3d226409445 | 273 | py | Python | MANN/Utils/similarities.py | jgyllinsky/How-to-Learn-from-Little-Data | 3b7b481ac8aa376540a2ca1fff7046de9f86dad6 | [
"MIT"
] | 161 | 2017-05-06T01:37:30.000Z | 2021-12-15T09:58:26.000Z | MANN/Utils/similarities.py | jgyllinsky/How-to-Learn-from-Little-Data | 3b7b481ac8aa376540a2ca1fff7046de9f86dad6 | [
"MIT"
] | 8 | 2017-05-08T20:00:51.000Z | 2018-05-28T01:16:30.000Z | MANN/Utils/similarities.py | jgyllinsky/How-to-Learn-from-Little-Data | 3b7b481ac8aa376540a2ca1fff7046de9f86dad6 | [
"MIT"
] | 78 | 2017-05-06T03:27:31.000Z | 2020-12-21T17:24:20.000Z | import tensorflow as tf
def cosine_similarity(x, y, eps=1e-6):
    """Batched cosine similarity between two rank-3 tensors.

    :param x: tensor, assumed shape (batch, n, d) -- TODO confirm with callers
    :param y: tensor, assumed shape (batch, m, d) -- TODO confirm with callers
    :param eps: small constant added under the square root to avoid division
        by zero for zero-norm rows
    :return: tensor of shape (batch, n, m)

    ``tf.batch_matmul`` was removed in TensorFlow 1.0; ``tf.matmul`` batches
    automatically over the leading dimension of rank-3 inputs and is the
    drop-in replacement.
    """
    # Dot products between every row of x and every row of y.
    dots = tf.matmul(x, tf.transpose(y, perm=[0, 2, 1]))
    # Squared row norms, shaped for broadcasting to (batch, n, m).
    x_sq_norm = tf.expand_dims(tf.reduce_sum(tf.multiply(x, x), 2), 2)
    y_sq_norm = tf.expand_dims(tf.reduce_sum(tf.multiply(y, y), 2), 1)
    # eps sits inside the sqrt, exactly as in the original expression.
    return dots / tf.sqrt(tf.multiply(x_sq_norm, y_sq_norm) + eps)
| 34.125 | 141 | 0.688645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d74569333a69434699769603bb8e6b9c55c7c51c | 1,870 | py | Python | PythonAPI/quickstart/03-raycast.py | MaisJamal/Apollo-BT-GP | 4d8d721fa16b67e7ecefdd05d937b1153c000d63 | [
"Apache-2.0"
] | null | null | null | PythonAPI/quickstart/03-raycast.py | MaisJamal/Apollo-BT-GP | 4d8d721fa16b67e7ecefdd05d937b1153c000d63 | [
"Apache-2.0"
] | null | null | null | PythonAPI/quickstart/03-raycast.py | MaisJamal/Apollo-BT-GP | 4d8d721fa16b67e7ecefdd05d937b1153c000d63 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (c) 2019 LG Electronics, Inc.
#
# This software contains code licensed as described in LICENSE.
#
import os
import lgsvl
sim = lgsvl.Simulator(os.environ.get("SIMULATOR_HOST", "127.0.0.1"), 8181)
if sim.current_scene == "BorregasAve":
sim.reset()
else:
sim.load("BorregasAve")
# The next few lines spawns an EGO vehicle in the map
spawns = sim.get_spawn()
state = lgsvl.AgentState()
state.transform = spawns[0]
forward = lgsvl.utils.transform_to_forward(state.transform)
right = lgsvl.utils.transform_to_right(state.transform)
up = lgsvl.utils.transform_to_up(state.transform)
sim.add_agent("Lincoln2017MKZ (Apollo 5.0)", lgsvl.AgentType.EGO, state)
# This is the point from which the rays will originate from. It is raised 1m from the ground
p = spawns[0].position
p.y += 1
# useful bits in layer mask
# 0 - Default (road & ground)
# 9 - EGO vehicles
# 10 - NPC vehicles
# 11 - Pedestrian
# 12 - Obstacle
# Included layers can be hit by the rays. Otherwise the ray will go through the layer
layer_mask = 0
for bit in [0, 10, 11, 12]: # do not put 9 here, to not hit EGO vehicle itself
layer_mask |= 1 << bit
# raycast returns None if the ray doesn't collide with anything
# hit also has the point property which is the Unity position vector of where the ray collided with something
hit = sim.raycast(p, right, layer_mask)
if hit:
print("Distance right:", hit.distance)
hit = sim.raycast(p, -right, layer_mask)
if hit:
print("Distance left:", hit.distance)
hit = sim.raycast(p, -forward, layer_mask)
if hit:
print("Distance back:", hit.distance)
hit = sim.raycast(p, forward, layer_mask)
if hit:
print("Distance forward:", hit.distance)
hit = sim.raycast(p, up, layer_mask)
if hit:
print("Distance up:", hit.distance)
hit = sim.raycast(p, -up, layer_mask)
if hit:
print("Distance down:", hit.distance)
| 27.5 | 109 | 0.725668 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 886 | 0.473797 |
d746595f48844a5e6d0dbe01d637017879e8559b | 7,064 | py | Python | app/run.py | mourgaya/iscsi_ihm | aadf09590a02f6f1e6b16b000c2a4380e015f840 | [
"Apache-2.0"
] | null | null | null | app/run.py | mourgaya/iscsi_ihm | aadf09590a02f6f1e6b16b000c2a4380e015f840 | [
"Apache-2.0"
] | null | null | null | app/run.py | mourgaya/iscsi_ihm | aadf09590a02f6f1e6b16b000c2a4380e015f840 | [
"Apache-2.0"
] | null | null | null | #author : eric mourgya
#
import commands

from flask import (Flask, Response, abort, jsonify, redirect, request,
                   session, url_for)
from flask.ext.login import (LoginManager, UserMixin, login_required,
                             login_user, logout_user)
#@app.after_request
#def treat_as_plain_text(response):
# response.headers["content-type"] = "text/plain; charset=utf-8"
# return response
app = Flask(__name__)
app.secret_key="gloubiboulga"
login_manager = LoginManager()
login_manager.setup_app(app)
login_manager.login_view = "login"
class User(UserMixin):
def __init__(self, id):
self.id = id
self.name = "user" + str(id)
self.password = self.name + "_secret"
def __repr__(self):
return "%d/%s/%s" % (self.id, self.name, self.password)
@app.route('/', methods = ['GET'])
@login_required
def help():
    """Welcome page and help page."""
    # Map every registered route to its view docstring; the docstrings are
    # therefore user-visible and must stay meaningful.
    func_list = {
        rule.rule: app.view_functions[rule.endpoint].__doc__
        for rule in app.url_map.iter_rules()
        if rule.endpoint != 'static'
    }
    return jsonify(func_list)
def cmdline(cmd):
    # Run `cmd` through the shell and return its textual output.  On failure
    # an error string is returned instead (callers serve it back verbatim).
    status, output = commands.getstatusoutput(cmd)
    if status != 0:
        # Bug fix: the original concatenated the int `status` into a str,
        # which raised TypeError before the error could ever be reported.
        error_str = "%s: command failed! : %s %s" % (cmd, status, output)
        print(error_str)
        return error_str
    else:
        print(cmd + "done")
        return output
@app.route('/show/discovery')
@login_required
def showdiscovery():
    """------------------------Show discovery portals."""
    output = cmdline("iscsiadm -m discovery -P1")
    return Response(response=output, status=200, mimetype="text/plain")
@app.route('/show/nodes')
@login_required
def shownodes():
    """Show nodes."""
    output = cmdline("iscsiadm -m node -P1")
    return Response(response=output, status=200, mimetype="text/plain")
@app.route('/show/disks')
@login_required
def showdisk():
    """Show discovery disk."""
    output = cmdline("iscsiadm -m session -P3")
    return Response(response=output, status=200, mimetype="text/plain")
@app.route('/show/lsblk')
@login_required
def showlsblk():
    """Show discovery sessions and disks."""
    output = cmdline("lsblk")
    return Response(response=output, status=200, mimetype="text/plain")
@app.route('/show/sessiondetail')
@login_required
def showsessiondetail():
    """Show session in detail without disk."""
    output = cmdline("iscsiadm -m session -P1")
    return Response(response=output, status=200, mimetype="text/plain")
@app.route('/show/session')
@login_required
def showsession():
    """Show session ids"""
    output = cmdline("iscsiadm -m session")
    return Response(response=output, status=200, mimetype="text/plain")
@app.route('/show/specifiquesession',methods=["GET", "POST"])
@login_required
def showspecifiquesession():
    """show specifique session"""
    if request.method != 'POST':
        # GET: serve the input form.
        return Response('''
        <form action="" method="post">
            <p><input placeholder="session id" type=text name=session>
            <p><input type=submit value=submit>
        </form>
        ''')
    session_id = request.form['session']
    output = cmdline("iscsiadm -m session -r" + session_id + " -P3")
    return Response(response=output, status=200, mimetype="text/plain")
@app.route('/rescan/session',methods=["GET", "POST"])
@login_required
def rescansession():
    """rescan a specifique session"""
    if request.method == 'POST':
        # Bug fix: the posted session id was stored in an unused `ip`
        # variable while the command concatenated the *flask* `session`
        # proxy object, which raised TypeError at request time.
        session_id = request.form['session']
        cmdline("iscsiadm -m session -r" + session_id + " -R")
        return redirect(url_for('showspecifiquesession'), code=302)
    else:
        return Response('''
        <form action="" method="post">
            <p><input placeholder="session id" type=text name=session>
            <p><input type=submit value=submit>
        </form>
        ''')
@app.route('/make/discovery',methods=["GET", "POST"])
@login_required
def makediscovery():
    """make a discovery
    """
    if request.method != 'POST':
        # GET: serve the input form.
        return Response('''
        <form action="" method="post">
            <p><input placeholder="portal ip" type=text name=ip>
            <p><input type=submit value=submit>
        </form>
        ''')
    portal_ip = request.form['ip']
    print(portal_ip)
    output = cmdline("iscsiadm -m discovery -t sendtargets -p " + portal_ip + ":3260 -P 1")
    return Response(response=output, status=200, mimetype="text/plain")
@app.route('/make/nodelogin',methods=["GET", "POST"])
@login_required
def makenodelogin():
    """make a node login
    """
    if request.method == 'POST':
        ipaddr = request.form['ip']
        iqn = request.form['iqn']
        # Bug fix: the original concatenation produced
        # "...node <iqn>-p <ip>-o update..." (missing separating spaces),
        # which iscsiadm rejects.
        # SECURITY NOTE: `iqn`/`ipaddr` come from the request and are passed
        # to a shell via commands.getstatusoutput -- command injection risk;
        # validate or switch to a non-shell invocation.
        cmdres = "iscsiadm -m node " + iqn + " -p " + ipaddr + " -o update -n node.startup -v automatic"
        res = cmdline(cmdres)
        return Response(response=res, status=200, mimetype="text/plain")
    else:
        return Response('''
        <form action="" method="post">
            <p><input placeholder="portal ip" type=text name=ip>
            <p><input placeholder="portal iqn" type=text name=iqn>
            <p><input type=submit value=submit>
        </form>
        ''')
@app.route('/make/sessionlogin',methods=["GET", "POST"])
@login_required
def makesessionlogin():
    """make a session login
    """
    if request.method == 'POST':
        ipaddr = request.form['ip']
        iqn = request.form['iqn']
        # Bug fix: the original concatenation produced
        # "...node <iqn>-p <ip>-l" (missing separating spaces), which
        # iscsiadm rejects.
        # SECURITY NOTE: request values go straight into a shell command --
        # command injection risk; validate or avoid the shell.
        cmdres = "iscsiadm -m node " + iqn + " -p " + ipaddr + " -l"
        res = cmdline(cmdres)
        return Response(response=res, status=200, mimetype="text/plain")
    else:
        return Response('''
        <form action="" method="post">
            <p><input placeholder="portal ip" type=text name=ip>
            <p><input placeholder="portal iqn" type=text name=iqn>
            <p><input type=submit value=submit>
        </form>
        ''')
@app.route("/login", methods=["GET", "POST"])
def login():
    """login page"""
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        # Demo credential scheme: password is "<username>_secret".
        if password == username + "_secret":
            # Bug fix: split('user')[0] is always '' for names like "user3";
            # the numeric suffix is the element *after* the separator.
            id = username.split('user')[-1]
            user = User(id)
            login_user(user)
            return redirect(url_for('help'))
        else:
            # Requires `abort` to be imported from flask (missing in the
            # original import list, causing NameError on bad credentials).
            return abort(401)
    else:
        return Response('''
        <form action="" method="post">
            <p><input placeholder="Username" type=text name=username>
            <p><input placeholder="Password" type=password name=password>
            <p><input type=submit value=Login>
        </form>
        ''')
@app.route("/logout")
@login_required
def logout():
    """logout page """
    # End the flask-login session and confirm with a minimal HTML snippet.
    logout_user()
    return Response('<p>Logged out</p>')
@app.errorhandler(401)
def page_not_found(e):
    # NOTE(review): despite the name, this handles 401 (unauthorized), not
    # 404 -- it is the failed-login response.
    return Response('<p>Login failed</p>')
@login_manager.user_loader
def load_user(userid):
    # flask-login callback: rebuild the User object from the stored id.
    return User(userid)
app.run(debug=True,port=5001)
| 27.920949 | 98 | 0.614383 | 244 | 0.034541 | 0 | 0 | 5,113 | 0.723811 | 0 | 0 | 2,946 | 0.417044 |
d7469f7f64e499f08213f3dd92d29492cf06e4a9 | 539 | py | Python | src/config/defaults/sc2/config.py | ewanlee/mackrl | 6dd505aa09830f16c35a022f67e255db935c807e | [
"Apache-2.0"
] | 26 | 2019-10-28T09:01:45.000Z | 2021-09-20T08:56:12.000Z | src/config/defaults/sc2/config.py | ewanlee/mackrl | 6dd505aa09830f16c35a022f67e255db935c807e | [
"Apache-2.0"
] | 1 | 2020-07-25T06:50:05.000Z | 2020-07-25T06:50:05.000Z | src/config/defaults/sc2/config.py | ewanlee/mackrl | 6dd505aa09830f16c35a022f67e255db935c807e | [
"Apache-2.0"
] | 6 | 2019-12-18T12:02:57.000Z | 2021-03-03T13:15:47.000Z | def get_cfg(existing_cfg, _log):
"""
generates
"""
_sanity_check(existing_cfg, _log)
import ntpath, os, yaml
with open(os.path.join(os.path.dirname(__file__), "{}.yml".format(ntpath.basename(__file__).split(".")[0])),
'r') as stream:
try:
ret = yaml.load(stream)
except yaml.YAMLError as exc:
assert "Default config yaml for '{}' not found!".format(os.path.splitext(__file__)[0])
return ret
def _sanity_check(existing_cfg, _log):
    """
    Placeholder for config validation; currently accepts any config unchanged.
    """
    return
d74820e3197a81d3b2193a0e7a959fc2f8c0c24d | 1,191 | py | Python | analyser/utils/data_loader.py | michigg/web-simultaneous-recording-tool | 67db83f6e34d9cb726c69b4e448fed3604a43618 | [
"MIT"
] | 1 | 2022-03-30T09:45:25.000Z | 2022-03-30T09:45:25.000Z | analyser/utils/data_loader.py | michigg/web-simultaneous-recording-tool | 67db83f6e34d9cb726c69b4e448fed3604a43618 | [
"MIT"
] | null | null | null | analyser/utils/data_loader.py | michigg/web-simultaneous-recording-tool | 67db83f6e34d9cb726c69b4e448fed3604a43618 | [
"MIT"
] | null | null | null | import glob
import json
import pandas as pd
from models.analysis import Analysis
from models.devices import Devices
import logging
logger = logging.getLogger(__name__)
class Loader:
    """Static helpers for loading analysis data from CSV, JSON or pickle."""

    @staticmethod
    def load_analysis_from_csv(path: str, index_col=0, header=0) -> pd.DataFrame:
        """Read a semicolon-separated CSV into a DataFrame."""
        return pd.read_csv(path, sep=';', header=header, index_col=index_col)

    @staticmethod
    def load_analysis_from_json(directory: str) -> Devices:
        """Recursively load every ``*.json`` analysis below *directory*."""
        file_paths = glob.glob(f'{directory}/**/*.json', recursive=True)
        devices = Devices()
        logger.info(f'load_analysis: Found {len(file_paths)} files.')
        for path in file_paths:
            logger.info(f'load_analysis: load: {path}')
            with open(path) as file:
                devices.add_analysis(Analysis.from_json(json.load(file), path))
        devices.print_all()
        return devices

    @staticmethod
    def load_analysis_from_pickle(path: str) -> pd.DataFrame:
        """Load a pickled DataFrame, logging what was read.

        Bug fix: the original ended with a second ``pd.read_pickle(path)``,
        reading and unpickling the file from disk twice; return the object
        that was already loaded.
        """
        logger.info(f'load_analysis_from_pickle: path: {path}')
        dataframe = pd.read_pickle(path)
        logger.info('load_analysis_from_pickle: loaded dataframe')
        logger.info(dataframe)
        return dataframe
| 32.189189 | 81 | 0.680101 | 1,017 | 0.853904 | 0 | 0 | 987 | 0.828715 | 0 | 0 | 192 | 0.161209 |
d748a84338b8e8ea97ee055765e5c914d9d65349 | 320 | py | Python | catstuff/tools/argparser.py | modora/catstuff | 7a4ea67e26774e42e90d0c71a4b2c299fe506d73 | [
"MIT"
] | null | null | null | catstuff/tools/argparser.py | modora/catstuff | 7a4ea67e26774e42e90d0c71a4b2c299fe506d73 | [
"MIT"
] | 7 | 2017-12-16T06:16:56.000Z | 2017-12-26T06:21:18.000Z | catstuff/tools/argparser.py | modora/catstuff | 7a4ea67e26774e42e90d0c71a4b2c299fe506d73 | [
"MIT"
] | null | null | null | import argparse, sys
__version__ = '1.0.2'
class CSArgParser(argparse.ArgumentParser):
    """ Argument parser that shows help if there is an error """

    def error(self, message, exit=False):
        # Report the problem on stderr, then print usage instead of bailing
        # out (unless the caller explicitly asks to exit with status 2).
        print('Error: {}'.format(message), file=sys.stderr)
        self.print_help()
        if exit:
            sys.exit(2)
| 24.615385 | 64 | 0.628125 | 273 | 0.853125 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.25 |
d74a05fdbfc357bdb2194d73ae4809aafa378aee | 9,565 | py | Python | custom_components/google_home/sensor.py | tmttn/home-assistant-config | a707a95bb9458a267d7fbf21cde0464af99b5253 | [
"MIT"
] | 1 | 2022-01-01T20:07:21.000Z | 2022-01-01T20:07:21.000Z | custom_components/google_home/sensor.py | tmttn/home-assistant-config | a707a95bb9458a267d7fbf21cde0464af99b5253 | [
"MIT"
] | null | null | null | custom_components/google_home/sensor.py | tmttn/home-assistant-config | a707a95bb9458a267d7fbf21cde0464af99b5253 | [
"MIT"
] | null | null | null | """Sensor platform for Google Home"""
from __future__ import annotations
import logging
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import DEVICE_CLASS_TIMESTAMP, STATE_UNAVAILABLE
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.entity import Entity, EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import (
ALARM_AND_TIMER_ID_LENGTH,
DATA_CLIENT,
DATA_COORDINATOR,
DOMAIN,
GOOGLE_HOME_ALARM_DEFAULT_VALUE,
ICON_ALARMS,
ICON_TIMERS,
ICON_TOKEN,
LABEL_ALARMS,
LABEL_DEVICE,
LABEL_TIMERS,
SERVICE_ATTR_ALARM_ID,
SERVICE_ATTR_TIMER_ID,
SERVICE_DELETE_ALARM,
SERVICE_DELETE_TIMER,
SERVICE_REBOOT,
)
from .entity import GoogleHomeBaseEntity
from .models import GoogleHomeAlarmStatus, GoogleHomeDevice, GoogleHomeTimerStatus
from .types import (
AlarmsAttributes,
DeviceAttributes,
GoogleHomeAlarmDict,
GoogleHomeTimerDict,
TimersAttributes,
)
_LOGGER: logging.Logger = logging.getLogger(__package__)
async def async_setup_entry(
    hass: HomeAssistant,
    entry: ConfigEntry,
    async_add_devices: AddEntitiesCallback,
) -> bool:
    """Set up the Google Home sensor platform.

    Creates one device sensor per coordinator device, plus alarm and timer
    sensors for devices that are reachable and have an auth token, and
    registers the delete-alarm/delete-timer/reboot entity services.
    """
    client = hass.data[DOMAIN][entry.entry_id][DATA_CLIENT]
    coordinator = hass.data[DOMAIN][entry.entry_id][DATA_COORDINATOR]
    sensors: list[Entity] = []
    for device in coordinator.data:
        sensors.append(
            GoogleHomeDeviceSensor(
                coordinator,
                client,
                device.device_id,
                device.name,
                device.hardware,
            )
        )
        # Alarm/timer data requires a local auth token and a reachable device.
        if device.auth_token and device.available:
            sensors += [
                GoogleHomeAlarmsSensor(
                    coordinator,
                    client,
                    device.device_id,
                    device.name,
                    device.hardware,
                ),
                GoogleHomeTimersSensor(
                    coordinator,
                    client,
                    device.device_id,
                    device.name,
                    device.hardware,
                ),
            ]
    async_add_devices(sensors)

    platform = entity_platform.current_platform.get()

    # Services (dispatched to the entity methods named in the last argument)
    platform.async_register_entity_service(
        SERVICE_DELETE_ALARM,
        {vol.Required(SERVICE_ATTR_ALARM_ID): cv.string},
        "async_delete_alarm",
    )

    platform.async_register_entity_service(
        SERVICE_DELETE_TIMER,
        {vol.Required(SERVICE_ATTR_TIMER_ID): cv.string},
        "async_delete_timer",
    )

    platform.async_register_entity_service(
        SERVICE_REBOOT,
        {},
        "async_reboot_device",
    )

    return True
class GoogleHomeDeviceSensor(GoogleHomeBaseEntity):
    """Google Home Device sensor: exposes the device IP as state plus
    diagnostic attributes (id, name, auth token, availability)."""

    _attr_icon = ICON_TOKEN
    _attr_entity_category = EntityCategory.DIAGNOSTIC

    @property
    def label(self) -> str:
        """Label to use for name and unique id."""
        return LABEL_DEVICE

    @property
    def state(self) -> str | None:
        # State is the device's IP address, or None while it is unknown.
        device = self.get_device()
        return device.ip_address if device else None

    @property
    def extra_state_attributes(self) -> DeviceAttributes:
        """Return the state attributes."""
        device = self.get_device()
        # Fallback attributes used when the device is not (yet) known.
        attributes: DeviceAttributes = {
            "device_id": None,
            "device_name": self.device_name,
            "auth_token": None,
            "ip_address": None,
            "available": False,
        }
        return self.get_device_attributes(device) if device else attributes

    @staticmethod
    def get_device_attributes(device: GoogleHomeDevice) -> DeviceAttributes:
        """Device representation as dictionary"""
        return {
            "device_id": device.device_id,
            "device_name": device.name,
            "auth_token": device.auth_token,
            "ip_address": device.ip_address,
            "available": device.available,
        }

    async def async_reboot_device(self) -> None:
        """Reboot the device (entity service handler)."""
        device = self.get_device()
        if device is None:
            _LOGGER.error("Device %s is not found.", self.device_name)
            return
        await self.client.reboot_google_device(device)
class GoogleHomeAlarmsSensor(GoogleHomeBaseEntity):
    """Google Home Alarms sensor: state is the next active alarm's local
    ISO timestamp; attributes carry status, volume and the full alarm list."""

    _attr_icon = ICON_ALARMS
    _attr_device_class = DEVICE_CLASS_TIMESTAMP

    @property
    def label(self) -> str:
        """Label to use for name and unique id."""
        return LABEL_ALARMS

    @property
    def state(self) -> str | None:
        # Inactive/missed alarms do not count as the "next" alarm.
        device = self.get_device()
        if not device:
            return None
        next_alarm = device.get_next_alarm()
        return (
            next_alarm.local_time_iso
            if next_alarm
            and next_alarm.status
            not in (GoogleHomeAlarmStatus.INACTIVE, GoogleHomeAlarmStatus.MISSED)
            else STATE_UNAVAILABLE
        )

    @property
    def extra_state_attributes(self) -> AlarmsAttributes:
        """Return the state attributes."""
        return {
            "next_alarm_status": self._get_next_alarm_status(),
            "alarm_volume": self._get_alarm_volume(),
            "alarms": self._get_alarms_data(),
        }

    def _get_next_alarm_status(self) -> str:
        """Next alarm status (lower-cased enum name) from the coordinator."""
        device = self.get_device()
        next_alarm = device.get_next_alarm() if device else None
        return (
            next_alarm.status.name.lower()
            if next_alarm
            else GoogleHomeAlarmStatus.NONE.name.lower()
        )

    def _get_alarm_volume(self) -> float:
        """Alarm volume from the coordinator, or the integration default."""
        device = self.get_device()
        alarm_volume = device.get_alarm_volume() if device else None
        # NOTE(review): a reported volume of 0 is falsy and therefore falls
        # back to the default here -- confirm this is intended.
        return alarm_volume if alarm_volume else GOOGLE_HOME_ALARM_DEFAULT_VALUE

    def _get_alarms_data(self) -> list[GoogleHomeAlarmDict]:
        """All alarms as dictionaries, sorted, from the coordinator."""
        device = self.get_device()
        return (
            [alarm.as_dict() for alarm in device.get_sorted_alarms()] if device else []
        )

    @staticmethod
    def is_valid_alarm_id(alarm_id: str) -> bool:
        """Checks if the alarm id provided is valid ("alarm/" prefix, fixed length)."""
        return (
            alarm_id.startswith("alarm/") and len(alarm_id) == ALARM_AND_TIMER_ID_LENGTH
        )

    async def async_delete_alarm(self, alarm_id: str) -> None:
        """Service call to delete an alarm on the device."""
        device = self.get_device()
        if device is None:
            _LOGGER.error("Device %s is not found.", self.device_name)
            return
        if not self.is_valid_alarm_id(alarm_id):
            _LOGGER.error(
                "Incorrect ID format! Please provide a valid alarm ID. "
                "See services tab for more info."
            )
            return
        await self.client.delete_alarm_or_timer(device=device, item_to_delete=alarm_id)
class GoogleHomeTimersSensor(GoogleHomeBaseEntity):
    """Google Home Timers sensor: state is the next timer's local ISO
    timestamp; attributes carry the timer status and the full timer list."""

    # Bug fix: this was `_attr_icons`, which Home Assistant ignores -- the
    # recognized entity attribute is `_attr_icon`, as used by the sibling
    # GoogleHomeDeviceSensor and GoogleHomeAlarmsSensor classes.
    _attr_icon = ICON_TIMERS
    _attr_device_class = DEVICE_CLASS_TIMESTAMP

    @property
    def label(self) -> str:
        """Label to use for name and unique id."""
        return LABEL_TIMERS

    @property
    def state(self) -> str | None:
        # Timers without a computed fire time report as unavailable.
        device = self.get_device()
        if not device:
            return None
        timer = device.get_next_timer()
        return (
            timer.local_time_iso
            if timer and timer.local_time_iso
            else STATE_UNAVAILABLE
        )

    @property
    def extra_state_attributes(self) -> TimersAttributes:
        """Return the state attributes."""
        return {
            "next_timer_status": self._get_next_timer_status(),
            "timers": self._get_timers_data(),
        }

    def _get_next_timer_status(self) -> str:
        """Next timer status (lower-cased enum name) from the coordinator."""
        device = self.get_device()
        next_timer = device.get_next_timer() if device else None
        return (
            next_timer.status.name.lower()
            if next_timer
            else GoogleHomeTimerStatus.NONE.name.lower()
        )

    def _get_timers_data(self) -> list[GoogleHomeTimerDict]:
        """All timers as dictionaries, sorted, from the coordinator."""
        device = self.get_device()
        return (
            [timer.as_dict() for timer in device.get_sorted_timers()] if device else []
        )

    @staticmethod
    def is_valid_timer_id(timer_id: str) -> bool:
        """Checks if the timer id provided is valid ("timer/" prefix, fixed length)."""
        return (
            timer_id.startswith("timer/") and len(timer_id) == ALARM_AND_TIMER_ID_LENGTH
        )

    async def async_delete_timer(self, timer_id: str) -> None:
        """Service call to delete a timer on the device."""
        device = self.get_device()
        if device is None:
            _LOGGER.error("Device %s is not found.", self.device_name)
            return
        if not self.is_valid_timer_id(timer_id):
            _LOGGER.error(
                "Incorrect ID format! Please provide a valid timer ID. "
                "See services tab for more info."
            )
            return
        await self.client.delete_alarm_or_timer(device=device, item_to_delete=timer_id)
| 30.657051 | 88 | 0.624255 | 6,647 | 0.694929 | 0 | 0 | 3,047 | 0.318557 | 3,173 | 0.33173 | 1,415 | 0.147935 |
d74aea35b8907c25e7a9565f11c2bcb6503f3a32 | 2,691 | py | Python | ContinuumBenchmarks/MNIST/Continual-Learning-Benchmark/Plot.py | hikmatkhan/Continuum | 83302753648e0d521d8b5f57edae5df4307c0a5c | [
"MIT"
] | null | null | null | ContinuumBenchmarks/MNIST/Continual-Learning-Benchmark/Plot.py | hikmatkhan/Continuum | 83302753648e0d521d8b5f57edae5df4307c0a5c | [
"MIT"
] | null | null | null | ContinuumBenchmarks/MNIST/Continual-Learning-Benchmark/Plot.py | hikmatkhan/Continuum | 83302753648e0d521d8b5f57edae5df4307c0a5c | [
"MIT"
] | null | null | null | import torch
import pandas as pd
# "Avg_NormalNN",
Approaches = ["NormalNN", "EWC",
"SI", "L2", "Naive_Rehearsal_1100", "Naive_Rehearsal_4400",
"MAS", "GEM_1100", "GEM_4400"
]
REPEAT = 10
OutDirPath = "/home/hikmat/Desktop/JWorkspace/CL/Continuum/ContinuumBenchmarks/MNIST/Continual-Learning-Benchmark/scripts/outputs/permuted_MNIST_incremental_domain_10"
# OutDirPath = "/home/hikmat/Desktop/JWorkspace/CL/Continuum/ContinuumBenchmarks/MNIST/Continual-Learning-Benchmark/scripts/outputs/permuted_MNIST_incremental_domain_10"
def get_avg_acc(acc_dict, num_tasks):
    """Compute average-accuracy curves from a nested accuracy dict.

    Returns (avg_acc, task_avg_acc): column-wise averages over tasks seen
    so far, and row/column block averages over the remaining tasks.
    """
    acc_matrix = to_tensor(acc_dict, num_tasks)
    avg_acc = torch.zeros(num_tasks)
    task_avg_acc = torch.zeros(num_tasks)
    for col in range(num_tasks):
        # Average accuracy on task `col` over the tasks learned up to it.
        avg_acc[col] = acc_matrix[:, col].sum() / (col + 1)
        # Average accuracy of task `col` over all later evaluation points.
        task_avg_acc[col] = acc_matrix[col:(col + 1), col:].sum() / (num_tasks - col)
    print("Avg_acc:", avg_acc)
    print("Task Avg_acc:", task_avg_acc)
    return avg_acc, task_avg_acc
def to_tensor(acc_dict, num_tasks):
    """Convert {row: {col: acc}} (1-based string keys) into a dense tensor."""
    matrix = torch.zeros(size=(num_tasks, num_tasks))
    for row_key, row in acc_dict.items():
        for col_key, value in row.items():
            matrix[int(row_key) - 1][int(col_key) - 1] = value
    return matrix
if __name__ == '__main__':
    # For every approach, aggregate the per-repetition accuracy matrices
    # into average-accuracy curves and dump one CSV per approach.
    for approach in Approaches:
        avg_acc_lst = []
        task_avg_acc_lst = []
        for exp_rep in range(1, REPEAT+1):
            acc_matrix_path = "{0}/{1}_{2}-precision_record.pt".format(OutDirPath, approach, exp_rep)
            print(acc_matrix_path)
            acc_dict = torch.load(acc_matrix_path)
            num_tasks = len(acc_dict.keys())
            avg_acc, task_avg_acc = get_avg_acc(acc_dict, num_tasks)
            avg_acc_lst.append(avg_acc.numpy())
            task_avg_acc_lst.append(task_avg_acc.numpy())
        # NOTE(review): num_tasks is taken from the *last* repetition; this
        # assumes every repetition recorded the same number of tasks.
        avg_acc_pd = pd.DataFrame(avg_acc_lst, columns=list(range(1, num_tasks + 1)))
        avg_acc_pd.to_csv("{0}/_PD_{1}.csv".format(OutDirPath, approach), index=False)
        # break
    # print(avg_acc_lst)
    print(avg_acc_pd)
    # for key in acc_dict.keys():
    # print("K:", key)
    # print(acc_dict)
    # print(acc_dict["1"])
    # print("Avg Acc:", torch.mean(acc_matrix, dim=0))
    # print("Task Avg:", torch.mean(acc_matrix, dim=1))
    # print(acc_matrix)
    # acc_matrix_zeros = torch.zeros(size=(10, 10))
    # acc_matrix_ones = torch.ones(size=(10, 10))
    # acc_matrix_zeros[0][0] = 100
    # avg = acc_matrix_zeros + acc_matrix_ones
    # print(avg/2)
| 38.442857 | 169 | 0.66741 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 979 | 0.363805 |
d74be0e6efcefb06e3529cae5316490cf81fffce | 130 | py | Python | fmojinja/awk/__main__.py | Taro-Imahiro/fmojinja | 5b2c4100e25bd198b1cf8ed44b37b1c4bb93d2ee | [
"MIT"
] | null | null | null | fmojinja/awk/__main__.py | Taro-Imahiro/fmojinja | 5b2c4100e25bd198b1cf8ed44b37b1c4bb93d2ee | [
"MIT"
] | 13 | 2021-07-06T07:03:58.000Z | 2021-11-22T15:25:22.000Z | fmojinja/awk/__main__.py | Taro-Imahiro/fmojinja | 5b2c4100e25bd198b1cf8ed44b37b1c4bb93d2ee | [
"MIT"
] | 3 | 2021-06-26T19:40:34.000Z | 2021-09-15T04:24:21.000Z | from ..mixin import SubCommands
from .pdb_reformer import PdbReformer
# Register the pdb_reformer subcommand and dispatch based on CLI arguments.
SubCommands.main_proc({
    "pdb_reformer": PdbReformer
})
| 18.571429 | 37 | 0.784615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.107692 |
d74c74f32a5497e7e19bbaa47aa3d3f1d06f9e90 | 848 | py | Python | src/home_automation_hub/storage.py | levidavis/py-home | 3cc30e19d506824de9816ad9dbcfba4338a7dfa8 | [
"MIT"
] | 26 | 2018-08-21T19:54:21.000Z | 2021-10-15T18:59:17.000Z | src/home_automation_hub/storage.py | levidavis/py-home | 3cc30e19d506824de9816ad9dbcfba4338a7dfa8 | [
"MIT"
] | 3 | 2020-01-23T03:54:24.000Z | 2020-07-19T13:10:22.000Z | src/home_automation_hub/storage.py | levidavis/py-home | 3cc30e19d506824de9816ad9dbcfba4338a7dfa8 | [
"MIT"
] | 11 | 2018-09-18T21:31:11.000Z | 2021-07-03T11:23:30.000Z | import redis
import json
from . import config
# Shared module-level Redis client; stays None until set_up() is called
# during application start-up.  ModuleStorage methods read this global.
redis_instance = None
def set_up(host, port, db):
    """Initialise the shared Redis connection used by all ModuleStorage instances."""
    global redis_instance
    redis_instance = redis.StrictRedis(host=host, port=port, db=db)
class ModuleStorage:
    """Namespaced key/value storage for a single home-automation module.

    Values are wrapped in a ``{"data": ...}`` JSON envelope and stored in
    the shared module-level Redis connection under a per-module prefix.
    """

    def __init__(self, module_id):
        module_conf = config.config.enabled_modules[module_id]
        self.key_prefix = "module:" + module_conf["storage_prefix"]

    @property
    def redis(self):
        """Expose the shared Redis client."""
        return redis_instance

    def prefixed_key(self, key):
        """Return *key* qualified with this module's storage prefix."""
        return "{}:{}".format(self.key_prefix, key)

    def get(self, key):
        """Fetch and unwrap the JSON payload stored under *key*, or None."""
        raw = redis_instance.get(self.prefixed_key(key))
        if not raw:
            return None
        return json.loads(raw).get("data")

    def set(self, key, value):
        """Wrap *value* in a JSON envelope and store it under *key*."""
        payload = json.dumps({"data": value})
        return redis_instance.set(self.prefixed_key(key), payload)
| 24.941176 | 96 | 0.659198 | 652 | 0.768868 | 0 | 0 | 60 | 0.070755 | 0 | 0 | 63 | 0.074292 |
d74cb954739227c81cdf0acf2e81f09cf919377a | 475 | py | Python | example/abc-preview.py | lochbrunner/vscode-generic-binary-preview | 9cc44089d9b51c9e8a64547f086c61bf86d20a0c | [
"MIT"
] | 1 | 2022-02-26T20:29:40.000Z | 2022-02-26T20:29:40.000Z | example/abc-preview.py | lochbrunner/vscode-generic-binary-preview | 9cc44089d9b51c9e8a64547f086c61bf86d20a0c | [
"MIT"
] | 1 | 2021-08-24T17:12:00.000Z | 2021-08-24T17:12:23.000Z | example/abc-preview.py | lochbrunner/vscode-generic-binary-preview | 9cc44089d9b51c9e8a64547f086c61bf86d20a0c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import argparse
import os
import sys
import pickle
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('filename')
args = parser.parse_args()
if os.path.splitext(args.filename)[1] != '.abc':
# We can not read this file type
sys.exit(1)
with open(args.filename, 'rb') as f:
obj = pickle.load(f)
name = obj['name']
print("<p>Your name is {}</p>".format(name))
| 21.590909 | 52 | 0.612632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 113 | 0.237895 |
d74d3899741085d63737a7cf25e86b7c54dd0fdb | 12,299 | py | Python | obsolete/pipeline_mapping_benchmark.py | cdrakesmith/CGATPipelines | 3c94ae4f9d87d51108255dc405c4b95af7c8b694 | [
"MIT"
] | 49 | 2015-04-13T16:49:25.000Z | 2022-03-29T10:29:14.000Z | obsolete/pipeline_mapping_benchmark.py | cdrakesmith/CGATPipelines | 3c94ae4f9d87d51108255dc405c4b95af7c8b694 | [
"MIT"
] | 252 | 2015-04-08T13:23:34.000Z | 2019-03-18T21:51:29.000Z | obsolete/pipeline_mapping_benchmark.py | cdrakesmith/CGATPipelines | 3c94ae4f9d87d51108255dc405c4b95af7c8b694 | [
"MIT"
] | 22 | 2015-05-21T00:37:52.000Z | 2019-09-25T05:04:27.000Z | """=========================================
Read Mapping parameter titration pipeline
=========================================
* align reads to the genome using a range of different parameters
* calculate alignment statistics
Requirements
------------
On top of the default CGAT setup, the pipeline requires the following
software to be in the path:
+--------------------+-------------------+------------------------------------------------+
|*Program* |*Version* |*Purpose* |
+--------------------+-------------------+------------------------------------------------+
|bowtie_ |>=0.12.7 |read mapping |
+--------------------+-------------------+------------------------------------------------+
Pipline Output
==============
The results of the computation are all stored in an sqlite relational
database :file:`csvdb`.
Glossary
========
.. glossary::
bowtie
bowtie_ - a read mapper
.. _bowtie: http://bowtie-bio.sourceforge.net/index.shtml
Code
====
"""
import sys
import os
import CGAT.Experiment as E
from ruffus import *
import pysam
import CGATPipelines.PipelineMapping as PipelineMapping
import CGATPipelines.Pipeline as P
# Submit jobs to the cluster (passed to ruffus tasks as to_cluster).
USECLUSTER = True

###################################################
###################################################
###################################################
# Pipeline configuration
###################################################
P.getParameters(["%s/pipeline.ini" %
                 os.path.splitext(__file__)[0], "../pipeline.ini", "pipeline.ini"])
PARAMS = P.PARAMS

# Parameter titration grid mapping a short label to bowtie options.
# 'nXmY' uses -n (seed mismatches) X with -m (max multi-hits) Y;
# 'vXmY' uses -v (end-to-end mismatches) X with -m Y.  Every setting trims
# one base from the 3' end (-3 1) and reports only best-stratum hits.
bowtie_options = {'n0m1': "-n 0 -a --best --strata -m 1 -3 1", 'n1m1': "-n 1 -a --best --strata -m 1 -3 1", 'n2m1': "-n 2 -a --best --strata -m 1 -3 1", 'n3m1': "-n 3 -a --best --strata -m 1 -3 1",
                  'n0m2': "-n 0 -a --best --strata -m 2 -3 1", 'n1m2': "-n 1 -a --best --strata -m 2 -3 1", 'n2m2': "-n 2 -a --best --strata -m 2 -3 1", 'n3m2': "-n 3 -a --best --strata -m 2 -3 1",
                  'n0m3': "-n 0 -a --best --strata -m 3 -3 1", 'n1m3': "-n 1 -a --best --strata -m 3 -3 1", 'n2m3': "-n 2 -a --best --strata -m 3 -3 1", 'n3m3': "-n 3 -a --best --strata -m 3 -3 1",
                  'n0m4': "-n 0 -a --best --strata -m 4 -3 1", 'n1m4': "-n 1 -a --best --strata -m 4 -3 1", 'n2m4': "-n 2 -a --best --strata -m 4 -3 1", 'n3m4': "-n 3 -a --best --strata -m 4 -3 1",
                  'n0m5': "-n 0 -a --best --strata -m 5 -3 1", 'n1m5': "-n 1 -a --best --strata -m 5 -3 1", 'n2m5': "-n 2 -a --best --strata -m 5 -3 1", 'n3m5': "-n 3 -a --best --strata -m 5 -3 1",
                  'v0m1': "-v 0 -a --best --strata -m 1 -3 1", 'v1m1': "-v 1 -a --best --strata -m 1 -3 1", 'v2m1': "-v 2 -a --best --strata -m 1 -3 1", 'v3m1': "-v 3 -a --best --strata -m 1 -3 1",
                  'v0m2': "-v 0 -a --best --strata -m 2 -3 1", 'v1m2': "-v 1 -a --best --strata -m 2 -3 1", 'v2m2': "-v 2 -a --best --strata -m 2 -3 1", 'v3m2': "-v 3 -a --best --strata -m 2 -3 1",
                  'v0m3': "-v 0 -a --best --strata -m 3 -3 1", 'v1m3': "-v 1 -a --best --strata -m 3 -3 1", 'v2m3': "-v 2 -a --best --strata -m 3 -3 1", 'v3m3': "-v 3 -a --best --strata -m 3 -3 1",
                  'v0m4': "-v 0 -a --best --strata -m 4 -3 1", 'v1m4': "-v 1 -a --best --strata -m 4 -3 1", 'v2m4': "-v 2 -a --best --strata -m 4 -3 1", 'v3m4': "-v 3 -a --best --strata -m 4 -3 1",
                  'v0m5': "-v 0 -a --best --strata -m 5 -3 1", 'v1m5': "-v 1 -a --best --strata -m 5 -3 1", 'v2m5': "-v 2 -a --best --strata -m 5 -3 1", 'v3m5': "-v 3 -a --best --strata -m 5 -3 1"}
###################################################################
###################################################################
###################################################################
# MAP READS
@files([(PARAMS["test_file"], "%s.bam" % x, bowtie_options.get(x)) for x in list(bowtie_options.keys())])
def buildBAM(infile, outfile, options):
    '''Map reads with bowtie, once per option set in bowtie_options.'''
    job_threads = PARAMS["bowtie_threads"]
    m = PipelineMapping.Bowtie()
    reffile = PARAMS["samtools_genome"]
    bowtie_options = options
    # NOTE(review): job_threads / reffile / bowtie_options look unused, but
    # CGAT's mapper.build()/P.run() interpolate the caller's local variables
    # into the generated command -- confirm before removing any of them.
    statement = m.build((infile,), outfile)
    # print(statement)
    P.run()
#########################################################################
@transform(buildBAM,
           regex(r"(\S+).bam"),
           r"\1.nsrt.bam")
def sortByName(infile, outfile):
    '''Sort the BAM file by read name (samtools sort -n), as required by addNHTag.'''
    to_cluster = USECLUSTER
    track = P.snip(outfile, ".bam")
    # %(infile)s / %(track)s are interpolated from these locals by P.run().
    statement = '''samtools sort -n %(infile)s %(track)s;'''
    P.run()
#########################################################################
def _read_sets(samfile, keep_unmapped=False):
    """Yield lists of alignments sharing a query name from a name-sorted BAM.

    BUG FIX: ``read_sets`` was referenced by addNHTag but never defined or
    imported anywhere in this module, so the task raised NameError at
    runtime.  This helper restores the intended grouping: consecutive
    records with the same query name form one read set.  The input must be
    sorted by read name (see sortByName).  When *keep_unmapped* is False,
    unmapped reads are dropped from the emitted groups.
    """
    group = []
    for read in samfile:
        if not keep_unmapped and read.is_unmapped:
            continue
        if group and read.qname != group[-1].qname:
            yield group
            group = []
        group.append(read)
    if group:
        yield group


@transform(sortByName,
           regex(r"(\S+).nsrt.bam"),
           r"\1.nh.bam")
def addNHTag(infile, outfile):
    '''Add number-of-hits (NH) tags to each alignment in a name-sorted BAM.'''
    to_cluster = USECLUSTER
    inf = pysam.Samfile(infile, "rb")
    outf = pysam.Samfile(outf_name if False else outfile, "wb", template=inf)
    for readset in _read_sets(inf, keep_unmapped=True):
        # NH is the number of alignments for this query; unmapped reads get 0.
        nh = len(readset)
        for read in readset:
            if (read.is_unmapped):
                nh = 0
            read.tags = read.tags + [("NH", nh)]
            outf.write(read)
    inf.close()
    outf.close()
#########################################################################
@transform(addNHTag,
           regex(r"(\S+).bam"),
           r"\1.srt.bam")
def sortByPosition(infile, outfile):
    '''Sort the BAM file by genomic position (samtools sort), as required by MarkDuplicates.'''
    to_cluster = USECLUSTER
    track = P.snip(outfile, ".bam")
    # %(infile)s / %(track)s are interpolated from these locals by P.run().
    statement = '''samtools sort %(infile)s %(track)s;'''
    P.run()
#########################################################################
@transform(sortByPosition,
           regex(r"(\S+).nh.srt.bam"),
           r"\1.dedup.bam")
def dedup(infiles, outfile):
    '''Remove duplicate alignments from BAM files.'''
    to_cluster = USECLUSTER
    track = P.snip(outfile, ".bam")
    # Unlike the other tasks, the placeholders here are expanded immediately
    # via "% locals()" rather than deferred to P.run().
    statement = '''MarkDuplicates INPUT=%(infiles)s ASSUME_SORTED=true OUTPUT=%(outfile)s METRICS_FILE=%(track)s.dupstats VALIDATION_STRINGENCY=SILENT; ''' % locals(
    )
    statement += '''samtools index %(outfile)s; ''' % locals()
    # print statement
    P.run()
#########################################################################
@merge(dedup, "picard_duplicate_stats.load")
def loadPicardDuplicateStats(infiles, outfile):
    '''Merge Picard duplicate stats into single table and load into SQLite.'''
    tablename = P.toTable(outfile)
    # NOTE(review): writes (and leaves behind) "dupstats.txt" in the working
    # directory; lines[1] assumes each stats file has >= 2 non-comment lines.
    outf = open('dupstats.txt', 'w')
    first = True
    for f in infiles:
        track = P.snip(os.path.basename(f), ".dedup.bam")
        statfile = P.snip(f, ".bam") + ".dupstats"
        if not os.path.exists(statfile):
            E.warn("File %s missing" % statfile)
            continue
        # Keep only non-comment, non-blank lines of the Picard metrics file.
        lines = [x for x in open(
            statfile, "r").readlines() if not x.startswith("#") and x.strip()]
        if first:
            outf.write("%s\t%s" % ("track", lines[0]))
            first = False
        outf.write("%s\t%s" % (track, lines[1]))
    outf.close()
    tmpfilename = outf.name
    # %(tmpfilename)s / %(tablename)s / %(outfile)s interpolated by P.run().
    statement = '''cat %(tmpfilename)s
                | cgat csv2db
                      --add-index=track
                      --table=%(tablename)s
                > %(outfile)s
                '''
    P.run()
#########################################################################
@transform(dedup,
           regex(r"(\S+).dedup.bam"),
           r"\1.readstats")
def buildBAMStats(infile, outfile):
    '''Count number of reads mapped, duplicates, etc. '''
    to_cluster = USECLUSTER
    scriptsdir = PARAMS["general_scriptsdir"]
    # %(outfile)s / %(infile)s interpolated by P.run(); %%s is left literal
    # for bam2stats' own output-filename pattern.
    statement = '''cgat bam2stats --force-output
                      --output-filename-pattern=%(outfile)s.%%s < %(infile)s > %(outfile)s'''
    P.run()
#########################################################################
@merge(buildBAMStats, "bam_stats.load")
def loadBAMStats(infiles, outfile):
    '''Import bam statistics into SQLite'''
    scriptsdir = PARAMS["general_scriptsdir"]
    # One column per track, named after the input file minus ".readstats".
    header = ",".join([P.snip(os.path.basename(x), ".readstats")
                       for x in infiles])
    filenames = " ".join(["<( cut -f 1,2 < %s)" % x for x in infiles])
    tablename = P.toTable(outfile)
    E.info("loading bam stats - summary")
    # Placeholders below are interpolated from these locals by P.run().
    statement = """cgat combine_tables
                      --header-names=%(header)s
                      --missing-value=0
                      --ignore-empty
                   %(filenames)s
                | perl -p -e "s/bin/track/"
                | perl -p -e "s/unique/unique_alignments/"
                | cgat table2table --transpose
                | cgat csv2db
                      --allow-empty-file
                      --add-index=track
                      --table=%(tablename)s
                > %(outfile)s"""
    P.run()
    # Additionally load the per-mismatch (nm) and per-hit (nh) histograms.
    for suffix in ("nm", "nh"):
        E.info("loading bam stats - %s" % suffix)
        filenames = " ".join(["%s.%s" % (x, suffix) for x in infiles])
        tname = "%s_%s" % (tablename, suffix)
        statement = """cgat combine_tables
                          --header-names=%(header)s
                          --skip-titles
                          --missing-value=0
                          --ignore-empty
                       %(filenames)s
                    | perl -p -e "s/bin/%(suffix)s/"
                    | cgat csv2db
                          --table=%(tname)s
                          --allow-empty-file
                    >> %(outfile)s """
        P.run()
#########################################################################
# NOTE(review): the regex expects inputs under a ".../bam/..." directory,
# but dedup produces "<prefix>.dedup.bam" alongside the inputs -- verify the
# directory layout or this task will never match anything.
@transform(dedup,
           regex(r"(\S+)/bam/(\S+).bam"),
           r"\1/bam/\2.alignstats")
def buildPicardAlignStats(infile, outfile):
    '''Gather BAM file alignment statistics using Picard '''
    to_cluster = USECLUSTER
    track = P.snip(os.path.basename(infile), ".bam")
    # Expanded immediately via "% locals()"; %%(samtools_genome)s is left
    # for P.run() to substitute from PARAMS.
    statement = '''CollectAlignmentSummaryMetrics INPUT=%(infile)s REFERENCE_SEQUENCE=%%(samtools_genome)s ASSUME_SORTED=true OUTPUT=%(outfile)s VALIDATION_STRINGENCY=SILENT ''' % locals(
    )
    P.run()
############################################################
@merge(buildPicardAlignStats, "picard_align_stats.load")
def loadPicardAlignStats(infiles, outfile):
    '''Merge Picard alignment stats into single table and load into SQLite.'''
    tablename = P.toTable(outfile)
    outf = P.getTempFile()
    first = True
    for f in infiles:
        track = P.snip(os.path.basename(f), ".dedup.alignstats")
        if not os.path.exists(f):
            E.warn("File %s missing" % f)
            continue
        # Keep only non-comment, non-blank lines of the Picard metrics file.
        lines = [
            x for x in open(f, "r").readlines() if not x.startswith("#") and x.strip()]
        if first:
            outf.write("%s\t%s" % ("track", lines[0]))
            first = False
        for i in range(1, len(lines)):
            outf.write("%s\t%s" % (track, lines[i]))
    outf.close()
    tmpfilename = outf.name
    # %(tmpfilename)s / %(tablename)s / %(outfile)s interpolated by P.run().
    statement = '''cat %(tmpfilename)s
                | cgat csv2db
                      --add-index=track
                      --table=%(tablename)s
                > %(outfile)s
                '''
    P.run()
    os.unlink(tmpfilename)
############################################################
############################################################
############################################################
# Pipeline organisation
# Aggregate target: depending on every mapping/stats task, so running
# "mapReads" executes the whole mapping branch of the pipeline.
@follows(buildBAM, sortByName, addNHTag, sortByPosition, dedup,
         loadPicardDuplicateStats, buildBAMStats, loadBAMStats)
def mapReads():
    '''Align reads to target genome.'''
# Top-level aggregate target for the entire pipeline.
@follows(mapReads)
def full():
    '''run the full pipeline.'''
############################################################
############################################################
############################################################
# REPORTS
@follows(mkdir("report"))
def build_report():
'''build report from scratch.'''
E.info("starting documentation build process from scratch")
P.run_report(clean=True)
@follows(mkdir("report"))
def update_report():
'''update report.'''
E.info("updating documentation")
P.run_report(clean=False)
def main(argv=None):
    """Run the pipeline; *argv* defaults to the process command line."""
    effective_argv = sys.argv if argv is None else argv
    P.main(effective_argv)


if __name__ == "__main__":
    sys.exit(P.main(sys.argv))
| 35.649275 | 197 | 0.462314 | 0 | 0 | 0 | 0 | 7,185 | 0.584194 | 0 | 0 | 7,520 | 0.611432 |
d74ea2281107cf74e2034caf7b92932a4e11e640 | 1,904 | py | Python | user_Test.py | Robertokello11/Password-Locker | 23afa9b2748044b523e33660e45f934a90d88328 | [
"MIT"
] | null | null | null | user_Test.py | Robertokello11/Password-Locker | 23afa9b2748044b523e33660e45f934a90d88328 | [
"MIT"
] | null | null | null | user_Test.py | Robertokello11/Password-Locker | 23afa9b2748044b523e33660e45f934a90d88328 | [
"MIT"
] | null | null | null | import unittest #Import unittest module
from user import user # importing the contact class
class TestUser(unittest.TestCase):
    # NOTE(review): the module imports "from user import user" (lowercase),
    # yet every test below references "User" (capitalised), which raises
    # NameError when the suite runs.  Confirm the class name exported by
    # user.py and align the import with it.
    def setUp(self):
        '''
        method to run before each test
        '''
        self.new_user=User("Robert", "Robert11") #new User created
    def tearDown(self):
        '''
        clean up to prevent errors
        '''
        # Reset the shared class-level list so tests stay independent.
        User.user_list = []
    # Test 2 ##
    def test__init(self):
        '''
        check if class is initialiazing as expected
        '''
        self.assertEqual(self.new_user.username, "Robert")
        self.assertEqual(self.new_user.password, "Robert11")
    def test_save_user(self):
        '''
        confirm if the user information can be saved
        in the user list
        '''
        self.new_user.save_user()
        self.assertEqual(len(User.user_list), 1)
    # 3rd test ## saving users ##
    def test_save_mutliple_users(self):
        '''
        check whether you can store more than one user
        '''
        self.new_user.save_user()
        test_user = User("test", "password")
        test_user.save_user()
        self.assertEqual(len(User.user_list), 2)
    #4th test## Delete user ##
    def test_delete_user(self):
        '''
        check whether one can delete a user account
        '''
        self.new_user.save_user()
        test_user = User("test", "password")
        test_user.save_user()
        self.new_user.delete_user()
        self.assertEqual(len(User.user_list), 1)
    ##5th test#
    def test_find_user(self):
        '''
        find a user using username
        '''
        self.new_user.save_user()
        test_user = User("test", "password")
        test_user.save_user()
        found_user = User.find_user("Robert")
        self.assertEqual(found_user.username, self.new_user.username)
# Run the suite when the file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 25.72973 | 69 | 0.581408 | 1,757 | 0.922794 | 0 | 0 | 0 | 0 | 0 | 0 | 703 | 0.369223 |
d74f99009f7b23a8259c513299a6bb144a926283 | 4,881 | py | Python | src/old/api_server.py | ssupdoc/k8-simulation | 7834d3faaed3e86b547554c6228540c316621011 | [
"CC0-1.0"
] | null | null | null | src/old/api_server.py | ssupdoc/k8-simulation | 7834d3faaed3e86b547554c6228540c316621011 | [
"CC0-1.0"
] | null | null | null | src/old/api_server.py | ssupdoc/k8-simulation | 7834d3faaed3e86b547554c6228540c316621011 | [
"CC0-1.0"
] | null | null | null | from src.deployment import Deployment
from src.end_point import EndPoint
from src.etcd import Etcd
from src.pod import Pod
from src.pid_controller import PIDController
from src.request import Request
from src.worker_node import WorkerNode
import threading
import random
#The APIServer handles the communication between controllers and the cluster. It houses
#the methods that can be called for cluster management
class APIServer:
    """Central coordination point of the simulated cluster.

    The APIServer handles the communication between controllers and the
    cluster and houses the methods that can be called for cluster
    management.  All cluster state lives in the embedded Etcd instance.
    """

    def __init__(self, ctrlValues=(0, 0, 0)):
        # BUG FIX: the default used to be a mutable list ([0, 0, 0]), which
        # is shared between all calls; a tuple is safe and equivalent here
        # because the values are only read by index.
        self.etcd = Etcd()
        self.etcdLock = threading.Lock()
        self.kubeletList = []
        self.requestWaiting = threading.Event()
        self.controller = PIDController(ctrlValues[0], ctrlValues[1], ctrlValues[2])  # Tune your controller

    # GetDeployments method returns the list of deployments stored in etcd
    def GetDeployments(self):
        return self.etcd.deploymentList.copy()

    # GetDepByLabel returns the deployment with the given label, or None
    def GetDepByLabel(self, label):
        return next(filter(lambda deployment: deployment.deploymentLabel == label, self.etcd.deploymentList), None)

    # GetWorkers method returns the list of WorkerNodes stored in etcd
    def GetWorkers(self):
        return self.etcd.nodeList.copy()

    # GetPending method returns the list of PendingPods stored in etcd
    def GetPending(self):
        return self.etcd.pendingPodList.copy()

    # GetEndPoints method returns the list of EndPoints stored in etcd
    def GetEndPoints(self):
        return self.etcd.endPointList.copy()

    # CreateWorker creates a WorkerNode from a list of arguments and adds it to the etcd nodeList
    def CreateWorker(self, info):
        worker = WorkerNode(info)
        self.etcd.nodeList.append(worker)
        print("Worker_Node " + worker.label + " created")

    # CreateDeployment creates a Deployment object from a list of arguments and adds it to the etcd deploymentList
    def CreateDeployment(self, info):
        deployment = Deployment(info)
        self.etcd.deploymentList.append(deployment)
        print("Deployment " + deployment.deploymentLabel + " created")

    # RemoveDeployment sets the expected replicas of the matching deployment to zero
    # NOTE(review): despite the original comment, this only scales the
    # deployment down; the Deployment object stays in etcd.deploymentList.
    def RemoveDeployment(self, info):
        for deployment in self.etcd.deploymentList:
            if deployment.deploymentLabel == info[0]:
                deployment.expectedReplicas = 0

    # CreateEndpoint creates an EndPoint object using information from a provided Pod and Node and appends it
    # to the endPointList in etcd
    def CreateEndPoint(self, pod, worker):
        endPoint = EndPoint(pod, pod.deploymentLabel, worker)
        self.etcd.endPointList.append(endPoint)
        print("New Endpoint for "+endPoint.deploymentLabel+"- NODE: "+ endPoint.node.label + " POD: " + endPoint.pod.podName)

    # GetEndPointsByLabel returns a list of EndPoints associated with a given deployment
    def GetEndPointsByLabel(self, deploymentLabel):
        endPoints = []
        for endPoint in self.etcd.endPointList:
            if endPoint.deploymentLabel == deploymentLabel:
                endPoints.append(endPoint)
        return endPoints

    # RemoveEndPoint removes the EndPoint from the list within etcd
    def RemoveEndPoint(self, endPoint):
        # Return the pod's CPU allocation to its node before dropping the endpoint.
        endPoint.node.available_cpu += endPoint.pod.assigned_cpu
        print("Removing EndPoint for: "+endPoint.deploymentLabel)
        self.etcd.endPointList.remove(endPoint)

    # GeneratePodName creates a random numeric label for a pod, unique among
    # running and pending pods.
    def GeneratePodName(self):
        label = random.randint(111, 999)
        # BUG FIX: pod.podName is a string of the form "<deployment>_<label>"
        # while label is an int, so the old check (pod.podName == label)
        # could never detect a collision.  Compare the numeric suffix instead.
        suffix = "_" + str(label)
        for pod in self.etcd.runningPodList:
            if pod.podName.endswith(suffix):
                return self.GeneratePodName()
        for pod in self.etcd.pendingPodList:
            if pod.podName.endswith(suffix):
                return self.GeneratePodName()
        return label

    # CreatePod finds the resource allocations associated with a deployment and creates a pod using those metrics
    def CreatePod(self, deployment):
        podName = deployment.deploymentLabel + "_" + str(self.GeneratePodName())
        pod = Pod(podName, deployment.cpuCost, deployment.deploymentLabel)
        print("Pod " + pod.podName + " created")
        self.etcd.pendingPodList.append(pod)

    # GetPod returns the pod object associated with an EndPoint
    def GetPod(self, endPoint):
        return endPoint.pod

    # TerminatePod gracefully shuts down a Pod
    def TerminatePod(self, endPoint):
        pod = endPoint.pod
        pod.status = "TERMINATING"
        self.RemoveEndPoint(endPoint)
        print("Removing Pod "+pod.podName)

    # CrashPod finds a pod from a given deployment and sets its status to 'FAILED'
    # Any resource utilisation on the pod will be reset to the base 0
    def CrashPod(self, info):
        endPoints = self.GetEndPointsByLabel(info[0])
        if len(endPoints) == 0:
            print("No Pods to crash")
        else:
            print("GETTING PODS")
            pod = self.GetPod(endPoints[0])
            pod.status = "FAILED"
            pod.crash.set()
            print ("Pod "+pod.podName+" crashed")

    # Alter these method so that the requests are pushed to Deployments instead of etcd
    def PushReq(self, info):
        self.etcd.reqCreator.submit(self.ReqPusher, info)

    # ReqPusher appends a new Request to etcd and signals any waiting handler.
    def ReqPusher(self, info):
        self.etcd.pendingReqs.append(Request(info))
        self.requestWaiting.set()
| 36.699248 | 129 | 0.763983 | 4,465 | 0.914772 | 0 | 0 | 0 | 0 | 0 | 0 | 1,742 | 0.356894 |
d75199ae09fb507ebd0bec385de7a0580d95d595 | 1,946 | py | Python | ngram_graphs/TextGraph/IGraphTextGraph.py | loginn/ngrams_graphs | 74fc42d3895cdbc51eec5aaf7b5505b4432619e3 | [
"MIT"
] | 8 | 2018-04-24T17:03:29.000Z | 2022-02-08T14:36:00.000Z | ngram_graphs/TextGraph/IGraphTextGraph.py | loginn/ngrams-graphs | 74fc42d3895cdbc51eec5aaf7b5505b4432619e3 | [
"MIT"
] | 1 | 2019-08-13T08:50:54.000Z | 2021-12-03T11:19:51.000Z | ngram_graphs/TextGraph/IGraphTextGraph.py | loginn/ngrams_graphs | 74fc42d3895cdbc51eec5aaf7b5505b4432619e3 | [
"MIT"
] | null | null | null | from igraph import Graph
def find_node_name(graph, node_idx):
    """Return the ``name`` attribute of the vertex at *node_idx* in *graph*."""
    vertex = graph.vs[node_idx]
    return vertex["name"]
class IGraphTextGraph(Graph):
    """Directed igraph ``Graph`` with n-gram-graph update semantics.

    Edge weights are blended toward those of another graph via
    :meth:`update`, using an exponential-moving-average learning factor.
    """

    def __init__(self):
        super().__init__(directed=True)

    def __copy__(self):
        """Return an independent copy of this graph.

        BUG FIX: edges were previously added to ``self`` instead of the new
        graph ``g`` (mutating the source and returning an edge-less copy),
        and whole Vertex objects were passed to add_vertex where a name is
        expected.  Both are corrected here.
        """
        g = IGraphTextGraph()
        for v in self.vs:
            g.add_vertex(name=v["name"])
        for e in self.es:
            source = find_node_name(self, e.source)
            target = find_node_name(self, e.target)
            g.add_edge(source, target, weight=e["weight"], name=source + ' ' + target)
        return g

    @staticmethod
    def __calc_new_weight(s_edge, weight, learning_factor) -> float:
        # Exponential moving average: move the stored weight toward the
        # observed weight by a fraction `learning_factor`.
        return s_edge["weight"] + ((weight - s_edge["weight"]) * learning_factor)

    def __add_unknown_vertices(self, other):
        # Add every vertex present in *other* but missing here.
        for vertex in other.vs:
            if vertex["name"] not in self.vs["name"]:
                self.add_vertex(name=vertex["name"])

    def __add_unknown_edge(self, other, o_edge, learning_factor):
        # Import an edge seen only in *other*, scaled by the learning factor.
        source = find_node_name(other, o_edge.source)
        target = find_node_name(other, o_edge.target)
        self.add_edge(source, target, weight=o_edge["weight"] * learning_factor, name=source + ' ' + target)

    def __update_edges(self, other: 'IGraphTextGraph', learning_factor: float):
        # Blend weights of shared edges; import edges unique to *other*.
        for o_edge in other.es:
            s_edge = next(iter([e for e in self.es if e["name"] == o_edge["name"]]), None)
            if s_edge is not None:
                s_edge["weight"] = self.__calc_new_weight(s_edge, o_edge["weight"], learning_factor)
            else:
                self.__add_unknown_edge(other, o_edge, learning_factor)
        # Decay edges that *other* does not contain (observed weight 0).
        for s_edge in self.es:
            if s_edge["name"] not in other.es["name"]:
                s_edge["weight"] = self.__calc_new_weight(s_edge, 0, learning_factor)

    def update(self, other: 'IGraphTextGraph', learning_factor):
        """Blend *other* into this graph with the given learning factor."""
        self.__add_unknown_vertices(other)
        self.__update_edges(other, learning_factor)
| 38.92 | 108 | 0.631552 | 1,841 | 0.946043 | 0 | 0 | 164 | 0.084275 | 0 | 0 | 144 | 0.073998 |
d751efb295d54d9e30bbe118adcaa7556e4e4646 | 67 | py | Python | Python Crash Course/12_use_module.py | rfaria/full-stack-python | 1e764d4a1468d849d20005846e49ff900787d1ed | [
"MIT"
] | 1 | 2022-01-17T19:55:52.000Z | 2022-01-17T19:55:52.000Z | Python Crash Course/12_use_module.py | rfaria/full-stack-python | 1e764d4a1468d849d20005846e49ff900787d1ed | [
"MIT"
] | null | null | null | Python Crash Course/12_use_module.py | rfaria/full-stack-python | 1e764d4a1468d849d20005846e49ff900787d1ed | [
"MIT"
] | null | null | null | import new_module as nm
if __name__ == '__main__':
nm.say_hi() | 16.75 | 26 | 0.686567 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.149254 |
d75247a3779551deda4745fa7b2623bb3e507eaf | 619 | py | Python | pyFiDEL/utils.py | sungcheolkim78/pyFiDEL | 670067b12a2efd276e23382251ec612af678731f | [
"Apache-2.0"
] | null | null | null | pyFiDEL/utils.py | sungcheolkim78/pyFiDEL | 670067b12a2efd276e23382251ec612af678731f | [
"Apache-2.0"
] | null | null | null | pyFiDEL/utils.py | sungcheolkim78/pyFiDEL | 670067b12a2efd276e23382251ec612af678731f | [
"Apache-2.0"
] | null | null | null | '''
utils.py - utility functions
Soli Deo Gloria
'''
__author__ = 'Sung-Cheol Kim'
__version__ = '1.0.0'
import numpy as np
def fermi_l(x: np.array, l1: float, l2: float) -> np.array:
    """Fermi-Dirac distribution over *x* parameterized by l1 (offset) and l2 (slope)."""
    exponent = l2 * x + l1
    return 1.0 / (1.0 + np.exp(exponent))
def fermi_b(x: np.array, b: float, m: float, normalized: bool = False):
    """Fermi-Dirac distribution over *x* with beta *b* and mu *m*.

    When *normalized* is True, beta and mu are rescaled by ``len(x)``.
    """
    if not normalized:
        return 1.0 / (1.0 + np.exp(b * (x - m)))
    n = float(len(x))
    return 1.0 / (1.0 + np.exp((b / n) * (x - m * n)))
| 23.807692 | 78 | 0.596123 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 222 | 0.358643 |
d752c6600006a1dd3d08802063344cf314d1ca74 | 3,970 | py | Python | scripts/run_jsw_ablation_experiments.py | Oulu-IMEDS/OAProgression | 76dcb93d4101d1a97c13e39dbb92808f62923b12 | [
"BSD-4-Clause-UC"
] | 65 | 2019-04-14T15:37:55.000Z | 2021-11-10T10:01:57.000Z | scripts/run_jsw_ablation_experiments.py | Oulu-IMEDS/OAProgression | 76dcb93d4101d1a97c13e39dbb92808f62923b12 | [
"BSD-4-Clause-UC"
] | 9 | 2019-07-09T08:12:08.000Z | 2022-01-03T12:06:26.000Z | scripts/run_jsw_ablation_experiments.py | MIPT-Oulu/OAProgression | 76dcb93d4101d1a97c13e39dbb92808f62923b12 | [
"BSD-4-Clause-UC"
] | 13 | 2019-04-15T10:20:57.000Z | 2021-09-13T11:38:00.000Z | import sys
import os
import cv2
import argparse
import pickle
from sklearn.metrics import average_precision_score
from sklearn.model_selection import GroupKFold
from oaprogression.metadata.oai import jsw_features, read_jsw_metadata_oai, beam_angle_feature
from oaprogression.training.lgbm_tools import optimize_lgbm_hyperopt, fit_lgb
from oaprogression.evaluation import tools
# Disable OpenCL and OpenCV-internal threading for reproducible runs.
cv2.ocl.setUseOpenCL(False)
cv2.setNumThreads(0)

# True when running under a debugger (sys.gettrace is set).
# NOTE(review): DEBUG is never read below -- confirm it is still needed.
DEBUG = sys.gettrace() is not None

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_root', default='')
    parser.add_argument('--metadata_root', default='')
    parser.add_argument('--seed', type=int, default=12345)
    parser.add_argument('--lgbm_hyperopt_trials', type=int, default=2)
    parser.add_argument('--save_dir', default='')
    args = parser.parse_args()
    sites, metadata = read_jsw_metadata_oai(args.metadata_root, args.dataset_root)
    results = {}
    # Ablation over feature sets: base clinical variables, optionally KL
    # grade, symptoms/history, the X-ray beam angle, and JSW measurements.
    for feature_set in [['AGE', 'SEX', 'BMI'],
                        ['AGE', 'SEX', 'BMI', 'SURG', 'INJ', 'WOMAC'],
                        ['AGE', 'SEX', 'BMI', 'KL'],
                        ['AGE', 'SEX', 'BMI', 'KL', 'SURG', 'INJ', 'WOMAC'],
                        ['AGE', 'SEX', 'BMI', beam_angle_feature],  # Reproducing the test results w. beam angle
                        ['AGE', 'SEX', 'BMI', 'SURG', 'INJ', 'WOMAC', beam_angle_feature],
                        ['AGE', 'SEX', 'BMI', 'KL', beam_angle_feature],
                        ['AGE', 'SEX', 'BMI', 'KL', 'SURG', 'INJ', 'WOMAC', beam_angle_feature],
                        ['AGE', 'SEX', 'BMI'] + jsw_features,  # Adding JSW to the base model
                        ['AGE', 'SEX', 'BMI', 'KL'] + jsw_features,
                        ['AGE', 'SEX', 'BMI', 'KL', 'SURG', 'INJ', 'WOMAC'] + jsw_features,
                        ['AGE', 'SEX', 'BMI', 'SURG', 'INJ', 'WOMAC'] + jsw_features,
                        ['AGE', 'SEX', 'BMI', beam_angle_feature] + jsw_features,  # Let's try to add the beam angle as well
                        ['AGE', 'SEX', 'BMI', 'KL', beam_angle_feature] + jsw_features,
                        ['AGE', 'SEX', 'BMI', 'KL', 'SURG', 'INJ', 'WOMAC', beam_angle_feature] + jsw_features,
                        ['AGE', 'SEX', 'BMI', 'SURG', 'INJ', 'WOMAC', beam_angle_feature] + jsw_features,
                        ]:
        features_suffix = '_'.join(feature_set)
        results[features_suffix] = {}
        # Leave-one-site-out evaluation: train on all other sites, test on one.
        for test_site in sites:
            top_subj_train = metadata[metadata.V00SITE != test_site]
            top_subj_test = metadata[metadata.V00SITE == test_site]
            # Group folds by subject ID so both knees of a subject stay together.
            gkf = GroupKFold(n_splits=5)
            train_folds = []
            for train_idx, val_idx in gkf.split(top_subj_train, y=top_subj_train.Progressor, groups=top_subj_train.ID):
                train_folds.append((top_subj_train.iloc[train_idx], top_subj_train.iloc[val_idx]))
            best_params, trials = optimize_lgbm_hyperopt(train_folds, feature_set,
                                                         average_precision_score,
                                                         args.seed, hyperopt_trials=args.lgbm_hyperopt_trials)
            ap_score, models_best, oof_preds = fit_lgb(best_params, train_folds,
                                                       feature_set, average_precision_score, True, True)
            print('CV score:', feature_set, ap_score)
            test_res = tools.eval_models(top_subj_test, feature_set, models_best, mean_std_best=None,
                                         impute=False, model_type='lgbm')
            y_test = top_subj_test.Progressor.values.copy() > 0
            ids = top_subj_test.ID.values
            sides = top_subj_test.Side.values
            results[features_suffix][test_site] = (ids, sides, y_test, test_res)
    # Persist all ablation results for later analysis.
    with open(os.path.join(args.save_dir, 'results_ablation_jsw_lgbm.pkl'), 'wb') as f:
        pickle.dump(results, f)
| 49.012346 | 123 | 0.575315 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 679 | 0.171033 |
d753414b6d8a595d3f5481cc528d827c6798b55d | 819 | py | Python | data/synthetic/analyze.py | thonic/pyhawkes | 99804deb9ea22ba3e1a99584420722abdf8eb56b | [
"MIT"
] | 221 | 2015-02-26T04:25:34.000Z | 2022-03-27T13:06:10.000Z | data/synthetic/analyze.py | thonic/pyhawkes | 99804deb9ea22ba3e1a99584420722abdf8eb56b | [
"MIT"
] | 20 | 2015-08-04T01:47:19.000Z | 2021-08-08T00:22:44.000Z | data/synthetic/analyze.py | thonic/pyhawkes | 99804deb9ea22ba3e1a99584420722abdf8eb56b | [
"MIT"
] | 86 | 2015-02-22T23:36:32.000Z | 2021-11-13T20:56:07.000Z | import gzip
import pickle
import os
def analyze(data_path):
    """
    Print summary statistics for a synthetic dataset pickle.

    :param data_path: path to a pickle file (gzip-compressed when it ends in
        ".gz") containing a tuple ``(S, true_model)`` where ``S`` is an
        event-count array of shape (T, K).
    :return: None (results are printed to stdout)
    """
    # Pickle payloads are binary: both branches must open in binary mode.
    # The plain branch previously used text mode 'r', which fails under
    # Python 3; gzip's 'r' already meant 'rb' but is made explicit here.
    if data_path.endswith(".gz"):
        with gzip.open(data_path, 'rb') as f:
            S, true_model = pickle.load(f)
    else:
        with open(data_path, 'rb') as f:
            S, true_model = pickle.load(f)
    print("True model:")
    print(true_model)
    T = float(S.shape[0])   # number of time bins
    N = S.sum(axis=0)       # total event count per process
    print("lambda0: ", true_model.bias_model.lambda0.mean())
    print("Average event count: ", N.mean(), " +- ", N.std())
    print("Average event count: ", (N/T).mean(), " +- ", (N/T).std())
# seed = 2650533028
if __name__ == "__main__":
    # Guard the demo invocation so that importing this module does not run a
    # (potentially long) analysis as an import-time side effect.
    K = 50
    C = 5
    T = 100000
    data_path = os.path.join("data", "synthetic", "synthetic_K%d_C%d_T%d.pkl.gz" % (K, C, T))
    analyze(data_path)
| 22.135135 | 87 | 0.582418 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 251 | 0.306471 |
d75380c7f93cf504e36d7b8aad4924bb1e6a7e9d | 1,692 | py | Python | icarus/test/test_util.py | oascigil/icarus_edge_comp | b7bb9f9b8d0f27b4b01469dcba9cfc0c4949d64b | [
"MIT"
] | 5 | 2021-03-20T09:22:55.000Z | 2021-12-20T17:01:33.000Z | icarus/test/test_util.py | oascigil/icarus_edge_comp | b7bb9f9b8d0f27b4b01469dcba9cfc0c4949d64b | [
"MIT"
] | 1 | 2021-12-13T07:40:46.000Z | 2021-12-20T16:59:08.000Z | icarus/test/test_util.py | oascigil/icarus_edge_comp | b7bb9f9b8d0f27b4b01469dcba9cfc0c4949d64b | [
"MIT"
] | 1 | 2021-11-25T05:42:20.000Z | 2021-11-25T05:42:20.000Z | import unittest
import networkx as nx
import fnss
import icarus.util as util
class TestUtil(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_timestr(self):
        """timestr renders seconds as d/h/m/s; seconds are dropped when not verbose."""
        cases = [
            (90, True, "1m 30s"),
            (90, False, "1m"),
            (120, True, "2m"),
            (21, True, "21s"),
            (21, False, "0m"),
            (3600, True, "1h"),
            (3600, False, "1h"),
            (3604, True, "1h 0m 4s"),
            (3604, False, "1h"),
            (3724, True, "1h 2m 4s"),
            (3724, False, "1h 2m"),
            (49 * 3600 + 189, True, "2d 1h 3m 9s"),
            (0, True, "0s"),
            (0, False, "0m"),
        ]
        for seconds, verbose, expected in cases:
            self.assertEqual(expected, util.timestr(seconds, verbose))

    def test_multicast_tree(self):
        """multicast_tree returns the edges that reach every destination from the source."""
        topology = fnss.Topology()
        topology.add_path([2, 1, 3, 4])
        shortest_paths = nx.all_pairs_shortest_path(topology)
        edges = util.multicast_tree(shortest_paths, 1, [2, 3])
        self.assertSetEqual({(1, 2), (1, 3)}, set(edges))

    def test_apportionment(self):
        """apportionment splits an integer total according to fractional weights."""
        for total, weights, expected in [
            (10, [0.53, 0.47], [5, 5]),
            (100, [0.4, 0.21, 0.39], [40, 21, 39]),
            (99, [0.2, 0.7, 0.1], [20, 69, 10]),
        ]:
            self.assertEqual(expected, util.apportionment(total, weights))
| 32.538462 | 82 | 0.615839 | 1,610 | 0.951537 | 0 | 0 | 103 | 0.060875 | 0 | 0 | 85 | 0.050236 |
d7541a3b15b51e054893433fc2620f63f4d58eb9 | 1,055 | py | Python | pytorch_metric_learning/trainers/unsupervised_embeddings_using_augmentations.py | jacobdanovitch/pytorch_metric_learning | dbcf2d49fffe92f7dc1221b939e182c214633520 | [
"MIT"
] | 1 | 2021-05-18T01:51:04.000Z | 2021-05-18T01:51:04.000Z | pytorch_metric_learning/trainers/unsupervised_embeddings_using_augmentations.py | junjungoal/pytorch_metric_learning | e56bb440d1ec63e13622025209135a788c6f51c1 | [
"MIT"
] | null | null | null | pytorch_metric_learning/trainers/unsupervised_embeddings_using_augmentations.py | junjungoal/pytorch_metric_learning | e56bb440d1ec63e13622025209135a788c6f51c1 | [
"MIT"
] | 1 | 2021-05-18T01:51:00.000Z | 2021-05-18T01:51:00.000Z | from .metric_loss_only import MetricLossOnly
import logging
from ..utils import common_functions as c_f
import torch
class UnsupervisedEmbeddingsUsingAugmentations(MetricLossOnly):
    # Trainer for unsupervised embedding learning: each raw sample in a batch is
    # expanded into len(transforms) augmented views by a custom collate_fn.
    def __init__(self, transforms, **kwargs):
        """
        :param transforms: iterable of callables; each is applied to every raw
            image to produce one augmented view per transform.
        """
        super().__init__(**kwargs)
        # Labels pass through unchanged; no hierarchy remapping in the unsupervised case.
        self.label_mapper = lambda label, hierarchy_level: label
        # The collate_fn must be installed before the dataloader is (re)built below.
        self.collate_fn = self.get_custom_collate_fn(transforms, self.possible_data_keys)
        self.initialize_dataloader()
        logging.info("Transforms: %s"%transforms)
    def get_custom_collate_fn(self, transforms, possible_data_keys):
        # Build a collate_fn that stacks len(data) * len(transforms) augmented tensors.
        def custom_collate_fn(data):
            transformed_data, labels = [], []
            for i, d in enumerate(data):
                # The image may live under any of several candidate dict keys.
                img = c_f.try_keys(d, possible_data_keys)
                for t in transforms:
                    transformed_data.append(t(img))
                # NOTE(review): one label per source sample, but len(transforms)
                # tensors per sample are stacked below — confirm the downstream
                # loss expects this 1:N label-to-view ratio.
                labels.append(i)
            return {possible_data_keys[0]: torch.stack(transformed_data, dim=0), "label": torch.LongTensor(labels)}
        return custom_collate_fn
| 43.958333 | 116 | 0.661611 | 930 | 0.881517 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.021801 |
d7560a43f54c8aae8cfba885c28425454151868f | 866 | py | Python | accelerator/migrations/0064_update_gender_criteria_to_full_gender_spec.py | masschallenge/django-accelerator | 8af898b574be3b8335edc8961924d1c6fa8b5fd5 | [
"MIT"
] | 6 | 2017-06-14T19:34:01.000Z | 2020-03-08T07:16:59.000Z | accelerator/migrations/0064_update_gender_criteria_to_full_gender_spec.py | masschallenge/django-accelerator | 8af898b574be3b8335edc8961924d1c6fa8b5fd5 | [
"MIT"
] | 160 | 2017-06-20T17:12:13.000Z | 2022-03-30T13:53:12.000Z | accelerator/migrations/0064_update_gender_criteria_to_full_gender_spec.py | masschallenge/django-accelerator | 8af898b574be3b8335edc8961924d1c6fa8b5fd5 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.24 on 2021-07-01 20:17
from django.db import migrations
def update_criterion_specs(apps, schema_editor):
    """Forward data migration: expand abbreviated gender options to full words."""
    option_spec = apps.get_model("accelerator", "CriterionOptionSpec")
    for abbreviated, full in (("m", "Male"), ("f", "Female")):
        option_spec.objects.filter(option=abbreviated).update(option=full)
def reverse_update(apps, schema_editor):
    """Backward data migration: restore the abbreviated gender options."""
    option_spec = apps.get_model("accelerator", "CriterionOptionSpec")
    for full, abbreviated in (("Male", "m"), ("Female", "f")):
        option_spec.objects.filter(option=full).update(option=abbreviated)
class Migration(migrations.Migration):
    # Data-only migration: renames gender criterion options "m"/"f" to the full
    # "Male"/"Female" spellings; reversible through reverse_update.
    dependencies = [
        ('accelerator', '0063_usernote'),
    ]
    operations = [
        migrations.RunPython(update_criterion_specs,
                             reverse_update)
    ]
| 30.928571 | 78 | 0.71709 | 232 | 0.267898 | 0 | 0 | 0 | 0 | 0 | 0 | 184 | 0.212471 |
d7564ee7c90edf711987a796bd61e3ba3f488668 | 2,766 | py | Python | task8.py | akramnarejo/pycode | 475aab3df34f5c015e6e8f7e628ecadb91404569 | [
"MIT"
] | null | null | null | task8.py | akramnarejo/pycode | 475aab3df34f5c015e6e8f7e628ecadb91404569 | [
"MIT"
] | null | null | null | task8.py | akramnarejo/pycode | 475aab3df34f5c015e6e8f7e628ecadb91404569 | [
"MIT"
] | null | null | null | import random
def choice(choice):
    """Map a numeric menu choice to its hand name; anything not 1 or 2 is Scissors."""
    names = {1: "Rock", 2: "Paper"}
    return names.get(choice, "Scissors")
def determine(userChoice, computerChoice):
    """
    Decide the winner of a round.

    Returns "user" or "computer" for a decided round, otherwise "tie"
    (including when both players chose the same hand).
    """
    beats = {("Rock", "Scissors"), ("Paper", "Rock"), ("Scissors", "Paper")}
    if (userChoice, computerChoice) in beats:
        return "user"
    if (computerChoice, userChoice) in beats:
        return "computer"
    return "tie"
def play():
    """Run an interactive Rock Paper Scissors session on the console.

    Rules: Rock smashes Scissors, Paper wraps Rock, Scissors cut Paper.
    Prompts for a number of rounds, plays them against a random computer
    opponent, and prints the overall winner.
    """
    print("-------Rock Paper Scissiors---------")
    print("1. Rock\n2. Paper\n3. Scissors")
    print("------------------------------------------------")
    rounds = int(input("Enter the number of rounds you want to play: "))
    print("------------------------------------------------")
    wins = []  # per-round winner: "user", "computer" or "tie"
    i=1
    while(i<=rounds):
        print(f'round {i}')
        print("-------")
        userChoice = int(input('Enter your choice(1,2,or 3): '))
        computerChoice = random.choice([1,2,3])  # computer picks uniformly at random
        print(f"You selected {choice(userChoice)}")
        print(f"computer selected {choice(computerChoice)}")
        winner = determine(choice(userChoice), choice(computerChoice))
        if(winner == "tie"):
            print("tie, nobody wins! \U0001F61F")
        else:
            print(f"{winner} wins! \U0001F602")
        wins.append(winner)
        print("------------------------------------------------")
        i+= 1
    # Tally per-round results; "tie" rounds count for neither side.
    userWins = 0
    computerWins = 0
    for win in wins:
        if(win == "user"):
            userWins += 1
        elif(win == "computer"):
            computerWins += 1
    if(rounds>1):
        if(userWins>computerWins):
            print("user wins! \U0001F602")
        elif(computerWins>userWins):
            print("computer wins!")
        else:
            print("tie, nobody wins! \U0001F61F")
    elif(rounds == 1):
        # Single round: report that round's outcome directly.
        if(wins[0] == "tie"):
            print("tie, nobody wins! \U0001F61F")
        else:
            print(f"{wins[0]} wins! \U0001F602")
play()
| 32.928571 | 105 | 0.537238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,237 | 0.447216 |
d75687d12877fd75bcb5ec323b32f51bea82f129 | 6,245 | py | Python | rl_coach/graph_managers/hrl_graph_manager.py | jl45621/coach | 9a895a1ac73aff44b2e6eb8e4d01e8ec35ceb084 | [
"Apache-2.0"
] | 1,960 | 2017-10-19T10:31:24.000Z | 2020-11-07T18:19:23.000Z | rl_coach/graph_managers/hrl_graph_manager.py | gndctrl2mjrtm/coach-ray | ae6593bb33cf0ae3c5a4b3b351560dd6b47cd031 | [
"Apache-2.0"
] | 349 | 2017-10-21T17:17:18.000Z | 2020-10-17T13:39:56.000Z | rl_coach/graph_managers/hrl_graph_manager.py | gndctrl2mjrtm/coach-ray | ae6593bb33cf0ae3c5a4b3b351560dd6b47cd031 | [
"Apache-2.0"
] | 428 | 2017-10-21T01:32:58.000Z | 2020-11-07T13:49:49.000Z | #
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import List, Union, Tuple
from rl_coach.base_parameters import AgentParameters, VisualizationParameters, TaskParameters, \
PresetValidationParameters
from rl_coach.core_types import EnvironmentSteps
from rl_coach.environments.environment import EnvironmentParameters, Environment
from rl_coach.graph_managers.graph_manager import GraphManager, ScheduleParameters
from rl_coach.level_manager import LevelManager
from rl_coach.utils import short_dynamic_import
class HRLGraphManager(GraphManager):
    """
    A simple HRL graph manager creates a deep hierarchy with a single composite agent per hierarchy level, and a single
    environment which is interacted with.
    """
    def __init__(self, agents_params: List[AgentParameters], env_params: EnvironmentParameters,
                 schedule_params: ScheduleParameters, vis_params: VisualizationParameters,
                 consecutive_steps_to_run_each_level: Union[EnvironmentSteps, List[EnvironmentSteps]],
                 preset_validation_params: PresetValidationParameters = PresetValidationParameters()):
        """
        :param agents_params: the parameters of all the agents in the hierarchy starting from the top level of the
                              hierarchy to the bottom level
        :param env_params: the parameters of the environment
        :param schedule_params: the parameters for scheduling the graph
        :param vis_params: the visualization parameters
        :param consecutive_steps_to_run_each_level: the number of time steps that each level is ran.
            for example, when the top level gives the bottom level a goal, the bottom level can act for
            consecutive_steps_to_run_each_level steps and try to reach that goal. This is expected to be either
            an EnvironmentSteps which will be used for all levels, or an EnvironmentSteps for each level as a list.
        """
        super().__init__('hrl_graph', schedule_params, vis_params)
        self.agents_params = agents_params
        self.env_params = env_params
        self.preset_validation_params = preset_validation_params
        if isinstance(consecutive_steps_to_run_each_level, list):
            if len(consecutive_steps_to_run_each_level) != len(self.agents_params):
                raise ValueError("If the consecutive_steps_to_run_each_level is given as a list, it should match "
                                 "the number of levels in the hierarchy. Alternatively, it is possible to use a single "
                                 "value for all the levels, by passing an EnvironmentSteps")
            # BUG FIX: this branch previously validated the list but never stored
            # it, so passing a list made _create_graph fail with AttributeError.
            self.consecutive_steps_to_run_each_level = consecutive_steps_to_run_each_level
        elif isinstance(consecutive_steps_to_run_each_level, EnvironmentSteps):
            self.consecutive_steps_to_run_each_level = [consecutive_steps_to_run_each_level] * len(self.agents_params)
        else:
            # Fail fast instead of deferring to an obscure AttributeError later.
            raise TypeError("consecutive_steps_to_run_each_level must be an EnvironmentSteps or a list of "
                            "EnvironmentSteps, got {}".format(type(consecutive_steps_to_run_each_level)))
        # Propagate shared visualization settings and fill in default filters.
        for agent_params in agents_params:
            agent_params.visualization = self.visualization_parameters
            if agent_params.input_filter is None:
                agent_params.input_filter = self.env_params.default_input_filter()
            if agent_params.output_filter is None:
                agent_params.output_filter = self.env_params.default_output_filter()
        if len(self.agents_params) < 2:
            raise ValueError("The HRL graph manager must receive the agent parameters for at least two levels of the "
                             "hierarchy. Otherwise, use the basic RL graph manager.")
    def _create_graph(self, task_parameters: TaskParameters) -> Tuple[List[LevelManager], List[Environment]]:
        """Instantiate the environment and one LevelManager per hierarchy level."""
        self.env_params.seed = task_parameters.seed
        env = short_dynamic_import(self.env_params.path)(**self.env_params.__dict__,
                                                         visualization_parameters=self.visualization_parameters)
        for agent_params in self.agents_params:
            agent_params.task_parameters = task_parameters
        # we need to build the hierarchy in reverse order (from the bottom up) in order for the spaces of each level
        # to be known
        level_managers = []
        current_env = env
        # out_action_space = env.action_space
        for level_idx, agent_params in reversed(list(enumerate(self.agents_params))):
            # TODO: the code below is specific for HRL on observation scale
            # in action space
            # if level_idx == 0:
            #     # top level agents do not get directives
            #     in_action_space = None
            # else:
            #     pass
            #     attention_size = (env.state_space['observation'].shape - 1)//4
            #     in_action_space = AttentionActionSpace(shape=2, low=0, high=env.state_space['observation'].shape - 1,
            #                                            forced_attention_size=attention_size)
            #     agent_params.output_filter.action_filters['masking'].set_masking(0, attention_size)
            agent_params.name = "agent_{}".format(level_idx)
            agent_params.is_a_highest_level_agent = level_idx == 0
            agent = short_dynamic_import(agent_params.path)(agent_params)
            # Each level wraps the level below it (or the real env at the bottom).
            level_manager = LevelManager(
                agents=agent,
                environment=current_env,
                real_environment=env,
                steps_limit=self.consecutive_steps_to_run_each_level[level_idx],
                should_reset_agent_state_after_time_limit_passes=level_idx > 0,
                name="level_{}".format(level_idx)
            )
            current_env = level_manager
            level_managers.insert(0, level_manager)
            # out_action_space = in_action_space
        return level_managers, [env]
| 52.923729 | 120 | 0.68711 | 5,173 | 0.828343 | 0 | 0 | 0 | 0 | 0 | 0 | 2,665 | 0.426741 |
d7591c0d088001626e41cc97845928cb48724e49 | 1,667 | py | Python | test_21_recent_filelist.py | Mpreyzner/tdd_in_python | fa20864329424e40a52e80df34f31cdc56620fb4 | [
"MIT"
] | null | null | null | test_21_recent_filelist.py | Mpreyzner/tdd_in_python | fa20864329424e40a52e80df34f31cdc56620fb4 | [
"MIT"
] | null | null | null | test_21_recent_filelist.py | Mpreyzner/tdd_in_python | fa20864329424e40a52e80df34f31cdc56620fb4 | [
"MIT"
] | 1 | 2020-05-09T17:41:01.000Z | 2020-05-09T17:41:01.000Z | # https://sites.google.com/site/tddproblems/all-problems-1/recent-file-list
# A popular feature of graphical editors of
# all kinds (text, graphics, spreadsheets, ..) is the Recent file list. It is often found as a sub-menu of the file
# menu in the GUI of the program.
#
# Use TDD to grow this kind of behaviour. Some examples of the behaviour is
#
# When the program is run for the first time, the list is empty When a file is opened, it is added to the recent file
# list If an opened file already exists in the recent file list, it is bumped to the top, not duplicated in the list
# If the recent file list gets full (typical number of items is 15), the oldest item is removed when a new item is
# added
from filelist import Program
def test_empty_list():
    """A freshly started program has no recent files."""
    assert len(Program().get_recent_file_list()) == 0
def test_not_empty_list():
    """Opening a file adds it to the recent file list."""
    recent = Program().open('somefile.txt').get_recent_file_list()
    assert len(recent) == 1
def test_bumping_item():
    """Re-opening an already-listed file bumps it to the top of the list."""
    program = Program()
    for name in ('1.txt', '2.txt', '2.txt'):
        program.open(name)
    assert program.get_recent_file_list()[0] == '2.txt'
def test_item_should_not_be_duplicated():
    """Re-opening a file must not create a duplicate entry."""
    program = Program()
    for name in ('1.txt', '1.txt'):
        program.open(name)
    assert len(program.get_recent_file_list()) == 1
def test_list_should_have_up_to_15_items():
    """The list caps at 15 entries and evicts the oldest entry first."""
    program = Program()
    for i in range(1, 17):
        program.open('%s.txt' % i)
    recent = program.get_recent_file_list()
    assert len(recent) == 15
    assert '1.txt' not in recent
| 29.767857 | 117 | 0.706059 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 766 | 0.459508 |
d7599edaa481a1e260e406f973a13c62378a0dab | 389 | py | Python | tests/utils.py | amatissart/idunn | cae32024165a85ad99881c373a88ea5702631d71 | [
"Apache-2.0"
] | null | null | null | tests/utils.py | amatissart/idunn | cae32024165a85ad99881c373a88ea5702631d71 | [
"Apache-2.0"
] | null | null | null | tests/utils.py | amatissart/idunn | cae32024165a85ad99881c373a88ea5702631d71 | [
"Apache-2.0"
] | null | null | null | from contextlib import contextmanager
from copy import deepcopy
from app import settings
@contextmanager
def override_settings(overrides):
    """
    A utility function used by some fixtures to override settings
    """
    # Snapshot the current settings so they can be restored on exit.
    old_settings = deepcopy(settings._settings)
    settings._settings.update(overrides)
    try:
        yield
    finally:
        # NOTE(review): restoration rebinds the attribute to the deep copy; any
        # code that cached a direct reference to the *original* dict will still
        # see the overridden values — confirm nothing holds such a reference.
        settings._settings = old_settings
| 24.3125 | 65 | 0.732648 | 0 | 0 | 282 | 0.724936 | 298 | 0.766067 | 0 | 0 | 77 | 0.197943 |
d759b3383f0a3272c487bb9c3a6da4112544bbe3 | 173 | py | Python | exp01_string.py | psb2509/learning-python3 | 38bd16f85d458b69ed677c72315c1023f83afc3d | [
"MIT"
] | null | null | null | exp01_string.py | psb2509/learning-python3 | 38bd16f85d458b69ed677c72315c1023f83afc3d | [
"MIT"
] | 4 | 2018-09-09T16:47:46.000Z | 2018-09-10T12:18:43.000Z | exp01_string.py | psb2509/learning-python3 | 38bd16f85d458b69ed677c72315c1023f83afc3d | [
"MIT"
] | null | null | null | print(4+3);
print("Hello");  # double-quoted string literal (trailing ';' is legal but un-Pythonic)
print('Who are you');  # single quotes are equivalent to double quotes
print('This is Pradeep\'s python program');  # backslash escapes the embedded quote
print(r'C:\Users\N51254\Documents\NetBeansProjects');  # raw string: backslashes kept verbatim
print("Pradeep "*5); | 28.833333 | 54 | 0.687861 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.635838 |
d759cca6aa40b22128380587e7622577a783f241 | 1,422 | py | Python | parse.py | OpenScienceFramework/citations | 3679ef4a1746a287061186078ce4b9144afe9083 | [
"Apache-2.0"
] | 4 | 2015-05-20T15:48:35.000Z | 2021-06-24T17:04:51.000Z | parse.py | OpenScienceFramework/citations | 3679ef4a1746a287061186078ce4b9144afe9083 | [
"Apache-2.0"
] | null | null | null | parse.py | OpenScienceFramework/citations | 3679ef4a1746a287061186078ce4b9144afe9083 | [
"Apache-2.0"
] | 1 | 2015-01-10T13:02:45.000Z | 2015-01-10T13:02:45.000Z | # encoding: utf-8
"""
Parse module for parsing citations into structured data. Currently this uses
the Parsley library to do this, the grammars are defined in the grammars/
folder and cycled through until one is found that works.
to_dict will convert the Reference named tuple into a dictionary, which allows
for easy transformation into JSON.
"""
import collections
import glob
import re
import parsley
DASHES = ['-', u'–']
fields = "ref names year title journal edition pages doi".split()
Reference = collections.namedtuple("Reference", ' '.join(fields))
def normalize(string):
    """Collapse internal whitespace runs to single spaces and trim both ends."""
    return re.sub(r'\s+', ' ', string).strip()
parsers = []
# Build one Parsley parser per grammar file; parse() tries them in turn.
for gname in glob.glob("grammars/*.parsley"):
    with open(gname) as gfile:
        grammar = unicode(gfile.read())
    # Expose DASHES, the Reference namedtuple and normalize() to grammar rules.
    parser = parsley.makeGrammar(grammar, dict(DASHES=DASHES,
        Reference=Reference, normalize=normalize))
    parsers.append(parser)
def parse(text):
    """
    Attempt to parse *text* into a Reference named tuple by trying each loaded
    grammar in turn. Returns None (implicitly) if every grammar fails.
    """
    for grammar_parser in parsers:
        try:
            return grammar_parser(text).line()
        except Exception as e:
            # A grammar mismatch is expected here; report it and try the next
            # grammar. print(e) works on both Python 2 and 3, whereas the old
            # `print e.message` was Python-2-only syntax and e.message was
            # removed in Python 3.
            print(e)
def to_dict(s):
    """Turn a citation string into a dictionary that can be serialized to JSON.

    Uses parse() so that every loaded grammar is tried; the previous version
    reused the leaked loop variable `parser` and therefore only ever applied
    the *last* grammar. Returns None when no grammar matches.
    """
    ref = parse(s)
    return ref._asdict() if ref is not None else None
| 28.44 | 79 | 0.662447 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 646 | 0.453652 |
d75a122a7b1520cc30ff7d383f0593e9eebe913f | 2,260 | py | Python | src/utils/config.py | cpaismz89/DeepFireTopology | 9cfe7c5ed9997a2a89b7405af47d9991da7d5471 | [
"MIT"
] | null | null | null | src/utils/config.py | cpaismz89/DeepFireTopology | 9cfe7c5ed9997a2a89b7405af47d9991da7d5471 | [
"MIT"
] | null | null | null | src/utils/config.py | cpaismz89/DeepFireTopology | 9cfe7c5ed9997a2a89b7405af47d9991da7d5471 | [
"MIT"
] | null | null | null | # Run Keras on CPU
import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = " " # -1 if CPU
# Importations
from IPython.display import Image
# Compressed pickle
import pickle
from compress_pickle import dump as cdump
from compress_pickle import load as cload
import io
# Importations
import numpy as np
import pandas as pd
from time import time
import re
import os
import random
import time
# Deep learning
import tensorflow as tf
import keras
from keras.models import Sequential, Model, load_model
from keras.regularizers import l2
from keras.layers import Dense, Input, Flatten, Dropout, BatchNormalization, Activation
from keras.wrappers.scikit_learn import KerasClassifier
from keras.constraints import maxnorm
from keras.callbacks import ModelCheckpoint, EarlyStopping, LearningRateScheduler
from keras.utils.vis_utils import plot_model
from keras.utils import np_utils
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D, AveragePooling2D
from keras.layers.recurrent import LSTM, GRU
from keras.layers.wrappers import TimeDistributed
from keras.layers.merge import concatenate
from keras.optimizers import SGD, Adam
from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator
# Image Processing
from imutils import paths, build_montages
import imutils
import cv2
# Gridsearch
from sklearn.model_selection import GridSearchCV, KFold, train_test_split, cross_val_score
from keras.wrappers.scikit_learn import KerasClassifier, KerasRegressor
from sklearn.preprocessing import Normalizer, StandardScaler, MinMaxScaler, LabelBinarizer, MultiLabelBinarizer, LabelEncoder
from sklearn.utils import shuffle
from sklearn.pipeline import Pipeline
from sklearn.metrics import mean_squared_error, roc_auc_score, auc, confusion_matrix, accuracy_score, classification_report
# Visuals
import seaborn as sns
import matplotlib.pyplot as plt
# Plotting training class
from IPython.display import clear_output
# Visuals scripts
import sys
sys.path.append('..') # Parent folder
from drawer.keras_util import convert_drawer_model
from drawer.pptx_util import save_model_to_pptx
from drawer.matplotlib_util import save_model_to_file | 32.753623 | 125 | 0.834513 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 309 | 0.136726 |
d75a1604cab32ecaf5df590d83df4055be296ffd | 1,572 | py | Python | openstack_dashboard/test/integration_tests/steps.py | Mirantis/mos-horizon | d2444220d959c8b921436bd75459c2face0e71d2 | [
"Apache-2.0"
] | 9 | 2016-06-03T03:53:24.000Z | 2017-05-20T16:53:23.000Z | openstack_dashboard/test/integration_tests/steps.py | Mirantis/mos-horizon | d2444220d959c8b921436bd75459c2face0e71d2 | [
"Apache-2.0"
] | 1 | 2016-09-08T10:57:46.000Z | 2016-09-08T10:59:06.000Z | openstack_dashboard/test/integration_tests/steps.py | Mirantis/mos-horizon | d2444220d959c8b921436bd75459c2face0e71d2 | [
"Apache-2.0"
] | 4 | 2016-08-01T10:50:15.000Z | 2017-02-22T12:11:19.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from openstack_dashboard.test.integration_tests.pages import loginpage
from openstack_dashboard.test.integration_tests.regions import messages
LOGGER = logging.getLogger(__name__)
def login(test_case):
    """Generator fixture: log a demo user in, yield to the test, then log out.

    *test_case* must provide driver/CONFIG/credential attributes and
    unittest-style assertion helpers.
    """
    test_case.login_pg = loginpage.LoginPage(test_case.driver,
                                             test_case.CONFIG)
    test_case.login_pg.go_to_login_page()
    test_case.create_demo_user()
    test_case.home_pg = test_case.login_pg.login(test_case.TEST_USER_NAME,
                                                 test_case.TEST_PASSWORD)
    test_case.home_pg.change_project(test_case.HOME_PROJECT)
    # A success message must appear and no error message may be present.
    test_case.assertTrue(
        test_case.home_pg.find_message_and_dismiss(messages.SUCCESS))
    test_case.assertFalse(
        test_case.home_pg.find_message_and_dismiss(messages.ERROR))
    yield
    # Teardown: runs when the consuming fixture resumes the generator.
    if test_case.home_pg.is_logged_in:
        test_case.home_pg.log_out()
    else:
        # Logger.warn is a deprecated alias; warning() is the supported API.
        LOGGER.warning("{!r} isn't logged in".format(test_case.TEST_USER_NAME))
| 37.428571 | 78 | 0.714377 | 0 | 0 | 799 | 0.50827 | 0 | 0 | 0 | 0 | 583 | 0.370865 |
d75a18bd78e91d9f730e373f55bda8a6f4b069bd | 631 | py | Python | src/zabbix_enums/common/event.py | szuro/zabbix-enums | f2ef3b9ea630f678c336d4fc58b5401771a0e4d1 | [
"MIT"
] | 1 | 2022-02-07T01:21:34.000Z | 2022-02-07T01:21:34.000Z | src/zabbix_enums/common/event.py | szuro/zabbix-enums | f2ef3b9ea630f678c336d4fc58b5401771a0e4d1 | [
"MIT"
] | null | null | null | src/zabbix_enums/common/event.py | szuro/zabbix-enums | f2ef3b9ea630f678c336d4fc58b5401771a0e4d1 | [
"MIT"
] | null | null | null | from zabbix_enums.common import _ZabbixEnum
class EventSeverity(_ZabbixEnum):
    # Severity levels of a trigger event; numeric values mirror the Zabbix API
    # event severity constants — verify against the Zabbix version in use.
    NOT_CLASSIFIED = 0
    INFORMATION = 1
    WARNING = 2
    AVERAGE = 3
    HIGH = 4
    DISASTER = 5
class EventSuppressed(_ZabbixEnum):
    # Whether the event is currently suppressed (e.g. by maintenance).
    NO = 0
    YES = 1
class EventObjectTrigger(_ZabbixEnum):
    # Object types valid for trigger-sourced events.
    TRIGGER = 0
class EventObjectInternal(_ZabbixEnum):
    # Object types valid for internal events.
    TRIGGER = 0
    ITEM = 4
    LLD = 5
class EventSource(_ZabbixEnum):
    # Origin of the event.
    TRIGGER = 0
    DISCOVERY = 1
    AUTOREGISTRATION = 2
    INTERNAL = 3
class EventObjectDiscovery(_ZabbixEnum):
    # Object types valid for discovery events.
    HOST = 1
    SERVICE = 2
class EventObjectAutoregistration(_ZabbixEnum):
    # Object type valid for autoregistration events.
    HOST = 3
| 15.02381 | 47 | 0.673534 | 566 | 0.896989 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d75a3a6b573bad3eb8f11cb8fcb954ad3b080d4b | 3,283 | py | Python | tests/test_next_step_assignment_udf.py | EdinburghGenomics/clarity_scripts | 472299fc4edd4e0a08895ecc7c5630b253322e28 | [
"MIT"
] | 2 | 2018-06-18T16:31:09.000Z | 2021-03-31T20:13:39.000Z | tests/test_next_step_assignment_udf.py | EdinburghGenomics/clarity_scripts | 472299fc4edd4e0a08895ecc7c5630b253322e28 | [
"MIT"
] | 99 | 2016-02-15T16:21:51.000Z | 2022-03-11T23:43:26.000Z | tests/test_next_step_assignment_udf.py | EdinburghGenomics/clarity_scripts | 472299fc4edd4e0a08895ecc7c5630b253322e28 | [
"MIT"
] | null | null | null | from unittest.mock import patch, PropertyMock, Mock
import pytest
from EPPs.common import StepEPP
from tests.test_common import TestEPP, NamedMock
from scripts.next_step_assignment_udf import AssignNextStepUDF
class TestNextStepAssignmentUDF(TestEPP):
    # Step UDF fixtures: step_udfs1 matches the --udf_values argument,
    # step_udfs2 has a non-matching value, step_udfs3 lacks 'step_udf1' entirely.
    step_udfs1={'step_udf1':'udf_value1'}
    step_udfs2={'step_udf1':'udf_value2'}
    step_udfs3={'step_udf2':'udf_value3'}
    # Mocked protocol steps; the step under test is configured as protostep1.
    # NOTE(review): protostep2 and protostep3 share the same uri — confirm intended.
    protostep1 = NamedMock(real_name='next_step1',uri='http://test.com/config/protocols/1/step/2')
    protostep2 = NamedMock(real_name='next_step2', uri='http://test.com/config/protocols/1/step/3')
    protostep3 = NamedMock(real_name='next_step3', uri='http://test.com/config/protocols/1/step/3')
    # Shared mutable mocks: tests inspect actions.next_actions after _run().
    # NOTE(review): class-level mocks couple the tests; put.reset_mock() in the
    # first test is what lets the second test assert call_count == 1.
    actions = Mock(next_actions=[{}])
    protocol = Mock(steps=[protostep1, protostep2, protostep3])
    patched_protocol = patch('scripts.next_step_assignment_udf.Protocol', return_value=protocol)
    # patched_process1/2/3 swap in processes whose step UDFs are step_udfs1/2/3.
    patched_process1= patch.object(
        StepEPP,
        'process',
        new_callable=PropertyMock(return_value=Mock(
            udf=step_udfs1,
            step=Mock(
                actions=actions,
                configuration=protostep1)
        )
        )
    )
    patched_process2= patch.object(
        StepEPP,
        'process',
        new_callable=PropertyMock(return_value=Mock(
            udf=step_udfs2,
            step=Mock(
                actions=actions,
                configuration=protostep1)
        )
        )
    )
    patched_process3= patch.object(
        StepEPP,
        'process',
        new_callable=PropertyMock(return_value=Mock(
            udf=step_udfs3,
            step=Mock(
                actions=actions,
                configuration=protostep1)
        )
        )
    )
    def setUp(self):
        # EPP under test: route to 'next_step3' when step_udf1 == 'udf_value1'.
        self.epp = AssignNextStepUDF(self.default_argv
                                     +['--step_udf','step_udf1']
                                     +['--udf_values','udf_value1']
                                     +['--next_steps','next_step3'])
    def test_next_step_udf_happy_path(self): #next step is the step defined by the next_steps argument
        with self.patched_process1, self.patched_protocol:
            self.epp._run()
        expected_next_actions = [
            {'action': 'nextstep','step': self.protocol.steps[2]}
        ]
        assert self.actions.next_actions == expected_next_actions
        assert self.actions.put.call_count == 1
        self.actions.put.reset_mock()
    def test_next_step_udf_udf_value_false(self): #next action is the next step in protocol
        with self.patched_process2, self.patched_protocol:
            self.epp._run()
        expected_next_actions = [
            {'action': 'nextstep','step': self.protocol.steps[1]}
        ]
        assert self.actions.next_actions == expected_next_actions
        assert self.actions.put.call_count == 1
def test_next_step_udf_step_udf_not_present(self): #error message as step_udf defined by argument not present
with self.patched_process3, self.patched_protocol, pytest.raises(ValueError) as e:
self.epp._run()
assert str(e.value) == 'Step UDF step_udf1 not present' | 33.161616 | 113 | 0.604325 | 3,069 | 0.934816 | 0 | 0 | 0 | 0 | 0 | 0 | 615 | 0.187329 |
d75a625bfb5e6d5e2d2d9e1471c8fa852109fb7f | 17 | py | Python | haul2/src/__init__.py | hotkeymuc/haul2 | bac20b684fd81c24b91e505002723a6577c2b883 | [
"MIT"
] | null | null | null | haul2/src/__init__.py | hotkeymuc/haul2 | bac20b684fd81c24b91e505002723a6577c2b883 | [
"MIT"
] | null | null | null | haul2/src/__init__.py | hotkeymuc/haul2 | bac20b684fd81c24b91e505002723a6577c2b883 | [
"MIT"
] | null | null | null | __all__ = ["htk"] | 17 | 17 | 0.588235 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0.294118 |
d75b34d68f5f86482cc12361e48b4e2ab74ec8dd | 2,439 | py | Python | test_opencv_haar_img.py | sunnylgz/faceapi | 7de817a2924d7ad4cbbca9778c7a7ec0e4427458 | [
"MIT"
] | null | null | null | test_opencv_haar_img.py | sunnylgz/faceapi | 7de817a2924d7ad4cbbca9778c7a7ec0e4427458 | [
"MIT"
] | null | null | null | test_opencv_haar_img.py | sunnylgz/faceapi | 7de817a2924d7ad4cbbca9778c7a7ec0e4427458 | [
"MIT"
] | null | null | null | #! /usr/bin/python3
"""find faces from input image based on mtcnn and locate the locations and landmarks
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import argparse
import numpy as np
import cv2 as cv
face_cascade = cv.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv.CascadeClassifier('haarcascade_eye.xml')
def detect_face_haar(img):
img = cv.imread(img)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
cv.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray)
for (ex,ey,ew,eh) in eyes:
cv.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
cv.imshow('img',img)
cv.waitKey(0)
cv.destroyAllWindows()
def main(args):
detect_face_haar(args.image)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('image', type=str, default = '', help='Image to load')
parser.add_argument('-o', '--out', type=str, default = '', help='save output to disk')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
| 35.347826 | 90 | 0.738007 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,330 | 0.545305 |
d75b85454e9ed8ed2910b5b6cb818fe560fdc4cc | 3,111 | py | Python | sphinx-sources/Examples/ComputerPrac/FresnelPlane.py | jccmak/lightpipes | 1a296fe08bdd97fc9a0e11f92bab25c85f68e57d | [
"BSD-3-Clause"
] | 132 | 2017-03-15T15:28:46.000Z | 2022-03-09T00:28:25.000Z | sphinx-sources/Examples/ComputerPrac/FresnelPlane.py | jccmak/lightpipes | 1a296fe08bdd97fc9a0e11f92bab25c85f68e57d | [
"BSD-3-Clause"
] | 63 | 2017-01-26T15:46:55.000Z | 2022-01-25T04:50:59.000Z | sphinx-sources/Examples/ComputerPrac/FresnelPlane.py | jccmak/lightpipes | 1a296fe08bdd97fc9a0e11f92bab25c85f68e57d | [
"BSD-3-Clause"
] | 37 | 2017-02-17T16:11:38.000Z | 2022-01-25T18:03:47.000Z | #!/usr/bin/env python
"""
Computer practical 6.1. Fresnel diffraction, plane wavefront.
=============================================================
This is part of the 'computer practical' set of assignments.
Demonstrates Fresnel diffraction when a plane wavefront enters
a round hole.
Measure the values of z and d for which minima and/or maxima on-axis occur
and apply the Fresnel-zone theory to find the wavelength of the light.
"""
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.pyplot as plt
import sys
import webbrowser
if sys.version_info[0] < 3:
from Tkinter import *
import Tkinter as Tk
else:
from tkinter import *
import tkinter as Tk
from LightPipes import *
root = Tk.Tk()
root.wm_title("Computer practical: 6.1 Fresnel plane wavefront. LP-version = " + LPversion)
root.wm_protocol("WM_DELETE_WINDOW", root.quit)
wavelength=530*nm;
size=5*mm;
N=200; N2=int(N/2)
z=20*cm
R=0.5*mm
D=DoubleVar()
Z=DoubleVar()
D.set(2*R/mm)
Z.set(z/cm)
fig=plt.figure(figsize=(8,8))
ax1 = fig.add_subplot(111)
canvas = FigureCanvasTkAgg(fig, master=root)
canvas._tkcanvas.pack(side=Tk.LEFT, fill=Tk.BOTH, expand=1)
v=StringVar()
def TheExample(event):
global I
F=Begin(size,wavelength,N)
z=Z.get()*cm
R=D.get()/2*mm
F=CircAperture(R,0,0,F)
FN=R*R/z/wavelength
if (FN >= 15.0):
F=Forvard(z,F)
else:
F=Fresnel(z,F)
I=Intensity(0,F)
ax1.clear()
ax1.contourf(I,50,cmap='hot'); ax1.axis('off'); ax1.axis('equal')
str='Intensity distribution\ncenter-irradiance = %3.3f [a.u.]' %I[N2][N2]
ax1.set_title(str)
canvas.draw()
def motion(event):
x=event.xdata;y=event.ydata
if (x and y is not None and x>0 and x<N and y>0 and y<N):
v.set('x=%3.2f mm, y=%3.2f mm\n I=%3.3f [a.u.]' %((-size/2+x*size/N)/mm,(-size/2+y*size/N)/mm,I[int(x)][int(y)]))
root.configure(cursor='crosshair')
else:
v.set('')
root.configure(cursor='arrow')
def openbrowser(event):
webbrowser.open_new(r"https://opticspy.github.io/lightpipes/FresnelDiffraction.html")
def _quit():
root.quit()
Scale( root,
takefocus = 1,
orient='horizontal',
label = 'diameter aperture [mm]',
length = 200, from_=0.5, to=size/2/mm,
resolution = 0.001,
variable = D,
cursor="hand2",
command = TheExample).pack()
Scale( root,
takefocus = 1,
orient='horizontal',
label = 'z [cm]',
length = 200,
from_=0.01, to=200.0,
resolution = 0.01,
variable = Z,
cursor="hand2",
command = TheExample).pack()
Button( root,
width = 24,
text='Quit',
cursor="hand2",
command=_quit).pack(pady=10)
link = Label(root, text="help", fg="blue", cursor="hand2")
link.pack()
link.bind("<Button-1>", openbrowser)
Label(root, textvariable=v).pack(pady=50)
cid = fig.canvas.mpl_connect('motion_notify_event', motion)
TheExample(0)
root.mainloop()
root.destroy()
| 25.5 | 121 | 0.623594 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 897 | 0.288332 |
d75cad8b32fab20545d08ae0ad7d2cccf0a03783 | 1,226 | py | Python | scripts/sliding_window.py | gustaveroussy/98drivers | 9886eb1995592f690d621db945e268f7c629e07d | [
"MIT"
] | null | null | null | scripts/sliding_window.py | gustaveroussy/98drivers | 9886eb1995592f690d621db945e268f7c629e07d | [
"MIT"
] | 7 | 2016-05-25T09:55:35.000Z | 2016-05-30T08:01:31.000Z | scripts/sliding_window.py | gustaveroussy/98drivers | 9886eb1995592f690d621db945e268f7c629e07d | [
"MIT"
] | null | null | null | import argparse
import tabix
import os
from common import *
def sliding_window(tabix_file, genom, window ):
sizes = chromosom_sizes(genom)
tx = tabix.open(tabix_file)
for chromosom in sizes:
for position in range(0,sizes[chromosom] - window, window):
start = position
end = position + window
count = count_mutation_ratio_std(tx, chromosom, start , end)
distinct_count = count_mutation_ratio_uniq(tx, chromosom, start ,end,3)
print(chromosom,start,end,count, distinct_count, sep="\t")
# for position in range(0, sizes[chromosom] - window):
# start = position
# end = position + window
# middle = start + window/2
# count_mutation_ratio_std(tx, chromosom:str, start , end:int):
parser = argparse.ArgumentParser(
description="wgs sliding window ",
usage=""
)
parser.add_argument("file", type=str, help="bed.gz file indexed with tabix")
parser.add_argument("-g","--genom", type=str, help="", required=True)
parser.add_argument("-w","--window", type=int, help="", default=1000)
args = parser.parse_args()
# # Start algo
# compute_feature_variation(args.file, args.feature, args.algorithm)
sliding_window(args.file, args.genom, args.window) | 26.652174 | 79 | 0.696574 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 376 | 0.306688 |
d75d5e9c183dad29dd630cad94b9feb4b957bbfc | 535 | py | Python | examples/tornado/myapp/__init__.py | s-shin/wswrapper | f47500eb9d27d3aa96d91a50081945e3b83be9dd | [
"MIT"
] | 2 | 2017-05-15T22:23:36.000Z | 2018-02-19T12:27:45.000Z | examples/tornado/myapp/__init__.py | s-shin/wswrapper | f47500eb9d27d3aa96d91a50081945e3b83be9dd | [
"MIT"
] | null | null | null | examples/tornado/myapp/__init__.py | s-shin/wswrapper | f47500eb9d27d3aa96d91a50081945e3b83be9dd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
def setup_argparser(parser):
"""コマンドパーサーのセットアップ。
パーサーは共有されるので、被らないように上手く調整すること。
:param parser: ``argparse.ArgumentParser`` のインスタンス。
"""
pass
def setup_app(args):
"""コマンドパース後のセットアップ。
:param args: ``parser.arg_parse()`` の戻り値。
"""
pass
def on_open(client):
"""WebSocketのコネクションが成立した時に呼ばれる。
"""
pass
def on_close(client):
"""WebSocketのコネクションが切れた時に呼ばれる。
"""
pass
def on_setup(client, data):
client.emit("print", "Hello world!")
| 14.861111 | 55 | 0.605607 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 546 | 0.719368 |
d75da4931865b017feed0648dddd5ffd50baa642 | 120 | py | Python | src/Bank.py | tokuma09/PyTDD | ae76cd7d6af13c383d4d860500c6291d924a56fd | [
"MIT"
] | null | null | null | src/Bank.py | tokuma09/PyTDD | ae76cd7d6af13c383d4d860500c6291d924a56fd | [
"MIT"
] | 15 | 2021-05-10T13:29:25.000Z | 2021-05-23T07:15:09.000Z | src/Bank.py | tokuma09/PyTDD | ae76cd7d6af13c383d4d860500c6291d924a56fd | [
"MIT"
] | null | null | null | class Bank():
def __init__(self):
pass
def reduce(self, source, to):
return source.reduce(to)
| 15 | 33 | 0.575 | 119 | 0.991667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d75e34968052a479dcb04800c417d5b42abec672 | 3,876 | py | Python | test/clean_directory.py | adevress/gfal2 | ce8945d1c153e26c5d10ad43d1940b8dcace0579 | [
"Apache-2.0"
] | null | null | null | test/clean_directory.py | adevress/gfal2 | ce8945d1c153e26c5d10ad43d1940b8dcace0579 | [
"Apache-2.0"
] | null | null | null | test/clean_directory.py | adevress/gfal2 | ce8945d1c153e26c5d10ad43d1940b8dcace0579 | [
"Apache-2.0"
] | 1 | 2020-04-28T09:36:46.000Z | 2020-04-28T09:36:46.000Z | #!/usr/bin/env python
import gfal2
import logging
import optparse
import stat
import sys
log = logging.getLogger('gfal2.clean_directory')
class Cleaner(object):
def __init__(self, abort_on_error=False, recursive=False, only_files=False, chmod=False):
self.abort_on_error = abort_on_error
self.recursive = recursive
self.only_files = only_files
self.chmod = chmod
self.context = gfal2.creat_context()
def _get_list(self, surl):
files = []
directories = []
dh = self.context.opendir(surl)
d_entry, d_stat = dh.readpp()
while d_entry:
full_path = surl + '/' + d_entry.d_name
if stat.S_ISREG(d_stat.st_mode):
files.append((full_path, d_stat))
elif stat.S_ISDIR(d_stat.st_mode):
directories.append((full_path, d_stat))
d_entry, d_stat = dh.readpp()
return files, directories
def __call__(self, surl):
log.info("Cleaning %s" % surl)
try:
files, directories = self._get_list(surl)
except gfal2.GError, e:
if self.abort_on_error:
raise
logging.error("Could not list %s (%s)" % (surl, e.message))
return 0, 0
n_files, n_directories = 0, 0
for file, f_stat in files:
try:
self.context.unlink(file)
log.info("Unlink %s" % file)
n_files += 1
except gfal2.GError, e:
if self.abort_on_error:
raise
log.error("Could not unlink %s (%s)" % (file, e.message))
for directory, d_stat in directories:
if self.chmod and not (d_stat.st_mode & 0666):
try:
self.context.chmod(directory, 0775)
log.info("Chmod for %s" % directory)
except gfal2.GError, e:
log.warn("Failed chmod for %s (%s)" % (directory, e.message))
sub_files, sub_directories = self(directory)
n_files += sub_files
n_directories += sub_directories
if not self.only_files:
try:
log.info("Rmdir %s" % directory)
self.context.rmdir(directory)
n_directories += 1
except gfal2.GError, e:
if self.abort_on_error:
raise
log.error("Failed to rmdir %s (%s)" % (directory, e.message))
return n_files, n_directories
if __name__ == '__main__':
parser = optparse.OptionParser(usage='usage: %prog [options] surl')
parser.add_option('-x', '--abort', dest='abort_on_error', default=False,
action='store_true', help='Abort cleaning on the first error')
parser.add_option('-r', '--recursive', dest='recursive', default=False,
action='store_true', help='Traverse directories recursively')
parser.add_option('-f', '--files', dest='only_files', default=False,
action='store_true', help='Unlink only files')
parser.add_option('-c', '--chmod', dest='chmod', default=False,
action='store_true', help='Attempt a chmod when a directory is not writeable')
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error('Wrong number of arguments')
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
log.addHandler(stdout_handler)
log.setLevel(logging.INFO)
cleaner = Cleaner(options.abort_on_error, options.recursive, options.only_files, options.chmod)
n_files, n_directories = cleaner(args[0])
logging.info("Removed %d files and %d directories" % (n_files, n_directories))
| 36.914286 | 100 | 0.576625 | 2,449 | 0.631837 | 0 | 0 | 0 | 0 | 0 | 0 | 617 | 0.159185 |
d75e7b27d27b1524b1e76cacefb052ea9376911b | 2,776 | py | Python | app/views/recipe.py | baldur132/essensfindung | e1a8106d8a1de857340229a5fe36ca6910c55b35 | [
"MIT"
] | 1 | 2022-01-29T20:33:30.000Z | 2022-01-29T20:33:30.000Z | app/views/recipe.py | baldur132/essensfindung | e1a8106d8a1de857340229a5fe36ca6910c55b35 | [
"MIT"
] | 2 | 2022-03-08T06:41:22.000Z | 2022-03-09T11:52:06.000Z | app/views/recipe.py | baldur132/essensfindung | e1a8106d8a1de857340229a5fe36ca6910c55b35 | [
"MIT"
] | 6 | 2022-01-06T15:02:59.000Z | 2022-02-02T08:08:56.000Z | """Router and Logic for the Recipe of the Website"""
from datetime import timedelta
from typing import Union
import fastapi
from fastapi.responses import HTMLResponse
from sqlalchemy.orm import Session
from starlette.requests import Request
from starlette.templating import Jinja2Templates
from db.database import get_db
from schemes import scheme_cuisine
from schemes import scheme_filter
from schemes.scheme_user import User
from services import service_rec
from tools.security import get_current_user
templates = Jinja2Templates("templates")
router = fastapi.APIRouter()
@router.get("/findrecipe", response_class=HTMLResponse)
async def findrecipe(
request: Request,
length: int,
keywords: Union[str, None] = None,
db_session: Session = fastapi.Depends(get_db),
current_user: User = fastapi.Depends(get_current_user),
):
"""Requests user settings and search for recipe.
Args:
request (Request): the http request
length (int): the minimal length
keywords (Union[str, None], optional): the keywords. Defaults to None.
Returns:
TemplateResponse: the http response
"""
if length == 0:
total_length = timedelta(days=100)
else:
total_length = timedelta(seconds=length)
rec_filter = scheme_filter.FilterRecipe(
cuisines=[scheme_cuisine.PydanticCuisine(name="Restaurant")],
rating=1,
keyword=keywords,
total_time=total_length,
)
recipe = service_rec.search_recipe(db_session=db_session, user=current_user, recipe_filter=rec_filter)
prep_time_total_seconds = recipe.prepTime.total_seconds()
prep_time_days = int(prep_time_total_seconds // 86400)
prep_time_hours = int((prep_time_total_seconds % 86400) // 3600)
prep_time_minutes = int((prep_time_total_seconds % 3600) // 60)
prep_time_seconds = int(prep_time_total_seconds % 60)
cook_time_total_seconds = recipe.cookTime.total_seconds()
cook_time_days = int(cook_time_total_seconds // 86400)
cook_time_minutes = int((cook_time_total_seconds % 86400) // 3600)
cook_time_hours = int(cook_time_total_seconds // 3600)
cook_time_minutes = int((cook_time_total_seconds % 3600) // 60)
cook_time_seconds = int(cook_time_total_seconds % 60)
prep_time = {
"days": prep_time_days,
"hours": prep_time_hours,
"minutes": prep_time_minutes,
"seconds": prep_time_seconds,
}
cook_time = {
"days": cook_time_days,
"hours": cook_time_hours,
"minutes": cook_time_minutes,
"seconds": cook_time_seconds,
}
return templates.TemplateResponse(
"recipe/recipe_result.html",
{"request": request, "recipe": recipe, "prepTime": prep_time, "cookTime": cook_time},
)
| 33.445783 | 106 | 0.716499 | 0 | 0 | 0 | 0 | 2,195 | 0.790706 | 2,139 | 0.770533 | 503 | 0.181196 |
d75e92e1cf6688e5a8ad55c6253bccf7855e6a50 | 2,196 | py | Python | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/viper/calculators/calc_utilities.py | PascalGuenther/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 69 | 2021-12-16T01:34:09.000Z | 2022-03-31T08:27:39.000Z | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/viper/calculators/calc_utilities.py | PascalGuenther/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 6 | 2022-01-12T18:22:08.000Z | 2022-03-25T10:19:27.000Z | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/viper/calculators/calc_utilities.py | PascalGuenther/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 21 | 2021-12-20T09:05:45.000Z | 2022-03-28T02:52:28.000Z | from pyradioconfig.parts.bobcat.calculators.calc_utilities import Calc_Utilities_Bobcat
from pycalcmodel.core.variable import ModelVariableFormat
from enum import Enum
class Calc_Utilities_Viper(Calc_Utilities_Bobcat):
def buildVariables(self, model):
#Build all variables from the inherited class
super().buildVariables(model)
#Now also build the fefilt_selected variable
self._addModelVariable(model, 'fefilt_selected', str, ModelVariableFormat.ASCII)
def calc_fefilt_selected(self, model):
#This method calculates which FEFILT register set should be used based on demod
#Read in model variables
demod_select = model.vars.demod_select.value
#Calculate fefilt_selected is FEFILT0 only for Viper
fefilt_selected = 'FEFILT0'
#Write the model variable
model.vars.fefilt_selected.value = fefilt_selected
def get_fefilt_actual(self, model, reg_name_str):
#This method queries the value of a reg name string based on the FEFILT register set in use
#Read in model variables
fefilt_selected = model.vars.fefilt_selected.value
#Get the register object
reg_name_complete = fefilt_selected+'_'+reg_name_str
reg = getattr(model.vars, reg_name_complete)
#Return the register value
return reg.value
def get_fefilt_value_forced(self, model, reg_name_str):
#This method queries the value of a reg name string based on the FEFILT register set in use
#Read in model variables
fefilt_selected = model.vars.fefilt_selected.value
#Get the register object
reg_name_complete = fefilt_selected+'_'+reg_name_str
reg = getattr(model.vars, reg_name_complete)
#Return the register value
return reg.value_forced
def write_fefilt_reg(self, model, reg_name_str, value):
#This method writes an FEFILT register field based on the FEFILT register set in use
#Read in model variables
fefilt_selected = model.vars.fefilt_selected.value
#Write the register field
self._reg_write_by_name_concat(model, fefilt_selected, reg_name_str, value)
| 37.220339 | 99 | 0.722222 | 2,026 | 0.922587 | 0 | 0 | 0 | 0 | 0 | 0 | 764 | 0.347905 |
d75ec669dfc620aaaefd9ee6ffa0b75d16e42209 | 2,029 | py | Python | tests/logger/check_logger.py | rancp/ducktape-docs | e1a3b1b7e68beedf5f8d29a4e5f196912a20e264 | [
"Apache-2.0"
] | null | null | null | tests/logger/check_logger.py | rancp/ducktape-docs | e1a3b1b7e68beedf5f8d29a4e5f196912a20e264 | [
"Apache-2.0"
] | null | null | null | tests/logger/check_logger.py | rancp/ducktape-docs | e1a3b1b7e68beedf5f8d29a4e5f196912a20e264 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import psutil
import shutil
import tempfile
from ducktape.tests.loggermaker import LoggerMaker, close_logger
class DummyFileLoggerMaker(LoggerMaker):
def __init__(self, log_dir, n_handles):
"""Create a logger with n_handles file handles, with files in log_dir"""
self.log_dir = log_dir
self.n_handles = n_handles
@property
def logger_name(self):
return "a.b.c"
def configure_logger(self):
for i in range(self.n_handles):
fh = logging.FileHandler(os.path.join(self.log_dir, "log-" + str(i)))
self._logger.addHandler(fh)
def open_files():
# current process
p = psutil.Process()
return p.open_files()
class CheckLogger(object):
def setup_method(self, _):
self.temp_dir = tempfile.mkdtemp()
def check_close_logger(self):
"""Check that calling close_logger properly cleans up resources."""
initial_open_files = open_files()
n_handles = 100
l = DummyFileLoggerMaker(self.temp_dir, n_handles)
# accessing logger attribute lazily triggers configuration of logger
the_logger = l.logger
assert len(open_files()) == len(initial_open_files) + n_handles
close_logger(the_logger)
assert len(open_files()) == len(initial_open_files)
def teardown_method(self, _):
if os.path.exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
| 30.283582 | 81 | 0.699359 | 1,219 | 0.600789 | 0 | 0 | 59 | 0.029078 | 0 | 0 | 798 | 0.393297 |
d75f55e688f1197eea211a05b99890e08a4f598e | 1,268 | py | Python | Egzersiz/ege/egzersizSet.py | ibrahimediz/ornekproje | c5ebeafc43a9c6d2aa639d0d95eedbce65991576 | [
"Apache-2.0"
] | null | null | null | Egzersiz/ege/egzersizSet.py | ibrahimediz/ornekproje | c5ebeafc43a9c6d2aa639d0d95eedbce65991576 | [
"Apache-2.0"
] | null | null | null | Egzersiz/ege/egzersizSet.py | ibrahimediz/ornekproje | c5ebeafc43a9c6d2aa639d0d95eedbce65991576 | [
"Apache-2.0"
] | null | null | null | first_angle = int(input("Lütfen ilk açıyı giriniz: "))
second_angle = int(input("Lütfen ilk açıyı giriniz: "))
gelenAcilar = {first_angle, second_angle}
eskenar = {60, 60, 60}
dik = {25, 65, 90}
ikizKenar = {45, 45, 90}
cesitKenar = {120, 41, 19}
if gelenAcilar.intersection(ikizKenar):
print("ikizkenar")
elif gelenAcilar.intersection(eskenar):
print("eskenar")
elif gelenAcilar.intersection(dik):
print("dik")
elif gelenAcilar.intersection(cesitKenar):
print("cesitKenar")
"""
aci1 = input("1. Açıyı Giriniz:")
aci2 = input("2. Açıyı Giriniz:")
if (aci1 and aci2) and (aci1.isdigit() and aci2.isdigit()):
aci1,aci2 = int(aci1),int(aci2)
liste = [aci1,aci2,(180-(aci1+aci2))]
if sum(liste) == 180:
if len(set(liste)) == 2:
print("İkizkenar üçgen")
if len(set(liste)) == 3:
print("Çeşitkenar üçgen")
if 90 in liste:
print("Dik Üçgen")
if len(set(liste)) == 1:
print("Eşkenar Üçgen")
else:
print("Açı Hatası")
else:
print("Giriş Hatası")
"""
text = input("Enter text")
counter = {}
for char in text:
if char in counter.keys():
counter[char] = 0
else :
counter[char] = counter[char] + 1
print(counter)
| 23.054545 | 59 | 0.603312 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 713 | 0.548884 |
d7611de84d0c09241612fe2faace88345d97ec3c | 18,916 | py | Python | src/tests/presale/test_customer.py | n0emis/pretix | 57d68eaddb01ec4adc0837a915631871cae4d91a | [
"Apache-2.0"
] | null | null | null | src/tests/presale/test_customer.py | n0emis/pretix | 57d68eaddb01ec4adc0837a915631871cae4d91a | [
"Apache-2.0"
] | 8 | 2015-01-06T10:50:27.000Z | 2015-01-18T18:38:18.000Z | src/tests/presale/test_customer.py | n0emis/pretix | 57d68eaddb01ec4adc0837a915631871cae4d91a | [
"Apache-2.0"
] | null | null | null | #
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
import datetime
from datetime import timedelta
from decimal import Decimal
from urllib.parse import parse_qs, urlparse
import pytest
from django.core import mail as djmail, signing
from django.core.signing import dumps
from django.test import Client
from django.utils.timezone import now
from django_scopes import scopes_disabled
from pretix.base.models import Event, Item, Order, OrderPosition, Organizer
from pretix.multidomain.models import KnownDomain
from pretix.presale.forms.customer import TokenGenerator
@pytest.fixture
def env():
    """Provide (organizer, event) with customer accounts switched on."""
    organizer = Organizer.objects.create(name='Big Events LLC', slug='bigevents')
    organizer.settings.customer_accounts = True
    conference = Event.objects.create(
        organizer=organizer, name='Conference', slug='conf',
        date_from=now() + timedelta(days=10),
        live=True, is_public=False
    )
    return organizer, conference
@pytest.mark.django_db
def test_disabled(env, client):
    """Every customer-account endpoint must 404 when the feature is disabled."""
    env[0].settings.customer_accounts = False
    subpaths = (
        'register',
        'login',
        'pwreset',
        'pwrecover',
        'activate',
        'change',
        'confirmchange',
        '',  # the account overview page itself
    )
    for subpath in subpaths:
        response = client.get('/bigevents/account/' + subpath)
        assert response.status_code == 404
@pytest.mark.django_db
def test_org_register(env, client):
    """Register a new customer, then activate the account via the mailed token."""
    # The registration form includes a captcha challenge signed per client IP.
    signer = signing.TimestampSigner(salt='customer-registration-captcha-127.0.0.1')
    response = client.post('/bigevents/account/register', {
        'email': 'john@example.org',
        'name_parts_0': 'John Doe',
        'challenge': signer.sign('1+2'),
        'response': '3',
    }, REMOTE_ADDR='127.0.0.1')
    assert response.status_code == 302
    assert len(djmail.outbox) == 1
    with scopes_disabled():
        customer = env[0].customers.get(email='john@example.org')
        assert not customer.is_verified
        assert customer.is_active
    # Follow the activation link and set an initial password.
    activation_url = '/bigevents/account/activate?id={}&token={}'.format(
        customer.identifier, TokenGenerator().make_token(customer)
    )
    response = client.post(activation_url, {
        'password': 'PANioMR62',
        'password_repeat': 'PANioMR62',
    })
    assert response.status_code == 302
    customer.refresh_from_db()
    assert customer.check_password('PANioMR62')
    assert customer.is_verified
@pytest.mark.django_db
def test_org_register_duplicate_email(env, client):
    """Registering with an already-known address re-renders the form with an error."""
    with scopes_disabled():
        env[0].customers.create(email='john@example.org')
    response = client.post('/bigevents/account/register', {
        'email': 'john@example.org',
        'name_parts_0': 'John Doe',
    })
    assert response.status_code == 200
    assert b'already registered' in response.content
@pytest.mark.django_db
def test_org_resetpw(env, client):
    """Password reset mails a recovery link that sets a password and verifies the account."""
    with scopes_disabled():
        customer = env[0].customers.create(email='john@example.org', is_verified=False)
    response = client.post('/bigevents/account/pwreset', {
        'email': 'john@example.org',
    })
    assert response.status_code == 302
    assert len(djmail.outbox) == 1
    recover_url = '/bigevents/account/pwrecover?id={}&token={}'.format(
        customer.identifier, TokenGenerator().make_token(customer)
    )
    response = client.post(recover_url, {
        'password': 'PANioMR62',
        'password_repeat': 'PANioMR62',
    })
    assert response.status_code == 302
    customer.refresh_from_db()
    assert customer.check_password('PANioMR62')
    # Completing a password reset also proves ownership of the address.
    assert customer.is_verified
@pytest.mark.django_db
def test_org_activate_invalid_token(env, client):
    """An activation link with a tampered token redirects instead of activating."""
    with scopes_disabled():
        customer = env[0].customers.create(email='john@example.org', is_verified=False)
    broken_url = '/bigevents/account/activate?id={}&token=.invalid.{}'.format(
        customer.identifier, TokenGenerator().make_token(customer)
    )
    assert client.get(broken_url).status_code == 302
@pytest.mark.django_db
def test_org_login_logout(env, client):
    """Logging in grants access to the account area; logging out revokes it."""
    with scopes_disabled():
        customer = env[0].customers.create(email='john@example.org', is_verified=True)
        customer.set_password('foo')
        customer.save()
    response = client.post('/bigevents/account/login', {
        'email': 'john@example.org',
        'password': 'foo',
    })
    assert response.status_code == 302
    assert client.get('/bigevents/account/').status_code == 200
    assert client.get('/bigevents/account/logout').status_code == 302
    # After logout the account page redirects to the login form again.
    assert client.get('/bigevents/account/').status_code == 302
@pytest.mark.django_db
def test_org_login_invalid_password(env, client):
    """A wrong password re-renders the login form with an error message."""
    with scopes_disabled():
        customer = env[0].customers.create(email='john@example.org', is_verified=True)
        customer.set_password('foo')
        customer.save()
    response = client.post('/bigevents/account/login', {
        'email': 'john@example.org',
        'password': 'invalid',
    })
    assert response.status_code == 200
    assert b'alert-danger' in response.content
@pytest.mark.django_db
def test_org_login_not_verified(env, client):
    """An unverified account cannot log in even with the correct password."""
    with scopes_disabled():
        customer = env[0].customers.create(email='john@example.org', is_verified=False)
        customer.set_password('foo')
        customer.save()
    response = client.post('/bigevents/account/login', {
        'email': 'john@example.org',
        'password': 'foo',
    })
    assert response.status_code == 200
    assert b'alert-danger' in response.content
@pytest.mark.django_db
def test_org_login_not_active(env, client):
    """A deactivated account cannot log in even with the correct password."""
    with scopes_disabled():
        customer = env[0].customers.create(email='john@example.org', is_verified=True, is_active=False)
        customer.set_password('foo')
        customer.save()
    response = client.post('/bigevents/account/login', {
        'email': 'john@example.org',
        'password': 'foo',
    })
    assert response.status_code == 200
    assert b'alert-danger' in response.content
@pytest.mark.django_db
@pytest.mark.parametrize("url", [
    "account/change",
    "account/membership/1/",
    "account/",
])
def test_login_required(client, env, url):
    """Account pages redirect anonymous visitors and render once logged in."""
    with scopes_disabled():
        customer = env[0].customers.create(email='john@example.org', is_verified=True)
        customer.set_password('foo')
        customer.save()
    target = '/bigevents/' + url
    # Anonymous: redirected to login.
    assert client.get(target).status_code == 302
    client.post('/bigevents/account/login', {
        'email': 'john@example.org',
        'password': 'foo',
    })
    # Logged in: page renders (404 is acceptable for the nonexistent membership).
    assert client.get(target).status_code in (200, 404)
@pytest.mark.django_db
def test_org_order_list(env, client):
    """The account overview lists orders assigned to the customer; matching by
    e-mail address only happens once ``customer_accounts_link_by_email`` is on."""
    with scopes_disabled():
        customer = env[0].customers.create(email='john@example.org', is_verified=True)
        customer.set_password('foo')
        customer.save()
        event = env[1]
        ticket = Item.objects.create(event=event, name='Early-bird ticket', default_price=23, admission=True)

        def make_order(email, cust=None):
            # Pending order with a single ticket position.
            order = Order.objects.create(
                status=Order.STATUS_PENDING,
                event=event,
                email=email,
                customer=cust,
                datetime=now() - datetime.timedelta(days=3),
                expires=now() + datetime.timedelta(days=11),
                total=Decimal("23"),
            )
            OrderPosition.objects.create(
                order=order,
                item=ticket,
                variation=None,
                price=Decimal("23"),
                attendee_name_parts={'full_name': "Peter"}
            )
            return order

        order_other = make_order('admin@localhost')                     # unrelated
        order_same_email = make_order('john@example.org')               # matches by e-mail only
        order_assigned = make_order('admin@localhost', cust=customer)   # assigned directly

    response = client.post('/bigevents/account/login', {
        'email': 'john@example.org',
        'password': 'foo',
    })
    assert response.status_code == 302

    # Default: only explicitly assigned orders are visible.
    response = client.get('/bigevents/account/')
    assert response.status_code == 200
    page = response.content.decode()
    assert order_other.code not in page
    assert order_same_email.code not in page
    assert order_assigned.code in page

    # With e-mail linking enabled, orders sharing the address appear as well.
    env[0].settings.customer_accounts_link_by_email = True
    response = client.get('/bigevents/account/')
    assert response.status_code == 200
    page = response.content.decode()
    assert order_other.code not in page
    assert order_same_email.code in page
    assert order_assigned.code in page
@pytest.mark.django_db
def test_change_name(env, client):
    """The customer name can be changed without re-entering the password."""
    with scopes_disabled():
        customer = env[0].customers.create(email='john@example.org', is_verified=True)
        customer.set_password('foo')
        customer.save()
    response = client.post('/bigevents/account/login', {
        'email': 'john@example.org',
        'password': 'foo',
    })
    assert response.status_code == 302
    response = client.post('/bigevents/account/change', {
        'name_parts_0': 'John Doe',
        'email': 'john@example.org',
    })
    assert response.status_code == 302
    customer.refresh_from_db()
    assert customer.name == 'John Doe'
@pytest.mark.django_db
def test_change_email(env, client):
    """Changing the e-mail requires the current password plus a confirmation link."""
    with scopes_disabled():
        customer = env[0].customers.create(email='john@example.org', is_verified=True)
        customer.set_password('foo')
        customer.save()
    response = client.post('/bigevents/account/login', {
        'email': 'john@example.org',
        'password': 'foo',
    })
    assert response.status_code == 302

    # Missing current password: form is re-rendered, address unchanged.
    response = client.post('/bigevents/account/change', {
        'name_parts_0': 'John Doe',
        'email': 'john@example.com'
    })
    assert response.status_code == 200
    customer.refresh_from_db()
    assert customer.email == 'john@example.org'

    # With the password, a confirmation mail is sent, but the address is only
    # switched once the link in it is visited.
    response = client.post('/bigevents/account/change', {
        'name_parts_0': 'John Doe',
        'email': 'john@example.com',
        'password_current': 'foo',
    })
    assert response.status_code == 302
    customer.refresh_from_db()
    assert customer.email == 'john@example.org'
    assert len(djmail.outbox) == 1

    # Reconstruct the signed confirmation token the mail would contain.
    token = dumps({
        'customer': customer.pk,
        'email': 'john@example.com'
    }, salt='pretix.presale.views.customer.ChangeInformationView')
    response = client.get('/bigevents/account/confirmchange?token=' + token)
    assert response.status_code == 302
    customer.refresh_from_db()
    assert customer.email == 'john@example.com'
@pytest.mark.django_db
def test_change_pw(env, client):
with scopes_disabled():
customer = env[0].customers.create(email='john@example.org', is_verified=True)
customer.set_password('foo')
customer.save()
r = client.post('/bigevents/account/login', {
'email': 'john@example.org',
'password': 'foo',
})
assert r.status_code == 302
r = client.post('/bigevents/account/password', {
'password_current': 'invalid',
'password': 'aYLBRNg4',
'password_repeat': 'aYLBRNg4',
})
assert r.status_code == 200
customer.refresh_from_db()
assert customer.check_password('foo')
r = client.post('/bigevents/account/password', {
'password_current': 'foo',
'password': 'aYLBRNg4',
'password_repeat': 'aYLBRNg4',
})
assert r.status_code == 302
customer.refresh_from_db()
assert customer.check_password('aYLBRNg4')
@pytest.mark.django_db
def test_login_per_org(env, client):
with scopes_disabled():
o2 = Organizer.objects.create(name='Demo', slug='demo')
o2.settings.customer_accounts = True
customer = env[0].customers.create(email='john@example.org', is_verified=True)
customer.set_password('foo')
customer.save()
client.post('/bigevents/account/login', {
'email': 'john@example.org',
'password': 'foo',
})
assert client.get('/bigevents/account/').status_code == 200
assert client.get('/demo/account/').status_code == 302
@pytest.fixture
def client2():
# We need a second test client instance for cross domain stuff since the test client
# does not isolate sessions per-domain like browsers do
return Client()
def _cross_domain_login(env, client, client2):
with scopes_disabled():
customer = env[0].customers.create(email='john@example.org', is_verified=True)
customer.set_password('foo')
customer.save()
KnownDomain.objects.create(domainname='org.test', organizer=env[0])
KnownDomain.objects.create(domainname='event.test', organizer=env[0], event=env[1])
# Log in on org domain
r = client.post('/account/login?next=https://event.test/redeem&request_cross_domain_customer_auth=true', {
'email': 'john@example.org',
'password': 'foo',
}, HTTP_HOST='org.test')
assert r.status_code == 302
u = urlparse(r.headers['Location'])
assert u.netloc == 'event.test'
assert u.path == '/redeem'
q = parse_qs(u.query)
assert 'cross_domain_customer_auth' in q
# Take session over to event domain
r = client2.get(f'/?{u.query}', HTTP_HOST='event.test')
assert r.status_code == 200
assert b'john@example.org' in r.content
@pytest.mark.django_db
def test_cross_domain_login(env, client, client2):
_cross_domain_login(env, client, client2)
# Logged in on org domain
r = client.get('/', HTTP_HOST='event.test')
assert r.status_code == 200
assert b'john@example.org' in r.content
# Logged in on event domain
r = client2.get('/', HTTP_HOST='org.test')
assert r.status_code == 200
assert b'john@example.org' in r.content
@pytest.mark.django_db
def test_cross_domain_logout_on_org_domain(env, client, client2):
_cross_domain_login(env, client, client2)
r = client.get('/account/logout', HTTP_HOST='org.test')
assert r.status_code == 302
# Logged out on org domain
r = client.get('/', HTTP_HOST='event.test')
assert r.status_code == 200
assert b'john@example.org' not in r.content
# Logged out on event domain
r = client2.get('/', HTTP_HOST='org.test')
assert r.status_code == 200
assert b'john@example.org' not in r.content
@pytest.mark.django_db
def test_cross_domain_logout_on_event_domain(env, client, client2):
_cross_domain_login(env, client, client2)
r = client2.get('/account/logout?next=/redeem', HTTP_HOST='event.test')
assert r.status_code == 302
u = urlparse(r.headers['Location'])
assert u.netloc == 'org.test'
assert u.path == '/account/logout'
r = client.get(f'{u.path}?{u.query}', HTTP_HOST='org.test')
assert r.status_code == 302
assert r.headers['Location'] == 'http://event.test/redeem'
# Logged out on org domain
r = client.get('/', HTTP_HOST='event.test')
assert r.status_code == 200
assert b'john@example.org' not in r.content
# Logged out on event domain
r = client2.get('/', HTTP_HOST='org.test')
assert r.status_code == 200
assert b'john@example.org' not in r.content
@pytest.mark.django_db
def test_cross_domain_login_otp_only_valid_once(env, client, client2):
with scopes_disabled():
customer = env[0].customers.create(email='john@example.org', is_verified=True)
customer.set_password('foo')
customer.save()
KnownDomain.objects.create(domainname='org.test', organizer=env[0])
KnownDomain.objects.create(domainname='event.test', organizer=env[0], event=env[1])
# Log in on org domain
r = client.post('/account/login?next=https://event.test/redeem&request_cross_domain_customer_auth=true', {
'email': 'john@example.org',
'password': 'foo',
}, HTTP_HOST='org.test')
assert r.status_code == 302
u = urlparse(r.headers['Location'])
assert u.netloc == 'event.test'
assert u.path == '/redeem'
q = parse_qs(u.query)
assert 'cross_domain_customer_auth' in q
# Take session over to event domain
r = client.get(f'/?{u.query}', HTTP_HOST='event.test')
assert r.status_code == 200
assert b'john@example.org' in r.content
# Try to use again
r = client2.get(f'/?{u.query}', HTTP_HOST='event.test')
assert r.status_code == 200
assert b'john@example.org' not in r.content
@pytest.mark.django_db
def test_cross_domain_login_validate_redirect_url(env, client, client2):
with scopes_disabled():
customer = env[0].customers.create(email='john@example.org', is_verified=True)
customer.set_password('foo')
customer.save()
KnownDomain.objects.create(domainname='org.test', organizer=env[0])
KnownDomain.objects.create(domainname='event.test', organizer=env[0], event=env[1])
# Log in on org domain
r = client.post('/account/login?next=https://evilcorp.test/redeem&request_cross_domain_customer_auth=true', {
'email': 'john@example.org',
'password': 'foo',
}, HTTP_HOST='org.test')
assert r.status_code == 302
u = urlparse(r.headers['Location'])
assert u.netloc == 'org.test'
assert u.path == '/account/'
q = parse_qs(u.query)
assert 'cross_domain_customer_auth' not in q
| 33.185965 | 119 | 0.656164 | 0 | 0 | 0 | 0 | 16,143 | 0.853405 | 0 | 0 | 5,925 | 0.313227 |
d7644768742f958b906abb14fbff6d652cf10fee | 2,009 | py | Python | build/python_module.py | Romit-Maulik/CBurgers | 0b0b0d733049d731c9b82215f19219f99ea77b25 | [
"MIT"
] | 5 | 2020-09-17T04:56:00.000Z | 2021-04-01T23:33:51.000Z | build/python_module.py | Romit-Maulik/CBurgers | 0b0b0d733049d731c9b82215f19219f99ea77b25 | [
"MIT"
] | null | null | null | build/python_module.py | Romit-Maulik/CBurgers | 0b0b0d733049d731c9b82215f19219f99ea77b25 | [
"MIT"
] | 4 | 2020-09-17T13:00:34.000Z | 2021-06-24T00:36:17.000Z | print("From python: Within python module")
import os,sys
HERE = os.getcwd()
sys.path.insert(0,HERE)
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
data_array = np.zeros(shape=(2001,258)) # Very important that this matches the number of timesteps in the main solver
x = np.arange(start=0,stop=2.0*np.pi,step=2.0*np.pi/256)
iternum = 0
def collection_func(input_array):
global data_array,iternum
data_array[iternum,:] = input_array[:]
iternum+=1
return None
def analyses_func():
global data_array, x
plt.figure()
for i in range(0,np.shape(data_array)[0],400):
plt.plot(x,data_array[i,1:-1],label='Timestep '+str(i))
plt.legend()
plt.xlabel('x')
plt.xlabel('u')
plt.title('Field evolution')
plt.savefig('Field_evolution.png')
plt.close()
# Perform an SVD
data_array = data_array[:,1:-1]
print('Performing SVD')
u,s,v = np.linalg.svd(data_array,full_matrices=False)
# Plot SVD eigenvectors
plt.figure()
plt.plot(x, v[0,:],label='Mode 0')
plt.plot(x, v[1,:],label='Mode 1')
plt.plot(x, v[2,:],label='Mode 2')
plt.legend()
plt.title('SVD Eigenvectors')
plt.xlabel('x')
plt.xlabel('u')
plt.savefig('SVD_Eigenvectors.png')
plt.close()
np.save('eigenvectors.npy',v[0:3,:].T)
# Train an LSTM on the coefficients of the eigenvectors
time_series = np.matmul(v[0:3,:],data_array.T).T
num_timesteps = np.shape(time_series)[0]
train_series = time_series[:num_timesteps//2]
test_series = time_series[num_timesteps//2:]
# import the LSTM architecture and initialize
from ml_module import standard_lstm
ml_model = standard_lstm(train_series)
# Train the model
ml_model.train_model()
# Restore best weights and perform an inference
print('Performing inference on testing data')
ml_model.model_inference(test_series)
return_data = v[0:3,:].T
return return_data
if __name__ == '__main__':
pass | 26.434211 | 117 | 0.672474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 522 | 0.259831 |
d766600ac591a726caf5047535ca5f091cf8d7ac | 10,478 | py | Python | Tutorials/SENSEI/Advection_AmrLevel/Exec/SingleVortex/sensei/render_iso_catalyst_3d.py | ylunalin/amrex | 5715b2fc8a77e0db17bfe7907982e29ec44811ca | [
"BSD-3-Clause-LBNL"
] | null | null | null | Tutorials/SENSEI/Advection_AmrLevel/Exec/SingleVortex/sensei/render_iso_catalyst_3d.py | ylunalin/amrex | 5715b2fc8a77e0db17bfe7907982e29ec44811ca | [
"BSD-3-Clause-LBNL"
] | null | null | null | Tutorials/SENSEI/Advection_AmrLevel/Exec/SingleVortex/sensei/render_iso_catalyst_3d.py | ylunalin/amrex | 5715b2fc8a77e0db17bfe7907982e29ec44811ca | [
"BSD-3-Clause-LBNL"
] | 1 | 2020-01-17T05:00:26.000Z | 2020-01-17T05:00:26.000Z |
from paraview.simple import *
from paraview import coprocessing
#--------------------------------------------------------------
# Code generated from cpstate.py to create the CoProcessor.
# ParaView 5.4.1 64 bits
#--------------------------------------------------------------
# Global screenshot output options
imageFileNamePadding=5
rescale_lookuptable=False
# ----------------------- CoProcessor definition -----------------------
def CreateCoProcessor():
def _CreatePipeline(coprocessor, datadescription):
class Pipeline:
# state file generated using paraview version 5.4.1
# ----------------------------------------------------------------
# setup views used in the visualization
# ----------------------------------------------------------------
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
# Create a new 'Render View'
renderView1 = CreateView('RenderView')
renderView1.ViewSize = [1000, 700]
renderView1.AxesGrid = 'GridAxes3DActor'
renderView1.CenterOfRotation = [0.5, 0.5, 0.5]
renderView1.StereoType = 0
renderView1.CameraPosition = [0.5, 0.5, 3.2557533687070332]
renderView1.CameraFocalPoint = [0.5, 0.5, -0.09031184624419736]
renderView1.CameraParallelScale = 0.8660254037844386
renderView1.Background = [0.0, 0.0, 0.0]
# register the view with coprocessor
# and provide it with information such as the filename to use,
# how frequently to write the images, etc.
coprocessor.RegisterView(renderView1,
filename='pv_image_3d_%t.png', freq=1, fittoscreen=0, magnification=1, width=1000, height=700, cinema={})
renderView1.ViewTime = datadescription.GetTime()
# ----------------------------------------------------------------
# setup the data processing pipelines
# ----------------------------------------------------------------
# create a new 'XML UniformGrid AMR Reader'
# create a producer from a simulation input
mesh_000 = coprocessor.CreateProducer(datadescription, 'mesh')
# create a new 'Cell Data to Point Data'
cellDatatoPointData1 = CellDatatoPointData(Input=mesh_000)
# create a new 'Contour'
contour1 = Contour(Input=cellDatatoPointData1)
contour1.ContourBy = ['POINTS', 'phi']
contour1.ComputeScalars = 1
contour1.Isosurfaces = [0.99429, 1.1043655555555556, 1.214441111111111, 1.3245166666666668, 1.4345922222222223, 1.5446677777777778, 1.6547433333333332, 1.764818888888889, 1.8748944444444444, 1.98497]
contour1.PointMergeMethod = 'Uniform Binning'
# create a new 'Annotate Time'
annotateTime1 = AnnotateTime()
annotateTime1.Format = 't = %0.2f'
# ----------------------------------------------------------------
# setup color maps and opacity mapes used in the visualization
# note: the Get..() functions create a new object, if needed
# ----------------------------------------------------------------
# get color transfer function/color map for 'phi'
phiLUT = GetColorTransferFunction('phi')
phiLUT.RGBPoints = [0.99429, 0.278431372549, 0.278431372549, 0.858823529412, 1.13595724, 0.0, 0.0, 0.360784313725, 1.2766338000000002, 0.0, 1.0, 1.0, 1.41929172, 0.0, 0.501960784314, 0.0, 1.55996828, 1.0, 1.0, 0.0, 1.70163552, 1.0, 0.380392156863, 0.0, 1.84330276, 0.419607843137, 0.0, 0.0, 1.9849700000000001, 0.878431372549, 0.301960784314, 0.301960784314]
phiLUT.ColorSpace = 'RGB'
phiLUT.ScalarRangeInitialized = 1.0
# get opacity transfer function/opacity map for 'phi'
phiPWF = GetOpacityTransferFunction('phi')
phiPWF.Points = [0.99429, 0.0, 0.5, 0.0, 1.9849700000000001, 1.0, 0.5, 0.0]
phiPWF.ScalarRangeInitialized = 1
# ----------------------------------------------------------------
# setup the visualization in view 'renderView1'
# ----------------------------------------------------------------
# show data from mesh_000
mesh_000Display = Show(mesh_000, renderView1)
# trace defaults for the display properties.
mesh_000Display.Representation = 'AMR Blocks'
mesh_000Display.ColorArrayName = [None, '']
mesh_000Display.DiffuseColor = [0.0, 0.0, 0.0]
mesh_000Display.OSPRayScaleArray = 'GhostType'
mesh_000Display.OSPRayScaleFunction = 'PiecewiseFunction'
mesh_000Display.SelectOrientationVectors = 'None'
mesh_000Display.ScaleFactor = 0.1
mesh_000Display.SelectScaleArray = 'None'
mesh_000Display.GlyphType = 'Arrow'
mesh_000Display.GlyphTableIndexArray = 'None'
mesh_000Display.DataAxesGrid = 'GridAxesRepresentation'
mesh_000Display.PolarAxes = 'PolarAxesRepresentation'
mesh_000Display.ScalarOpacityUnitDistance = 0.0174438098693218
# init the 'GridAxesRepresentation' selected for 'DataAxesGrid'
mesh_000Display.DataAxesGrid.XTitle = 'X'
mesh_000Display.DataAxesGrid.YTitle = 'Y'
mesh_000Display.DataAxesGrid.ZTitle = 'Z'
mesh_000Display.DataAxesGrid.XTitleBold = 1
mesh_000Display.DataAxesGrid.XTitleFontSize = 14
mesh_000Display.DataAxesGrid.YTitleBold = 1
mesh_000Display.DataAxesGrid.YTitleFontSize = 14
mesh_000Display.DataAxesGrid.ZTitleBold = 1
mesh_000Display.DataAxesGrid.ZTitleFontSize = 14
mesh_000Display.DataAxesGrid.XLabelBold = 1
mesh_000Display.DataAxesGrid.XLabelFontSize = 14
mesh_000Display.DataAxesGrid.YLabelBold = 1
mesh_000Display.DataAxesGrid.YLabelFontSize = 14
mesh_000Display.DataAxesGrid.ZLabelBold = 1
mesh_000Display.DataAxesGrid.ZLabelFontSize = 14
# show data from contour1
contour1Display = Show(contour1, renderView1)
# trace defaults for the display properties.
contour1Display.Representation = 'Surface'
contour1Display.ColorArrayName = ['POINTS', 'phi']
contour1Display.LookupTable = phiLUT
contour1Display.OSPRayScaleArray = 'GhostType'
contour1Display.OSPRayScaleFunction = 'PiecewiseFunction'
contour1Display.SelectOrientationVectors = 'GhostType'
contour1Display.ScaleFactor = 0.0572519063949585
contour1Display.SelectScaleArray = 'GhostType'
contour1Display.GlyphType = 'Arrow'
contour1Display.GlyphTableIndexArray = 'GhostType'
contour1Display.DataAxesGrid = 'GridAxesRepresentation'
contour1Display.PolarAxes = 'PolarAxesRepresentation'
contour1Display.GaussianRadius = 0.02862595319747925
contour1Display.SetScaleArray = ['POINTS', 'GhostType']
contour1Display.ScaleTransferFunction = 'PiecewiseFunction'
contour1Display.OpacityArray = ['POINTS', 'GhostType']
contour1Display.OpacityTransferFunction = 'PiecewiseFunction'
# show color legend
contour1Display.SetScalarBarVisibility(renderView1, True)
# show data from annotateTime1
annotateTime1Display = Show(annotateTime1, renderView1)
# trace defaults for the display properties.
annotateTime1Display.Bold = 1
annotateTime1Display.FontSize = 12
annotateTime1Display.WindowLocation = 'LowerLeftCorner'
# setup the color legend parameters for each legend in this view
# get color legend/bar for phiLUT in view renderView1
phiLUTColorBar = GetScalarBar(phiLUT, renderView1)
phiLUTColorBar.WindowLocation = 'AnyLocation'
phiLUTColorBar.Position = [0.852, 0.07857142857142851]
phiLUTColorBar.Title = 'phi'
phiLUTColorBar.ComponentTitle = ''
phiLUTColorBar.TitleBold = 1
phiLUTColorBar.TitleFontSize = 24
phiLUTColorBar.LabelBold = 1
phiLUTColorBar.LabelFontSize = 18
phiLUTColorBar.ScalarBarThickness = 24
phiLUTColorBar.ScalarBarLength = 0.8357142857142857
# ----------------------------------------------------------------
# finally, restore active source
SetActiveSource(mesh_000)
# ----------------------------------------------------------------
return Pipeline()
class CoProcessor(coprocessing.CoProcessor):
def CreatePipeline(self, datadescription):
self.Pipeline = _CreatePipeline(self, datadescription)
coprocessor = CoProcessor()
# these are the frequencies at which the coprocessor updates.
freqs = {'mesh': [1, 1, 1]}
coprocessor.SetUpdateFrequencies(freqs)
return coprocessor
#--------------------------------------------------------------
# Global variable that will hold the pipeline for each timestep
# Creating the CoProcessor object, doesn't actually create the ParaView pipeline.
# It will be automatically setup when coprocessor.UpdateProducers() is called the
# first time.
coprocessor = CreateCoProcessor()
#--------------------------------------------------------------
# Enable Live-Visualizaton with ParaView and the update frequency
coprocessor.EnableLiveVisualization(False, 1)
# ---------------------- Data Selection method ----------------------
def RequestDataDescription(datadescription):
"Callback to populate the request for current timestep"
global coprocessor
if datadescription.GetForceOutput() == True:
# We are just going to request all fields and meshes from the simulation
# code/adaptor.
for i in range(datadescription.GetNumberOfInputDescriptions()):
datadescription.GetInputDescription(i).AllFieldsOn()
datadescription.GetInputDescription(i).GenerateMeshOn()
return
# setup requests for all inputs based on the requirements of the
# pipeline.
coprocessor.LoadRequestedData(datadescription)
# ------------------------ Processing method ------------------------
def DoCoProcessing(datadescription):
"Callback to do co-processing for current timestep"
global coprocessor
# Update the coprocessor by providing it the newly generated simulation data.
# If the pipeline hasn't been setup yet, this will setup the pipeline.
coprocessor.UpdateProducers(datadescription)
# Write output data, if appropriate.
coprocessor.WriteData(datadescription);
# Write image capture (Last arg: rescale lookup table), if appropriate.
coprocessor.WriteImages(datadescription, rescale_lookuptable=rescale_lookuptable,
image_quality=0, padding_amount=imageFileNamePadding)
# Live Visualization, if enabled.
coprocessor.DoLiveVisualization(datadescription, "localhost", 22222)
| 44.777778 | 364 | 0.654132 | 7,692 | 0.73411 | 0 | 0 | 0 | 0 | 0 | 0 | 3,901 | 0.372304 |
d767675283bbb063fdc340913b93263291f2c950 | 212 | py | Python | tests/integration/test_integration_foreign_payment_codes.py | pwitab/visma | ffa6698738fcc1be9de727e7fe77cce30310f830 | [
"BSD-3-Clause"
] | 5 | 2018-08-10T19:12:48.000Z | 2021-07-08T12:43:24.000Z | tests/integration/test_integration_foreign_payment_codes.py | pwitab/visma | ffa6698738fcc1be9de727e7fe77cce30310f830 | [
"BSD-3-Clause"
] | 16 | 2018-06-17T18:51:05.000Z | 2021-01-10T10:44:36.000Z | tests/integration/test_integration_foreign_payment_codes.py | pwitab/visma | ffa6698738fcc1be9de727e7fe77cce30310f830 | [
"BSD-3-Clause"
] | 3 | 2019-03-05T15:01:13.000Z | 2021-06-15T14:35:37.000Z | from visma.models import ForeignPaymentCodes
class TestForeignPaymentCodes:
def test_list_foregin_payment_codes(self):
codes = ForeignPaymentCodes.objects.all()
assert len(codes) is not 0
| 21.2 | 49 | 0.754717 | 164 | 0.773585 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d7683f0603e42e3b0a2ea0473fd022c587fdae78 | 995 | py | Python | performance/forms.py | linikerunk/tcc-people-analytics | fdda975682d5299c8384e31ebb974dc085330875 | [
"MIT"
] | null | null | null | performance/forms.py | linikerunk/tcc-people-analytics | fdda975682d5299c8384e31ebb974dc085330875 | [
"MIT"
] | 1 | 2020-10-11T10:09:39.000Z | 2020-10-11T10:09:39.000Z | performance/forms.py | linikerunk/TCC_PeopleAnalytics | fdda975682d5299c8384e31ebb974dc085330875 | [
"MIT"
] | null | null | null | """ This is a forms.py that helps to work on the payload of front-end """
from django import forms
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
from django.forms import ModelForm
from django.forms.models import inlineformset_factory
from django.forms.widgets import TextInput
# from extra_views import ModelFormSetView, FormSetView
from .models import EvaluationSkill, Skill, Evaluation
class SkillForm(forms.ModelForm):
class Meta:
model = Skill
fields = '__all__'
class EvaluationForm(forms.ModelForm):
class Meta:
model = Evaluation
exclude = ()
class EvaluationSkillForm(forms.ModelForm):
class Meta:
model = EvaluationSkill
exclude = ()
EvaluationSkillFormSet = inlineformset_factory(
Evaluation, EvaluationSkill, form=EvaluationSkillForm,
fields=['evaluation', 'skill', 'grade'], extra=1, can_delete=True
) | 27.638889 | 73 | 0.742714 | 319 | 0.320603 | 0 | 0 | 0 | 0 | 0 | 0 | 163 | 0.163819 |
d768dc679de8770bdc1267fbf48dedc2b6d9648d | 365 | py | Python | answers/Python/@oseme-techguy/03-word-in-reverse.py | Flipponachi/20-questions | a6ad9a468683646781426008e71dbb508e8e59bb | [
"MIT"
] | 1 | 2019-09-13T14:13:07.000Z | 2019-09-13T14:13:07.000Z | answers/Python/@oseme-techguy/03-word-in-reverse.py | Flipponachi/20-questions | a6ad9a468683646781426008e71dbb508e8e59bb | [
"MIT"
] | null | null | null | answers/Python/@oseme-techguy/03-word-in-reverse.py | Flipponachi/20-questions | a6ad9a468683646781426008e71dbb508e8e59bb | [
"MIT"
] | 1 | 2021-01-02T12:01:46.000Z | 2021-01-02T12:01:46.000Z | """
Solution to Word in Reverse
"""
if __name__ == '__main__':
while True:
word = input('Enter a word: ')
word = str(word)
i = len(word)
reversed_word = ''
while i > 0:
reversed_word += word[i - 1]
i -= 1
print('{reversed_word}\n'.format(reversed_word=reversed_word))
| 22.8125 | 71 | 0.493151 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.241096 |
d768fe73af1f8acf9182c2673f7840224b4e40d8 | 2,592 | py | Python | src/spooq2/spooq2_logger.py | rt-phb/Spooq | ef101dcb4239cee43f56497908fee63998eb5846 | [
"MIT"
] | null | null | null | src/spooq2/spooq2_logger.py | rt-phb/Spooq | ef101dcb4239cee43f56497908fee63998eb5846 | [
"MIT"
] | null | null | null | src/spooq2/spooq2_logger.py | rt-phb/Spooq | ef101dcb4239cee43f56497908fee63998eb5846 | [
"MIT"
] | null | null | null | """
Global Logger instance used by Spooq2.
Example
-------
>>> import logging
>>> logga = logging.getLogger("spooq2")
<logging.Logger at 0x7f5dc8eb2890>
>>> logga.info("Hello World")
[spooq2] 2020-03-21 23:55:48,253 INFO logging_example::<module>::4: Hello World
"""
import os
import sys
import logging
from spooq2._version import __version__ as version_number
initialized = False
def initialize():
"""
Initializes the global logger for Spooq with pre-defined levels for ``stdout`` and ``stderr``.
No input parameters are needed, as the configuration is received via :py:meth:`get_logging_level`.
Note
----
The output format is defined as:
| "[%(name)s] %(asctime)s %(levelname)s %(module)s::%(funcName)s::%(lineno)d: %(message)s"
| For example "[spooq2] 2020-03-11 15:40:59,313 DEBUG newest_by_group::__init__::53: group by columns: [u'user_id']"
Warning
-------
The ``root`` logger of python is also affected as it has to have a level at least as
fine grained as the logger of Spooq, to be able to produce an output.
"""
global initialized
if initialized:
return
logging_level = get_logging_level()
# logging.getLogger("root").setLevel(logging_level)
logger = logging.getLogger("spooq2")
logger.setLevel(logging_level)
if not len(logger.handlers):
formatter = logging.Formatter(
"[%(name)s] %(asctime)s %(levelname)s %(module)s::%(funcName)s::%(lineno)d: %(message)s"
)
# STDOUT Handler
ch_out = logging.StreamHandler(sys.stdout)
ch_out.setLevel(logging_level)
ch_out.setFormatter(formatter)
logger.addHandler(ch_out)
# STDERR Handler
# ch_err = logging.StreamHandler(sys.stderr)
# ch_err.setLevel(logging_level)
# ch_err.setFormatter(formatter)
# logger.addHandler(ch_err)
initialized = True
logger.info(f"Thank you for choosing Spooq {version_number}!")
def get_logging_level():
"""
Returns the logging level depending on the environment variable `SPOOQ_ENV`.
Note
----
If SPOOQ_ENV is
* **dev** -> "DEBUG"
* **test** -> "ERROR"
* something else -> "INFO"
Returns
-------
:any:`str`
Logging level
"""
spooq_env = os.getenv('SPOOQ_ENV', "default").lower()
if spooq_env.startswith("dev"):
return "DEBUG"
elif spooq_env.startswith("test"):
return "ERROR"
elif spooq_env.startswith("pr"):
return "WARN"
else:
return "INFO"
| 27 | 124 | 0.631173 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,673 | 0.645448 |
d769120bcdbd81031f2be3c1d9954eb83b339b6d | 3,610 | py | Python | tests/extra/math_ops_test.py | yaroslavvb/imperative | d08f08b4febc9005f7d91feaeb59ca18fbbca486 | [
"Apache-2.0"
] | 20 | 2016-10-05T20:23:15.000Z | 2021-07-10T03:56:40.000Z | tests/extra/math_ops_test.py | yaroslavvb/imperative | d08f08b4febc9005f7d91feaeb59ca18fbbca486 | [
"Apache-2.0"
] | 2 | 2016-10-05T22:43:26.000Z | 2016-10-12T01:16:11.000Z | tests/extra/math_ops_test.py | yaroslavvb/imperative | d08f08b4febc9005f7d91feaeb59ca18fbbca486 | [
"Apache-2.0"
] | 1 | 2017-10-31T09:18:51.000Z | 2017-10-31T09:18:51.000Z | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
exp = np.exp
log = np.log
# changes to turn test into imperative mode test
try:
from tensorflow.contrib import imperative
from tensorflow.contrib.imperative.python.imperative import test_util
except:
import imperative
from imperative import test_util
import tensorflow as tf
env = imperative.Env(tf)
math_ops = env.tf
constant_op = env.tf
class ReduceTest(test_util.TensorFlowTestCase):
def testReduceAllDims(self):
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
with self.test_session():
y_tf = math_ops.reduce_sum(x).eval()
self.assertEqual(y_tf, 21)
class RoundTest(test_util.TensorFlowTestCase):
def testRounding(self):
try:
x = [0.49, 0.7, -0.3, -0.8]
for dtype in [np.float32, np.double]:
x_np = np.array(x, dtype=dtype)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf = math_ops.round(x_tf)
y_tf_np = y_tf.eval()
y_np = np.round(x_np)
self.assertAllClose(y_tf_np, y_np, atol=1e-2)
except:
import sys, pdb, traceback
type, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
class ModTest(test_util.TensorFlowTestCase):
def testFloat(self):
x = [0.5, 0.7, 0.3]
for dtype in [np.float32, np.double]:
# Test scalar and vector versions.
for denom in [x[0], [x[0]] * 3]:
x_np = np.array(x, dtype=dtype)
with self.test_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf = math_ops.mod(x_tf, denom)
y_tf_np = y_tf.eval()
y_np = np.fmod(x_np, denom)
self.assertAllClose(y_tf_np, y_np, atol=1e-2)
def testFixed(self):
x = [5, 10, 23]
for dtype in [np.int32, np.int64]:
# Test scalar and vector versions.
for denom in [x[0], x]:
x_np = np.array(x, dtype=dtype)
with self.test_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf = math_ops.mod(x_tf, denom)
y_tf_np = y_tf.eval()
y_np = np.mod(x_np, denom)
self.assertAllClose(y_tf_np, y_np)
class SquaredDifferenceTest(test_util.TensorFlowTestCase):
def testSquaredDifference(self):
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
y = np.array([-3, -2, -1], dtype=np.int32)
z = (x - y)*(x - y)
with self.test_session():
z_tf = math_ops.squared_difference(x, y).eval()
self.assertAllClose(z, z_tf)
if __name__ == "__main__":
googletest.main()
| 30.854701 | 80 | 0.652355 | 2,202 | 0.609972 | 0 | 0 | 0 | 0 | 0 | 0 | 829 | 0.22964 |
d7694bd9262436a2c11c4b2d8fb0ff233914d037 | 7,327 | py | Python | battlefortune/batchrunner.py | pfassina/BattleFortune | 3a985687f395d32c41f170a173db0b50c9c82fdd | [
"MIT"
] | 3 | 2019-06-21T01:37:50.000Z | 2021-03-24T16:56:49.000Z | battlefortune/batchrunner.py | pfassina/BattleFortune | 3a985687f395d32c41f170a173db0b50c9c82fdd | [
"MIT"
] | 6 | 2021-03-18T21:37:16.000Z | 2022-03-11T23:35:21.000Z | battlefortune/batchrunner.py | pfassina/BattleFortune | 3a985687f395d32c41f170a173db0b50c9c82fdd | [
"MIT"
] | null | null | null | import keyboard
from logparser import parselog, validate_log
import os
from psutil import process_iter
from pyautogui import click
import subprocess
from turnhandler import backupturn, clonegame, cleanturns, delete_log, delete_temp
import yaml
from time import sleep
import threading
import time
import win32gui
import win32con
failed_rounds = []
def wait_screen_load(path):
    """
    Poll the Dominions log file until the nation-selection screen has loaded.

    Load completion is detected by the 'playturn: autohost' marker that
    Dominions writes to its log once the battle has loaded.

    :param path: directory containing Dominions' log.txt (must end with a
        path separator, since it is concatenated directly with 'log.txt')
    :return: True if the marker appeared before the retry budget ran out,
        False otherwise.
    """
    # Retry budget; there is deliberately no sleep here to preserve the
    # original (fast-spin) timeout characteristics.
    for _ in range(1000000):
        try:
            with open(path + 'log.txt') as file:
                blurb = file.read()
        except FileNotFoundError:
            # Dominions has not created the log file yet; consume a retry.
            continue
        if 'playturn: autohost' in blurb:  # battle screen finished loading
            return True
    return False
def select_nation():
    """
    Select the first nation on the nation-selection screen.

    Polls until the 'Dominions 5' window handle exists, then clicks at a
    fixed offset (400, 280) from the window's client-area origin, which
    corresponds to the first nation in the list.

    :return: True once the click has been issued.
    """
    # Poll for the Dominions window handle. The short sleep keeps this
    # from busy-spinning and pegging a CPU core while the game starts up.
    hwnd = win32gui.FindWindow(None, 'Dominions 5')
    while hwnd == 0:
        sleep(0.05)
        hwnd = win32gui.FindWindow(None, 'Dominions 5')

    # Translate the client-area origin to screen coordinates, then click
    # the fixed offset where the first nation entry is drawn.
    x, y = win32gui.ClientToScreen(hwnd, (0, 0))
    click((x + 400, y + 280))
    return True
def go_to_province(province):
    """
    Drive Dominions via keyboard shortcuts to open a province's battle
    report so the battle log gets generated.

    :param province: number of the province where the battle occurs
    :return: True once every keystroke has been sent.
    """
    keyboard.press_and_release('esc')  # dismiss any message popups
    keyboard.press_and_release('g')    # open the go-to-province prompt
    keyboard.write(str(province))      # type the province number
    for shortcut in ('enter', 'c', 'esc', 'd'):
        # confirm, view casualties, back to map, try to add PD
        keyboard.press_and_release(shortcut)
    return True
def wait_host(path, start_time):
    """
    Wait for Dominions to finish hosting a battle.

    Hosting completion is detected by watching the modification time of
    the ftherlnd file, which Dominions rewrites when the turn has been
    hosted. If Dominions' error dialog appears first, it is closed and
    hosting is reported as failed.

    :param path: game directory containing ftherlnd (must end with a path
        separator)
    :param start_time: ftherlnd modification time recorded before hosting
    :return: True if ftherlnd was updated (host succeeded), False if the
        error dialog appeared first.
    """
    done = False
    while done is False:
        # ftherlnd is rewritten by Dominions once hosting completes.
        if os.path.getmtime(path + 'ftherlnd') > start_time:
            done = True
            break
        # NOTE(review): this title looks like mojibake of the Swedish
        # "Något gick fel!" ("Something went wrong!"). Kept byte-for-byte
        # because it may match what FindWindow actually sees on the target
        # system — verify against the real dialog title.
        hwnd = win32gui.FindWindow(None, 'NÃ¥got gick fel!')
        if hwnd > 0:
            win32gui.SetForegroundWindow(hwnd)
            win32gui.PostMessage(hwnd, win32con.WM_CLOSE, 0, 0)
            break
        sleep(0.1)  # poll gently instead of spinning on getmtime
    return done
def run_dominions(province, game='', switch='', turn=-1):
    """
    Runs Dominions.

    Launches Dominions5.exe through cmd with minimal GUI settings, then
    either waits for an auto-host to finish (switch == 'g -T') or drives the
    UI to open the battle report and validate the resulting log.  Failed
    turns are recorded in the module-level ``failed_rounds`` list.

    :param province: Province where battle occurs
    :param game: Name of the game being simulated
    :param switch: Additional Dominions switches ('g -T' means auto-host)
    :param turn: Turn of the simulation (-1 means "no per-turn game clone")
    :return: True after process is terminated
    """
    global failed_rounds
    # Get Paths.
    # NOTE(review): yaml.load without an explicit Loader is deprecated and
    # unsafe on untrusted input; yaml.safe_load would suffice for a config.
    with open('./battlefortune/data/config.yaml') as file:
        paths = yaml.load(file)
    dpath = paths['dompath']
    gpath = paths['gamepath']
    if turn > -1:
        # Per-turn runs use a cloned game folder/name suffixed with the turn
        # number (created elsewhere by clonegame()).
        idx = gpath.rfind("/")
        gpath = gpath[:idx] + str(turn) + gpath[idx:]
        game = game + str(turn)
    start_time = os.path.getmtime(gpath + 'ftherlnd')  # ftherlnd last update
    # Run Dominions on minimal settings.
    # NOTE(review): the command is assembled as a single shell string; paths
    # containing spaces or shell metacharacters would break it.
    switches = ' --simpgui --nosteam --res 960 720 -waxsco' + switch + ' '
    program = '/k cd /d' + dpath + ' & Dominions5.exe'
    cmd = 'cmd ' + program + switches + game
    process = subprocess.Popen(cmd)  # run Dominions
    if switch == 'g -T':  # if auto hosting battle
        success = wait_host(path=gpath, start_time=start_time)
        if not success:
            failed_rounds.append(turn)
    else:
        # Generate Log by driving the UI.
        wait_screen_load(dpath)  # wait nation selection screen to load
        select_nation()  # select first nation
        go_to_province(province)  # check battle report
        # Validate Round (validate_log is defined elsewhere in the project).
        valid = validate_log(dpath)  # validate log
        if not valid:
            failed_rounds.append(turn)
    # Terminate process; killing `cmd` may leave Dominions5.exe alive, so a
    # TASKKILL sweep follows for the UI-driven path.
    process.kill()
    if switch != 'g -T':
        if "Dominions5.exe" in (p.name() for p in process_iter()):
            os.system("TASKKILL /F /IM Dominions5.exe")
    return True
def host_battle(game, province, rounds):
    """
    Host games concurrently based on the number of threads.

    Rounds are processed in batches of ``maxthreads`` (read from the
    config): each batch spawns one run_dominions thread per round, joins
    them all, then advances the window to the next batch.

    :param game: game name
    :param province: province where battle occurs
    :param rounds: number of rounds to be hosted
    """
    switch = 'g -T'
    threads = []
    # FIX: the original opened the config file without ever closing it
    # (leaked handle); a with-block closes it deterministically.  The
    # original docstring also opened with four quotes (`""""`), fixed here.
    with open('./battlefortune/data/config.yaml') as file:
        max_threads = yaml.load(file)['maxthreads']
    start_range = 1
    end_range = start_range + max_threads
    if end_range > (rounds + 1):
        end_range = rounds + 1
    while start_range < (rounds + 1):
        for i in range(start_range, end_range):
            t = threading.Thread(target=run_dominions, args=(province, game, switch, i))
            threads.append(t)
            t.start()
        # Wait for the whole batch before launching the next one.
        for thread in threads:
            thread.join()
        threads = []
        start_range = start_range + max_threads
        end_range = end_range + max_threads
        if end_range > (rounds + 1):
            end_range = rounds + 1
def finalize_turn(game, province, turn=1):
    """
    Generates the log for each simulation round, one at a time.

    Re-runs Dominions with the 'd' switch to produce the battle log, then —
    unless run_dominions marked this turn as failed — backs the turn up and
    parses the log.  Temp files are always cleaned up.

    :param game: name of the game to be hosted
    :param province: number of the province where battle occurs
    :param turn: number of the simulation round
    :return: turn log dict (empty dict if the turn failed)
    """
    global failed_rounds
    run_dominions(province=province, game=game, switch='d', turn=turn)  # generate battle logs
    turn_log = {}
    if turn not in failed_rounds:
        # backupturn/parselog are project helpers defined elsewhere.
        backupturn(turn)  # back-up turn files
        turn_log = parselog(turn)  # read and parse battle log
    # delete log / temp files regardless of success
    delete_log()
    delete_temp()
    return turn_log
def batchrun(rounds, game, province):
    """
    Runs X numbers of Simulation Rounds.

    Clones the game once per round, hosts all rounds concurrently, then
    finalizes each round sequentially, collecting winners and battle logs.

    :param rounds: Number of rounds to be simulated
    :param game: game name that will be simulated
    :param province: province number where battle occurs
    :return: dict with 'nations', 'winners' and 'battles' keys
    """
    global failed_rounds
    winners = []
    battles = []
    nations = {}
    for i in range(1, rounds + 1):
        clonegame(i)
    host_battle(game, province, rounds)
    for i in range(1, rounds + 1):
        # Skip rounds that failed during hosting.
        if i in failed_rounds:
            continue
        log = finalize_turn(game, province, i)  # get turn log
        # Re-check: finalize_turn itself may have appended to failed_rounds.
        if i in failed_rounds:
            continue
        nations = log['nations']  # get nation ids (same every round; last wins)
        winners.append(log['turn_score'])  # get turn winner
        for j in range(len(log['battlelog'])):
            battles.append(log['battlelog'][j])  # get battle report
        print('Round: ' + str(i))
    cleanturns(rounds)
    # Reset the module-level failure list for the next batch.
    failed_rounds = []
    output = {
        'nations': nations,
        'winners': winners,
        'battles': battles
    }
    return output
| 26.9375 | 94 | 0.619763 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,727 | 0.372084 |
d76a161827a2e2c2e3a71fbfaf612214e039ac3b | 1,188 | py | Python | preprocess.py | austinben/ECE470 | 05b6c4c8b41a2c1d634560a0e5ce6af5e4adbc3a | [
"MIT"
] | null | null | null | preprocess.py | austinben/ECE470 | 05b6c4c8b41a2c1d634560a0e5ce6af5e4adbc3a | [
"MIT"
] | null | null | null | preprocess.py | austinben/ECE470 | 05b6c4c8b41a2c1d634560a0e5ce6af5e4adbc3a | [
"MIT"
] | null | null | null | import numpy as np
import os
import cv2
import imutils
import numpy as np
from keras.preprocessing import image
from matplotlib import pyplot as plt
def crop_image(img):
    """
    Crop *img* (a BGR image array) to the bounding box of its largest
    contour — e.g. trimming the black background around a scan.

    Steps: greyscale -> Gaussian blur -> binary threshold -> erode/dilate to
    remove speckle -> find the largest contour -> crop to its extreme points.

    :param img: BGR image as a numpy array (cv2 convention)
    :return: cropped copy (a view) of *img*
    """
    # convert the image to greyscale and add a slight gaussian blur
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    # FIX: `blurred` was computed but never used — the threshold was taken on
    # the raw greyscale. Thresholding the blurred copy (the intended recipe)
    # keeps sensor noise from seeding spurious contours.
    thresh = cv2.threshold(blurred, 45, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.erode(thresh, None, iterations=2)
    thresh = cv2.dilate(thresh, None, iterations=2)
    # detect contours in the threshold image, then keep the largest by area
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key=cv2.contourArea)
    # extreme points of the largest contour
    extLeft = tuple(c[c[:, :, 0].argmin()][0])
    extRight = tuple(c[c[:, :, 0].argmax()][0])
    extTop = tuple(c[c[:, :, 1].argmin()][0])
    extBot = tuple(c[c[:, :, 1].argmax()][0])
    # crop the new image out of the original (rows = y, cols = x)
    new_image = img[extTop[1]:extBot[1], extLeft[0]:extRight[0]]
    return new_image
d76b7aa3673d7e4fdedf5a1cdab205aa228b5775 | 3,444 | py | Python | tests/test_db_awssimpledb.py | gyrospectre/securitybot | 90db2ae532667c48ca080108b895c2e1fe16b1e8 | [
"Apache-2.0"
] | 3 | 2020-10-09T04:46:15.000Z | 2021-12-30T10:12:37.000Z | tests/test_db_awssimpledb.py | gyrospectre/securitybot | 90db2ae532667c48ca080108b895c2e1fe16b1e8 | [
"Apache-2.0"
] | null | null | null | tests/test_db_awssimpledb.py | gyrospectre/securitybot | 90db2ae532667c48ca080108b895c2e1fe16b1e8 | [
"Apache-2.0"
] | 1 | 2020-08-11T19:28:13.000Z | 2020-08-11T19:28:13.000Z | import unittest
from unittest.mock import MagicMock
from unittest.mock import patch
from securitybot.db.awssimpledb import DbClient
from securitybot.exceptions import DbException
SDB_CFG = {
'domain_prefix': 'secbot'
}
class TestChatProviderSlack(unittest.TestCase):
    """Unit tests for securitybot.db.awssimpledb.DbClient.

    Every test patches the boto3 `client` factory so no AWS calls are made.
    NOTE(review): the class name says "ChatProviderSlack" but the tests
    exercise the SimpleDB client — likely a copy/paste leftover.
    """
    @patch('securitybot.db.awssimpledb.client')
    def test__connect(self, mk_boto):
        # Construction should record the domain prefix and ignore `queries`.
        cli = DbClient(config=SDB_CFG, queries="test")
        self.assertEqual(cli._domain_prefix, 'secbot')
        self.assertEqual(cli.queries, '')
    @patch('securitybot.db.awssimpledb.client')
    def test__execute_unknown_query(self, mk_boto):
        # An unrecognised query name must raise DbException.
        cli = DbClient(config=SDB_CFG, queries="test")
        with self.assertRaises(DbException):
            cli.execute('', params=None)
    @patch('securitybot.db.awssimpledb.client')
    def test__execute_no_params(self, mk_boto):
        # params=None should be forwarded as an empty tuple.
        cli = DbClient(config=SDB_CFG, queries="test")
        cli._update_ignored_list = MagicMock()
        cli.execute('update_ignored_list', params=None)
        cli._update_ignored_list.assert_called_once_with(params=())
    @patch('securitybot.db.awssimpledb.client')
    def test__execute_update_ignored_success(self, mk_boto):
        cli = DbClient(config=SDB_CFG, queries="test")
        cli._delete = MagicMock()
        cli._delete.return_value = True
        cli._dict_to_items = MagicMock()
        cli._dict_to_items.return_value = (['eee'], [])
        e_result = cli.execute('update_ignored_list', params=None)
        self.assertEqual(e_result, True)
    @patch('securitybot.db.awssimpledb.client')
    def test__set_response_success(self, mk_boto):
        cli = DbClient(config=SDB_CFG, queries="test")
        params = ('comment', 0, 0, 'hash')
        e_result = cli.execute('set_response', params=params)
        # NOTE(review): this mock is installed AFTER execute() ran, so it has
        # no effect on the call above — probably meant to precede it.
        cli._new_alert_user_response = MagicMock()
        self.assertEqual(e_result, True)
    @patch('securitybot.db.awssimpledb.client')
    def test__set_response_success_pop(self, mk_boto):
        # Verifies the params tuple is re-ordered into [hash, comment, 0, 0].
        cli = DbClient(config=SDB_CFG, queries="test")
        params = ('comment', 0, 0, 'hash')
        cli._new_alert_user_response = MagicMock()
        cli.execute('set_response', params=params)
        cli._new_alert_user_response.assert_called_once_with(['hash', 'comment', 0, 0])
    @patch('securitybot.db.awssimpledb.client')
    def test__set_response_params_missing(self, mk_boto):
        # set_response without params must raise.
        cli = DbClient(config=SDB_CFG, queries="test")
        with self.assertRaises(DbException):
            cli.execute('set_response')
    @patch('securitybot.db.awssimpledb.client')
    def test__get_alerts_no_params(self, mk_boto):
        cli = DbClient(config=SDB_CFG, queries="test")
        with self.assertRaises(DbException):
            cli.execute('get_alerts')
    @patch('securitybot.db.awssimpledb.client')
    def test__get_alerts_success(self, mk_boto):
        cli = DbClient(config=SDB_CFG, queries=None)
        cli._select = MagicMock()
        cli._select.return_value = {
            'ee': {
                'title': 'test',
                'ldap': 'user',
                'reason': 'because',
                'description': 'hi',
                'url': 'n/a',
                'event_time': '2020-01-01T00:00:00+0000',
                'performed': 0,
                'comment': 'woot',
                'authenticated': 0,
                'status': 2
            }
        }
        # NOTE(review): ('0') is a plain string, not a 1-tuple — ('0',) was
        # probably intended.  No assertion follows; this only checks that
        # execute() does not raise.
        cli.execute('get_alerts', params = ('0'))
| 32.186916 | 87 | 0.639082 | 3,213 | 0.932927 | 0 | 0 | 3,111 | 0.90331 | 0 | 0 | 719 | 0.208769 |
d76cb886459f15396940273045af583f04a800a8 | 1,759 | py | Python | src/waiting.py | aquova/bouncer | 9988b4f2a0e659e2d49fcc4e95815931b2e81670 | [
"MIT"
] | 5 | 2019-02-17T02:50:32.000Z | 2020-07-04T14:43:31.000Z | src/waiting.py | aquova/bouncer | 9988b4f2a0e659e2d49fcc4e95815931b2e81670 | [
"MIT"
] | 4 | 2019-02-16T23:37:40.000Z | 2020-07-03T14:54:17.000Z | src/waiting.py | aquova/bouncer | 9988b4f2a0e659e2d49fcc4e95815931b2e81670 | [
"MIT"
] | 2 | 2019-02-16T13:40:24.000Z | 2020-05-09T05:54:40.000Z | import datetime, discord
from dataclasses import dataclass
from commonbot.utils import getTimeDelta
@dataclass
class AnsweringMachineEntry:
    """One queued user message awaiting a moderator reply."""
    name: str                      # display name of the waiting user
    timestamp: datetime.datetime   # when the message arrived (naive UTC, per utcnow() in gen_waiting_list)
    last_message: str              # text of the user's most recent message
    message_url: str               # jump link to that message
class AnsweringMachine:
    """Tracks users who messaged the bot and are awaiting a reply.

    `waiting_list` maps user id -> AnsweringMachineEntry; `recent_reply`
    remembers the user a moderator most recently answered.
    """
    def __init__(self):
        self.waiting_list = {}
        self.recent_reply = None

    def set_recent_reply(self, user):
        """Record the user most recently replied to."""
        self.recent_reply = user

    def get_recent_reply(self):
        """Return the most recently replied-to user (or None)."""
        return self.recent_reply

    def recent_reply_exists(self):
        """True if some user has been replied to. (Idiom fix: `is not None`
        instead of `!= None`.)"""
        return self.recent_reply is not None

    def remove_entry(self, user_id):
        """Drop a user's entry if present; pop() avoids the membership check."""
        self.waiting_list.pop(user_id, None)

    def get_entries(self):
        """Return the live waiting-list dict."""
        return self.waiting_list

    def update_entry(self, user_id, user_entry):
        """Insert or replace the entry for *user_id*."""
        self.waiting_list[user_id] = user_entry

    async def clear_entries(self, message, _):
        """Empty the queue and confirm in the invoking channel."""
        self.waiting_list.clear()
        await message.channel.send("Waiting queue has been cleared")

    async def gen_waiting_list(self, message, _):
        """Post every pending entry to the channel, purging ones older than
        a day; say so if the queue is empty."""
        curr_time = datetime.datetime.utcnow()
        # Assume there are no messages in the queue until proven otherwise.
        found = False
        # Iterate a copy: remove_entry mutates the underlying dict.
        waiting_list = self.get_entries().copy()
        for key, item in waiting_list.items():
            days, hours, minutes = getTimeDelta(curr_time, item.timestamp)
            # Purge items that are older than one day.
            if days > 0:
                self.remove_entry(key)
            else:
                found = True
                out = f"{item.name} ({key}) said `{item.last_message}` | {hours}h{minutes}m ago\n{item.message_url}\n"
                await message.channel.send(out)
        if not found:
            await message.channel.send("There are no users awaiting replies")
d76d78b8f2dc7f9192fed4ce770d6fc94f87677e | 1,231 | py | Python | estimation/sample_z.py | yiruiliu110/eegnn | 253773c301681bb00b4789c34f48c82468ad16da | [
"MIT"
] | null | null | null | estimation/sample_z.py | yiruiliu110/eegnn | 253773c301681bb00b4789c34f48c82468ad16da | [
"MIT"
] | null | null | null | estimation/sample_z.py | yiruiliu110/eegnn | 253773c301681bb00b4789c34f48c82468ad16da | [
"MIT"
] | null | null | null | """
this script contains the function to compute z from sparse v , pi and w
"""
import torch
from estimation.truncated_poisson import TruncatedPoisson
def compute_z(log_w: torch.tensor, pi: torch.sparse, c: torch.sparse):
    """
    This function computes the class indicators given cluster proportion vector pi and weight matrix w.
    :param log_w: log-weight matrix, shape K (clusters) x N (nodes) — assumed from the
                  dim=1 index_select below; TODO confirm against callers.
    :param pi: cluster proportion vector (dense tensor despite the annotation; torch.log
               is applied to it directly).
    :param c: a sparse matrix whose indices are edges and whose values are cluster memberships.
    :return: a sparse matrix for the number of hidden edges, same sparsity pattern as c.
    """
    indices = c._indices()
    indices_0, indices_1 = indices[0], indices[1]
    # K x E matrix: log_w column for each edge endpoint, summed — log(w_ki * w_kj).
    poisson_para_tmp = torch.index_select(log_w, 1, indices_0) + torch.index_select(log_w, 1, indices_1)  # K(the number of clusters) X number of edges
    # Pick, per edge, the row matching that edge's cluster membership.
    poisson_para = torch.gather(poisson_para_tmp, dim=0, index=torch.unsqueeze(c._values(), 0))  # https://zhuanlan.zhihu.com/p/352877584
    # Add log pi_k for each edge's cluster.
    poisson_para += torch.index_select(torch.log(pi), dim=0, index=c._values())
    # Off-diagonal (i != j) rates are doubled; self-loops keep the base rate.
    poisson_para = torch.where(indices_0==indices_1, poisson_para, poisson_para *2.0)
    # Sample truncated (>=1) Poisson counts; 1e-10 guards against a zero rate.
    samples = TruncatedPoisson(torch.squeeze(torch.exp(poisson_para) + 1e-10)).sample()
    z = torch.sparse_coo_tensor(indices, samples, c.size())
    return z
| 41.033333 | 152 | 0.727051 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 464 | 0.376929 |
d76d863e3b44be2672e1cb5cd88dd7bc048a830a | 446 | py | Python | test3/routes/MainRoute.py | Ca11MeE/dophon | 6737b0f0dc9ec2c2229865940c3c6d6ee326fc28 | [
"Apache-2.0"
] | 1 | 2018-08-13T09:57:34.000Z | 2018-08-13T09:57:34.000Z | test3/routes/MainRoute.py | Ca11MeE/dophon | 6737b0f0dc9ec2c2229865940c3c6d6ee326fc28 | [
"Apache-2.0"
] | null | null | null | test3/routes/MainRoute.py | Ca11MeE/dophon | 6737b0f0dc9ec2c2229865940c3c6d6ee326fc28 | [
"Apache-2.0"
] | null | null | null | from dophon import *
from dophon.annotation import *
app = blue_print('main', __name__,url_prefix='/main')
@RequestMapping('/', ['get'])
@ResponseTemplate(['index.html'])
# GET /main/ — renders index.html with an empty template context.
def index():
    return {}
@GetRoute('/get')
@ResponseTemplate(['index.html'])
# GET /main/get — renders index.html with an empty template context.
def get_index():
    return {}
@PostRoute('/post')
@ResponseTemplate(['index.html'])
# POST /main/post — renders index.html with an empty template context.
def post_index():
    return {}
@Get
@ResponseTemplate(['index.html'])
# GET route derived from the function name by the dophon @Get decorator —
# TODO confirm the generated path against the framework docs.
def test_get():
    return {}
| 14.866667 | 53 | 0.656951 | 0 | 0 | 0 | 0 | 325 | 0.7287 | 0 | 0 | 82 | 0.183857 |
d76db77d529fe9e5f197cd3b9a8d53a6b929a6d0 | 17,179 | py | Python | paint_program.py | nick-tkachov/paint-program | 80b657acfe9c63dd62a8d93927255490b55372ed | [
"MIT"
] | null | null | null | paint_program.py | nick-tkachov/paint-program | 80b657acfe9c63dd62a8d93927255490b55372ed | [
"MIT"
] | null | null | null | paint_program.py | nick-tkachov/paint-program | 80b657acfe9c63dd62a8d93927255490b55372ed | [
"MIT"
] | null | null | null | # ---------------------------------------------------------------------------------------------------#
# Program Name: PAINT PROGRAM OOP ASSIGNMENT
# Programmer: Nick Tkachov
# Date: November 20, 2017
# Input: Options at the beginning of the game allow user to select SMALL,MEDIUM,LARGE grid sizes,
# and grid or no grid. After that, the options are entered
# and the user is allowed to draw using the various tools available:
# BRUSH, FILL, ERASE - SIZES 1-5 - COLOR PALLET WITH 9 COLORS
# Processing: User inputs are processed, from the cell that they click on to the various
# menu options that are selected. User save and load is also calculated
# by parsing text file
# Output: Any changes made to the screen are instantly drawn in the loop
# ----------------------------------------------------------------------------------------------------#
#Class Cell(object)
# Attributes:
# height,width,x,y,color,border,row,column,rect
#
#
# Methods:
# draw()
# on_mouse_hover()
#Class Grid(object)
# Attributes:
# color,screen_size,rows,columns,class_type,x,y,gap,height,width,cells,count
#
#
# Methods:
# draw()
# draw_cells()
# find_cell()
#Class Text_Render(object)
# Attributes:
# screen_size,surf,x,y,text,size,font,render
#
#
# Methods:
# draw()
#Class Menu(Cell)
# Attributes:
# same as cell +...
# text,data,hint,font,render,selected
#
#
# Methods:
# draw()
# on_mouse_click
#Class Pixel(Cell)
# Attributes:
# same as cell
#
#
#
# Methods:
# on_mouse_click
#Class Tool_Load(Menu)
# Attributes:
# same as menu
#
#
#
# Methods:
# on_mouse_click + menu methods
#Class Tool_Load(Menu)
# Attributes:
# same as menu
#
#
#
# Methods:
# on_mouse_click + menu methods
#Class Menu_Grid(Grid)
# Attributes:
# same as menu +..
# options,count,font,w,h,surf
#
#
# Methods:
# check_event,draw + grid methods
#Class Pallet(Menu_Grid)
# Attributes:
# same as Menu_Grid
#
#
#
# Methods:
# draw
#Class PixelArt(Grid)
# Attributes:
# same as Grid
#
#
#
# Methods:
# draw_grid
# set_grid
# check_event()
import pygame
from grid import Grid,Cell
from math import sqrt
from sys import exit
def fill(cell, grid, color, c):
    """Flood-fill the connected region of cells whose color equals *color*,
    recoloring them to *c* (and clearing their borders).

    :param cell: starting cell (needs .color, .border, .row, .column)
    :param grid: grid with .rows, .columns and find_cell(row, col)
    :param color: color of the region to replace
    :param c: replacement color

    FIX: the original used recursion, which can exceed Python's default
    recursion limit on large grids (e.g. the 50x50 option).  This iterative
    stack-based version visits exactly the same cells with the same effect.
    """
    # No-op when the start cell is not part of the target region, or when the
    # replacement equals the target (the original's `cell.color != c` guard).
    if cell.color != color or color == c:
        return
    stack = [cell]
    while stack:
        cur = stack.pop()
        if cur.color != color:  # already recolored via another path
            continue
        cur.color = c
        cur.border = 0
        x = cur.row
        y = cur.column
        if x < grid.rows - 1:      # right
            stack.append(grid.find_cell(x + 1, y))
        if x > 0:                  # left
            stack.append(grid.find_cell(x - 1, y))
        if y < grid.columns - 1:   # up
            stack.append(grid.find_cell(x, y + 1))
        if y > 0:                  # down
            stack.append(grid.find_cell(x, y - 1))
def draw_brush(cell,grid,c,amount):
    # Paints *cell* with color *c* and was apparently meant to spread the
    # brush to neighbours up to *amount* cells away.
    #
    # NOTE(review): several apparent defects, left untouched here:
    #  * `x > amount and x < -amount` (and the matching y test) can never be
    #    true for a positive *amount*, so the left/down branches are dead.
    #  * `grid.draw_brush(...)` — Grid defines find_cell, not draw_brush;
    #    this raises AttributeError if any branch fires. find_cell was
    #    probably intended.
    #  * The recursive calls go through fill() with (c, amount) in fill's
    #    (color, c) parameter slots — argument order looks wrong.
    # TODO confirm intended behavior before fixing.
    if cell.color != c:
        cell.color = c
        cell.border = 0
    x = cell.row
    y = cell.column
    if x < amount and x > -amount: #right
        fill(grid.draw_brush(x+1,y), grid, c, amount)
    if x > amount and x < -amount: #left (dead branch — see note above)
        fill(grid.draw_brush(x-1,y), grid, c, amount)
    if y < amount and y > -amount: #up
        fill(grid.draw_brush(x,y+1), grid, c, amount)
    if y > amount and y < -amount: #down (dead branch — see note above)
        fill(grid.draw_brush(x,y-1), grid, c, amount)
def set_cursor(screen,brush,tool): #allows for cursor to have custom shape
    # Draws a custom cursor at the mouse position: a square for ERASE, a
    # small rectangle for FILL, a circle otherwise (BRUSH).
    # NOTE(review): reads the module-level global `brush_size` rather than
    # taking it as a parameter — works because the main loop keeps it updated.
    x,y = pygame.mouse.get_pos()
    dynamic = brush_size * 10 #cursor grows with brush size
    if tool == 'ERASE': #draws a square for erase
        pygame.draw.rect(screen,WHITE,((x - (dynamic) /2,y - (dynamic) /2),(dynamic,dynamic)))
        pygame.draw.rect(screen,BLACK,((x - (dynamic) /2,y - (dynamic) /2),(dynamic,dynamic)),2)
    elif tool == 'FILL': #draws a rectangle for fill
        pygame.draw.rect(screen,brush,((x-10,y-5),(20,10)))
        pygame.draw.rect(screen,BLACK,((x-10,y-5),(20,10)),2)
    else: #draws a circle for brush
        pygame.draw.ellipse(screen,brush,((x - (dynamic) /2,y - (dynamic) /2),(dynamic,dynamic)))
        pygame.draw.ellipse(screen,BLACK,((x - (dynamic) /2,y - (dynamic) /2),(dynamic,dynamic)),2)
class Text_Render(object):
    """Renders a translucent grey panel with optional centered text."""
    def __init__(self,screen_size,x,y,text=None,size= 20):
        # screen_size: [w, h] of the panel; (x, y): top-left blit position.
        self.screen_size = screen_size
        self.surf = pygame.Surface((self.screen_size[0],self.screen_size[1]))
        self.surf.set_alpha(200)       # translucent background
        self.surf.fill((100,100,100))
        self.x = x
        self.y = y
        self.text = text
        self.size = size
        self.font = pygame.font.SysFont("Arial", self.size, True)
        # Rendered even when text is None ("None" string); draw() skips it.
        self.render = self.font.render(str(self.text),True,WHITE)
    def draw(self,surface): #centers text
        # NOTE(review): blits the panel to the module-level global `screen`
        # but the text to the `surface` argument — fine while they are the
        # same object, as in this program.
        screen.blit(self.surf,(self.x,self.y))
        if self.text is not None:
            surface.blit(self.render,(self.screen_size[0]/2 - self.render.get_width()/2 + self.x
                        ,(self.screen_size[1]/2 - self.render.get_height()/2) + self.y))
class Menu(Cell): #Menu pixel class, derived from cell
    """Clickable menu cell: shows text (or a colored swatch), a hover hint,
    and a red border when selected."""
    def __init__(self,x,y,width,height,color,option=(None,None),font = 50):
        super().__init__(x,y,width,height,color)
        self.text = option[0] #label to draw; None means draw a color swatch
        self.data = option[1] #value returned when this cell is clicked
        self.selected = False
        # Optional third element is a hover-hint string; fall back to data.
        # NOTE(review): bare except also hides non-IndexError failures.
        try: self.hint = option[2] #a secret third option exists for text hints
        except: self.hint=self.data
        self.font = font
        self.hint_text = Text_Render([170,30],0,0,str(self.hint),25) #hover hint
        if self.text is not None: #text cell: pre-render the label
            self.font = pygame.font.SysFont("Arial", self.font, False)
            self.render = self.font.render(str(self.text),True,WHITE)
    def draw(self,surface):
        if self.text is not None: #centered label
            surface.blit(self.render,(self.rect.width/2 - self.render.get_width()/2 + self.rect.x
                        ,(self.rect.height/2 - self.render.get_height()/2) + self.rect.y))
        else:
            pygame.draw.rect(surface,self.color,self.rect,0) #color swatch (used for the pallet)
        pygame.draw.rect(surface,(255,255,255),self.rect,3) #white border
        if self.selected:
            pygame.draw.rect(surface,(255,0,0),self.rect,5) #red selection border
        self.hint_text.x, self.hint_text.y = pygame.mouse.get_pos() #hint follows mouse
        if self.on_mouse_hover():
            self.hint_text.draw(surface)
    def on_mouse_click(self,others): #exclusive selection among siblings
        # Deselect all siblings, select self; returns hover state either way.
        if self.on_mouse_hover():
            for x in others: x.selected = False
            self.selected = True
        return self.on_mouse_hover()
class Tool_Save(Menu): #save tool used to save paints
    """Menu button that dumps every cell color of the global pixel_grid
    to a hard-coded 'data.txt', one tuple repr per line."""
    def on_mouse_click(self,others):
        if self.on_mouse_hover():
            with open('data.txt','w') as file:
                for x in pixel_grid.cells:
                    file.write(str(x.color) + '\n')
class Tool_Load(Menu): #loads paints
    """Menu button that restores the global pixel_grid from 'data.txt'.

    The grid size is inferred from the line count (assumed square).
    NOTE(review): eval() on file contents executes arbitrary code if
    data.txt is tampered with — ast.literal_eval would be safer.  The file
    handle from open() is also never closed explicitly.
    """
    def on_mouse_click(self,others):
        if self.on_mouse_hover():
            grid = [eval(x) for x in open('data.txt','r')]
            pixel_grid.set_grid(int(sqrt(len(grid))))
            for i,v in enumerate(grid):
                pixel_grid.cells[i].color = v
                if v != WHITE:
                    pixel_grid.cells[i].border = 0
                else:
                    # WHITE cells: border shown only in grid mode; in
                    # no-grid mode the cell is repainted BLACK (background).
                    if drawing_grid: pixel_grid.cells[i].border = 1
                    else:
                        pixel_grid.cells[i].border = 0
                        pixel_grid.cells[i].color = BLACK
class Menu_Grid(Grid): #a different version of grid that allows for drawing of a transparent BG & selection boxes
    """Grid of Menu cells on a translucent panel; tracks one selected cell."""
    def __init__(self,screen_size,rows,columns,class_type,options,x=0,y=0,gap=0,w=100,h=100,font = 50):
        # options: one (text, data[, hint]) tuple per cell, row-major.
        self.options = options
        self.count = 0   # running index into options while draw_cells runs
        self.font = font
        self.w = w       # per-cell width/height (independent of screen_size)
        self.h = h
        super().__init__(screen_size,rows,columns,class_type,x,y)
        self.surf = pygame.Surface((self.screen_size[0],self.screen_size[1]))
        self.surf.set_alpha(200) #translucent background panel
        self.surf.fill((100,100,100))
    def draw_cells(self):
        # Build one Menu cell per option, centered within screen_size.
        p = []
        for x in range(self.rows):
            for v in range(self.columns):
                p.append(self.class_type((self.screen_size[0] - (self.w * self.columns))/2 + (((self.w * v)) + self.x)
                                        ,(self.screen_size[1] - (self.h * self.rows))/2 + (((self.h * x)) + self.y)
                                        ,self.w,self.h
                                        ,self.color, self.options[self.count], self.font))
                if self.count == 0: #first item starts selected
                    p[0].selected = True
                self.count +=1
        return p
    def draw(self,surface):
        screen.blit(self.surf,(self.x,self.y))
        # The selected cell is moved to the end of the draw list so its red
        # border is painted on top of neighbouring cells.
        # NOTE(review): self.cells is mutated while being iterated — appears
        # to work because pop+append keeps the length constant, but fragile.
        for x in self.cells:
            if x.selected:
                self.cells += [self.cells.pop(self.cells.index(x))]
            x.draw(surface)
    def check_event(self,old=0): #returns the value of the current selected cell
        # Returns the clicked cell's data, or `old` if nothing was clicked.
        for x in self.cells:
            if x.on_mouse_click(self.cells):
                return x.data
        return old
class Pixel(Cell): #cell object that can be brushed over
    """A paintable canvas cell; responds to BRUSH / ERASE / FILL tools."""
    def on_mouse_click(self,new_color,brush,grid,cells,brush_size):
        # new_color: active pallet color; brush: active tool name;
        # grid: whether grid lines are shown; cells: the owning PixelArt grid.
        if self.on_mouse_hover():
            if brush == 'BRUSH': #paint this cell
                self.color = new_color
                self.border = 0
                if brush_size > 1:
                    # Spread to neighbours for larger brushes.
                    # NOTE(review): draw_brush has apparent bugs (see its
                    # comments) — verify before relying on sizes > 1.
                    draw_brush(self,cells,new_color,brush_size)
            elif brush == 'ERASE': #restore background (white+border or black)
                if grid:
                    self.color = WHITE
                    self.border = 1
                else:
                    self.color = BLACK
                    self.border = 0
            elif brush == 'FILL': #flood-fill the region this cell belongs to
                fill(self,cells,self.color,new_color)
class Pallet(Menu_Grid): #pallet class derived from grid (to allow red selection box)
    """Color pallet: like Menu_Grid, but each cell takes its fill color from
    the option tuple (text is None, so Menu draws a swatch)."""
    def draw_cells(self):
        p = []
        for x in range(self.rows):
            for v in range(self.columns):
                p.append(self.class_type((self.screen_size[0] - (self.w * self.columns))/2 + (((self.w * v)) + self.x)
                                        ,(self.screen_size[1] - (self.h * self.rows))/2 + (((self.h * x)) + self.y)
                                        ,self.w,self.h, self.options[self.count][1],self.options[self.count])) #only difference: the option's data doubles as the cell color
                if self.count == 0:
                    p[0].selected = True
                self.count +=1
        return p
class PixelArt(Grid): #the grid that you can draw on
    """The drawing canvas: a Grid of Pixel cells."""
    def draw_grid(self,grid):
        # grid=True: white cells with visible borders; False: black canvas.
        for x in self.cells:
            if grid:
                x.color = (255,255,255)
            else:
                x.color = BLACK
    def set_grid(self,value): #changes grid size through options
        # Rebuilds all cells for a value x value grid; existing paint is lost.
        self.rows = self.columns = value
        self.width = self.screen_size[0] / self.rows
        self.height = self.screen_size[1] / self.columns
        self.cells = self.draw_cells()
    def check_event(self,new_color,brush,grid,brush_size):
        # Forward the click to every cell; only the hovered one reacts.
        for x in self.cells:
            x.on_mouse_click(new_color,brush,grid,self,brush_size)
#=====INITIALIZE PYGAME AND COLORS=====#
pygame.init()
clock = pygame.time.Clock()
screen = pygame.display.set_mode([700,900]) #screensize
pygame.mouse.set_visible(False)  # hidden: set_cursor() draws a custom cursor
pygame.display.set_caption('DRAW V1.0')
WHITE = (255, 255, 255)
TRUWHITE=(249, 249, 249)
BLACK = ( 0, 0, 0)
RED = (255, 66, 66)
BLUE = ( 91, 65, 255)
GREEN = ( 87, 255, 65)
YELLOW= (239, 255, 65)
PURPLE= (178, 65, 255)
PINK = (255, 65, 239)
TURQ = ( 65, 255, 248)
ORANGE= (244, 176, 66)
#=====OPTIONS FOR TOOLBAR AND BEFORE GAME STARTS=====#
# Option tuples are (label, data[, hover hint]); label None draws a swatch.
colors = [(None,TRUWHITE,'WHITE'),(None,ORANGE,'ORANGE'),(None,RED,'RED')
          ,(None,BLUE,'BLUE'),(None,GREEN,'GREEN'),(None,YELLOW,'YELLOW')
          ,(None,PURPLE,'PURPLE'),(None,PINK,'PINK'),(None,TURQ,'TURQUOISE')]
options = [('SMALL',10,'10 x 10 squares')
           ,('MEDIUM',25,'25 x 25 squares')
           ,('LARGE',50,'50 x 50 squares')]
grid_options = [('YES',True,'YES'),('NO',False,'NO')]
start_options = [('START',False,'START PAINTING')]
toolbar_options = [('B','BRUSH'),('E','ERASE'),('F','FILL')]
save_options = [('SAVE',False,'SAVE PROG.')]
load_options = [('LOAD',False,'LOAD PREV.')]
brush_sizes = [('1',1,'1 SQUARE'),('2',2,'2 SQUARES'),('3',3,'3 SQUARES')
               ,('4',4,'4 SQUARES'),('5',5,'5 SQUARES')]
#=====VARIABLES FOR GAME=====#
tool = 'BRUSH'
brush_size = 1
brush = TRUWHITE
menu_displayed = True   # True: options menu; False: painting screen
drawing_grid = True
grid_size = 10
drawing_board = [700,600]
pallet_board = [170,170]
#=====MENU BEFORE GAME STARTS=====#
background = Text_Render([700,900],1,1)
select_size = Text_Render([700,50],0,15,'SELECT YOUR GRID SIZE',35)
visible_grid = Text_Render([700,50],0,485,'VISIBLE GRID?',35)
start_paint = Text_Render([700,50],0,695,'START PAINTING',35)
start = Menu_Grid([700,130],1,1,Menu,start_options,0,760,0,225,100)
size_window = Menu_Grid([700,390],3,1,Menu,options,0,80,150,190,100)
has_grid = Menu_Grid([700,130],1,2,Menu,grid_options,0,550,0)
#=====ACTUAL PAINT PROGRAM=====#
pallet_text = Text_Render([170,30],40,635,'COLOR PALLET',25)
credit_text = Text_Render([620,30],40,845,'PAINT PROGRAM BY: NICK TKACHOV',25)
toolbar_text = Text_Render([270,30],215,635,'TOOLBAR',25)
brush_text = Text_Render([270,30],215,740,'BRUSH SIZE (px)',25)
save_text = Text_Render([170,30],490,635,'SAVE',25)
load_text = Text_Render([170,30],490,740,'LOAD',25)
pixel_grid = PixelArt(drawing_board,grid_size,grid_size,Pixel)
draw_pallet = Pallet(pallet_board,3,3,Menu,colors,40,670,0,50,50)
toolbar = Menu_Grid([270,65],1,3,Menu,toolbar_options,215,670,10,50,50)
save = Menu_Grid([170,65],1,1,Tool_Save,save_options,490,670,10,130,50)
load = Menu_Grid([170,65],1,1,Tool_Load,load_options,490,775,10,130,50)
brushes = Menu_Grid([270,65],1,5,Menu,brush_sizes,215,775,10,50,50,40)
#=====LISTS THAT HAVE CLASSES TO DRAW IN LOOP=====#
menu_screen = [background,select_size,size_window
               ,visible_grid,has_grid,start_paint
               ,start]
drawing_screen = [pallet_text,toolbar_text,brush_text,save_text
                  ,load_text,credit_text,save,load,toolbar
                  ,brushes,pixel_grid,draw_pallet]
#=====MAIN LOOP=====#
while True:
    screen.fill(BLACK)
    if menu_displayed:
        for x in menu_screen: x.draw(screen)
    else:
        for x in drawing_screen: x.draw(screen)
    for event in pygame.event.get():
        # NOTE(review): identity comparison with an int constant; `==` would
        # be the safe spelling even though `is` happens to work here.
        if event.type is pygame.QUIT:
            pygame.quit()
            exit()
    if pygame.mouse.get_pressed()[0]:
        if not menu_displayed: #IF DRAWING
            pixel_grid.check_event(brush,tool,drawing_grid,brush_size)
            brush = draw_pallet.check_event(brush)
            tool = toolbar.check_event(tool)
            brush_size = brushes.check_event(brush_size)
            save.check_event()
            load.check_event()
        else: #IF IN MENU
            grid_size = size_window.check_event(grid_size)
            drawing_grid = has_grid.check_event(drawing_grid)
            menu_displayed = start.check_event(menu_displayed)
            # Rebuild the canvas each frame while in the menu so the chosen
            # size/grid options take effect before painting starts.
            pixel_grid.set_grid(grid_size)
            pixel_grid.draw_grid(drawing_grid)
    set_cursor(screen,brush,tool) #CHANGES CURSOR
    clock.tick(0)  # 0 = uncapped framerate
    pygame.display.flip()
pygame.quit()
| 38.691441 | 163 | 0.560917 | 7,984 | 0.464753 | 0 | 0 | 0 | 0 | 0 | 0 | 4,674 | 0.272076 |
d76f76f73a1b4d94c460fbf42b33ff78ab858a28 | 8,668 | py | Python | Pcolor_Peaks.py | nchaparr/Sam_Output_Anls | c6736f7863b36d09738ac95b7cbde19ba69526cf | [
"MIT"
] | null | null | null | Pcolor_Peaks.py | nchaparr/Sam_Output_Anls | c6736f7863b36d09738ac95b7cbde19ba69526cf | [
"MIT"
] | 1 | 2015-04-18T14:47:49.000Z | 2015-05-01T21:51:44.000Z | Pcolor_Peaks.py | nchaparr/Sam_Output_Anls | c6736f7863b36d09738ac95b7cbde19ba69526cf | [
"MIT"
] | null | null | null | from __future__ import division
from netCDF4 import Dataset
import glob,os.path
import numpy as np
import numpy.ma as ma
from scipy.interpolate import UnivariateSpline
from matplotlib import cm
from matplotlib import ticker
import matplotlib.pyplot as plt
#import site
#site.addsitedir('/tera/phil/nchaparr/SAM2/sam_main/python')
#from Percentiles import *
from matplotlib.patches import Patch
import sys
#sys.path.insert(0, '/tera/phil/nchaparr/python')
import nchap_fun as nc
from Make_Timelist import *
import warnings
warnings.simplefilter('ignore', np.RankWarning)
#import pywt
from scipy import stats
from datetime import datetime
import fastfit as fsft
"""
In testing phase -- get_fit() for identifying ML top
To plot gradient maxima ie BL heights, and w on a 2d horizontal domain,
and get a histogram or contour plot of BL heigths
for an individual case
added function to get ticks and labels based on mean and standard deviation
"""
#TODO: a mess right now. but can be tidied up once regression code is included
def get_ticks(mean, stddev, max, min):
    """
    Build colorbar ticks and LaTeX labels at whole standard deviations
    around the mean, clamped to [min, max].

    Arguments:
       mean, stddev, max, min  (note: `max`/`min` shadow the builtins; the
       names are kept for interface compatibility)
    Returns:
       label_list, tick_list
    """
    tick_list = []
    label_list = []
    below = int(np.ceil((mean - min) / stddev))  # whole std-devs below mean
    above = int(np.ceil((max - mean) / stddev))  # whole std-devs above mean
    # Below the mean: if min is within one sigma, use min itself with its
    # exact (fractional) distance; otherwise one tick per whole sigma.
    if below == 1:
        tick_list.append(min)
        label_list.append(r'$\mu - %.1f \sigma$' % ((mean - min) / stddev))
    else:
        for i in range(1, below):
            tick_list.append(mean - (below - i) * stddev)
            label_list.append(r'$\mu - %.1f \sigma$' % (below - i))
    tick_list.append(mean)
    label_list.append(r'$\mu$')
    # Above the mean: symmetric to the block above.
    if above == 1:
        tick_list.append(max)
        label_list.append(r'$\mu + %.1f \sigma$' % ((max - mean) / stddev))
    else:
        for i in range(1, above):
            tick_list.append(mean + i * stddev)
            label_list.append(r'$\mu + %.1f \sigma$' % i)
    return label_list, tick_list
def get_fit(theta, height):
    """
    Fitting the local theta profile with three lines.

    Exhaustively tries every pair of breakpoints (j, k) over the first 290
    levels, fits three least-squares line segments, and keeps the pair with
    the smallest total residual sum of squares (RSS).  Python 2 code (note
    the statement `print`); the 290-level cap is hard-coded.
    """
    fitvals = np.zeros_like(theta)
    # RSS[j, k] = total residual for breakpoints at levels j and k.
    RSS = np.empty((290, 290))+ np.nan
    print RSS[0,0]
    for j in range(290):
        if j > 2:
            for k in range(290):
                if k>j+1 and k<289:
                    # Segment 1: levels [0, j); segment 2: [j, k); segment 3: [k, 290).
                    # Segments 2 and 3 are constrained to join the previous
                    # segment's value at the breakpoint.
                    b_1 = (np.sum(np.multiply(height[:j], theta[:j])) - 1/j*np.sum(height[:j])*np.sum(theta[:j]))/(np.sum(height[:j]**2) - 1/j*np.sum(height[:j])**2)
                    a_1 = np.sum(np.multiply(height[:j], theta[:j]))/np.sum(height[:j]) - b_1*np.sum(height[:j]**2)/np.sum(height[:j])
                    b_2 = (np.sum(theta[j:k]) - (k-j)*(a_1+b_1*height[j]))/(np.sum(height[j:k]) - (k-j)*height[j])
                    a_2 = np.sum(np.multiply(height[j:k], theta[j:k]))/np.sum(height[j:k]) - b_2*np.sum(height[j:k]**2)/np.sum(height[j:k])
                    b_3 = (np.sum(theta[k:290]) - (290-k)*(a_2+b_2*height[k]))/(np.sum(height[k:290]) - (290-k)*height[k])
                    a_3 = np.sum(np.multiply(height[k:290], theta[k:290]))/np.sum(height[k:290]) - b_3*np.sum(height[k:290]**2)/np.sum(height[k:290])
                    RSS[j, k] = np.sum(np.add(theta[2:j], -(a_1+ b_1*height[2:j]))**2) + np.sum(np.add(theta[j:k], -(a_2+ b_2*height[j:k]))**2) + np.sum(np.add(theta[k:290], -(a_3+ b_3*height[k:290]))**2)
    RSS = ma.masked_where(np.isnan(RSS), RSS)
    # Best breakpoint pair = argmin over the masked RSS surface.
    [j, k] = np.unravel_index(ma.argmin(RSS), RSS.shape)
    # Refit at the winning (j, k) to produce the fitted profile.
    # NOTE(review): this b_1 expression differs from the in-loop one —
    # `height[:j]*np.sum(...)` vs `height[:j])*np.sum(...)` bracketing and a
    # `height[2:j]` slice in the denominator; looks like a transcription bug.
    b_1 = (np.sum(np.multiply(height[:j], theta[:j])) - 1/j*np.sum(height[:j]*np.sum(theta[:j])))/(np.sum(height[:j]**2) - 1/j*np.sum(height[2:j])**2)
    a_1 = np.sum(np.multiply(height[:j], theta[:j]))/np.sum(height[:j]) - b_1*np.sum(height[:j]**2)/np.sum(height[:j])
    b_2 = (np.sum(theta[j:k]) - (k-j)*(a_1+b_1*height[j]))/(np.sum(height[j:k]) - (k-j)*height[j])
    a_2 = np.sum(np.multiply(height[j:k], theta[j:k]))/np.sum(height[j:k]) - b_2*np.sum(height[j:k]**2)/np.sum(height[j:k])
    b_3 = (np.sum(theta[k:290]) - (290-k)*(a_2+b_2*height[k]))/(np.sum(height[k:290]) - (290-k)*height[k])
    a_3 = np.sum(np.multiply(height[k:290], theta[k:290]))/np.sum(height[k:290]) - b_3*np.sum(height[k:290]**2)/np.sum(height[k:290])
    fitvals[:j] = b_1*height[:j] + a_1
    fitvals[j:k] = b_2*height[j:k] + a_2
    fitvals[k:290] = b_3*height[k:290] + a_3
    return fitvals, RSS, j, k
#Lists of times relating to output (nc) files
dump_time_list, time_hrs = Make_Timelists(1, 600, 28800)
dump_time = dump_time_list[11]
print dump_time
for k in range(1):
#getting variables from nc files
[wvels, theta, tracer, height] = nc.Get_Var_Arrays("/tera2/nchaparr/Mar52014/runs/sam_case", "/OUT_3D/keep/NCHAPP1_testing_doscamiopdata_24_", dump_time, k+1)
#getting points of maximum theta gradient, getting rid of this soon
#[dvardz, grad_peaks] = nc.Domain_Grad(theta, height)
#tops_indices=np.where(np.abs(grad_peaks - 1400)<10)
#choosing one horizontal point
for i in range(1):
#top_index = [tops_indices[0][i], tops_indices[1][i]]
#[i, j] = top_index
[i, j] = [50, 50]
thetavals = theta[:, i, j]
startTime = datetime.now()
#print 'Start', startTime#1
top = np.where(np.abs(height-2300)<100)[0][0]
print top, height[top]
RSS, J, K = fsft.get_fit(thetavals, height, top)
#print J, height[J]
#print 'RSS time', (datetime.now()-startTime)
fitvals = np.zeros_like(thetavals[:top])
b_1 = (np.sum(np.multiply(height[9:J], thetavals[9:J])) - 1.0/(J-9)*np.sum(height[9:J]*np.sum(thetavals[9:J])))/(np.sum(height[9:J]**2) - 1.0/(J-9)*np.sum(height[9:J])**2)
#print np.sum(np.multiply(height[9:J], thetavals[9:J])), - 1.0/(J-9)*np.sum(height[9:J]*np.sum(thetavals[9:J])), np.sum(height[9:J]**2), - 1.0/(J-9)*np.sum(height[9:J])**2
a_1 = np.sum(np.multiply(height[9:J], thetavals[9:J]))/np.sum(height[9:J]) - b_1*np.sum(height[9:J]**2)/np.sum(height[9:J])
b_2 = (np.sum(thetavals[J:K]) - (K-J)*(a_1+b_1*height[J]))/(np.sum(height[J:K]) - (K-J)*height[J])
a_2 = np.sum(np.multiply(height[J:K], thetavals[J:K]))/np.sum(height[J:K]) - b_2*np.sum(height[J:K]**2)/np.sum(height[J:K])
b_3 = (np.sum(thetavals[K:top]) - (top-K)*(a_2+b_2*height[K]))/(np.sum(height[K:top]) - (top-K)*height[K])
a_3 = np.sum(np.multiply(height[K:top], thetavals[K:top]))/np.sum(height[K:top]) - b_3*np.sum(height[K:top]**2)/np.sum(height[K:top])
#print b_2, b_3
fitvals[:J] = b_1*height[:J] + a_1
fitvals[J:K] = b_2*height[J:K] + a_2
fitvals[K:top] = b_3*height[K:top] + a_3
#set up plot
theFig = plt.figure(i)
theFig.clf()
theAx = theFig.add_subplot(121)
theAx.set_title('Fit')
theAx.set_xlabel(r'$\overline{\theta} (K)$')
theAx.set_ylabel('z (m)')
theAx1 = theFig.add_subplot(122)
theAx1.set_title('Profile and Fit')
theAx1.set_xlabel(r'$\overline{\theta} (K) $')
theAx1.set_ylabel('z (m)')
theAx1.plot(thetavals, height[:], 'wo')
theAx.plot(fitvals[:J], height[:J], 'r-')
theAx.plot(fitvals[J:K], height[J:K], 'b-')
theAx.plot(fitvals[K:top], height[K:top], 'g-')
theAx1.plot(fitvals[:top], height[:top], 'r-')
theAx1.set_xlim(300, 320)
theAx1.set_ylim(0, 2000)
theAx.set_ylim(0, 2000)
theAx.set_xlim(300, 320)
plt.show()
| 40.12963 | 210 | 0.53311 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,972 | 0.227503 |