blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ca1b707109866827e14056049f57c913b474171f | 4229a406a83a573dc357c1144cae7c5aad6f673b | /trestle/tasks/__init__.py | 86a3ca67891864e7f9daa7aafdae8b49ba9a8a8d | [
"Apache-2.0"
] | permissive | xee5ch/compliance-trestle | dbc0647fe18e1164a75bcfdc4d38687df14e3247 | 969c10eceb73202d2b7856bac598f9b11afc696e | refs/heads/main | 2023-09-02T17:21:35.659432 | 2021-11-17T00:01:27 | 2021-11-17T00:01:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | # -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trestle tasks module. Designed for arbitrary actions that are not editing to be flexible for multiple usecases."""
| [
"noreply@github.com"
] | noreply@github.com |
495c686194df3677c638940f0900b1e8bed5d5c9 | c9cb89a4835371e84963f235f090669048b75d57 | /control.py | 3a72df06c2c0421cd318538b3b68c7b5ff72c36c | [] | no_license | SergioGasquez/autonomous-mobile-robot | a333197a12d009a97399e8e705c0c727b5c700ab | fe06e155cec24eae40c51510eb422c496cc0cf25 | refs/heads/master | 2018-10-14T05:33:22.887383 | 2018-07-11T21:52:49 | 2018-07-11T21:52:49 | 118,379,898 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,847 | py | #!/usr/bin/python
# Import libraries
from math import *
import rospy
import tf2_ros
from geometry_msgs.msg import Twist
from nav_msgs.msg import Path
from time import sleep # Library for implementing stops signs
class Control(object):
def __init__(self):
self._tfBuffer = tf2_ros.Buffer()
self._tfListener = tf2_ros.TransformListener(self._tfBuffer)
self._pubCmdVel = rospy.Publisher('cmd_vel', Twist, queue_size=1) # Publication for sending velocities to the robot
self._subPlan = rospy.Subscriber('plan', Path, self._handlePlan) # Subscription for plan in order to recibe comands from plannyng.py
# Initialize variables
self.k=None # Index of the point that we are triying to reach
self.v=0.3 # Linear Velocity
self.validPath = 0 # Boolean used verify if the path is correct and to keep the program waiting
def _handlePlan(self, msg):
rospy.loginfo('New path with %d poses received.' % len(msg.poses))
# We obtain the points to reach for the whole trajectory,appending them into arrays
self.xRef=[]; self.yRef=[]; self.zRef=[]; self.wRef=[];
self.length=len(msg.poses); # Number of points in the recived path
for i in range(0,self.length):
self.xRef.append(msg.poses[i].pose.position.x)
self.yRef.append(msg.poses[i].pose.position.y)
self.zRef.append(msg.poses[i].pose.orientation.z)
self.wRef.append(msg.poses[i].pose.orientation.w)
self.phiRef=2*atan(self.zRef[i]/self.wRef[i])
self.validPath = 1 # Change the boolean since the recived path is correct
def spin(self):elf
self.aux=0 # Boolean that checks if we have stopped already in a cell
rate = rospy.Rate(30.0)
while not rospy.is_shutdown():
try:
trans = self._tfBuffer.lookup_transform('world', 'robot14', rospy.Time())
except tf2_ros.TransformException:
rospy.logwarn('TF from world to wmr is not available.')
rospy.sleep(rospy.Duration(1.0))
continue
# We obtain the position of the robot
self.x=(trans.transform.translation.x)
self.y=(trans.transform.translation.y)
self.z=(trans.transform.rotation.z)
self.w=(trans.transform.rotation.w)
self.phi=2*atan2(self.z,self.w) # Phi must be between [pi,-pi]
# Transform the position into cells and into integers
x=self.x // 0.15
y=self.y // 0.15
x=int(x)
y=int(y)
# If the path is not valid,the robot won't move
if self.validPath == 0:
msg = Twist()
msg.angular.z = 0
msg.linear.x = 0
self._pubCmdVel.publish(msg)
rate.sleep()
continue
if self.k is None:
self.k=0
# We use the formula to obtain the angle that we should take to reach the goal
phiTarget=atan2((self.yRef[self.k]-self.y),(self.xRef[self.k]-self.x))
k2=1 # Angular velocity gain
ePhi=phiTarget-self.phi # Difference between robots angle and goal position
# ePhi must be between [pi,-pi]
if ePhi > pi:
ePhi=ePhi-2*pi
if ePhi < -pi:
ePhi=ePhi+2*pi
# Definition of velocities
self.Omega=k2*(ePhi)
self.v=0.1*cos(ePhi)**2 # We multiply the speed with the cos² in order to reduce the speed when it goes through a corner
D=sqrt((self.xRef[self.k]-self.x)**2 + (self.yRef[self.k]-self.y)**2) # Distance between the robot and the actual goal point
if (D<0.1): # We set the threshold at 10cms
self.k=self.k+1 # We face the next goal point
if (self.length==self.k): # We check if we are in the last point
self.v=0
self.Omega=0
self.k=0
self.validPath = 0
# Print Distance and velocities
print('Distance:%f'%D)
print('v:%f'%self.v)
print('w:%f'%self.w)
# Stop signs map in form of bolean matrix
stop = [[0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,1,0,0,0,0,0,0,0,0,0],
[0,0,0,1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,1,0,0,0,1],
[0,0,0,0,1,0,0,0,1,0,0,0,0,0],
[0,0,0,1,0,0,1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,1,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,1,0,0,0,1,0,0,0,0,1],
[0,0,1,0,0,1,0,0,0,0,0,0,0,0]]
if (stop[11-y][x]==1 and self.aux==0): # We check if we are in a stop sign and if have already stopped in this cell
# We make a 1 second stop by publishing the message with all the velocities set at 0 and change the aux boolean
msg = Twist()
msg.angular.z = 0
msg.linear.x = 0
self._pubCmdVel.publish(msg)
self.aux=1
sleep(1)
# We publish the message with the velocities that we had before stopping
msg = Twist()
msg.angular.z = self.Omega
msg.linear.x = self.v
self._pubCmdVel.publish(msg)
elif(stop[11-y][x]==0): # If we are not in a stop cell ,the aux boolean is 0
self.aux=0
# Message containing velocities is sent to the robot
msg = Twist()
msg.angular.z = self.Omega
msg.linear.x = self.v
self._pubCmdVel.publish(msg)
rate.sleep()
if __name__ == '__main__':
rospy.init_node('control')
control = Control()
control.spin()
| [
"noreply@github.com"
] | noreply@github.com |
af0fe42d093a7499d9686e4fb26fe87b9ee1d317 | 36920a18ad0b91ccd88a8c1dfbf38eced123115c | /iftt.py | 12a4c0e13017abcdcef77d31da627489ff59d9c5 | [] | no_license | mahesh07513/Flask-Alexa | 8926b83163d93f68dd4c64c8356006419f96d577 | 7952494893fc2b5408f17e4c3787b5e9264d523b | refs/heads/master | 2020-04-30T01:17:43.585664 | 2019-03-19T14:08:36 | 2019-03-19T14:08:36 | 176,525,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | import requests
def email_alert(first, second, third):
report = {}
report["value1"] = first
report["value2"] = second
report["value3"] = third
requests.post("https://maker.ifttt.com/trigger/callpython/with/key/cE-g8Ez3Be3ryVsJKlQTHL", data=report)
print("Choose your first string.")
a = input()
print("Choose your second string.")
b = input()
print("Choose your third string.")
c = input()
email_alert(a, b, c) | [
"mahesh07513@gmail.com"
] | mahesh07513@gmail.com |
534fa21819f8bfb302859d97f72214eed5737a33 | ac3ef13962d8995c3d546b0b67b99acaa63aa88a | /quizXZ/migrations/0003_auto_20160302_1940.py | 4b67fe13f2e73c9c860d303c92a10ce904911c6b | [] | no_license | Caroline-xinyue/quizBase | a77f75a132afc9b9d873796710339a576810fee8 | 18919845535d5883c2eddbf791580cb497c50a57 | refs/heads/master | 2020-04-10T21:37:51.561960 | 2018-03-07T21:12:57 | 2018-03-07T21:12:57 | 70,264,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('quizXZ', '0002_auto_20160301_1843'),
]
operations = [
migrations.AlterField(
model_name='userchoice',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.DeleteModel(
name='User',
),
]
| [
"xzhang@brynmawr.edu"
] | xzhang@brynmawr.edu |
8d7db82a4657e40c36be1643c2d9987499a8b4a9 | 5451f259e39c8601316a091e1a91a62d2e55d5d1 | /ncvxsp/linear_model/scad_coordinate_descent.py | 5792b104f76eb8062b3cc1cc165fd02d80646034 | [
"BSD-3-Clause"
] | permissive | Clej/ncvx-sparse | 22bf5672fee415d2d2ffb909bbdd52e89d531514 | 457a1a1e7c4422e1e7cd761d46c08055108d5c14 | refs/heads/master | 2023-06-29T14:11:23.495929 | 2021-08-06T20:12:59 | 2021-08-06T20:12:59 | 393,133,213 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,107 | py | # -*- coding: utf-8 -*-
"""
Created on Fri May 21 12:19:50 2021
@author: Clément Lejeune (clement.lejeune@irit.fr; clementlej@gmail.com)
"""
import sys
import warnings
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from joblib import Parallel, effective_n_jobs
from sklearn.base import RegressorMixin, MultiOutputMixin
from sklearn.linear_model._base import LinearModel, _preprocess_data, _pre_fit
from sklearn.utils import check_array
from sklearn.utils.validation import check_random_state
from sklearn.model_selection import check_cv
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.fixes import _astype_copy_false, _joblib_parallel_args
from sklearn.utils.validation import check_is_fitted, _check_sample_weight
# from ..utils.validation import column_or_1d
# from ..utils.validation import _deprecate_positional_args
# from ..utils.fixes import delayed
from sklearn.linear_model._coordinate_descent import _alpha_grid, _set_order
from . import scad_cd_fast as scad_cd_fast
def scadnet_path(X, y, *, scad_ratio=0.5, gam = 3.7, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False,
check_input=True, **params):
"""
Compute SCAD elastic net path with coordinate descent.
The optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * scad_ratio * SCAD(w, gam)
+ 0.5 * alpha * (1 - scad_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||_Fro^2
+ alpha * SCAD_ratio * \\sum_i SCAD(||W_i||_1, gam)
+ 0.5 * alpha * (1 - scad_ratio) * ||W||_Fro^2
Where::
SCAD(u, gam) =
i.e. the L1 norm for small coefficients, the L0 pseudo-norm for large coefficients and a quadratic transition between both penalties.
This is a continuous semi-concave (concave w.r.t |u|) version of the L0 pseudo-norm which is sparse and unbiased.
# Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : {array-like, sparse matrix} of shape (n_samples,) or \
(n_samples, n_outputs)
Target values.
scad_ratio : float, default=0.5
Number between 0 and 1 passed to SCAD net (scaling between
SCAD and l2 penalties). ``scad_ratio=1`` corresponds to the SCAD regression without l2^2 regularization, a (biased) LASSO.
eps : float, default=1e-3
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, default=100
Number of alphas along the regularization path.
alphas : ndarray, default=None
List of alphas where to compute the models.
If None alphas are set automatically.
precompute : 'auto', bool or array-like of shape (n_features, n_features),\
default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like of shape (n_features,) or (n_features, n_outputs),\
default=None
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : ndarray of shape (n_features, ), default=None
The initial values of the coefficients.
verbose : bool or int, default=False
Amount of verbosity.
return_n_iter : bool, default=False
Whether to return the number of iterations or not.
positive : bool, default=False
If set to True, forces coefficients to be positive.
(Only allowed when ``y.ndim == 1``).
check_input : bool, default=True
If set to False, the input validation checks are skipped (including the
Gram matrix when provided). It is assumed that they are handled
by the caller.
**params : kwargs
Keyword arguments passed to the coordinate descent solver.
Returns
-------
alphas : ndarray of shape (n_alphas,)
The alphas along the path where models are computed.
coefs : ndarray of shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
# dual_gaps : ndarray of shape (n_alphas,)
# The dual gaps at the end of the optimization for each alpha.
n_iters : list of int
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
"""
# We expect X and y to be already Fortran ordered when bypassing
# checks
if check_input:
X = check_array(X, accept_sparse='csc', dtype=[np.float64, np.float32],
order='F', copy=copy_X)
y = check_array(y, accept_sparse='csc', dtype=X.dtype.type,
order='F', copy=False, ensure_2d=False)
if Xy is not None:
# Xy should be a 1d contiguous array or a 2D C ordered array
Xy = check_array(Xy, dtype=X.dtype.type, order='C', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
# TODO: include task_sparsity ['group-l1', 'group-l2']
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
task_sparsity_gl1 = 'group-l1'
if multi_output and positive:
raise ValueError('positive=True is not allowed for multi-output'
' (y.ndim != 1)')
# MultiTaskSCADnet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_offset' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_offset'] / params['X_scale']
X_sparse_scaling = np.asarray(X_sparse_scaling, dtype=X.dtype)
else:
X_sparse_scaling = np.zeros(n_features, dtype=X.dtype)
# X should be normalized and fit already if function is called
# from ElasticNet.fit
if check_input:
X, y, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False,
fit_intercept=False, copy=False, check_input=check_input)
if alphas is None:
# No need to normalize of fit_intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=scad_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=X.dtype)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=X.dtype)
if coef_init is None:
coef_ = np.zeros(coefs.shape[:-1], dtype=X.dtype, order='F')
else:
coef_ = np.asfortranarray(coef_init, dtype=X.dtype)
for i, alpha in enumerate(alphas):
# account for n_samples scaling in objectives between here and cd_fast
scad_reg = alpha * scad_ratio * n_samples
l2_reg = alpha * (1.0 - scad_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
# model = cd_fast.sparse_enet_coordinate_descent(
# coef_, scad_reg, l2_reg, X.data, X.indices,
# X.indptr, y, X_sparse_scaling,
# max_iter, tol, rng, random, positive)
raise ValueError("SCADnet cannot deal sparse matrices and may do it in future version. "
"Instead, use Lasso or ElasticNet.")
elif multi_output:
# if gam is None:
# model = cd_fast.enet_coordinate_descent_multi_task(
# coef_, scad_reg, l2_reg, X, y, max_iter, tol, rng, random)
# else:
model = scad_cd_fast.scad_coordinate_descent_multi_task(
coef_, scad_reg, l2_reg, gam,
X, y,
max_iter, tol, rng, random, task_sparsity_gl1)
elif isinstance(precompute, np.ndarray):
# We expect precompute to be already Fortran ordered when bypassing
# checks
if check_input:
precompute = check_array(precompute, dtype=X.dtype.type,
order='C')
# model = cd_fast.enet_coordinate_descent_gram(
# coef_, scad_reg, l2_reg, precompute, Xy, y, max_iter,
# tol, rng, random, positive)
raise ValueError("SCADnet cannot deal Gram matrix as input and may do it in future version. "
"Instead, use Lasso or ElasticNet.")
elif precompute is False:
model = scad_cd_fast.scad_coordinate_descent(
coef_, scad_reg, l2_reg, gam, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like. Got %r" % precompute)
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
# we correct the scale of the returned dual gap, as the objective
# in cd_fast is n_samples * the objective in this docstring.
dual_gaps[i] = dual_gap_ / n_samples
n_iters.append(n_iter_)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
###############################################################################
# SCAD + L2 linear regression
class SCADnet(MultiOutputMixin, RegressorMixin, LinearModel):
"""Linear regression combined with SCAD and ridge priors as regularizers.
The optimization objective for SCADnet regression is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * scad_ratio * SCAD(w, gam)
+ 0.5 * alpha * (1 - scad_ratio) * ||w||^2_2
Where::
SCAD(w, gam) =
i.e. the L1 norm for small coefficients (in absolute value), the L0 norm for large coefficients and a quadratic transition between both penalties.
This is a continuous semi-concave (concave w.r.t |u|) relaxation of the L0 pseudo-norm which is sparse and unbiased.
"""
path = staticmethod(scadnet_path)
# @_deprecate_positional_args
def __init__(self, alpha=1.0, *, scad_ratio=0.5,
gam = 3.7,
fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.scad_ratio = scad_ratio
self.gam = gam
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive # positive=True, currently not available for SCAD
self.random_state = random_state
self.selection = selection
def fit(self, X, y, sample_weight=None, check_input=True):
"""Fit model with coordinate descent.
Parameters
----------
X : {ndarray, sparse matrix} of (n_samples, n_features)
Data.
y : {ndarray, sparse matrix} of shape (n_samples,) or \
(n_samples, n_targets)
Target. Will be cast to X's dtype if necessary.
sample_weight : float or array-like of shape (n_samples,), default=None
Sample weight. Internally, the `sample_weight` vector will be
rescaled to sum to `n_samples`.
.. versionadded:: 0.23
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if isinstance(self.precompute, str):
raise ValueError('precompute should be one of True, False or'
' array-like. Got %r' % self.precompute)
if (not isinstance(self.scad_ratio, numbers.Number) or
self.scad_ratio < 0 or self.scad_ratio > 1):
raise ValueError("scad_ratio must be between 0 and 1; "
f"got scad_ratio={self.scad_ratio}")
# if self.scad_ratio < 1.0:
# raise ValueError("Set scad_ratio < 1 when some columns in matrix 'X' are highly correlated, "
# "otherwise SCAD coordinate descent may not converge well. "
# "Note that for scad_ratio < 1, coefficients are biased (less than with ElasticNet).")
if (not isinstance(self.gam, numbers.Number) or
self.gam < 2):
raise ValueError("gam parameter must be greater than 2; "
f"got gam={self.gam}. "
"It is recommended to use default value 3.7.")
# Remember if X is copied
X_copied = False
# We expect X and y to be float64 or float32 Fortran ordered arrays
# when bypassing checks
if check_input:
X_copied = self.copy_X and self.fit_intercept
X, y = self._validate_data(X, y, accept_sparse='csc',
order='F',
dtype=[np.float64, np.float32],
copy=X_copied, multi_output=True,
y_numeric=True)
y = check_array(y, order='F', copy=False, dtype=X.dtype.type,
ensure_2d=False)
n_samples, n_features = X.shape
alpha = self.alpha
if isinstance(sample_weight, numbers.Number):
sample_weight = None
if sample_weight is not None:
if check_input:
if sparse.issparse(X):
raise ValueError("Sample weights do not (yet) support "
"sparse matrices.")
sample_weight = _check_sample_weight(sample_weight, X,
dtype=X.dtype)
# simplify things by rescaling sw to sum up to n_samples
# => np.average(x, weights=sw) = np.mean(sw * x)
sample_weight = sample_weight * (n_samples / np.sum(sample_weight))
# Objective function is:
# 1/2 * np.average(squared error, weights=sw) + alpha * penalty
# but coordinate descent minimizes:
# 1/2 * sum(squared error) + alpha * penalty
# enet_path therefore sets alpha = n_samples * alpha
# With sw, enet_path should set alpha = sum(sw) * alpha
# Therefore, we rescale alpha = sum(sw) / n_samples * alpha
# Note: As we rescaled sample_weights to sum up to n_samples,
# we don't need this
# alpha *= np.sum(sample_weight) / n_samples
# Ensure copying happens only once, don't do it again if done above.
# X and y will be rescaled if sample_weight is not None, order='F'
# ensures that the returned X and y are still F-contiguous.
should_copy = self.copy_X and not X_copied
X, y, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=should_copy,
check_input=check_input, sample_weight=sample_weight)
# coordinate descent needs F-ordered arrays and _pre_fit might have
# called _rescale_data
if check_input or sample_weight is not None:
X, y = _set_order(X, y, order='F')
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or not hasattr(self, "coef_"):
coef_ = np.zeros((n_targets, n_features), dtype=X.dtype,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=X.dtype)
self.n_iter_ = []
# fits n_targets single-output linear regressions independently
for k in range(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
scad_ratio=self.scad_ratio, eps=None,
gam=self.gam,
n_alphas=None, alphas=[alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_offset=X_offset, X_scale=X_scale,
return_n_iter=True, coef_init=coef_[k],
max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection,
check_input=False)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_ = coef_[0]
self.dual_gap_ = dual_gaps_[0]
else:
self.coef_ = coef_
self.dual_gap_ = dual_gaps_
self._set_intercept(X_offset, y_offset, X_scale)
# workaround since _set_intercept will cast self.coef_ into X.dtype
self.coef_ = np.asarray(self.coef_, dtype=X.dtype)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
"""Sparse representation of the fitted `coef_`."""
return sparse.csr_matrix(self.coef_)
def _decision_function(self, X):
"""Decision function of the linear model.
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : ndarray of shape (n_samples,)
The predicted decision function.
"""
check_is_fitted(self)
if sparse.isspmatrix(X):
return safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
else:
return super()._decision_function(X)
class MultiTaskSCADnet(SCADnet):
"""Multi-task SCADNet model trained with SCAD/L2 mixed-norm as
regularizer.
The optimization objective for MultiTaskSCADnet is::
(1 / (2 * n_samples)) * ||Y - XW||_Fro^2
+ alpha * SCAD_ratio * \\sum_i SCAD(||W_i||_1, gam)
+ 0.5 * alpha * (1 - scad_ratio) * ||W||_Fro^2
Where::
SCAD(||W_i||_1, gam) =
i.e. the sum of SCAD of the l1 norm of each row.
Read more in the :ref:`User Guide <multi_task_elastic_net>`.
Parameters
----------
alpha : float, default=1.0
Constant that multiplies the SCAD/L2 term. Defaults to 1.0.
: float, default=0.5
The MultiSCADnet mixing parameter, with 0 < scad_ratio <= 1.
For scad_ratio = 1 the penalty is an SCAD/L2 penalty. For scad_ratio = 0 it
is an L2 penalty.
For ``0 < scad_ratio < 1``, the penalty is a combination of SCAD/L1 and L2.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, default=1000
The maximum number of iterations.
tol : float, default=1e-4
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, default=False
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
random_state : int, RandomState instance, default=None
The seed of the pseudo random number generator that selects a random
feature to update. Used when ``selection`` == 'random'.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
selection : {'cyclic', 'random'}, default='cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
Attributes
----------
intercept_ : ndarray of shape (n_tasks,)
Independent term in decision function.
coef_ : ndarray of shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array.
Note that ``coef_`` stores the transpose of ``W``, ``W.T``.
n_iter_ : int
Number of iterations run by the coordinate descent solver to reach
the specified tolerance.
dual_gap_ : float
The dual gaps at the end of the optimization.
eps_ : float
The tolerance scaled scaled by the variance of the target `y`.
sparse_coef_ : sparse matrix of shape (n_features,) or \
(n_tasks, n_features)
Sparse representation of the `coef_`.
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X and y arguments of the fit
method should be directly passed as Fortran-contiguous numpy arrays.
"""
# @_deprecate_positional_args
def __init__(self, alpha=1.0, *, scad_ratio=0.5, gam=3.7, task_sparsity = 'group-l1', fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.scad_ratio = scad_ratio
self.gam = gam
self.alpha = alpha
self.task_sparsity = task_sparsity
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit SCADNet model with coordinate descent
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data.
y : ndarray of shape (n_samples, n_tasks)
Target. Will be cast to X's dtype if necessary.
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if (self.task_sparsity not in ['group-l1', 'group-l2']) or (not isinstance(self.task_sparsity, str)):
raise ValueError("task_sparsity should be a string, either 'group-l1' or 'group-l2'; "
f"got gam={self.task_sparsity}. ")
task_sparsity_gl1 = (self.task_sparsity == 'group-l1')
if (not isinstance(self.gam, numbers.Number) or
self.gam < 2):
raise ValueError("gam parameter must be greater than 2; "
f"got gam={self.gam}. ")
# Need to validate separately here.
# We can't pass multi_ouput=True because that would allow y to be csr.
check_X_params = dict(dtype=[np.float64, np.float32], order='F',
copy=self.copy_X and self.fit_intercept)
check_y_params = dict(ensure_2d=False, order='F')
X, y = self._validate_data(X, y, validate_separately=(check_X_params,
check_y_params))
y = y.astype(X.dtype)
if hasattr(self, 'scad_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or not hasattr(self, "coef_"):
self.coef_ = np.zeros((n_tasks, n_features), dtype=X.dtype.type,
order='F')
scad_reg = self.alpha * self.scad_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.scad_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
scad_cd_fast.scad_coordinate_descent_multi_task(
self.coef_, scad_reg, l2_reg, self.gam,
X, y,
self.max_iter, self.tol, check_random_state(self.random_state), random, task_sparsity_gl1)
# account for different objective scaling here and in cd_fast
self.dual_gap_ /= n_samples
self._set_intercept(X_offset, y_offset, X_scale)
# return self for chaining fit and predict calls
return self
def _more_tags(self):
return {'multioutput_only': True}
| [
"54889281+Clej@users.noreply.github.com"
] | 54889281+Clej@users.noreply.github.com |
ed9a98749599fa918cd6108cf1017d3f5d6ab8ab | 3a47f59b5b51470ccc8bd5db1de1daea53d0b39c | /experiments/dyadic/amazon_crawler/amazon_crawler/settings.py | 703192f30b6a3edaa107b51d703b5db1d7d10bc2 | [] | no_license | gegetang/compatibility-family-learning | c9c9218d4c90b8fea2ab9733c103f670e9367979 | d760068388b3e8fd6c58a883137ba3db7c9ffa29 | refs/heads/master | 2020-04-14T01:56:28.060115 | 2018-01-08T05:51:19 | 2018-01-23T05:58:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | # -*- coding: utf-8 -*-
# Scrapy settings for amazon_crawler project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# Project identity: used by Scrapy for logging and the default User-Agent string.
BOT_NAME = 'amazon_crawler'
# Where Scrapy discovers spider classes, and where `scrapy genspider` creates new ones.
SPIDER_MODULES = ['amazon_crawler.spiders']
NEWSPIDER_MODULE = 'amazon_crawler.spiders'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# The S3 path to store items (set when deploying with S3-backed storage)
#ITEMS_STORE = ...
# The S3 path to store images
#IMAGES_STORE = ...
# AWS S3 Keys -- keep real credentials out of source control (use env vars or a local settings override)
#AWS_ACCESS_KEY_ID = ...
#AWS_SECRET_ACCESS_KEY = ...
| [
"yongsiang.shih@appier.com"
] | yongsiang.shih@appier.com |
a4f7ad0e605de064bcf4e4489cdaed329f43483d | 5f1d1ba201c209022f1c3b12b2afa9429a23f7ea | /main.py | d283f8e78f042af754df23192e5f8cb655b13a91 | [] | no_license | DSchana/Art-Assemblage-and-Appreciator | 61927b321662faf72efb83aa5da6c9504f79d3a5 | f41be8571a22a26cb9eb315aed341f14f186ec4f | refs/heads/master | 2023-02-03T00:23:54.441047 | 2020-12-17T23:54:44 | 2020-12-17T23:54:44 | 321,794,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,460 | py | from typing import Optional, List
from fastapi import FastAPI, status, HTTPException
from fastapi.responses import JSONResponse
from models.user import User, UserAuth
from models.art import Art, Assemblage
import os
import json
import uvicorn
art = FastAPI()
art_lock = False
art_json = {}
### Users
@art.post("/api/v1/user", status_code=status.HTTP_201_CREATED)
async def register_user(user_info: User):
    """Create new user and return an auth token. If the user already exists, this will also generate a
    new auth token for the user and invalidate the old one."""
    user_info.generateToken()
    user_info.encryptPassword()
    # Re-registration path: keep the user's existing art collections, but only
    # when the supplied (encrypted) password matches the stored one.
    existing = art_json["users"].get(user_info.username)
    if existing is not None:
        if user_info.password != existing["password"]:
            raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="username or password are incorrect")
        user_info.the_art = existing["the_art"]
    # Store (or overwrite) the record; the fresh token invalidates the old one.
    art_json["users"][user_info.username] = user_info.__dict__
    save_the_art()
    return { "token": user_info.token }
@art.put("/api/v1/user", status_code=status.HTTP_200_OK)
async def update_user(user_auth: UserAuth, new_user_info: User):
    """Change username or password using an auth token. Username and password are both
    needed in the `new_user_info`, even if only one is changing.

    Fix: when the username changes, the record stored under the old username is
    removed. Previously the old entry lingered, leaving a stale duplicate
    account that still accepted the old credentials and double-counted the
    user's art collections.
    """
    s, d = user_auth.authorize(art_json)
    if s != status.HTTP_200_OK:
        raise HTTPException(status_code=s, detail=d)
    new_user_info.encryptPassword()
    # Move the record to its new key (a no-op re-insert when the username is
    # unchanged) and keep the embedded fields consistent with the stored dict.
    record = art_json["users"].pop(user_auth.username)
    record["username"] = new_user_info.username
    record["password"] = new_user_info.password
    art_json["users"][new_user_info.username] = record
    save_the_art()
@art.delete("/api/v1/user", status_code=status.HTTP_200_OK)
async def remove_user(user_auth: UserAuth):
    """Delete user and all of their art collections."""
    s, d = user_auth.authorize(art_json)
    if s != status.HTTP_200_OK:
        raise HTTPException(status_code=s, detail=d)
    # Drop every assemblage the user owns, then the account itself.
    for assemblage_id in art_json["users"][user_auth.username]["the_art"]:
        del art_json["arts"][assemblage_id]
    del art_json["users"][user_auth.username]
    save_the_art()
@art.get("/api/v1/user/{username}", status_code=status.HTTP_200_OK)
async def get_user(username: str, token: str):
    """Get user info with authentication."""
    record = art_json["users"].get(username)
    if record is None:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="user does not exist")
    if token != record["token"]:
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="invalid auth token")
    # Only expose the public fields, never the stored password/token.
    return {
        "username": username,
        "the_art": record["the_art"]
    }
### The Art
@art.post("/api/v1/assemblage", status_code=status.HTTP_201_CREATED)
async def create_assemblage(user_auth: UserAuth, a_name: str):
    """Create new art assemblage under the authenticating user with the name `a_name`."""
    s, d = user_auth.authorize(art_json)
    if s != status.HTTP_200_OK:
        raise HTTPException(status_code=s, detail=d)
    # Register the new (empty) assemblage and link it to its owner.
    new_assemblage = Assemblage()
    new_assemblage.name = a_name
    art_json["arts"][new_assemblage.id] = new_assemblage.__dict__
    art_json["users"][user_auth.username]["the_art"].append(new_assemblage.id)
    save_the_art()
    return { "id": new_assemblage.id }
@art.delete("/api/v1/assemblage", status_code=status.HTTP_200_OK)
async def delete_assemblage(user_auth: UserAuth, aid: str):
    """Delete assemblage with `id = aid` if the authenticating user owns it."""
    s, d = user_auth.authorize(art_json)
    if s != status.HTTP_200_OK:
        raise HTTPException(status_code=s, detail=d)
    owned = art_json["users"][user_auth.username]["the_art"]
    if aid not in owned:
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="This user does not own the collection of art")
    # Unlink from the owner first, then discard the assemblage itself.
    owned.remove(aid)
    del art_json["arts"][aid]
    save_the_art()
@art.put("/api/v1/assemblage/{aid}", status_code=status.HTTP_200_OK)
async def update_assemblage(aid: str, user_auth: UserAuth, a_name: str):
    """Change the name of the assemblage."""
    s, d = user_auth.authorize(art_json)
    if s != status.HTTP_200_OK:
        raise HTTPException(status_code=s, detail=d)
    # Ownership check: only the owner may rename a collection.
    if aid not in art_json["users"][user_auth.username]["the_art"]:
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="This user does not own the collection of art")
    art_json["arts"][aid]["name"] = a_name
    save_the_art()
@art.post("/api/v1/assemblage/{aid}", status_code=status.HTTP_201_CREATED)
async def add_art(aid: str, user_auth: UserAuth, art_list: List[Art]):
    """Add works of art to assemblage by reference hosted location."""
    s, d = user_auth.authorize(art_json)
    if s != status.HTTP_200_OK:
        raise HTTPException(status_code=s, detail=d)
    if aid not in art_json["users"][user_auth.username]["the_art"]:
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="This user does not own the collection of art")
    # Append each submitted work (stored as a plain dict) to the assemblage.
    collection = art_json["arts"][aid]["art"]
    for piece in art_list:
        collection.append(piece.__dict__)
    save_the_art()
@art.delete("/api/v1/assemblage/{aid}", status_code=status.HTTP_200_OK)
async def remove_art(aid: str, user_auth: UserAuth, art_list: List[str]):
    """Delete works of art by name or hosted location"""
    s, d = user_auth.authorize(art_json)
    if s != status.HTTP_200_OK:
        raise HTTPException(status_code=s, detail=d)
    if aid not in art_json["users"][user_auth.username]["the_art"]:
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="This user does not own the collection of art")
    collection = art_json["arts"][aid]["art"]
    # For each identifier, mark only the FIRST piece whose name or source matches.
    doomed = []
    for identifier in art_list:
        for idx, piece in enumerate(collection):
            if identifier == piece["name"] or identifier == piece["src"]:
                doomed.append(idx)
                break
    # Delete from the back so earlier indices stay valid.
    for idx in sorted(doomed, reverse=True):
        del collection[idx]
    save_the_art()
### Public access
# No need for an account to view art; art is for everyone
@art.get("/api/v1/assemblage", status_code=status.HTTP_200_OK)
async def get_assemblage(_id: str = "", name: str = None):
    """Get full assemblage of art works by name or id."""
    matches = []
    for assemblage_id, assemblage in art_json["arts"].items():
        # Match on exact id, or on substring of the collection name.
        by_id = assemblage_id == _id
        by_name = name is not None and name in assemblage["name"]
        if by_id or by_name:
            matches.append(assemblage)
    return matches
@art.get("/api/v1/art", status_code=status.HTTP_200_OK)
async def get_art(name: str = None, src: str = None, tags: List[str] = []):
    """Find individual works of art by name, hosting source or descriptive tag.

    A work matches if any of the given criteria matches, and each matching
    work is returned at most once. Fixes: previously a work was duplicated in
    the result once per matching tag, and an omitted `src` (None) could match
    works whose stored source happened to be null.
    """
    ret = []
    for assemblage in art_json["arts"].values():
        for art in assemblage["art"]:
            # Match on name substring or exact hosting source.
            if (name is not None and name in art["name"]) or \
                    (src is not None and art["src"] == src):
                ret.append(art)
            elif any(tag in art["tags"] for tag in tags):
                # Tag match: append once even if several tags match.
                ret.append(art)
    return ret
def save_the_art():
    """Persist the in-memory `art_json` store to its JSON file on disk.

    Spin-waits on the module-level `art_lock` flag so two overlapping calls do
    not interleave their writes.
    """
    # NOTE(review): this check-then-set on a plain bool is not atomic, so the
    # "lock" does not truly exclude concurrent writers under threading --
    # confirm the app only runs these handlers serially, or replace the flag
    # with a real threading.Lock.
    global art_lock
    while (art_lock):
        continue
    art_lock = True
    with open("databases/art_assemblage.json", 'w') as f:
        f.write(json.dumps(art_json))
    art_lock = False
# Check for storage files. Use stdlib calls instead of shelling out via
# os.system("mkdir"/"touch"), which is non-portable and spawns a shell.
if not os.path.exists("databases"):
    os.makedirs("databases")
if not os.path.exists("databases/art_assemblage.json"):
    # Create an empty data file (the stdlib equivalent of `touch`).
    open("databases/art_assemblage.json", 'a').close()
# Load data file from storage into memory for faster access
# For the small scale use this project will see, a JSON file is much faster and optimal.
# However, for scaling I would convert this to an SQL database
with open("databases/art_assemblage.json", 'r') as f:
    try:
        art_json = json.loads(f.read().strip())
    except ValueError:
        # Empty or corrupt file (json.JSONDecodeError is a ValueError):
        # start from a fresh, empty store.
        art_json = {}
# Guarantee the two top-level tables exist.
art_json.setdefault("users", {})
art_json.setdefault("arts", {})
save_the_art()
if __name__ == "__main__":
    uvicorn.run(art)
| [
"dschana6@gmail.com"
] | dschana6@gmail.com |
5802522e4965030c8e5f570f5d2332f19194e9f3 | 3ef0c1c5302922fe65d81553dd995ba13f868eb1 | /04_fgcmcal/fgcmHscCalibrations_cycle03_config.py | 2fe0434a2f060b16abcbd562fb5949911d7f1f9d | [] | no_license | lsst-dm/s20-hsc-pdr2-reprocessing | 5899cb71740c42301e0dce31441b7dcbbcb38dd3 | b60fdc58e144b18577946714ad88ed84bad6167a | refs/heads/main | 2023-07-24T21:09:03.264853 | 2023-07-10T00:59:30 | 2023-07-10T00:59:30 | 237,534,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,261 | py | import lsst.fgcmcal.fgcmFitCycle
assert type(config)==lsst.fgcmcal.fgcmFitCycle.FgcmFitCycleConfig, 'config is of type %s.%s instead of lsst.fgcmcal.fgcmFitCycle.FgcmFitCycleConfig' % (type(config).__module__, type(config).__name__)
import lsst.fgcmcal.sedterms
# Bands to run calibration (in wavelength order)
config.bands=['N387', 'g', 'r', 'i', 'N816', 'z', 'N921', 'y']
# Flag for which bands are directly constrained in the FGCM fit. Bands set to 0 will have the atmosphere constrained from observations in other bands on the same night. Must be same length as config.bands, and matched band-by-band.
config.fitFlag=[1, 1, 1, 1, 1, 1, 1, 1]
# Flag for which bands are required for a star to be considered a calibration star in the FGCM fit. Typically this should be the same as fitFlag. Must be same length as config.bands, and matched band-by-band.
config.requiredFlag=[0, 0, 0, 0, 0, 0, 0, 0]
# Mapping from 'filterName' to band.
config.filterMap={'g': 'g', 'r': 'r', 'r2': 'r', 'i': 'i', 'i2': 'i', 'z': 'z', 'y': 'y', 'N387': 'N387', 'N816': 'N816', 'N921': 'N921'}
# Use reference catalog as additional constraint on calibration
config.doReferenceCalibration=True
# Reference star signal-to-noise minimum to use in calibration. Set to <=0 for no cut.
config.refStarSnMin=50.0
# Number of sigma compared to average mag for reference star to be considered an outlier. Computed per-band, and if it is an outlier in any band it is rejected from fits.
config.refStarOutlierNSig=4.0
# Apply color cuts to reference stars?
config.applyRefStarColorCuts=True
# Number of cores to use
config.nCore=12
# Number of stars to run in each chunk
config.nStarPerRun=200000
# Number of exposures to run in each chunk
config.nExpPerRun=1000
# Fraction of stars to reserve for testing
config.reserveFraction=0.1
# Freeze atmosphere parameters to standard (for testing)
config.freezeStdAtmosphere=False
# Precompute superstar flat for initial cycle
config.precomputeSuperStarInitialCycle=False
# Compute superstar flat on sub-ccd scale
config.superStarSubCcd=True
# Order of the 2D chebyshev polynomials for sub-ccd superstar fit. Global default is first-order polynomials, and should be overridden on a camera-by-camera basis depending on the ISR.
config.superStarSubCcdChebyshevOrder=2
# Should the sub-ccd superstar chebyshev matrix be triangular to suppress high-order cross terms?
config.superStarSubCcdTriangular=False
# Number of sigma to clip outliers when selecting for superstar flats
config.superStarSigmaClip=5.0
# Compute CCD gray terms on sub-ccd scale
config.ccdGraySubCcd=True
# Order of the 2D chebyshev polynomials for sub-ccd gray fit.
config.ccdGraySubCcdChebyshevOrder=1
# Should the sub-ccd gray chebyshev matrix be triangular to suppress high-order cross terms?
config.ccdGraySubCcdTriangular=True
# FGCM fit cycle number. This is automatically incremented after each run and stage of outlier rejection. See cookbook for details.
config.cycleNumber=3
# Is this the final cycle of the fitting? Will automatically compute final selection of stars and photometric exposures, and will output zeropoints and standard stars for use in fgcmOutputProducts
config.isFinalCycle=False
# Maximum fit iterations, prior to final cycle. The number of iterations will always be 0 in the final cycle for cleanup and final selection.
config.maxIterBeforeFinalCycle=75
# Boundary (in UTC) from day-to-day
config.utBoundary=0.0
# Mirror wash MJDs
config.washMjds=[56650.0, 57500.0, 57700.0, 58050.0]
# Epoch boundaries in MJD
config.epochMjds=[56650.0, 57420.0, 57606.0, 59000.0]
# Minimum good observations per band
config.minObsPerBand=2
# Observatory latitude
config.latitude=19.8256
# Maximum gray extinction to be considered bright observation
config.brightObsGrayMax=0.15
# Minimum number of good stars per CCD to be used in calibration fit. CCDs with fewer stars will have their calibration estimated from other CCDs in the same visit, with zeropoint error increased accordingly.
config.minStarPerCcd=5
# Minimum number of good CCDs per exposure/visit to be used in calibration fit. Visits with fewer good CCDs will have CCD zeropoints estimated where possible.
config.minCcdPerExp=5
# Maximum error on CCD gray offset to be considered photometric
config.maxCcdGrayErr=0.05
# Minimum number of good stars per exposure/visit to be used in calibration fit. Visits with fewer good stars will have CCD zeropoints estimated where possible.
config.minStarPerExp=100
# Minimum number of good exposures/visits to consider a partly photometric night
config.minExpPerNight=3
# Maximum exposure/visit gray value for initial selection of possible photometric observations.
config.expGrayInitialCut=-0.25
# Maximum (negative) exposure gray for a visit to be considered photometric. Must be same length as config.bands, and matched band-by-band.
config.expGrayPhotometricCut=[-0.0325, -0.0125, -0.0125, -0.015, -0.0125, -0.0225, -0.025, -0.025]
# Maximum (positive) exposure gray for a visit to be considered photometric. Must be same length as config.bands, and matched band-by-band.
config.expGrayHighCut=[0.04, 0.0175, 0.015, 0.0225, 0.02, 0.03, 0.035, 0.0325]
# Maximum (negative) exposure gray to be able to recover bad ccds via interpolation. Visits with more gray extinction will only get CCD zeropoints if there are sufficient star observations (minStarPerCcd) on that CCD.
config.expGrayRecoverCut=-1.0
# Maximum exposure variance to be considered possibly photometric
config.expVarGrayPhotometricCut=0.0025
# Maximum exposure gray error to be able to recover bad ccds via interpolation. Visits with more gray variance will only get CCD zeropoints if there are sufficient star observations (minStarPerCcd) on that CCD.
config.expGrayErrRecoverCut=0.05
# Number of aperture bins used in aperture correction fit. When set to 0no fit will be performed, and the config.aperCorrInputSlopes will be used if available.
config.aperCorrFitNBins=10
# Aperture correction input slope parameters. These are used on the first fit iteration, and aperture correction parameters will be updated from the data if config.aperCorrFitNBins > 0. It is recommended to set thiswhen there is insufficient data to fit the parameters (e.g. tract mode). If set, must be same length as config.bands, and matched band-by-band.
config.aperCorrInputSlopes=[-1.0, -1.1579, -1.3908, -1.1436, -1.8149, -1.6974, -1.331, -1.2057]
config.sedboundaryterms.data={}
config.sedboundaryterms.data['gr']=lsst.fgcmcal.sedterms.Sedboundaryterm()
# name of primary band
config.sedboundaryterms.data['gr'].primary='g'
# name of secondary band
config.sedboundaryterms.data['gr'].secondary='r'
config.sedboundaryterms.data['ri']=lsst.fgcmcal.sedterms.Sedboundaryterm()
# name of primary band
config.sedboundaryterms.data['ri'].primary='r'
# name of secondary band
config.sedboundaryterms.data['ri'].secondary='i'
config.sedboundaryterms.data['iz']=lsst.fgcmcal.sedterms.Sedboundaryterm()
# name of primary band
config.sedboundaryterms.data['iz'].primary='i'
# name of secondary band
config.sedboundaryterms.data['iz'].secondary='z'
config.sedboundaryterms.data['zy']=lsst.fgcmcal.sedterms.Sedboundaryterm()
# name of primary band
config.sedboundaryterms.data['zy'].primary='z'
# name of secondary band
config.sedboundaryterms.data['zy'].secondary='y'
config.sedboundaryterms.data['N387g']=lsst.fgcmcal.sedterms.Sedboundaryterm()
# name of primary band
config.sedboundaryterms.data['N387g'].primary='N387'
# name of secondary band
config.sedboundaryterms.data['N387g'].secondary='g'
config.sedboundaryterms.data['N816i']=lsst.fgcmcal.sedterms.Sedboundaryterm()
# name of primary band
config.sedboundaryterms.data['N816i'].primary='N816'
# name of secondary band
config.sedboundaryterms.data['N816i'].secondary='i'
config.sedboundaryterms.data['N921z']=lsst.fgcmcal.sedterms.Sedboundaryterm()
# name of primary band
config.sedboundaryterms.data['N921z'].primary='N921'
# name of secondary band
config.sedboundaryterms.data['N921z'].secondary='z'
config.sedterms.data={}
config.sedterms.data['g']=lsst.fgcmcal.sedterms.Sedterm()
# Name of primary Sedboundaryterm
config.sedterms.data['g'].primaryTerm='gr'
# Name of secondary Sedboundaryterm
config.sedterms.data['g'].secondaryTerm='ri'
# Extrapolate to compute SED slope
config.sedterms.data['g'].extrapolated=False
# Adjustment constant for SED slope
config.sedterms.data['g'].constant=1.6
# Primary band name for extrapolation
config.sedterms.data['g'].primaryBand=None
# Secondary band name for extrapolation
config.sedterms.data['g'].secondaryBand=None
# Tertiary band name for extrapolation
config.sedterms.data['g'].tertiaryBand=None
config.sedterms.data['r']=lsst.fgcmcal.sedterms.Sedterm()
# Name of primary Sedboundaryterm
config.sedterms.data['r'].primaryTerm='gr'
# Name of secondary Sedboundaryterm
config.sedterms.data['r'].secondaryTerm='ri'
# Extrapolate to compute SED slope
config.sedterms.data['r'].extrapolated=False
# Adjustment constant for SED slope
config.sedterms.data['r'].constant=0.9
# Primary band name for extrapolation
config.sedterms.data['r'].primaryBand=None
# Secondary band name for extrapolation
config.sedterms.data['r'].secondaryBand=None
# Tertiary band name for extrapolation
config.sedterms.data['r'].tertiaryBand=None
config.sedterms.data['i']=lsst.fgcmcal.sedterms.Sedterm()
# Name of primary Sedboundaryterm
config.sedterms.data['i'].primaryTerm='ri'
# Name of secondary Sedboundaryterm
config.sedterms.data['i'].secondaryTerm='iz'
# Extrapolate to compute SED slope
config.sedterms.data['i'].extrapolated=False
# Adjustment constant for SED slope
config.sedterms.data['i'].constant=1.0
# Primary band name for extrapolation
config.sedterms.data['i'].primaryBand=None
# Secondary band name for extrapolation
config.sedterms.data['i'].secondaryBand=None
# Tertiary band name for extrapolation
config.sedterms.data['i'].tertiaryBand=None
config.sedterms.data['z']=lsst.fgcmcal.sedterms.Sedterm()
# Name of primary Sedboundaryterm
config.sedterms.data['z'].primaryTerm='iz'
# Name of secondary Sedboundaryterm
config.sedterms.data['z'].secondaryTerm='zy'
# Extrapolate to compute SED slope
config.sedterms.data['z'].extrapolated=False
# Adjustment constant for SED slope
config.sedterms.data['z'].constant=1.0
# Primary band name for extrapolation
config.sedterms.data['z'].primaryBand=None
# Secondary band name for extrapolation
config.sedterms.data['z'].secondaryBand=None
# Tertiary band name for extrapolation
config.sedterms.data['z'].tertiaryBand=None
config.sedterms.data['y']=lsst.fgcmcal.sedterms.Sedterm()
# Name of primary Sedboundaryterm
config.sedterms.data['y'].primaryTerm='zy'
# Name of secondary Sedboundaryterm
config.sedterms.data['y'].secondaryTerm='iz'
# Extrapolate to compute SED slope
config.sedterms.data['y'].extrapolated=True
# Adjustment constant for SED slope
config.sedterms.data['y'].constant=0.25
# Primary band name for extrapolation
config.sedterms.data['y'].primaryBand='y'
# Secondary band name for extrapolation
config.sedterms.data['y'].secondaryBand='z'
# Tertiary band name for extrapolation
config.sedterms.data['y'].tertiaryBand='i'
config.sedterms.data['N387']=lsst.fgcmcal.sedterms.Sedterm()
# Name of primary Sedboundaryterm
config.sedterms.data['N387'].primaryTerm='N387g'
# Name of secondary Sedboundaryterm
config.sedterms.data['N387'].secondaryTerm=None
# Extrapolate to compute SED slope
config.sedterms.data['N387'].extrapolated=False
# Adjustment constant for SED slope
config.sedterms.data['N387'].constant=1.0
# Primary band name for extrapolation
config.sedterms.data['N387'].primaryBand=None
# Secondary band name for extrapolation
config.sedterms.data['N387'].secondaryBand=None
# Tertiary band name for extrapolation
config.sedterms.data['N387'].tertiaryBand=None
config.sedterms.data['N816']=lsst.fgcmcal.sedterms.Sedterm()
# Name of primary Sedboundaryterm
config.sedterms.data['N816'].primaryTerm='N816i'
# Name of secondary Sedboundaryterm
config.sedterms.data['N816'].secondaryTerm=None
# Extrapolate to compute SED slope
config.sedterms.data['N816'].extrapolated=False
# Adjustment constant for SED slope
config.sedterms.data['N816'].constant=1.0
# Primary band name for extrapolation
config.sedterms.data['N816'].primaryBand=None
# Secondary band name for extrapolation
config.sedterms.data['N816'].secondaryBand=None
# Tertiary band name for extrapolation
config.sedterms.data['N816'].tertiaryBand=None
config.sedterms.data['N921']=lsst.fgcmcal.sedterms.Sedterm()
# Name of primary Sedboundaryterm
config.sedterms.data['N921'].primaryTerm='N921z'
# Name of secondary Sedboundaryterm
config.sedterms.data['N921'].secondaryTerm=None
# Extrapolate to compute SED slope
config.sedterms.data['N921'].extrapolated=False
# Adjustment constant for SED slope
config.sedterms.data['N921'].constant=1.0
# Primary band name for extrapolation
config.sedterms.data['N921'].primaryBand=None
# Secondary band name for extrapolation
config.sedterms.data['N921'].secondaryBand=None
# Tertiary band name for extrapolation
config.sedterms.data['N921'].tertiaryBand=None
# Maximum mag error for fitting sigma_FGCM
config.sigFgcmMaxErr=0.01
# Maximum (absolute) gray value for observation in sigma_FGCM. May be 1 element (same for all bands) or the same length as config.bands.
config.sigFgcmMaxEGray=[0.05, 0.05, 0.05, 0.05, 0.05, 0.15, 0.15, 0.15]
# Maximum error on a star observation to use in ccd gray computation
config.ccdGrayMaxStarErr=0.1
# Approximate overall throughput at start of calibration observations. May be 1 element (same for all bands) or the same length as config.bands.
config.approxThroughput=[1.0]
# Allowed range for systematic error floor estimation
config.sigmaCalRange=[0.001, 0.003]
# Magnitude percentile range to fit systematic error floor
config.sigmaCalFitPercentile=[0.05, 0.15]
# Magnitude percentile range to plot systematic error floor
config.sigmaCalPlotPercentile=[0.05, 0.95]
# Systematic error floor for all zeropoints
config.sigma0Phot=0.003
# Reference longitude for plotting maps
config.mapLongitudeRef=0.0
# Healpix nside for plotting maps
config.mapNSide=256
# Filename start for plot output files
config.outfileBase='fgcmHscCalibrations'
# Encoded star-color cuts (to be cleaned up)
config.starColorCuts=['g,r,-0.25,2.25', 'r,i,-0.50,2.25', 'i,z,-0.50,1.00', 'g,i,0.0,3.5']
# Band indices to use to split stars by color
config.colorSplitIndices=[1, 3]
# Should FGCM model the magnitude errors from sky/fwhm? (False means trust inputs)
config.modelMagErrors=True
# Model PWV with a quadratic term for variation through the night?
config.useQuadraticPwv=False
# Model instrumental parameters per band? Otherwise, instrumental parameters (QE changes with time) are shared among all bands.
config.instrumentParsPerBand=True
# Minimum time change (in days) between observations to use in constraining instrument slope.
config.instrumentSlopeMinDeltaT=20.0
# Fit (intraband) mirror chromatic term?
config.fitMirrorChromaticity=False
# Mirror coating dates in MJD
config.coatingMjds=[56650.0, 58050.0]
# Output standard stars prior to final cycle? Used in debugging.
config.outputStandardsBeforeFinalCycle=False
# Output standard stars prior to final cycle? Used in debugging.
config.outputZeropointsBeforeFinalCycle=False
# Use star repeatability (instead of exposures) for computing photometric cuts? Recommended for tract mode or bands with few exposures. May be 1 element (same for all bands) or the same length as config.bands.
config.useRepeatabilityForExpGrayCuts=[False, False, False, False, False, True, True, True]
# Number of sigma for automatic computation of (low) photometric cut. Cut is based on exposure gray width (per band), unless useRepeatabilityForExpGrayCuts is set, in which case the star repeatability is used (also per band).
config.autoPhotometricCutNSig=3.0
# Number of sigma for automatic computation of (high) outlier cut. Cut is based on exposure gray width (per band), unless useRepeatabilityForExpGrayCuts is set, in which case the star repeatability is used (also per band).
config.autoHighCutNSig=4.0
# Be less verbose with logging.
config.quietMode=False
| [
"hchiang@lsst.org"
] | hchiang@lsst.org |
570e791962616acf2b90d808f402aaea2ee15533 | e1834bce67d20e73d10eb4533584d635f2840782 | /onir/datasets/nyt.py | 14b837a35362df7067ee673b9743e46b16f78994 | [
"MIT"
] | permissive | tgeral68/OpenNIR | f1d8361c1543fda401386ee5d87ecb14766c16da | 225b26185bd67fdc00f24de3ef70d35768e22243 | refs/heads/master | 2023-02-27T02:06:28.357884 | 2021-02-08T16:22:09 | 2021-02-08T16:22:09 | 327,644,600 | 0 | 0 | MIT | 2021-02-08T15:55:28 | 2021-01-07T15:03:22 | null | UTF-8 | Python | false | false | 16,788 | py | import os
import tarfile
import contextlib
import functools
from glob import glob
from multiprocessing import Pool
from pytools import memoize_method
from bs4 import BeautifulSoup
import onir
from onir import datasets, util, indices, log, config
from onir.interfaces import trec, plaintext
logger = log.easy()
_HELD_OUT_IDS = {'1206388', '46335', '1223589', '1642970', '144845', '420493', '1186325', '564166', '1092844', '1232733', '243508', '946470', '1147459', '84957', '87385', '1298633', '1327402', '1482333', '1069716', '1575477', '1110091', '655579', '1562062', '541298', '1571257', '639395', '1341710', '663400', '1174700', '1406944', '1368755', '1315376', '1609162', '1746895', '1447812', '193348', '882027', '213652', '126658', '799474', '1677212', '1254313', '43743', '250901', '426439', '1803638', '1111630', '1220244', '1142672', '944176', '860862', '342011', '1556809', '1574691', '292048', '855559', '1473717', '157893', '252570', '305646', '198014', '1444467', '1842149', '161276', '455333', '146910', '1414339', '1413851', '1352725', '509114', '563685', '1738087', '1115555', '639541', '427073', '1435887', '862324', '476212', '870108', '315852', '144389', '684154', '845724', '117999', '35935', '716125', '1818546', '551762', '687923', '1817616', '135841', '618338', '1597113', '1549790', '1292666', '147051', '1778945', '1347630', '1337511', '299371', '1384273', '388274', '938995', '263847', '195638', '303927', '646946', '1620311', '1455534', '325463', '1380230', '1038853', '1040633', '1831119', '363686', '260491', '1611855', '147526', '542544', '581106', '1766627', '899656', '236785', '1408409', '300748', '742732', '986023', '1662861', '1083296', '152722', '1458233', '1203328', '1810235', '996231', '1226680', '427277', '517560', '1230947', '185677', '1524891', '492603', '1023515', '334223', '1219069', '1021319', '152336', '1227959', '1501876', '765819', '395940', '524179', '1494335', '66871', '105130', '1660760', '744794', '1616161', '876120', '714837', '35529', '42617', '198139', '1811671', '147293', '1041065', '841417', '1346509', '200467', '850536', '1235945', '184078', '1269259', '1314141', '1368414', '387436', '896464', '84650', '375608', '423014', '1201696', '883245', '137547', '1376881', '1207160', '280170', '968570', '1438840', '626732', '1085071', '632127', 
'1206647', '399973', '1316303', '1187122', '805546', '1727291', '570037', '1178896', '555992', '977573', '1340396', '632958', '63542', '1280664', '977205', '1567169', '783676', '814977', '1668678', '1735184', '1074278', '1652858', '1108702', '955404', '1784962', '1185130', '250831', '818408', '623624', '134405', '104342', '965709', '956076', '1260229', '27255', '1500603', '1127679', '1722973', '1734641', '309555', '1681934', '695555', '48767', '433808', '995051', '180797', '123367', '378006', '1216681', '324683', '1711346', '211935', '1801492', '103678', '446767', '594334', '860460', '660793', '1393998', '266826', '876460', '994066', '1282229', '1587147', '815344', '1103826', '343997', '1200405', '179480', '742314', '1780439', '1066709', '1330760', '1368900', '1549318', '1110897', '619788', '188464', '173770', '34154', '578909', '645650', '1157537', '62836', '700552', '1388063', '408649', '848686', '1694615', '1617883', '1765655', '1466678', '155464', '1445513', '1303273', '231804', '581627', '742052', '1212886', '1405769', '481040', '1855639', '54259', '111905', '1313586', '387001', '1185491', '1670617', '906527', '69825', '499522', '1819890', '164762', '970999', '1179216', '993221', '372699', '296270', '1185999', '792835', '1037962', '1740374', '1624046', '954664', '368818', '1087747', '1026355', '812422', '1544110', '1226870', '155570', '1190376', '869921', '296349', '595907', '614301', '1241703', '442373', '995807', '1369864', '1709789', '114305', '184927', '1120202', '584073', '828184', '1473187', '1521230', '440704', '1013610', '1830313', '721770', '1658974', '313921', '692325', '368461', '985252', '290240', '1251117', '1538562', '422046', '1630032', '1181653', '125066', '1837263', '1656997', '441', '490006', '1643057', '165954', '69049', '1199388', '1507218', '1329673', '509136', '1466695', '16687', '508419', '268880', '969961', '340902', '253378', '256155', '863620', '1683671', '1560798', '675553', '1748098', '458865', '1665924', '1055150', '66385', 
'215071', '13148', '986080', '236365', '517825', '873311', '441741', '720189', '572737', '1225926', '624119', '997868', '515426', '691257', '419206', '1130476', '100471', '6461', '1807548', '1544601', '407787', '380030', '1152266', '1065150', '694778', '811554', '1854529', '444117', '1099590', '922315', '1217477', '1779802', '369061', '775743', '72992', '144419', '552889', '1181556', '1292830', '1778514', '1489202', '914269', '1706337', '1196929', '184181', '314027', '1227737', '559948', '784834', '1704396', '1256508', '1508836', '317087', '96486', '747998', '1632274', '950708', '1649807', '446890', '593993', '814566', '1292672', '560408', '1077779', '978883', '393982', '844217', '398230', '183055', '53060', '1210135', '916178', '1532407', '1139738', '1518821', '728959', '1304148', '491724', '1568275', '712403', '1728481', '660217', '821176', '1222683', '1778005', '1195123', '1817074', '974513', '426701', '1111638', '1240027', '1664639', '1464379', '521007', '1199739', '578456', '1439699', '284928', '494919', '491912', '232568', '923474', '99386', '1643092', '1790124', '1061993', '621986', '1122877', '100662', '1473138', '1030173', '71586', '1096287', '1138157', '262640', '602945', '1300130', '1338721', '1270177', '39801', '1692635', '56624', '211659', '1646283', '324374', '255385', '1255526', '1786203', '1406143', '1788514', '289251', '672936', '452286', '137862', '185683', '1430', '1380422', '845912', '775802', '647375', '145796', '355527', '146542', '1410218', '345442', '190717', '371036', '1797336', '120994', '1718571', '1054043', '4558', '428059', '1396897', '1201117', '1158485', '1089656', '519981', '43015', '520964', '1494349', '1094063', '1392684', '978574', '1052143', '1118795', '1687088', '1314160', '162771', '911024', '1820168', '1192318', '91766', '143489', '1004985', '518421', '166275', '370104', '974150', '546915', '1323563', '1798085', '938123', '182313', '1364401', '9506', '557187', '112370', '611777', '1159485', '1403348', '683930', '797900', 
'1383582', '114608', '350383', '1604331', '568871', '1047323', '394651', '165898', '283949', '810556', '105425', '1013875', '1464119', '1312394', '1695169', '58536', '1169598', '1125874', '1665958', '769476', '594319', '683707', '882361', '1302321', '450679', '254550', '1033539', '1301128', '1320428', '41154', '1657029', '1227578', '171871', '1792745', '288902', '453868', '271254', '409591', '143722', '535764', '1830350', '578047', '230266', '111402', '773754', '1245031', '1350576', '1624207', '1807992', '1015799', '1794740', '511024', '789525', '319777', '1132669', '1327710', '1272568', '1390168', '1533260', '617767', '638910', '496086', '1205039', '1626665', '191596', '1810513', '1556267', '1100153', '207238', '1501543', '834402', '279588', '568816', '1632682', '822260', '343317', '430137', '1768788', '545282', '279954', '165473', '828347', '1470816', '1327112', '1529515', '1016007', '270386', '1702078', '286404', '1088273', '1322387', '1643857', '489043', '380855', '1083556', '1619528', '583350', '132853', '546862', '1253587', '535138', '264437', '943235', '1620828', '1006607', '553760', '828792', '1624460', '1434951', '833541', '212690', '200229', '1064862', '220330', '1579543', '363926', '1258350', '1184051', '720391', '1459592', '457690', '38548', '81369', '1679222', '390074', '286007', '378270', '816642', '283001', '372084', '411601', '910971', '1590440', '135775', '1112005', '75424', '213834', '689492', '1005355', '1139329', '808335', '720425', '1267233', '263546', '1222854', '258056', '837513', '940506', '1103175', '1378900', '1385626', '237112', '730612', '301649', '273771', '497029', '736059', '1193481', '797044', '1144902', '1030001', '719277', '1119289', '1337197', '942773', '982474', '584235', '1707268', '1754255', '1104478', '1534921', '128481', '470969', '347013', '509587', '408644', '772685', '1733430', '1317735', '848134', '404829', '267884', '953680', '1303696', '884333', '968388', '1201708', '1112434', '303328', '1304264', '1133757', '1724836', 
'1334405', '1829066', '925761', '946016', '552534', '943383', '1100246', '1846843', '1088146', '544438', '1753939', '74810', '1807078', '100915', '1236323', '803592', '429972', '393687', '1378937', '456043', '1613185', '613184', '417913', '1563559', '1339387', '1502489', '656071', '365604', '1151482', '1259752', '277596', '673808', '161493', '873580', '832327', '260612', '924572', '1064547', '1125330', '1641045', '1151695', '256879', '394244', '556588', '1305678', '1263185', '136826', '1399892', '557148', '1358190', '1776190', '249236', '1492533', '1303288', '521017', '1066272', '541133', '1623539', '137859', '687241', '237814', '1369332', '371264', '24081', '1552898', '1502059', '1047404', '1023221', '177279', '1267817', '1411135', '191656', '980600', '951516', '499404', '1695509', '811244', '238763', '1284303', '585143', '1033260', '942257', '1349353', '1429932', '140492', '1044892', '418808', '698145', '1796223', '59227', '194957', '269275', '730734', '1145222', '253742', '581098', '45351', '66070', '426605', '1050966', '529688', '1801056', '1718077', '1266182', '129555', '1531233', '74473', '302447', '215843', '792070', '1104761', '1573381', '202553', '60314', '1503921', '280964', '711987', '136821', '832921', '1419515', '1662966', '1819530', '716942', '219736', '436016', '1735969', '713752', '60858', '121707', '689812', '193395', '1624062', '1330056', '563645', '1492653', '1449544', '376209', '1750188', '1478352', '410699', '777880', '1029514', '108914', '720269', '1448513', '74549', '972109', '215002', '404357', '1647764', '550693', '1255375', '1293865', '1264570', '896848', '789563', '826347', '903589', '1018558', '277290', '1683375', '1496790', '1112399', '860557', '127350', '1015623', '312660', '233953', '1565217', '1639977', '1607902', '397905', '490534', '1513419', '174443', '1215224', '66269', '275494', '209655', '516500', '1675849', '836893', '947869', '789401', '1553981', '155710', '496679', '821652', '1139493', '286234', '128146', '1207153', 
'1199733', '1778364', '1704065', '326315', '317132', '1824346', '319345', '1219375', '99297', '1850878', '755324', '1737932', '1556261', '1389561', '128767', '24850', '1105008', '1046487', '390245', '899371', '623036', '1190883', '1218126', '334762', '1496567', '1228970', '540795', '689403', '1465965', '1585171', '734591', '1257610', '685476', '784313', '1178416', '1468942', '883627', '1000719', '952670', '51709', '933442'}
@datasets.register('nyt')
class NytDataset(datasets.IndexBackedDataset):
    """
    Interface to the New York Times (NYT) dataset, useful for content-based weak supervision.
    > Sean MacAvaney, Andrew Yates, Kai Hui, Ophir Frieder. Content-Based Weak Supervision for
    > Ad-Hoc Re-Ranking. SIGIR 2019.
    """
    DUA = """Will begin downloading Robust04 dataset.
Copy or link NYT source file directory (contains data/1987/01.tgz, data/1987/02.tgz, ...) to:
{ds_path}/nyt_corpus
Please confirm you agree to the authors' data usage stipulations found at
https://catalog.ldc.upenn.edu/LDC2008T19"""

    @staticmethod
    def default_config():
        result = datasets.IndexBackedDataset.default_config()
        result.update({
            'subset': config.Choices(['main', 'heldout']),
        })
        return result

    def __init__(self, config, vocab):
        super().__init__(config, logger, vocab)
        base_path = util.path_dataset(self)
        # Three views of the same corpus: unstemmed index, Porter-stemmed index, raw doc store.
        self.index = indices.AnseriniIndex(os.path.join(base_path, 'anserini'), stemmer='none')
        self.index_stem = indices.AnseriniIndex(os.path.join(base_path, 'anserini.porter'), stemmer='porter')
        self.doc_store = indices.SqliteDocstore(os.path.join(base_path, 'docs.sqllite'))

    def _get_index(self, record):
        return self.index

    def _get_docstore(self):
        return self.doc_store

    def _get_index_for_batchsearch(self):
        return self.index_stem

    def qrels(self, fmt='dict'):
        return self._load_qrels(self.config['subset'], fmt=fmt)

    def _load_run_base(self, index, subset, rankfn, ranktopk, fmt='dict', fscache=False, memcache=True):
        return super()._load_run_base(index, subset, rankfn, ranktopk, fmt, fscache, memcache)

    @memoize_method
    def _load_qrels(self, subset, fmt):
        with logger.duration('loading qrels'):
            base_path = util.path_dataset(self)
            path = os.path.join(base_path, f'{subset}.qrels')
            return trec.read_qrels_fmt(path, fmt)

    def load_queries(self) -> dict:
        return self._load_queries_base(self.config['subset'])

    @memoize_method
    def _load_queries_base(self, subset):
        with logger.duration('loading queries'):
            base_path = util.path_dataset(self)
            path = os.path.join(base_path, f'{subset}.queries')
            return dict(plaintext.read_tsv(path))

    def pair_iter_pos_candidates_intersect(self, qrels_fn, run_fn, pos_minrel):
        # overrides
        # simply removes anything that doesn't retrieve itself in the ranktopk results
        run = run_fn()
        return run[run['qid'] == run['did']]

    def pair_iter_neg_candidates_run(self, qrels_fn, run_fn, unjudged_rel):
        # overrides
        return run_fn()

    def pair_iter_neg_candidates_union(self, qrels_fn, run_fn, unjudged_rel):
        # overrides
        raise ValueError('unsupported operation')

    def pair_iter_neg_candidates_qrels(self, qrels_fn, run_fn, unjudged_rel):
        # overrides
        raise ValueError('unsupported operation')

    def init(self, force=False):
        """Build indices, query files, and qrels files (once), streaming the corpus a single time."""
        path = util.path_dataset(self)
        needs_collection = []
        for index in [self.index, self.index_stem, self.doc_store]:
            if force or not index.built():
                needs_collection.append(index.build)
        for subset in ['main', 'heldout']:
            is_heldout = (subset == 'heldout')
            query_file = os.path.join(path, f'{subset}.queries')
            if force or not os.path.exists(query_file):
                needs_collection.append(self._init_build_queryfile(query_file, is_heldout))
            qrels_file = os.path.join(path, f'{subset}.qrels')
            # fix: previously re-tested query_file here, so missing qrels were never built
            if force or not os.path.exists(qrels_file):
                needs_collection.append(self._init_build_qrels(qrels_file, is_heldout))
        if needs_collection and self._confirm_dua():
            # Fan the single corpus iterator out to every consumer, each on its own thread.
            with contextlib.ExitStack() as stack:
                collection_iter = logger.pbar(self._init_iter_corpus(), desc='collection')
                sub_iters = util.blocking_tee(collection_iter, len(needs_collection))
                for fn, it in zip(needs_collection, sub_iters):
                    stack.enter_context(util.CtxtThread(functools.partial(fn, it)))

    def _init_iter_corpus(self):
        """Yield parsed docs from every data/YYYY/MM.tgz archive, parsing XML in a process pool."""
        nyt_corpus_dir = os.path.join(util.path_dataset(self), 'nyt_corpus')
        with Pool(onir.util.safe_thread_count()) as pool:
            for tgz_file in sorted(glob(f'{nyt_corpus_dir}/*/*.tgz')):
                logger.debug(f'reading file {tgz_file}')
                for doc in pool.imap(_parse_file, _read_tarfile(tgz_file)):
                    if doc:
                        yield doc

    def _init_build_queryfile(self, file, is_heldout):
        """Return a consumer that writes (did, headline) TSV rows for the chosen split."""
        def wrapped(it):
            with util.finialized_file(file, 'wt') as f:
                for doc in it:
                    if is_heldout == (doc.did in _HELD_OUT_IDS):
                        plaintext.write_tsv(f, [(doc.did, doc.data['headline'])])
        return wrapped

    def _init_build_qrels(self, file, is_heldout):
        """Return a consumer that writes self-relevance qrels (each doc relevant to itself)."""
        def wrapped(it):
            with util.finialized_file(file, 'wt') as f:
                for doc in it:
                    if is_heldout == (doc.did in _HELD_OUT_IDS):
                        trec.write_qrels_dict(f, {doc.did: {doc.did: 1}})
        return wrapped
def _parse_file(text):
    """Parse one NYT XML document; return a RawDoc, or None if it lacks an id/body/headline."""
    soup = BeautifulSoup(text, 'lxml-xml')
    did = soup.find('doc-id')
    if did is None:
        return None
    did = did['id-string']
    content = soup.find_all('block')
    headline = soup.find('hl1')  # 'headline' element can contain multiple (e.g. hl2 for online)
    if content and headline:
        content = content[-1].get_text()  # last <block> holds the full article text
        headline = headline.get_text()
        return indices.misc.RawDoc(did, text=content, headline=headline.strip())
    return None
def _read_tarfile(tgz_fn):
with tarfile.open(tgz_fn, 'r') as tgz:
for member in tgz.getmembers():
if member.isfile():
yield tgz.extractfile(member).read()
| [
"sean.macavaney@gmail.com"
] | sean.macavaney@gmail.com |
803fd511cf8216badd7699cc5ab9371a42411d21 | e863d44139f345eb256847a133b14966c3a81148 | /work/lastSubString.py | 95b9cbe706cfa5524c750ae00f502b97e28ea9ac | [
"MIT"
] | permissive | sreeaurovindh/code_sprint | 4d8b4fbce753e4ae463e0a0f2bd730ba91649dc7 | 4c37becbdfb49d5b8f942190006c2d27769da282 | refs/heads/master | 2021-05-01T15:50:59.975559 | 2020-12-30T04:32:57 | 2020-12-30T04:32:57 | 121,039,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | #https://leetcode.com/problems/last-substring-in-lexicographical-order/discuss/369191/JavaScript-with-Explanation-no-substring-comparison-fast-O(n)-time-O(1)-space.-(Credit-to-nate17)
def lastSubstring(input):
    """Return the lexicographically largest suffix of `input`.

    Two-pointer scan in O(n) time / O(1) space: `start` is the best candidate
    suffix so far, `skip` is the challenger, `end` is the length of their
    common prefix. (Parameter name shadows the builtin `input`; kept for
    caller compatibility.)
    """
    start = end = 0
    skip = 1
    while (skip + end) < len(input):
        if input[start + end] == input[skip + end]:
            # Still matching -- extend the shared prefix.
            end += 1
        elif input[start + end] < input[skip + end]:
            # Challenger wins; new candidate starts at or past the mismatch.
            start = max(start + end + 1, skip)
            skip = start + 1
            end = 0
        else:
            # Candidate still wins; challenger restarts past the mismatch.
            skip = skip + end + 1
            end = 0
    return input[start:]


print(lastSubstring('zyxbzyxc'))  # demo: prints 'zyxc'
"sree@Srees-MacBook-Pro.local"
] | sree@Srees-MacBook-Pro.local |
536e8eda7de1c4a381f2c709fa56729cfbf19ee7 | 04b1803adb6653ecb7cb827c4f4aa616afacf629 | /native_client_sdk/src/build_tools/tests/verify_filelist_test.py | 2e01da1c93e9b3e5b6743a0e4d6f71f712de429d | [
"BSD-3-Clause"
] | permissive | Samsung/Castanets | 240d9338e097b75b3f669604315b06f7cf129d64 | 4896f732fc747dfdcfcbac3d442f2d2d42df264a | refs/heads/castanets_76_dev | 2023-08-31T09:01:04.744346 | 2021-07-30T04:56:25 | 2021-08-11T05:45:21 | 125,484,161 | 58 | 49 | BSD-3-Clause | 2022-10-16T19:31:26 | 2018-03-16T08:07:37 | null | UTF-8 | Python | false | false | 3,854 | py | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
# Make the parent build_tools directory importable: the module under test
# (verify_filelist.py) lives one level above this tests/ directory.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
sys.path.append(BUILD_TOOLS_DIR)
import verify_filelist
def Verify(platform, rules_contents, directory_list):
  """Parse |rules_contents| for |platform| and check |directory_list| against it.

  Raises verify_filelist.ParseException or VerifyException on failure.
  """
  rules = verify_filelist.Rules('test', platform, rules_contents)
  rules.VerifyDirectoryList(directory_list)
class VerifyFilelistTestCase(unittest.TestCase):
  """Unit tests for the file-list rule parser/verifier (verify_filelist.py)."""

  def testBasic(self):
    rules = """\
foo/file1
foo/file2
foo/file3
bar/baz/other
"""
    dirlist = ['foo/file1', 'foo/file2', 'foo/file3', 'bar/baz/other']
    Verify('linux', rules, dirlist)

  def testGlob(self):
    rules = 'foo/*'
    dirlist = ['foo/file1', 'foo/file2', 'foo/file3/and/subdir']
    Verify('linux', rules, dirlist)

  def testPlatformVar(self):
    rules = 'dir/${PLATFORM}/blah'
    dirlist = ['dir/linux/blah']
    Verify('linux', rules, dirlist)

  def testPlatformVarGlob(self):
    rules = 'dir/${PLATFORM}/*'
    dirlist = ['dir/linux/file1', 'dir/linux/file2']
    Verify('linux', rules, dirlist)

  def testPlatformRule(self):
    rules = """\
[linux]dir/linux/only
all/platforms
"""
    linux_dirlist = ['dir/linux/only', 'all/platforms']
    other_dirlist = ['all/platforms']
    Verify('linux', rules, linux_dirlist)
    Verify('mac', rules, other_dirlist)

  def testMultiPlatformRule(self):
    rules = """\
[linux,win]dir/no/macs
all/platforms
"""
    nonmac_dirlist = ['dir/no/macs', 'all/platforms']
    mac_dirlist = ['all/platforms']
    Verify('linux', rules, nonmac_dirlist)
    Verify('win', rules, nonmac_dirlist)
    Verify('mac', rules, mac_dirlist)

  def testPlatformRuleBadPlatform(self):
    rules = '[frob]bad/platform'
    self.assertRaises(verify_filelist.ParseException, Verify,
                      'linux', rules, [])

  def testMissingFile(self):
    rules = """\
foo/file1
foo/missing
"""
    dirlist = ['foo/file1']
    self.assertRaises(verify_filelist.VerifyException, Verify,
                      'linux', rules, dirlist)

  def testExtraFile(self):
    rules = 'foo/file1'
    dirlist = ['foo/file1', 'foo/extra_file']
    self.assertRaises(verify_filelist.VerifyException, Verify,
                      'linux', rules, dirlist)

  def testEmptyGlob(self):
    rules = 'foo/*'
    dirlist = ['foo']  # Directory existing is not enough!
    self.assertRaises(verify_filelist.VerifyException, Verify,
                      'linux', rules, dirlist)

  def testBadGlob(self):
    rules = '*/foo/bar'
    dirlist = []
    self.assertRaises(verify_filelist.ParseException, Verify,
                      'linux', rules, dirlist)

  def testUnknownPlatform(self):
    rules = 'foo'
    dirlist = ['foo']
    for platform in ('linux', 'mac', 'win'):
      Verify(platform, rules, dirlist)
    self.assertRaises(verify_filelist.ParseException, Verify,
                      'foobar', rules, dirlist)

  def testUnexpectedPlatformFile(self):
    rules = '[mac,win]foo/file1'
    dirlist = ['foo/file1']
    self.assertRaises(verify_filelist.VerifyException, Verify,
                      'linux', rules, dirlist)

  def testWindowsPaths(self):
    if os.path.sep != '/':
      rules = 'foo/bar/baz'
      dirlist = ['foo\\bar\\baz']
      Verify('win', rules, dirlist)
    else:
      rules = 'foo/bar/baz\\foo'
      dirlist = ['foo/bar/baz\\foo']
      Verify('linux', rules, dirlist)

  def testNestedGlobs(self):
    rules = """\
foo/*
foo/bar/*"""
    dirlist = ['foo/file', 'foo/bar/file']
    Verify('linux', rules, dirlist)

    rules = """\
foo/bar/*
foo/*"""
    dirlist = ['foo/file', 'foo/bar/file']
    Verify('linux', rules, dirlist)
if __name__ == '__main__':
  unittest.main()
| [
"sunny.nam@samsung.com"
] | sunny.nam@samsung.com |
acc27c596245e4d7cc567d15066edcddd718ffee | eae5c9829e1ed45f8761379d0fc88216ecc884bf | /mainapp/templatetags/specifications.py | 80da9ad96dac257688f9e80e737279dfef4258e9 | [] | no_license | wildmaus/shopN | e125ca54c9fc6f77ae255b8c2295a189ec24f944 | b5f44604f9eef4e1842330482251f4051c034ac2 | refs/heads/master | 2023-08-24T13:35:14.851996 | 2021-11-02T09:47:16 | 2021-11-02T09:47:16 | 325,253,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,255 | py | from django import template
from django.utils.safestring import mark_safe
from mainapp.models import Smartphone
register = template.Library()  # registry for this module's template filters
# Opening HTML for the rendered specification table.
TABLE_HEAD = """
<table class="table">
<tbody>
"""
# Closing HTML for the rendered specification table.
TABLE_TAIL = """
</tbody>
</table>
"""
# One table row; .format()-ed with name= (label) and value= (attribute value).
TABLE_CONTENT = """
<tr>
<td>{name}</td>
<td>{value}</td>
</tr>
"""
# Maps product model_name -> {human-readable label (Russian): model attribute name}.
# NOTE: product_spec() mutates the 'smartphone' entry at runtime (SD-slot handling).
PRODUCT_SPEC = {
'notebook': {
'Диагональ': 'diagonal',
'Тип дисплея': 'display_type',
'Частота процессора': 'processor_freq',
'Оперативная память': 'ram',
'Видеокарта': 'video',
'Время работы аккумулятора': 'time_without_charge'
},
'smartphone': {
'Диагональ': 'diagonal',
'Тип дисплея': 'display_type',
'Разрешение экрана': 'resolution',
'Заряд аккумулятора': 'accum_volume',
'Оперативная память': 'ram',
'Наличие слота для SD карты': 'sd',
'Максимальный объем SD карты': 'sd_volume_max',
'Камера (МП)': 'main_cam_mp',
'Фронтальная камера (МП)': 'frontal_cam_mp'
}
}
def get_product_spec(product, model_name):
    """Render one TABLE_CONTENT row per PRODUCT_SPEC entry, reading values off `product`."""
    table_content = ''
    for name, value in PRODUCT_SPEC[model_name].items():
        table_content += TABLE_CONTENT.format(name=name, value=getattr(product, value))
    return table_content
@register.filter
def product_spec(product):
    """Template filter: render the HTML spec table for a product instance.

    For smartphones without an SD slot, the 'max SD volume' row is removed.
    NOTE: this mutates the module-level PRODUCT_SPEC dict, which
    get_product_spec() then reads -- the mutation is load-bearing.
    """
    model_name = product.__class__._meta.model_name
    if isinstance(product, Smartphone):
        if not product.sd:
            if 'Максимальный объем SD карты' in PRODUCT_SPEC['smartphone'].keys():
                PRODUCT_SPEC['smartphone'].pop('Максимальный объем SD карты', None)
        else:
            PRODUCT_SPEC['smartphone']['Максимальный объем SD карты'] = 'sd_volume_max'
    return mark_safe(TABLE_HEAD + get_product_spec(product, model_name) + TABLE_TAIL)
| [
"tagilmaus@gmail.com"
] | tagilmaus@gmail.com |
c1e2c8f2be5494462cd1924e83a6c93983a158ce | 9e9e0985789b51210c7fe315ae98949de8b23469 | /vyper/semantics/analysis/base.py | 449e6ca33884f5ad3fe588a64274abbbb3cdb0fa | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | vyperlang/vyper | ea614230edccf3424aad746f66874e1f32c55c57 | 158099b9c1a49b5472293c1fb7a4baf3cd015eb5 | refs/heads/master | 2023-08-30T02:18:48.923346 | 2023-08-27T02:59:27 | 2023-08-27T02:59:27 | 73,461,676 | 2,359 | 469 | NOASSERTION | 2023-09-14T13:05:24 | 2016-11-11T08:56:41 | Python | UTF-8 | Python | false | false | 8,346 | py | import enum
from dataclasses import dataclass
from typing import Dict, List, Optional
from vyper import ast as vy_ast
from vyper.exceptions import (
CompilerPanic,
ImmutableViolation,
StateAccessViolation,
VyperInternalException,
)
from vyper.semantics.data_locations import DataLocation
from vyper.semantics.types.base import VyperType
class _StringEnum(enum.Enum):
    """Enum base whose member values are their lowercased names, with total ordering
    by declaration position. Comparing against a different type raises CompilerPanic."""

    @staticmethod
    def auto():
        return enum.auto()

    # Must be first, or else won't work, specifies what .value is
    def _generate_next_value_(name, start, count, last_values):
        return name.lower()

    # Override ValueError with our own internal exception
    @classmethod
    def _missing_(cls, value):
        raise VyperInternalException(f"{value} is not a valid {cls.__name__}")

    @classmethod
    def is_valid_value(cls, value: str) -> bool:
        return value in set(o.value for o in cls)

    @classmethod
    def options(cls) -> List["_StringEnum"]:
        return list(cls)

    @classmethod
    def values(cls) -> List[str]:
        return [v.value for v in cls.options()]

    # Comparison operations
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, self.__class__):
            raise CompilerPanic("Can only compare like types.")
        return self is other

    # Python normally does __ne__(other) ==> not self.__eq__(other)
    def __lt__(self, other: object) -> bool:
        if not isinstance(other, self.__class__):
            raise CompilerPanic("Can only compare like types.")
        options = self.__class__.options()
        return options.index(self) < options.index(other)  # type: ignore

    def __le__(self, other: object) -> bool:
        return self.__eq__(other) or self.__lt__(other)

    def __gt__(self, other: object) -> bool:
        return not self.__le__(other)

    def __ge__(self, other: object) -> bool:
        return self.__eq__(other) or self.__gt__(other)
class FunctionVisibility(_StringEnum):
    """Whether a function is callable externally or only internally."""

    # TODO: these can just be enum.auto() right?
    EXTERNAL = _StringEnum.auto()
    INTERNAL = _StringEnum.auto()
class StateMutability(_StringEnum):
    """Function state-mutability levels, ordered from most to least restrictive."""

    # TODO: these can just be enum.auto() right?
    PURE = _StringEnum.auto()
    VIEW = _StringEnum.auto()
    NONPAYABLE = _StringEnum.auto()
    PAYABLE = _StringEnum.auto()

    @classmethod
    def from_abi(cls, abi_dict: Dict) -> "StateMutability":
        """
        Extract stateMutability from an entry in a contract's ABI
        """
        if "stateMutability" in abi_dict:
            return cls(abi_dict["stateMutability"])
        elif abi_dict.get("payable"):
            return StateMutability.PAYABLE
        elif "constant" in abi_dict and abi_dict["constant"]:
            return StateMutability.VIEW
        else:  # Assume nonpayable if neither field is there, or constant/payable not set
            return StateMutability.NONPAYABLE

    # NOTE: The state mutability nonpayable is reflected in Solidity by not
    # specifying a state mutability modifier at all. Do the same here.
class DataPosition:
    """Base class for a variable's concrete position within a data location."""

    _location: DataLocation
class CalldataOffset(DataPosition):
    """Position in calldata: a static offset plus an optional dynamic offset."""

    __slots__ = ("dynamic_offset", "static_offset")
    _location = DataLocation.CALLDATA

    def __init__(self, static_offset, dynamic_offset=None):
        self.static_offset = static_offset
        self.dynamic_offset = dynamic_offset

    def __repr__(self):
        if self.dynamic_offset is not None:
            return f"<CalldataOffset: static {self.static_offset}, dynamic {self.dynamic_offset})>"
        else:
            return f"<CalldataOffset: static {self.static_offset}, no dynamic>"
class MemoryOffset(DataPosition):
    """Position of a variable in memory."""

    __slots__ = ("offset",)
    _location = DataLocation.MEMORY

    def __init__(self, offset):
        self.offset = offset

    def __repr__(self):
        return f"<MemoryOffset: {self.offset}>"
class StorageSlot(DataPosition):
    """Position of a variable in storage (slot number)."""

    __slots__ = ("position",)
    _location = DataLocation.STORAGE

    def __init__(self, position):
        self.position = position

    def __repr__(self):
        return f"<StorageSlot: {self.position}>"
class CodeOffset(DataPosition):
    """Position of a variable in code (used for immutables)."""

    __slots__ = ("offset",)
    _location = DataLocation.CODE

    def __init__(self, offset):
        self.offset = offset

    def __repr__(self):
        return f"<CodeOffset: {self.offset}>"
@dataclass
class VarInfo:
    """
    VarInfo are objects that represent the type of a variable,
    plus associated metadata like location and constancy attributes

    Object Attributes
    -----------------
    is_constant : bool, optional
        If `True`, this is a variable defined with the `constant()` modifier
    """

    typ: VyperType
    location: DataLocation = DataLocation.UNSET
    is_constant: bool = False
    is_public: bool = False
    is_immutable: bool = False
    is_transient: bool = False
    is_local_var: bool = False
    decl_node: Optional[vy_ast.VyperNode] = None

    def __hash__(self):
        # identity hash: each VarInfo instance is distinct
        return hash(id(self))

    def __post_init__(self):
        # tracks writes to immutables (at most one assignment allowed)
        self._modification_count = 0

    def set_position(self, position: DataPosition) -> None:
        """Assign the concrete data position; may be set only once, and its
        location must agree with (or resolve) this variable's location."""
        if hasattr(self, "position"):
            raise CompilerPanic("Position was already assigned")
        if self.location != position._location:
            if self.location == DataLocation.UNSET:
                self.location = position._location
            else:
                raise CompilerPanic("Incompatible locations")
        self.position = position
@dataclass
class ExprInfo:
    """
    Class which represents the analysis associated with an expression
    """

    typ: VyperType
    var_info: Optional[VarInfo] = None
    location: DataLocation = DataLocation.UNSET
    is_constant: bool = False
    is_immutable: bool = False

    def __post_init__(self):
        # sanity check: when derived from a VarInfo, the duplicated fields must agree
        should_match = ("typ", "location", "is_constant", "is_immutable")
        if self.var_info is not None:
            for attr in should_match:
                if getattr(self.var_info, attr) != getattr(self, attr):
                    # fix: message was missing the f-prefix, so {attr}/{self} never interpolated
                    raise CompilerPanic(f"Bad analysis: non-matching {attr}: {self}")

    @classmethod
    def from_varinfo(cls, var_info: VarInfo) -> "ExprInfo":
        """Build an ExprInfo mirroring an existing VarInfo."""
        return cls(
            var_info.typ,
            var_info=var_info,
            location=var_info.location,
            is_constant=var_info.is_constant,
            is_immutable=var_info.is_immutable,
        )

    def copy_with_type(self, typ: VyperType) -> "ExprInfo":
        """
        Return a copy of the ExprInfo but with the type set to something else
        """
        to_copy = ("location", "is_constant", "is_immutable")
        fields = {k: getattr(self, k) for k in to_copy}
        return self.__class__(typ=typ, **fields)

    def validate_modification(self, node: vy_ast.VyperNode, mutability: StateMutability) -> None:
        """
        Validate an attempt to modify this value.

        Raises if the value is a constant or involves an invalid operation.

        Arguments
        ---------
        node : Assign | AugAssign | Call
            Vyper ast node of the modifying action.
        mutability: StateMutability
            The mutability of the context (e.g., pure function) we are currently in
        """
        if mutability <= StateMutability.VIEW and self.location == DataLocation.STORAGE:
            raise StateAccessViolation(
                f"Cannot modify storage in a {mutability.value} function", node
            )
        if self.location == DataLocation.CALLDATA:
            raise ImmutableViolation("Cannot write to calldata", node)
        if self.is_constant:
            raise ImmutableViolation("Constant value cannot be written to", node)
        if self.is_immutable:
            # immutables may only be assigned once, inside __init__
            if node.get_ancestor(vy_ast.FunctionDef).get("name") != "__init__":
                raise ImmutableViolation("Immutable value cannot be written to", node)
            # TODO: we probably want to remove this restriction.
            if self.var_info._modification_count:  # type: ignore
                raise ImmutableViolation(
                    "Immutable value cannot be modified after assignment", node
                )
            self.var_info._modification_count += 1  # type: ignore

        if isinstance(node, vy_ast.AugAssign):
            self.typ.validate_numeric_op(node)
| [
"noreply@github.com"
] | noreply@github.com |
0d6361a1c0ab589a30c8857539292b0ea2ba6f17 | 43dabf77afd5c44d55b465c1b88bf9a5e7c4c9be | /drawing_random_circles.py | be298cbf90b23e67ea008144b485fca1b94b056c | [] | no_license | geegatomar/OpenCV-Computer-Vision-Adrian-Rosebrock | cc81a990a481b5e4347dd97369b38479b46e55bc | daa579309010e6e7fefb004b878ffb26374401d0 | refs/heads/master | 2022-11-18T13:07:08.040483 | 2020-07-20T01:55:39 | 2020-07-20T01:55:39 | 280,987,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | import cv2
import numpy as np
# drawing 25 random circles
canvas = np.zeros((400, 400, 3), dtype="uint8")
for i in range(25):
    radius = np.random.randint(180)  # random radius in [0, 180) (comment previously said 100)
    centre = np.random.randint(0, 400, size=(2, ))
    color = np.random.randint(0, 255, size=(3, ))
    color = (int(color[0]), int(color[1]), int(color[2]))  # OpenCV needs native Python ints
    cv2.circle(canvas, tuple(centre), radius, tuple(color), 2)
cv2.imshow("MyCanvas", canvas)
cv2.waitKey(0)
| [
"geegatomar@gmail.com"
] | geegatomar@gmail.com |
b02b27006734f133f41bee3ab8a81bc2992bd3f2 | d74f4d0ba3bb3c76813fe6d765106a1036404c49 | /Roblox2021_HackerRank/q3.py | e2d904a4e0a628a49b73b05eb771e212cab6b09f | [] | no_license | TahaKhan8899/Coding-Practice | 235ed05f0233f02ee9563aad374bcef456d4e1d8 | 29bee7e09d7b6c25b536775ad4257ea09ed202c5 | refs/heads/master | 2021-10-20T10:37:30.679328 | 2021-10-07T14:40:23 | 2021-10-07T14:40:23 | 175,536,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 881 | py | #
# Complete the 'compressWord' function below.
#
# The function is expected to return a STRING.
# The function accepts following parameters:
# 1. STRING word
# 2. INTEGER k
#
def compressWord(word, k):
    """Repeatedly delete runs of k equal adjacent characters from `word`.

    Maintains a stack of [char, count] pairs; a pair is popped the moment its
    count reaches k (counts only grow past 1 via adjacency, so k <= 1 never
    pops -- same as the original behavior). Leftover debug prints removed.
    Returns the fully reduced string.
    """
    stk = []
    for ch in word:
        if stk and stk[-1][0] == ch:
            stk[-1][1] += 1
            if stk[-1][1] == k:
                stk.pop()
        else:
            stk.append([ch, 1])
    return ''.join(c * cnt for c, cnt in stk)


print(compressWord("aba", 2))
| [
"khant21@mcmaster.ca"
] | khant21@mcmaster.ca |
24040a1b52951cb5c8894c540fa340873bbc9d93 | 2b42629e41c2473658335a0b750308fbb6e08249 | /app/model/fit_model_vector.py | feafcb8887512958be91f9f74bb7eecc397817c1 | [] | no_license | letotefrank/nlp-sim | 4bb163f099e1e43cf43e3ac56badcff790223e5d | 314f9dd822f87256bc679ef11861daa9815da4da | refs/heads/master | 2020-03-28T12:29:41.578013 | 2018-11-30T07:38:25 | 2018-11-30T07:38:25 | 148,303,925 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,566 | py | from fastText import train_unsupervised
import config
import os
import json
import itertools
from app.model.data_process import data_process
def fit():
    """
    Train an unsupervised fastText model on the corpus and save local_model.bin;
    then compute a sentence vector for each preprocessed/tokenized line of
    pre_data.txt and persist an {id: vector} mapping as JSON.
    :return: fit result
    """
    # Preprocess data. Returns id_url_df, used to join results by (id, url).
    id_url_df = data_process()
    print('fit 数据预处理,分词完成')
    # fastText fit
    model = train_unsupervised(
        # input=os.path.join(config.jb_path, 'pre_data.txt'),
        input=os.path.join(config.jb_path, 'del_dp_text_data.txt'),
        model='skipgram',
        epoch=10,
        dim=300,
        # pretrainedVectors="{}/wiki.zh.vec".format(config.model_path),
        minCount=1
    )
    model.save_model("{}/local_model.bin".format(config.model_path))
    print('local_model.bin saved')
    vector_list = []
    with open(os.path.join('{}pre_data.txt'.format(config.jb_path))) as f:
        for line in f:
            line = line.replace('\n', '')
            vector = model.get_sentence_vector(line)
            vector_list.append(vector.tolist())
    # Assemble the pid -> vector dict (pairs ids with vectors positionally).
    pid = id_url_df['id'].values
    pid_list = pid.tolist()
    id_vec_dict = dict(itertools.zip_longest(pid_list, vector_list))
    # Persist (pid, vector) pairs as JSON.
    with open('{}id_vec_dict.json'.format(config.model_path), 'w') as outfile:
        json.dump(id_vec_dict, outfile)
    print('id_vec_dict.json 持久化完成')
    return 'fit success!!!'
| [
"frank.li@letote.cn"
] | frank.li@letote.cn |
d04ae994a53ff06417f846f19c0403d3bc065f10 | e5d83ede8521027b05d9b91c43be8cab168610e6 | /0x0B-python-input_output/1-number_of_lines.py | 1dfc5fcc64012fcf583f7f599a0cd5e13d80cbb1 | [] | no_license | Danielo814/holbertonschool-higher_level_programming | 8918c3a6a9c136137761d47c5162b650708dd5cd | 832b692529198bbee44d2733464aedfe650bff7e | refs/heads/master | 2020-03-28T11:09:00.343055 | 2019-02-22T03:33:54 | 2019-02-22T03:33:54 | 148,181,433 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | #!/usr/bin/python3
"""
1-number_of_lines module
"""
def number_of_lines(filename=""):
    """
    returns the number of lines of a text file
    """
    numlines = 0
    with open(filename, 'r', encoding='utf-8') as f:
        for line in f:
            numlines += 1
    return numlines
| [
"211@holbertonschool.com"
] | 211@holbertonschool.com |
10b90cedd70c0f884a769e49b3708e09ff50e374 | a1ae058be4e115fc943f4a97ff20bc6d9578b4e7 | /checklist/views.py | e9c8aa0d45aa25224212a043435508d0526a3dc1 | [] | no_license | twagner000/tristanwagner | 141925784e7b5f2aa0c2d1e5c945cf53bdcb0740 | e1b0d523831659a58a172b3ab5fe2ffebab04890 | refs/heads/master | 2023-01-04T19:41:41.044650 | 2019-10-03T06:56:00 | 2019-10-03T06:56:00 | 39,306,731 | 1 | 0 | null | 2023-01-04T07:54:11 | 2015-07-18T17:30:52 | Python | UTF-8 | Python | false | false | 2,060 | py | from django.shortcuts import render, get_object_or_404, redirect
from .models import Checklist, AnsweredChecklist
def index(request):
    """List all checklists."""
    checklist_list = Checklist.objects.all()
    return render(request, 'checklist/index.html', {'checklist_list': checklist_list})
def take(request, checklist_id):
    """Display a blank checklist form for the user to fill in; 404 if missing."""
    checklist = get_object_or_404(Checklist, pk=checklist_id)
    return render(request, 'checklist/take.html', {'checklist': checklist})
def history(request, checklist_id):
    """Show the current user's past answers for one checklist (newest first).

    Anonymous users get the page without the answer list.
    """
    checklist = get_object_or_404(Checklist, pk=checklist_id)
    if request.user.is_authenticated():
        ans_checklist_list = AnsweredChecklist.objects.filter(ans_by=request.user).filter(checklist=checklist).order_by('-for_date')
        return render(request, 'checklist/history.html', {'checklist': checklist, 'ans_checklist_list': ans_checklist_list})
    return render(request, 'checklist/history.html', {'checklist': checklist})
def results(request, ans_checklist_id):
    """Show one answered checklist -- only to the user who answered it.

    Scoping get_object_or_404 to the user's own answers makes other users'
    results return 404 rather than leak. Anonymous users get an empty page.
    """
    if request.user.is_authenticated():
        ans_checklist_list = AnsweredChecklist.objects.filter(ans_by=request.user)
        ans_checklist = get_object_or_404(ans_checklist_list, pk=ans_checklist_id)
        return render(request, 'checklist/results.html', {'ans_checklist': ans_checklist})
    return render(request, 'checklist/results.html')
def submit(request, checklist_id):
    """Persist a completed checklist from POST data, then redirect to its results.

    Unauthenticated users are bounced back to the form with an error message.
    Unanswered questions default to score 0 with no comment.
    """
    checklist = get_object_or_404(Checklist, pk=checklist_id)
    if not request.user.is_authenticated():
        return render(request, 'checklist/take.html', {
            'checklist': checklist,
            'error_message': "You must be logged in to submit a completed checklist.",
        })
    ac = AnsweredChecklist(checklist=checklist, ans_by=request.user)
    ac.save()  # needs a pk before related answers can be created
    for gr in checklist.questiongroup_set.all():
        for qu in gr.question_set.all():
            ac.answeredquestion_set.create(question=qu, score=request.POST.get('q%d' % qu.id, default=0), comment=request.POST.get('qc%d' % qu.id, default=None))
    ac.save()
    return redirect('checklist:results', ans_checklist_id=ac.id)
| [
"twagner000@users.noreply.github.com"
] | twagner000@users.noreply.github.com |
2af51abc842ea604d18e6b1ee94c111c9bc35cb8 | d35c58b7ff808537dc7edb47606dd2cc2253dbdc | /hometask3/hometask3.py | 8fb6d0bcd29ed5bbb9dce89d2bd9cb508aa926be | [
"MIT"
] | permissive | karotkin/hometasks | e5f043d299043e949c3ed9e70213426785da917e | 709d51f89a70b5b2146ecfd7b33e0fea41a8b24c | refs/heads/master | 2021-01-21T08:36:54.600877 | 2016-08-25T13:10:21 | 2016-08-25T13:10:21 | 66,083,066 | 0 | 0 | null | 2016-08-19T13:08:27 | 2016-08-19T13:08:26 | null | UTF-8 | Python | false | false | 3,432 | py | #!/usr/bin/python
import psutil
import datetime
import configparser
import time
import json
import schedule
# Read the sampling interval (minutes) and output format from config.ini.
config = configparser.ConfigParser()
config.read('config.ini')
timeint = config.get('setup', 'int')
filetype = config.get('setup', 'filetype')
snapshot = 1  # global snapshot counter, incremented by the writer functions
# NOTE(review): every metric below is sampled exactly once, at import time,
# so each scheduled snapshot re-reports the same values — confirm whether
# per-snapshot sampling inside the writer functions was intended.
# CPU_Load
cpu_usage = psutil.cpu_percent(interval=1, percpu=True)
# Overall_Memory_Usage
mem_usage = psutil.Process().memory_info()[0]
# Hard_Disk_Memory_Usage
disk_usage = psutil.disk_usage('/')
# Overal_Virtual_Memory_Usage
total_memory = psutil.virtual_memory()
# IO_Information
disk_part = psutil.disk_io_counters(perdisk=False)
# Network_Information
net_inf = psutil.net_io_counters()
def write_to_txt(myfile='result.txt'):
    """Append one plain-text snapshot of system metrics to *myfile*.

    Fixes vs. the original:
      - metrics are sampled at call time (the module-level values were captured
        once at import, so every snapshot reported identical data);
      - the file is opened in append mode so earlier snapshots are preserved,
        consistent with write_to_json and the incrementing snapshot counter;
      - a ``with`` block guarantees the handle is closed even if a write fails.
    """
    global snapshot
    print("info >> top(SNAPSHOT {0})".format(snapshot))
    fmt = '%Y-%m-%d %H:%M:%S %Z'
    currenttime = datetime.datetime.now()
    tstmp = datetime.datetime.strftime(currenttime, fmt)
    # Sample fresh metrics for this snapshot.
    cpu = psutil.cpu_percent(interval=1, percpu=True)
    mem = psutil.Process().memory_info()[0]
    disk = psutil.disk_usage('/')
    vmem = psutil.virtual_memory()
    io_counters = psutil.disk_io_counters(perdisk=False)
    net = psutil.net_io_counters()
    with open(myfile, "a") as text_file:
        text_file.write("Snapshot {0}:, timestamp - {1}:\n".format(snapshot, tstmp))
        text_file.write("CPU: {0}\n".format(cpu[0]))
        text_file.write("MEM: {0}\n".format(mem/1048576))
        text_file.write("DISK TOTAL: {0}Mb\n".format(disk.total/1048576))
        text_file.write("DISK FREE: {0}Mb\n".format(disk.free/1048576))
        text_file.write("DISK USED: {0}Mb\n".format(disk.used/1048576))
        text_file.write("DISK PERCENT: {0}\n".format(disk.percent))
        text_file.write("TOTAL MEMORY: {}Mb\n".format(vmem.total/1048576))
        text_file.write("USED MEMORY: {}Mb\n".format(vmem.used/1048576))
        text_file.write("FREE MEMORY: {}Mb\n".format(vmem.free/1048576))
        text_file.write("MEMORY PERCENT: {0}\n".format(vmem.percent))
        text_file.write("DISK PARTS: {}\n".format(io_counters))
        text_file.write("BYTES RECIVED: {}\n".format(net.bytes_recv))
        text_file.write("BYTES SENT: {}\n".format(net.bytes_sent))
        text_file.write("PACKETS RECIVED: {}\n".format(net.packets_recv))
        text_file.write("PACKETS SENT: {}\n".format(net.packets_sent))
        text_file.write("\n")
    snapshot += 1
def mydict(kf):
    """Convert a namedtuple (e.g. psutil's ``disk_usage`` result) to a dict.

    Bug fix: the original zipped ``(values, fields)``, producing a
    value -> field-name mapping; the intended mapping is field -> value
    (that is what ``json.dump`` callers expect to serialize).
    """
    values = list(kf)
    fields = kf._fields
    return dict(zip(fields, values))
def write_to_json(myfile="result.json"):
    """Append one JSON-formatted snapshot of system metrics to *myfile*.

    Fixes vs. the original:
      - the *myfile* argument is honoured (the output path was hard-coded
        to "result.json" regardless of the parameter);
      - metrics are sampled at call time instead of reusing the module-level
        values captured once at import;
      - a ``with`` block guarantees the handle is closed even if a write fails.
    """
    global snapshot
    print("info >> top(SNAPSHOT {0})".format(snapshot))
    fmt = '%Y-%m-%d %H:%M:%S %Z'
    currtime = datetime.datetime.now()
    tstmp = datetime.datetime.strftime(currtime, fmt)
    # Sample fresh metrics for this snapshot.
    cpu = psutil.cpu_percent(interval=1, percpu=True)
    mem = psutil.Process().memory_info()[0]
    disk = psutil.disk_usage('/')
    io_counters = psutil.disk_io_counters(perdisk=False)
    net = psutil.net_io_counters()
    with open(myfile, "a") as jsonf:
        jsonf.write("\nSnapshot #{0}, Snapshot Time - {1}\n".format(snapshot, tstmp))
        jsonf.write("\nCPU\n")
        json.dump(cpu, jsonf, indent=1)
        jsonf.write("\nVMem Usage\n")
        json.dump(mem, jsonf, indent=1)
        jsonf.write("\nDisk Usage\n")
        json.dump(mydict(disk), jsonf, indent=1)
        jsonf.write("\nDisk Part\n")
        json.dump(mydict(io_counters), jsonf, indent=1)
        jsonf.write("\nNetInf\n")
        json.dump(net, jsonf, indent=1)  # namedtuple serializes as a JSON list
        jsonf.write("\n\n")
    snapshot += 1
# Register the writer matching the configured output format; exit if the
# config value is neither "txt" nor "json".
if filetype == "txt":
    print(filetype + ' in' + timeint + ' minute(s)')
    schedule.every(int(timeint)).minutes.do(write_to_txt)
elif filetype == "json":
    print(filetype + ' in' + timeint + ' minute(s)')
    schedule.every(int(timeint)).minutes.do(write_to_json)
else:
    print("check type in conf")
    quit()
# Main loop: run any due scheduled jobs, polling every 5 seconds.
while True:
    schedule.run_pending()
    time.sleep(5)
| [
"“yaroslav909@gmail.com”"
] | “yaroslav909@gmail.com” |
ce3a6f33c2f5edf47d8f33c41751a788409d2fad | 1c6bbd9223dce3f40765cb097e2d40a770c26319 | /myenv/Scripts/django-admin.py | 54b9f159656458777fce053021a294542693f12e | [] | no_license | db237/my-first-blog | a1521082b442a02f0491b1322c3b9c3ec3f2142e | a1e469d9e82fcd1a3d214e6fc7f429ffe2354fda | refs/heads/master | 2022-12-09T20:34:28.741319 | 2020-08-19T18:19:18 | 2020-08-19T18:19:18 | 288,805,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | #!c:\users\dev bhati\djangogirls\myenv\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"devbhati2307@gmail.com"
] | devbhati2307@gmail.com |
5bbdc724ce248b43af1343eeccc75057631580a6 | c2e7648fca2aebc9577b26b411b0da10609da3f5 | /Pb_policy_iteration/cart_pole_modified_env/reproduce_FHCP_results/modified_algo/parameter_config_6.py | de351ae624e309c2014e04efafb2db15a7dec42a | [] | no_license | PandulaP/RL_Research | c3a6d42b6b3dad52b7c8758cd853cfb6cd638c28 | 8170206ef9ad2c42d690b8cf2f90a9bded7c9418 | refs/heads/main | 2023-06-14T22:04:35.216509 | 2021-07-02T07:05:02 | 2021-07-02T07:05:02 | 318,289,678 | 1 | 0 | null | 2023-01-03T13:43:04 | 2020-12-03T18:51:49 | Jupyter Notebook | UTF-8 | Python | false | false | 40,403 | py | #######################################
### Tested Parameter Configurations ###
#######################################
# Hyper-parameter grid swept by run_experiment():
#   S            - number of sampled initial states
#   Actions      - size of the discretised action space
#   Roll-outs    - max roll-outs generated per action during preference evaluation
#   Significance - p-value threshold for the roll-out return t-test
CONFIGS = {'S': [100]
           , 'Actions' : [3]
           , 'Roll-outs': [20]
           , 'Significance' : [0.025, 0.05, 0.1]
           }
########################################
### importing the necessary packages ###
########################################
import gym
from gym import wrappers
import custom_cartpole # custom cart-pole environment
import numpy as np
import pandas as pd
import random
from scipy.stats import rankdata as rd
from scipy import stats
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
import matplotlib.pyplot as plt
import seaborn as sns
from IPython import display
from IPython.display import HTML
import io
import base64
import itertools
import tqdm
import os
########################################
########## helper functions ##########
#####################################
# generate a random action from a given environment
def random_action(environment, seed=10):
    """Sample a uniformly random action from *environment*'s action space.

    The *seed* parameter is retained for interface compatibility but is
    currently unused (the per-call seeding was intentionally disabled).
    """
    sampled = environment.action_space.sample()
    return sampled
# generate a list of initial states from a given environment
def generate_init_states_S(seed
                           , env = 'CustomCartPole-v0'
                           , sample_size = 10  # how many states to include in the sample
                           ):
    """Return `sample_size` distinct initial states sampled from random roll-outs.

    A random policy is run from the zero state; every visited non-terminal
    state is collected (the 7 states preceding each termination are discarded
    to avoid near-failure states), duplicates are removed, and a uniform
    random sample of `sample_size` states is returned.
    """
    # set the random seed for reproducibility
    np.random.seed(seed)
    # total number of candidate states to visit before sampling
    n_states = np.random.randint(low=201, high=301)
    # degenerate randint(low=k, high=k+1) always yields k == sample_size
    n_states_sample = np.random.randint(low=sample_size, high=sample_size+1)
    # accumulator for visited (candidate) initial states
    init_states_S = []
    # create the environment; the custom reset() accepts an explicit start state
    env = gym.make(env)
    env.action_space.np_random.seed(seed)  # env seeds for reproducibility
    env.seed(seed)
    env.reset(init_state = np.array([0,0,0,0]))
    # walk the environment under a random policy, collecting states
    s_count = 0
    while s_count < n_states:
        state, reward, done, info = env.step(env.action_space.sample())
        # On termination: restart from the zero state, drop the last 7 states
        # (too close to failure), and extend the walk by 7 steps to compensate.
        if done:
            env.reset(init_state = np.array([0,0,0,0]))
            n_states+=7
            init_states_S = init_states_S[:-7]
            continue
        init_states_S.append(state)
        s_count +=1
    env.close()
    # deduplicate: key each state by the string of its flattened components
    state_str_li = []
    for state in init_states_S:
        state_str_li.append("".join([str(item[0]) for item in [item.reshape(-1) for item in state.flatten()]]))
    uniq, uni_id = np.unique(state_str_li, return_index=True)
    init_states_S = [init_states_S[j] for j in uni_id]
    # uniform random sample of the required size
    sampled_states = random.sample(init_states_S, n_states_sample)
    return sampled_states
# partition the action space of a given environment
def partition_action_space(env_name:'string'
                           , n_actions:'int'):
    """Discretise a continuous 1-D action space into `n_actions` evenly spaced values."""
    environment = gym.make(env_name)
    low = environment.action_space.low[0,0]
    high = environment.action_space.high[0,0]
    return np.linspace(low, high, n_actions)
#####################################
### preference generation process ###
#####################################
def evaluate_preference(starting_state                          # starting state of roll-outs
                        , action_1                              # first action to execute at the starting-state
                        , action_2                              # second action to execute at the starting state
                        , policy_in                             # policy to follow after the first action
                        , environment_name = 'CustomCartPole-v0'  # name of the environment
                        , discount_fac = 1                      # discounting factor
                        , n_rollouts = 20                       # number of roll-outs to generate per action
                        , max_rollout_len = 1500                # maximum length of a roll-out
                        , label_ranker = False                  # whether to use the label-ranking model or not
                        , p_sig = 0.05                          # p-value for the t-test comparing roll-out returns
                        , tracking = False                      # print per-evaluation diagnostics
                        ):
    """Compare two actions at a state by the discounted returns of policy roll-outs.

    For each action, `n_rollouts` trajectories are generated by executing the
    (noise-perturbed) action once and then following `policy_in`. An unpaired
    t-test on the two return samples decides whether a preference is emitted.

    Returns a tuple ``(preference_dict, action_count)`` where `preference_dict`
    has keys state / a_j / a_k / preference_label (label 1 -> action_1
    preferred, 0 -> action_2 preferred, all-NaN -> no significant difference)
    and `action_count` is the number of policy actions executed (a cost metric).
    """
    # initializing variables
    policy = policy_in
    n_rollouts = n_rollouts
    gamma = discount_fac
    s_init = starting_state
    max_traj_len = max_rollout_len
    # number of actions executed within this evaluation (complexity measure)
    action_count = 0
    # the two candidate actions, keyed for the reward bookkeeping below
    actions = { 'one' : action_1
              , 'two' : action_2}
    # per-action list of roll-out returns
    r = { 'one' : [None]*n_rollouts
        , 'two' : [None]*n_rollouts}
    # per-action average discounted return
    avg_r = {}
    # generate roll-outs for each candidate action
    for action_key, action_value in actions.items():
        for rollout in range(n_rollouts):
            # fresh environment started at the input state
            # (custom env: reset() accepts an explicit starting state)
            env = gym.make(environment_name)
            env.reset(init_state=s_init)
            # uniform noise makes the first transition non-deterministic
            rand_act_noice = np.array([[np.random.uniform(low = -.2,high=.2)]])
            # execute the (clipped) candidate action
            observation, reward, done, info = env.step(np.clip(action_value + rand_act_noice,-1,1))
            # last observed state, fed to the label-ranking policy
            hist = observation
            # immediate reward of the candidate action
            r[action_key][rollout] = reward
            # follow the given policy until termination or the length cap
            traj_len = 1
            while traj_len < max_traj_len and not done:
                if label_ranker:
                    # LabelRanker policy: action chosen from the last observation
                    observation, reward, done, info = env.step(policy.label_ranking_policy(hist))
                    hist = observation
                    action_count+=1
                else:
                    # random policy: `policy` is a callable taking the env
                    observation, reward, done, info = env.step(policy(env))
                    action_count+=1
                # accumulate the discounted return of this roll-out
                r[action_key][rollout] += (gamma**traj_len) * reward
                traj_len += 1
            env.close()
            del env
        # average discounted return for this action
        avg_r[action_key] = sum(r[action_key]) / len(r[action_key])
    # unpaired t-test (equal variance assumed) on the two return samples
    t_val, p_val = stats.ttest_ind(r['one'],r['two'])
    # optional diagnostics
    if tracking:
        print(f"state: {[state_dim.reshape(-1)[0] for state_dim in [s_init[2],s_init[3][0][0]]]} | a_j(R): {avg_r['one']} | a_k(R): {avg_r['two']} | sig: {'Yes' if (p_val <= p_sig) else '--'}")
    # emit a preference only when the difference is statistically significant
    if (avg_r['one'] > avg_r['two']) and (p_val <= p_sig):
        return {'state': s_init
                , 'a_j' : actions['one']
                , 'a_k' : actions['two']
                , 'preference_label' : 1}, action_count
    elif(avg_r['one'] < avg_r['two']) and (p_val <= p_sig):
        return {'state': s_init
                , 'a_j' : actions['one']
                , 'a_k' : actions['two']
                , 'preference_label' : 0}, action_count
    # no significant difference (or a tie): NaN row, later dropped by train_model
    else:
        return {'state': np.nan
                , 'a_j' : np.nan
                , 'a_k' : np.nan
                , 'preference_label' : np.nan}, action_count
##########################################
### LabelRanker Model training process ###
##########################################
def train_model(train_data                 # collection of all preference data
                , action_space             # action space of the task
                , model_name:str           # name for the model (to store)
                , batch_s = 4              # batch size to train the NN model
                , mod_layers = [10]        # model configuration (hidden-layer sizes)
                , n_epochs = 1000          # num. of epochs to train the model
                , l_rate = 0.01            # learning rate for the optimization process
                , show_train_plot = False  # flag to display the 'training-loss vs. epoch' plot
                , show_dataset = False):   # flag to display the training dataset
    """Build a training set from preference data and fit the LabelRanker network.

    Each training sample maps a 2-D state (pendulum angle, pendulum velocity)
    to the normalized vector of per-action preference counts for that state,
    which serves as the action ranking the network learns to predict.

    Returns the trained model in eval() mode (saved to ./models/), or None
    when no usable preference data remains after dropping NaN evaluations.

    Fixes vs. the original:
      - ``np.float`` (removed in NumPy >= 1.24) replaced with builtin ``float``;
      - the label-vector column is assigned from an index-aligned Series
        instead of a 2-D ``(n, 1)`` object array, which newer pandas rejects.
    """
    ### creating the training dataset ###
    # drop rows with NaN, i.e. evaluations that produced no action preference
    train_df = pd.DataFrame(train_data).dropna()
    # key each state by its pendulum-angle / pendulum-velocity components
    train_df.loc[:, 'state_key'] = train_df.state.apply(lambda x: x[2].reshape(-1)[0].astype(str)+"_"+x[3].reshape(-1)[0].astype(str))
    # if no preference data survived, skip training entirely
    if not(train_df.shape[0]>0):
        return None
    else:
        ### computing action-preference counts per state ###
        # the preferred action of each evaluation row (label 1 -> a_j, else a_k)
        train_df.loc[:,'prefered_action'] = train_df.apply(lambda row: row['a_j'][0][0] if row['preference_label'] == 1 else row['a_k'][0][0] ,axis=1)
        # count how often each action is preferred at each state
        action_preference_counts = train_df.groupby('state_key').prefered_action.value_counts().unstack()
        action_preference_counts.replace(np.nan,0,inplace=True)  # never-preferred -> count 0
        action_preference_counts.columns.name = None
        # actions never preferred at ANY state must still appear in the label
        # vector, so add them with a zero count
        missed_actions = [action for action in action_space if action not in action_preference_counts.columns.tolist()]
        missed_actions = np.array(missed_actions).astype(action_preference_counts.columns.dtype)
        if len(missed_actions)>0:
            for action in missed_actions:
                action_preference_counts.loc[:,action] = 0
        # order columns to match the (ascending) action-space arrangement
        action_preference_counts = action_preference_counts.reindex(sorted(action_preference_counts.columns), axis=1)
        # collapse the per-action counts of each state into one list-valued
        # column (index-aligned Series; a raw (n,1) object array breaks on
        # modern pandas)
        action_preference_counts.loc[:, 'preference_label_vector'] = pd.Series(
            action_preference_counts.iloc[:, 0:].values.tolist(),
            index=action_preference_counts.index)
        # attach the label vectors to the evaluation rows
        train_df = train_df.merge(right = action_preference_counts.loc[:,['preference_label_vector']]
                                  , right_index= True
                                  , left_on = 'state_key'
                                  , how = 'left')
        # one row per state: drop helper columns and duplicate state rows
        train_df_reduced = train_df.loc[:,['state', 'state_key', 'preference_label_vector']]
        train_df_reduced.drop_duplicates(subset=['state_key'],inplace=True)
        train_df_reduced.preference_label_vector = train_df_reduced.preference_label_vector.apply(lambda row: np.array(row).astype(float))
        if show_dataset:
            print(f'Training data samples: {train_df_reduced.shape[0]}')
            print(train_df_reduced.loc[:,['state_key', 'preference_label_vector']])
        ### preparing tensors for the neural network (LabelRanker) ###
        # normalize the count vectors so each row sums to 1: the most-preferred
        # action(s) get the largest value(s) — this is the ranking target
        output_labels_temp = np.array(train_df_reduced.preference_label_vector.tolist())
        row_sums = output_labels_temp.sum(axis=1)
        output_labels_normalized = output_labels_temp / row_sums[:, np.newaxis]
        output_labels = torch.from_numpy(output_labels_normalized)
        # feature tensor: (pendulum-angle, pendulum-velocity) per state
        input_states = torch.from_numpy(np.array(train_df_reduced.state.apply(lambda x: [x[2].reshape(-1)[0].astype(float),x[3].reshape(-1)[0].astype(float)]).tolist()))
        train_ds = TensorDataset(input_states , output_labels)
        batch_size = batch_s
        train_dl = DataLoader(train_ds
                              , batch_size
                              , shuffle=True
                              )
        ### defining and training the LabelRanker network ###
        class Model(nn.Module):
            """Simple MLP: state (2-D) -> action-ranking vector (|A|-D)."""
            def __init__(self, input_state_len, output_label_len, layers, p=0.3):
                super(Model,self).__init__()
                all_layers = []
                input_size = input_state_len
                # stack Linear + LeakyReLU per configured hidden layer
                for layer_dim in layers:
                    all_layers.append(nn.Linear(input_size, layer_dim))
                    all_layers.append(nn.LeakyReLU(inplace=True))
                    input_size = layer_dim
                all_layers.append(nn.Linear(layers[-1], output_label_len))
                self.layers = nn.Sequential(*all_layers)
            def forward(self, state_vec):
                x = self.layers(state_vec)
                return x
        model = Model(input_states.shape[1], output_labels.shape[1], mod_layers)
        # SGD on MSE between predicted and target rankings
        opt = torch.optim.SGD(model.parameters(), lr = l_rate)
        loss_fn = F.mse_loss
        # per-epoch whole-dataset loss, for the optional training plot
        aggregated_losses = []
        def fit(num_epochs, model, loss_fn, opt):
            """Run the training loop; return the final whole-dataset loss."""
            for epoch in range(num_epochs):
                for xb,yb in train_dl:
                    pred = model(xb.float())
                    loss = loss_fn(pred, yb.float())
                    loss.backward()
                    opt.step()
                    opt.zero_grad()
                aggregated_losses.append(loss_fn(model(input_states.float()), output_labels.float()).detach().numpy())
            return loss_fn(model(input_states.float()), output_labels.float()).detach().numpy()
        epochs = n_epochs
        loss_v = fit(epochs, model, loss_fn, opt)
        # persist the trained model (./models/ must exist)
        PATH = f"./models/{model_name}_pbpi_model.pt"
        torch.save(model.state_dict(), PATH)
        if show_train_plot:
            plt.plot(range(epochs), aggregated_losses)
            plt.ylabel('Loss')
            plt.xlabel('epoch')
            plt.title(f'Training samples: {train_df_reduced.shape[0]} | Training loss: {np.round(loss_v,5)}\n')
            plt.show()
        # return the model in evaluation mode
        return model.eval()
########################################
### Derived policy using LabelRanker ###
########################################
class Policy():
    """Action-selection policy derived from a trained LabelRanker network.

    Wraps the model so that, given an observation, it predicts a ranking over
    the discretised action space and stochastically selects among the top
    ranked actions according to `probs`.
    """
    def __init__(self, action_space, model, probs):
        self.action_space = action_space  # discretised action values of the environment
        self.model = model                # trained NN (LabelRanker) model
        self.probs = probs                # selection probs for the 1st/2nd ranked action
    def label_ranking_policy(self,obs):
        """Return a (noise-perturbed) action for `obs` from the model's ranking.

        Only the pendulum angle and pendulum velocity (obs[2], obs[3]) are fed
        to the model. The highest ranked action is chosen with prob. probs[0],
        the second with probs[1]; with >2 actions the remainder share a total
        probability of 0.0 (i.e. they are effectively never chosen).
        """
        # 2-D feature vector: pendulum angle and velocity
        state_obs = np.array([obs[2].reshape(-1)[0],obs[3].reshape(-1)[0]])
        state_obs = torch.from_numpy(state_obs)
        # rank predictions for every action (no gradient needed)
        with torch.no_grad():
            preds = self.model(state_obs.float())
        # action indices ordered from highest ranked to lowest
        # (rd = scipy.stats.rankdata, imported at module level)
        ranked_action_idx = (-rd(preds.detach().numpy())).argsort()
        ### select an action index according to the configured probabilities ###
        if len(self.action_space)>2:
            # actions beyond the top two share a total probability of .00
            remain_probs = .00/len(ranked_action_idx[2:])
            n_remain_actions = ranked_action_idx.shape[0]-2
            action = np.random.choice(ranked_action_idx,1 , p=[self.probs[0], self.probs[1]] + list(np.repeat(remain_probs,n_remain_actions)))[0]
        else:
            # exactly two actions: split probability between them per `probs`
            action = np.random.choice(ranked_action_idx,1 , p=[self.probs[0], self.probs[1]])[0]
        # map the index back to the action value, add uniform noise so all
        # transitions are non-deterministic, and clip to the (-1, 1) range
        return np.array([[np.clip(self.action_space[int(action)] + np.array(np.random.uniform(low = -.2,high=.2),dtype=float),-1,1)]])
######################################
### Evaluating the learned policy ####
######################################
def run_evaluations(policy                            # input policy
                    , state_list                      # list of initial states
                    , step_thresh = 1000              # return threshold for a 'sufficient' policy
                    , env_name = 'CustomCartPole-v0'  # name of the environment
                    , simulations_per_state = 100     # number of simulations to generate per state
                    ):
    """Evaluate `policy` by simulating episodes from every state in `state_list`.

    Runs `simulations_per_state` episodes (capped at 1001 steps, 1 reward per
    step) from each starting state and returns:
      (pct of episodes reaching `step_thresh`, mean episodic return,
       max episodic return, min episodic return).
    """
    simu_per_state = simulations_per_state
    env_test = gym.make(env_name)
    # count of episodes whose return reached the threshold
    suf_policy_count = 0
    # all episodic returns, for the mean
    ep_returns = []
    max_return = 0
    min_return = 2000
    for state in state_list:
        for _ in range(simu_per_state):
            # start the episode at the given state (custom reset signature)
            env_test.reset(init_state=state)
            obs = state
            return_ep = 0
            # roll the policy forward for at most 1001 steps
            for _ in range(1001):
                action = policy.label_ranking_policy(obs)
                observation, reward, done, info = env_test.step(action)
                obs = observation
                return_ep += reward
                if done: break
            env_test.close()
            ep_returns.append(return_ep)
            max_return = max(max_return,return_ep)
            min_return = min(min_return,return_ep)
            # a run counts as 'sufficient' when its return meets the threshold
            if return_ep>=step_thresh:
                suf_policy_count += 1
    # (pct sufficient, mean return, max return, min return)
    return (suf_policy_count/(len(state_list)*simu_per_state))*100, (sum(ep_returns)/(len(state_list)*simu_per_state)), max_return, min_return
################################################################################
### Training loop for a single hyper-parameter configuration (multiple-runs) ###
################################################################################
def evaluations_per_config(s_size
                           , n_actions
                           , max_n_rollouts
                           , sig_lvl
                           , runs_per_config = 10
                           , off_policy_explr = False
                           , env_name = 'CustomCartPole-v0'
                           , print_run_eval_plot = False
                           ):
    """Run the full PbPI training/evaluation loop for one hyper-parameter setting.

    For each of `runs_per_config` independent runs: iterate policy improvement
    (preference generation -> LabelRanker training -> policy evaluation) until
    the iteration cap or until performance halves, recording the success rate
    and the cumulative action count per iteration. Returns one result dict per
    run.
    """
    #########################
    ###  PARAMETER INPUTS ###
    ## hyper-parameters ##
    env_name = env_name
    s_size   = s_size          # initial state sample size
    n_actions = n_actions      # number of actions in the action space
    n_rollouts = max_n_rollouts  # max. number of roll-outs to generate per action
    sig_lvl = sig_lvl          # statistical significance for action-pair comparisons
    runs_per_config = runs_per_config  # training runs for a single parameter configuration
    # hyper-parameter configuration summary (for progress output)
    param_config_string = f'Samples: {s_size} | Actions: {n_actions} | Roll-outs: {n_rollouts} | Significance: {sig_lvl}'
    ## task settings ##
    seed = 2                   # random seed
    max_iterr = 10             # max. num. of policy iterations
    off_policy_exploration = off_policy_explr  # use a separate exploratory behaviour policy
    eval_simu_per_state = 100  # evaluation episodes per initial state
    model_name = f'CartPole_{s_size}_{n_actions}_{n_rollouts}_{sig_lvl}'  # saved-model name
    ## flags/triggers ##
    print_iterr = False        # print progress bars of training iterations
    print_states_cover = False # print progress bars of visited states
    print_rollouts = False     # print roll-out results
    print_training_plot = False  # print the LabelRanker training-loss plot
    print_eval_plot = True     # print the evaluation results
    #########################
    ### variable initialization ###
    env = gym.make(env_name)
    sample_states = generate_init_states_S(seed = seed, env = env_name, sample_size = s_size)
    act_space = partition_action_space(env_name = env_name, n_actions = n_actions)
    act_pairs = list(itertools.combinations(act_space,2))  # all unordered action pairs
    print(f'\nCurrently evaluated configs:\n '+ param_config_string, end='\r')
    # LabelRanker width / epoch settings keyed on the sample size
    # (empirically chosen; other choices may work as well)
    if s_size < 49:
        model_config = [50]
        epch_config = 500
    elif s_size >= 49 and s_size < 149:
        model_config = [100]
        epch_config = 2000
    else:
        model_config = [125]
        epch_config = 2000
    # results of every run for this configuration
    run_results = []
    for run in tqdm.tqdm(range(runs_per_config), desc="Runs"):
        ### per-run evaluation metrics ###
        agg_pct_suff_policies = []  # % of learned sufficient policies per iteration
        action_count_li = []        # cumulative action counts per iteration
        ### per-run flags and adjustments ###
        label_r_flag = False        # switch to the LabelRanker policy after iteration 1
        policy = random_action      # the first policy is always random
        # NOTE(review): this increment executes once per run, so later runs get a
        # progressively larger iteration cap — confirm it was meant to run once,
        # outside the run loop.
        max_iterr = max_iterr + 1
        ### training loop ###
        iterr = 1
        while iterr < max_iterr:
            train_data = []         # preference data collected this iteration
            actions_in_iterr = 0    # actions executed this iteration
            for state in sample_states:          # roll-outs from each starting state
                for action_pair in act_pairs:    # roll-outs for each action pair
                    # preference datum + number of actions spent evaluating the pair
                    preference_out, actions_per_pair = evaluate_preference(starting_state = state
                                                                           , action_1 = np.array([[action_pair[0]]])
                                                                           , action_2 = np.array([[action_pair[1]]])
                                                                           , policy_in = policy
                                                                           , label_ranker = label_r_flag
                                                                           , n_rollouts = n_rollouts
                                                                           , p_sig = sig_lvl
                                                                           , tracking = False
                                                                           )
                    # NOTE(review): evaluate_preference always returns a dict
                    # (possibly all-NaN), so this guard is always true; NaN rows
                    # are dropped later inside train_model.
                    if preference_out is not None:
                        train_data.append(preference_out)
                    else:
                        pass
                    actions_in_iterr += actions_per_pair
            # fit the LabelRanker on this iteration's preference data
            model = train_model(train_data = train_data
                                , action_space = act_space
                                , model_name = model_name
                                , mod_layers = model_config
                                , batch_s = 4
                                , n_epochs = epch_config
                                , l_rate = 0.1
                                , show_train_plot = False
                                , show_dataset = False
                                )
            # No training data -> no model: record zero performance for this
            # iteration and continue with the next one.
            if model is None:
                print(f'No training data collected!')
                # cumulative action count across iterations
                if iterr>1:
                    action_count_li.append(actions_in_iterr+action_count_li[iterr-2])
                else:
                    action_count_li.append(actions_in_iterr)
                agg_pct_suff_policies.append(0)
                iterr += 1
                continue
            # Derive new policies from the trained model.
            if off_policy_exploration:
                # target policy (greedy, for evaluation) vs. behaviour policy
                # (explores the top-2 ranked actions, for data collection)
                target_policy = Policy(act_space, model, [1.0, 0.0])
                exp_policy = Policy(act_space, model, [0.5, 0.5])
            else:
                # on-policy: both greedy on the highest ranked action
                target_policy = Policy(act_space, model, [1.0, 0.0])
                exp_policy = Policy(act_space, model, [1.0, 0.0])
            # cumulative action count across iterations
            if iterr>1:
                action_count_li.append(actions_in_iterr+action_count_li[iterr-2])
            else:
                action_count_li.append(actions_in_iterr)
            # evaluate the target policy (x/y/z = mean/max/min returns, unused here)
            pct_succ_policies, x, y, z = run_evaluations(target_policy
                                                         , sample_states
                                                         , simulations_per_state = eval_simu_per_state
                                                         , step_thresh = 1000  # steps needed for a sufficient policy
                                                         )
            agg_pct_suff_policies.append(pct_succ_policies)
            ### TERMINATION CONDITION ###
            # stop the run if performance dropped below half of the previous
            # iteration's, discarding the worsened iteration's records
            if iterr>1:
                prvs_policy_perf = agg_pct_suff_policies[-2]
                curr_policy_perf = agg_pct_suff_policies[-1]
                if prvs_policy_perf * (0.5) > curr_policy_perf:
                    agg_pct_suff_policies = agg_pct_suff_policies[:-1]
                    action_count_li = action_count_li[:-1]
                    break
            # From iteration 2 onward, roll-outs follow the LabelRanker policy.
            label_r_flag = True
            if label_r_flag is False:
                policy = random_action
            else:
                policy = exp_policy
            iterr += 1
        # plot this run's evaluation curve (saved to ./train_imgs/)
        if print_run_eval_plot:
            fig, ax2 = plt.subplots(figsize =(6,4))
            ax2.plot(action_count_li, agg_pct_suff_policies, 'm-.', label = 'success rate')
            ax2.set_xlabel('# actions')
            ax2.set_ylabel('Pct. of sufficient policies')
            ax2.legend(loc='upper left')
            plt.title(f'Evaluation Results | Run: {run+1}')
            plt.savefig(f'./train_imgs/{model_name}_{run}.png')
        # record the run's results
        run_results.append({'S': s_size
                            , 'Actions' : n_actions
                            , 'Roll-outs': n_rollouts
                            , 'Significance' : sig_lvl
                            , 'run': run
                            , 'action_record': action_count_li
                            , 'SR': agg_pct_suff_policies})
        # NOTE(review): `pbar` is never defined in this function; enabling
        # print_iterr would raise NameError — confirm before turning it on.
        if print_iterr:
            pbar.close()
    # evaluation results for every run of this hyper-parameter configuration
    return run_results
###############################################################
### Run experiments for the tested parameter configurations ###
###############################################################
def run_experiment(CONFIGS):
    """Run evaluations for every tested hyper-parameter combination.

    Iterates over the cross product of sample sizes ('S'), roll-out limits
    ('Roll-outs') and significance levels ('Significance') found in CONFIGS,
    collects the per-run evaluation records, and exports everything to a
    single Excel file.
    """
    configs = CONFIGS
    agg_results = []
    # Total combinations, used only to size the progress bar.
    # NOTE(review): this multiplies by len(configs['Actions']) although the
    # loops below always use configs['Actions'][0] -- if more than one action
    # count is configured the bar total overshoots; confirm intent.
    eval_count = len(configs['S'])*len(configs['Actions'])*len(configs['Roll-outs'])*len(configs['Significance'])
    pbar_evals = tqdm.tqdm(total=eval_count, desc="Evaluations", leave=False)
    for sample_size in configs['S']:
        for rollout_max in configs['Roll-outs']:
            for sig_lvl in configs['Significance']:
                # One full evaluation (10 independent runs) per configuration.
                run_results = evaluations_per_config(s_size = sample_size
                                                     , n_actions = configs['Actions'][0]
                                                     , max_n_rollouts = rollout_max
                                                     , sig_lvl = sig_lvl
                                                     , runs_per_config = 10
                                                     , off_policy_explr = True
                                                     , print_run_eval_plot = True)
                agg_results.append(run_results)
                pbar_evals.update(1)
    pbar_evals.close()
    ## Save the evaluation results ##
    # Each element of agg_results is a list of per-run dicts; build one
    # DataFrame per configuration and concatenate before export.
    results_dfs = []
    for result in agg_results:
        results_dfs.append(pd.DataFrame(result))
    results_df = pd.concat(results_dfs)
    results_df.to_excel('eval_results/modified_experiment_results_para_config_6.xlsx',index=False)
if __name__ == '__main__':
    run_experiment(CONFIGS)
| [
"pandula2011@gmail.com"
] | pandula2011@gmail.com |
c68086f3cfc0a8c4d58bddae6939012e5f5d8e4b | 5cf4bcc45a2b755684a328fb6d99096d27ff506b | /M5HW2_McMillanChristian.py | d462294dfdfbd12da6cda1eb19666c1b5b8d75c5 | [] | no_license | meekmillan/cti110 | 6f6e34fb0e94be51788c07e2498ef14dbf1cdf4e | 7da0e4ce741ec2dda2fdab343246ad7d896ef04c | refs/heads/master | 2021-08-28T16:18:59.434491 | 2017-12-12T18:12:10 | 2017-12-12T18:12:10 | 104,909,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | #CTI-110
#M5HW2 - Running Total
#Christian McMillan
#10/24
#This program will calculate a running total
# Running-total calculator: keep summing user-entered integers until a
# negative number is typed, then report the accumulated total.
total = 0
number = int(input("Enter a number?: "))
# A negative entry acts as the stop sentinel.
while number >= 0:
    total += number
    number = int(input("Enter a number?: "))
# Report the accumulated sum.
print("Total: ", total)
| [
"noreply@github.com"
] | noreply@github.com |
9a6669dbb8aa1d8739a39c14d383548d2e889676 | 557d75e6dfb42c881d4df73950c41935635f2162 | /preprocessing/recon_all.py | d9bf4632fb59ca40f6606a9db0ddc41864471963 | [] | no_license | sssilvar/multiple-sclerosis | e6139558249f00a882ffeb9d4b82ac323a50ec96 | a2e1e97e1297d45c2b84c5c57b372eee26047941 | refs/heads/master | 2020-06-05T00:09:04.781033 | 2019-07-13T23:15:00 | 2019-07-13T23:15:00 | 192,245,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,045 | py | #!/bin/env python3
import os
import glob
from multiprocessing.pool import Pool
from os.path import join, isdir, basename
def recon_all(vol_file):
    """Launch FreeSurfer recon-all for the subject encoded in *vol_file*.

    The subject id is the token before the first underscore of the file
    name, and the matching T2 volume path is derived by swapping the
    T1/T2 suffix. Relies on the module-level ``out_folder`` set in the
    __main__ block for the FreeSurfer subjects directory.
    """
    subject_id = basename(vol_file).split('_')[0]
    t2_file = vol_file.replace('T1Wreg.nii.gz', 'T2Wreg.nii.gz')
    command = (
        f'recon-all -i {vol_file} -T2 {t2_file} '
        f'-s {subject_id} -sd {out_folder} -all'
    )
    print(command)
    os.system(command)
if __name__ == "__main__":
    # Set dataset folder
    dataset_folder = '/home/jullygh/Downloads/MS/extracted/*'
    pattern = join(dataset_folder, 'patient*_study1_T1Wreg.nii.gz')
    # Fixed typo in user-facing message: "Finging" -> "Finding".
    print(f'Finding pattern: {pattern}')
    # Output Folder
    out_folder = '/home/jullygh/Downloads/MS/processed_fs/'
    # Find files in folder
    files = glob.glob(pattern, recursive=True)
    print(f'Total files found: {len(files)}')
    confirm = input('Start [y/n]:')
    if confirm == 'y':
        # Process subjects in parallel; the context manager guarantees the
        # worker pool is cleaned up even if a job raises.
        with Pool(20) as pool:
            pool.map(recon_all, files)
    else:
        print('No process started')
    print('Done')
| [
"sssilvar@unal.edu.co"
] | sssilvar@unal.edu.co |
5b2adb99ac1c7f639cd70f0a78682b1b33699973 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/292/85489/submittedfiles/testes.py | 8470e2323f37fdef8789f886efdda325a1056e93 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
import datetime

# Print today's date. The original called `date.today()` directly, which
# raised NameError: `import datetime` binds only the module name, so the
# class must be reached as `datetime.date`.
n = datetime.date.today()
print(n)
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
5af30a2b0dea4f99b3e6d50ab17ef88efd55dbee | 43aac8bd31c412e2eccd24d65a349e535c1c4508 | /models/graphunet.py | a0323d9537794ed8b9e8cff92591df046d780b9e | [] | no_license | Spj-Zhao/HOPE | d7145f22141e0182d86650b27b617f169ebd13c9 | 24f99bb6c4f1d3e185e8988179347dde5c37f42c | refs/heads/master | 2022-12-27T08:57:30.052335 | 2020-10-01T15:16:54 | 2020-10-01T15:16:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,585 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import numpy as np
class GraphConv(nn.Module):
    """Dense graph convolution: X' = act(L(A) @ X @ W + b).

    The adjacency A is symmetrically normalised per batch before the
    feature transform. NOTE: the default `activation` module is created
    once at definition time and shared by all instances that rely on the
    default; ReLU is stateless so this is harmless in practice.
    """

    def __init__(self, in_features, out_features, activation=nn.ReLU(inplace=True)):
        super(GraphConv, self).__init__()
        self.fc = nn.Linear(in_features=in_features, out_features=out_features)
        self.activation = activation

    def laplacian(self, A_hat):
        """Symmetrically normalise a single adjacency matrix.

        Kept for API compatibility; `forward` uses `laplacian_batch`.
        """
        inv_sqrt_deg = (torch.sum(A_hat, 0) + 1e-5) ** (-0.5)
        return inv_sqrt_deg * A_hat * inv_sqrt_deg

    def laplacian_batch(self, A_hat):
        """Symmetrically normalise a batch of adjacency matrices.

        Computes D^-1/2 * A * D^-1/2 with a small epsilon on the degrees
        to avoid division by zero for isolated nodes.
        """
        batch, n_nodes = A_hat.shape[:2]
        inv_sqrt_deg = (torch.sum(A_hat, 1) + 1e-5) ** (-0.5)
        return (
            inv_sqrt_deg.view(batch, n_nodes, 1)
            * A_hat
            * inv_sqrt_deg.view(batch, 1, n_nodes)
        )

    def forward(self, X, A):
        # Tile the shared (N, N) adjacency across the batch, normalise,
        # aggregate neighbour features, then transform per node.
        batch = X.size(0)
        tiled = A.unsqueeze(0).repeat(batch, 1, 1)
        out = self.fc(torch.bmm(self.laplacian_batch(tiled), X))
        return out if self.activation is None else self.activation(out)
class GraphPool(nn.Module):
    """Learned pooling that shrinks the node dimension via a linear map.

    Input (batch, in_nodes, features) -> output (batch, out_nodes, features).
    """

    def __init__(self, in_nodes, out_nodes):
        super(GraphPool, self).__init__()
        self.fc = nn.Linear(in_features=in_nodes, out_features=out_nodes)

    def forward(self, X):
        # Move nodes to the last axis, mix them linearly, restore layout.
        pooled = self.fc(X.transpose(1, 2))
        return pooled.transpose(1, 2)
class GraphUnpool(nn.Module):
    """Learned unpooling that grows the node dimension via a linear map.

    Input (batch, in_nodes, features) -> output (batch, out_nodes, features).
    """

    def __init__(self, in_nodes, out_nodes):
        super(GraphUnpool, self).__init__()
        self.fc = nn.Linear(in_features=in_nodes, out_features=out_nodes)

    def forward(self, X):
        # Move nodes to the last axis, expand them linearly, restore layout.
        expanded = self.fc(X.transpose(1, 2))
        return expanded.transpose(1, 2)
class GraphUNet(nn.Module):
    """Graph U-Net over a 29-node hand+object graph (21 hand keypoints +
    8 object corners), mapping per-node 2D inputs to 3D outputs by default.

    Encoder: 5 x (GraphConv -> GraphPool) shrinking 29->15->7->4->2->1 nodes.
    Bottleneck: two fully connected layers on the single pooled node.
    Decoder: 5 x (GraphUnpool -> GraphConv) with skip connections that
    concatenate matching encoder features (hence the doubled input channel
    counts on gconv6..gconv10).
    NOTE: the learnable adjacencies are created with .cuda(), so this
    module requires a GPU at construction time.
    """
    def __init__(self, in_features=2, out_features=3):
        super(GraphUNet, self).__init__()
        # One learnable adjacency matrix per resolution level, initialised
        # to the identity (no edges).
        self.A_0 = Parameter(torch.eye(29).float().cuda(), requires_grad=True)
        self.A_1 = Parameter(torch.eye(15).float().cuda(), requires_grad=True)
        self.A_2 = Parameter(torch.eye(7).float().cuda(), requires_grad=True)
        self.A_3 = Parameter(torch.eye(4).float().cuda(), requires_grad=True)
        self.A_4 = Parameter(torch.eye(2).float().cuda(), requires_grad=True)
        self.A_5 = Parameter(torch.eye(1).float().cuda(), requires_grad=True)
        # ----- Encoder: conv then pool at each level -----
        self.gconv1 = GraphConv(in_features, 4) # 29 = 21 H + 8 O
        self.pool1 = GraphPool(29, 15)
        self.gconv2 = GraphConv(4, 8) # 15 = 11 H + 4 O
        self.pool2 = GraphPool(15, 7)
        self.gconv3 = GraphConv(8, 16) # 7 = 5 H + 2 O
        self.pool3 = GraphPool(7, 4)
        self.gconv4 = GraphConv(16, 32) # 4 = 3 H + 1 O
        self.pool4 = GraphPool(4, 2)
        self.gconv5 = GraphConv(32, 64) # 2 = 1 H + 1 O
        self.pool5 = GraphPool(2, 1)
        # ----- Bottleneck on the single pooled node -----
        self.fc1 = nn.Linear(64, 20)
        self.fc2 = nn.Linear(20, 64)
        # ----- Decoder: unpool then conv; conv inputs are doubled by the
        # skip-connection concatenation in _get_decoder_input -----
        self.unpool6 = GraphUnpool(1, 2)
        self.gconv6 = GraphConv(128, 32)
        self.unpool7 = GraphUnpool(2, 4)
        self.gconv7 = GraphConv(64, 16)
        self.unpool8 = GraphUnpool(4, 7)
        self.gconv8 = GraphConv(32, 8)
        self.unpool9 = GraphUnpool(7, 15)
        self.gconv9 = GraphConv(16, 4)
        self.unpool10 = GraphUnpool(15, 29)
        # Final layer has no activation: raw coordinate regression output.
        self.gconv10 = GraphConv(8, out_features, activation=None)
        self.ReLU = nn.ReLU()
    def _get_decoder_input(self, X_e, X_d):
        # Skip connection: concatenate encoder and decoder features along
        # the channel (last) dimension.
        return torch.cat((X_e, X_d), 2)
    def forward(self, X):
        # ----- Encoder (keep intermediate X_i for skip connections) -----
        X_0 = self.gconv1(X, self.A_0)
        X_1 = self.pool1(X_0)
        X_1 = self.gconv2(X_1, self.A_1)
        X_2 = self.pool2(X_1)
        X_2 = self.gconv3(X_2, self.A_2)
        X_3 = self.pool3(X_2)
        X_3 = self.gconv4(X_3, self.A_3)
        X_4 = self.pool4(X_3)
        X_4 = self.gconv5(X_4, self.A_4)
        X_5 = self.pool5(X_4)
        # ----- Bottleneck -----
        global_features = self.ReLU(self.fc1(X_5))
        global_features = self.ReLU(self.fc2(global_features))
        # ----- Decoder with skip connections -----
        X_6 = self.unpool6(global_features)
        X_6 = self.gconv6(self._get_decoder_input(X_4, X_6), self.A_4)
        X_7 = self.unpool7(X_6)
        X_7 = self.gconv7(self._get_decoder_input(X_3, X_7), self.A_3)
        X_8 = self.unpool8(X_7)
        X_8 = self.gconv8(self._get_decoder_input(X_2, X_8), self.A_2)
        X_9 = self.unpool9(X_8)
        X_9 = self.gconv9(self._get_decoder_input(X_1, X_9), self.A_1)
        X_10 = self.unpool10(X_9)
        X_10 = self.gconv10(self._get_decoder_input(X_0, X_10), self.A_0)
        return X_10
class GraphNet(nn.Module):
    """Plain three-layer graph convolutional network over the 29-node graph.

    NOTE: the learnable adjacency is created with .cuda(), so this module
    requires a GPU at construction time.
    """

    def __init__(self, in_features=2, out_features=2):
        super(GraphNet, self).__init__()
        # Learnable adjacency, initialised to the identity (no edges).
        self.A_hat = Parameter(torch.eye(29).float().cuda(), requires_grad=True)
        self.gconv1 = GraphConv(in_features, 128)
        self.gconv2 = GraphConv(128, 16)
        # Final layer without activation: raw regression output.
        self.gconv3 = GraphConv(16, out_features, activation=None)

    def forward(self, X):
        hidden = self.gconv1(X, self.A_hat)
        hidden = self.gconv2(hidden, self.A_hat)
        return self.gconv3(hidden, self.A_hat)
| [
"bdoosti@iu.edu"
] | bdoosti@iu.edu |
b8f2c3b12f8e50b050374859c2cd7f0411825d78 | 2a4dc7f112b14bd336e343af3cd83232bba6ca4c | /telethon_add_to_group.py | 84cddc3ccf3c0be92d4d717c310c3d4f784c6d39 | [] | no_license | NdibeRaymond/telethon | 594b9ce3dc4de0ec1d81080ade99a3c7bca708f9 | 6fe1a26bb54addeb94bc0fe7bca28f2d086c0d5b | refs/heads/master | 2022-12-15T07:31:39.181468 | 2020-09-19T17:38:17 | 2020-09-19T17:38:17 | 296,770,807 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,977 | py | from telethon.sync import TelegramClient
from telethon.tl.functions.messages import GetDialogsRequest
from telethon.tl.types import InputPeerEmpty, InputPeerChannel, InputPeerUser
from telethon.errors.rpcerrorlist import PeerFloodError, UserPrivacyRestrictedError
from telethon.tl.functions.channels import InviteToChannelRequest
from telethon.tl.functions.messages import AddChatUserRequest
import sys
import csv
import traceback
import time
import random
# Telegram API credentials.
# SECURITY NOTE(review): api_id/api_hash and the phone number are hard-coded
# and committed to source control; move them to environment variables or a
# config file and rotate the exposed hash.
api_id = 1495975
api_hash = '53e42c39310aa2e4f33684e6089a16ed'
phone = '+2349033142556'
# Connect and, if this session is not yet authorized, run the interactive
# login flow (Telegram sends a confirmation code to the phone).
client = TelegramClient(phone, api_id, api_hash)
client.connect()
if not client.is_user_authorized():
    client.send_code_request(phone)
    client.sign_in(phone, input('Enter the code: '))
# Load the target users from a previously exported CSV.
# Expected columns: username, id, access_hash, name; first row is a header.
input_file = input("input csv file name: ")
users = []
with open(input_file, encoding='latin-1') as f:
    rows = csv.reader(f,delimiter=",",lineterminator="\n")
    next(rows, None)  # skip the header row
    for row in rows:
        user = {}
        user['username'] = row[0]
        user['id'] = int(row[1])
        user['access_hash'] = int(row[2])
        user['name'] = row[3]
        users.append(user)
# Fetch up to `chunk_size` dialogs, keep only the supergroups, and let the
# operator pick the target group interactively.
chats = []
last_date = None
chunk_size = 200
groups=[]
result = client(GetDialogsRequest(
    offset_date=last_date,
    offset_id=0,
    offset_peer=InputPeerEmpty(),
    limit=chunk_size,
    hash = 0
))
chats.extend(result.chats)
for chat in chats:
    try:
        # Only megagroups (supergroups) accept member invites this way.
        if chat.megagroup== True:
            groups.append(chat)
    except:
        # Some dialog types lack a `megagroup` attribute; skip them.
        continue
print('Choose a group to add members:')
i=0
for group in groups:
    print(str(i) + '- ' + group.title)
    i+=1
g_index = input("Enter a Number: ")
target_group=groups[int(g_index)]
# Build the low-level peer required by InviteToChannelRequest.
target_group_entity = InputPeerChannel(target_group.id,target_group.access_hash)
# Add each user to the chosen group, pausing regularly to reduce the chance
# of hitting Telegram flood limits.
mode = int(input("Enter 1 to add by username or 2 to add by ID: "))
n = 0
for user in users:
    n += 1
    if n % 50 == 0:
        # Long cool-down (350 seconds) after every 50 additions.
        time.sleep(350)
    try:
        print ("Adding {}".format(user['id']))
        if mode == 1:
            if user['username'] == "":
                continue  # an empty username cannot be resolved
            user_to_add = client.get_input_entity(user['username'])
        elif mode == 2:
            user_to_add = InputPeerUser(user['id'], user['access_hash'])
        else:
            sys.exit("Invalid Mode Selected. Please Try Again.")
        client(InviteToChannelRequest(target_group_entity,[user_to_add]))
        print("Waiting for 10-20 Seconds...")
        time.sleep(random.randrange(10,20))
    except PeerFloodError as e:
        # NOTE(review): the message says the script is stopping, but there
        # is no break -- the loop continues with the next user; confirm intent.
        print("Getting Flood Error from telegram. Script is stopping now. Please try again after some time.",e)
    except UserPrivacyRestrictedError:
        print("The user's privacy settings do not allow you to do this. Skipping.")
    except:
        # Catch-all so one bad record does not abort the whole batch.
        traceback.print_exc()
        print("Unexpected Error")
        continue
| [
"noreply@github.com"
] | noreply@github.com |
3f7c090f4bee5ac6ad1794ca0676a0e4b8056d8e | e5fa58651bfd550ca79cfbdddbde6b570d6041e8 | /Chapter 5/tango_with_django_project/tango_with_django_project/urls.py | 4a51e4ce0cba6076c16d4266be4ebfc05eabbc62 | [] | no_license | ellieleep/tangowithdjango | 7dc6a3298c7978b5b7c8dacfd5d7aba270d53672 | 3bb7600be31f2f07f702104d5bf8805f7b2d0a1c | refs/heads/master | 2023-07-15T08:11:49.326139 | 2021-08-19T19:14:26 | 2021-08-19T19:14:26 | 390,139,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,047 | py | """tango_with_django_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import include
from rango import views
from django.conf import settings
from django.conf.urls.static import static
# URL routing table:
#   ''       -> rango's index view (site landing page)
#   'rango/' -> delegated to the rango app's own urls.py
#   'admin/' -> Django admin site
# static(...) appends media-serving routes for development use.
urlpatterns = [
    path('', views.index, name='index'),
    path('rango/', include('rango.urls')),
    path('admin/', admin.site.urls),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"32715340+ellieleep@users.noreply.github.com"
] | 32715340+ellieleep@users.noreply.github.com |
c1fb632462fb073565ae995962ae392db45905b3 | a411a55762de11dc2c9d913ff33d2f1477ac02cf | /lte/gateway/python/magma/mobilityd/subscriberdb_client.py | 1fec443db1956ef872a11cfbc3a1d98d7a4c2e0f | [
"BSD-3-Clause"
] | permissive | magma/magma | 0dc48c1513d9968bd05fb7589f302c192b7c0f94 | 0e1d895dfe625681229e181fbc2dbad83e13c5cb | refs/heads/master | 2023-09-04T09:31:56.140395 | 2023-08-29T13:54:49 | 2023-08-29T13:54:49 | 170,803,235 | 1,219 | 525 | NOASSERTION | 2023-09-07T17:45:42 | 2019-02-15T04:46:24 | C++ | UTF-8 | Python | false | false | 6,221 | py | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ipaddress
import logging
from typing import Optional
import grpc
from lte.protos.apn_pb2 import APNConfiguration
from magma.mobilityd.utils import log_error_and_raise
from magma.subscriberdb.sid import SIDUtils
class NetworkInfo:
    """Gateway networking details (IP, MAC, VLAN) for a subscriber session."""

    def __init__(
        self, gw_ip: Optional[str] = None, gw_mac: Optional[str] = None,
        vlan: int = 0,
    ):
        try:
            parsed_ip = ipaddress.ip_address(gw_ip)  # type: ignore
        except ValueError:
            # Covers gw_ip=None as well: an unparsable gateway is kept as None.
            logging.debug("invalid internet gw ip: %s", gw_ip)
            parsed_ip = None
        self.gw_ip = parsed_ip
        self.gw_mac = gw_mac
        self.vlan = vlan

    def __str__(self):
        return f"GW-IP: {self.gw_ip} GW-MAC: {self.gw_mac} VLAN: {self.vlan}"
class StaticIPInfo:
    """
    Operator can configure Static GW IP and MAC.
    This would be used by AGW services to generate networking
    configuration.
    """

    def __init__(
        self, ip: Optional[str],
        gw_ip: Optional[str],
        gw_mac: Optional[str],
        vlan: int,
    ):
        # A missing/empty static IP stays None; otherwise parse eagerly so
        # bad values fail here rather than at use time.
        self.ip = ipaddress.ip_address(ip) if ip else None
        self.net_info = NetworkInfo(gw_ip, gw_mac, vlan)

    def __str__(self):
        return f"IP: {self.ip} NETWORK: {self.net_info}"
class SubscriberDbClient:
    """Helper around the local SubscriberDB gRPC stub.

    Looks up per-subscriber APN configuration and converts it into
    StaticIPInfo / NetworkInfo objects, translating data and transport
    errors into this module's exception types.
    """
    def __init__(self, subscriberdb_rpc_stub):
        # Stub may be None when SubscriberDB is unavailable; lookups then
        # degrade to "no configuration" instead of failing.
        self.subscriber_client = subscriberdb_rpc_stub
    def get_subscriber_ip(self, sid: str) -> Optional[StaticIPInfo]:
        """
        Make RPC call to 'GetSubscriberData' method of local SubscriberDB
        service to get assigned IP address if any.

        Returns None when no stub is configured or no static IP is assigned.
        Raises SubscriberDBStaticIPValueError for invalid stored data and
        SubscriberDBConnectionError for gRPC transport errors.
        """
        if self.subscriber_client is None:
            return None
        try:
            apn_config = self._find_ip_and_apn_config(sid)
            logging.debug("ip: Got APN: %s", apn_config)
            if apn_config and apn_config.assigned_static_ip:
                return StaticIPInfo(
                    ip=apn_config.assigned_static_ip,
                    gw_ip=apn_config.resource.gateway_ip,
                    gw_mac=apn_config.resource.gateway_mac,
                    vlan=apn_config.resource.vlan_id,
                )
        except ValueError as ex:
            logging.warning(
                "static Ip: Invalid or missing data for sid %s: ", sid,
            )
            logging.debug(ex)
            raise SubscriberDBStaticIPValueError(sid)
        except grpc.RpcError as err:
            log_error_and_raise(
                SubscriberDBConnectionError,
                "GetSubscriberData: while reading vlan-id error[%s] %s",
                err.code(),
                err.details(),
            )
        return None
    def get_subscriber_apn_network_info(self, sid: str) -> NetworkInfo:
        """
        Make RPC call to 'GetSubscriberData' method of local SubscriberDB
        service to get assigned IP address if any.

        Returns the APN's gateway/VLAN details, or an empty NetworkInfo when
        nothing is configured. Raises SubscriberDBMultiAPNValueError for
        invalid stored data and SubscriberDBConnectionError for gRPC errors.
        TODO: Move this API to separate APN configuration service.
        """
        if self.subscriber_client:
            try:
                apn_config = self._find_ip_and_apn_config(sid)
                logging.debug("vlan: Got APN: %s", apn_config)
                if apn_config and apn_config.resource.vlan_id:
                    return NetworkInfo(
                        gw_ip=apn_config.resource.gateway_ip,
                        gw_mac=apn_config.resource.gateway_mac,
                        vlan=apn_config.resource.vlan_id,
                    )
            except ValueError as ex:
                logging.warning(
                    "vlan: Invalid or missing data for sid %s", sid,
                )
                logging.debug(ex)
                raise SubscriberDBMultiAPNValueError(sid)
            except grpc.RpcError as err:
                log_error_and_raise(
                    SubscriberDBConnectionError,
                    "GetSubscriberData: while reading vlan-id error[%s] %s",
                    err.code(),
                    err.details(),
                )
        return NetworkInfo()
    # use same API to retrieve IP address and related config.
    def _find_ip_and_apn_config(
        self, sid: str,
    ) -> Optional[APNConfiguration]:
        # The SID may carry an APN name after a '.'; the trailing ',...'
        # suffix is stripped in both branches.
        # NOTE(review): both split(',', maxsplit=1) unpackings assume a comma
        # is always present in `sid`; a sid without one raises ValueError --
        # confirm against the caller's sid format.
        if '.' in sid:
            imsi, apn_name_part = sid.split('.', maxsplit=1)
            apn_name, _ = apn_name_part.split(',', maxsplit=1)
        else:
            imsi, _ = sid.split(',', maxsplit=1)
            apn_name = ''
        logging.debug("Find APN config for: %s", sid)
        data = self.subscriber_client.GetSubscriberData(SIDUtils.to_pb(imsi))
        if data and data.non_3gpp and data.non_3gpp.apn_config:
            selected_apn_conf = None
            for apn_config in data.non_3gpp.apn_config:
                logging.debug("APN config: %s", apn_config)
                try:
                    # Skip entries whose static IP is present but unparsable.
                    if apn_config.assigned_static_ip:
                        ipaddress.ip_address(apn_config.assigned_static_ip)
                except ValueError:
                    continue
                # Wildcard APN is a fallback; an exact name match wins and
                # stops the scan.
                if apn_config.service_selection == '*':
                    selected_apn_conf = apn_config
                elif apn_config.service_selection == apn_name:
                    selected_apn_conf = apn_config
                    break
            return selected_apn_conf
        return None
class SubscriberDBConnectionError(Exception):
    """ Exception thrown when the subscriber DB is not available.
    """
    pass
class SubscriberDBStaticIPValueError(Exception):
    """ Exception thrown when the subscriber DB has an invalid static IP value for the subscriber.
    """
    pass
class SubscriberDBMultiAPNValueError(Exception):
    """ Exception thrown when the subscriber DB has an invalid MultiAPN vlan value
    for the subscriber.
    """
    pass
| [
"noreply@github.com"
] | noreply@github.com |
22a47b9290477f745fd0080eea00f78909aa3193 | 3445a1ea3ee67b03cff101ff8fdb4fedf9ba9a36 | /sql_queries.py | 191dd58c78933bc1e26acdbc41bf18252cab44dd | [] | no_license | DnyanadaArjunwadkar/Udacity-Data-Engineer-ND | c0d373f0a1da04084dd6f52b6b34017ae9cebc0b | e22151c031b7f3c76647a94f02364bfa98a4b88c | refs/heads/master | 2020-05-31T12:37:24.351126 | 2019-06-04T22:12:12 | 2019-06-04T22:12:12 | 190,285,324 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,953 | py | # DROP TABLES
# Drop statements used to reset the schema before re-creating it.
songplay_table_drop = "DROP TABLE IF EXISTS song_plays"
user_table_drop = "DROP TABLE IF EXISTS users"
song_table_drop = "DROP TABLE IF EXISTS songs"
artist_table_drop = "DROP TABLE IF EXISTS artists"
time_table_drop = "DROP TABLE IF EXISTS time"
# CREATE TABLES
# Fact table: one row per song play event.
songplay_table_create = ("""CREATE TABLE IF NOT EXISTS song_plays(songplay_id SERIAL PRIMARY KEY,start_time TIMESTAMP, user_id INT NOT NULL, level VARCHAR, song_id VARCHAR, artist_id VARCHAR, session_id INT, location TEXT, user_agent TEXT ) """)
# Dimension table: app users.
user_table_create = ("""CREATE TABLE IF NOT EXISTS users(
user_id int NOT NULL ,
first_name VARCHAR ,
last_name VARCHAR,
gender VARCHAR,
level VARCHAR,
PRIMARY KEY(user_id)) """)
# Dimension table: songs.
song_table_create = (""" CREATE TABLE IF NOT EXISTS songs(
song_id VARCHAR NOT NULL,
title VARCHAR ,
artist_id VARCHAR,
year INT,
duration FLOAT,
PRIMARY KEY(song_id))""")
# Dimension table: artists.
# NOTE(review): "lattitude" is misspelled; kept as-is because the column
# name must match the ETL inserts below -- confirm before renaming.
artist_table_create = ("""CREATE TABLE IF NOT EXISTS artists(
artist_id VARCHAR NOT NULL,
name VARCHAR,
location varchar,
lattitude numeric,
longitude numeric,
PRIMARY KEY(artist_id)
)
""")
# Dimension table: timestamps broken down into calendar units.
time_table_create = (""" CREATE TABLE IF NOT EXISTS time(
start_time TIMESTAMP NOT NULL,
hour INT,
day INT,
week INT,
month INT,
year INT,
weekday VARCHAR,
PRIMARY KEY(start_time)
)
""")
# INSERT RECORDS
# NOTE(review): the column order here is (..., artist_id, song_id, ...);
# callers must pass values in that order -- verify against the ETL code.
songplay_table_insert = ("""INSERT INTO song_plays( start_time, user_id,level,artist_id,song_id, session_id, location, user_agent)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s)""")
# Upsert: a returning user may have changed subscription level.
user_table_insert = ("""INSERT INTO users(user_id, first_name, last_name, gender,level)
VALUES(%s, %s, %s, %s, %s)
ON CONFLICT (user_id)
DO UPDATE SET level = excluded.level""")
# Idempotent inserts: duplicates are silently ignored.
song_table_insert = (""" INSERT INTO songs(song_id,title,artist_id,year,duration) values(%s,%s,%s,%s,%s) ON CONFLICT(song_id) DO NOTHING""")
artist_table_insert = ("""INSERT INTO artists(artist_id, name,location,lattitude, longitude)
VALUES(%s, %s, %s, %s, %s)
ON CONFLICT (artist_id)
DO NOTHING""")
time_table_insert = ("""INSERT INTO time(start_time,hour,day,week,month, year,weekday)
VALUES(%s, %s, %s, %s, %s, %s, %s)
ON CONFLICT (start_time)
DO NOTHING""")
# FIND SONGS
# Resolve (title, artist name, duration) to the matching song/artist ids.
song_select = ("""SELECT songs.song_id, artists.artist_id FROM songs
JOIN artists ON songs.artist_id=artists.artist_id
WHERE songs.title=%s AND artists.name=%s AND songs.duration=%s;""")
# QUERY LISTS
# Consumed by create_tables.py to (re)build the schema in order.
create_table_queries = [songplay_table_create, user_table_create, song_table_create, artist_table_create, time_table_create]
drop_table_queries = [songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]
"noreply@github.com"
] | noreply@github.com |
74ff9da07d0e3ef8fc39177aaef0c51a8f5fabb6 | 641357069b54b5044aff093e8fdce50fa5dde34b | /mod_02_globals_and_locals.py | a8d56af04676abe43d71b70ee435e8c63f9a97f8 | [] | no_license | jorgevila/decorators | 559000eff7c5fc084fbaa3f530386e70faf0558e | f73ede2f4d4d29d0918fb9127f343d534d2a1f51 | refs/heads/master | 2021-01-15T23:30:50.289603 | 2015-07-24T08:41:28 | 2015-07-24T08:41:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,782 | py | #===============================================================================
# MOD 02: global, globals and locals
#===============================================================================
# Let's continue with a small exercise before diving into scopes and namespaces
#===============================================================================
# EXERCISE: exercises/mod_02_globals_and_locals/exercise_mod_02.py
#
# - Modify our simple in-memory cache:
# - Add a method to clear the cache content (don't use dict.clear())
#
# - Check the documentation
#
# - Run the tests in 'tests_mod_02.py' executing 'nosetests -v' inside its folder
#
# - Check the solution in module 'solution_mod_02.py'
#===============================================================================
import time
from collections import OrderedDict
# Module-level cache state: insertion-ordered mapping of
#   key -> (absolute_expiry_timestamp, value)
CACHE = OrderedDict()
CACHE_SIZE = 5   # max number of entries before FIFO eviction
CACHE_TTL = 10   # default time-to-live, in seconds
def set_key(key, value, ttl=None):
    """Set a key value in the cache with its expiration time.
    If no ttl is provided CACHE_TTL is taken by default.
    If cache length exceeds CACHE_SIZE when adding a key, the oldest (first inserted) key is removed (FIFO)
    """
    # Store the absolute expiry time alongside the value.
    CACHE[key] = (time.time() + (ttl or CACHE_TTL), value)
    if len(CACHE) > CACHE_SIZE:
        # last=False pops the oldest (first-inserted) entry -> FIFO eviction.
        CACHE.popitem(last=False)
def get_key(key):
    """Retrieve a key value from the cache.
    Returns None if does not exist or the key expired.
    If the key expired it is removed from the cache.
    """
    content = CACHE.get(key, None)
    if content:
        if content[0] > time.time():
            # Still fresh: return the stored value.
            return content[1]
        else:
            # Expired: drop the stale entry before reporting a miss.
            del CACHE[key]
    return None
def clear_keys():
    """Remove all cache keys content
    """
    # Rebinding the module-level name requires `global`; without it the
    # assignment below would only create a local variable.
    global CACHE  # global statement does all the magic
    CACHE = OrderedDict()
set_key("my_key", "my_value")
print CACHE
print CACHE["my_key"]
print get_key("my_key")
clear_keys()
print CACHE
print get_key("my_key")
# Another example
# Demonstration: rebinding a module-level name from inside functions.
my_global_var = 0
def func_a():
    # `global` makes the assignment below rebind the module-level name.
    global my_global_var
    my_global_var = "A" # Value (binding) is changed in module's global scope
    print "INSIDE func_a:", my_global_var
def func_b():
    # No `global` here: the assignment creates a new local variable and the
    # module-level name is left untouched.
    my_global_var = "B" # Value (binding) is changed only in local scope
    print "INSIDE func_b:", my_global_var
func_a()
print "AFTER func_a:", my_global_var
func_b()
print "AFTER func_b:", my_global_var # Value was changed only in local scope
# What about this?
def func_c():
    # Using the name before its `global` declaration: Python 2 emits a
    # SyntaxWarning here (Python 3 makes it a SyntaxError); the name is
    # treated as global for the whole function body regardless.
    print "INSIDE func_c:", my_global_var
    global my_global_var
    my_global_var = "C" # What is it gonna happen?
func_c()
print "AFTER func_c:", my_global_var
#==============================================================================
# - Python 'global' statement
# - The listed identifiers are to be interpreted as globals
# - They are defined in CURRENT module's namespace
# - Their value can be modified
# - This declaration holds for the entire current code block (namespace)
# - Use it at the beginning of the code block
# - There are some restrictions on how global names can be used
#==============================================================================
#==============================================================================
# - Python 'nonlocal' statement (ONLY IN Py3k!)
# - Listed identifiers refer to previously bound variables in the nearest enclosing scope
# - It allows rebinding variables outside of the local scope and besides the global (module) scope
# - It must refer to pre-existing bindings in an enclosing scope!
#==============================================================================
# Let's see more useful stuff
def func_a():
    """Show globals()/locals() while rebinding a global via `global`."""
    print "INSIDE func_a globals:", globals().keys() # Even 'func_a' appears in globals
    print
    print "INSIDE func_a locals:", locals()
    print
    global another_global_var # This time we use a different attribute
    another_global_var = "AAA"
    print "INSIDE func_a:", another_global_var
    print
    print "EXITING func_a globals:", globals().keys() # The value has been updated directly in globals
    print
    print "EXITING func_a locals:", locals() # Locals remains empty
func_a()
def func_b():
    """Same as func_a but without `global`: the name stays local."""
    print "INSIDE func_b globals:", globals().keys()
    print
    print "INSIDE func_b locals:", locals()
    print
    another_global_var = "BBB"
    print "INSIDE func_b:", another_global_var
    print
    print "EXITING func_b globals:", globals().keys() # The value remains unchanged in globals
    print
    print "EXITING func_b locals:", locals() # Now locals is not empty
func_b()
def func_c():
    """Dump this module's globals; used by the import exercise below."""
    g = globals()
    for k in g:
        if k != "__builtins__" and not k.startswith("_"):
            print k, "==>", g[k]
    print "-" * 5
    # globals() is always the DEFINING module's namespace, so attributes
    # created in the interactive session are not visible here.
    print "new_global_var ==>", g.get("new_global_var", "???")
#===============================================================================
# EXERCISE:
#
# - Open python interpreter in current folder
#
# - Declare a new attribute
# >>> new_global_var = "xyz"
#
# - Check gobals() and locals()
#
# - Import func_c and execute it
# >>> from mod_02_globals_and_locals import func_c
# ...
# >>> func_c()
#
# - WHAT HAPPENED WITH YOUR NEW GLOBAL ATTRIBUTE?
#===============================================================================
#==============================================================================
# - globals()
# - Return a dictionary-like object mapping the current global symbol table
# - This is always the mapping of the current module (inside a function or
# method, this is the module where it is defined, not the module from which
# it is called)
#
# - locals()
# - Update and return a dictionary representing the current local symbol table
# - The contents of this dictionary should not be modified
#==============================================================================
def change_globals_locals():
    """Show that writing through globals() works but writing through
    locals() does not survive inside a function."""
    g = globals()
    l = locals()
    # Mutating the globals() mapping really creates a module-level name...
    g["another_global_var"] = 123
    # ...but locals() is only a snapshot of the frame: this write is lost.
    l["another_local_var"] = 980
    print globals().keys()
    print locals().keys()
    print another_global_var
    try:
        # Expected to raise NameError: the locals() write did not create
        # a real local variable.
        print another_local_var
    except Exception as e:
        print "WOT?", e.__class__, e
change_globals_locals()
print another_global_var
#===============================================================================
# REFERENCES:
# - http://docs.python.org/2/tutorial/classes.html#python-scopes-and-namespaces
# - http://docs.python.org/2/reference/simple_stmts.html#global
# - http://docs.python.org/3/reference/simple_stmts.html#nonlocal
# - http://docs.python.org/2/library/functions.html#globals
# - http://docs.python.org/2/library/functions.html#locals
#===============================================================================
| [
"pablo.enfedaque@skyscanner.net"
] | pablo.enfedaque@skyscanner.net |
b350f1f0416822ef956cae7c7a8e285fdeae380a | 2d1649a7a00d49b72ed7e53afa4abb3c9281ce03 | /.history/ParticleFilter/go_to_goal_20190421181756.py | fa5c4dea237b41fd8aea882ecec9f2e1f521c0ff | [] | no_license | joshzhang5/CS3630Lab6 | 9547dc6c89198e9bb4aebd8359d4feb974082d20 | 69e6df12829e18a211ae850236d74b4d728046ef | refs/heads/master | 2020-05-15T13:59:51.906195 | 2019-04-22T18:21:42 | 2019-04-22T18:21:42 | 182,317,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,240 | py | # Jiaxi Zhang
# George McAlear
# If you run into an "[NSApplication _setup] unrecognized selector" problem on macOS,
# try uncommenting the following snippet
try:
import matplotlib
matplotlib.use('TkAgg')
except ImportError:
pass
from skimage import color
import numpy as np
from numpy.linalg import inv
import threading
import time
import sys
import asyncio
from PIL import Image
from markers import detect, annotator
from grid import CozGrid
from gui import GUIWindow
from particle import Particle, Robot
from setting import *
from particle_filter import *
from utils import *
from time import sleep
import time
import cozmo
from cozmo.util import distance_mm, degrees, speed_mmps, Pose, Angle
#particle filter functionality
class ParticleFilter:
    """Monte-Carlo localization filter over the map grid."""
    def __init__(self, grid):
        # Start fully uncertain: particles spread uniformly over the grid.
        self.particles = Particle.create_random(PARTICLE_COUNT, grid)
        self.grid = grid
    def update(self, odom, r_marker_list):
        """Advance the filter one step.

        odom: odometry input forwarded to motion_update (assumed to be the
            relative motion since the last update -- confirm with caller).
        r_marker_list: markers currently observed, in the robot frame.
        Returns (m_x, m_y, m_h, m_confident): the mean pose estimate and a
        flag indicating whether the particle cloud has converged.
        """
        # ---------- Motion model update ----------
        self.particles = motion_update(self.particles, odom)
        # ---------- Sensor (markers) model update ----------
        self.particles = measurement_update(self.particles, r_marker_list, self.grid)
        # ---------- Show current state ----------
        # Try to find current best estimate for display
        m_x, m_y, m_h, m_confident = compute_mean_pose(self.particles)
        return (m_x, m_y, m_h, m_confident)
async def run(robot: cozmo.robot.Robot):
    """Localize the robot, drive to the pick-up zone, then loop forever
    picking up cubes and ferrying them along the drop-off route.

    NOTE(review): `last_pose`, `goal_pose`, `drop_off_directions`,
    `look_around_until_converge`, `execute_directions`,
    `convertPoseToInches` and `convertInchesToPose` are expected to be
    module-level names defined elsewhere; they are not visible in this chunk.
    """
    await look_around_until_converge(robot)
    # intialize an explorer after localized
    #cosimo = CozmoExplorer(robot, x_0=last_pose.position.x, y_0=last_pose.position.y, theta_0=last_pose.rotation.angle_z.radians)
    # move robot to pickup zone once localized
    print("LAST POSE IS:", last_pose)
    #print("COZMO CONVERTED THAT TO A START AT:", cosimo.last_arena_pose)
    directions = goal_pose - last_pose
    current_pose = last_pose
    last_robot_pose = robot.pose
    print("SETTING LAST ROBOT POSE TO: ", last_robot_pose)
    print("SO WE GOING TO FOLLOW THIS TO PICKUP ZONE:", directions)
    await execute_directions(robot, directions)
    await robot.turn_in_place(angle=Angle(degrees=45)).wait_for_completed()
    print("LAST ROBOT POSE IS: ", last_robot_pose)
    print("CURRENT POSE IS:", robot.pose)
    print("WE THINK WE MOVED THIS MUCH TO GO TO PICKUP ZONE: ", convertPoseToInches(robot.pose - last_robot_pose))
    # FIXED: the original line had an unbalanced parenthesis (SyntaxError)
    # around a rotate_point() call; every other checkpoint in this function
    # accumulates a plain pose difference, so we do the same here.
    current_pose = current_pose + convertPoseToInches(robot.pose - last_robot_pose)
    last_robot_pose = robot.pose
    print("COZMO THINKS IT IS AT AFTER DRIVING TO PICKUPZONE: ", current_pose)
    # await robot.say_text('Ready for pick up!').wait_for_completed()
    while True:
        # Wait for a cube to appear in view, grab it, and track how far we moved.
        cube = await robot.world.wait_for_observed_light_cube(timeout=30)
        print("Found cube: %s" % cube)
        await robot.pickup_object(cube, num_retries=5).wait_for_completed()
        current_pose = current_pose + convertPoseToInches(robot.pose - last_robot_pose)
        print("WE THINK WE MOVED THIS MUCH TO PICK UP CUBE: ", convertPoseToInches(robot.pose - last_robot_pose))
        last_robot_pose = robot.pose
        #cosimo.update_pose()
        print("COZMO THINKS IT IS AT AFTER PICKING UP CUBE: ", current_pose)
        #await look_around_until_converge(robot)
        # intialize an explorer after localized
        #cosimo = CozmoExplorer(robot, x_0=last_pose.position.x, y_0=last_pose.position.y, theta_0=last_pose.rotation.angle_z.radians)
        # move robot to pickup zone once localized
        #print("COZMO CONVERTED THAT TO A START AT:", cosimo.last_arena_pose)
        #current_pose = last_pose
        # Follow the drop-off waypoints, dead-reckoning our pose at each leg.
        for destination in drop_off_directions:
            directions = convertInchesToPose(destination) - current_pose
            await execute_directions(robot,directions)
            current_pose = current_pose + convertPoseToInches(robot.pose - last_robot_pose)
            print("WE THINK WE MOVED THIS MUCH TO FOLLOW DIRECTIONS: ", convertPoseToInches(robot.pose - last_robot_pose))
            last_robot_pose = robot.pose
            print("COZMO THINKS IT IS AT AFTER FOLLOWING DIRECTIONS: ", current_pose)
        #await cosimo.go_to_goal(goal_node=dropoff_node)
        await robot.set_lift_height(0.0).wait_for_completed()
        # rrt to just in front of pick up zone
        # await cosimo.go_to_goal(goal_node=pickup_node)
# NOTE(review): this whole block is work-in-progress and does not parse:
#   * `def CozmoWarehouseWorker:` should almost certainly be `class CozmoWarehouseWorker:`
#   * `await` is used inside a plain (non-async) `__init__`, which is illegal
#   * `Pose(x=3, y=4.5, 0, ...)` passes a positional argument after keyword
#     arguments (SyntaxError); presumably `z=0` was intended
#   * several methods reference bare `grid` / `pf` / helper names instead of
#     `self.grid` / `self.pf` — confirm intent before fixing
def CozmoWarehouseWorker:
    def __init__(self, robot:cozmo.robot.Robot, current_arena_pose):
        # Remember where we believe we are in the arena and the raw robot pose
        # at that moment, so later odometry can be computed as a delta.
        self.current_arena_pose = current_arena_pose
        self.last_robot_pose = robot.pose
        self.robot = robot
        # start streaming
        await robot.set_head_angle(degrees(3)).wait_for_completed()
        robot.camera.image_stream_enabled = True
        robot.camera.color_image_enabled = False
        robot.camera.enable_auto_exposure()
        # Obtain the camera intrinsics matrix
        fx, fy = robot.camera.config.focal_length.x_y
        cx, cy = robot.camera.config.center.x_y
        self.camera_settings = np.array([
            [fx, 0, cx],
            [ 0, fy, cy],
            [ 0, 0, 1]
        ], dtype=np.float)
        # World model: grid map, particle filter over it, and a debug GUI.
        self.grid = CozGrid("map_arena.json")
        self.pf = ParticleFilter(self.grid)
        self.gui = GUIWindow(self.grid, show_camera=True)
        # Waypoint lists (arena inches) for the two delivery routes.
        self.drop_off_directions = [Pose(x=3, y=4.5, 0, angle_z=degrees(0)), Pose(x=21.75, y=4.5, 0, angle_z=degrees(90)), Pose(x=21.75, y=13.75, 0, angle_z=degrees(90))]
        self.pick_up_directions = [Pose(x=21.75, y=4.5, 0, angle_z=degrees(90)), Pose(x=3, y=4.5, 0, angle_z=degrees(0)), Pose(x=4.5, y=20, 0, angle_z=degrees(90))]
    # NOTE(review): missing `self` parameter but the body uses `self.robot`.
    async def execute_directions(directions):
        """Drive a Manhattan (turn, X leg, turn, Y leg) path given by *directions*."""
        print("Robot is at: ", self.robot.pose)
        await self.robot.turn_in_place(angle=directions.rotation.angle_z).wait_for_completed()
        print("ROBOT is at AFTER TURNING to be parallel to X: ", self.robot.pose)
        await self.robot.drive_straight(distance=distance_mm(directions.position.x * grid.scale), speed=speed_mmps(80)).wait_for_completed()
        print("ROBOT is at AFTER DRIVING in the X direction: ", self.robot.pose)
        await self.robot.turn_in_place(angle=degrees(90)).wait_for_completed()
        print("ROBOT is at AFTER TURNING to be parallel to Y: ", self.robot.pose)
        await self.robot.drive_straight(distance=distance_mm(directions.position.y * grid.scale), speed=speed_mmps(80)).wait_for_completed()
        print("ROBOT is at AFTER DRIVING in the Y direction: ", self.robot.pose)
    async def localize(self):
        """Spin in place, feeding odometry + marker observations to the particle
        filter until it reports a confident pose estimate."""
        # reset our location estimates
        conf = False
        self.current_arena_pose = Pose(0,0,0,angle_z=degrees(0))
        self.pf = ParticleFilter(grid)
        # reset lift and head
        await self.robot.set_lift_height(0.0).wait_for_completed()
        await self.robot.set_head_angle(degrees(3)).wait_for_completed()
        while not conf:
            # move a little
            self.last_robot_pose = self.robot.pose
            await self.robot.turn_in_place(angle=degrees(20)).wait_for_completed()
            odometry = compute_odometry()
            detected_markers, camera_image = await marker_processing()
            # update, motion, and measurment with the odometry and marker data
            curr_x, curr_y, curr_h, conf = pf.update(odometry, detected_markers)
            # update gui
            self.gui.show_particles(self.pf.particles)
            self.gui.show_mean(curr_x, curr_y, curr_h)
            self.gui.show_camera_image(camera_image)
            self.gui.updated.set()
        self.current_arena_pose = Pose(curr_x , curr_y, 0, angle_z=Angle(degrees=curr_h))
    def compute_odometry(self, cvt_inch=True):
        '''
        Compute the odometry given the current pose of the robot (use robot.pose)
        Input:
            - curr_pose: a cozmo.robot.Pose representing the robot's current location
            - cvt_inch: converts the odometry into grid units
        Returns:
            - 3-tuple (dx, dy, dh) representing the odometry
        '''
        last_x, last_y, last_h = self.last_robot_pose.position.x, self.last_robot_pose.position.y, \
            self.last_robot_pose.rotation.angle_z.degrees
        curr_x, curr_y, curr_h = self.robot.pose.position.x, self.robot.pose.position.y, \
            self.robot.pose.rotation.angle_z.degrees
        # Express the world-frame displacement in the previous robot frame.
        dx, dy = rotate_point(curr_x-last_x, curr_y-last_y, -last_h)
        if cvt_inch:
            dx, dy = dx / grid.scale, dy / grid.scale
        return (dx, dy, diff_heading_deg(curr_h, last_h))
    async def marker_processing(self, show_diagnostic_image=False):
        '''
        Obtain the visible markers from the current frame from Cozmo's camera.
        Since this is an async function, it must be called using await, for example:
            markers, camera_image = await marker_processing(robot, camera_settings, show_diagnostic_image=False)
        Input:
            - robot: cozmo.robot.Robot object
            - camera_settings: 3x3 matrix representing the camera calibration settings
            - show_diagnostic_image: if True, shows what the marker detector sees after processing
        Returns:
            - a list of detected markers, each being a 3-tuple (rx, ry, rh)
              (as expected by the particle filter's measurement update)
            - a PIL Image of what Cozmo's camera sees with marker annotations
        '''
        # Wait for the latest image from Cozmo
        image_event = await self.robot.world.wait_for(cozmo.camera.EvtNewRawCameraImage, timeout=30)
        # Convert the image to grayscale
        image = np.array(image_event.image)
        image = color.rgb2gray(image)
        # Detect the markers
        markers, diag = detect.detect_markers(image, self.camera_settings, include_diagnostics=True)
        # Measured marker list for the particle filter, scaled by the grid scale
        marker_list = [marker['xyh'] for marker in markers]
        marker_list = [(x/self.grid.scale, y/self.grid.scale, h) for x,y,h in marker_list]
        # Annotate the camera image with the markers
        if not show_diagnostic_image:
            annotated_image = image_event.image.resize((image.shape[1] * 2, image.shape[0] * 2))
            annotator.annotate_markers(annotated_image, markers, scale=2)
        else:
            diag_image = color.gray2rgb(diag['filtered_image'])
            diag_image = Image.fromarray(np.uint8(diag_image * 255)).resize((image.shape[1] * 2, image.shape[0] * 2))
            annotator.annotate_markers(diag_image, markers, scale=2)
            annotated_image = diag_image
        return marker_list, annotated_image
class CozmoThread(threading.Thread):
    """Worker thread that hosts the Cozmo SDK program loop."""

    def __init__(self):
        # Non-daemon so the process stays alive while the robot program runs.
        super().__init__(daemon=False)

    def run(self):
        # Keep Cozmo on his charger when connecting.
        cozmo.robot.Robot.drive_off_charger_on_connect = False
        cozmo.run_program(run, use_viewer=False)
if __name__ == '__main__':
    # cozmo thread
    cozmo_thread = CozmoThread()
    cozmo_thread.start()
    # init
    # NOTE(review): `gui` and `pf` are not defined at module level in this
    # chunk; as written these three lines would raise NameError. They look
    # copied from an earlier revision where the GUI and filter were globals.
    gui.show_particles(pf.particles)
    gui.show_mean(0, 0, 0)
    gui.start()
"josh@lawn-143-215-110-217.lawn.gatech.edu"
] | josh@lawn-143-215-110-217.lawn.gatech.edu |
eabb7322a1743993a880d4cad03011ddb2915ea7 | 5f57c3b3e62598a810696eebfe1a5b637dcd45a3 | /Custom calling.py | 7f1db2abdf7e348da2e7517dfbaf870bbbece706 | [] | no_license | k1k2k311/HLA-combined | 37d3f276a67dd8b831f4f2e6f155aedc6ac24201 | eb433aaa76ddda3b4883b8f80dd3d6214ca84b57 | refs/heads/main | 2023-07-01T00:33:26.427104 | 2021-07-03T07:54:31 | 2021-07-03T07:54:31 | 381,877,700 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,542 | py | # "data" format should be an allele list of results from various tools [2420,3303,2420,4404]
# "hla" should be either "A","B" or "C" (hla type)
# model_acc.csv = ( 100- accuracy rate) %
# Searching most frequent value
def frequent(list):
    """Return the most frequent value in *list*.

    Ties are broken in favour of the first-seen value, matching the original
    "strictly greater count wins" loop.  Raises IndexError on an empty list
    (same as the original, which indexed element 0 up front).

    NOTE: the parameter name shadows the builtin ``list``; it is kept only
    for signature compatibility.
    """
    from collections import Counter
    # Counter.most_common orders equal counts by first insertion, which
    # reproduces the original first-occurrence tie-breaking.
    return Counter(list).most_common(1)[0][0]
# Finding most frequent value location
def find_index(data, target):
    """Return every index at which *target* occurs in *data* (possibly empty)."""
    return [idx for idx, value in enumerate(data) if value == target]
# List of possible model combinations
def comb(x):
    """Return all unordered pairs drawn from *x*, as a list of 2-tuples."""
    import itertools
    return [pair for pair in itertools.combinations(x, 2)]
# Counting accuracy of models
import csv
def acc(x,y):
    """Look up model accuracy values from model_acc.csv.

    :param x: iterable of 1-based model indices (rows of the CSV, after header)
    :param y: HLA locus, "a"/"b"/"c" (case-insensitive), selecting the column
    :return: list of accuracy values (as strings); if *y* is not a known
             locus the result is empty (every index is skipped).
    """
    # Use a context manager so the CSV file handle is closed; the original
    # passed an open() result straight to csv.reader and leaked it.
    with open("model_acc.csv", "rt", encoding='UTF8') as fh:
        content = list(csv.reader(fh))
    content.pop(0)  # drop the header row
    parseContent = []
    for target in x:
        if (y.lower() == "a"):
            yIdx = 1
        elif (y.lower() == "b"):
            yIdx = 2
        elif (y.lower() == "c"):
            yIdx = 3
        else:
            continue
        parseContent.append(content[target - 1][yIdx])
    return parseContent
# Correlation value calling
def cor(x):
    """Look up correlation scores from cor_score.csv.

    :param x: iterable of (row, column) index pairs; row is 0-based after the
              header, column is offset by one because column 0 holds the label
    :return: list of score values as strings
    """
    # Context manager closes the file handle (the original leaked it).
    with open("cor_score.csv", "rt", encoding='UTF8') as fh:
        content = list(csv.reader(fh))
    content.pop(0)  # drop the header row
    parseContent = []
    for target in x:
        parseContent.append(content[target[0]][target[1]+1])
    return parseContent
# Multipy values on the list
from functools import reduce
def multiply(a):
    """Return the product of the values in *a* (1 for an empty sequence).

    The original reduce() without an initializer raised TypeError on an
    empty input; supplying the multiplicative identity makes the empty
    case well-defined without changing any non-empty result.
    """
    return reduce(lambda x, y: x * y, a, 1)
# Calculation Weight
def calw(data,hla):
    """Pairwise-correlation weight of the models agreeing on the top allele.

    Mean correlation score over all pairs of agreeing models, raised to the
    number of pairs.  *hla* is unused but kept for signature symmetry with
    calt/calc.
    """
    # The original evaluated cor(comb(find_index(...))) three separate times,
    # re-reading the CSV each time; compute the score list once and reuse it.
    scores = list(map(float, cor(comb(find_index(data, frequent(data))))))
    weight = sum(scores) / len(scores)
    return weight ** len(scores)
#Multiplying models' accuracies
def calt(data, hla):
    """Product of the accuracy values of every model that voted for the winner."""
    winners = find_index(data, frequent(data))
    return multiply([float(v) for v in acc(winners, hla)])
# Calculation accuracy
def calc(data, hla):
    """Combined mis-call likelihood: correlation weight times accuracy product."""
    return calw(data, hla) * calt(data, hla)
# Calling / you can set up threshold of percentage of acccuracy here, default:98%
def call(data, hla):
    """Print the consensus allele when confident enough (default threshold
    0.02, i.e. ~98% accuracy); otherwise recommend PCR-SBT typing."""
    confident = calc(data, hla) < 0.02
    if confident:
        print(frequent(data))
    else:
        print('Recommended PCR-SBT')
| [
"noreply@github.com"
] | noreply@github.com |
59d74395b97e83d72ba4ba294b668e324f777e81 | c24e122c5f39896074a11664f521bc0258068675 | /src/Utils/ProcessCommunicator.py | 1af5f2e8a533a4816edbf7d3bf6d4eb1c3d355cf | [] | no_license | sjanibekova/selector_explorer_windows_desktop | 51c61adfdb7ca94e3e3eedab49bff1e9bf950027 | 04c1d47b7af23ac4cba3442fb736a90b827998f0 | refs/heads/main | 2023-07-08T20:04:10.448805 | 2021-08-12T11:18:57 | 2021-08-12T11:18:57 | 393,403,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,899 | py | import json
import zlib
import sys
from . import JSONNormalize
############################################
####Inter-process communication
############################################
#ProcessParentReadWaitString
def ProcessParentReadWaitString():
    """Block until one message line arrives on stdin and return it as text.

    Counterpart of ProcessChildSendString: undoes its {{n}} escaping,
    strips the trailing newline, decompresses and decodes the payload.
    """
    # Read one line from the parent pipe
    #ctypes.windll.user32.MessageBoxW(0, "Hello", "Your title", 1)
    lResult = sys.stdin.buffer.readline()
    # Restore any escaped \n bytes
    lResult = lResult.replace(b'{{n}}',b'\n')
    # Drop the trailing newline delimiter, then decompress and decode
    lResult = zlib.decompress(lResult[0:-1])
    lResult = lResult.decode("utf-8")
    # Return the result
    return lResult
#ParentProcessWriteString
def ProcessParentWriteString(lString):
    """Compress *lString*, escape control bytes, and write it to stdout
    as a single newline-terminated message for the parent process.

    Counterpart of ProcessChildReadWaitString, which undoes the {{{x}}}
    escape sequences below.
    """
    lByteString = zlib.compress(lString.encode("utf-8"))
    # Send the string to the parent process.
    # Escape any \n produced by compression
    lByteString = lByteString.replace(b'\n',b'{{{n}}}')
    # Escape \r
    lByteString = lByteString.replace(b'\r',b'{{{r}}}')
    # Escape \0
    lByteString = lByteString.replace(b'\0',b'{{{0}}}')
    # Escape \a
    lByteString = lByteString.replace(b'\a',b'{{{a}}}')
    # Escape \b
    lByteString = lByteString.replace(b'\b',b'{{{b}}}')
    # Escape \t
    lByteString = lByteString.replace(b'\t',b'{{{t}}}')
    # Escape \v
    lByteString = lByteString.replace(b'\v',b'{{{v}}}')
    # Escape \f
    lByteString = lByteString.replace(b'\f',b'{{{f}}}')
    # (removed: commented-out debug code that dumped sample escaped payloads
    # to logSendByteStringWithoutN.log)
    sys.stdout.buffer.write(lByteString+bytes("\n","utf-8"))
    sys.stdout.flush();
    return
#ProcessParentWriteObject
def ProcessParentWriteObject(inObject):
    """Normalise *inObject* in place, encode it as JSON and send it to the parent."""
    # Normalisation makes the structure JSON-serialisable before dumping.
    JSONNormalize.JSONNormalizeDictList(inObject)
    ProcessParentWriteString(json.dumps(inObject))
#ProcessParentReadWaitObject
def ProcessParentReadWaitObject():
    """Receive one JSON message from the parent pipe and return the parsed object."""
    return json.loads(ProcessParentReadWaitString())
#ProcessChildSendString
def ProcessChildSendString(lProcess,lString):
    """Compress *lString* and write it to *lProcess*'s stdin as one
    newline-terminated message.

    NOTE(review): this direction escapes only \n (double-brace {{n}}),
    whereas the child->parent direction escapes eight control bytes with
    triple braces — the asymmetry is intentional-looking but unverified.
    """
    lByteString = zlib.compress(lString.encode("utf-8"))
    # Escape any \n produced by compression
    lByteString = lByteString.replace(b'\n',b'{{n}}')
    # Send the message to the child process
    lProcess.stdin.write(lByteString+bytes('\n',"utf-8"))
    lProcess.stdin.flush()
    # Return
    return
#ProcessChildReadWaitString
def ProcessChildReadWaitString(lProcess):
    """Block until the child process replies, un-escape and decompress the
    payload, and return it as text.

    Raises a generic Exception carrying the child's stderr output if the
    payload cannot be decompressed (typically the child crashed).
    """
    # Wait for a reply line from the child process
    #pdb.set_trace()
    lResult = lProcess.stdout.readline()
    # Undo the escaping of special characters
    # Restore any \n
    lResult = lResult.replace(b'{{{n}}}',b'\n')
    # Restore \r
    lResult = lResult.replace(b'{{{r}}}',b'\r')
    # Restore \0
    lResult = lResult.replace(b'{{{0}}}',b'\0')
    # Restore \a
    lResult = lResult.replace(b'{{{a}}}',b'\a')
    # Restore \b
    lResult = lResult.replace(b'{{{b}}}',b'\b')
    # Restore \t
    lResult = lResult.replace(b'{{{t}}}',b'\t')
    # Restore \v
    lResult = lResult.replace(b'{{{v}}}',b'\v')
    # Restore \f
    lResult = lResult.replace(b'{{{f}}}',b'\f')
    try:
        lResult = zlib.decompress(lResult[0:-1])
        lResult = lResult.decode("utf-8")
    except zlib.error as e:
        raise Exception(f"Exception from child process: {lProcess.stderr.read()}")
    # Return the result
    return lResult
#ProcessChildSendObject
def ProcessChildSendObject(inProcess, inObject):
    """Encode *inObject* as JSON and ship it to the child process."""
    payload = json.dumps(inObject)
    ProcessChildSendString(inProcess, payload)
#ProcessChildReadWaitObject
def ProcessChildReadWaitObject(inProcess):
    """Read one JSON reply from the child process and return the parsed object."""
    return json.loads(ProcessChildReadWaitString(inProcess))
#ProcessChildSendReadWaitString
def ProcessChildSendReadWaitString(lProcess, lString):
    """Send *lString* to the child, then block for and return its raw reply."""
    ProcessChildSendString(lProcess, lString)
    return ProcessChildReadWaitString(lProcess)
#ProcessChildSendReadWaitObject
def ProcessChildSendReadWaitObject(inProcess,inObject):
    """Send *inObject* as JSON to the child and wait for its reply.

    NOTE(review): despite the *Object* suffix this returns the raw reply
    STRING (ProcessChildReadWaitString), not a parsed object — compare
    ProcessChildSendReadWaitQueueObject, which parses replies with
    ProcessChildReadWaitObject. Likely a bug, but "fixing" it would change
    the return type for any existing callers, so it is only flagged here.
    """
    ProcessChildSendObject(inProcess,inObject)
    # Return the result
    return ProcessChildReadWaitString(inProcess)
#ProcessChildSendReadWaitQueue
#QueueObject - [Object,Object,...]
def ProcessChildSendReadWaitQueueObject(inProcess,inQueueObject):
    """Send each object in *inQueueObject* to the child in turn, waiting for
    a parsed JSON reply after each one, and return the list of replies."""
    lOutputObject=[]
    # Send the requests to the child process one at a time
    for lItem in inQueueObject:
        # Send the request to the child process
        ProcessChildSendObject(inProcess,lItem)
        # Wait for the child's reply
        lResponseObject=ProcessChildReadWaitObject(inProcess)
        # Append it to the output list
        lOutputObject.append(lResponseObject)
    # Return the collected replies
    return lOutputObject
| [
"sdzhanybekova@seikolabs.kz"
] | sdzhanybekova@seikolabs.kz |
e25ef82fd5e2f90d8835e797672d42b0e20ae461 | b62f99db3bccf932aaa4885257ecc3d30596df59 | /OFDM/PAPR reduction/PAPR (single carrier).py | 381df597e0f90ecdacb2386734d19512f573b9aa | [] | no_license | ganlubbq/communication-simulation | 00cb36ae0f3dfc3ea042dfcb77da0795d5140ad7 | 4dfbfc229ff1ac3bcfaa6b32db958c35a7a5e6d8 | refs/heads/master | 2021-07-26T17:23:47.212096 | 2017-11-07T10:27:48 | 2017-11-07T10:27:48 | 110,319,362 | 1 | 1 | null | 2017-11-11T04:59:23 | 2017-11-11T04:59:22 | null | UTF-8 | Python | false | false | 6,046 | py | import numpy as np
import matplotlib.pyplot as plt
Ts = 1      # symbol/sampling period: the transmitter emits one symbol every Ts seconds
L = 8       # oversampling factor
Fs = 1/Ts * L   # sampling frequency (times L because of the oversampling)
Fc = 1      # carrier frequency
# For each modulation scheme, build the oversampled baseband signal and the
# passband signal, compute their PAPR (peak-to-average power ratio) and plot.
for k in range(3):
    if k == 0:
        constellation = [-1+0j, 1+0j]
        constellation_name = 'BPSK'
    elif k == 1:
        constellation = [-1-1j, -1+1j, 1-1j, 1+1j]
        constellation_name = 'QPSK'
    elif k == 2:
        constellation = [1 + 1j, 1 + 3j, 3 + 1j, 3 + 3j, -1 + 1j, -1 + 3j, -3 + 1j, -3 + 3j, -1 - 1j, -1 - 3j, -3 - 1j, -3 - 3j, 1 - 1j, 1 - 3j, 3 - 1j, 3 - 3j]
        constellation_name = '16QAM'


    N_symbol = len(constellation)  # number of symbols the transmitter will send
    n_time = [0]*(L*N_symbol)      # oversampled (discrete) time instants
    for m in range(len(n_time)):
        n_time[m] = m * Ts/L
    t_time = [0]*(L*N_symbol*50)   # dense time grid approximating continuous time
    for m in range(len(t_time)):
        t_time[m] = m * Ts/(L*50)

    # First build the discrete-time sequence of the baseband signal
    symbol_sequence = constellation[:] # assume the transmitter sends every constellation symbol once; this discrete sequence is symbol_sequence
    s = [0]*(L*N_symbol)               # s is the oversampled version of the continuous-time baseband signal
    for m in range(len(symbol_sequence)):
        for n in range(L):             # L is the oversampling factor (how many samples per symbol)
            s[m*L + n] = symbol_sequence[m]

    s_power = [0]*(L*N_symbol)  # magnitude squared of every sample of s, i.e. per-sample power
    for m in range(len(s)):
        s_power[m] = abs(s[m])**2

    # Finally compute the PAPR of s:
    # first the average power,
    # finding the peak power along the way
    avg_power = 0
    peak_power = 0
    for m in range(len(s_power)):
        avg_power += s_power[m]
        if s_power[m] > peak_power:
            peak_power = s_power[m]
    avg_power /= len(s_power)
    PAPR = peak_power / avg_power
    PAPR_dB = 10*np.log10(PAPR)

    s_real = [0]*len(s)  # real part of the oversampled signal s
    s_imag = [0]*len(s)  # imaginary part of the oversampled signal s
    for m in range(len(s)):
        s_real[m] = s[m].real
        s_imag[m] = s[m].imag

    plt.figure(constellation_name)
    plt.subplot(3,2,1)
    markerline, stemlines, baseline = plt.stem(n_time, s_real, markerfmt=' ')
    plt.setp(baseline, 'color', 'k')  # draw the baseline in black
    plt.setp(stemlines, 'color', 'k') # draw the stems in black
    plt.title('{0}, {1} symbols, Ts={2}s, Fs={3}Hz, L={4}'.format(constellation_name, N_symbol, Ts, Fs, L))
    plt.ylabel(r'$\~s_I[n]$')
    plt.subplot(3,2,3)
    markerline, stemlines, baseline = plt.stem(n_time, s_imag, markerfmt=' ')
    plt.setp(baseline, 'color', 'k')  # draw the baseline in black
    plt.setp(stemlines, 'color', 'k') # draw the stems in black
    plt.ylabel(r'$\~s_Q[n]$')
    plt.subplot(3,2,5)
    markerline, stemlines, baseline = plt.stem(n_time, s_power, markerfmt=' ')
    plt.setp(baseline, 'color', 'k')  # draw the baseline in black
    plt.setp(stemlines, 'color', 'k') # draw the stems in black
    plt.title('PAPR={0:.3F}dB'.format(PAPR_dB))
    plt.xlabel('time(s)\nbaseband signal')
    plt.ylabel(r'$|\~s_I[n]|^2+|\~s_Q[n]|^2$')

    # Next build the passband signal: its continuous-time waveform and the
    # discrete-time sequence obtained by oversampling it.
    # The "continuous" signal is still discrete in simulation, just sampled
    # densely enough that it looks continuous.
    continuous_s = [0]*len(t_time)
    p = 0
    for m in range(len(symbol_sequence)):
        for n in range(len(t_time) // len(symbol_sequence)):
            continuous_s[p] = ( symbol_sequence[m] * np.exp(1j * 2*np.pi * Fc * t_time[p]) ).real
            p += 1
    # Discrete-time sequence obtained by oversampling the continuous signal
    discrete_s = [0]*len(n_time)
    p = 0
    for m in range(len(symbol_sequence)):
        for n in range(len(n_time) // len(symbol_sequence)):
            discrete_s[p] = ( symbol_sequence[m] * np.exp(1j * 2*np.pi * Fc * n_time[p]) ).real
            p += 1

    # Square every point of both signals to get the instantaneous power
    continuous_s_power = [0]*len(t_time)
    discrete_s_power = [0]*len(n_time)
    for m in range(len(continuous_s_power)):
        continuous_s_power[m] = abs(continuous_s[m])**2
    for m in range(len(discrete_s_power)):
        discrete_s_power[m] = abs(discrete_s[m])**2

    # Finally compute the passband PAPR from the discrete-time sequence:
    # first the average power,
    # finding the peak power along the way
    avg_power = 0
    peak_power = 0
    for m in range(len(discrete_s_power)):
        avg_power += discrete_s_power[m]
        if discrete_s_power[m] > peak_power:
            peak_power = discrete_s_power[m]
    avg_power /= len(discrete_s_power)
    PAPR = peak_power / avg_power
    PAPR_dB = 10 * np.log10(PAPR)

    plt.figure(constellation_name)
    plt.subplot(2, 2, 2)
    plt.plot(t_time, continuous_s, color='red', linestyle='--')
    markerline, stemlines, baseline = plt.stem(n_time, discrete_s, markerfmt=' ')
    plt.setp(baseline, 'color', 'k')  # draw the baseline in black
    plt.setp(stemlines, 'color', 'k') # draw the stems in black
    plt.title('{0}, {1} symbols, Ts={2}s, Fs={3}Hz, L={4}'.format(constellation_name, N_symbol, Ts, Fs, L))
    plt.ylabel(r'$s[n]$')
    plt.subplot(2,2,4)
    plt.plot(t_time, continuous_s_power, color='red', linestyle='--')
    markerline, stemlines, baseline = plt.stem(n_time, discrete_s_power, markerfmt=' ')
    plt.setp(baseline, 'color', 'k')  # draw the baseline in black
    plt.setp(stemlines, 'color', 'k') # draw the stems in black
    plt.title('PAPR={0:.3F}dB'.format(PAPR_dB))
    plt.xlabel('time (s)\npassband signal')
    plt.ylabel(r'$|s[n]|^2$')

plt.show()
| [
"a5083a5083@gmail.com"
] | a5083a5083@gmail.com |
b6ce5d0d5ecec1d02a42b40f0d1e9f4ca9fab2e9 | dd8bf0fa2f8b0dda9e91790b066bfd8e419bbb86 | /candidate/views.py | 8bcbf0dd9b5e147cc07fde56d8a2575481bad1a0 | [] | no_license | devanshbhatia26/CVPrioritize | 3a73518d8fbbdf81d744ca3673f73322d7215e94 | d86ae12498e803c91933f0d7be0c3b2e54c599ca | refs/heads/master | 2023-02-18T14:57:20.979087 | 2018-08-09T10:58:32 | 2018-08-09T10:58:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,411 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
import json
from django.shortcuts import render, reverse
from django.http import HttpResponse, HttpResponseRedirect
from .models import Candidate, UploadFileModel, Application
from .forms import EditDetails, UploadFile
from django.utils import timezone
from cvscan.cli.cli import parse
from os.path import splitext
from .Calc_Score import score_calculation
def totimestamp(dt, epoch=datetime(1970,1,1)):
    """Return *dt* as integer seconds since *epoch* (Unix time by default)."""
    td = dt - epoch
    # 10**6 microseconds per second. The original wrote 10*6 (== 60), which
    # inflated the result whenever dt carried a microsecond component.
    return int((int(td.microseconds) + (int(td.seconds) + int(td.days) * 86400) * 10**6) / 10**6)
def index(request):
    """Landing page: show the CV upload form; on a valid POST, save the
    uploaded file under a timestamp-based name and redirect to editdetails."""
    if (request.method == 'POST'):
        form = UploadFile(request.POST, request.FILES)
        if form.is_valid():
            obj = UploadFileModel()
            # NOTE(review): the name hard-codes a .pdf extension — assumes
            # every upload is a PDF; confirm the form enforces that.
            filename = str(totimestamp(datetime.now())) + ".pdf"
            obj.file.save(filename, form.cleaned_data['file'])
            return HttpResponseRedirect(reverse('editdetails', args=(obj.id,)))
    else:
        form = UploadFile()
    # Invalid POSTs fall through here with the bound form (shows errors).
    return render(request, 'candidate/index.html', {'form': form})
def editdetails(request, objId):
    """Review/confirm parsed CV details for upload *objId* (Python 2 view).

    GET: run the CV parser on the uploaded PDF and pre-fill the form.
    POST: persist a Candidate from the submitted form, score it, delete the
    temporary upload, and redirect home.
    """
    obj = UploadFileModel.objects.get(id=objId)
    if request.method == 'POST':
        form = EditDetails(request.POST)
        if form.is_valid():
            print "validated"
            obj = UploadFileModel.objects.get(id = objId)
            q = Candidate()
            q.name = request.POST['name']
            q.email = request.POST['email']
            q.address = request.POST['address']
            q.pincode = request.POST['pincode']
            q.experience = request.POST['experience']
            q.phone = request.POST['phone']
            q.skills = request.POST['skills']
            q.qualification = request.POST['qualification']
            q.cv_path = obj.file
            q.created_timestamp = timezone.now()
            q.save()
            score_calculation(q)
            # The temporary upload record is no longer needed once the
            # Candidate row owns the file reference.
            obj.delete()
            return HttpResponseRedirect(reverse('home'))
        else:
            print "Not Validated"
    else:
        # The [:-4] strips the ".pdf" suffix before handing the path to the
        # cvscan parser; json.loads' second argument is the Py2 encoding.
        result = json.loads(parse(str(obj.file.url)[:-4]),'utf-8')
        print result
        email = ""
        if result["emails"]:
            email = result["emails"][0]
        qual = ""
        if result["qualifications"]:
            qual = ", " .join(result["qualifications"])
        skill = ""
        if result["skills"]:
            skill = ", ".join(result["skills"])
        # Bucket the raw years-of-experience number into the form's ranges.
        exp = result["experience"]
        if exp in [0,1,2]:
            exp = '0-2'
        elif exp in [3,4,5]:
            exp = '3-5'
        elif exp in [6,7,8]:
            exp= '6-8'
        elif exp in [9,10,11,12]:
            exp = '9-12'
        elif exp in [13,14,15]:
            exp = '13-15'
        else:
            exp = '15+'
        name = ""
        if result["name"]!="Applicant name couldn't be processed":
            name = result["name"]
        form = EditDetails(initial = {
            'name' : name,
            'email' : email,
            'phone' : result["phone_numbers"],
            'address' : result["address"]["district"] +", "+ result["address"]["state"],
            'pincode' : result["address"]["pincode"],
            'qualification' : qual,
            'skills' : skill,
            'experience' : exp
        })
    return render(request, 'candidate/editdetails.html', {'form': form})
| [
"dbhrockzz1@gmail.com"
] | dbhrockzz1@gmail.com |
873fd33b792017d4797bb0d1acbb046e82beacde | 26f8a8782a03693905a2d1eef69a5b9f37a07cce | /test/test_destiny_historical_stats_destiny_historical_stats_period_group.py | 54f3aa5d3731b9a1cb0a50764667212af0aef180 | [] | no_license | roscroft/openapi3-swagger | 60975db806095fe9eba6d9d800b96f2feee99a5b | d1c659c7f301dcfee97ab30ba9db0f2506f4e95d | refs/heads/master | 2021-06-27T13:20:53.767130 | 2017-08-31T17:09:40 | 2017-08-31T17:09:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,391 | py | # coding: utf-8
"""
Bungie.Net API
These endpoints constitute the functionality exposed by Bungie.net, both for more traditional website functionality and for connectivity to Bungie video games and their related functionality.
OpenAPI spec version: 2.0.0
Contact: support@bungie.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.destiny_historical_stats_destiny_historical_stats_period_group import DestinyHistoricalStatsDestinyHistoricalStatsPeriodGroup
class TestDestinyHistoricalStatsDestinyHistoricalStatsPeriodGroup(unittest.TestCase):
    """ DestinyHistoricalStatsDestinyHistoricalStatsPeriodGroup unit test stubs """

    def setUp(self):
        # Generated stub: no fixtures required yet.
        pass

    def tearDown(self):
        # Generated stub: nothing to clean up.
        pass

    def testDestinyHistoricalStatsDestinyHistoricalStatsPeriodGroup(self):
        """
        Test DestinyHistoricalStatsDestinyHistoricalStatsPeriodGroup
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = swagger_client.models.destiny_historical_stats_destiny_historical_stats_period_group.DestinyHistoricalStatsDestinyHistoricalStatsPeriodGroup()
        pass
if __name__ == '__main__':
unittest.main()
| [
"adherrling@gmail.com"
] | adherrling@gmail.com |
55f6a7da71eeddea31816626e8b0ee0545b8b523 | 65d165179b1a497fcb654e69ab3d4830fcec0644 | /cogs/owner.py | 316c82881895d1c65cffc94fdb89b0251be4ab57 | [] | no_license | Haz1118/Discord_self_Bot | 04addbb3169621b3b3c13c7d09a5455121e107ee | 5101e8b1d8857f1af4ea21f02c7ab289ef087706 | refs/heads/master | 2021-04-08T13:36:21.996359 | 2020-03-20T15:06:40 | 2020-03-20T15:06:40 | 248,780,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | import discord
from discord.ext import commands
import asyncio
class Owner(commands.Cog):
    """Guild-management commands (requires the Manage Messages permission)."""
    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    @commands.has_permissions(manage_messages=True)
    @commands.guild_only()
    async def 청소(self, ctx, num: int =1):
        # "청소" = clean-up: purge `num` messages plus the invoking command.
        await ctx.channel.purge(limit=num + 1)
        await ctx.send("청소완료!")
        # Let the confirmation message show for a second, then delete it too.
        await asyncio.sleep(1)
        await ctx.channel.purge(limit=1)
def setup(bot):
    # discord.py extension entry point: register the cog when loaded.
    bot.add_cog(Owner(bot))
"noreply@github.com"
] | noreply@github.com |
c3e597348ecd704038d52109bd25c04c2baf9da0 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc003/D/4547211.py | 6a0acb9b36e11c97948531a48a505d78d41e9f86 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | mod = 10**9 + 7
# Modulus restated here (it is also assigned on the preceding line of the
# file) so these combinatorics helpers are self-contained.
mod = 10**9 + 7

def powmod(x, n):
    """Return x**n modulo `mod` for non-negative n (built-in 3-arg pow)."""
    return pow(x, n, mod)

# fact[i] = i! mod `mod`, revfact[i] = (i!)^-1 mod `mod`, filled by setfact.
fact = [1 for _ in range(1000)]
revfact = [1 for _ in range(1000)]

def setfact(n):
    """Precompute fact[0..n] and revfact[0..n] modulo `mod`."""
    for i in range(n):
        fact[i+1] = fact[i] * (i+1) % mod
    # Fermat inverse of n!, then walk the inverses downwards.
    revfact[n] = powmod(fact[n], mod-2)
    for i in range(n):
        # FIXED: the original reduced revfact[i] instead of the entry just
        # written (revfact[n-i-1]), letting intermediate values grow to huge
        # big-ints before being reduced; results were congruent but slow.
        revfact[n-i-1] = revfact[n-i] * (n-i) % mod
    return

def getC(n, r):
    """Binomial coefficient C(n, r) modulo `mod`; 0 when n < r."""
    if n < r: return 0
    return fact[n] * revfact[r] % mod * revfact[n-r] % mod
# Read the grid size, the bounding-box size of the pattern, and the counts
# of the two tile kinds.
r, c = map(int, input().split())
x, y = map(int, input().split())
d, l = map(int, input().split())
setfact(x*y)
num = 0
# Looks like inclusion-exclusion over the four borders of the x*y window:
# each bit of i shrinks one dimension, i.e. forbids one border line.
for i in range(1, 2**4):
    txy = [x, y]
    cnt = 0
    for j in range(4):
        if (i>>j)&1:
            txy[j%2] -= 1
            cnt += 1
    if txy[0] > 0 and txy[1] > 0:
        # cnt%2*2-1 is the alternating sign (+1 for odd subsets, -1 for even).
        num += (cnt%2*2-1) * getC(txy[0]*txy[1], d+l) % mod
# (window placements) * (fillings touching all borders) * (choices of which
# of the d+l occupied cells are of the first kind), all modulo `mod`.
print((r-x+1) * (c-y+1) % mod * (getC(x*y, d+l) - num) % mod * getC(d+l, d) % mod)
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
f740a1d550dbe236f23cb79de7deffe6e5213387 | a2195e150561bfe5c025c3883be144aa173f36a8 | /pyimagesearch/nn/perceptron.py | 324afe4622b9f39c505b036fb4e3a5e060eaf058 | [] | no_license | s0lver/ann-katas | 0aab3f2a1396dd10a696df60c49feebcc174b632 | cccb605f292cb6a487c86efaa2b1a01eafe76830 | refs/heads/master | 2020-03-25T01:07:19.041950 | 2018-08-13T02:42:09 | 2018-08-13T02:42:09 | 143,222,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,886 | py | import numpy as np
class Perceptron:
    """Classic Rosenblatt perceptron: one linear unit with a step activation."""

    def __init__(self, input_dimensions, alpha=0.1):
        """Initialise the weight vector (plus a bias entry) and learning rate.

        :param input_dimensions: number of input features
        :param alpha: learning rate used by the perceptron update rule
        """
        # One extra weight for the bias term; scaled by sqrt(N) to keep the
        # initial magnitudes modest.
        self.W = np.random.randn(input_dimensions + 1) / np.sqrt(input_dimensions)
        self.alpha = alpha

    @staticmethod
    def step_function(x):
        """Step activation: 1 for strictly positive input, otherwise 0."""
        return 1 if x > 0 else 0

    def fit(self, X_train, y, epochs=10) -> None:
        """Train on (X_train, y) with the perceptron rule for *epochs* passes.

        :param X_train: feature matrix, one sample per row
        :param y: target labels (0/1), one per sample
        :param epochs: number of passes over the training set
        """
        # Prepend a constant-one column so the bias is learned like any weight.
        X_train = np.c_[np.ones((X_train.shape[0])), X_train]
        for _ in np.arange(0, epochs):
            for (sample, label) in zip(X_train, y):
                # sigma w_i * x_i, then threshold.
                prediction = self.step_function(np.dot(sample, self.W))
                if prediction != label:
                    # Nudge the weights against the signed error.
                    self.W += -self.alpha * (prediction - label) * sample

    def predict(self, X_test):
        """Return the step-activated output for *X_test* (a sample or row matrix)."""
        # Ensure a 2-D input, then add the same bias column used in training.
        X_test = np.atleast_2d(X_test)
        X_test = np.c_[np.ones((X_test.shape[0])), X_test]
        # Dot the features with the learned weights and threshold the result.
        return self.step_function(np.dot(X_test, self.W))
| [
"rperez.github@gmail.com"
] | rperez.github@gmail.com |
c29ff1701a3bfbca5682d464670a0183a3517f7b | 8882bfe78b3a6e5d022f81c86512b22f851d9dc8 | /tgflow/TgFlow.py | ffb115f18b4e9744371863b4ed3007956ddc5bbd | [
"MIT"
] | permissive | inexcode/tgflow | 5600fa4040d30157daf6d2ad5fe8d625ac64789d | e7bbd7df87e7a711c1b2924f3f2ae909fb2086c5 | refs/heads/master | 2020-03-29T10:19:39.405683 | 2018-09-21T18:13:30 | 2018-09-21T18:13:30 | 149,799,442 | 0 | 0 | null | 2018-09-21T17:59:23 | 2018-09-21T17:59:23 | null | UTF-8 | Python | false | false | 6,673 | py | #import telebot
import hashlib
from enum import Enum
from . import handles
from . import render
import pickle,time
from .api.tg import telegramAPI
import pprint
pp = pprint.PrettyPrinter(indent=4)
action = handles.action
api,key = None,None
def_state = None
def_data= None
States = {}
UI = {}
Data = {}
Actions = {}
Keyboards = {}
Reaction_triggers = {}
def read_sd(sf, df):
    """Load the persisted state and data dictionaries from pickle files.

    Args:
        sf: path to the pickled states file.
        df: path to the pickled data file.

    Returns:
        (states, data) tuple; a file that is empty or un-unpicklable yields
        an empty dict for that slot.

    Raises:
        FileNotFoundError: when either file does not exist (caller handles).
    """
    with open(sf, 'rb') as f:
        try:
            s = pickle.load(f)
        # Bug fix: a bare ``except:`` also swallowed KeyboardInterrupt and
        # SystemExit; only real errors should fall back to an empty dict.
        except Exception:
            s = {}
    with open(df, 'rb') as f:
        try:
            d = pickle.load(f)
        except Exception:
            d = {}
    return s, d
def save_sd(states, data):
    """Persist the states/data dictionaries to states.p / data.p in the CWD."""
    try:
        for filename, payload in (('states.p', states), ('data.p', data)):
            with open(filename, 'wb+') as fh:
                pickle.dump(payload, fh)
    except Exception as err:
        # Some user-supplied objects may not be picklable; report and go on.
        print('Non-picklable', str(err))
# Load any previously persisted states/data so conversations survive restarts;
# on first run the files do not exist yet and empty registries are kept.
try:
    States,Data = read_sd('states.p','data.p')
except FileNotFoundError:
    print("tgflow: creating data.p and states.p files")
def configure(token=None, state=None,
              apiModel=telegramAPI, data=None,
              group_id=None
              ):
    """Initialise tgflow's module-level wiring and build the API wrapper.

    Args:
        token: bot API token (required).
        state: default state assigned to users not seen before (required).
        apiModel: API wrapper class, instantiated with the token.
        data: default per-user data dict; a fresh ``{}`` when omitted.
        group_id: optional group id forwarded to the API wrapper
            (not used by the Telegram backend).

    Raises:
        Exception: when ``token`` or ``state`` is missing.
    """
    global def_state, def_data
    global api, key
    if not token:
        raise Exception("tgflow needs your bot token")
    if not state:
        raise Exception("tgflow needs a default state for new users")
    key = token
    def_state = state
    # Bug fix: the old mutable default ``data={}`` was a single dict shared
    # by every configure() call (and thus by every user falling back to
    # def_data); create a fresh dict per call instead.
    def_data = {} if data is None else data
    # create bot and assign handlers
    # Group Id is not used in telegram
    api = apiModel(key, group_id=group_id)
    api.set_message_handler(message_handler)
    api.set_callback_handler(callback_handler)
def start(ui):
    """Store the UI definition and begin polling for updates (blocking)."""
    global api, UI
    UI = ui
    print("tgflow: listening")
    try:
        api.start(none_stop=True)
    except Exception as exc:
        # Keep the process alive on transient polling failures; just report.
        print("tgflow:polling error", exc)
def get_file_link(file_id):
    """Build a direct download URL for a Telegram file id.

    NOTE(review): ``bot`` is not defined anywhere in this module, so calling
    this raises NameError -- presumably the lookup should go through ``api``
    once the TODO below is done; confirm against the api wrapper.
    """
    # TODO: implement this in api
    finfo = bot.get_file(file_id)
    l='https://api.telegram.org/file/bot%s/%s'%(
        key,finfo.file_path)
    return l
def message_handler(messages):
    """Handle a batch of incoming chat messages.

    For each message: look up the sender's state, resolve an action either
    from the keyboard registry (key ``kb_<button text>``) or from the chat's
    reaction triggers, run the flow, and send the rendered reply.
    """
    global States,UI
    for msg in messages:
        s = States.get(msg.chat.id,def_state)
        print('tgflow: got message. State:'+str(s))
        # for security reasons need to hash. user can call every action in this state
        # key format: kb_+ButtonName
        a = Actions.get('kb_'+str(msg.text))
        if not a:
            # No keyboard action matched: fall back to this chat's reaction
            # triggers, firing on a set message attribute or the 'all' wildcard.
            if Reaction_triggers.get(msg.chat.id):
                for r,a_ in Reaction_triggers[msg.chat.id]:
                    if msg.__dict__.get(r):
                        a = a_
                    if r=='all':
                        a = a_
        d = Data.get(msg.chat.id,def_data)
        # following restriction is dictaded by telegram api
        # NOTE(review): rebinding ``messages`` (the loop's own source) to the
        # rendered reply looks accidental -- a distinct name was likely intended.
        messages = flow(a,s,d,msg,msg.chat.id)
        send(messages,msg.chat.id)
def callback_handler(call):
    """Handle an inline-button callback query.

    Resolves the action registered under ``call.data``, runs the flow, and
    either edits the originating message in place (when the action has
    ``update`` set) or sends the result as a new message.
    """
    s = States.get(call.message.chat.id,def_state)
    a = Actions.get(call.data)
    d = Data.get(call.message.chat.id,def_data)
    print("tgflow: got callback. State:",s)
    messages = flow(a,s,d,call,call.message.chat.id)
    if a:
        if not a.update:
            send(messages,call.message.chat.id)
        else:
            # Edit the original message rather than posting a new one.
            update(messages, call.message)
    else:
        # Every callback should map to a previously registered action.
        print("tgflow: Warning: no action found but should")
        send(messages,call.message.chat.id)
def gen_state_msg(i,ns,nd,_id,state_upd=True):
    """Render the UI for state ``ns`` for user ``_id`` and return the messages.

    Side effects (in order): runs the state's optional 'prepare' hook on the
    data, persists ``Data``/``States`` to disk, registers button and keyboard
    actions, registers/clears reaction triggers, then renders the UI.

    Args:
        i: the incoming update (message/callback) or None.
        ns: the (new) state whose UI should be rendered.
        nd: the user's data dict passed to rendering.
        _id: chat/user id the output is keyed by.
        state_upd: when True, also record ``ns`` as the user's current state.
    """
    pre_a = UI.get(ns).get('prepare')
    if pre_a:
        # call user-defined data preparations.
        print("tgflow: found a prep function, calling...")
        nd = pre_a(i,ns,**nd)
    args = {'s':ns,'d':nd}
    ui = render.prep(UI.get(ns),args)
    # saving data and state
    Data[_id] = nd
    if state_upd: States[_id] = ns
    save_sd(States,Data)
    # registering callback triggers on buttons
    save_iactions(ui.get('b'))
    save_kactions(ns,ui.get('kb'),ns,_id)
    print("tgflow: actions registered:\n",Actions)
    # registering reaction triggers
    rc = ui.get('react') or ui.get('react_to')
    if rc:
        trigs = Reaction_triggers.get(_id)
        if trigs:
            Reaction_triggers[_id].append((rc.react_to,rc))
        else:
            Reaction_triggers.update({_id:[(rc.react_to,rc)]})
        print("tgflow: reaction tgigger for %s registrated %s"%(str(_id),str(rc)))
    # clearing reaction triggers if needed
    rc = ui.get('clear_trig')
    if rc:
        print("tgflow: reaction trigger clear",rc)
        if Reaction_triggers.get(_id):
            # NOTE(review): this removes from the same list being iterated,
            # which can skip entries when several triggers match -- verify.
            for r,a_ in Reaction_triggers[_id]:
                #TODO: handle arrays of triggers
                if rc == r:
                    Reaction_triggers[_id].remove((r,a_))
        else:
            print("tgflow:WARN removing unset trigger",rc)
    # rendering message and buttons
    messages = render.render(ui)
    return messages
def send_state(ns, tg_id):
    """Render state ``ns`` for chat ``tg_id`` and deliver it immediately."""
    user_data = Data.get(tg_id, def_data)
    send(gen_state_msg(None, ns, user_data, tg_id), tg_id)
def flow(a, s, d, i, _id):
    """Apply action ``a`` (if any) to update ``i`` and render the next state.

    Returns the rendered messages for the state the user ends up in; when no
    action matched, the current state is simply re-rendered.
    """
    if not a:
        print('tgflow: no action found for message. %s unchanged' % s)
        return gen_state_msg(i, s, d, _id)
    next_state, next_data = a.call(i, s, **d)
    print('tgflow: called action:' + str(a))
    # Enum states are reported by name, everything else via str().
    if isinstance(s, Enum) and isinstance(next_state, Enum):
        print('tgflow: states change %s --> %s' % (s.name, next_state.name))
    else:
        print('tgflow: states change %s --> %s' % (s, next_state))
    return gen_state_msg(i, next_state, next_data, _id)
def get_state(id, s):
    # Stub: presumably intended to return the stored state for a user
    # (``States.get(id, s)``); not implemented yet -- confirm intent.
    pass
def save_iactions(ui):
    """Recursively register inline-button actions found in a rendered UI tree.

    Every ``action`` instance (at any nesting depth inside dicts/lists) is
    stored in the module-level ``Actions`` registry under its register key so
    callback queries can later be dispatched back to it.
    """
    if isinstance(ui, action):
        # TODO: assign actions to every user distinctly, as with butons
        key = ui.get_register_key()
        Actions[key] = ui
    if isinstance(ui, dict):
        for v in ui.values():
            save_iactions(v)
    elif isinstance(ui, list):
        # Bug fix: a list comprehension was used purely for its side effects
        # and bound to an unused name, building a throwaway list of Nones.
        for x in ui:
            save_iactions(x)
# TODO: remove s argument
def save_kactions(k,ui,s,_id):
    """Recursively register keyboard-button actions for user ``_id``.

    Actions with a ``react_to`` attribute become reaction triggers for the
    chat; plain actions are registered globally under ``kb_<button name>``.
    Dicts recurse with the key as the button name; lists recurse element-wise.
    """
    if isinstance(ui,action):
        # key format: State+ButtonName
        if ui.react_to:
            trigs = Reaction_triggers.get(_id)
            if trigs:
                Reaction_triggers[_id].append((ui.react_to,ui))
            else:
                Reaction_triggers.update({_id:[(ui.react_to,ui)]})
            print("tgflow: reaction tgigger for %s registrated %s"%(str(_id),str(ui)))
        else:
            Actions['kb_'+str(k)]=ui
    if isinstance(ui,dict):
        for k,v in ui.items():
            save_kactions(k,v,s,_id)
    elif isinstance(ui,list):
        ui = [save_kactions(k,x,s,_id) for x in ui ]
def send(message, id):
    """Deliver each rendered (text, markup) pair to chat ``id``."""
    print("tgflow: sending message")
    for body, keyboard in message:
        api.send(id, text=body, markup=keyboard)
def update(messages, msg):
    """Edit the existing Telegram message ``msg`` in place with each rendered pair."""
    for body, keyboard in messages:
        print("tgflow: updating message")
        api.update(msg, text=body, markup=keyboard)
"lkv97dn@gmail.com"
] | lkv97dn@gmail.com |
fb8634a4c603789431dd34afcdb2ed4356e85859 | f23947b7531aa2ceae3e8444edc570b845362818 | /pygamess/pygamess.py | 6ecc75bb56ba96c5ec9ad0f7751952ed3e7639c8 | [] | no_license | andrepd/gamess | 8b581ad5aae4931274a42b5006a68966be84ff55 | 9365f4caedb300b8707be72ff8b32bcc2d51a9c8 | refs/heads/master | 2018-01-08T14:41:27.540722 | 2016-03-12T03:15:54 | 2016-03-12T03:15:54 | 51,661,161 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,566 | py | import sys
import os
sys.path.append('./lib')
sys.path.append('./data')
from gamin import gamin
from basis_list import basis_list
if(len(sys.argv)!=3):
print "Usage: python "+sys.argv[0]+" infile.pygam outfile.inp"
sys.exit()
test=gamin(sys.argv[1])
#DFT
functional={}
functionaldata=open("./data/xcfunctionals.data","r")
functionalread=functionaldata.readlines()
for i in range(len(functionalread)):
if "#" not in functionalread[i] and functionalread[i]!='\n':
ispl=functionalread[i].strip().split("\t")
functional.update({ispl[0]:ispl[1]})
#--------------------------------------
#SCFTYP
shell={"rhf":"SCFTYP=RHF ",
"rohf":"SCFTYP=ROHF ",
"uhf":"SCFTYP=UHF "
}
#--------------------
#RUNTYP
calcmode={"gs":"RUNTYP=ENERGY ",
"go":"RUNTYP=OPTIMIZE ",
"hessian":"RUNTYP=HESSIAN "
}
#-------------------
#CC
theory={"mp2":"MPLEVL=2 ",
"dft":"DFTTYP=",
"lccd":"CCTYP=LCCD ",
"ccsd":"CCTYP=CCSD ",
"ccd":"CCTYP=CCD ",
"ccsdt":"CCTYP=CCSD(T) "
}
#----------------------------
#Units
Units={"bohr":"BOHR",
}
#-------------------
#SCF
if test.DirectSCF():
if test.DirectSCF()=="yes":
scf=" $SCF DIRSCF=.TRUE. $END\n"
elif test.DirectSCF()=="no":
pass
else:
sys.exit("Choose yes or no for Direct SCF!")
#Mult Basis
basislist=[]
for i in range(len(test.Basis())):
basis=" $BASIS "
place=0
if "#" in test.Basis()[i]:
place=test.Basis()[i].find("#")
if test.Basis()[i][:place] in basis_list and place!=0:
basis+=basis_list[test.Basis()[i][:place]]
elif test.Basis()[i] in basis_list and place==0:
basis+=basis_list[test.Basis()[i]]
else:
sys.exit("That basis set does not exist in the database!")
if "#" in test.Basis()[i]:
if test.Basis()[i].count("+")==1:
basis+="DIFFSP=.TRUE. "
if test.Basis()[i].count("+")==2:
basis+="DIFFS=.TRUE. "
if "p" in test.Basis()[i]:
basis+="NPFUNC="+test.Basis()[i][test.Basis()[i].find("p")-1]+" "
if "d" in test.Basis()[i]:
basis+="NDFUNC="+test.Basis()[i][test.Basis()[i].find("d")-1]+" "
if "f" in test.Basis()[i]:
basis+="NFFUNC="+test.Basis()[i][test.Basis()[i].find("f")-1]+" "
basis+="$END\n"
basislist.append(basis)
#------------------------------
#$CONTRL
control=" $CONTRL "
if test.Shell() in shell:
control+=shell[test.Shell()]
else:
sys.exit("Choose either rhf, rohf or uhf!")
if test.CalculationMode() in calcmode:
control+=calcmode[test.CalculationMode()]
else:
sys.exit("That calculation mode does not exist!")
if test.MoleculeCharge():
control+="ICHARG="+test.MoleculeCharge()+" "
if test.Multiplicity():
control+="MULT="+test.Multiplicity()+" "
if test.MaxSCFiter():
if test.MaxSCFiter()=="30":
pass
else:
control+="MAXIT="+test.MaxSCFiter()+" "
if test.Units():
if test.Units()=="angstrom":
pass
if test.Units()=="bohr":
control+="UNITS="+test.Units()+" "
if test.NumericalGrad():
if test.NumericalGrad()=="yes":
control+="NUMGRD=.TRUE. "
elif test.NumericalGrad()=="no":
pass
if test.Theory():
if test.Theory()=="dft":
control+=theory[test.Theory()]
if test.XCFunctional():
control+=functional[test.XCFunctional()]+" "
else:
sys.exit("To use DFT you have to choose a XC functional!")
elif test.Theory() in theory:
control+=theory[test.Theory()]+" "
else:
sys.exit("Choose a valid theory level (mp2,ccd,dft...)!")
controllist=[]
controlclean=control
for i in range(len(test.Basis())):
control=controlclean
if "cc_" in test.Basis()[i]:
control+="ISPHER=1 "
control+="$END\n"
controllist.append(control)
#---------------------------------------
#HESSIAN
if test.CalculationMode()=="hessian":
hessian=" $FORCE "
if test.HessMethod():
if test.HessMethod()=="analytic":
hessian+="METHOD=ANALYTIC "
elif test.HessMethod()=="numeric":
hessian+="METHOD=SEMINUM "
else:
sys.exit("Choose either analytic or numeric method!")
if test.VibAnal():
if test.VibAnal()=="yes":
hessian+="VIBANL=.TRUE. "
elif test.HessMethod()=="no":
hessian+="VIBANL=.FALSE. "
else:
sys.exit("Choose yes or no for vibrational analysis!")
hessian+="$END\n"
#----------------------------------------
#Optimization
if test.CalculationMode()=="go":
statopt=" $STATPT "
if test.NOptStep():
if type(int(test.NOptStep())) is int:
statopt+="NSTEP="+test.NOptStep()+" "
else:
sys.exit("Choose a number of steps!")
if test.OptStepSize():
if test.OptStepSize():
if type(float(test.OptStepSize())) is float:
statopt+="OPTTOL="+test.OptStepSize()+" "
else:
sys.exit("Choose a number of steps!")
statopt+="$END\n"
#$DATA
data=" $DATA\ncomment\nC1\n"
for i in test.Coordinates():
data+=i
data+="\n"
data+=" $END\n"
#---------------------------
for i in range(len(test.Basis())):
f=open(sys.argv[2]+"_"+test.Basis()[i]+".inp","w")
f.write(basislist[i])
f.write(controllist[i])
f.write(" $SYSTEM TIMLIM=525600 MEMORY=524288000 MEMDDI=125 $END\n")
if test.DirectSCF():
f.write(scf)
if test.CalculationMode()=="hessian":
f.write(hessian)
if test.NOptStep() or test.OptStepSize():
f.write(statopt)
f.write(data)
f.close()
for i in range(len(test.Basis())):
os.system("python gamessrun.py "+sys.argv[2]+"_"+test.Basis()[i]+".inp")
for i in range(len(test.Basis())):
os.system("python read.py "+sys.argv[2]+"_"+test.Basis()[i]\
+".log energy cc geometry > "+sys.argv[2]+"_"+test.Basis()[i]+".res")
os.system("rm *.inp")
| [
"marcosgouveia13@gmail.com"
] | marcosgouveia13@gmail.com |
a0cb28bad70fcd7e7477f099e1ce87cedae8050d | f33b30743110532ddae286ba1b34993e61669ab7 | /比赛/力扣杯2020春季全国编程大赛/1.py | dff7e2ce6da1326a6dd9870c6a3b18e8dfb798d9 | [] | no_license | c940606/leetcode | fe9dcee7a5daa4d52999d5f53253dd6dd33c348b | 631df2ce6892a6fbb3e435f57e90d85f8200d125 | refs/heads/master | 2021-07-10T14:01:26.164966 | 2020-08-16T10:46:16 | 2020-08-16T10:46:16 | 186,588,449 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | from typing import List
import collections
a = Solution()
print()
| [
"762307667@qq.com"
] | 762307667@qq.com |
a76cfb7955e09eef938a7627cc74429d43d652ef | 35ccd03ab2deca4802b1e5bc48408a21d06ba6a7 | /word_embed.py | ada2bf5c4bb22b8a41c937ca9119d95db5d17208 | [] | no_license | Allen-Wu/CS394N-Project | 9f328b055c8a950ffeb69c43545f6aa66cd14bec | 294c920f432a0c422c2ba9ccbfbd5d747e9c049f | refs/heads/main | 2023-01-30T07:10:29.346015 | 2020-12-12T23:42:20 | 2020-12-12T23:42:20 | 312,946,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,857 | py | import numpy as np
import csv
import transformers
import tensorflow as tf
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from transformers import BertTokenizer
import model
words_list = []
words_dict = {}
unique_word_cnt = 0
text_lists = []
type_idx_list = []
typename_to_idx = {
'INTJ': 0,
'INTP': 1,
'ENTJ': 2,
'ENTP': 3,
'INFJ': 4,
'INFP': 5,
'ENFJ': 6,
'ENFP': 7,
'ISTJ': 8,
'ISFJ': 9,
'ESTJ': 10,
'ESFJ': 11,
'ISTP': 12,
'ISFP': 13,
'ESTP': 14,
'ESFP': 15
}
typeidx_to_name = {v: k for k, v in typename_to_idx.items()}
with open('mbti_preprocessed.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
firstLine = True
for row in readCSV:
if firstLine:
firstLine = False
continue
words = []
text = row[1]
text_lists.append(text)
type_idx_list.append(typename_to_idx[row[0]])
sentences = text.split(" ")
paragraph = ""
for w in sentences:
if not w == "":
words.append(w)
if not w in words_dict:
unique_word_cnt += 1
words_dict[w] = 1
else:
words_dict[w] += 1
words_list.append(words)
print('# of unique words: {}'.format(str(unique_word_cnt)))
assert len(words_dict) == unique_word_cnt
# Show the top10 words
# cnt = 0
# for k, v in sorted(words_dict.items(), key=lambda item: item[1], reverse=True):
# if cnt > 10:
# break
# print('{} occurs {} times'.format(k, str(v)))
# cnt += 1
# Use sklearn transformer
text_lists = np.array(text_lists)
type_idx_list = np.array(type_idx_list)
type_idx_list = type_idx_list.reshape([len(type_idx_list), 1])
# Word counting
cntizer = CountVectorizer(analyzer="word",
max_features=512,
tokenizer=None,
preprocessor=None,
stop_words=None,
max_df=0.9,
min_df=0.1)
# Learn the vocabulary dictionary and return term-document matrix
X_cnt = cntizer.fit_transform(text_lists)
top_feature_names = cntizer.get_feature_names()
def remove_extra_words(text):
    """Shrink a post so it fits in 512 tokens before BERT tokenization.

    Posts of 512 words or fewer pass through unchanged; longer posts keep
    only the words present in the global ``top_feature_names`` vocabulary
    and are then hard-truncated to 512 words.

    (Parameter renamed from ``str`` -- it shadowed the builtin; all call
    sites in this script pass it positionally.)
    """
    words = text.split()
    if len(words) <= 512:
        return text
    # Set membership is O(1) vs an O(n) scan of the feature list per word.
    allowed = set(top_feature_names)
    kept = [word for word in words if word in allowed]
    # Truncate to 512
    del kept[512:]
    return ' '.join(kept)
# Use BERT tokenizer to generate input_ids
text_lists = text_lists.tolist()
tokenizer = BertTokenizer.from_pretrained("bert-large-uncased")
for i in range(len(text_lists)):
t = remove_extra_words(text_lists[i])
tokens = tokenizer(t)['input_ids']
if len(tokens) > 512:
del tokens[512:]
elif len(tokens) < 512:
tokens.extend([0] * (512 - len(tokens)))
text_lists[i] = tokens
X_tfidf = np.array(text_lists)
# Transform the count matrix to a normalized tf or tf-idf representation
# tfizer = TfidfTransformer()
# Learn the idf vector (fit) and transform a count matrix to a tf-idf representation
# X_tfidf = tfizer.fit_transform(X_cnt).toarray()
all_data = np.append(X_tfidf, type_idx_list, 1)
print('All dataset size: {}'.format(all_data.shape))
train, test = train_test_split(all_data)
train, val = train_test_split(train)
print('Training set shape: {}'.format(str(train.shape)))
print('Validation set shape: {}'.format(str(val.shape)))
print('Test set shape: {}'.format(str(test.shape)))
def idx_to_one_hot_vec(X):
    """Convert a sequence of class indices into an (N, 16) one-hot matrix.

    Accepts the float label column produced by the train/test split: indices
    are cast to int (modern NumPy rejects float indices), and the per-row
    loop is replaced by a single fancy-indexed assignment.
    """
    idx = np.asarray(X, dtype=int).ravel()
    vec = np.zeros([len(idx), 16])
    if len(idx):
        vec[np.arange(len(idx)), idx] = 1  # vectorized one-hot write
    return vec
train_input = train[:, :-1]
train_label = train[:, -1]
train_label = idx_to_one_hot_vec(train_label)
val_input = val[:, :-1]
val_label = val[:, -1]
val_label = idx_to_one_hot_vec(val_label)
test_input = test[:, :-1]
test_label = test[:, -1]
test_label = idx_to_one_hot_vec(test_label)
print(train_input.shape)
print(train_label.shape)
batch_size = 4
bert_model = model.create_model(512)
checkpoint_filepath = '/tmp/checkpoint'
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
save_weights_only=False,
monitor='val_loss',
mode='min',
save_best_only=True,
save_freq='epoch')
bert_model.fit(train_input,
train_label,
validation_data=(val_input, val_label),
verbose=1,
epochs=20,
batch_size=batch_size,
callbacks=[tf.keras.callbacks.EarlyStopping(patience=5),
model_checkpoint_callback])
| [
"shiyuwu@umich.edu"
] | shiyuwu@umich.edu |
66e384d968761a60e6fb3cf12522ac49e6c0e794 | 10e67ea36c018d0caa0e169ee5a71464d7e2ad29 | /myprojectenv/Scripts/django-admin.py | 50d8f4553aeb7c61d472cbf415d8276c53ed6764 | [] | no_license | nikharj7/iBlog | ca7d68d11b90d6cb4bea70047a5eddab48cf5c14 | 637238851932d901b8f16f2160368efc8c8fbf2c | refs/heads/main | 2023-02-06T05:04:36.884114 | 2020-12-28T19:17:55 | 2020-12-28T19:17:55 | 320,803,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | #!c:\users\nikha\desktop\blog\myprojectenv\scripts\python.exe
from django.core import management

# Thin virtualenv wrapper: delegate straight to Django's CLI dispatcher.
if __name__ == "__main__":
    management.execute_from_command_line()
| [
"nikharj7@gmail.com"
] | nikharj7@gmail.com |
e9c2b8cd6f09b728d9ba53ba44af92899b6be039 | 6ab13b962c99523c12f52b7b99723cf470dfd504 | /0x02-python-import_modules/2-args.py | fcfdc066e3da2eb6727ad55f2f1d3ed506948cb1 | [] | no_license | OctaveC/holbertonschool-higher_level_programming | db4522a73c39a31a29d2e8190200cd6733019e13 | 350735ec7077647a25077ad24727c7f644eaf2d7 | refs/heads/main | 2023-08-18T03:48:17.535787 | 2021-09-23T01:05:47 | 2021-09-23T01:05:47 | 361,696,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | #!/usr/bin/python3
if __name__ == "__main__":
    import sys

    # Report how many command-line arguments were given, then list each one.
    argc = len(sys.argv) - 1
    if argc == 0:
        print("0 arguments.")
    elif argc == 1:
        print("1 argument:")
    else:
        print("{} arguments:".format(argc))
    for position, argument in enumerate(sys.argv[1:], 1):
        print("{}: {}".format(position, argument))
| [
"octavec@outlook.com"
] | octavec@outlook.com |
b993b3230a55f5fd587e1ba760952deb2b9114a7 | 73744088a38ba3938563dc57677e3c4b6814e3d6 | /commands/pause.py | e8b1b7004998c5184033bb2c8377979a8ca2be90 | [
"BSD-3-Clause"
] | permissive | roman-kachanovsky/cmd.fm-python | 76ed6a4f4da20c2ef7e6d4d9c1bee207b416f151 | 3365a9b26bc272fe7a11b404b74cd1920bd72d31 | refs/heads/master | 2021-01-19T00:29:59.902473 | 2017-05-05T13:30:21 | 2017-05-05T13:30:21 | 87,176,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | from __future__ import unicode_literals, absolute_import
from .base import Command
from utils.colorize import colorize, Colors
class Pause(Command):
    """Console command that pauses the active player, if there is one."""

    name = 'pause'
    pattern = 'pause'
    example = ('pause',)
    description = 'Pause playback.'

    @staticmethod
    def handle(self, *args):
        # Guard clause: nothing to pause when no player exists.
        if not self.player:
            return self.INDENT + colorize(Colors.RED, 'No active players found.')
        if self.player.is_playing:
            self.player.pause()
            return self.INDENT + colorize(Colors.GREEN, 'Track paused.')
        if self.player.is_paused:
            return self.INDENT + colorize(Colors.RED, 'Track already paused.')
        # Player exists but is neither playing nor paused (e.g. stopped).
        return self.INDENT + colorize(Colors.RED, 'No active players found.')
| [
"roman.kachanovsky@gmail.com"
] | roman.kachanovsky@gmail.com |
5fe4c7ed46fc6342f89f21baa980a8b8f0c9a22a | a814debee728e59a7a10d8c12b92c1f3ee97e19d | /Cadeias/Questao01.py | 5a06773ddc2a07e94da38507662ab3bf4ae50ea1 | [] | no_license | PedroVitor1995/Algoritmo-ADS-2016.1 | 0ee034d2f03b29d3c8177fb3402f7aeae08d07cf | 8e3b6dfb0db188b9f5d68dcb8619f6636883ab89 | refs/heads/master | 2021-01-01T15:51:56.636502 | 2017-07-19T13:47:36 | 2017-07-19T13:47:36 | 81,328,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | #__*__ encoding:utf8 __*__
"""1. Faça a criptografia de uma frase digitada pelo usuário. Na criptografia, a frase deverá ser invertida e as
consoantes deverão ser substituídas pelo caractere #."""
def main():
frase = raw_input('Digite uma frase: ')
consoantes = 'BCDFGHJKLMNPQRSTVXYWZbcdfghjklmnpqrstvxywz'
for letra in consoantes:
if letra in frase:
frase = frase[::-1].replace(letra,'#')
print frase
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | noreply@github.com |
f5e3ef2dd23127bdafeb35e907d3692943c20b20 | 3c5ec7efc413bcbfd930ad2fe0de2ce873d98b87 | /wordcount/urls.py | 8dea4f12cba6059907a74f29721e92012f62291e | [] | no_license | pythonstudy0708/wordcount-project | 75c0be909e3e3bbd5b5cb582c125210e3e7bdd7e | c76e9290bc15fe8cdd9e042ce87d0928cc3734ee | refs/heads/master | 2020-03-29T01:28:57.262645 | 2018-09-19T04:14:47 | 2018-09-19T04:14:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | from django.urls import path
from . import views
# URL routes for the wordcount app: home page, count result page, about page.
urlpatterns = [
    path('', views.homepage, name='home'),
    path('count/', views.count, name='count'),
    path('about/', views.about, name='about'),
]
| [
"python.study.0708@gmail.com"
] | python.study.0708@gmail.com |
f86cca745638f0ae4f6c7c24b5a49c2942f72fda | 356c2be7861c53cb058e04e983bd04ebcb0e4ef7 | /Python-100-Days-Of-Code/Python Code Exercises/main 2.py | de5aa8a49030160bbd280d2fe23408d967747197 | [] | no_license | HyperMG/Python-100-Days | 62c12dd5e5f6aca508539f9b8bd7064026817fff | 409deb295b8094b0d7b1968574a17ac2987edfdb | refs/heads/main | 2023-08-08T04:46:52.308496 | 2021-09-18T12:11:12 | 2021-09-18T12:11:12 | 392,434,973 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | name = 'jack'
print(name)
# Rebinding shows the variable now points at a new string value.
name = 'Mahi'
print(name)
name = input('What is your name?')
# len() counts the characters in the entered name.
length = len(name)
print(length)
| [
"noreply@github.com"
] | noreply@github.com |
374eb12b1ec6126e692a94315444e4a7bcf0621b | 4eaab9327d25f851f9e9b2cf4e9687d5e16833f7 | /problems/search_suggestions_system/solution.py | 47a2ff3a14f8b27c1b8af6d2a0b73ebff62b06d6 | [] | no_license | kadhirash/leetcode | 42e372d5e77d7b3281e287189dcc1cd7ba820bc0 | 72aea7d43471e529ee757ff912b0267ca0ce015d | refs/heads/master | 2023-01-21T19:05:15.123012 | 2020-11-28T13:53:11 | 2020-11-28T13:53:11 | 250,115,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | class Solution:
def suggestedProducts(self, products: List[str], searchWord: str) -> List[List[str]]:
products.sort() # time O(nlogn)
array_len = len(products)
ans = []
input_char = ""
for chr in searchWord:
tmp = []
input_char += chr
insertion_index = self.binary_search(products, input_char) # find where input_char can be inserted in-order in the products array
for word_ind in range(insertion_index, min(array_len, insertion_index+3)): # check the following 3 words, if valid
if products[word_ind].startswith(input_char):
tmp.append(products[word_ind])
ans.append(tmp)
return ans
def binary_search(self, array, target): # bisect.bisect_left implementation
lo = 0
hi = len(array)
while lo < hi:
mid = (lo + hi) //2
if array[mid] < target: lo = mid + 1
else: hi = mid
return lo
| [
"kadhirash@gmail.com"
] | kadhirash@gmail.com |
4ef86e3b784b706086dad83933e4de1171ce7c67 | 26cae5bfec8dd416890714e095171c86eba9b521 | /0x16-api_advanced/2-recurse.py | 7d3893c192d0f843cf6e20da8d42e340a2a476fc | [] | no_license | hugocortesmu/holberton-system_engineering-devops | 58f1359bff9b97d2546e3d80f4b809557018824b | 59557c9a2424ebb4619397e3f49d6520312fb973 | refs/heads/main | 2023-08-03T13:51:53.440376 | 2021-10-09T02:56:23 | 2021-10-09T02:56:23 | 295,881,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,153 | py | #!/usr/bin/python3
""" module for function to return top 10 hot posts of a given subreddit """
import requests
import sys
after = None
def recurse(subreddit, hot_list=None):
    """ Args:
        subreddit: subreddit name
        hot_list: list of hot titles in subreddit (a fresh list is created
            when omitted)
        after: last hot_item appended to hot_list
    Returns:
        a list containing the titles of all hot articles for the subreddit
        or None if queried subreddit is invalid """
    global after
    # Bug fix: the mutable default argument ([]) was shared across calls, so
    # a second query kept appending to the first query's results.
    if hot_list is None:
        hot_list = []
    headers = {'User-Agent': 'xica369'}
    url = "https://www.reddit.com/r/{}/hot.json".format(subreddit)
    parameters = {'after': after}
    response = requests.get(url, headers=headers, allow_redirects=False,
                            params=parameters)
    if response.status_code == 200:
        next_ = response.json().get('data').get('after')
        if next_ is not None:
            after = next_
            # Recurse first so deeper pages are fetched before appending.
            recurse(subreddit, hot_list)

        list_titles = response.json().get('data').get('children')
        for title_ in list_titles:
            hot_list.append(title_.get('data').get('title'))
        return hot_list
    else:
        return (None)
| [
"hugocortesmu@gmail.com"
] | hugocortesmu@gmail.com |
480b22b1552952509a2e4c51d2fa8303ef6314d2 | 72579ed38872d8d752fe87dce87986635758563b | /tests/base_testcase.py | 4cecbcaffcd42700dde0fd99957a17377825c8d7 | [
"Apache-2.0"
] | permissive | devhub/cardisco | 8f58c8be430f6b1a8624dc18b937bc44d66edb57 | 05449e88958740b0e698373a8fc9acd3f1b94a28 | refs/heads/master | 2020-06-01T06:47:59.337460 | 2011-12-30T19:35:29 | 2011-12-30T19:35:29 | 3,076,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | # -*- coding: utf-8 -*-
# Copyright 2010 Mark Lee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import html5lib
from importlib import import_module
import os
from unittest2 import TestCase
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
class BaseTestCase(TestCase):
    '''Base test case for testing autodiscovery classes.

    Subclasses must set ``dir_name`` (fixture directory under the tests
    root) and either ``discoverer_class`` directly or ``module_name`` (a
    module exporting a ``Discoverer`` class).
    '''

    @classmethod
    def setUpClass(cls):
        # Resolve the fixture directory; lazily import the Discoverer named
        # by ``module_name`` when the subclass did not set one directly.
        cls.base_dir = os.path.join(BASE_DIR, cls.dir_name)
        if not hasattr(cls, 'discoverer_class'):
            cls.discoverer_class = import_module(cls.module_name).Discoverer

    def doc_from_path(self, path):
        # Parse ``<base_dir>/<path>.html`` into an lxml tree via html5lib.
        abspath = '%s.html' % os.path.join(self.base_dir, path)
        return html5lib.parse(open(abspath), treebuilder='lxml')

    def assertADLinks(self, basename, expected, parse_func=None):
        # Assert that parsing the named fixture yields exactly ``expected``;
        # defaults to the discoverer's ``parse`` when no parser is supplied.
        if not parse_func:
            parse_func = self.discoverer_class.parse
        doc = self.doc_from_path(basename)
        feeds = parse_func(doc)
        self.assertEqual(feeds, expected)
| [
"dlrust@gmail.com"
] | dlrust@gmail.com |
9186c018173a5b0a44c6503fb95bc790c2898d4e | f3c4509c4f2830f5429058e0f324a9876654c2a9 | /sequbot_data/errors.py | 3c51604c38597b892baeadbb59f4c3a3974f9a76 | [] | no_license | phasnox/sequbot | 2482b65b1e4dd855d6c780fe54002affc21b88b0 | 86865383246c463028e263bf3a1e5fb6ba0134da | refs/heads/master | 2022-11-27T11:59:31.382355 | 2020-04-08T19:05:18 | 2020-04-08T19:05:18 | 254,175,116 | 0 | 0 | null | 2022-11-04T19:10:09 | 2020-04-08T18:58:11 | Python | UTF-8 | Python | false | false | 50 | py | class SocialAccountAlreadyExists(Exception): pass
| [
"phasnox@gmail.com"
] | phasnox@gmail.com |
d831169e024a98da203941639f6ffb0ea1e5e70c | 2c966de9274c3d432b66b876c57f6da174266a5b | /keras/utils/generic_utils.py | 32f5b96639ae1ec1f307f4184c2dd3a3cb98aa88 | [
"MIT"
] | permissive | ruizhang-ai/GCP | 72e34a038e2adab83b89556502d57c1e4e6ade8f | 7a0f30c6c3d732627fa269ce943c62a9005cc40f | refs/heads/main | 2023-08-06T22:41:10.661786 | 2021-09-27T16:03:04 | 2021-09-27T16:03:04 | 410,938,287 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,431 | py | """Python utilities required by Keras."""
from __future__ import absolute_import
import numpy as np
import time
import sys
import six
import marshal
import types as python_types
import inspect
_GLOBAL_CUSTOM_OBJECTS = {}
class CustomObjectScope(object):
    """Scope inside which changes to `_GLOBAL_CUSTOM_OBJECTS` are temporary.

    Code within the `with` statement can look up custom objects by name;
    any additions made here are reverted when the statement exits.

    # Example

    Consider a custom object `MyObject` (e.g. a class):

    ```python
        with CustomObjectScope({'MyObject':MyObject}):
            layer = Dense(..., kernel_regularizer='MyObject')
            # save, load, etc. will recognize custom object by name
    ```
    """

    def __init__(self, *args):
        self.custom_objects = args
        self.backup = None

    def __enter__(self):
        # Snapshot the registry so __exit__ can restore it verbatim.
        self.backup = _GLOBAL_CUSTOM_OBJECTS.copy()
        for mapping in self.custom_objects:
            _GLOBAL_CUSTOM_OBJECTS.update(mapping)
        return self

    def __exit__(self, *args, **kwargs):
        _GLOBAL_CUSTOM_OBJECTS.clear()
        _GLOBAL_CUSTOM_OBJECTS.update(self.backup)
def custom_object_scope(*args):
    """Convenience wrapper that builds a `CustomObjectScope`.

    Code inside the returned `with` block can resolve custom objects by
    name; the global registry is restored when the block exits.

    # Example

    Consider a custom object `MyObject`

    ```python
        with custom_object_scope({'MyObject':MyObject}):
            layer = Dense(..., kernel_regularizer='MyObject')
            # save, load, etc. will recognize custom object by name
    ```

    # Arguments
        *args: Variable length list of dictionaries of name,
            class pairs to add to custom objects.

    # Returns
        Object of type `CustomObjectScope`.
    """
    return CustomObjectScope(*args)
def get_custom_objects():
    """Return a live reference to the global custom-object registry.

    Prefer `custom_object_scope` for scoped updates; this accessor exposes
    `_GLOBAL_CUSTOM_OBJECTS` directly for manual inspection or mutation.

    # Example

    ```python
        get_custom_objects().clear()
        get_custom_objects()['MyObject'] = MyObject
    ```

    # Returns
        Global dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`).
    """
    return _GLOBAL_CUSTOM_OBJECTS
def serialize_keras_object(instance):
    """Serialize a Keras object: None passes through, configurable objects
    become a {'class_name', 'config'} dict, and named callables become their
    `__name__` string; anything else raises ValueError.
    """
    if instance is None:
        return None
    if hasattr(instance, 'get_config'):
        return {
            'class_name': instance.__class__.__name__,
            'config': instance.get_config()
        }
    if hasattr(instance, '__name__'):
        return instance.__name__
    raise ValueError('Cannot serialize', instance)
def deserialize_keras_object(identifier, module_objects=None,
                             custom_objects=None,
                             printable_module_name='object'):
    """Turn a serialized identifier back into a Keras object.

    `identifier` may be a config dict (`{'class_name': ..., 'config': ...}`)
    or a plain string naming a function/class.  Lookup precedence for the
    name is: the `custom_objects` argument, then the global custom-object
    registry, then `module_objects`.

    # Raises
        ValueError: for malformed config dicts, unknown names, or an
            identifier of any other type.
    """
    if isinstance(identifier, dict):
        # In this case we are dealing with a Keras config dictionary.
        config = identifier
        if 'class_name' not in config or 'config' not in config:
            raise ValueError('Improper config format: ' + str(config))
        class_name = config['class_name']
        if custom_objects and class_name in custom_objects:
            cls = custom_objects[class_name]
        elif class_name in _GLOBAL_CUSTOM_OBJECTS:
            cls = _GLOBAL_CUSTOM_OBJECTS[class_name]
        else:
            module_objects = module_objects or {}
            cls = module_objects.get(class_name)
            if cls is None:
                raise ValueError('Unknown ' + printable_module_name +
                                 ': ' + class_name)
        if hasattr(cls, 'from_config'):
            custom_objects = custom_objects or {}
            if has_arg(cls.from_config, 'custom_objects'):
                # Pass the merged registry explicitly; the caller-supplied
                # custom_objects override the global ones on collision.
                return cls.from_config(config['config'],
                                       custom_objects=dict(list(_GLOBAL_CUSTOM_OBJECTS.items()) +
                                                           list(custom_objects.items())))
            # from_config doesn't take custom_objects: expose them via the
            # global registry for the duration of the call instead.
            with CustomObjectScope(custom_objects):
                return cls.from_config(config['config'])
        else:
            # Then `cls` may be a function returning a class.
            # in this case by convention `config` holds
            # the kwargs of the function.
            custom_objects = custom_objects or {}
            with CustomObjectScope(custom_objects):
                return cls(**config['config'])
    elif isinstance(identifier, six.string_types):
        function_name = identifier
        if custom_objects and function_name in custom_objects:
            fn = custom_objects.get(function_name)
        elif function_name in _GLOBAL_CUSTOM_OBJECTS:
            fn = _GLOBAL_CUSTOM_OBJECTS[function_name]
        else:
            fn = module_objects.get(function_name)
            if fn is None:
                raise ValueError('Unknown ' + printable_module_name +
                                 ':' + function_name)
        return fn
    else:
        raise ValueError('Could not interpret serialized ' +
                         printable_module_name + ': ' + identifier)
def func_dump(func):
    """Serializes a user defined function.

    # Arguments
        func: the function to serialize.

    # Returns
        A tuple `(code, defaults, closure)` where `code` is the marshalled
        bytecode as a text string, `defaults` holds the default argument
        values, and `closure` contains the captured cell contents (or
        `None` when the function closes over nothing).
    """
    raw = marshal.dumps(func.__code__)
    code = raw.decode('raw_unicode_escape')
    defaults = func.__defaults__
    closure = None
    if func.__closure__:
        closure = tuple(cell.cell_contents for cell in func.__closure__)
    return code, defaults, closure
def func_load(code, defaults=None, closure=None, globs=None):
    """Deserializes a user defined function.

    # Arguments
        code: bytecode of the function (string), or a `(code, defaults,
            closure)` tuple as produced by `func_dump`.
        defaults: defaults of the function.
        closure: closure of the function.
        globs: dictionary of global objects.

    # Returns
        A function object.
    """
    # Accept a whole `func_dump` result in place of the code string.
    if isinstance(code, (tuple, list)):
        code, defaults, closure = code
        if isinstance(defaults, list):
            defaults = tuple(defaults)
    code_obj = marshal.loads(code.encode('raw_unicode_escape'))
    if closure is not None:
        closure = func_reconstruct_closure(closure)
    if globs is None:
        globs = globals()
    return python_types.FunctionType(code_obj, globs,
                                     name=code_obj.co_name,
                                     argdefs=defaults,
                                     closure=closure)
def func_reconstruct_closure(values):
    """Deserialization helper that reconstructs a closure.

    Builds a throwaway function whose inner lambda captures each element of
    `values` in its own cell, then returns that lambda's `__closure__` tuple
    so `func_load` can reattach it to deserialized bytecode.

    # Arguments
        values: sequence of objects to wrap in closure cells.

    # Returns
        A tuple of cell objects, one per element of `values`.
    """
    nums = range(len(values))
    src = ["def func(arg):"]
    src += ["  _%d = arg[%d]" % (n, n) for n in nums]
    src += ["  return lambda:(%s)" % ','.join(["_%d" % n for n in nums]), ""]
    src = '\n'.join(src)
    # Bug fix: exec() must be given an explicit namespace.  In Python 3,
    # exec inside a function body cannot create new local variables, so
    # the plain `exec(src)` used previously left `func` undefined and the
    # call below raised NameError.
    namespace = {}
    try:
        exec(src, namespace)
    except SyntaxError:
        raise SyntaxError(src)
    return namespace['func'](values).__closure__
def has_arg(fn, name, accept_all=False):
    """Checks if a callable accepts a given keyword argument.

    For Python 2, checks if there is an argument with the given name.
    For Python 3, checks if there is an argument with the given name, and
    also whether this argument can be called with a keyword (i.e. if it is
    not a positional-only argument).

    # Arguments
        fn: Callable to inspect.
        name: Check if `fn` can be called with `name` as a keyword argument.
        accept_all: What to return if there is no parameter called `name`
            but the function accepts a `**kwargs` argument.

    # Returns
        bool, whether `fn` accepts a `name` keyword argument.
    """
    if sys.version_info < (3,):
        spec = inspect.getargspec(fn)
        if accept_all and spec.keywords is not None:
            return True
        return name in spec.args
    elif sys.version_info < (3, 3):
        spec = inspect.getfullargspec(fn)
        if accept_all and spec.varkw is not None:
            return True
        return name in spec.args or name in spec.kwonlyargs
    else:
        params = inspect.signature(fn).parameters
        param = params.get(name)
        if param is not None:
            # Positional-only parameters cannot be passed by keyword.
            return param.kind in (inspect.Parameter.POSITIONAL_OR_KEYWORD,
                                  inspect.Parameter.KEYWORD_ONLY)
        if accept_all:
            return any(p.kind == inspect.Parameter.VAR_KEYWORD
                       for p in params.values())
        return False
class Progbar(object):
    """Displays a progress bar.

    # Arguments
        target: Total number of steps expected, None if unknown.
        width: Bar width in characters.
        verbose: 0 (silent), 1 (live bar), 2 (one summary line at the end).
        interval: Minimum visual progress update interval (in seconds).
    """
    def __init__(self, target, width=30, verbose=1, interval=0.05):
        self.width = width
        self.target = target
        # name -> [weighted value sum, total step weight]; used for means.
        self.sum_values = {}
        # Metric names in first-seen order.
        self.unique_values = []
        self.start = time.time()
        self.last_update = 0
        self.interval = interval
        self.total_width = 0
        self.seen_so_far = 0
        self.verbose = verbose
        # In-place redrawing (\b/\r) only works on real terminals or under
        # Jupyter's ipykernel; otherwise fall back to printing new lines.
        self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and
                                  sys.stdout.isatty()) or
                                 'ipykernel' in sys.modules)
    def update(self, current, values=None, force=False):
        """Updates the progress bar.

        # Arguments
            current: Index of current step.
            values: List of tuples (name, value_for_last_step).
                The progress bar will display averages for these values.
            force: Whether to force visual progress update.
        """
        values = values or []
        for k, v in values:
            if k not in self.sum_values:
                # Weight each value by the number of steps since the last
                # update so the displayed figure is a per-step average.
                self.sum_values[k] = [v * (current - self.seen_so_far),
                                      current - self.seen_so_far]
                self.unique_values.append(k)
            else:
                self.sum_values[k][0] += v * (current - self.seen_so_far)
                self.sum_values[k][1] += (current - self.seen_so_far)
        self.seen_so_far = current
        now = time.time()
        info = ' - %.0fs' % (now - self.start)
        if self.verbose == 1:
            # Throttle redraws to at most one per `interval`.  Bug fix:
            # guard the `current < self.target` comparison -- on Python 3,
            # comparing int < None raised TypeError whenever the target
            # was unknown (None).
            if (not force and (now - self.last_update) < self.interval and
                    self.target is not None and current < self.target):
                return
            prev_total_width = self.total_width
            if self._dynamic_display:
                sys.stdout.write('\b' * prev_total_width)
                sys.stdout.write('\r')
            else:
                sys.stdout.write('\n')
            if self.target is not None:
                numdigits = int(np.floor(np.log10(self.target))) + 1
                barstr = '%%%dd/%d [' % (numdigits, self.target)
                bar = barstr % current
                prog = float(current) / self.target
                prog_width = int(self.width * prog)
                if prog_width > 0:
                    bar += ('=' * (prog_width - 1))
                    if current < self.target:
                        bar += '>'
                    else:
                        bar += '='
                bar += ('.' * (self.width - prog_width))
                bar += ']'
            else:
                bar = '%7d/Unknown' % current
            self.total_width = len(bar)
            sys.stdout.write(bar)
            if current:
                time_per_unit = (now - self.start) / current
            else:
                time_per_unit = 0
            if self.target is not None and current < self.target:
                eta = time_per_unit * (self.target - current)
                if eta > 3600:
                    eta_format = '%d:%02d:%02d' % (eta // 3600, (eta % 3600) // 60, eta % 60)
                elif eta > 60:
                    eta_format = '%d:%02d' % (eta // 60, eta % 60)
                else:
                    eta_format = '%ds' % eta
                info = ' - ETA: %s' % eta_format
            else:
                if time_per_unit >= 1:
                    info += ' %.0fs/step' % time_per_unit
                elif time_per_unit >= 1e-3:
                    info += ' %.0fms/step' % (time_per_unit * 1e3)
                else:
                    info += ' %.0fus/step' % (time_per_unit * 1e6)
            for k in self.unique_values:
                info += ' - %s:' % k
                if isinstance(self.sum_values[k], list):
                    avg = np.mean(
                        self.sum_values[k][0] / max(1, self.sum_values[k][1]))
                    if abs(avg) > 1e-3:
                        info += ' %.4f' % avg
                    else:
                        info += ' %.4e' % avg
                else:
                    info += ' %s' % self.sum_values[k]
            self.total_width += len(info)
            # Pad with spaces so leftovers of a longer previous line are erased.
            if prev_total_width > self.total_width:
                info += (' ' * (prev_total_width - self.total_width))
            if self.target is not None and current >= self.target:
                info += '\n'
            sys.stdout.write(info)
            sys.stdout.flush()
        elif self.verbose == 2:
            # Semi-verbose: emit a single summary line once finished (or on
            # every call when the target is unknown).
            if self.target is None or current >= self.target:
                for k in self.unique_values:
                    info += ' - %s:' % k
                    avg = np.mean(
                        self.sum_values[k][0] / max(1, self.sum_values[k][1]))
                    if avg > 1e-3:
                        info += ' %.4f' % avg
                    else:
                        info += ' %.4e' % avg
                info += '\n'
                sys.stdout.write(info)
                sys.stdout.flush()
        self.last_update = now
    def add(self, n, values=None):
        """Advance the bar by `n` steps, optionally recording `values`."""
        self.update(self.seen_so_far + n, values)
| [
"ubuntu@bayudt1-new.localdomain"
] | ubuntu@bayudt1-new.localdomain |
b11b73d0f6d0548f2c0f073c85bd9cb2efbb7d01 | fa43f2da570e69440ab61bd5794e32622b289177 | /apps/login_reg/migrations/0001_initial.py | 0477742a6a49c98dfd18d710299ba10e66607197 | [] | no_license | ardenzhan/travel | 14d86885289ec5eb03523233b1f2335e6ac9a3e7 | f2a7836066d4e9b472dd28c5138a711a0c0a509e | refs/heads/master | 2021-08-19T07:09:18.248365 | 2017-11-25T04:22:37 | 2017-11-25T04:22:37 | 111,597,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-21 17:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration for the login_reg app.

    Creates the `User` table with name, username, unique email, password
    hash, and auto-managed created/updated timestamps.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('username', models.CharField(max_length=100)),
                ('email', models.EmailField(max_length=254, unique=True)),
                ('password', models.CharField(max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
| [
"arden@protonmail.ch"
] | arden@protonmail.ch |
007948663d69f619005fc8e3dd55d57ac8bf414b | a6858bdf10144e95076ad8da5bf817f18dcc4ed5 | /python/08p1.py | 3a70f52f898162e9f667056ca0dbfccab79ab3cd | [
"MIT"
] | permissive | dsumike/adventofcode | cbc0183c20965fd289b80972afd857220a60c9c5 | cd5e484fa162bada67625c3779580d77e87d1daa | refs/heads/master | 2021-01-10T12:18:40.032290 | 2015-12-23T22:51:49 | 2015-12-23T22:51:49 | 47,273,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | #!/usr/bin/env python
# Advent of Code 2015, day 8 part 1: total characters of code for the string
# literals minus total characters in memory after unescaping.
# NOTE(review): Python 2 only -- `str.decode('string_escape')` and the
# `print` statements below do not exist in Python 3.
literal = 0
inmemory = 0
with open("../input/08.txt") as fileobj:
	for line in fileobj:
		line = line.rstrip()
		literal += len(line)
		# remove the outer " "'s, and decode the rest
		inmemory += len(line[1:-1].decode('string_escape'))
print "literal: %d" % literal
print "in mem : %d" % inmemory
print "l-m    : %d" % (literal-inmemory)
| [
"dsumike@gmail.com"
] | dsumike@gmail.com |
55ecd3989086e450eef41364aee966a64c5e9c46 | 4ee9094152009d4bcc08fd47b9e46cdbd25eadf1 | /src/test/update.py | 73cd91c08112f28267c7e42aa7edcc525da409b3 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | slaclab/central_node_engine | a2100f398e1595b8a06b493220040076d9ed3a64 | 272a48619395f663d88d95146f4a871b10a86889 | refs/heads/master | 2023-08-17T01:30:30.578524 | 2023-08-03T15:29:12 | 2023-08-03T15:29:12 | 85,611,819 | 0 | 2 | NOASSERTION | 2023-05-15T20:56:17 | 2017-03-20T18:27:07 | C++ | UTF-8 | Python | false | false | 5,758 | py | #!/usr/bin/env python
import socket
import sys
import os
import argparse
import time
#
# Input line:
#
# Input 0 ... Input 1
# bit bit ...
# 0 1 ...
# --- --- ...
# X X X X ...
# | | | |
# | | | + wasHigh
# | | +- wasLow
# | |
# | + wasHigh
# +- wasLow
#
def readFile(f, debug):
    """Parse a text file of wasLow/wasHigh bit pairs into an update payload.

    Each line of `f` describes one application card as a string of '0'/'1'
    characters, two characters per digital input (wasLow first, then
    wasHigh -- see the diagram above).  Every line becomes a 64-byte
    record: 16 zeroed header bytes (timestamp placeholder) followed by
    48 data bytes, with 4 inputs (2 bits each) packed per byte.

    # Returns
        A bytearray concatenating one 64-byte record per input line.
    """
    appData = bytearray()
    appCount = 1
    for line in f:
        if debug:
            print "+---------------------------------------+"
            print "| Global AppId #" + str(appCount) + " "
            print "+---------------------------------------+"
        lineData = bytearray([0, 0, 0, 0, 0, 0, 0, 0, # 32 bits for timestamp
                              0, 0, 0, 0, 0, 0, 0, 0, # 32 bits for timestamp
                              0, 0, 0, 0, 0, 0, 0, 0, # Data starts at byte #17
                              0, 0, 0, 0, 0, 0, 0, 0,
                              0, 0, 0, 0, 0, 0, 0, 0,
                              0, 0, 0, 0, 0, 0, 0, 0,
                              0, 0, 0, 0, 0, 0, 0, 0,
                              0, 0, 0, 0, 0, 0, 0, 0])
        current = 16
        end = len(lineData)
        # NOTE(review): rstrip() result is discarded, so the trailing
        # newline is still part of `line`; verify this is intentional.
        line.rstrip()
        lineIndex = 0 # points to which char we are in the line
        inputCount = 1
        if debug:
            print "| LN Inputs\t| L H | L H | L H | L H |"
            print "+---------------------------------------+"
        while current < end:
            debugOut = ""
            byte = 0
            bitIndex = 0 # bit index within input byte
            # For each input read wasLow/wasHigh and set proper bits
            # One byte has bits for 4 inputs
            debugOut = "| " + str(inputCount).zfill(3) + ".." + str(inputCount+3).zfill(3) + "\t| "
            for i in range(4):
                wasLowBit = 0
                wasHighBit = 0
                # Read wasLow/wasHigh for input
                if lineIndex < len(line):
                    if line[lineIndex] == '1':
                        wasLowBit = 1
                    lineIndex = lineIndex + 1
                if lineIndex < len(line):
                    if line[lineIndex] == '1':
                        wasHighBit = 1
                    lineIndex = lineIndex + 1
                if debug:
                    debugOut = debugOut + str(wasLowBit) + " " + str(wasHighBit) + " | "
                byte |= (wasLowBit << bitIndex)
                bitIndex = bitIndex + 1
                byte |= (wasHighBit << bitIndex)
                bitIndex = bitIndex + 1
                inputCount = inputCount + 1
            # end for
            if debug:
                if inputCount < 30:
                    print debugOut
            lineData[current] = byte
            current = current + 1
        # end while
        if debug:
            print "+---------------------------------------+"
        appData = appData + lineData
        appCount = appCount + 1
    # end for line
    return appData
def sendUpdate(sock, file_base, index, host, port, debug):
    """Read `<file_base>-<index>.txt`, pack it, and send it over UDP.

    Exits the process if the file is missing or the send fails.
    NOTE(review): Python 2 only -- `except socket.error, msg` and the
    `print` statements are Python 2 syntax.
    """
    file_name = file_base + "-" + str(index) + '.txt'
    if not os.path.isfile(file_name):
        print "ERROR: file " + file_name + " can't be opened, please check if it exists"
        sys.exit()
    f = open(file_name, 'r')
    appdata = readFile(f, debug)
    # These constants are currently unused; they document the payload layout.
    NUM_APPLICATIONS = 1024
    APPLICATION_SIZE = 512/8 # 64 bytes per application
    try :
        sock.sendto(appdata, (host, port))
        print 'Update #' + str(index) + " sent. (file=" + file_name + ")"
    except socket.error, msg:
        print 'Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
        sys.exit()
    f.close()
# Command-line interface: which host/file set to send and how often.
parser = argparse.ArgumentParser(description='Send link node update to central_node_engine server')
parser.add_argument('--host', metavar='hostname', type=str, nargs=1, help='Central node hostname')
parser.add_argument('input', metavar='input_name', type=str, nargs=1, help='input file')
parser.add_argument('--start', metavar='start_index', type=int, nargs='?', help='start index (default=1)')
parser.add_argument('--size', metavar='size', type=int, nargs='?', help='number of input files (default=1)')
parser.add_argument('--debug', action="store_true", help='enable debug output')
parser.add_argument('--repeat', metavar='number', type=int, nargs='?', help='repeat a finite number of times (default=0 -> forever)')
parser.add_argument('--sleep', metavar='seconds', type=int, nargs='?', help='number of seconds to sleep between updates (default=1)')
args = parser.parse_args()
# create dgram udp socket
try:
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
except socket.error:
    print 'Failed to create socket'
    sys.exit()
if args.host:
    host = args.host[0]
else:
    host = 'lcls-dev3'
debug = False
if args.debug:
    debug = True
repeat = 0
forever = True
if args.repeat:
    repeat = args.repeat
    forever = False
file_index = 1
if args.start:
    file_index = args.start
num_files = 1
if args.size:
    num_files = args.size
sleep = 1
if args.sleep:
    sleep = args.sleep
port = 4356;
file_base = args.input[0]
if forever:
    msg = "Sending " + str(num_files) + " updates to central node engine at " + host + ":" + str(port) + ". Press ctrl-C to stop."
else:
    msg = "Sending " + str(num_files) + " updates to central node engine at " + host + ":" + str(port) + ". Repeating " + str(repeat) + " times."
print msg
repeatCounter = 0;
done = False
# Main loop: each cycle sends files [file_index, file_index + num_files),
# pausing `sleep` seconds between sends; repeats forever unless --repeat set.
while not done:
    i = file_index
    repeatCounter = repeatCounter + 1
    print "Update cycle #" + str(repeatCounter)
    while i < file_index + num_files:
        sendUpdate(sock, file_base, i, host, port, debug)
        time.sleep(sleep)
        i = i + 1
    if not forever:
        if repeatCounter == repeat:
            done = True
| [
"lpiccoli@slac.stanford.edu"
] | lpiccoli@slac.stanford.edu |
c66751d12e778467d78176d59d437fc89c8bee5b | afd9185976540033ccaa7c5a2607090ec0b3c48a | /heroHealthApp/migrations/0001_initial.py | e4077d93adb6bc257017d486bf32d877f4d7b5f6 | [] | no_license | lukijan98/Simple-Django-App | 4510824e17648046ff6bb5d604ec629e726a94d5 | 26947ca0cb9a03833a351306861be28d82c65b5e | refs/heads/master | 2023-08-21T21:20:35.320535 | 2021-10-17T10:31:15 | 2021-10-17T10:31:15 | 417,804,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,886 | py | # Generated by Django 3.2.8 on 2021-10-15 14:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for heroHealthApp.

    Creates `Config` (per-device settings), `Device`, and `Pill`
    (medication slots, FK to Config), then wires Config to Device with a
    foreign key added after both models exist.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Config',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('passcode', models.CharField(max_length=128)),
                ('timezone_name', models.CharField(max_length=128)),
                ('active', models.BooleanField()),
            ],
        ),
        migrations.CreateModel(
            name='Device',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='Pill',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('expiration_date', models.DateField()),
                ('passcode_mandatory', models.BooleanField()),
                ('dosage', models.CharField(max_length=128)),
                ('max_doses', models.IntegerField()),
                ('form', models.CharField(max_length=128)),
                ('pill_count', models.IntegerField()),
                ('slot_index', models.IntegerField()),
                ('config', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='heroHealthApp.config')),
            ],
        ),
        migrations.AddField(
            model_name='config',
            name='device',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='heroHealthApp.device'),
        ),
    ]
| [
"jelicl@ymail.com"
] | jelicl@ymail.com |
28eb9cf5f13dc05100ba9264f00df18331a9e5ba | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04030/s999899727.py | d1c7afb12be8ed4228e960b1a6e2e0f7fc222ea5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | s=input()
fin=""
for c in s:
if c=='1':
fin+="1"
elif c=='0':
fin+="0"
else:
if len(fin)>0:
fin=fin[:-1]
print(fin)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d864b293e38504f4a6999312151c7b67dac4710d | fc8f76fa05deca2cc504170370d69c458ed499fe | /listings/urls.py | fed6b2a0217bc5a4a1c8116aa0c05adf7b22e4fb | [] | no_license | intelligentCoding/DjangoProject1 | 3e02eb0946e328634261ee3f87d77c3bb6677326 | 989407ad5724c0297963708ad4b9f553f913d5f8 | refs/heads/master | 2021-02-09T00:48:38.641093 | 2020-03-01T20:42:01 | 2020-03-01T20:42:01 | 244,217,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | from django.urls import path
from . import views
urlpatterns = [
# home page
path('', views.index, name='listings'),
path('<int:listing_id>', views.listing, name='listing'),
path('search', views.search, name='search')
] | [
"kaaf_kaaf@hotmail.com"
] | kaaf_kaaf@hotmail.com |
b05c7f2697b02781d7a365257e25e20a392ec234 | 816fcaa344334f4b582ff0936b2b225b13e9d1fe | /Other Files/test.py | c9d6be9d92eb37ef9a7d74e7b2c4ac43532aa60f | [] | no_license | sdaless/psychic-carnival | 118c6f084c6581e40d89aeda2bbfab7498e19309 | a3721f1047bd6f6911327bed84638854b10d3bfa | refs/heads/master | 2021-09-16T12:32:37.114251 | 2018-06-20T16:56:21 | 2018-06-20T16:56:21 | 113,626,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | numBitcoins = float(input("How many DANK BITCOINS do you have: "))
# Convert at a hard-coded exchange rate of 4450 USD per bitcoin.
numDollars = numBitcoins * 4450
print("That'd be", '{0:.2f}'.format(numDollars), " GREEN AMERICAN DOLLARS.")
"noreply@github.com"
] | noreply@github.com |
bb63b320769286448e7c3926ae41d07edfdb0f8f | dfd73d8b6fd6e952f4f627863b8ed8604c32039c | /train_evaluate_RNN/test_LSTM.py | a48ffcbb38fee61a71f3e920550d8c3c7ed23a56 | [] | no_license | pablo-martin/PFC_Behavior | 57d23995f0ff4cf1f2429cfc21b2065c2e17d03a | 4a47ce7c890c9b3c638098069c3d99ecef96fb2c | refs/heads/master | 2020-03-25T09:18:51.523123 | 2019-02-11T00:10:16 | 2019-02-11T00:10:16 | 143,659,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,493 | py |
import itertools
import numpy as np
import multiprocessing as mp
import pandas as pd
import pickle
from behavioral_performance.utils import fileNames
from keras.models import load_model
from RNNmodule.SequenceClass import Sequences
# Root of the project tree on the analysis machine.
ROOT = '/home/pablo/python/'
# Input encoding under evaluation; selects the matching model/sequence dirs.
datatype = 'binaryOmni'
model_dir = ROOT + 'Models/LSTM/Pablo/' + datatype + '/'
sequence_dir = ROOT + 'DATA_structures/RNN_sequences/' + datatype + '/'
# Architecture grid evaluated below: network depth x hidden-layer width.
Layers = ['Single', 'Double', 'Triple']
hidden_dimensions = [2, 5, 10, 20, 50, 100]
# Shorthand for pandas MultiIndex slicing.
idx = pd.IndexSlice
def propagate_models(seqs, fileName):
    """Evaluate every trained LSTM in the depth x width grid on `seqs`.

    Runs each saved model against the validation split and pickles a
    14x18 accuracy table (rows = dataset files, columns = (Layer, HD))
    with only this file's row populated.  NOTE(review): Python 2 `print`
    statements below.
    """
    scores = pd.DataFrame(np.zeros([14, 18]),
                          index = fileNames,
                          columns = pd.MultiIndex.from_product(
                                            [Layers, hidden_dimensions],
                                            names = ['Layer', 'HD']))
    for Layer, hd in itertools.product(Layers, hidden_dimensions):
        print '%s - %s - %i' %(fileName, Layer, hd)
        model_path = model_dir + Layer + '/' + fileName[:-2] + str(hd) + '.h5'
        model = load_model(model_path)
        val_scores = model.evaluate(seqs.X_validate, seqs.y_validate)
        # evaluate() returns [loss, accuracy]; keep accuracy only.
        scores.loc[fileName, idx[Layer, hd]] = val_scores[1]
    pickle.dump(scores,
                open(ROOT + \
                    'Model_Evaluation/LSTM/' \
                    + datatype + '_classification_behavior'
                    + fileName, 'wb'))
def glue_datasets_back():
    """Merge the per-file score pickles written by `propagate_models`
    into one combined 14x18 DataFrame and pickle the result."""
    masterScores = pd.DataFrame(np.zeros([14, 18]),
                                index = fileNames,
                                columns = pd.MultiIndex.from_product(
                                            [Layers, hidden_dimensions],
                                            names = ['Layer', 'HD']))
    for fileName in fileNames:
        scores = pickle.load(open(ROOT + 'Model_Evaluation/LSTM/' \
                                + datatype + '_classification_behavior'
                                + fileName, 'rb'))
        # Copy only this file's populated row into the master table.
        masterScores.loc[fileName, :] = scores.loc[fileName, :]
    pickle.dump(masterScores, open(ROOT + 'Model_Evaluation/LSTM/' \
                                + datatype + '_classification_behavior.p', 'wb'))
if __name__ == '__main__' :
    # Fan out one evaluation job per dataset file across 32 worker
    # processes, wait for all of them, then merge the per-file pickles.
    pool = mp.Pool(processes = 32)
    for fileName in fileNames:
        sequence_path = sequence_dir + fileName
        seqs = pickle.load(open(sequence_path, 'rb'))
        pool.apply_async(propagate_models, [seqs, fileName])
    pool.close()
    pool.join()
    glue_datasets_back()
| [
"elektrochose@gmail.com"
] | elektrochose@gmail.com |
352ab7a70416903b7c6a9aed0e6bb43d1fe798ae | 793fd49d1f4d0f5c21c071e3ca8c32e663774241 | /common/login.py | 01a80c9e9892beedab07908e42bf2351a226d01f | [] | no_license | luoqingfu/eliteu_live_auto | ab4797fe76a9265f1b505295c630a94109b73d42 | 440d529c5bc00322e9353e4eef8e090220c56fcd | refs/heads/master | 2020-03-10T00:01:37.414258 | 2018-04-25T08:25:00 | 2018-04-25T08:25:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | import unittest as unittest
from common.drive import Driver
from page_obj.base import Page
from readYaml import yl
class tes_login():
    """Test helper that logs the `t_ed_student` user into the site.

    Credentials come from the `t_ed_student` section of the YAML config
    (`yl`); page helpers (`click`, `send_keys`) are assumed to be provided
    by a `Page` mixin at runtime -- TODO confirm, this class has no base.
    """
    def t_login(self, url, browser):
        """Open `url` in `browser` and submit the login form.

        Bug fix: the original read `self.brower` (typo) and `self.url`,
        neither of which is ever assigned (the class attributes are
        commented out), so the call raised AttributeError.  Use the
        parameters the caller passed instead.
        """
        self.driver = Driver(browser).open_browser(url)
        self.click("test_login", "username")
        username = yl['t_ed_student']['username']
        self.send_keys("test_login", "username", username)
        password = yl['t_ed_student']['pwd']
        self.send_keys("test_login", "password", password)
        self.click("test_login", "confirm")
| [
"qf.luo@eliteu.com.cn"
] | qf.luo@eliteu.com.cn |
115b553734b82703e35ac36ab72a8e52d72bee3c | 45a9db818c980db472bac010af8e1bdb69a230e3 | /api_tests.py | 93a8010c8e96c003fa4faf45b9508ffd501397dd | [] | no_license | bira37/desafio-sanar | db7f933be0490bbae0197dd14970fc0d22d82994 | b94f1d72105516bc5f07768e2efc2792be6575c0 | refs/heads/master | 2023-05-25T03:17:58.288779 | 2019-08-25T02:25:59 | 2019-08-25T02:25:59 | 204,233,106 | 0 | 0 | null | 2023-05-22T22:29:36 | 2019-08-25T01:49:23 | Python | UTF-8 | Python | false | false | 15,197 | py | import os
import unittest
from flask import Flask
from app import db
from api.User import User
from app import flaskApp
class FlaskAPITest(unittest.TestCase):
    def setUp(self):
        # Flask test client backed by the real application (live DB session).
        self.app = flaskApp.test_client()
    def tearDown(self):
        # Remove every test user ("teste") from the database after each test.
        User.query.filter_by(name = "teste").delete()
        db.session.commit()
#testa buscar informacoes de um usuario inexistente
def test_get_request_on_non_existing_user(self):
response = self.app.get('/api/users/0')
self.assertEqual(400, response.status_code)
#testa realizar uma requisicao get sem informar o id
def test_get_request_without_id_specified(self):
response = self.app.get('/api/users/')
self.assertEqual(400, response.status_code)
    #tests adding a user with the monthly plan
    def test_post_add_new_user_with_monthly_subscription(self):
        request_json = {"cliente": {"nome": "teste", "email":"joao123@gmail.com"},
                        "produto":{"tipo":"plano", "plano_id":"plan_mens"},
                        "cartao":{"nome_cartao": "joao", "numero":"4584441896453869", "expiracao_mes":12, "expiracao_ano":19, "cvv": "591"}
                        }
        response = self.app.post('/api/users/', json = request_json)
        self.assertEqual(200, response.status_code)
        # A freshly created user gets a positive id and the requested plan.
        self.assertTrue(response.json["id"] > 0)
        self.assertEqual(response.json["nome"], "teste")
        self.assertEqual(response.json["plano"], "Plano SanarFlix Mensal")
    #tests adding a user with the three-month plan
    def test_post_add_new_user_with_three_months_subscription(self):
        request_json = {"cliente": {"nome": "teste", "email":"joao123@gmail.com"},
                        "produto":{"tipo":"plano", "plano_id":"plan_trim"},
                        "cartao":{"nome_cartao": "joao", "numero":"4584441896453869", "expiracao_mes":12, "expiracao_ano":19, "cvv": "591"}
                        }
        response = self.app.post('/api/users/', json = request_json)
        self.assertEqual(200, response.status_code)
        self.assertTrue(response.json["id"] > 0)
        self.assertEqual(response.json["nome"], "teste")
        self.assertEqual(response.json["plano"], "Plano SanarFlix Trimestral")
    #tests adding a user with the monthly plan that includes a 7-day trial
    def test_post_add_new_user_with_trial_subscription(self):
        request_json = {"cliente": {"nome": "teste", "email":"joao123@gmail.com"},
                        "produto":{"tipo":"plano", "plano_id":"plan_mens_teste"},
                        "cartao":{"nome_cartao": "joao", "numero":"4584441896453869", "expiracao_mes":12, "expiracao_ano":19, "cvv": "591"}
                        }
        response = self.app.post('/api/users/', json = request_json)
        self.assertEqual(200, response.status_code)
        self.assertTrue(response.json["id"] > 0)
        self.assertEqual(response.json["nome"], "teste")
        self.assertEqual(response.json["plano"], "Plano SanarFlix Mensal com 7 Dias de Teste")
#testa adicionar um usuario com plano mensal promocional com livro yellowbook
def test_post_add_new_user_with_trial_subscription(self):
request_json = {"cliente": {"nome": "teste", "email":"joao123@gmail.com"},
"produto":{"tipo":"plano", "plano_id":"plan_promo_yellowbook"},
"cartao":{"nome_cartao": "joao", "numero":"4584441896453869", "expiracao_mes":12, "expiracao_ano":19, "cvv": "591"}
}
response = self.app.post('/api/users/', json = request_json)
self.assertEqual(200, response.status_code)
self.assertTrue(response.json["id"] > 0)
self.assertEqual(response.json["nome"], "teste")
self.assertEqual(response.json["plano"], "Plano SanarFlix Promocional Com Livro Yellowbook")
#testa adicionar usuario com plano inexistente
def test_post_add_new_user_with_non_existing_plan(self):
request_json = {"cliente": {"nome": "teste", "email":"joao123@gmail.com"},
"produto":{"tipo":"plano", "plano_id":"plan_semestral"},
"cartao":{"nome_cartao": "joao", "numero":"4584441896453869", "expiracao_mes":12, "expiracao_ano":19, "cvv": "591"}
}
response = self.app.post('/api/users/', json = request_json)
self.assertEqual(400, response.status_code)
#testa adicionar usuario com informacoes faltando no corpo
def test_post_add_new_user_with_missing_info(self):
request_json = {"cliente": {"email":"joao123@gmail.com"},
"produto":{"plano_id":"plan_semestral"},
"cartao":{"nome_cartao": "joao", "numero":"4584441896453869", "expiracao_mes":12, "expiracao_ano":19, "cvv":"591"}
}
response = self.app.post('/api/users/', json = request_json)
self.assertEqual(400, response.status_code)
    #tests adding more than one user, each with an arbitrary plan
    def test_post_add_more_than_one_user(self):
        request_json = {"cliente": {"nome": "teste", "email":"joao123@gmail.com"},
                        "produto":{"tipo":"plano", "plano_id":"plan_mens"},
                        "cartao":{"nome_cartao": "joao", "numero":"4584441896453869", "expiracao_mes":12, "expiracao_ano":19, "cvv": "591"}
                        }
        response = self.app.post('/api/users/', json = request_json)
        self.assertEqual(200, response.status_code)
        self.assertTrue(response.json["id"] > 0)
        self.assertEqual(response.json["nome"], "teste")
        self.assertEqual(response.json["plano"], "Plano SanarFlix Mensal")
        # Reuse the payload with a different plan and email for user #2.
        request_json["produto"]["plano_id"] = "plan_trim"
        request_json["cliente"]["email"] = "jonas312@hotmail.com"
        second_response = self.app.post('/api/users/', json = request_json)
        self.assertEqual(200, second_response.status_code)
        self.assertTrue(second_response.json["id"] > 0)
        self.assertEqual(second_response.json["nome"], "teste")
        self.assertEqual(second_response.json["plano"], "Plano SanarFlix Trimestral")
    #tests fetching information for a user that has an active subscription
    def test_get_request_on_existing_user(self):
        request_json = {"cliente": {"nome": "teste", "email":"joao123@gmail.com"},
                        "produto":{"tipo":"plano", "plano_id":"plan_mens"},
                        "cartao":{"nome_cartao": "joao", "numero":"4584441896453869", "expiracao_mes":12, "expiracao_ano":19, "cvv": "591"}
                        }
        response = self.app.post('/api/users/', json = request_json)
        self.assertEqual(200, response.status_code)
        self.assertTrue(response.json["id"] > 0)
        self.assertEqual(response.json["nome"], "teste")
        # Fetch the user just created and check the data round-trips.
        get_response = self.app.get('/api/users/' + str(response.json["id"]))
        self.assertEqual(200, get_response.status_code)
        self.assertEqual(get_response.json["nome"], "teste")
        self.assertEqual(get_response.json["id"], response.json["id"])
    #tests cancelling a user's subscription
    def test_delete_cancel_subscription_of_user(self):
        request_json = {"cliente": {"nome": "teste", "email":"joao123@gmail.com"},
                        "produto":{"tipo":"plano", "plano_id":"plan_mens"},
                        "cartao":{"nome_cartao": "joao", "numero":"4584441896453869", "expiracao_mes":12, "expiracao_ano":19, "cvv": "591"}
                        }
        response = self.app.post('/api/users/', json = request_json)
        self.assertEqual(200, response.status_code)
        delete_response = self.app.delete('/api/users/' + str(response.json["id"]))
        self.assertEqual(200, delete_response.status_code)
        self.assertEqual(delete_response.json["status"], "canceled")
        self.assertEqual(response.json["mundi_subscription_id"], delete_response.json["mundi_subscription_id"])
        # After cancellation the user no longer carries a subscription id.
        get_response = self.app.get('/api/users/' + str(response.json["id"]))
        self.assertEqual(200, get_response.status_code)
        self.assertEqual(None, get_response.json["mundi_subscription_id"])
    #tests cancelling a subscription that has already been cancelled
    def test_delete_cancel_subscription_already_cancelled(self):
        request_json = {"cliente": {"nome": "teste", "email":"joao123@gmail.com"},
                        "produto":{"tipo":"plano", "plano_id":"plan_mens"},
                        "cartao":{"nome_cartao": "joao", "numero":"4584441896453869", "expiracao_mes":12, "expiracao_ano":19, "cvv": "591"}
                        }
        response = self.app.post('/api/users/', json = request_json)
        self.assertEqual(200, response.status_code)
        delete_response = self.app.delete('/api/users/' + str(response.json["id"]))
        self.assertEqual(200, delete_response.status_code)
        self.assertEqual(delete_response.json["status"], "canceled")
        self.assertEqual(response.json["mundi_subscription_id"], delete_response.json["mundi_subscription_id"])
        # A second cancellation must be rejected.
        new_delete_response = self.app.delete('/api/users/' + str(response.json["id"]))
        self.assertEqual(400, new_delete_response.status_code)
    #tests changing a user's credit card
    def test_put_change_user_card(self):
        request_json = {"cliente": {"nome": "teste", "email":"joao123@gmail.com"},
                        "produto":{"tipo":"plano", "plano_id":"plan_mens"},
                        "cartao":{"nome_cartao": "joao", "numero":"4584441896453869", "expiracao_mes":12, "expiracao_ano":19, "cvv": "591"}
                        }
        response = self.app.post('/api/users/', json = request_json)
        self.assertEqual(200, response.status_code)
        card_json = {"cartao": {"nome_cartao": "jose", "numero": "4532912167490007", "expiracao_mes": 1, "expiracao_ano":28, "cvv": "123"}}
        put_response = self.app.put('/api/users/' + str(response.json["id"]), json = card_json)
        self.assertEqual(200, put_response.status_code)
        # Subscription is kept; only the card on file changes.
        self.assertEqual(response.json["mundi_subscription_id"], put_response.json["mundi_subscription_id"])
        self.assertEqual("453291", put_response.json["primeiros 6 digitos do cartao"])
        self.assertEqual("0007", put_response.json["ultimos 4 digitos do cartao"])
#testa alterar cartao de credito de um usuario com o corpo da requisicao incompleto
def test_put_change_user_with_missing_information(self):
request_json = {"cliente": {"nome": "teste", "email":"joao123@gmail.com"},
"produto":{"tipo":"plano", "plano_id":"plan_mens"},
"cartao":{"nome_cartao": "joao", "numero":"4584441896453869", "expiracao_mes":12, "expiracao_ano":19, "cvv": "591"}
}
response = self.app.post('/api/users/', json = request_json)
self.assertEqual(200, response.status_code)
card_json = {"cartao": {"nome_cartao": "jose", "numero": "4532912167490007", "expiracao_mes": 1, "expiracao_ano":28}}
put_response = self.app.put('/api/users/' + str(response.json["id"]), json = card_json)
self.assertEqual(400, put_response.status_code)
#testa criar nova assinatura para um cliente antigo que cancelou a sua assinatura anterior
def test_put_create_new_subscription_for_existing_user(self):
request_json = {"cliente": {"nome": "teste", "email":"joao123@gmail.com"},
"produto":{"tipo":"plano", "plano_id":"plan_mens"},
"cartao":{"nome_cartao": "joao", "numero":"4584441896453869", "expiracao_mes":12, "expiracao_ano":19, "cvv": "591"}
}
response = self.app.post('/api/users/', json = request_json)
self.assertEqual(200, response.status_code)
delete_response = self.app.delete('/api/users/' + str(response.json["id"]))
self.assertEqual(200, delete_response.status_code)
card_json = {"produto":{"tipo":"plano", "plano_id":"plan_trim"}, "cartao": {"nome_cartao": "jose", "numero": "4532912167490007", "expiracao_mes": 1, "expiracao_ano":28, "cvv": "123"}}
put_response = self.app.put('/api/users/' + str(response.json["id"]), json = card_json)
self.assertEqual(200, put_response.status_code)
self.assertEqual(put_response.json["nome"], "teste")
self.assertEqual(put_response.json["id"], response.json["id"])
self.assertEqual("453291", put_response.json["primeiros 6 digitos do cartao"])
self.assertEqual("0007", put_response.json["ultimos 4 digitos do cartao"])
self.assertEqual(put_response.json["plano"], "Plano SanarFlix Trimestral")
#testa criar nova assinatura para um cliente antigo que cancelou a sua assinatura anterior com informacoes faltando
def test_put_create_new_subscription_for_existing_user_with_missing_information(self):
request_json = {"cliente": {"nome": "teste", "email":"joao123@gmail.com"},
"produto":{"tipo":"plano", "plano_id":"plan_mens"},
"cartao":{"nome_cartao": "joao", "numero":"4584441896453869", "expiracao_mes":12, "expiracao_ano":19, "cvv": "591"}
}
response = self.app.post('/api/users/', json = request_json)
self.assertEqual(200, response.status_code)
delete_response = self.app.delete('/api/users/' + str(response.json["id"]))
self.assertEqual(200, delete_response.status_code)
card_json = {"produto":{"tipo":"plano", "plano_id":"plan_trim"}}
put_response = self.app.put('/api/users/' + str(response.json["id"]), json = card_json)
self.assertEqual(400, put_response.status_code)
#testa obter informacoes detalhadas de um cliente e suas assinaturas (usando a assinatura promocional)
def test_get_request_for_detailed_information(self):
request_json = {"cliente": {"nome": "teste", "email":"joao123@gmail.com"},
"produto":{"tipo":"plano", "plano_id":"plan_promo_yellowbook"},
"cartao":{"nome_cartao": "joao", "numero":"4584441896453869", "expiracao_mes":12, "expiracao_ano":19, "cvv": "591"}
}
response = self.app.post('/api/users/', json = request_json)
self.assertEqual(200, response.status_code)
get_detailed_response = self.app.get('/api/users/details/' + str(response.json["id"]))
self.assertEqual(200, get_detailed_response.status_code)
get_response = self.app.get('/api/users/' + str(response.json["id"]))
self.assertEqual(200, get_response.status_code)
info = get_detailed_response.json
self.assertEqual(get_response.json["mundi_customer_id"], info["cliente"]["mundi_customer_id"])
self.assertEqual(get_response.json["mundi_subscription_id"], info["assinatura"]["mundi_subscription_id"])
self.assertEqual("teste", info["cliente"]["nome"])
self.assertEqual("joao123@gmail.com", info["cliente"]["email"])
self.assertEqual("458444", info["assinatura"]["cartao"]["primeiros 6 digitos"])
self.assertEqual("3869", info["assinatura"]["cartao"]["ultimos 4 digitos"])
self.assertEqual("Plano SanarFlix Mensal", info["assinatura"]["produto"][0]["nome"])
self.assertEqual(None, info["assinatura"]["produto"][0]["ciclos"])
self.assertEqual("Livro Yellowbook", info["assinatura"]["produto"][1]["nome"])
self.assertEqual(1, info["assinatura"]["produto"][1]["ciclos"])
if __name__ == "__main__":
unittest.main()
| [
"ubiratanneto37@hotmail.com"
] | ubiratanneto37@hotmail.com |
2723e040a2d91c56676e7645fc79b32a8370686e | 9a5561080b694e96c642e990e1748162cfb6a414 | /Management/migrations/0001_initial.py | 1334c797b3d0ed5f225cd1ce01e3342bdcab0205 | [] | no_license | ArlexDu/Training_Management | a81c969e83777c4edf309e4ff39787f9c52ccb9f | f5c14f30fe82b782b7099d152545e30b6f99cc61 | refs/heads/master | 2020-12-30T13:39:45.696249 | 2018-10-20T12:17:57 | 2018-10-20T12:17:57 | 91,239,784 | 0 | 2 | null | 2018-10-20T12:17:58 | 2017-05-14T11:48:16 | JavaScript | UTF-8 | Python | false | false | 5,290 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-17 10:46
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Grade',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lesson1', models.DecimalField(decimal_places=2, default=60.0, max_digits=5)),
('lesson2', models.DecimalField(decimal_places=2, default=60.0, max_digits=5)),
('lesson3', models.DecimalField(decimal_places=2, default=60.0, max_digits=5)),
('average', models.DecimalField(decimal_places=2, default=60.0, max_digits=5)),
],
),
migrations.CreateModel(
name='LessonOne',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', models.CharField(default='1700001', max_length=32)),
('level1', models.DecimalField(decimal_places=2, default=60.0, max_digits=5)),
('level2', models.DecimalField(decimal_places=2, default=60.0, max_digits=5)),
('level3', models.DecimalField(decimal_places=2, default=60.0, max_digits=5)),
('level4', models.DecimalField(decimal_places=2, default=60.0, max_digits=5)),
('level5', models.DecimalField(decimal_places=2, default=60.0, max_digits=5)),
('average', models.DecimalField(decimal_places=2, default=60.0, max_digits=5)),
('train_time', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='LessonThree',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', models.CharField(default='1700001', max_length=32)),
('level1', models.DecimalField(decimal_places=2, default=60.0, max_digits=5)),
('level2', models.DecimalField(decimal_places=2, default=60.0, max_digits=5)),
('level3', models.DecimalField(decimal_places=2, default=60.0, max_digits=5)),
('level4', models.DecimalField(decimal_places=2, default=60.0, max_digits=5)),
('level5', models.DecimalField(decimal_places=2, default=60.0, max_digits=5)),
('level6', models.DecimalField(decimal_places=2, default=60.0, max_digits=5)),
('average', models.DecimalField(decimal_places=2, default=60.0, max_digits=5)),
('train_time', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='LessonTwo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', models.CharField(default='1700001', max_length=32)),
('level1', models.DecimalField(decimal_places=2, default=60.0, max_digits=5)),
('level2', models.DecimalField(decimal_places=2, default=60.0, max_digits=5)),
('level3', models.DecimalField(decimal_places=2, default=60.0, max_digits=5)),
('average', models.DecimalField(decimal_places=2, default=60.0, max_digits=5)),
('train_time', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Person',
fields=[
('number', models.CharField(default='1700001', max_length=32, primary_key=True, serialize=False)),
('idcard', models.CharField(default='13027472347324', max_length=32)),
('province', models.CharField(default='Shanghai', max_length=32)),
('name', models.CharField(default='LiHua', max_length=32)),
('gender', models.CharField(default='F', max_length=2)),
('age', models.CharField(default='25', max_length=3)),
('phone', models.CharField(default='18510309110', max_length=15)),
('email', models.CharField(default='bigplane@qq.com', max_length=32)),
('address', models.CharField(default='\u4e0a\u6d77\u5e02\u5609\u5b9a\u533a\u66f9\u5b89\u516c\u8def4800\u53f7', max_length=128)),
('image', models.ImageField(default='pictures/timg.jpg', upload_to='pictures')),
],
),
migrations.CreateModel(
name='SuperUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(default='admin', max_length=32)),
('password', models.CharField(default='admin', max_length=32)),
],
),
migrations.AddField(
model_name='grade',
name='person',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='person_grade', to='Management.Person'),
),
]
| [
"772773671@qq.com"
] | 772773671@qq.com |
23aa400084b7cbdf2c7d215796fe0d309501e22f | f30464842a6698ab9a61c05029b0cd246bed8829 | /env/lib/python3.5/sre_compile.py | 3e0d51477ddd328ec46aecebeba518bac711f6ad | [] | no_license | chrispdharman/unbroken-symmetry | e80ba1c02fce3b2c37a8450aeb28a089291cf925 | e9a2d3e95f63b76d17bc065588e992abd8ffc3cb | refs/heads/master | 2021-06-25T10:12:17.584167 | 2018-09-12T22:24:11 | 2018-09-12T22:24:11 | 145,339,589 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 52 | py | /Users/ch392/miniconda3/lib/python3.5/sre_compile.py | [
"christopher.p.d.harman@gmail.com"
] | christopher.p.d.harman@gmail.com |
a27df39f6759523f35257c05c958641d535518dc | adcdc01294ef1563f063b26c4513825879d34262 | /koala/koala_server/app/api_1_0/api_project.py | 646bbeea5a100c2539ece28cad23593c043978ca | [] | no_license | clearloveyin/Cararote | 0312d18061fd6b23e72450d956ff2a4f07deb31a | 2d764d5d682391a8909650387e84eb735044c5ff | refs/heads/master | 2023-01-29T07:08:32.696111 | 2019-08-03T04:12:15 | 2019-08-03T04:12:15 | 200,329,856 | 0 | 0 | null | 2023-01-04T06:14:17 | 2019-08-03T04:07:49 | Python | UTF-8 | Python | false | false | 4,914 | py | # -*- coding: UTF-8 -*-
import os
from app.ctrl.ctrl_project import CtrlProject
from flask_restful import Resource, request
from token_manage import auth
class ApiProjectState(Resource):
@auth.login_required
def get(self):
result = {"result": "OK", "content": []}
res = CtrlProject().get_proj_state_options()
if res:
result["result"] = "OK"
result["content"] = res
return result
class ApiProjectInside(Resource):
@auth.login_required
def get(self):
result = {"result": "OK", "content": []}
res = CtrlProject().get_inside_name_list()
if res:
result["result"] = "OK"
result["content"] = res
return result
class ApiProjectType(Resource):
@auth.login_required
def get(self):
result = {"result": "OK", "content": []}
res = CtrlProject().get_proj_type_list()
if res:
result["result"] = "OK"
result["content"] = res
return result
class ApiProjectNameCheck(Resource):
@auth.login_required
def get(self):
result = {"result": "OK", "content": []}
res = CtrlProject().check_proj_name()
if res:
result["result"] = "OK"
result["content"] = res
return result
class ApiProjectList(Resource):
@auth.login_required
def get(self, user_id=None):
result = {"result": "OK", "content": []}
if user_id:
res = CtrlProject().get_proj_list_by_user_id(user_id)
else:
res = CtrlProject().get_proj_list()
if res:
result["result"] = "OK"
result["content"] = res
return result
@auth.login_required
def post(self):
result = {"result": "NG"}
data = request.get_json(force=True)
if data:
proj_id, message = CtrlProject().add_project_with_observer(data)
if proj_id:
result = {"result": "OK", 'content': proj_id}
else:
result["error"] = message
else:
result["error"] = "请不要传空数据"
return result
class ApiProjectInfo(Resource):
@auth.login_required
def get(self, pro_id):
result = {"result": "NG", "content": []}
res, msg = CtrlProject().get_one_proj_by_id(pro_id)
if res:
result["result"] = "OK"
result["content"] = msg
else:
result["content"] = msg
return result
@auth.login_required
def put(self, pro_id):
result = {"result": "NG"}
data = request.get_json(force=True)
if data:
proj_id, message = CtrlProject().change_proj_by_id_with_observer(pro_id, data)
if proj_id:
result = {"result": "OK", 'content': proj_id}
else:
result["error"] = message
else:
result["error"] = "请不要传空数据"
return result
# @auth.login_required
def post(self):
result = {"result": "NG"}
data = request.get_json(force=True)
# data = request
if data:
proj_id, message = CtrlProject().delete_proj_by_id(data)
if proj_id:
result = {"result": "OK", 'content': proj_id}
else:
result["error"] = message
else:
result["error"] = "请不要传空数据"
return result
class ApiProjectManager(Resource):
"""项目体制"""
@auth.login_required
def get(self, proj_id, user_id):
result = {"result": "NG", "content": []}
res, msg = CtrlProject().get_project_manager(proj_id, user_id)
if res:
result["result"] = "OK"
result["content"] = msg
else:
result["error"] = msg
return result
class ApiManageList(Resource):
"""项目体制option"""
@auth.login_required
def get(self):
result = {"result": "NG", "content": []}
res, msg = CtrlProject().get_manager_list()
if res:
result["result"] = "OK"
result["content"] = msg
else:
result["error"] = msg
return result
class ApiManagerImport(Resource):
"""项目体制导入"""
# def get(self):
# """测试"""
# proj_id = 2
# file_path = r'C:\workspace\koala\Spec2DB\koala\koala_server\template\开发体制_template_ver0.1.xlsx'
# res, msg = CtrlProject().import_project_manager(file_path, proj_id)
# print(res, msg)
@auth.login_required
def post(self):
result = {"result": "NG"}
request_data = request
res, msg = CtrlProject().import_manager(request_data)
if res:
result["result"] = "OK"
result["content"] = res
else:
result["error"] = msg
return result
| [
"1484091708@qq,com"
] | 1484091708@qq,com |
d6bb9fecfdd54bc0099de28046aacffa170065d6 | 40456139d3d3dafdf5331ff3d5a6e1ce7b1c6421 | /image_normalizer.py | bc85788aa4ef132dc925d686fd976173c05be284 | [] | no_license | nacro90/age-gender | 5a06f6374f8f31f199dfd0c290656c8bd8f52753 | 491fa10754001fc20d1ffe9694819cfb67591646 | refs/heads/master | 2020-03-19T20:04:26.161270 | 2018-06-11T07:01:13 | 2018-06-11T07:01:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | import os
from PIL import Image
DATASET_PATH = os.curdir + os.sep + 'data' + os.sep + 'Adience'
IMAGE_FOLDER = os.sep + 'faces'
ANNOTATION_FOLDER = os.sep + 'annotations'
JSON_FOLDER = os.sep + 'set'
def main():
path = DATASET_PATH + IMAGE_FOLDER
directories = os.listdir(DATASET_PATH + IMAGE_FOLDER)
counter = 0
for d in directories:
directory_path = path + os.sep + d
image_file_names = os.listdir(directory_path)
for i in image_file_names:
if i[-3:] != 'txt':
image_path = directory_path + os.sep + i
image: Image.Image = Image.open(image_path)
resized = image.resize((200, 200))
resized.save(image_path)
counter += 1
print(counter)
if __name__ == '__main__':
main()
# 464 467
| [
"orcan.tiryakioglu@gmail.com"
] | orcan.tiryakioglu@gmail.com |
f78ec480786556e08f9a2cddea0271a0013e24e1 | 9ff1d0d5049dfe1c14528e098bdd8c934fb2274a | /tests/test3/test_port7_unittest.py | 6465c5d08297ccf489943439d23ab7e7aca49cfa | [] | no_license | 486dx/utility_Python | 43e06b3f74dac140396643d0e5c132fb874d2467 | 598117f7e9fd416f4bc7f1ccea931048a977a0bc | refs/heads/master | 2022-04-23T06:36:36.220406 | 2020-04-06T08:59:35 | 2020-04-06T08:59:35 | 264,519,552 | 1 | 0 | null | 2020-05-16T20:17:30 | 2020-05-16T20:17:30 | null | UTF-8 | Python | false | false | 1,690 | py | # test_port7_unittest.py
import unittest
from portfolio3 import Portfolio
class PortfolioTest(unittest.TestCase):
def test_empty(self):
p = Portfolio()
self.assertEqual(p.cost(), 0.0)
def test_buy_one_stock(self):
p = Portfolio()
p.buy("IBM", 100, 176.48)
self.assertEqual(p.cost(), 17648.0)
def test_buy_two_stocks(self):
p = Portfolio()
p.buy("IBM", 100, 176.48)
p.buy("HPQ", 100, 36.15)
self.assertEqual(p.cost(), 21263.0)
def test_bad_input(self):
p = Portfolio()
with self.assertRaises(TypeError):
p.buy("IBM")
class PortfolioSellTest(unittest.TestCase):
def setUp(self):
self.p = Portfolio()
self.p.buy("MSFT", 100, 27.0)
self.p.buy("DELL", 100, 17.0)
self.p.buy("ORCL", 100, 34.0)
def test_sell(self):
self.p.sell("MSFT", 50)
self.assertEqual(self.p.cost(), 6450)
def test_not_enough(self):
with self.assertRaises(ValueError):
self.p.sell("MSFT", 200)
def test_dont_own_it(self):
with self.assertRaises(ValueError):
self.p.sell("IBM", 1)
# Replace Portfolio.current_prices with a stub implementation.
# This avoids the web, but also skips all our current_prices
# code.
class PortfolioValueTest(unittest.TestCase):
def fake_current_prices(self):
return {'IBM': 140.0, 'HPQ': 32.0}
def setUp(self):
self.p = Portfolio()
self.p.buy("IBM", 100, 120.0)
self.p.buy("HPQ", 100, 30.0)
self.p.current_prices = self.fake_current_prices
def test_value(self):
self.assertEqual(self.p.value(), 17200)
| [
"f339339@gmail.com"
] | f339339@gmail.com |
fa804a7ddde1f1ff2df71b0e7b4718e2bb101e06 | 7316069d259c7a2338ae1891bdbba6e46486b823 | /table_tree/DataLocation.py | d94cccd15896aeec68dd27161cd7cf14943ec610 | [] | no_license | GuoXi722/PyQtPractice | 07dbb77d36776d2e0e96a6aa69aeb5da8f58571d | 40ded82c2dc70864763e5e93dd0b2da57cb8dd18 | refs/heads/master | 2021-01-08T20:21:49.450035 | 2020-02-27T02:23:02 | 2020-02-27T02:23:02 | 242,132,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,677 | py | import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
class DataLocation(QWidget):
def __init__(self):
super(DataLocation, self).__init__()
self.initUi()
def initUi(self):
self.setWindowTitle('在表格中查找指定数据')
self.resize(600, 800)
vlayout = QVBoxLayout()
hlayout = QHBoxLayout()
self.searchEdit = QLineEdit()
searchButton = QPushButton('查找')
searchButton.clicked.connect(self.search)
hlayout.addWidget(self.searchEdit)
hlayout.addWidget(searchButton)
self.tableWidget = QTableWidget()
self.tableWidget.setRowCount(40)
self.tableWidget.setColumnCount(4)
for i in range(40):
for j in range(4):
itemContent = f'({i}, {j})'
self.tableWidget.setItem(i, j, QTableWidgetItem(itemContent))
vlayout.addLayout(hlayout)
vlayout.addWidget(self.tableWidget)
self.setLayout(vlayout)
def search(self):
text = self.searchEdit.text()
items = self.tableWidget.findItems(text, Qt.MatchStartsWith)
if len(items) > 0:
print(self.tableWidget.itemFromIndex(1))
for item in items:
item.setBackground(QBrush(QColor(0, 255, 0)))
item.setForeground(QBrush(QColor(255, 0, 0)))
row = items[0].row()
# 定位到指定的行
self.tableWidget.verticalScrollBar().setSliderPosition(row)
if __name__ == '__main__':
app = QApplication(sys.argv)
window = DataLocation()
window.show()
sys.exit(app.exec_())
| [
"asaf227@126.com"
] | asaf227@126.com |
208e0c3629cefe2c406cecbcf1ccfcca3d306438 | a1c21b0c1b0e074060005c0714a69b4f983a237e | /TestPro/TestPro/urls.py | 018ae5eedca4a315e69f184f22637156fd34a88c | [] | no_license | Jzarecta/Django3 | e2d7f830060634e9035e72d525d93cb505824c90 | 3277d4e1831122a5cceb6767199fff7bfa0bf0a1 | refs/heads/master | 2020-12-03T20:21:54.637165 | 2020-01-04T00:13:25 | 2020-01-04T00:13:25 | 231,473,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | """TestPro URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('SportApp.urls')),
]
| [
"jza@bitcuners.org"
] | jza@bitcuners.org |
2470fdcf5635d5d8d83f280e1f1672e706801756 | f3a7ca57c3027c3ab40dc006126a71f151804d73 | /09_valor_mais_proximo_media.py | d167a1d2dafabb836533f4576ec283abf8c4fd57 | [] | no_license | evaldojr100/Python_Lista_4 | 9ff87df3fcb140bcfeffd59842a76a2b58e26bee | d5fa499d3ada2e20fbb5319928a8239d9784b366 | refs/heads/master | 2020-04-08T17:44:47.980772 | 2018-11-28T23:38:55 | 2018-11-28T23:38:55 | 159,579,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | # print(int)+(min({abs(int(sum([2.5,7.5,10.0,4.0])/len([2.5,7.5,10.0,4.0]))-i) for i in [2.5,7.5,10.0,4.0]})))
'''lista,lista2=[2.5,7.5,10,4.0],[]
{lista2.append(abs(i-6.0)) for i in lista}
print(lista[lista2.index(min(lista2))])'''
print(min({abs(i-(sum([2.5,7.5,10.0,4.0])/len([2.5,7.5,10.0,])))if 6.0<i else abs(i+(sum([2.5,7.5,10.0,4.0])/len([2.5,7.5,10.0,4.0])))for i in [2.5,7.5,10,4.0]}))
| [
"evaldojr_melo@hotmail.com"
] | evaldojr_melo@hotmail.com |
415094acc513de4473c05da95d87fe6a98ca7811 | e940f34521c1e15f4134e28584403abd1d2b048d | /intro_to_ml.py | 354af8a5398070a6a94369eb989505d34291f619 | [] | no_license | abdurehman12/Intro-To-ML | eaf8032ac510c6ba902bba759a6466638ee25492 | 5d8fddccc5ba38348b5effb5e3982485783d9f80 | refs/heads/main | 2023-08-02T07:19:55.583491 | 2021-10-06T16:25:12 | 2021-10-06T16:25:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,330 | py | # -*- coding: utf-8 -*-
"""Intro to ML.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1yHZysp6_Ii9SLgZhrxgXmdAf8KODHVOx
"""
import pandas as pd
import numpy as np
# save filepath to variable for easier access
melbourne_file_path = 'melb_data.csv'
# read the data and store data in DataFrame titled melbourne_data
melbourne_data = pd.read_csv(melbourne_file_path)
# print a summary of the data in Melbourne data
melbourne_data.describe()
melbourne_data["Price"].mean()
melbourne_data.columns
melbourne_data = melbourne_data.dropna(axis=0)
y = melbourne_data.Price
melbourne_features = ['Rooms', 'Bathroom', 'Landsize', 'Lattitude', 'Longtitude']
X = melbourne_data[melbourne_features]
X.describe()
X.head()
from sklearn.tree import DecisionTreeRegressor
# Define model. Specify a number for random_state to ensure same results each run
melbourne_model = DecisionTreeRegressor(random_state=1)
# Fit model
melbourne_model.fit(X, y)
print("Making predictions for the following 5 houses:")
print(X.head())
print("The predictions are")
print(melbourne_model.predict(X.head()))
import pprint as pp
pp.pprint(X.head())
filtered_melbourne_data = melbourne_data.dropna(axis=0)
# Choose target and features
y = filtered_melbourne_data.Price
melbourne_features = ['Rooms', 'Bathroom', 'Landsize', 'BuildingArea',
'YearBuilt', 'Lattitude', 'Longtitude']
X = filtered_melbourne_data[melbourne_features]
from sklearn.tree import DecisionTreeRegressor
# Define model
melbourne_model = DecisionTreeRegressor()
# Fit model
melbourne_model.fit(X, y)
from sklearn.metrics import mean_absolute_error
predicted_home_prices = melbourne_model.predict(X)
mean_absolute_error(y, predicted_home_prices)
from sklearn.model_selection import train_test_split
# split data into training and validation data, for both features and target
# The split is based on a random number generator. Supplying a numeric value to
# the random_state argument guarantees we get the same split every time we
# run this script.
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state = 0)
# Define model
melbourne_model = DecisionTreeRegressor()
# Fit model
melbourne_model.fit(train_X, train_y)
# get predicted prices on validation data
val_predictions = melbourne_model.predict(val_X)
print(mean_absolute_error(val_y, val_predictions))
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor
def get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y):
model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
model.fit(train_X, train_y)
preds_val = model.predict(val_X)
mae = mean_absolute_error(val_y, preds_val)
return(mae)
# compare MAE with differing values of max_leaf_nodes
for max_leaf_nodes in [5, 50, 500, 5000]:
my_mae = get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y)
print("Max leaf nodes: %d \t\t Mean Absolute Error: %d" %(max_leaf_nodes, my_mae))
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
forest_model = RandomForestRegressor(random_state=10)
forest_model.fit(train_X, train_y)
melb_preds = forest_model.predict(val_X)
print(mean_absolute_error(val_y, melb_preds))
| [
"noreply@github.com"
] | noreply@github.com |
8cdd5f52e919892a5acf7fabc7f846d69d487956 | 5491f4b600f7ecd1d0848d60d7b017e5e407d4c7 | /inventario/migrations/0005_ventamodel.py | 79ad0c9268a28f2a5951adb94199d7fd065bfa48 | [] | no_license | GustavoPMex/web-inventario | 409456dd356bbfcadd735cc9b8e2aae7605a0e37 | d0ac36ee791ff0262f9390497da1dd990581a4fd | refs/heads/master | 2023-06-10T10:08:39.029666 | 2021-06-30T23:40:19 | 2021-06-30T23:40:19 | 296,677,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | # Generated by Django 3.0.8 on 2020-09-29 03:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inventario', '0004_historicalarticulomodel'),
]
operations = [
migrations.CreateModel(
name='VentaModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=100)),
('proveedor', models.CharField(max_length=100)),
('vendidos', models.IntegerField()),
('precio', models.CharField(max_length=100)),
('fecha_venta', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name': 'Venta',
'verbose_name_plural': 'Ventas',
},
),
]
| [
"gustavoppymex@gmail.com"
] | gustavoppymex@gmail.com |
45334efb19f0bd14bb4fadb6ad06af11137d51fc | ea9d8a93f03291681c54e4e0af07a07200614b3b | /venv/bin/easy_install | 1884943234ec09ce04dd3afb7d3583ba101b968e | [] | no_license | arpitmohanty9/pawfect | 201310ca392fea5ead602c1a086bc42f7a4b6892 | adbc6072a737de3c6e17709d5bdc65738c7e1df9 | refs/heads/master | 2023-04-24T06:47:38.035053 | 2019-05-31T14:17:06 | 2019-05-31T14:17:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | #!/home/arpit/Documents/Git/pawfect/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
| [
"arpitmohanty9@gmail.com"
] | arpitmohanty9@gmail.com | |
7fbf7aa3784e3013167159849a122f698bad690e | 1eb98e1605507f7ca400f4b90d37b6124d00abc3 | /tensorflow/lib/python2.7/posixpath.py | 7d8a72347346a9bb112262497587f777180aaf82 | [] | no_license | arcoyk/ml | 4519c4ef95bb207feb642a4201bb00f035d25d0a | 9b67d799552ea854f248af311918fb30aca8d48a | refs/heads/master | 2022-10-21T22:15:34.967709 | 2017-08-25T15:29:32 | 2017-08-25T15:29:32 | 84,820,976 | 1 | 1 | null | 2022-10-12T18:44:02 | 2017-03-13T11:53:52 | Python | UTF-8 | Python | false | false | 57 | py | /Users/yui/.pyenv/versions/2.7/lib/python2.7/posixpath.py | [
"yui@racoon.local"
] | yui@racoon.local |
8bc82afe016dc0fdb0425e76358408b54d303f0f | 8e3b43774810ed73ee4797ebc8c09906359b236d | /finalnet.py | 3274130d46203b579c15b4a8455343268e231eb2 | [] | no_license | 459548764/Myself-Machine-Learning | 0fa9603b156beac810b18c0706d3e7b97e32642b | 31aa2d919443fef6d04d9d2b6545092046f7cfff | refs/heads/master | 2023-08-17T00:55:04.703546 | 2023-08-07T02:30:18 | 2023-08-07T02:30:18 | 144,861,866 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,650 | py | import numpy as np
import random
class MultiNetwork():
def __init__(self,
layer_dims):
self.parameter = {}
L = len(layer_dims)
for i in range(1,L):
self.parameter['W'+str(i)] = np.random.rand(layer_dims[i-1],
layer_dims[i])/np.sqrt(layer_dims[i-1])
self.parameter['b'+str(i)] = np.zeros([1,layer_dims[i]])
assert(self.parameter['W'+str(i)].shape == (layer_dims[i-1],layer_dims[i]))
assert(self.parameter['b'+str(i)].shape == (1,layer_dims[i]))
def _sigmoid(self,z):
return 1/(1+np.exp(-z))
def _diff_sigmoid(self,z):
return self._sigmoid(z)*(1-self._sigmoid(z))
def _tanh(self,z):
return (np.exp(z)+np.exp(-z))/(np.exp(z)-np.exp(-z))
def _diff_tanh(self,z):
return 1 - self._tanh(z)**2
def _relu(self,z):
return np.maximum(0,z)
def _diff_relu(self,z):
dz = np.array(z,copy = True)
dz[z > 0] = 1
dz[z <= 0] = 0
return dz
def linear_forward(self,
input,
weight,
bias):
z = np.dot(input,weight) + bias
assert(z.shape == (input.shape[0],weight.shape[1]))
return z
def linear_activation_forward(self,
input,
weight,
bias,
cur_layer,
method):
z = self.linear_forward(input,weight,bias)
if method == 'sigmoid':
a = self._sigmoid(z)
if method == 'relu':
a = self._relu(z)
if method == 'tanh':
a = self._tanh(z)
if method == 'none':
a = z
return a
def forward(self,
data):
self.backparameter = {}
self.cache = {}
input = data
L = len(self.parameter)//2
for i in range(1,L):
input_pre = input
self.cache['X'+str(i)] = input_pre
input = self.linear_activation_forward(input_pre,
self.parameter['W'+str(i)],
self.parameter['b'+str(i)],i,
'relu')
self.backparameter['diff'+str(i)] = self._diff_relu(input)
self.cache['X'+str(L)] = input
output = self.linear_activation_forward(input,
self.parameter['W'+str(L)],
self.parameter['b'+str(L)],L,
'none')
self.backparameter['diff'+str(L)] = 1
return output
def compute_loss(self,
output,
label,
method):
if method == 'softmax':
q = np.array(output,copy = True)
for i in range(output.shape[0]):
sum = np.sum(np.exp(output[i]))
for j in range(output.shape[1]):
q[i][j] = np.exp(output[i][j])/sum
loss = q - label
print(q)
if method == 'mse':
loss = output - label
return loss
def backward(self,
loss):
self.gradient = {}
L = len(self.parameter)//2
self.gradient['db' + str(L)] = loss*self.backparameter['diff'+str(L)]
self.gradient['dW' + str(L)] = np.dot(self.cache['X'+str(L)].T,
self.gradient['db' + str(L)])
self.gradient['dA' + str(L)] = np.dot(self.gradient['db' + str(L)],
self.parameter['W'+str(L)].T)
for i in reversed(range(1,L)):
self.gradient['db' + str(i)] = self.gradient['dA' + str(i+1)]*self.backparameter['diff'+str(i)]
self.gradient['dW' + str(i)] = np.dot(self.cache['X'+str(i)].T,
self.gradient['db' + str(i)])
self.gradient['dA' + str(i)] = np.dot(self.gradient['db' + str(i)],
self.parameter['W'+str(i)].T)
def update(self,
studyratio,
dropout):
L = len(self.parameter)//2
for i in range(1,L+1):
total_drop = int(dropout * self.parameter['W'+str(i)].shape[1])
resultList = random.sample(range(0,self.parameter['W'+str(i)].shape[1]),
total_drop)
for dt in resultList:
self.gradient['dW'+str(dt)] = 0
self.gradient['db'+str(dt)] = 0
self.parameter['W'+str(i)] = self.parameter['W'+str(i)] - studyratio*self.gradient['dW'+str(i)]
self.parameter['b'+str(i)] = self.parameter['b'+str(i)] - studyratio*self.gradient['db'+str(i)]
def train(self,
data,
label,
studyratio,
dropout = 0):
output = self.forward(data)
loss = self.compute_loss(output,label,'softmax')
self.backward(loss)
self.update(studyratio,dropout)
data = np.array([[1.0,1.0],[0.0,0.0],[0.0,1.0],[1.0,0.0]])
label = np.array([[1.0,0.0],
[1.0,0.0],
[0.0,1.0],
[0.0,1.0]])
studyratio = 0.1
dropout = 0.25
Layers = [2,4,4,2]
bp = MultiNetwork(Layers)
for iteration in range(1,10000):
bp.train(data,label,studyratio,dropout)
| [
"noreply@github.com"
] | noreply@github.com |
89a6df9618831d99282c5edf2d9c150519e4502c | 0978f8e704196f94fce1768e5b0b1ed55b1fe027 | /上課用_打地鼠.py | 427d7a015534e28603c6974f4a3dfb1c8c247e63 | [] | no_license | KaiCheng026/test | 61dbd04b5eb6ae67d9886c9e26dc60bc0a63587f | c10111eda44fce31dfc23233a264e1456e0eb52a | refs/heads/main | 2023-07-03T07:38:36.410746 | 2021-08-09T10:00:48 | 2021-08-09T10:00:48 | 394,238,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,755 | py | """
引入我們會需要用到的程式庫
"""
import pygame
import time
from random import randint
"""
決定視窗大小及我們會常常用到的顏色
這樣的好處是以後我們如果要改主題顏色或視窗大小,只要對應去改這邊的資料就好
"""
# Screen dimensions
SCREEN_WIDTH = 400 # width
SCREEN_HEIGHT = 400 # height
# Theme colors (RGB)
GREEN = (73, 188, 11)
YELLOW = (225, 225, 0)
WHITE = (255, 255, 255)
"""
觀察我們會需要知道的資訊
首先是老鼠會出現在某個 (x,y) 座標上(一次一隻)
所以我們透過 x, y 變數來記住目前老鼠在哪裡方便判斷
接著是我們預計設計出來的遊戲裡面會需要什麼資訊?
分數及遊戲時間,那我們額外加個開始時間,方便計算過了幾秒鐘
最後還有一個東西,我們希望知道什麼時候滑鼠有按下
這樣槌子才能對應改變成敲下去的樣子
"""
# Mole position and game information
x = None # the mole appears at coordinate (x, y)
y = None
score = 0 # current score
game_time = 20 # game duration limit in seconds
start_time = 0 # wall-clock time when the round started
mallet_down = False # whether the mouse button is held; False = mallet shown at rest
"""
我們玩遊戲的時候會有幾個狀態,像是一開始的首頁、遊戲中的畫面和最後結束的畫面
因此我們設計一個變數來儲存目前的狀態,好讓我們對應去顯示畫面及處理動作
像是我們不會希望還沒開始的時候,時間就開始倒數或是老鼠就在亂跑
"""
# game state: 0 - welcome, 1 - playing, 2 - game over
state = 0
"""
接著可以設定初始化的遊戲視窗,順便載入會使用到的圖片
由於只會執行一次,不用函式包起來比較好對這些變數操作
"""
# Create the window and load the images
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT)) # create a 400x400 window
clock = pygame.time.Clock() # clock object used to track and cap frame time
mole = pygame.image.load('mole.png') # the mole sprite (the thing we whack)
mallet = pygame.image.load('mallet.png') # mallet at rest (shown while mallet_down is False)
down_mallet = pygame.image.load('down-mallet.png') # mallet swung down (shown while mallet_down is True)
background = pygame.image.load('grass.png') # in-game background image
pygame.mouse.set_visible(False) # hide the OS cursor inside the window
pygame.display.set_caption('click a mole') # window title shown in the title bar
pygame.font.init() # initialise the font module; without this no text can render
"""
對應不同的畫面,我們分別把他定義成函式
分別為 welcome_screen 歡迎頁面、 play_screen 遊戲頁面 及 end_screen 結束頁面
然後我們對應看裡面需要做什麼事情
"""
def welcome_screen():
    """Draw the title screen: prompt text plus decorative mallet and mole."""
    # Fill the background with green
    screen.fill(GREEN)
    # Font template: pygame.font.Font(font file or None for default, size)
    font = pygame.font.Font(None, 30)
    # Welcome text: render(text, antialias flag, color[, background omitted = transparent])
    text = font.render("press ENTER to start", False, WHITE)
    # Blit the text, mallet and mole onto the screen
    screen.blit(text, (SCREEN_WIDTH / 2 - text.get_width() / 2, 185)) # text, horizontally centered
    screen.blit(mallet, (200, 50)) # mallet position
    screen.blit(mole, (120, 250)) # mole position
def play_screen():
    """Draw one frame of the in-game screen."""
    # Draw the grass background
    screen.blit(background, (0,0))
    """
    接著去思考有哪些東西需要改變或動作,我們先預想會有哪些函式並寫在這裡
    之後我們再去實作他
    """
    # Draw the score
    show_score()
    # Draw the countdown timer
    show_timer()
    # Draw the mole
    show_mole()
    # Draw the mallet (follows the mouse)
    show_mallet()
def end_screen():
    """Draw the game-over screen: final score and a restart prompt."""
    # Fill the background with green
    screen.fill(GREEN)
    # Build the "game over", score and restart texts with decreasing font sizes
    font = pygame.font.Font(None, 30) # create the font object first
    game_over = font.render("GAME OVER", False, WHITE) # (text, antialias, color[, background])
    font = pygame.font.Font(None, 25)
    points = font.render("score: " + str(score), False, WHITE)
    font = pygame.font.Font(None, 22)
    restart = font.render("press ENTER to play again", False, WHITE)
    # Blit the three lines onto the screen, horizontally centered
    screen.blit(game_over, (SCREEN_WIDTH / 2 - game_over.get_width() / 2, 100))
    screen.blit(points, (SCREEN_WIDTH / 2 - points.get_width() / 2, 200))
    screen.blit(restart, (SCREEN_WIDTH / 2 - restart.get_width() / 2, 300))
"""
接著來實作遊玩的函式,這部分有點多我們可以先從簡單的開始
play 及 end 函式用來處理開始玩及玩完的遊戲狀態
Global:
將外部的變數視為全域變數引入函式之中
這樣對他修改後就不會隨著函式消失就恢復原狀
"""
def play():
    """Reset score/timer and switch the game into the playing state."""
    # Use the module-level game state, score and start time
    global state, score, start_time # defaults are 0, 0, 0
    # Record the start of the round; time.time() returns the current time
    start_time = time.time() # time() from the imported time module
    # Reset the score and enter state 1 (playing)
    score = 0
    state = 1 # switch the page to "playing"
    # Spawn a new mole
    new_mole()
    # NOTE(review): whack() runs right after spawning; if the cursor happens to
    # rest on the new mole this scores a point at game start - confirm intended.
    whack()
def end():
    """Switch the game into the game-over state."""
    # State 2 means the game is over
    global state
    state = 2 # page = game_over
"""
接著處理剛剛提到的產生新老鼠 new_mole 以及槌子 whack 打老鼠的偵測
"""
def new_mole():
    """Pick a random on-screen position for the next mole."""
    # Randomly decide where the next mole appears
    global x, y
    # Valid range: x spans the screen width minus the mole's width; y starts at
    # 30 (below the score/timer bar) down to the bottom minus the mole's height
    x = randint(0, SCREEN_WIDTH - mole.get_width())
    y = randint(30, SCREEN_HEIGHT - mole.get_height())
def whack():
    """Award a point and respawn the mole when the click lands on it."""
    global score
    # Current mouse position
    mx, my = pygame.mouse.get_pos() # "pos" is short for position
    # Mole sprite dimensions
    width, height = mole.get_size()
    # Bounding-box test: a hit when the cursor's distance from the mole's
    # center is at most half the sprite's width/height on each axis
    if abs(mx - x - width / 2) <= width / 2 and abs(my - y - height / 2) <= height / 2:
        score += 1 # one point
        new_mole() # spawn a new mole
"""
接著在顯示 (blit) 圖片之前要來檢查滑鼠是不是有按下去
如果有按下去的話要改變 mallet_down 的狀態來判斷顯示成槌子槌下去的圖片
"""
def check_mallet_state():
    """Track whether the left mouse button is held, to pick the mallet sprite."""
    global mallet_down
    # get_pressed() returns [left, middle, right]; index 0 is the left button
    if pygame.mouse.get_pressed()[0]:
        mallet_down = True # mallet swung down
    else:
        mallet_down = False # mallet back to its resting sprite
"""
最後一部分是要來顯示老鼠、槌子、分數及時間
"""
def show_mole():
    # Draw the mole at its randomly chosen position
    screen.blit(mole, (x, y))
def show_mallet():
    """Draw the mallet sprite centered on the mouse cursor."""
    # Refresh mallet_down from the current mouse-button state
    check_mallet_state()
    # Rect carrying the mallet image's size/position
    mallet_position = mallet.get_rect()
    # Center the mallet on the mouse cursor
    mallet_position.center = pygame.mouse.get_pos()
    # Choose the sprite according to the button state
    if mallet_down:
        # Button held (True): draw the swung-down mallet
        screen.blit(down_mallet, mallet_position)
    else:
        # Button released (False): draw the resting mallet
        screen.blit(mallet, mallet_position)
def show_score():
    """Draw the current score in the top-left corner."""
    # Font template
    font = pygame.font.Font(None, 28)
    # Render the score text
    text = font.render(str(score), False, WHITE)
    # Blit it in the top-left corner
    screen.blit(text, (10, 0))
def show_timer():
    """Draw the remaining seconds; end the game when time runs out."""
    # seconds elapsed = now - start time
    elapsed = time.time() - start_time
    # seconds left = total game time - elapsed
    timer = game_time - elapsed
    # Out of time: switch to the game-over state
    # NOTE(review): end() is re-entered every frame after expiry and a negative
    # value is still rendered below once - harmless but worth confirming.
    if timer < 0:
        end() # show the end screen
    # Build the timer text and blit it in the top-right corner
    font = pygame.font.Font(None, 28)
    text = font.render(str(int(timer)), False, WHITE)
    screen.blit(text, (370, 0))
"""
接下來要寫事件處理的部分
每個遊戲狀態都有要處理的事件
像是遊戲一開始按下 Enter 的時候要開始玩 -> Play()
遊戲中的話按下滑鼠要去打地鼠
遊戲結束的話一樣按下 Enter 要重新開始遊戲 -> Play()
"""
# Welcome-page event handler
def handle_welcome(e):
    # Draw the welcome screen
    welcome_screen()
    # Keyboard event: a key went down AND it was the Enter/Return key
    if e.type == pygame.KEYDOWN and e.key == pygame.K_RETURN:
        play() # enter the playing page
# Playing-page event handler
def handle_play(e):
    # Mouse button pressed
    if e.type == pygame.MOUSEBUTTONDOWN:
        whack() # check whether the mole was hit
# Game-over-page event handler
def handle_end(e):
    # Keyboard Enter event
    if e.type == pygame.KEYDOWN and e.key == pygame.K_RETURN:
        play() # play another round: back to the playing page
# Main game loop
def main():
    # Run flag for the loop below
    running = True
    while running:
        # pygame.event.get() drains every pending event
        for event in pygame.event.get():
            # Window X button pressed: stop the game
            if event.type == pygame.QUIT:
                running = False # leave the while loop
            # Welcome page
            elif state == 0: # page: welcome
                handle_welcome(event) # Enter pressed? => start the game
            # Playing
            elif state == 1: # page: playing
                handle_play(event) # mouse pressed? => check for a hit
            # Game over
            elif state == 2: # page: end
                handle_end(event) # Enter pressed? => play again
        # While playing, draw the game frame
        if state == 1:
            play_screen()
        # When finished, draw the end screen
        if state == 2:
            end_screen()
        # Cap this loop at 30 iterations (frames) per second
        clock.tick(30)
        # Flip the finished frame onto the display
        pygame.display.update()
    # Window closed (running = False): shut pygame down
    pygame.quit()
# Start the game's main loop
main()
| [
"l6756071@gmail.com"
] | l6756071@gmail.com |
b75e7fe13bef3cee081fe2bc1532d67b72acfef2 | 54dca65b775a41b764afc34f1e8d9e310775985a | /catfood/experiment/experiment_case_views.py | a053536d437189634834c5df8a9512202861fdf4 | [] | no_license | Nntraveler/backend | 9a5e31b3b168701bf93b2006e83f16e95300cf1d | a3011d7e9f8caeb17340eaa85366b06dcede48af | refs/heads/main | 2023-04-16T16:09:02.827497 | 2021-04-24T13:49:03 | 2021-04-24T13:49:03 | 359,298,942 | 0 | 0 | null | 2021-04-19T01:57:55 | 2021-04-19T01:57:55 | null | UTF-8 | Python | false | false | 8,166 | py | from django.shortcuts import render
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.decorators import action
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes, authentication_classes
from course_database.models import ExperimentCaseDatabase
from experiment.models import CourseCase, ExperimentAssignment
from experiment.serializers import ExperimentCaseDatabaseSerializer, CourseCaseSerializer, ExperimentAssignmentSerializer
from rest_framework.parsers import JSONParser
from django.http import HttpResponse, JsonResponse
from rest_framework import generics
from rest_framework.permissions import AllowAny
from user.authentication import CatfoodAuthentication
from user.permissions import IsStudent, IsTeachingAssistant, IsTeacher, IsChargingTeacher
from experiment import utils
from course.models import Course, Teach
from user.models import TakeCourse
from user.serializers import TakeCourseSerializer
import json
import datetime
import random
from minio import Minio
from minio.error import ResponseError
from datetime import timedelta
from os import environ
from catfood.settings import MINIO_STORAGE_MEDIA_BUCKET_NAME as DEFAULT_BUCKET
from catfood.settings import MINIO_STORAGE_USE_HTTPS
# minio client to use
local_minio_client = Minio(
environ['MINIO_ADDRESS'],
access_key=environ['MINIO_ACCESS_KEY'],
secret_key=environ['MINIO_SECRET_KEY'],
secure=MINIO_STORAGE_USE_HTTPS,
)
# default file URL timeout = 15 min
DEFAULT_FILE_URL_TIMEOUT = timedelta(minutes=15)
# COURSE_DOCUMENT_BUCKET
EXPERIMENT_CASE_PREFIX = "experiment_case"
@api_view(['GET', 'POST'])
@permission_classes([IsChargingTeacher | IsTeacher])
@authentication_classes([CatfoodAuthentication])
def experiment_case_list(request):
    """
    List all cases, or create a new case.

    GET  -> all cases, sorted by creation date, internal file tokens removed.
    POST -> create a case and return presigned MinIO upload URLs in the
            CASE_FILE_UPLOAD_URL / ANSWER_FILE_UPLOAD_URL response headers.
    """
    if request.method == 'GET':
        cases = ExperimentCaseDatabase.objects.all()
        serializer = ExperimentCaseDatabaseSerializer(cases, many=True)
        # Never expose the internal MinIO object tokens to clients.
        for index, case in enumerate(serializer.data):
            serializer.data[index].pop('experiment_case_file_token', None)
            serializer.data[index].pop('answer_file_token', None)
        # Sort by the date part (first 10 chars) of the creation timestamp.
        ans = sorted(serializer.data, key=lambda x: datetime.datetime.strptime(x['case_created_timestamp'][:10], '%Y-%m-%d').timestamp())
        return Response(utils.generate_response(ans, True))
    elif request.method == 'POST':
        # Make sure the target bucket exists before issuing upload URLs.
        if not local_minio_client.bucket_exists(DEFAULT_BUCKET):
            local_minio_client.make_bucket(DEFAULT_BUCKET)
        new_case = {}
        response_headers = {}
        # case file: a random 30-hex-digit prefix keeps object names unique
        file_display_name = request.data["experiment_case_file_name"]
        random_hex_string = ('%030x' % random.randrange(16 ** 30))
        file_token = f"{EXPERIMENT_CASE_PREFIX }/{random_hex_string}/{file_display_name}"
        post_url = local_minio_client.presigned_url("PUT",
                                                    DEFAULT_BUCKET,
                                                    file_token,
                                                    expires=DEFAULT_FILE_URL_TIMEOUT)
        new_case['experiment_case_file_token'] = file_token
        response_headers['CASE_FILE_UPLOAD_URL'] = post_url
        # answer file: same scheme as the case file
        file_display_name = request.data["answer_file_name"]
        random_hex_string = ('%030x' % random.randrange(16 ** 30))
        file_token = f"{EXPERIMENT_CASE_PREFIX }/{random_hex_string}/{file_display_name}"
        post_url = local_minio_client.presigned_url("PUT",
                                                    DEFAULT_BUCKET,
                                                    file_token,
                                                    expires=DEFAULT_FILE_URL_TIMEOUT)
        new_case['answer_file_token'] = file_token
        response_headers['ANSWER_FILE_UPLOAD_URL'] = post_url
        # other info
        new_case['experiment_name'] = request.data['experiment_name']
        new_case['experiment_case_name'] = request.data['experiment_case_name']
        serializer = ExperimentCaseDatabaseSerializer(data=new_case)
        if serializer.is_valid():
            serializer.save()
            # Strip the internal token fields before returning the new record.
            ans = serializer.data
            ans.pop('experiment_case_file_token', None)
            ans.pop('answer_file_token', None)
            return Response(utils.generate_response(ans, True), headers=response_headers, status=status.HTTP_201_CREATED)
        else:
            return Response(utils.generate_response(serializer.errors, False), status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT'])
@permission_classes([IsChargingTeacher | IsTeacher])
@authentication_classes([CatfoodAuthentication])
def experiment_case_detail(request, pk):
    """
    Retrieve, update or delete a experiment case instance.

    GET -> case fields plus presigned MinIO download URLs in the
           CASE_FILE_DOWNLOAD_URL / ANSWER_FILE_DOWNLOAD_URL headers.
    PUT -> partial update; returns fresh presigned upload URLs in headers.
    """
    try:
        case = ExperimentCaseDatabase.objects.get(pk=pk)
    except ExperimentCaseDatabase.DoesNotExist:
        error_data = {"detail": "not exist"}
        return Response(utils.generate_response(error_data, False), status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        serializer = ExperimentCaseDatabaseSerializer(case)
        # Generate presigned MinIO download URLs
        response_headers = {}
        file_token = serializer.data['experiment_case_file_token']
        get_url = local_minio_client.presigned_url("GET",
                                                   DEFAULT_BUCKET,
                                                   file_token,
                                                   expires=DEFAULT_FILE_URL_TIMEOUT)
        response_headers['CASE_FILE_DOWNLOAD_URL'] = get_url
        file_token = serializer.data['answer_file_token']
        get_url = local_minio_client.presigned_url("GET",
                                                   DEFAULT_BUCKET,
                                                   file_token,
                                                   expires=DEFAULT_FILE_URL_TIMEOUT)
        response_headers['ANSWER_FILE_DOWNLOAD_URL'] = get_url
        # Strip the internal token fields from the response body
        ans = serializer.data
        ans.pop('experiment_case_file_token', None)
        ans.pop('answer_file_token', None)
        return Response(utils.generate_response(ans, True), headers=response_headers)
    elif request.method == 'PUT':
        serializer = ExperimentCaseDatabaseSerializer(
            case, data=request.data, partial=True)
        if serializer.is_valid():
            response_headers = {}
            # case file
            # NOTE(review): new object tokens are generated for the upload URLs
            # below but never written back to the model, so the stored tokens
            # still point at the old objects - confirm this is intended.
            file_display_name = serializer.data["experiment_case_file_name"]
            random_hex_string = ('%030x' % random.randrange(16 ** 30))
            file_token = f"{EXPERIMENT_CASE_PREFIX}/{random_hex_string}/{file_display_name}"
            post_url = local_minio_client.presigned_url("PUT",
                                                        DEFAULT_BUCKET,
                                                        file_token,
                                                        expires=DEFAULT_FILE_URL_TIMEOUT)
            response_headers['CASE_FILE_UPLOAD_URL'] = post_url
            # answer file
            file_display_name = serializer.data["answer_file_name"]
            random_hex_string = ('%030x' % random.randrange(16 ** 30))
            file_token = f"{EXPERIMENT_CASE_PREFIX}/{random_hex_string}/{file_display_name}"
            post_url = local_minio_client.presigned_url("PUT",
                                                        DEFAULT_BUCKET,
                                                        file_token,
                                                        expires=DEFAULT_FILE_URL_TIMEOUT)
            response_headers['ANSWER_FILE_UPLOAD_URL'] = post_url
            serializer.save()
            # Strip the internal token fields from the response body
            ans = serializer.data
            ans.pop('experiment_case_file_token', None)
            ans.pop('answer_file_token', None)
            return Response(utils.generate_response(ans, True), headers=response_headers)
        return Response(utils.generate_response(serializer.errors, False), status=status.HTTP_400_BAD_REQUEST)
| [
"noreply@github.com"
] | noreply@github.com |
d1e598832c836550b9a4467d770645ca5e081867 | 63ccd06dc73ae23018138979d6631554c7160794 | /experiments/sl-fmri-expt/four_tasks_fmri/vsl_fmri/misc_vsl/sequence_gen.py | af7806099088bc0d6db73ee9712a3ba0f96ca41f | [] | no_license | zhenghanQ/qlab | d82b318a8c10d9a4d3ab144d029ed20ac83060c7 | aaf7dd591b77b9e611366f2bacefd2b613644c83 | refs/heads/master | 2021-06-01T17:38:51.634923 | 2021-03-23T01:15:44 | 2021-03-23T01:15:44 | 128,655,996 | 1 | 0 | null | 2018-04-08T15:37:37 | 2018-04-08T15:37:36 | null | UTF-8 | Python | false | false | 3,440 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 28 16:10:26 2016
@author: ysa
"""
import random
import itertools
import pandas as pd
# All 6 orderings of each alien family's three images.
perm_list_1 = list(itertools.permutations(['Alien1.BMP', 'Alien2.BMP', 'Alien3.BMP']))
perm_list_2 = list(itertools.permutations(['Alien4.BMP', 'Alien5.BMP', 'Alien6.BMP']))
perm_list_3 = list(itertools.permutations(['Alien7.BMP', 'Alien8.BMP', 'Alien9.BMP']))
perm_list_4 = list(itertools.permutations(['Alien10.BMP', 'Alien11.BMP', 'Alien12.BMP']))
permutations = [perm_list_1, perm_list_2, perm_list_3, perm_list_4]
# Three fixed orderings per family (permutation indices 0, 3 and 4).
sequences = [[permutations[0][0], permutations[1][0], permutations[2][0], permutations[3][0]],
             [permutations[0][3], permutations[1][3], permutations[2][3], permutations[3][3]],
             [permutations[0][4], permutations[1][4], permutations[2][4], permutations[3][4]]]
print '''
+++++++++++Generating sequence of triples for block 1++++++++++++
'''
def block_1_generator():
    # Build a 96-item sequence drawn from four fixed triplet strings, then try
    # to remove immediate repetitions and print the sequence and counts.
    block_list = []
    triplets = []
    for j in sequences:
        for i in range(4):
            triplets.append(' '.join(j[i]))
    # Only the first four joined triplets are used as the sampling pool.
    seq = [triplets[0], triplets[1], triplets[2], triplets[3]]
    while len(block_list) != 96:
        block_list.extend(random.sample(seq, 4))
    # De-duplication pass: move an item that repeats its predecessor elsewhere.
    # NOTE(review): each position x is only re-checked until its own repeat is
    # resolved; relocations may still create new adjacent repeats later in the
    # list - confirm whether that is acceptable for the experiment.
    for x in range(1, len(block_list)):
        while 1:
            if block_list[x] == block_list[x-1]:
                y = block_list.pop(x)
                if y == block_list[-1] and y != block_list[0]:
                    block_list.insert(0,y)
                elif y == block_list[-1] and y == block_list[0]:
                    block_list.insert(-3,y)
                else:
                    block_list.append(y)
            else:
                break
    # Tally how often each of the four triplets appears.
    sequence_counts = {seq[0]:0, seq[1]:0, seq[2]:0, seq[3]:0}
    for i in block_list:
        for k,v in sequence_counts.items():
            if k == i:
                v += 1
                sequence_counts[k] = v
    print block_list
    print '''
    counts for each sequence:
    ''', sequence_counts
#block_1_generator()
### append each image 24 times
### shuffle the big list
### randomly generate groups of 3
print '''
+++++++++++Generating sequence of triples for block 2++++++++++++
'''
def block_2_generator():
    # Return 96 random triplet strings built from 24 copies of each alien image,
    # accepting the shuffle only if no triplet occurs 3 or 4 times.
    aliens = ['Alien1.BMP', 'Alien2.BMP', 'Alien3.BMP',
              'Alien4.BMP', 'Alien5.BMP', 'Alien6.BMP',
              'Alien7.BMP', 'Alien8.BMP', 'Alien9.BMP',
              'Alien10.BMP', 'Alien11.BMP', 'Alien12.BMP']
    block_2_images = [i for x in range(24) for i in aliens]
    random.shuffle(block_2_images)
    triplets = []
    a = 1
    # NOTE(review): if the first pass fails the acceptance check, triplets is
    # not cleared and block_2_images is not reshuffled before the retry, so the
    # loop appends duplicates and may never terminate - confirm intended.
    while a == 1:
        x = 0
        for i in range(96):
            triplets.append(' '.join(block_2_images[x:x+3]))
            x += 3
        # Count how many times each triplet string occurs.
        occurances_of_triplets = []
        for x in triplets:
            occurances_of_triplets.append(triplets.count(x))
        # Accept (exit) when neither count 3 nor count 4 is present.
        for i in range(3,5):
            if i not in occurances_of_triplets:
                a = 2
    print '\n', 'The number of times a particular triplet appears in the random sequence: ', occurances_of_triplets
    return triplets
# Flatten the triplet strings into one image per row and save them as CSV.
random_block = block_2_generator()
rand_col = []
for triplet in random_block:
    rand_col.extend(triplet.split(" "))
cols = {"image": rand_col}
new_rand = pd.DataFrame(cols, columns = ['image'])
new_rand.to_csv('R_fam_seq_1.csv')
| [
"qlab.udel@gmail.com"
] | qlab.udel@gmail.com |
10ba71386623c722c7ac6066f207879aca699a08 | 4ce466c9362ff7bce0de3ef64e4db3c802333cf6 | /Pandas/tut5.py | 3d2d586281d4719bdc5fa9628dbab3b5f049c5d0 | [] | no_license | anandmooga/Sentdex_Tutorials | 4e57efef4d9e54dedd2f5ecf39a64dbcb9a0d561 | ada9cc520f9f7ef20ea26efc087ca7def0ce66f5 | refs/heads/master | 2021-09-01T06:23:23.863189 | 2017-12-25T15:12:39 | 2017-12-25T15:12:39 | 114,860,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,482 | py | import pandas as pd
import quandl
import pickle
api_key = open('Quandl_api.txt' , 'r').read()
def state_list():
    # Scrape the table of U.S. states from Wikipedia; the first table's first
    # column holds the state abbreviations, and row 0 is the header we drop.
    tables = pd.read_html('https://simple.wikipedia.org/wiki/List_of_U.S._states')
    return tables[0][0][1:]
def grab_initial_state_data():
    """Download the Freddie Mac HPI series for every state from Quandl,
    join them into one DataFrame, and pickle it to 'fiddy_states.pickle'."""
    states = state_list()
    main_df = pd.DataFrame()
    for abbv in states:
        query = "FMAC/HPI_"+str(abbv)
        df = quandl.get(query, authtoken=api_key)
        # Rename the generic 'Value' column to the state abbreviation so the
        # joined frame has one column per state.
        df.rename(columns={'Value':str(abbv)}, inplace=True)
        print(query)
        if main_df.empty:
            main_df = df
        else:
            main_df = main_df.join(df)
    # Context manager guarantees the pickle file is closed even if dump fails
    # (the original used an explicit open/close pair).
    with open('fiddy_states.pickle', 'wb') as pickle_out:
        pickle.dump(main_df, pickle_out)
#grab_initial_state_data()
# Load the previously pickled fifty-state frame with the stdlib pickle module.
pickle_in = open('fiddy_states.pickle' , 'rb')
HPI_data = pickle.load(pickle_in)
print(HPI_data)
pickle_in.close()
# same thing using pandas' built-in pickling helpers
HPI_data.to_pickle('pickle.pickle')
HPI_data2 = pd.read_pickle('pickle.pickle')
print(HPI_data2)
##fifty_states = pd.read_html('https://simple.wikipedia.org/wiki/List_of_U.S._states')
##abb = fifty_states[0][0]
##
##print (abb)
##for abv in abb[1:] :
##    query= 'FMAC/HPI_'+str(abv)
##    df = quandl.get(query, authtoken=api_key)
##    df.rename(columns={'Value':str(abv)}, inplace=True)
##    if main_df.empty:
##        main_df = df
##    else:
##        main_df = main_df.join(df)
##
##print(main_df.head())
####main_df.to_csv('hpia.csv')
| [
"anandmooga2@gmail.com"
] | anandmooga2@gmail.com |
c5c570c5d072a814ff270e276deaef84ad277e35 | 56255c15702f4f4a01b7f785f956cee7290d0097 | /segmentation_pytorch/utils/train.py | ef5089b869ed248028f04a015305e45cdec34d74 | [] | no_license | devhliu/PyTorch_UNOdeMSegNet | d2561606aac34ace4664c48bc000d4c4a915699a | 3a446ca71ddd74e612bf2c2acc43e7b210366e5b | refs/heads/master | 2020-12-24T04:41:23.674029 | 2019-11-13T08:07:40 | 2019-11-13T08:07:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,142 | py | import sys
import torch
import pdb
from tqdm import tqdm as tqdm
from torchnet.meter import AverageValueMeter
# from ..models.CRF import dense_crf
class Epoch:
    """Base runner for one pass over a dataloader (training or validation).

    Subclasses implement batch_update() and on_epoch_start(); run() drives the
    loop and averages the loss and each metric over all batches.
    """
    def __init__(self, model, loss, metrics, stage_name, device='cpu', verbose=True):
        self.model = model
        self.loss = loss
        self.metrics = metrics
        self.stage_name = stage_name
        self.verbose = verbose
        self.device = device
        self._to_device()
    def _to_device(self):
        # Move the model, the loss and every metric to the configured device.
        self.model.to(self.device)
        self.loss.to(self.device)
        for metric in self.metrics:
            metric.to(self.device)
    def _format_logs(self, logs):
        # Render {name: value} pairs as a single "name - value, ..." string.
        str_logs = ['{} - {:.4}'.format(k, v) for k, v in logs.items()]
        s = ', '.join(str_logs)
        return s
    def batch_update(self, x, y):
        # Subclass hook: perform one forward (and optionally backward) pass.
        raise NotImplementedError
    def on_epoch_start(self):
        # Subclass hook: e.g. switch the model into train()/eval() mode.
        pass
    def run(self, dataloader):
        """Iterate the dataloader once; return averaged loss/metric logs."""
        self.on_epoch_start()
        logs = {}
        loss_meter = AverageValueMeter()
        metrics_meters = {metric.__name__: AverageValueMeter() for metric in self.metrics}
        with tqdm(dataloader, desc=self.stage_name, file=sys.stdout, disable=not (self.verbose)) as iterator:
            for x, y in iterator:
                # x, y = x.to(self.device), y.to(self.device)
                x = x.to(self.device)
                # y may be a list of tensors (e.g. deep-supervision targets).
                if isinstance(y, list):
                    y = [i.to(self.device) for i in y]
                else:
                    y = y.to(self.device)
                loss, y_pred = self.batch_update(x, y)
                # update loss logs
                loss_value = loss.cpu().detach().numpy()
                loss_meter.add(loss_value)
                loss_logs = {self.loss.__name__: loss_meter.mean}
                logs.update(loss_logs)
                # update metrics logs
                # With list targets, metrics score against the last element only.
                y = y[-1] if isinstance(y, list) else y
                for metric_fn in self.metrics:
                    metric_value = metric_fn(y_pred, y).cpu().detach().numpy()
                    metrics_meters[metric_fn.__name__].add(metric_value)
                metrics_logs = {k: v.mean for k, v in metrics_meters.items()}
                logs.update(metrics_logs)
                if self.verbose:
                    s = self._format_logs(logs)
                    iterator.set_postfix_str(s)
        return logs
class TrainEpoch(Epoch):
    """One training epoch: zero grads, forward, loss, backward, optimizer step."""
    def __init__(self, model, loss, metrics, optimizer, device='cpu', verbose=True, crf=False):
        super().__init__(
            model=model,
            loss=loss,
            metrics=metrics,
            stage_name='train',
            device=device,
            verbose=verbose,
        )
        self.crf = crf
        self.optimizer = optimizer
    def on_epoch_start(self):
        # Enable dropout/batch-norm updates for training.
        self.model.train()
    def batch_update(self, x, y):
        self.optimizer.zero_grad()
        prediction = self.model.forward(x)
        if self.crf:
            # NOTE(review): dense_crf's import is commented out at the top of
            # this module, so crf=True raises NameError here - confirm.
            prediction = dense_crf(img=prediction, output_probs=y)
        loss = self.loss(prediction, y)
        loss.backward()
        self.optimizer.step()
        # Models with deep supervision return a list; report the final output.
        if isinstance(prediction, list):
            return loss, prediction[-1]
        return loss, prediction
class ValidEpoch(Epoch):
    """One validation epoch: forward passes only, under torch.no_grad()."""
    def __init__(self, model, loss, metrics, device='cpu', verbose=True):
        super().__init__(
            model=model,
            loss=loss,
            metrics=metrics,
            stage_name='valid',
            device=device,
            verbose=verbose,
        )
    def on_epoch_start(self):
        # Freeze dropout/batch-norm statistics for evaluation.
        self.model.eval()
    def batch_update(self, x, y):
        with torch.no_grad():
            prediction = self.model.forward(x)
            if isinstance(prediction, list):
                prediction = prediction[-1]
            # NOTE(review): unlike TrainEpoch, the loss receives a third
            # argument (self.model.training, False here) - confirm the loss
            # callable accepts it.
            loss = self.loss(prediction, y, self.model.training)
        return loss, prediction
| [
"maverickers@outlook.com"
] | maverickers@outlook.com |
e4dbd79b218b3d5d6eb4ae027687f1eca532636a | f5a2f3a693fcb374d42de267c0b87c3632d037f5 | /Neumann-Final/Final2.py | 0fc211287dbd54c38576e050172aeafd7fcc10ef | [] | no_license | RyanNeumann/Artificial_Intelligence | 3b0cb9be103ce5f95199c4fe2548f9a5e146335e | fea967793f7baad1f47d06c8b0d16c48dc8c12a8 | refs/heads/master | 2021-03-22T03:31:32.887340 | 2016-12-13T21:21:57 | 2016-12-13T21:21:57 | 67,057,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,374 | py | import finalGetDigits
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVR
from sklearn.model_selection import KFold
# get digits data X (training input) and y (target output)
X, y, X_te, y_te = finalGetDigits.getDataSet()
#penC <- Penalty parameter C of the error term
#tubEpsilon <- the epsilon-tube within which no penalty is associated
# Track the best hyper-parameter combination found by cross-validation.
bestC=0
bestEpsilon=0
bestGamma=0
bestScore=float('-inf')
score=0
# Grid search over C (2^6..2^12), epsilon (0.5..2.5) and gamma (2^-6..2^-2).
for penC in np.logspace(6, 12, num=7, base=2):
    for tubEpsilon in np.linspace(0.5, 2.5, num=21):
        for paramGamma in np.logspace(-6, -2, num=5, base=2):
            # NOTE(review): the number of CV folds is drawn at random (2..10)
            # for every combination, so scores are not strictly comparable
            # across combinations - confirm this is intended.
            kf = KFold(n_splits=np.random.randint(2,11))
            cvscore=[]
            for train, validation in kf.split(X):
                X_train, X_validation, y_train, y_validation = X[train, :], X[validation, :], y[train], y[validation]
                # here we create the SVR
                svr = SVR(C=penC, epsilon=tubEpsilon, gamma=paramGamma, kernel='rbf', verbose=False)
                # here we train the SVR
                svr.fit(X_train, y_train)
                # now we get E_out for validation set
                score=svr.score(X_validation, y_validation)
                cvscore.append(score)
            # average CV score
            score=sum(cvscore)/len(cvscore)
            if (score > bestScore):
                bestScore=score
                bestC=penC
                bestEpsilon=tubEpsilon
                bestGamma=paramGamma
                print("BEST! -> C " + str(penC) + ", epsilon " + str(tubEpsilon) + ", gamma " + str(paramGamma) + ". Testing set CV score: %f" % score)
            else:
                print("C " + str(penC) + ", epsilon " + str(tubEpsilon) + ", gamma " + str(paramGamma) + ". Testing set CV score: %f" % score)
# here we create the final SVR
svr = SVR(C=bestC, epsilon=bestEpsilon, gamma=bestGamma, kernel='rbf', verbose=True)
# here we train the final SVR
svr.fit(X, y)
# E_out in training
print("Training set score: %f" % svr.score(X, y))
# here test the final SVR and get E_out for testing set
ypred=svr.predict(X_te)
score=svr.score(X_te, y_te)
print("Testing set score: %f" % score)
# Min-max normalise the test inputs into [0, 1] for the 2-D scatter plot.
x_min, x_max = np.min(X_te, axis=0), np.max(X_te, axis=0)
X_te = (X_te - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
# Draw each test digit label at its (normalised) position, colored by the
# rounded prediction.
for i in range(X_te.shape[0]):
    plt.text(X_te[i, 0], X_te[i, 1], str(y_te[i]), color=plt.cm.spectral(round(ypred[i]) / 10.), fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
plt.axis('off')
plt.tight_layout()
plt.show()
| [
"Ryan.Neumann1@marist.edu"
] | Ryan.Neumann1@marist.edu |
9e8e09a34fca6bb6e644f23b4638fe6a65ba5f62 | 440b6e249bed961706164b20d98fc7ba16f4323d | /testMenegottoPinto.py | dbfac51904e45f28dd3e3d217c658a3f79a9e435 | [] | no_license | pdhhiep/fiber_beam_column | 6df8a5d0e6eb736205c41952dc636b1bad353d42 | 1d6cb08494f6774fb1674ff838dc453c388a7816 | refs/heads/master | 2022-03-24T03:03:06.909542 | 2019-12-05T14:02:13 | 2019-12-05T14:02:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,626 | py | """
test file
"""
import numpy as np
import matplotlib.pyplot as plt
class MenegottoPinto:
    """Menegotto-Pinto uniaxial steel material model with isotropic hardening.

    Tracks a trial state (updated every strain increment) and a converged
    state (committed via finalize()); stress/tangent follow the smooth
    transition curve between the elastic and hardening asymptotes.
    """
    def __init__(self, E, b, fy, R, a1, a2):
        # E: elastic modulus; b: hardening ratio; fy: yield stress;
        # R: initial transition-curve exponent; a1, a2: curvature parameters.
        self._E = E
        self._Et = E
        self._b = b
        self._R0 = R
        self._R = R
        self._fy = fy
        self._a1 = a1
        self._a2 = a2
        # Loading index:
        # 0 => initial state
        # 1 => increasing strain
        # 2 => decreasing strain
        # 3 => strain not changing
        self._loading_index = 0
        self._strain_0 = fy / E
        self._stress_0 = fy
        self._strain_r = 0.0
        self._stress_r = 0.0
        self._last_strain_r = 0.0
        self._last_stress_r = 0.0
        self._strain = 0.0
        self._stress = 0.0
        self._xi = 0.0
        # Converged Variables
        self._c_loading_index = 0
        self._c_Et = E
        self._c_strain_0 = self._strain_0
        self._c_stress_0 = self._stress_0
        self._c_strain_r = 0.0
        self._c_stress_r = 0.0
        self._c_strain = 0.0
        self._c_stress = 0.0
        self._c_xi = 0.0
    @property
    def tangent_modulus(self):
        # Current tangent stiffness d(stress)/d(strain).
        return self._Et
    @property
    def stress(self):
        return self._stress
    @property
    def strain(self):
        return self._strain
    def update_strain(self, value):
        """Set the trial strain; return True if a load reversal occurred."""
        self._strain = value
        reversal = self._set_trial_state()
        return reversal
    def _set_trial_state(self):
        # Classify the strain increment relative to the last converged strain
        # and (re)initialise the yield-point asymptote when leaving rest.
        deps = self._strain - self._c_strain
        if self._loading_index == 0 or self._loading_index == 3:
            if abs(deps) < 1e-15:  # nearly zero
                self._Et = self._E
                self._stress = 0
                self._loading_index = 3
            else:
                if deps < 0:
                    self._loading_index = 2
                    self._strain_0 = -self._fy / self._E
                    self._stress_0 = -self._fy
                else:
                    self._loading_index = 1
                    self._strain_0 = self._fy / self._E
                    self._stress_0 = self._fy
        reversal = self._check_reversal()
        if reversal:
            self._reverse()
        return reversal
    def _check_reversal(self):
        # A reversal happens when the strain direction flips while loading.
        deps = self._strain - self._c_strain
        if self._loading_index == 2 and deps > 0:
            self._loading_index = 1
            return True
        if self._loading_index == 1 and deps < 0:
            self._loading_index = 2
            return True
        return False
    def _reverse(self):
        # Record the new reversal point and recompute the target asymptote
        # intersection (strain_0, stress_0) and the curvature parameter R.
        self._last_strain_r = self._strain_r
        self._last_stress_r = self._stress_r
        self._strain_r = self._c_strain
        self._stress_r = self._c_stress
        E = self._E
        b = self._b
        epr = self._strain_r
        sgr = self._stress_r
        if self._loading_index == 1:
            sgy = self._fy
        else:
            sgy = -self._fy
        lepr = self._last_strain_r
        self._strain_0 = (E * epr - sgr + sgy * (1 - b)) / (E * (1 - b))
        self._stress_0 = b * E * self._strain_0 + sgy * (1 - b)
        eps_intersect = ((sgr - lepr) + E * b * lepr - E * epr) / (E * (b - 1))
        self._xi = abs(eps_intersect - lepr)  # shouldn't this be divided by eps_y??
        self._R = self._R0 - self._a1 * self._xi / (self._a2 + self._xi)
        # global ax
        # ax.plot(self._strain_0, self._stress_0, "-o", color="black")
        # ax.plot(self._strain_r, self._stress_r, "-o", color="black", markerfacecolor="none")
    def calculate_stress_and_tangent_modulus(self):
        """Evaluate the Menegotto-Pinto curve at the trial strain, updating
        self._stress and the tangent modulus self._Et."""
        b = self._b
        eps = self._strain
        epr = self._strain_r
        ep0 = self._strain_0
        sgr = self._stress_r
        sg0 = self._stress_0
        R = self._R
        # Normalised strain between the reversal point and the asymptote point.
        eps_star = (eps - epr) / (ep0 - epr)
        dum1 = 1.0 + (abs(eps_star)) ** R
        dum2 = (dum1) ** (1.0 / R)
        sg_star = b * eps_star + (1.0 - b) * eps_star / dum2
        self._stress = sg_star * (sg0 - sgr) + sgr
        self._Et = b + (1 - b) / (dum1 * dum2)
        self._Et *= (sg0 - sgr) / (ep0 - epr)
    def finalize(self):
        # Commit the trial state as the new converged state.
        self._c_loading_index = self._loading_index
        self._c_Et = self._Et
        self._c_strain_0 = self._strain_0
        self._c_stress_0 = self._stress_0
        self._c_strain_r = self._strain_r
        self._c_stress_r = self._stress_r
        self._c_strain = self._strain
        self._c_stress = self._stress
        self._c_xi = self._xi
# Drive one fiber through a load-unload-reload strain history and plot the
# resulting stress-strain hysteresis.
fiber = MenegottoPinto(E=29000, b=0.08, fy=60, R=15, a1=8.5, a2=0.0002)  # 0.0042 # 20
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(fiber._strain_0, fiber._stress_0, "-o", color="black")
# Strain path: 0 -> 0.005 -> -0.01 -> -0.0005 (load, reverse, partially reload).
strains = np.concatenate(
    (np.linspace(0.000, 0.005), np.linspace(0.0049, -0.01), np.linspace(-0.009, -0.0005))
)
# f lists the step indices that are committed (all of them); nf stays empty.
f = np.linspace(0, strains.size - 1, strains.size, dtype=int)
nf = []
# strains = np.array([0, 0.001, 0.002, 0.0035, 0.0015, 0.0031, 0.003, 0.004, 0.005])
# f = [0, 1, 2, 6, 7, 8]
# nf = [3, 4, 5]
stresses = []
for i, strain in enumerate(strains):
    reversal = fiber.update_strain(strain)
    fiber.calculate_stress_and_tangent_modulus()
    if i in f:
        fiber.finalize()
    stresses.append(fiber.stress)
    if reversal:
        print(fiber._stress_0)
stresses = np.array(stresses)
ax.plot(strains[f], stresses[f], "-o", color="black", markerfacecolor="none")
ax.plot(strains[nf], stresses[nf], "o", color="orange")
ax.grid()
ax.axhline(linewidth=3, color="black")
ax.axvline(linewidth=3, color="black")
ax.set(xlabel="STEEL STRAIN", ylabel="STEEL STRESS")
plt.show()
| [
"m.zidan@tum.de"
] | m.zidan@tum.de |
4b17c8063afbada079d284069ca1e1b3efd90dec | 971752c0a0d7aa3f6ca224be041ff100ea96ae29 | /client/app/dialognlu/models/joint_trans_bert.py | 845f499b93d58ab0a283d0fb221a14ee185af2a0 | [] | no_license | zalkikar/DB-Final | 6aec7653a2a7a24d948d06e487d146b84e226360 | a297caadc1c5af84ad430461bf6dfeb9ea52e74a | refs/heads/main | 2023-07-26T12:00:53.096585 | 2021-09-01T22:02:26 | 2021-09-01T22:02:26 | 395,816,499 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,212 | py | # -*- coding: utf-8 -*-
"""
@author: mwahdan
"""
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Multiply, TimeDistributed
from .base_joint_trans import BaseJointTransformerModel, TfliteBaseJointTransformerModel, TfliteBaseJointTransformer4inputsModel
import numpy as np
class JointTransBertModel(BaseJointTransformerModel):
    """Joint intent-classification / slot-tagging NLU model on a BERT encoder."""
    def __init__(self, config, trans_model=None, is_load=False):
        super(JointTransBertModel, self).__init__(config, trans_model, is_load)
    def build_model(self):
        """Assemble the Keras graph: BERT encoder + intent head + slot head."""
        in_id = Input(shape=(self.max_length), name='input_word_ids', dtype=tf.int32)
        in_mask = Input(shape=(self.max_length), name='input_mask', dtype=tf.int32)
        in_segment = Input(shape=(self.max_length), name='input_type_ids', dtype=tf.int32)
        # One-hot mask selecting which token positions carry slot labels.
        in_valid_positions = Input(shape=(self.max_length, self.slots_num), name='valid_positions')
        bert_inputs = [in_id, in_mask, in_segment]
        inputs = bert_inputs + [in_valid_positions]
        bert_sequence_output, bert_pooled_output = self.trans_model(bert_inputs)
        # Intent head: softmax over the pooled [CLS] representation.
        intents_fc = Dense(self.intents_num, activation='softmax', name='intent_classifier')(bert_pooled_output)
        # Slot head: per-token softmax, masked by the valid positions.
        slots_output = TimeDistributed(Dense(self.slots_num, activation='softmax'))(bert_sequence_output)
        slots_output = Multiply(name='slots_tagger')([slots_output, in_valid_positions])
        self.model = Model(inputs=inputs, outputs=[slots_output, intents_fc])
    def save(self, model_path):
        # Persist the Keras weights under a fixed file name inside model_path.
        self.save_to_path(model_path, 'joint_bert_model.h5')
    @staticmethod
    def load(load_folder_path):
        # Delegate to the shared loader with this concrete class.
        return BaseJointTransformerModel.load_model_by_class(JointTransBertModel, load_folder_path, 'joint_bert_model.h5')
class TfliteJointTransBertModel(TfliteBaseJointTransformer4inputsModel):
    """TFLite inference variant of JointTransBertModel (4-input interface)."""
    def __init__(self, config):
        super(TfliteJointTransBertModel, self).__init__(config)
    @staticmethod
    def load(path):
        # Delegate to the shared loader with this concrete class.
        return TfliteBaseJointTransformerModel.load_model_by_class(TfliteJointTransBertModel, path)
"rayzck9@gmail.com"
] | rayzck9@gmail.com |
466772bee855a4726cd8e6ab814468db6d9c6284 | 12d9e5a5ea19674bf436298e3cf69fb0b19b18e1 | /problem1.py | 5c9a827eedd671df51d0005fad68750dcc7dbe21 | [] | no_license | sk4201/summer19py | e0f469523530b4f6e574a4fa2faa03bf19849e34 | ff244e596a599bf4936545ecee4efd2359b0224b | refs/heads/master | 2020-05-31T06:47:35.741916 | 2019-06-27T15:50:54 | 2019-06-27T15:50:54 | 190,149,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | #Create a program that asks the user to enter their name and their age.
#Print out a message that will tell them the year that they will turn 95 years old
import datetime
name=input("Enter Your Name :")
age=int(input("Enter your age :"))
now = datetime.datetime.now()
print(name," in the age of 95 year wiil be ",(95-age)+now.year)
| [
"16tec2cs024@vgu.ac.in"
] | 16tec2cs024@vgu.ac.in |
63b455c578e8c25aae76f9c4f1aad7074d665b95 | d945816080d2086b0fd81ab2ca3bf731591dd08b | /Graph.py | fe03d7f6d0bdd92c11f435b14bf2c0b816a1d077 | [] | no_license | orelkakon/PageRank | 576b0cee6d6f3f8a5cd59dd3ca363c5d25d31d48 | 2db06222885e291744b1322e1e25afc14dde5dfb | refs/heads/master | 2022-12-31T03:48:55.012668 | 2020-10-26T17:29:20 | 2020-10-26T17:29:20 | 307,452,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,663 | py | import csv
import operator
class Node:
def __init__(self):
self.list_neighbors = []
self.list_reverse_neighbors = []
self.r_tag_t = -1
self.r_t_previous = -1
self.r_t = -1
loaded_graph = {}
def load_graph(path):
with open(path, newline='') as file:
reader = csv.reader(file, delimiter=',', quotechar='|')
for row in reader:
name = row[0]
neighbor = row[1]
if name not in loaded_graph:
node_details = Node()
loaded_graph[name] = node_details
loaded_graph[name].list_neighbors.append(neighbor)
if neighbor not in loaded_graph:
node_details = Node()
loaded_graph[neighbor] = node_details
loaded_graph[neighbor].list_reverse_neighbors.append(name)
def sum_r_tag_t(reverse_neighbors_list, b):
sum1 = 0
for i in reverse_neighbors_list:
i_r_t_previous = loaded_graph[i].r_t_previous
di = len(loaded_graph[i].list_neighbors)
sum1 = sum1 + b*(i_r_t_previous/di)
return sum1
def calculate_page_rank(b, epsilon, max_iterations):
counter = 0
gap_sum = 100000.0
s = 0
for j in loaded_graph:
loaded_graph[j].r_t = 1/len(loaded_graph)
loaded_graph[j].r_t_previous = loaded_graph[j].r_t
while counter < max_iterations or gap_sum > epsilon:
gap_sum = 0
s = 0
for j in loaded_graph:
if len(loaded_graph[j].list_reverse_neighbors)== 0:
loaded_graph[j].r_tag_t = 0
else:
loaded_graph[j].r_tag_t = sum_r_tag_t(loaded_graph[j].list_reverse_neighbors, b)
s = s + loaded_graph[j].r_tag_t
for j in loaded_graph:
loaded_graph[j].r_t_previous = loaded_graph[j].r_t
loaded_graph[j].r_t = loaded_graph[j].r_tag_t + ((1 - s) / len(loaded_graph))
gap_sum = gap_sum + abs(loaded_graph[j].r_t - loaded_graph[j].r_t_previous)
counter = counter + 1
def get_page_rank(node_name):
if node_name in loaded_graph:
return loaded_graph[node_name].r_t
return -1
def get_top_nodes(n):
list_of_nodes = {}
for node in loaded_graph:
list_of_nodes[node] = loaded_graph[node].r_t
sorted_list_by_value = sorted(list_of_nodes.items(), key = operator.itemgetter(1), reverse = True)
return sorted_list_by_value[:n]
def get_all_page_rank():
result = []
for node in loaded_graph:
result.append([node, loaded_graph[node].r_t])
return result
notYet = True
Cont = True
while Cont:
print("Insert [1] to load_graph\n"
"Insert [2] to calculate_page_rank\n"
"Insert [3] to get_PageRank\n"
"Insert [4] to get_top_nodes\n"
"Insert [5] to get_all_PageRank\n"
"Insert [6] to Exit")
ans = input()
if ans == "1":
notYet = True
pathVal = input("Insert path of graph to load\n")
load_graph(pathVal)
elif ans == "2":
notYet = False
calculate_page_rank(0.85, 0.001, 20)
elif ans == "3":
nodeName = input("Insert the name of node\n")
print(get_page_rank(nodeName))
elif ans == "4":
if notYet:
print([])
else:
numTop = input("Insert number of top n\n")
print(get_top_nodes(int(numTop)))
elif ans == "5":
if notYet:
print([])
else:
print(get_all_page_rank())
else:
Cont = False
| [
"orelkak@post.bgu.ac.il"
] | orelkak@post.bgu.ac.il |
de8f1e1f2f085838464375d1849719293a936020 | 0af30c2e3ddcc80a19ea9cfaad9d7e1fedf8b876 | /210311-210314/백)2579 계단 오르기/이동재.py | b27bbc2081db13195ca37f930e92c97bac44a0d8 | [] | no_license | winterash2/algorithm_study_2021_1 | d1cd6077f71f68e7fc3eb6dfae7b2cc220885e4c | c1fee62c7e5e560c3bf7ae5e6166866d0147f23f | refs/heads/master | 2023-04-02T20:11:04.169856 | 2021-04-05T11:18:22 | 2021-04-05T11:18:22 | 327,563,535 | 1 | 2 | null | 2021-01-24T14:17:40 | 2021-01-07T09:28:08 | Python | UTF-8 | Python | false | false | 512 | py | import sys
input = sys.stdin.readline
N = int(input())
scores = []
for _ in range(N):
scores.append(int(input()))
dp1 = [0 for _ in range(N)]
dp2 = [0 for _ in range(N)]
# 0번 칸 초기화
dp1[0] = scores[0]
if N == 1:
print(scores[0])
else: # N이 2보다 클 때
# 2번 칸 초기화
dp1[1] = scores[1]
dp2[1] = scores[1] + dp1[0]
for i in range(2, N):
dp1[i] = scores[i] + max(dp1[i-2], dp2[i-2])
dp2[i] = scores[i] + dp1[i-1]
print(max(dp1[N-1], dp2[N-1])) | [
"winterash2@naver.com"
] | winterash2@naver.com |
2d17ce5376464fc9f2183a4be5ae61524b710fba | 6544d441fd25b63acdc9a18e0299db37daf45d3b | /tests/cli/test_create.py | 8b226997665595e963c5d927a28d0d6cadc4819f | [
"BSD-3-Clause"
] | permissive | benyanke/zedenv | 88c32c4d32f144bcc5c3896fa7f25255f0620250 | ca91d05fe74357b786b1a013f18351658e2a4a51 | refs/heads/master | 2020-03-22T21:07:50.090697 | 2018-07-04T20:50:44 | 2018-07-04T20:50:44 | 140,659,388 | 0 | 0 | BSD-3-Clause | 2018-07-12T04:06:21 | 2018-07-12T04:06:21 | null | UTF-8 | Python | false | false | 1,251 | py | """Test zedenv create command"""
import datetime
import pytest
import pyzfscmds.utility as zfs_utility
import zedenv.cli.create
import zedenv.lib.check
require_root_dataset = pytest.mark.require_root_dataset
@require_root_dataset
def test_boot_environment_created(root_dataset):
parent_dataset = zfs_utility.dataset_parent(root_dataset)
boot_environment = f"zedenv-{datetime.datetime.now().isoformat()}"
verbose = True
existing = None
zedenv.cli.create.zedenv_create(parent_dataset, root_dataset,
boot_environment, verbose, existing)
assert zfs_utility.dataset_exists(f"{parent_dataset}/{boot_environment}")
@require_root_dataset
def test_same_boot_environment_created(root_dataset):
parent_dataset = zfs_utility.dataset_parent(root_dataset)
boot_environment = f"zedenv-{datetime.datetime.now().isoformat()}"
verbose = True
existing = None
zedenv.cli.create.zedenv_create(parent_dataset, root_dataset,
boot_environment, verbose, existing)
with pytest.raises(SystemExit):
zedenv.cli.create.zedenv_create(parent_dataset, root_dataset,
boot_environment, verbose, existing)
| [
"johnramsden@riseup.net"
] | johnramsden@riseup.net |
0ae646e5fd55b65b3f924b29c97b5843b2eca062 | bd1362c60313784c90013dfc9f0169e64389bf27 | /scripts/ingestors/soilm_ingest.py | a3a1ef7cc473f3149593d222b9f47ed4891c86b8 | [] | no_license | ForceCry/iem | 391aa9daf796591909cb9d4e60e27375adfb0eab | 4b0390d89e6570b99ca83a5fa9b042226e17c1ad | refs/heads/master | 2020-12-24T19:04:55.517409 | 2013-04-09T14:25:36 | 2013-04-09T14:25:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,725 | py | """
Ingest ISU SOILM data!
DailySI
"TIMESTAMP",
"RECORD",
"TAir_C_Avg",
"TAir_C_Max",
"TAir_C_TMx",
"TAir_C_Min",
"TAir_C_TMn",
"SlrMJ_Tot",
"Rain_mm_Tot",
"WS_mps_S_WVT",
"WindDir_D1_WVT",
"WindDir_SD1_WVT",
"WS_mps_Max",
"WS_mps_TMx",
"DailyET",
"TSoil_C_Avg",
"VWC_12_Avg",
"VWC_24_Avg",
"VWC_50_Avg",
"EC12",
"EC24",
"EC50",
"T12_C_Avg",
"T24_C_Avg",
"T50_C_Avg",
"PA",
"PA_2",
"PA_3"
HrlySI
"TIMESTAMP",
"RECORD",
"TAir_C_Avg",
"RH",
"SlrkW_Avg",
"SlrMJ_Tot",
"Rain_mm_Tot",
"WS_mps_S_WVT",
"WindDir_D1_WVT",
"WindDir_SD1_WVT",
"ETAlfalfa",
"SolarRadCalc",
"TSoil_C_Avg",
"VWC_12_Avg",
"VWC_24_Avg",
"VWC_50_Avg",
"EC12",
"EC24",
"EC50",
"T12_C_Avg",
"T24_C_Avg",
"T50_C_Avg",
"PA",
"PA_2",
"PA_3"
"""
import os
import iemdb
import iemtz
import datetime
ISUAG = iemdb.connect('isuag')
icursor = ISUAG.cursor()
STATIONS = {'CAMI4': dict(daily='/mnt/home/mesonet/sm/Calumet/Calumet_DailySI.dat',
hourly='/mnt/home/mesonet/sm/Calumet/Calumet_HrlySI.dat'),
}
def hourly_process(nwsli, maxts):
""" Process the hourly file """
""" Process the daily file """
fn = STATIONS[nwsli]['hourly']
if not os.path.isfile(fn):
return
lines = open(fn).readlines()
if len(lines) < 6:
return
# Read header....
headers = []
for col in lines[1].strip().replace('"', '').split(","):
headers.append(col)
# Read data
for i in range(len(lines)-1,3,-1):
tokens = lines[i].strip().replace('"','').split(",")
if len(tokens) != len(headers):
continue
valid = datetime.datetime.strptime(tokens[ headers.index('TIMESTAMP')],
'%Y-%m-%d %H:%M:%S')
valid = valid.replace(tzinfo=iemtz.CentralStandard)
if valid <= maxts:
break
# We are ready for dbinserting!
dbcols = "station,valid," + ",".join(headers[2:])
dbvals = "'%s','%s-06'," % (nwsli, valid.strftime("%Y-%m-%d %H:%M:%S"))
for v in tokens[2:]:
dbvals += "%s," % (formatter(v),)
sql = "INSERT into sm_hourly (%s) values (%s)" % (dbcols, dbvals[:-1])
icursor.execute(sql)
def formatter(v):
""" Something to format things nicely for SQL"""
if v.find("NAN") > -1:
return 'Null'
if v.find(" ") > -1: #Timestamp
return "'%s-06'" % (v,)
return v
def daily_process(nwsli, maxts):
""" Process the daily file """
fn = STATIONS[nwsli]['daily']
if not os.path.isfile(fn):
return
lines = open(fn).readlines()
if len(lines) < 6:
return
# Read header....
headers = []
for col in lines[1].strip().replace('"', '').split(","):
headers.append(col)
# Read data
for i in range(len(lines)-1,3,-1):
tokens = lines[i].strip().replace('"','').split(",")
if len(tokens) != len(headers):
continue
valid = datetime.datetime.strptime(tokens[ headers.index('TIMESTAMP')][:10],
'%Y-%m-%d')
valid = valid.date() - datetime.timedelta(days=1)
if valid < maxts:
break
if valid == maxts: # Reprocess
icursor.execute("""DELETE from sm_daily WHERE valid = '%s' and
station = '%s' """ % (valid.strftime("%Y-%m-%d") ,nwsli))
# We are ready for dbinserting!
dbcols = "station,valid," + ",".join(headers[2:])
dbvals = "'%s','%s'," % (nwsli, valid.strftime("%Y-%m-%d"))
for v in tokens[2:]:
dbvals += "%s," % (formatter(v),)
sql = "INSERT into sm_daily (%s) values (%s)" % (dbcols, dbvals[:-1])
icursor.execute(sql)
def get_max_timestamps(nwsli):
""" Fetch out our max values """
data = {'hourly': datetime.datetime(2012,1,1, tzinfo=iemtz.CentralStandard),
'daily': datetime.date(2012,1,1)}
icursor.execute("""SELECT max(valid) from sm_daily WHERE station = '%s'""" % (
nwsli,))
row = icursor.fetchone()
if row[0] is not None:
data['daily'] = row[0]
icursor.execute("""SELECT max(valid) from sm_hourly WHERE station = '%s'""" % (
nwsli,))
row = icursor.fetchone()
if row[0] is not None:
data['hourly'] = row[0]
return data
def main():
for nwsli in STATIONS.keys():
maxobs = get_max_timestamps(nwsli)
hourly_process(nwsli, maxobs['hourly'])
daily_process(nwsli, maxobs['daily'])
icursor.close()
ISUAG.commit()
ISUAG.close()
if __name__ == '__main__':
main() | [
"akrherz@iastate.edu"
] | akrherz@iastate.edu |
855033498433fc4b023163b8a1e030790481cc8e | 102d09ef1d6effe166ad703ba4472c45dfb03263 | /py/Unique Binary Search Trees.py | ff810735f7dccf5e13975b50685aee50ae48a74b | [] | no_license | bitcsdby/Codes-for-leetcode | 5693100d4b66de65d7f135bbdd81b32650aed7d0 | 9e24e621cfb9e7fd46f9f02dfc40a18a702d4990 | refs/heads/master | 2016-09-05T08:43:31.656437 | 2014-08-02T15:14:53 | 2014-08-02T15:14:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | class Solution:
# @return an integer
def numTrees(self, n):
if n == 0 or n == 1:
return 1;
l = [];
l.append(1);
l.append(1);
count = 2;
while count <= n:
c = 0;
tmp = 0;
while c < count:
tmp += l[count-c-1] * l[c];
c += 1;
l.append(tmp);
count += 1;
return l.pop(); | [
"bitcsdby@gmail.com"
] | bitcsdby@gmail.com |
4c8146e1c0684bc70d7cd760d1508a41e90c6e68 | 97e0fe6c7f870e9c4ac2769f37beebc10fba91d5 | /start_processing_manager.py | 44188138edde753ebf68048c40fc7849a98648cf | [] | no_license | MadisonMHillyard/ecse414_lab | e31e8641ad788e218fdac1651ae3094aa4b33454 | 290c1eee43e3a970a63fffb58c6c028aa6cdcedb | refs/heads/main | 2023-05-01T11:48:26.490379 | 2021-05-16T03:18:44 | 2021-05-16T03:18:44 | 362,198,345 | 1 | 0 | null | 2021-05-15T21:09:48 | 2021-04-27T17:35:40 | Python | UTF-8 | Python | false | false | 4,317 | py | import sys
import os
import logging
from src.common import log_util
import json
from pathlib import Path
# pip install Pillow
# or easy install : easy_install Pillow
from PIL import Image
import numpy
import random
# logger setup
logger = log_util.get_logger("logs/aicn_log.txt")
logger.setLevel(logging.DEBUG)
from src.common.publisher import Publisher
from src.processing_node import ComputationNode
# Globals
aicn_registry_filename = "logs/registry_info.txt"
# gather aicn ip and port
if len(sys.argv) == 3:
if os.path.exists(aicn_registry_filename):
with open(aicn_registry_filename, "r") as file:
json_object = json.load(file)
aicn_registry_ip = json_object['aicn_registry'][0]['ip']
aicn_registry_port = json_object['aicn_registry'][0]['port']
else:
logger.error("Failed -- No " + aicn_registry_filename + " found to replace optional parameter 4 (IP address) & 5 (Port)")
exit()
elif len(sys.argv) == 5:
aicn_registry_ip = sys.argv[3]
aicn_registry_port = int(sys.argv[4])
# start publisher, node, and nodepub threads
if len(sys.argv) == 5 or len(sys.argv) == 3:
# Empty thread
ThreadList = []
# Gather number of publishers to create
num_pubs = int(sys.argv[2])
# Gather number of y = 1 images that exist
if num_pubs > 0:
plus1_list = list()
neg1_list = list()
for i in range(0, 1, 1):
file_list = Path('faces/1').glob('**/*.bmp')
for file in file_list:
# because path is object not string
img_str = str(file)
img = Image.open(img_str)
# ensure bitmap is 256 x 256, with grayscale pixels and get raw pixel array
img = img.resize((256, 256))
img = img.convert(mode='L')
img_pixel_array = numpy.asarray(img.getdata())
# normalize to zero-mean and unit variance
#img_pixel_array = numpy.interp(img_pixel_array, (img_pixel_array.min(), img_pixel_array.max()), (-1, +1))
img_pixel_array = (img_pixel_array - numpy.mean(img_pixel_array)) / numpy.std(img_pixel_array)
plus1_list.append([random.randint(1, num_pubs), img_pixel_array, img_str])
# Gather number of y = -1 images that exist
file_list = Path('faces/-1').glob('**/*.bmp')
for file in file_list:
# because path is object not string
img_str = str(file)
img = Image.open(img_str)
# ensure bitmap is 256 x 256, with grayscale pixels and get raw pixel array
img = img.resize((256, 256))
img = img.convert(mode='L')
img_pixel_array = numpy.asarray(img.getdata(), order='C')
# normalize to zero-mean and unit variance
#img_pixel_array = numpy.interp(img_pixel_array, (img_pixel_array.min(), img_pixel_array.max()), (-1, +1))
img_pixel_array = (img_pixel_array - numpy.mean(img_pixel_array)) / numpy.std(img_pixel_array)
neg1_list.append([random.randint(1, num_pubs), img_pixel_array, img_str])
# Gather number of computation nodes to create
num_nodes = int(sys.argv[1])
for i in range(0, num_nodes):
node = ComputationNode(i+1, aicn_registry_ip, aicn_registry_port)
ThreadList.append(node)
num_pubs = int(sys.argv[2])
for i in range(0, num_pubs):
# Create publishers
publisher = Publisher(i+1, aicn_registry_ip, aicn_registry_port)
# Teach them y=1 images
for image in plus1_list:
if image[0] == publisher.index:
publisher.add_image(y=1, pixel_array=image[1], img_name=image[2])
# Teach them y=-1 images
for image in neg1_list:
if image[0] == publisher.index:
publisher.add_image(y=-1, pixel_array=image[1], img_name=image[2])
ThreadList.append(publisher)
else:
logger.error("Failed -- Usage example: 'python ./start_aicn.py <num_nodes> <num_publishers> "
"[<registry_ip> <registry_port>]'")
exit()
# Start all threads
for thread in ThreadList:
thread.start()
# Wait for all threads
for thread in ThreadList:
thread.join()
| [
"Andrew Dupuis"
] | Andrew Dupuis |
fc63244cd75a39edbf500b6fa6de7db12118a2b9 | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/surface/auth/application_default/login.py | cd8f38b6c453bd00a3fae5c98e366c42c46a414a | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 9,042 | py | # -*- coding: utf-8 -*- #
# Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A command to install Application Default Credentials using a user account."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import textwrap
from googlecloudsdk.api_lib.auth import util as auth_util
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as c_exc
from googlecloudsdk.command_lib.auth import auth_util as command_auth_util
from googlecloudsdk.command_lib.auth import flags
from googlecloudsdk.command_lib.auth import workforce_login_config as workforce_login_config_util
from googlecloudsdk.core import config
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.credentials import gce as c_gce
from googlecloudsdk.core.credentials import store as c_store
class Login(base.Command):
r"""Acquire new user credentials to use for Application Default Credentials.
Obtains user access credentials via a web flow and puts them in the
well-known location for Application Default Credentials (ADC).
This command is useful when you are developing code that would normally
use a service account but need to run the code in a local development
environment where it's easier to provide user credentials. The credentials
will apply to all API calls that make use of the Application Default
Credentials client library. Do not set the `GOOGLE_APPLICATION_CREDENTIALS`
environment variable if you want to use the credentials generated by this
command in your local development. This command tries to find a quota
project from gcloud's context and write it to ADC so that Google client
libraries can use it for billing and quota. Alternatively, you can use
the `--client-id-file` flag. In this case, the project owning the client ID
will be used for billing and quota. You can create the client ID file
at https://console.cloud.google.com/apis/credentials.
This command has no effect on the user account(s) set up by the
`gcloud auth login` command.
Any credentials previously generated by
`gcloud auth application-default login` will be overwritten.
"""
detailed_help = {
'EXAMPLES':
"""\
If you want your local application to temporarily use your own user
credentials for API access, run:
$ {command}
If you'd like to login by passing in a file containing your own client
id, run:
$ {command} --client-id-file=clientid.json
"""
}
@staticmethod
def Args(parser):
"""Set args for gcloud auth application-default login."""
parser.add_argument(
'--client-id-file',
help='A file containing your own client id to use to login. If '
'--client-id-file is specified, the quota project will not be '
'written to ADC.')
parser.add_argument(
'--scopes',
type=arg_parsers.ArgList(min_length=1),
metavar='SCOPE',
help='The names of the scopes to authorize for. By default '
'{0} scopes are used. '
'The list of possible scopes can be found at: '
'[](https://developers.google.com/identity/protocols/googlescopes).'
.format(', '.join(auth_util.DEFAULT_SCOPES)))
parser.add_argument(
'--login-config',
help='Path to the login configuration file (workforce pool, '
'generated by the Cloud Console or '
'`gcloud iam workforce-pools create-login-config`)',
action=actions.StoreProperty(properties.VALUES.auth.login_config_file))
flags.AddQuotaProjectFlags(parser)
flags.AddRemoteLoginFlags(parser, for_adc=True)
parser.display_info.AddFormat('none')
def Run(self, args):
"""Run the authentication command."""
# TODO(b/203102970): Remove this condition check after the bug is resolved
if properties.VALUES.auth.access_token_file.Get():
raise c_store.FlowError(
'auth/access_token_file or --access-token-file was set which is not '
'compatible with this command. Please unset the property and rerun '
'this command.'
)
if c_gce.Metadata().connected:
message = textwrap.dedent("""
You are running on a Google Compute Engine virtual machine.
The service credentials associated with this virtual machine
will automatically be used by Application Default
Credentials, so it is not necessary to use this command.
If you decide to proceed anyway, your user credentials may be visible
to others with access to this virtual machine. Are you sure you want
to authenticate with your personal account?
""")
console_io.PromptContinue(
message=message, throw_if_unattended=True, cancel_on_no=True)
command_auth_util.PromptIfADCEnvVarIsSet()
if args.client_id_file and not args.launch_browser:
raise c_exc.InvalidArgumentException(
'--no-launch-browser',
'`--no-launch-browser` flow no longer works with the '
'`--client-id-file`. Please replace `--no-launch-browser` with '
'`--no-browser`.'
)
scopes = args.scopes or auth_util.DEFAULT_SCOPES
flow_params = dict(
no_launch_browser=not args.launch_browser,
no_browser=args.no_browser,
remote_bootstrap=args.remote_bootstrap)
# 1. Try the 3PI web flow with --no-browser:
# This could be a 3PI flow initiated via --no-browser.
# If provider_name is present, then this is the 3PI flow.
# We can start the flow as is as the remote_bootstrap value will be used.
if args.remote_bootstrap and 'provider_name' in args.remote_bootstrap:
auth_util.DoInstalledAppBrowserFlowGoogleAuth(
config.CLOUDSDK_EXTERNAL_ACCOUNT_SCOPES,
auth_proxy_redirect_uri=(
'https://sdk.cloud.google/applicationdefaultauthcode.html'
),
**flow_params
)
return
# 2. Try the 3PI web flow with a login configuration file.
login_config_file = workforce_login_config_util.GetWorkforceLoginConfig()
if login_config_file:
if args.client_id_file:
raise c_exc.ConflictingArgumentsException(
'--client-id-file is not currently supported for third party login '
'flows. ')
if args.scopes:
raise c_exc.ConflictingArgumentsException(
'--scopes is not currently supported for third party login flows.')
# Redirect URI must be sdk.cloud.google for 3PI.
creds = workforce_login_config_util.DoWorkforceHeadfulLogin(
login_config_file,
True,
auth_proxy_redirect_uri=(
'https://sdk.cloud.google/applicationdefaultauthcode.html'
),
**flow_params
)
else:
# 3. Try the 1P web flow.
properties.VALUES.auth.client_id.Set(
auth_util.DEFAULT_CREDENTIALS_DEFAULT_CLIENT_ID)
properties.VALUES.auth.client_secret.Set(
auth_util.DEFAULT_CREDENTIALS_DEFAULT_CLIENT_SECRET)
creds = auth_util.DoInstalledAppBrowserFlowGoogleAuth(
scopes,
client_id_file=args.client_id_file,
auth_proxy_redirect_uri=(
'https://sdk.cloud.google.com/applicationdefaultauthcode.html'
),
**flow_params
)
if not creds:
return
target_impersonation_principal, delegates = None, None
impersonation_service_accounts = (
properties.VALUES.auth.impersonate_service_account.Get()
)
if impersonation_service_accounts:
(target_impersonation_principal, delegates
) = c_store.ParseImpersonationAccounts(impersonation_service_accounts)
if not target_impersonation_principal:
if args.IsSpecified('client_id_file'):
command_auth_util.DumpADC(creds, quota_project_disabled=False)
elif args.disable_quota_project:
command_auth_util.DumpADC(creds, quota_project_disabled=True)
else:
command_auth_util.DumpADCOptionalQuotaProject(creds)
else:
# TODO(b/184049366): Supports quota project with impersonated creds.
command_auth_util.DumpImpersonatedServiceAccountToADC(
creds,
target_principal=target_impersonation_principal,
delegates=delegates)
return creds
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
3e4834d5a41bff7109121e57e288521ddeccf58f | 0987b1cff64f2e3937412c590d1a6953a224de57 | /postgres/sqlapp/migrations/0004_auto_20200623_1400.py | c4b4d0f73b325869622f48b469a5e1cb7714732f | [] | no_license | AleksanderUrbanowicz/Postgres_Django | 9d646d92a746e7df5f7869b5ea457e45bbddb36a | bf7e4466df303476726ed02998249787f17e0cbd | refs/heads/master | 2022-11-07T18:22:54.565637 | 2020-06-24T14:02:10 | 2020-06-24T14:02:10 | 274,174,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | # Generated by Django 3.0.7 on 2020-06-23 14:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sqlapp', '0003_auto_20200623_1238'),
]
operations = [
migrations.AddField(
model_name='client',
name='city',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='client',
name='country',
field=models.CharField(default='World', max_length=100),
preserve_default=False,
),
migrations.AlterModelTable(
name='product',
table='products',
),
]
| [
"aleksanderurbanowicz@gmail.com"
] | aleksanderurbanowicz@gmail.com |
1af261d752a689d05277ad4f6abdac6ac6f2d056 | 52eb2cf9d42d2ecb9b997381f3ef61930f0f0aaf | /schnetpack/representation/schnet.py | 31ca8314b6b875c52dc35464cf75b92a30cff4d8 | [
"MIT"
] | permissive | YDS-Med/Transformer3D | 50e12ad4cb82ce22f77ce85d664be5cadf1c2058 | dc1257525ab4f0532f5fbc2af60bd99faa3796be | refs/heads/master | 2022-11-30T00:23:07.555865 | 2020-08-03T21:47:47 | 2020-08-03T21:47:47 | 283,867,160 | 1 | 0 | NOASSERTION | 2020-08-03T21:36:57 | 2020-07-30T20:08:09 | Python | UTF-8 | Python | false | false | 11,100 | py | import torch
import torch.nn as nn
from schnetpack.nn.base import Dense
from schnetpack import Properties
from schnetpack.nn.cfconv import CFConv
from schnetpack.nn.cutoff import CosineCutoff
from schnetpack.nn.acsf import GaussianSmearing
from schnetpack.nn.neighbors import AtomDistances
from schnetpack.nn.activations import shifted_softplus
class SchNetInteraction(nn.Module):
r"""SchNet interaction block for modeling interactions of atomistic systems.
Args:
n_atom_basis (int): number of features to describe atomic environments.
n_spatial_basis (int): number of input features of filter-generating networks.
n_filters (int): number of filters used in continuous-filter convolution.
cutoff (float): cutoff radius.
cutoff_network (nn.Module, optional): cutoff layer.
normalize_filter (bool, optional): if True, divide aggregated filter by number
of neighbors over which convolution is applied.
"""
def __init__(
self,
n_atom_basis,
n_spatial_basis,
n_filters,
cutoff,
cutoff_network=CosineCutoff,
normalize_filter=False,
):
super(SchNetInteraction, self).__init__()
# filter block used in interaction block
self.filter_network = nn.Sequential(
Dense(n_spatial_basis, n_filters, activation=shifted_softplus),
Dense(n_filters, n_filters),
)
# cutoff layer used in interaction block
self.cutoff_network = cutoff_network(cutoff)
# interaction block
self.cfconv = CFConv(
n_atom_basis,
n_filters,
n_atom_basis,
self.filter_network,
cutoff_network=self.cutoff_network,
activation=shifted_softplus,
normalize_filter=normalize_filter,
)
# dense layer
self.dense = Dense(n_atom_basis, n_atom_basis, bias=True, activation=None)
def forward(self, x, r_ij, neighbors, neighbor_mask, f_ij=None):
"""Compute interaction output.
Args:
x (torch.Tensor): input representation/embedding of atomic environments
with (N_b, N_a, n_atom_basis) shape.
r_ij (torch.Tensor): interatomic distances of (N_b, N_a, N_nbh) shape.
neighbors (torch.Tensor): indices of neighbors of (N_b, N_a, N_nbh) shape.
neighbor_mask (torch.Tensor): mask to filter out non-existing neighbors
introduced via padding.
f_ij (torch.Tensor, optional): expanded interatomic distances in a basis.
If None, r_ij.unsqueeze(-1) is used.
Returns:
torch.Tensor: block output with (N_b, N_a, n_atom_basis) shape.
"""
# continuous-filter convolution interaction block followed by Dense layer
v = self.cfconv(x, r_ij, neighbors, neighbor_mask, f_ij)
v = self.dense(v)
return v
class SchNet(nn.Module):
"""SchNet architecture for learning representations of atomistic systems.
Args:
n_atom_basis (int, optional): number of features to describe atomic environments.
This determines the size of each embedding vector; i.e. embeddings_dim.
n_filters (int, optional): number of filters used in continuous-filter convolution
n_interactions (int, optional): number of interaction blocks.
cutoff (float, optional): cutoff radius.
n_gaussians (int, optional): number of Gaussian functions used to expand
atomic distances.
normalize_filter (bool, optional): if True, divide aggregated filter by number
of neighbors over which convolution is applied.
coupled_interactions (bool, optional): if True, share the weights across
interaction blocks and filter-generating networks.
return_intermediate (bool, optional): if True, `forward` method also returns
intermediate atomic representations after each interaction block is applied.
max_z (int, optional): maximum nuclear charge allowed in database. This
determines the size of the dictionary of embedding; i.e. num_embeddings.
cutoff_network (nn.Module, optional): cutoff layer.
trainable_gaussians (bool, optional): If True, widths and offset of Gaussian
functions are adjusted during training process.
distance_expansion (nn.Module, optional): layer for expanding interatomic
distances in a basis.
charged_systems (bool, optional):
References:
.. [#schnet1] Schütt, Arbabzadah, Chmiela, Müller, Tkatchenko:
Quantum-chemical insights from deep tensor neural networks.
Nature Communications, 8, 13890. 2017.
.. [#schnet_transfer] Schütt, Kindermans, Sauceda, Chmiela, Tkatchenko, Müller:
SchNet: A continuous-filter convolutional neural network for modeling quantum
interactions.
In Advances in Neural Information Processing Systems, pp. 992-1002. 2017.
.. [#schnet3] Schütt, Sauceda, Kindermans, Tkatchenko, Müller:
SchNet - a deep learning architecture for molceules and materials.
The Journal of Chemical Physics 148 (24), 241722. 2018.
"""
def __init__(
self,
n_atom_basis=128,
n_filters=128,
n_scales=1,
n_interactions=3,
cutoff=8., ###5.0,
n_gaussians=32, ###25,
normalize_filter=False,
coupled_interactions=False,
return_intermediate=False,
max_z=100,
cutoff_network=CosineCutoff,
trainable_gaussians=False,
distance_expansion=None,
charged_systems=False,
):
super(SchNet, self).__init__()
self.n_atom_basis = n_atom_basis
# make a lookup table to store embeddings for each element (up to atomic
# number max_z) each of which is a vector of size n_atom_basis
self.embedding = nn.Embedding(max_z, n_atom_basis, padding_idx=0)
# layer for computing interatomic distances
self.distances = AtomDistances()
# layer for expanding interatomic distances in a basis
if distance_expansion is None:
self.distance_expansion = GaussianSmearing(
0.0, cutoff, n_gaussians, trainable=trainable_gaussians
)
else:
self.distance_expansion = distance_expansion
# block for computing interaction
self.n_scales = n_scales
self.n_interactions = n_interactions
self.interaction_blocks = nn.ModuleList(
[
SchNetInteraction(
n_atom_basis=n_atom_basis,
n_spatial_basis=n_gaussians,
n_filters=n_filters,
cutoff_network=cutoff_network,
cutoff=cutoff,
normalize_filter=normalize_filter,
)
for _ in range(n_scales)
]
)
# # block for computing interaction
# if coupled_interactions:
# # use the same SchNetInteraction instance (hence the same weights)
# self.interactions = nn.ModuleList(
# [
# SchNetInteraction(
# n_atom_basis=n_atom_basis,
# n_spatial_basis=n_gaussians,
# n_filters=n_filters,
# cutoff_network=cutoff_network,
# cutoff=cutoff,
# normalize_filter=normalize_filter,
# )
# ]
# * n_interactions
# )
# else:
# # use one SchNetInteraction instance for each interaction
# self.interactions = nn.ModuleList(
# [
# SchNetInteraction(
# n_atom_basis=n_atom_basis,
# n_spatial_basis=n_gaussians,
# n_filters=n_filters,
# cutoff_network=cutoff_network,
# cutoff=cutoff,
# normalize_filter=normalize_filter,
# )
# for _ in range(n_interactions)
# ]
# )
# set attributes
self.return_intermediate = return_intermediate
self.charged_systems = charged_systems
if charged_systems:
self.charge = nn.Parameter(torch.Tensor(1, n_atom_basis))
self.charge.data.normal_(0, 1.0 / n_atom_basis ** 0.5)
def forward(self, inputs):
"""Compute atomic representations/embeddings.
Args:
inputs (dict of torch.Tensor): SchNetPack dictionary of input tensors.
Returns:
torch.Tensor: atom-wise representation.
list of torch.Tensor: intermediate atom-wise representations, if
return_intermediate=True was used.
"""
# get tensors from input dictionary
atomic_numbers = inputs[Properties.Z]
positions = inputs[Properties.R]
cell = inputs[Properties.cell]
cell_offset = inputs[Properties.cell_offset]
neighbors = inputs[Properties.neighbors]
neighbor_mask = inputs[Properties.neighbor_mask]
atom_mask = inputs[Properties.atom_mask]
# get atom embeddings for the input atomic numbers
x = self.embedding(atomic_numbers)
if False and self.charged_systems and Properties.charge in inputs.keys():
n_atoms = torch.sum(atom_mask, dim=1, keepdim=True)
charge = inputs[Properties.charge] / n_atoms # B
charge = charge[:, None] * self.charge # B x F
x = x + charge
# compute interatomic distance of every atom to its neighbors
r_ij = self.distances(
positions, neighbors, cell, cell_offset, neighbor_mask=neighbor_mask
)
# expand interatomic distances (for example, Gaussian smearing)
f_ij = self.distance_expansion(r_ij)
# store intermediate representations
if self.return_intermediate:
xs = [x]
# compute interaction block to update atomic embeddings
for i_scale in range(self.n_scales):
interaction = self.interaction_blocks[i_scale]
for i_interaction in range(self.n_interactions):
v = interaction(x, r_ij, neighbors, neighbor_mask, f_ij=f_ij)
x = x + v
if self.return_intermediate:
xs.append(x)
# # compute interaction block to update atomic embeddings
# for interaction in self.interactions:
# v = interaction(x, r_ij, neighbors, neighbor_mask, f_ij=f_ij)
# x = x + v
# if self.return_intermediate:
# xs.append(x)
if self.return_intermediate:
return x, xs
return x
| [
"noreply@github.com"
] | noreply@github.com |
e9ad2283c07526d1309fe86f4e458cfba45f9f76 | 9bdd741a574d32679532775627b285608bf527f9 | /visualize_utils.py | 12700e8f7d0a106d4f3c3eeee829b778915d4b9d | [] | no_license | mungsoo/CNN-with-Visualization | 96922980b122de70dd0596f8c9ceef921fd6286c | a83f47e620d0f2dcb296991e7e0e5fe77594fa4a | refs/heads/master | 2020-04-02T03:26:46.758448 | 2018-10-21T18:20:46 | 2018-10-21T18:20:46 | 153,966,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,881 | py | import math
import os
import errno
import shutil
def get_grid_dim(x):
    """
    Transforms x into a product of two integers (for laying out a near-square
    visualization grid of x tiles).
    :param x: int, positive integer
    :return: two ints whose product is x (the two middle factors of x)
    """
    factors = prime_powers(x)
    # Original computed the midpoint twice, once via ``int(len/2)`` float
    # division and once via ``//``; unified on integer floor division.
    mid = len(factors) // 2
    if len(factors) % 2 == 0:
        # Even factor count: the two middle factors multiply to x.
        return factors[mid], factors[mid - 1]
    # Odd factor count: x is a perfect square; the middle factor is sqrt(x).
    return factors[mid], factors[mid]
def prime_powers(n):
    """
    Compute the factors of a positive integer.
    Algorithm from https://rosettacode.org/wiki/Factors_of_an_integer#Python
    :param n: int, positive integer
    :return: sorted list of all factors of n
             (docstring previously claimed a set, but ``sorted`` returns a list)
    """
    factors = set()
    # Only scan up to sqrt(n): each divisor found below the root pairs with
    # one above it (n // candidate).
    for candidate in range(1, int(math.sqrt(n)) + 1):
        if n % candidate == 0:
            factors.add(candidate)
            factors.add(n // candidate)
    return sorted(factors)
def empty_dir(path):
    """
    Delete all files and folders in a directory
    :param path: string, path to directory
    :return: nothing
    """
    for entry in os.listdir(path):
        entry_path = os.path.join(path, entry)
        try:
            # Subdirectories are removed recursively, plain files unlinked.
            if os.path.isdir(entry_path):
                shutil.rmtree(entry_path)
            elif os.path.isfile(entry_path):
                os.unlink(entry_path)
        except Exception as e:
            # Best-effort cleanup: report and keep going with other entries.
            print('Warning: {}'.format(e))
def create_dir(path):
    """
    Creates a directory (including intermediate directories)
    :param path: string
    :return: nothing
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # "Already exists" is fine; anything else is a real failure.
        if exc.errno == errno.EEXIST:
            return
        raise
def prepare_dir(path, empty=False):
    """
    Creates a directory if it does not exist
    :param path: string, path to desired directory
    :param empty: boolean, delete all directory content if it exists
    :return: nothing
    """
    # Create only when missing; optionally wipe the contents afterwards.
    missing = not os.path.exists(path)
    if missing:
        create_dir(path)
    if empty:
        empty_dir(path)
"noreply@github.com"
] | noreply@github.com |
af3c13b0b6d71fc197d85e36c8e32fa818a832f2 | b72c37e3ccda507b231649cddd5c7845c6c34ba1 | /PythonBasic/Day15/exec5_enumate.py | bcde4f16b170aa836494556ff4f435dfe5176b43 | [] | no_license | ljrdemail/AID1810 | 51c61c255b5c5efc1dc642b46691a614daedd85e | b417bd831bc1550ab953ce7ca23f54e34b8b2692 | refs/heads/master | 2020-04-24T09:45:14.781612 | 2019-02-21T11:26:49 | 2019-02-21T11:26:49 | 171,866,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | # -*- coding:utf-8 -*-
def myenumerate(iterable, start=0):
    """Yield (index, item) pairs like the built-in enumerate."""
    index = start  # running index, begins at ``start``
    for item in iterable:
        # Emit one (index, item) tuple per element.
        yield (index, item)
        index += 1
# Demo: enumerate "ABCDE" with the index starting at 1 and print each pair.
d = myenumerate("ABCDE", 1)
for i in d:
    print(i)
| [
"root"
] | root |
fed79b9a386ddab376d7acd6d52191fc5ec5f846 | 23fb5b1fb275892b0a27657685c062360630889e | /Week 7/django/src/bookstore/settings.py | ad6bf63e9bc7c5c3b7fdb61d360525456c224875 | [
"MIT"
] | permissive | carlosal1015/python2017 | 2b596fa1e4cad4de06537ffc99fb0af0dfa4563d | c1eed0201039c6b4daf857dd1f08c47a7b1e3f45 | refs/heads/master | 2020-09-13T17:15:50.419142 | 2018-05-24T12:44:40 | 2018-05-24T12:44:40 | 222,850,901 | 1 | 2 | MIT | 2019-11-20T04:32:23 | 2019-11-20T04:30:54 | null | UTF-8 | Python | false | false | 3,161 | py | """
Django settings for bookstore project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Project root: two levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): key is hard-coded here for development; move to an
# environment variable before deploying.
SECRET_KEY = '=4*-6vzd*%j--m+ki)mhd+rpdw2v#t@_&r8z8k8typl8292#te'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): must list the served hostnames once DEBUG is False.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project-local application.
    'main',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bookstore.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory, in addition to per-app templates.
        'DIRS': [os.path.join(BASE_DIR, "templates")],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'bookstore.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# SQLite file database — suitable for development only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"sebastinssanty@gmail.com"
] | sebastinssanty@gmail.com |
64b1ff60158655b97b826b8467eb04fc9536b67f | c264153f9188d3af187905d846fa20296a0af85d | /Python/Python3网络爬虫开发实战/《Python3网络爬虫开发实战》随书源代码/urllib/error/demo3.py | 6928b02a18d8a9762b9a281c84c97d5aa162f9c4 | [] | no_license | IS-OSCAR-YU/ebooks | 5cd3c1089a221759793524df647e231a582b19ba | b125204c4fe69b9ca9ff774c7bc166d3cb2a875b | refs/heads/master | 2023-05-23T02:46:58.718636 | 2021-06-16T12:15:13 | 2021-06-16T12:15:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | from urllib import request, error
# Demonstrates urllib error handling. HTTPError is a subclass of URLError,
# so it must be caught FIRST; otherwise the URLError clause would shadow it.
try:
    response = request.urlopen('http://cuiqingcai.com/index.htm')
except error.HTTPError as e:
    # HTTP-level failure: print reason, status code and response headers.
    print(e.reason, e.code, e.headers, sep='\n')
except error.URLError as e:
    # Lower-level failure (DNS, refused connection, ...): only a reason.
    print(e.reason)
else:
    print('Request Successfully')
"jiangzhangha@163.com"
] | jiangzhangha@163.com |
184ebeb33592af81e788e14c06df93a03090abd8 | 5f27bc1a0460a078f6fe33a544f494a5dff7f452 | /script/old/O_0703_arm_move_jaco.py | 1fa6247e4c4c661ef79584d37b0acde343aed2be | [] | no_license | A-Why-not-fork-repositories-Good-Luck/arm_move | 3e381f0310265f47da14beaac136c358fb318f92 | e2e6182cfd93df1935bd3b8e9158134964dc44fa | refs/heads/master | 2023-03-15T18:37:17.337770 | 2020-11-18T06:46:06 | 2020-11-18T06:46:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,159 | py | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, SRI International
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of SRI International nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Acorn Pooley, Mike Lautman
## BEGIN_SUB_TUTORIAL imports
##
## To use the Python MoveIt! interfaces, we will import the `moveit_commander`_ namespace.
## This namespace provides us with a `MoveGroupCommander`_ class, a `PlanningSceneInterface`_ class,
## and a `RobotCommander`_ class. More on these below. We also import `rospy`_ and some messages that we will use:
##
import time
import sys
import copy
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
import sensor_msgs.msg
from arm_move.msg._arm_move_msg import arm_move_msg
from arm_move.msg._box_info_msg import box_info_msg
from arm_move.msg._attach_hand_box import attach_hand_box
from arm_move.srv._box_info_srv import *
from arm_move.srv._att_hand_box_srv import *
from arm_move.srv._arm_move_srv import *
from arm_move.srv._work_start_srv import *
from arm_move.srv._arm_goalJoint_srv import *
from math import pi
from std_msgs.msg import String
from moveit_commander.conversions import pose_to_list
ROBOT_ARM_GROUP = 'arm'
ROBOT_EE_GROUP = 'hand'
def all_close(goal, actual, tolerance):
    """
    Convenience method for testing if a list of values are within a tolerance of their counterparts in another list
    @param: goal       A list of floats, a Pose or a PoseStamped
    @param: actual     A list of floats, a Pose or a PoseStamped
    @param: tolerance  A float
    @returns: bool
    """
    # Fix: removed the unused ``all_equal`` flag the original initialized
    # but never read or returned.
    if type(goal) is list:
        # Element-wise comparison; fails fast on the first out-of-tolerance
        # entry. Assumes ``actual`` has at least as many entries as ``goal``.
        for index in range(len(goal)):
            if abs(actual[index] - goal[index]) > tolerance:
                return False
    elif type(goal) is geometry_msgs.msg.PoseStamped:
        # Compare the wrapped Pose; the header is intentionally ignored.
        return all_close(goal.pose, actual.pose, tolerance)
    elif type(goal) is geometry_msgs.msg.Pose:
        # Convert both poses to flat float lists and recurse into the
        # list branch above.
        return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)
    return True
class MoveGroupPythonIntefaceTutorial(object):
    """ROS/MoveIt! wrapper for a Kinova Jaco (j2n6s300) arm.

    Exposes planning-scene management (add/attach/detach/remove boxes) and
    motion commands both as topic callbacks (``*_m`` methods) and as ROS
    services (``*_s`` methods).
    """
    def __init__(self):
        # Initialize MoveIt! handles: whole-robot commander, shared planning
        # scene, and a MoveGroupCommander for the default 'arm' group.
        super(MoveGroupPythonIntefaceTutorial, self).__init__()
        robot = moveit_commander.RobotCommander()
        scene = moveit_commander.PlanningSceneInterface()
        self.group_name = 'arm' # this is just for the initialization
        move_group = moveit_commander.MoveGroupCommander(self.group_name)
        # Publisher for RViz trajectory visualization.
        display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path',
                                                       moveit_msgs.msg.DisplayTrajectory,
                                                       queue_size=20)
        # Publishers for re-broadcasting planned trajectories and a
        # '0'/'1' feasibility flag to downstream nodes.
        traj_arm_publisher = rospy.Publisher('/traj_arm', moveit_msgs.msg.RobotTrajectory, queue_size=100)
        feasibility_flag = rospy.Publisher('/arm_feasibility', String, queue_size=20)
        planning_frame = move_group.get_planning_frame()
        # eef_link = move_group.get_end_effector_link()
        # End-effector link is hard-coded instead of queried from MoveIt!.
        eef_link = 'j2n6s300_end_effector'
        print "end eef link list\n", eef_link
        group_names = robot.get_group_names()
        # Misc variables
        self.box_name = ''
        self.robot = robot
        self.scene = scene
        self.move_group = move_group
        self.display_trajectory_publisher = display_trajectory_publisher
        self.traj_arm_publisher = traj_arm_publisher
        self.feasibility_flag = feasibility_flag
        self.planning_frame = planning_frame
        self.eef_link = eef_link
        self.group_names = group_names
        # Names of every box this node has added to the planning scene.
        self.object_list = []
    def go_to_pose_goal_m(self, data):
        """Topic callback: plan to a Cartesian goal pose and execute it.

        ``data`` is an arm_move_msg carrying arm_name, goal_position and
        goal_orientation. Returns True when the reached pose is within
        0.01 of the goal (via all_close).
        """
        move_group = moveit_commander.MoveGroupCommander(data.arm_name[0])
        # move_group = self.move_group
        pose_goal = geometry_msgs.msg.Pose()
        pose_goal.position = data.goal_position
        pose_goal.orientation = data.goal_orientation
        # move_group.set_planner_id('SPARStwo')
        # move_group.set_planner_id('RRTstar')
        # move_group.set_planner_id('BiTRRT')
        move_group.set_num_planning_attempts(10000)
        move_group.set_planning_time(5)
        move_group.set_goal_position_tolerance(0.01)
        move_group.set_goal_orientation_tolerance(0.01)
        move_group.set_pose_target(pose_goal)
        print "goal pose:", pose_goal
        plan = move_group.plan()
        move_group.execute(plan, wait=True)
        # Re-broadcast the executed trajectory for downstream listeners.
        traj_arm_pub = self.traj_arm_publisher
        traj_arm_pub.publish(plan)
        move_group.clear_pose_targets()
        current_pose = self.move_group.get_current_pose().pose
        return all_close(pose_goal, current_pose, 0.01)
    def goalPose_feasibility_check_m(self, data):
        """Topic callback: plan (without executing) and publish '0'/'1'
        on /arm_feasibility depending on whether a plan was found.

        NOTE(review): unlike go_to_pose_goal_m this reads ``data.position``/
        ``data.orientation`` (not goal_position/goal_orientation) — confirm
        against the message definition.
        """
        # move_group = self.move_group
        move_group = moveit_commander.MoveGroupCommander(data.arm_name[0])
        pose_goal = geometry_msgs.msg.Pose()
        pose_goal.position = data.position
        pose_goal.orientation = data.orientation
        # move_group.set_planner_id('SPARStwo')
        # move_group.set_planner_id('RRTstar')
        # move_group.set_planner_id('BiTRRT')
        move_group.set_num_planning_attempts(10000)
        move_group.set_planning_time(5)
        move_group.set_goal_position_tolerance(0.01)
        move_group.set_goal_orientation_tolerance(0.01)
        move_group.set_pose_target(pose_goal)
        plan = move_group.plan()
        # print plan
        print "plan.joint_trajectory.joint_names :", plan.joint_trajectory.joint_names
        feasibility_flag_pub = self.feasibility_flag
        feasible_flag_msg = String()
        # An empty joint-name list means MoveIt! found no plan.
        if len(plan.joint_trajectory.joint_names) == 0:
            print "no plan found"
            feasible_flag_msg = '0'
            feasibility_flag_pub.publish(feasible_flag_msg)
        elif len(plan.joint_trajectory.joint_names) > 0:
            print "plan found"
            feasible_flag_msg = '1'
            feasibility_flag_pub.publish(feasible_flag_msg)
            time.sleep(3)
            traj_arm_pub = self.traj_arm_publisher
            traj_arm_pub.publish(plan)
            time.sleep(2)
        move_group.stop()
        move_group.clear_pose_targets()
        current_pose = self.move_group.get_current_pose().pose
        return all_close(pose_goal, current_pose, 0.01)
    def pickup(self, upCM, scale=1):
        """Cartesian lift: move the end effector 0.05 m up along z.

        NOTE(review): ``upCM`` and ``scale`` are currently unused — the lift
        distance is hard-coded below.
        """
        move_group = self.move_group
        waypoints = []
        wpose = move_group.get_current_pose().pose
        # Double negative: subtracting -0.05 raises z by 0.05.
        wpose.position.z -= -0.05  # First move up (z)
        waypoints.append(copy.deepcopy(wpose))
        (plan, fraction) = move_group.compute_cartesian_path(
            waypoints,  # waypoints to follow
            0.01,  # eef_step
            0.0)  # jump_threshold
        move_group.execute(plan, wait=True)
        return plan, fraction
    def display_trajectory(self, plan):
        """Publish ``plan`` to /move_group/display_planned_path for RViz."""
        robot = self.robot
        display_trajectory_publisher = self.display_trajectory_publisher
        display_trajectory = moveit_msgs.msg.DisplayTrajectory()
        display_trajectory.trajectory_start = robot.get_current_state()
        display_trajectory.trajectory.append(plan)
        # Publish
        display_trajectory_publisher.publish(display_trajectory);
    def execute_plan(self, plan):
        """Execute a previously computed plan on the default move group."""
        move_group = self.move_group
        move_group.execute(plan, wait=True)
    def wait_for_state_update(self, box_is_known=False, box_is_attached=False, timeout=4):
        """Poll the planning scene until self.box_name reaches the expected
        known/attached state, or ``timeout`` seconds elapse.

        Returns True on success, False on timeout/shutdown.
        """
        box_name = self.box_name
        scene = self.scene
        start = rospy.get_time()
        seconds = rospy.get_time()
        while (seconds - start < timeout) and not rospy.is_shutdown():
            # Test if the box is in attached objects
            attached_objects = scene.get_attached_objects([box_name])
            is_attached = len(attached_objects.keys()) > 0
            is_known = box_name in scene.get_known_object_names()
            if (box_is_attached == is_attached) and (box_is_known == is_known):
                return True
            rospy.sleep(0.1)
            seconds = rospy.get_time()
        return False
    ## END_SUB_TUTORIAL
    def add_box(self, timeout=4):
        """Add a fixed 10 cm demo box named "box" to the planning scene."""
        box_name = self.box_name
        scene = self.scene
        box_pose = geometry_msgs.msg.PoseStamped()
        box_pose.header.frame_id = "j2n6s300_link_base"
        box_pose.pose.orientation.w = 1.0
        box_pose.pose.position.z = 0.07 # slightly above the end effector
        box_name = "box"
        scene.add_box(box_name, box_pose, size=(0.1, 0.1, 0.1))
        self.box_name = box_name
        return self.wait_for_state_update(box_is_known=True, timeout=timeout)
    def add_box_m(self, data, timeout=4):
        """Topic callback: add a box described by a box_info_msg."""
        print "Start 'add box_m'", data.object_name[0]
        box_name = data.object_name[0]
        box_pose = geometry_msgs.msg.PoseStamped()
        box_pose.header.frame_id = "j2n6s300_link_base"
        box_pose.pose.position = data.object_position
        box_pose.pose.orientation = data.object_orientation
        box_scale = (data.object_scale.x, data.object_scale.y, data.object_scale.z)
        self.scene.add_box(box_name, box_pose, box_scale)
        self.box_name = box_name
        # Track the box so remove_all_* can clean it up later.
        self.object_list.append(box_name)
        return self.wait_for_state_update(box_is_known=True, timeout=timeout)
    def attach_box(self, timeout=4):
        """Attach self.box_name to the end effector, allowing contact with
        every link of the 'hand' group."""
        box_name = self.box_name
        robot = self.robot
        scene = self.scene
        eef_link = self.eef_link
        group_names = self.group_names
        grasping_group = 'hand'
        touch_links = robot.get_link_names(group=grasping_group)
        scene.attach_box(eef_link, box_name, touch_links=touch_links)
        return self.wait_for_state_update(box_is_attached=True, box_is_known=False, timeout=timeout)
    def attach_box_m(self, data, timeout=4):
        """Topic callback: attach data.box_name[0] using the hand group
        named in data.hand_name[0] for the allowed touch links."""
        robot = self.robot
        scene = self.scene
        eef_link = self.eef_link
        group_names = self.group_names
        grasping_group = data.hand_name[0]
        touch_links = robot.get_link_names(group=grasping_group)
        print "touch links list\n", touch_links
        scene.attach_box(eef_link, data.box_name[0], touch_links=touch_links)
        return self.wait_for_state_update(box_is_attached=True, box_is_known=False, timeout=timeout)
    def detach_box_m(self, data, timeout=4):
        """Topic callback: detach data.box_name[0] from the end effector."""
        scene = self.scene
        eef_link = self.eef_link
        scene.remove_attached_object(eef_link, name=data.box_name[0])
        return self.wait_for_state_update(box_is_known=True, box_is_attached=False, timeout=timeout)
    def detach_box(self, timeout=4):
        """Detach self.box_name from the end effector."""
        box_name = self.box_name
        scene = self.scene
        eef_link = self.eef_link
        scene.remove_attached_object(eef_link, name=box_name)
        return self.wait_for_state_update(box_is_known=True, box_is_attached=False, timeout=timeout)
    def remove_box_m(self, data, timeout=4):
        """Topic callback: remove data.object_name[0] from the world.

        NOTE(review): replaces self.scene with a fresh
        PlanningSceneInterface before removing — confirm this is intended.
        """
        scene = moveit_commander.PlanningSceneInterface()
        self.scene = scene
        self.box_name = data.object_name[0]
        scene.remove_world_object(self.box_name)
        return self.wait_for_state_update(box_is_attached=False, box_is_known=False, timeout=timeout)
    def remove_box(self, timeout=4):
        """Remove self.box_name from the planning scene world."""
        box_name = self.box_name
        scene = self.scene
        scene.remove_world_object(box_name)
        return self.wait_for_state_update(box_is_attached=False, box_is_known=False, timeout=timeout)
    def setjoint_m(self, data):
        """Topic callback: drive the arm to a hard-coded initial joint pose.

        ``data`` is unused; the target joint values are fixed below.
        """
        print "go to initial pose"
        self.group_name = 'arm' # this is just for the initialization
        move_group = moveit_commander.MoveGroupCommander(self.group_name)
        joint_goal = move_group.get_current_joint_values()
        joint_goal[0] = 0.60
        joint_goal[1] = +0.3
        joint_goal[2] = -0.054
        joint_goal[3] = -2.25
        joint_goal[4] = -1.59
        joint_goal[5] = -0.3
        joint_goal[6] = 0.01
        # The go command can be called with joint values, poses, or without any
        # parameters if you have already set the pose or joint target for the group
        move_group.go(joint_goal, wait=True)
        move_group.stop()
        current_joints = move_group.get_current_joint_values()
        return all_close(joint_goal, current_joints, 0.01)
    def move_joints_m(self, data):
        """Topic callback: move the group named in data.name[0] to the six
        joint values in data.position."""
        self.group_name = data.name[0] # this is just for the initialization
        print self.group_name, "planning group!!!!!!!!!!1"
        move_group = moveit_commander.MoveGroupCommander(self.group_name)
        joint_goal = move_group.get_current_joint_values()
        joint_goal[0] = data.position[0]
        joint_goal[1] = data.position[1]
        joint_goal[2] = data.position[2]
        joint_goal[3] = data.position[3]
        joint_goal[4] = data.position[4]
        joint_goal[5] = data.position[5]
        # joint_goal[6] = data.position[6]
        move_group.go(joint_goal, wait=True)
        move_group.stop()
        current_joints = move_group.get_current_joint_values()
        return all_close(joint_goal, current_joints, 0.01)
    def remove_all_obj_m(self, data):
        """Topic callback: remove every tracked box when data.data == '1'.

        NOTE(review): iterates the global ``tutorial.object_list`` rather
        than ``self.object_list`` — works only because the node creates a
        single global instance named ``tutorial``.
        """
        print "remove all objects_m if 1, data:", data, type(data)
        if data.data == '1':
            print "remove all start"
            for i in tutorial.object_list:
                scene = moveit_commander.PlanningSceneInterface()
                self.scene = scene
                self.box_name = i
                scene.remove_world_object(self.box_name)
    '''
    (function)_s means that it is an server which gets ROS service messages.
    '''
    def add_box_s(self, data, timeout=4):
        """Service handler: add a box to the planning scene; replies w_flag=1."""
        print "Start 'add box_s'", data.object_name[0]
        box_name = data.object_name[0]
        box_pose = geometry_msgs.msg.PoseStamped()
        box_pose.header.frame_id = "j2n6s300_link_base"
        box_pose.pose.position = data.object_position
        box_pose.pose.orientation = data.object_orientation
        box_scale = (data.object_scale.x, data.object_scale.y, data.object_scale.z)
        self.scene.add_box(box_name, box_pose, box_scale)
        self.box_name = box_name
        # Track the box so remove_all_s can clean it up later.
        self.object_list.append(box_name)
        print "add_box_s ends"
        return box_info_srvResponse(
            w_flag=1
        )
    def del_box_s(self, data, timeout=4):
        """Service handler: remove the named box from the world; replies w_flag=1."""
        print "delete ", data.object_name[0]
        scene = moveit_commander.PlanningSceneInterface()
        self.scene = scene
        self.box_name = data.object_name[0]
        scene.remove_world_object(self.box_name)
        print "del_box_s ends"
        return box_info_srvResponse(
            w_flag=1
        )
    def att_box_s(self, data, timeout=4):
        """Service handler: attach the named box to the end effector, allowing
        contact with the links of the hand group in data.hand_name[0]."""
        print "attach ", data.object_name[0]
        robot = self.robot
        scene = self.scene
        eef_link = self.eef_link
        group_names = self.group_names
        grasping_group = data.hand_name[0]
        touch_links = robot.get_link_names(group=grasping_group)
        print "touch links list\n", touch_links
        scene.attach_box(eef_link, data.object_name[0], touch_links=touch_links)
        print "att_box_s ends"
        return att_hand_box_srvResponse(
            w_flag=1
        )
    def det_box_s(self, data, timeout=4):
        """Service handler: detach the named box from the end effector."""
        print "dettach ", data.object_name[0]
        scene = self.scene
        eef_link = self.eef_link
        scene.remove_attached_object(eef_link, name=data.object_name[0])
        print "det_box_s ends"
        return box_info_srvResponse(
            w_flag=1
        )
    def goalPose_feasibility_check_s(self, data):
        """Service handler: plan to the requested pose WITHOUT executing.

        Replies with w_flag=1, feasibility 0/1, and the (possibly empty)
        planned trajectory.
        """
        # move_group = self.move_group
        move_group = moveit_commander.MoveGroupCommander(data.arm_name[0])
        pose_goal = geometry_msgs.msg.Pose()
        pose_goal.position = data.goal_position
        pose_goal.orientation = data.goal_orientation
        # move_group.set_planner_id('SPARStwo')
        # move_group.set_planner_id('RRTstar')
        # move_group.set_planner_id('BiTRRT')
        move_group.set_num_planning_attempts(10000)
        move_group.set_planning_time(5)
        move_group.set_goal_position_tolerance(0.01)
        move_group.set_goal_orientation_tolerance(0.01)
        move_group.set_pose_target(pose_goal)
        plan = move_group.plan()
        # print plan
        print "plan.joint_trajectory.joint_names :", plan.joint_trajectory.joint_names
        # An empty joint-name list means MoveIt! found no plan.
        if len(plan.joint_trajectory.joint_names) == 0:
            print "no plan found"
            move_group.stop()
            move_group.clear_pose_targets()
            return arm_move_srvResponse(
                w_flag=1,
                feasibility=0,
                r_trj=plan
            )
        elif len(plan.joint_trajectory.joint_names) > 0:
            print "plan found"
            move_group.stop()
            move_group.clear_pose_targets()
            return arm_move_srvResponse(
                w_flag=1,
                feasibility=1,
                r_trj=plan
            )
    def move_goal_pose_s(self, data):
        """Service handler: plan to the requested pose AND execute the plan.

        Same reply shape as goalPose_feasibility_check_s.
        """
        # move_group = self.move_group
        move_group = moveit_commander.MoveGroupCommander(data.arm_name[0])
        pose_goal = geometry_msgs.msg.Pose()
        pose_goal.position = data.goal_position
        pose_goal.orientation = data.goal_orientation
        # move_group.set_planner_id('SPARStwo')
        # move_group.set_planner_id('RRTstar')
        # move_group.set_planner_id('BiTRRT')
        move_group.set_num_planning_attempts(10000)
        move_group.set_planning_time(5)
        move_group.set_goal_position_tolerance(0.01)
        move_group.set_goal_orientation_tolerance(0.01)
        move_group.set_pose_target(pose_goal)
        plan = move_group.plan()
        move_group.execute(plan, wait=True)
        move_group.clear_pose_targets()
        # print plan
        print "plan.joint_trajectory.joint_names :", plan.joint_trajectory.joint_names
        if len(plan.joint_trajectory.joint_names) == 0:
            print "no plan found"
            move_group.stop()
            move_group.clear_pose_targets()
            return arm_move_srvResponse(
                w_flag=1,
                feasibility=0,
                r_trj=plan
            )
        elif len(plan.joint_trajectory.joint_names) > 0:
            print "plan found"
            move_group.stop()
            move_group.clear_pose_targets()
            return arm_move_srvResponse(
                w_flag=1,
                feasibility=1,
                r_trj=plan
            )
    def init_joints_s(self, data):
        """Service handler: on w_start == 1, move the arm to the hard-coded
        initial joint pose and reply w_flag=1."""
        if data.w_start == 1:
            print "go to initial pose"
            self.group_name = 'arm' # this is just for the initialization
            move_group = moveit_commander.MoveGroupCommander(self.group_name)
            joint_goal = move_group.get_current_joint_values()
            joint_goal[0] = 0.60
            joint_goal[1] = +0.3
            joint_goal[2] = -0.054
            joint_goal[3] = -2.25
            joint_goal[4] = -1.59
            joint_goal[5] = -0.3
            # The go command can be called with joint values, poses, or without any
            # parameters if you have already set the pose or joint target for the group
            move_group.go(joint_goal, wait=True)
            move_group.stop()
            print "init_joint_s ends"
            current_joints = move_group.get_current_joint_values()
            return work_start_srvResponse(
                w_flag=1
            )
    def remove_all_s(self, data):
        """Service handler: on w_start == 1, remove every tracked box.

        NOTE(review): iterates the global ``tutorial.object_list`` (not
        ``self.object_list``); relies on the single global instance.
        """
        print "data:", data
        if data.w_start == 1:
            print "remove all objects"
            for i in tutorial.object_list:
                scene = moveit_commander.PlanningSceneInterface()
                self.scene = scene
                self.box_name = i
                scene.remove_world_object(self.box_name)
        print "remove_all_s ends"
        return work_start_srvResponse(
            w_flag=1
        )
    def move_joints_s(self, data):
        """Service handler: move the named group to the six requested joint
        values and reply w_flag=1."""
        self.group_name = data.goalPose.name[0] # this is just for the initialization
        print self.group_name,"planning group!!!!!!!!!!1"
        move_group = moveit_commander.MoveGroupCommander(self.group_name)
        joint_goal = move_group.get_current_joint_values()
        joint_goal[0] = data.goalPose.position[0]
        joint_goal[1] = data.goalPose.position[1]
        joint_goal[2] = data.goalPose.position[2]
        joint_goal[3] = data.goalPose.position[3]
        joint_goal[4] = data.goalPose.position[4]
        joint_goal[5] = data.goalPose.position[5]
        move_group.go(joint_goal, wait=True)
        move_group.stop()
        current_joints = move_group.get_current_joint_values()
        print "move_joint_s ends"
        return arm_goalJoint_srvResponse(
            w_flag=1
        )
def listener():
    """Initialize the ROS node and register every topic callback and
    service handler on the global ``tutorial`` instance, then spin."""
    moveit_commander.roscpp_initialize(sys.argv)
    rospy.init_node('moveit_arm_controller', anonymous=True)
    # =================== message!! ======================
    rospy.Subscriber('arm_goalPose', arm_move_msg, tutorial.go_to_pose_goal_m)
    rospy.Subscriber('feasibility_check', arm_move_msg, tutorial.goalPose_feasibility_check_m)
    rospy.Subscriber('arm_initJoint', String, tutorial.setjoint_m)
    rospy.Subscriber('remove_all_objects', String, tutorial.remove_all_obj_m)
    rospy.Subscriber('arm_goalJoint', sensor_msgs.msg.JointState, tutorial.move_joints_m)
    rospy.Subscriber('add_box_info', box_info_msg, tutorial.add_box_m)
    rospy.Subscriber('del_box_info', box_info_msg, tutorial.remove_box_m)
    rospy.Subscriber('det_box_info', box_info_msg, tutorial.detach_box_m)
    rospy.Subscriber('att_box_info', attach_hand_box, tutorial.attach_box_m)
    # =================== service!! =======================
    rospy.Service('feasibile_check_srv', arm_move_srv, tutorial.goalPose_feasibility_check_s)
    rospy.Service('move_goalpose_srv', arm_move_srv, tutorial.move_goal_pose_s)
    rospy.Service('arm_goalJoint_srv', arm_goalJoint_srv, tutorial.move_joints_s)
    rospy.Service('arm_initJoint_srv', work_start_srv, tutorial.init_joints_s)
    rospy.Service('remove_all_srv', work_start_srv, tutorial.remove_all_s)
    rospy.Service('add_box_srv', box_info_srv, tutorial.add_box_s)
    rospy.Service('del_box_srv', box_info_srv, tutorial.del_box_s)
    rospy.Service('det_box_srv', box_info_srv, tutorial.det_box_s)
    rospy.Service('att_box_srv', att_hand_box_srv, tutorial.att_box_s)
    # Block until the node is shut down.
    rospy.spin()
if __name__ == '__main__':
    print "------------------------------"
    print "Arm trajectory NODE starts!!!!"
    print "------------------------------"
    print "Press Ctrl-D to exit at any time"
    # Global instance referenced by listener() and by the remove_all_*
    # handlers (which read ``tutorial.object_list``).
    tutorial = MoveGroupPythonIntefaceTutorial()
    object_list = []
    listener()
    print "end node!!"
| [
"welovehun91@gmail.com"
] | welovehun91@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.