blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
287919d9d917521c060f36cad6e6a8d764a13d3b | b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1 | /tensorflow/python/util/dispatch_test.py | d57a980d9a3c0a102a073a315e770b888eb16b5b | [
"Apache-2.0"
] | permissive | uve/tensorflow | e48cb29f39ed24ee27e81afd1687960682e1fbef | e08079463bf43e5963acc41da1f57e95603f8080 | refs/heads/master | 2020-11-29T11:30:40.391232 | 2020-01-11T13:43:10 | 2020-01-11T13:43:10 | 230,088,347 | 0 | 0 | Apache-2.0 | 2019-12-25T10:49:15 | 2019-12-25T10:49:14 | null | UTF-8 | Python | false | false | 5,285 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for operator dispatch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
class CustomTensor(object):
  """Minimal stand-in for a composite tensor, used to exercise dispatch.

  Pairs an underlying `Tensor` with a float `score` so the tests below can
  verify that type-based dispatch routes ops to custom handlers.
  """

  def __init__(self, tensor, score):
    # Keep the score untouched; normalize the payload to a real Tensor.
    self.score = score
    self.tensor = ops.convert_to_tensor(tensor)
@tf_export("test_op")
@dispatch.add_dispatch_support
def test_op(x, y, z):
  """Fake op for testing dispatch of Python ops.

  Returns the weighted sum ``x + 2*y + 3*z``.
  """
  return x + 2 * y + 3 * z
@test_util.run_all_in_graph_and_eager_modes
class DispatchTest(test_util.TensorFlowTestCase):
  """Tests for `dispatch.dispatch_for_types` on C++-backed and Python ops."""

  def testAddDispatchForTypes_With_CppOp(self):
    # Snapshot the dispatcher list so it can be restored at the end;
    # registration mutates module-level state on the op itself.
    original_handlers = gen_math_ops.add._tf_dispatchers[:]

    # Override the behavior of gen_math_ops.add.
    @dispatch.dispatch_for_types(gen_math_ops.add, CustomTensor)
    def custom_add(x, y, name=None):  # pylint: disable=unused-variable
      return CustomTensor(gen_math_ops.add(x.tensor, y.tensor, name),
                          (x.score+y.score) / 2.0)
    # math_ops.add shares the dispatcher list with gen_math_ops.add, so the
    # registration above must be visible there as well.
    self.assertEqual(len(math_ops.add._tf_dispatchers),
                     len(original_handlers) + 1)

    # Test that we see the overridden behavior when using CustomTensors.
    x = CustomTensor([1, 2, 3], 2.0)
    y = CustomTensor([7, 8, 2], 0.0)
    x_plus_y = gen_math_ops.add(x, y)
    self.assertAllEqual(self.evaluate(x_plus_y.tensor), [8, 10, 5])
    self.assertNear(x_plus_y.score, 1.0, 0.001)

    # Test that we still get the right behavior when using normal Tensors.
    a = [1, 2, 3]
    b = [4, 5, 6]
    a_plus_b = gen_math_ops.add(a, b)
    self.assertAllEqual(a_plus_b, [5, 7, 9])

    # Test that we still get a TypeError or ValueError if we pass some
    # type that's not supported by any dispatcher.
    with self.assertRaises((TypeError, ValueError)):
      gen_math_ops.add(a, None)

    # Clean up: restore the original dispatchers so later tests are isolated.
    gen_math_ops.add._tf_dispatchers = original_handlers

  def testAddDispatchForTypes_With_PythonOp(self):
    # Same snapshot/restore pattern as above, for a Python-defined op.
    original_handlers = test_op._tf_dispatchers[:]

    @dispatch.dispatch_for_types(test_op, CustomTensor)
    def override_for_test_op(x, y, z):  # pylint: disable=unused-variable
      return CustomTensor(test_op(x.tensor, y.tensor, z.tensor),
                          (x.score + y.score + z.score) / 3.0)

    x = CustomTensor([1, 2, 3], 0.2)
    y = CustomTensor([7, 8, 2], 0.4)
    z = CustomTensor([0, 1, 2], 0.6)

    result = test_op(x, y, z)
    # Expected tensor is x + 2*y + 3*z elementwise; score is the mean.
    self.assertAllEqual(self.evaluate(result.tensor), [15, 21, 13])
    self.assertNear(result.score, 0.4, 0.001)

    # Clean up
    test_op._tf_dispatchers = original_handlers

  def testDispatchForTypes_SignatureMismatch(self):
    # The override's argument names must match the dispatched op exactly;
    # here (a, b, c) != (x, y, z), so registration should fail.
    with self.assertRaisesRegexp(AssertionError, "The decorated function's "
                                 "signature must exactly match.*"):
      @dispatch.dispatch_for_types(test_op, CustomTensor)
      def override_for_test_op(a, b, c):  # pylint: disable=unused-variable
        return CustomTensor(test_op(a.tensor, b.tensor, c.tensor),
                            (a.score + b.score + c.score) / 3.0)

  def testDispatchForTypes_OpDoesNotSupportDispatch(self):
    # A plain function (not decorated with add_dispatch_support) cannot
    # have dispatchers registered for it.
    def some_op(x, y):
      return x + y

    with self.assertRaisesRegexp(AssertionError, "Dispatching not enabled for"):
      @dispatch.dispatch_for_types(some_op, CustomTensor)
      def override_for_some_op(x, y):  # pylint: disable=unused-variable
        return x if x.score > 0 else y

  @test.mock.patch.object(tf_logging, "warning", autospec=True)
  def testInteractionWithDeprecationWarning(self, mock_warning):
    # Dispatch support must compose with @deprecated: calling the op should
    # still emit the deprecation warning through tf_logging.warning.
    @deprecation.deprecated(date=None, instructions="Instructions")
    @dispatch.add_dispatch_support
    def some_op(x):
      return x

    some_op(5)

    # Reconstruct the logged message from the %-style call arguments.
    message = mock_warning.call_args[0][0] % mock_warning.call_args[0][1:]
    self.assertRegexpMatches(
        message,
        r".*some_op \(from __main__\) is deprecated and will be "
        "removed in a future version.*")
if __name__ == "__main__":
  # Run all test cases in this module under the TensorFlow test runner.
  googletest.main()
| [
"v-grniki@microsoft.com"
] | v-grniki@microsoft.com |
b4cfd589f9a33ea94d548b290b5ad92cab41c430 | 9dc1c85e7d86d29400af79125e9cd89a82a9b8ab | /su_django/su_django/settings.py | 70e3f66d9e3bf1dadbe6287aa4005594eb96c938 | [
"MIT"
] | permissive | borko81/simple_django | e284ff8f79b3e708b4903ba0b774e3a480de9190 | 9dbd2d848cbf0ff0c58e93471853c5b21c769758 | refs/heads/master | 2023-07-14T01:25:13.294095 | 2021-08-16T15:48:00 | 2021-08-16T15:48:00 | 349,369,208 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,242 | py | import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
# Project root: two levels up from this settings file.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'django-insecure-nl!+e0+xy%-*ipck7p6(9&jxfh3)pmwp0anrzcb)7@s#_bzamb'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    # Django built-ins.
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Local project apps.
    'su_django',
    'app01',
    'cityes',
    'secondary_app',
    'main_app',
    'boards',
    'filemanager',
    'posts',
    'forms_lab',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'su_django.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory, in addition to per-app ones
        # discovered via APP_DIRS.
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'su_django.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# File-based SQLite database; suitable for development only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = '/static/'
# NOTE(review): joining BASE_DIR with STATIC_URL ('/static/', a leading-slash
# URL rather than a relative dir name) is unusual — confirm the intended
# collectstatic output directory.
STATIC_ROOT = os.path.join(BASE_DIR, STATIC_URL)

MEDIA_URL = 'media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"bstoilov81@gmail.com"
] | bstoilov81@gmail.com |
361eb0dd8dd67f869ecb5078de8a4210e4959996 | 115f786a22f928e290753122dd4fdc95280263ca | /env/bin/flask | 6c02818ddf8aa347bf83df4091cdaec6968a4f1c | [] | no_license | land-pack/flask-wordcounter | c6ddd977d247e8c6781c6916ec96fc6ded1f2032 | d81f8045b01439297b46d3f564457194d332f336 | refs/heads/master | 2021-01-17T18:21:26.314704 | 2016-06-25T03:27:44 | 2016-06-25T03:27:44 | 61,924,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | #!/Users/landpack/ak/realpython/flask-by-example/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
    # pip/setuptools console-script shim: strip the '-script.pyw' or '.exe'
    # suffix that Windows launcher wrappers append to argv[0], then hand
    # control to the Flask CLI and propagate its exit status.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"landpack@sina.com"
] | landpack@sina.com | |
fe3c7db78c54e5352b73bf103a3df636a50cdaaa | 483f45b1d241d318c06842f250719e73b8c4dfe7 | /Ex084.py | 71a391cde9885a5f79bb5b240a9d890998173737 | [] | no_license | andersondev96/Curso-em-Video-Python | 510a82bfa65830449374eb5e2b81af404120689e | 76449e6a0ba3624d2c5643268499dea3fccfa5d1 | refs/heads/master | 2022-10-19T02:07:10.967713 | 2020-06-14T23:57:02 | 2020-06-14T23:57:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 989 | py | """
Faça um programa que leia nome e peso de várias pessoas,
guardando tudo em uma lista. No final, mostre:
A) Quantas pessoas foram cadastradas.
B) Uma listagem com as pessoas mais pesadas.
C) Uma listagem com as pessoas mais leves.
"""
# `lista` accumulates all [name, weight] records; `dados` is a scratch
# buffer for the record currently being read from the user.
lista = list()
dados = list()
tot = 0          # number of people registered
mai = men = 0    # heaviest / lightest weight seen so far
while True:
    dados.append(str(input('Nome: ')))
    dados.append(float(input('peso: ')))
    if len(lista) == 0:
        # First record initializes both extremes.
        mai = men = dados[1]
    else:
        if dados[1] > mai:
            mai = dados[1]
        if dados[1] < men:
            men = dados[1]
    # Store a copy of the record, then clear the scratch buffer.
    lista.append(dados[:])
    dados.clear()
    tot = tot + 1
    cont = str(input('Deseja continuar [s/n]: '))
    # NOTE(review): `'' in 'Nn'` is True, so pressing Enter with no input
    # also ends the loop — confirm this is intended.
    if cont in 'Nn':
        break
print("-="*30)
print(f'Foram cadastradas {tot} pessoas na lista.')
# Print every person tied for the maximum weight.
print(f'O maior peso foi de {mai}Kg')
for p in lista:
    if p[1] == mai:
        print(f'[{p[0]}]')
print()
# Print every person tied for the minimum weight.
print(f'O menor peso foi de {men}Kg')
for p in lista:
    if p[1] == men:
        print(f'[{p[0]}]')
print()
"andersonfferreira13@hotmail.com"
] | andersonfferreira13@hotmail.com |
8e62cae88c31bff477b2aa066abce0303c029d80 | 3eb99709809a493c46a79171ef9774aa4261b59d | /脚本/llianli/cf_app_flottery_client_data.py | 62454ee49a8036d8b5a611fcd32f37f9b13c3625 | [] | no_license | bingwin/tencent | c831a5b344f597a06c7a7b179d4f67d668198c90 | ea5dc5ff398d85cfdf4df056dc8b4064e66fb5fb | refs/heads/master | 2020-07-28T21:44:00.281933 | 2016-05-28T03:21:31 | 2016-05-28T03:21:31 | 209,548,176 | 1 | 0 | null | 2019-09-19T12:29:21 | 2019-09-19T12:29:21 | null | UTF-8 | Python | false | false | 3,904 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
# ******************************************************************************
# 程序名称: cf_app_flottery_client_data.py
# 功能描述: 掌上穿越火线抽奖功能客户端相关事件统计
# 输入参数: yyyymmdd 例如:20140113
# 目标表名: ieg_qt_community_app.tb_cf_app_flottery_client_click
# 数据源表: teg_mta_intf.ieg_lol
# 创建人名: llianli
# 创建日期: 2016-02-01
# 版本说明: v1.0
# 公司名称: tencent
# 修改人名:
# 修改日期:
# 修改原因:
# ******************************************************************************
#import system module
# main entry
def TDW_PL(tdw, argv=None):
    """TDW PL entry point: aggregate CF-app lottery client click stats.

    Creates (if needed) and repopulates one day's partition of
    ieg_qt_community_app.tb_cf_app_flottery_client_click from the raw
    teg_mta_intf::ieg_lol event table.

    Args:
      tdw: TDW session object exposing WriteLog(msg) and execute(sql).
      argv: list whose first element is the stats date string 'yyyymmdd'.
    """
    # Fix: the original signature used a mutable default argument
    # (argv=[]), a classic Python pitfall; normalize from None instead.
    if argv is None:
        argv = []
    tdw.WriteLog("== begin ==")
    tdw.WriteLog("== argv[0] = " + argv[0] + " ==")
    sDate = argv[0]
    ##sDate = '20150111'
    tdw.WriteLog("== sDate = " + sDate + " ==")
    tdw.WriteLog("== connect tdw ==")
    sql = """use ieg_qt_community_app"""
    res = tdw.execute(sql)
    # Split map inputs by line count so tasks are evenly sized.
    sql = """set hive.inputfiles.splitbylinenum=true"""
    res = tdw.execute(sql)
    sql = """set hive.inputfiles.line_num_per_split=1000000"""
    res = tdw.execute(sql)
    sql = """
    CREATE TABLE IF NOT EXISTS tb_cf_app_flottery_client_click
    (
        sdate int,
        id bigint,
        ei string,
        pv bigint,
        total_uin bigint,
        total_mac bigint
    ) """
    res = tdw.execute(sql)
    # Idempotent reruns: clear any rows already written for this date.
    sql = """delete from tb_cf_app_flottery_client_click where sdate=%s """ % (sDate)
    res = tdw.execute(sql)
    # Map raw (id, ei[, kv.type]) event combinations onto named lottery
    # modules, then count pv / distinct uin / distinct device per module,
    # with a cube(id) rollup row (id = -100) across both client ids.
    sql = """
    insert table tb_cf_app_flottery_client_click
    select
        %s as sdate,
        case when grouping(id) = 1 then -100 else id end as id,
        ei,
        count(*) as pv,
        count(distinct uin) as total_uin,
        count(distinct ui_mc) as total_mac
    from
    (
        select
            id,
            concat(ui,mc) as ui_mc,
            get_json_object(kv,'$.uin') as uin ,
            case
                when ( id = 1100679031 and ei = '王者宝藏点击次数') or
                     ( id = 1200679031 and ei = '抽奖_模块点击')
                then '王者宝藏模块'
                when ( id = 1100679031 and ei = '抽奖页面点击量') or
                     ( id = 1200679031 and ei = '抽奖_TAB展示次数' and get_json_object(kv,'$.type') = '宝藏')
                then '抽奖页面'
                when ( id = 1100679031 and ei = '分享点击次数') or
                     ( id = 1200679031 and ei = '抽奖_结果界面分享次数' )
                then '分享点击次数'
                when ( id = 1100679031 and ei = '排行榜页面点击量') or
                     ( id = 1200679031 and ei = '抽奖_TAB展示次数' and get_json_object(kv,'$.type') = '排行')
                then '排行页面'
                when ( id = 1100679031 and ei = '兑换页面点击量') or
                     ( id = 1200679031 and ei = '抽奖_TAB展示次数' and get_json_object(kv,'$.type') = '兑换')
                then '兑换页面'
                when ( id = 1100679031 and ei = '记录页面点击量') or
                     ( id = 1200679031 and ei = '抽奖_TAB展示次数' and get_json_object(kv,'$.type') = '记录')
                then '记录页面'
                else 'other'
            end as ei
        from teg_mta_intf::ieg_lol where sdate = %s and id in (1100679031,1200679031)
    )t
    where ei != 'other'
    group by cube(id),ei
    """ % (sDate,sDate)
    tdw.WriteLog(sql)
    res = tdw.execute(sql)
    tdw.WriteLog("== end OK ==")
| [
"996346098@qq.com"
] | 996346098@qq.com |
5cc2f23e86060b74e96ad7acccf967a3ea162715 | c839961aeab22795200d9edef9ba043fe42eeb9c | /data/script651.py | d647310d5d7941d48e5edf0ebbbda6ae13549b6b | [] | no_license | StevenLOL/kaggleScape | ad2bb1e2ed31794f1ae3c4310713ead1482ffd52 | 18bede8420ab8d2e4e7c1eaf6f63280e20cccb97 | refs/heads/master | 2020-03-17T05:12:13.459603 | 2018-05-02T19:35:55 | 2018-05-02T19:35:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,951 | py |
# coding: utf-8
# In this edited version of my kernel, I have included some new features and some others are under progress. Some have been influenced from [THIS KERNEL](https://www.kaggle.com/sudosudoohio/stratified-kfold-xgboost-eda-tutorial-0-281).
#
# # What lies ahead of you?
#
# * **Data Exploration**
# * Analyzing Datatypes
# * Analyzing Missing Values
# * Visualizing missing values
# * Memory Usage Analysis
#
# * **Data Analysis** (visualizing each and every type of feature in the data set)
# * Splitting columns based on types
# * Binary Features
# * Categorical Features
# * Continuous/Ordinal Features
# * Correlation (**ps_calc** have an outrageous attitude!!!)
# * **Feature Engineering**
# * New Binary features
# * New Continuous/Ordinal features (*in progress*)
# * **Modeling**
# * Gradient Boosting
# * XGBoost
# # Importing Libraries and Loading Data
# In[ ]:
import numpy as np # linear algebra
import seaborn as sns
import missingno as msno
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from matplotlib import pyplot as plt
from subprocess import check_output
from sklearn import *
import xgboost as xgb
from multiprocessing import *
from ggplot import *
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_sample = pd.read_csv('../input/sample_submission.csv')
# Any results you write to the current directory are saved as output.
# In[ ]:
df_train.shape
# In[ ]:
print(len(df_train.columns))
#new_cont_ord_cols = [c for c in df_train.columns if not c.startswith('ps_calc_')]
new_cont_ord_cols = [c for c in df_train.columns if not c.endswith('bin')]
no_bin_cat_cols = [c for c in new_cont_ord_cols if not c.endswith('cat')][2:]
# In[ ]:
'''
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
col = [c for c in train.columns if c not in ['id','target']]
print(len(col))
col = [c for c in col if not c.startswith('ps_calc_')]
print(len(col))
train = train.replace(-1, np.NaN)
d_median = train.median(axis=0)
d_mean = train.mean(axis=0)
train = train.fillna(-1)
one_hot = {c: list(train[c].unique()) for c in train.columns if c not in ['id','target']}
'''
# In[ ]:
'''
def transform_df(df):
df = pd.DataFrame(df)
dcol = [c for c in df.columns if c not in ['id','target']]
df['ps_car_13_x_ps_reg_03'] = df['ps_car_13'] * df['ps_reg_03']
df['negative_one_vals'] = np.sum((df[dcol]==-1).values, axis=1)
for c in dcol:
if '_bin' not in c: #standard arithmetic
df[c+str('_median_range')] = (df[c].values > d_median[c]).astype(np.int)
df[c+str('_mean_range')] = (df[c].values > d_mean[c]).astype(np.int)
#df[c+str('_sq')] = np.power(df[c].values,2).astype(np.float32)
#df[c+str('_sqr')] = np.square(df[c].values).astype(np.float32)
#df[c+str('_log')] = np.log(np.abs(df[c].values) + 1)
#df[c+str('_exp')] = np.exp(df[c].values) - 1
for c in one_hot:
if len(one_hot[c])>2 and len(one_hot[c]) < 7:
for val in one_hot[c]:
df[c+'_oh_' + str(val)] = (df[c].values == val).astype(np.int)
return df
def multi_transform(df):
print('Init Shape: ', df.shape)
p = Pool(cpu_count())
df = p.map(transform_df, np.array_split(df, cpu_count()))
df = pd.concat(df, axis=0, ignore_index=True).reset_index(drop=True)
p.close(); p.join()
print('After Shape: ', df.shape)
return df
def gini(y, pred):
fpr, tpr, thr = metrics.roc_curve(y, pred, pos_label=1)
g = 2 * metrics.auc(fpr, tpr) -1
return g
def gini_xgb(pred, y):
y = y.get_label()
return 'gini', gini(y, pred)
params = {'eta': 0.02, 'max_depth': 4, 'subsample': 0.9, 'colsample_bytree': 0.9, 'objective': 'binary:logistic', 'eval_metric': 'auc', 'seed': 99, 'silent': True}
x1, x2, y1, y2 = model_selection.train_test_split(train, train['target'], test_size=0.25, random_state=99)
x1 = multi_transform(x1)
x2 = multi_transform(x2)
test = multi_transform(test)
col = [c for c in x1.columns if c not in ['id','target']]
col = [c for c in col if not c.startswith('ps_calc_')]
print(x1.values.shape, x2.values.shape)
#remove duplicates just in case
tdups = multi_transform(train)
dups = tdups[tdups.duplicated(subset=col, keep=False)]
x1 = x1[~(x1['id'].isin(dups['id'].values))]
x2 = x2[~(x2['id'].isin(dups['id'].values))]
print(x1.values.shape, x2.values.shape)
y1 = x1['target']
y2 = x2['target']
x1 = x1[col]
x2 = x2[col]
watchlist = [(xgb.DMatrix(x1, y1), 'train'), (xgb.DMatrix(x2, y2), 'valid')]
model = xgb.train(params, xgb.DMatrix(x1, y1), 5000, watchlist, feval=gini_xgb, maximize=True, verbose_eval=50, early_stopping_rounds=200)
test['target'] = model.predict(xgb.DMatrix(test[col]), ntree_limit=model.best_ntree_limit+45)
test['target'] = (np.exp(test['target'].values) - 1.0).clip(0,1)
sub = pd.DataFrame()
sub['id'] = test['id']
sub['target'] = test['target']
sub.to_csv('xgb1.csv', index=False)
#test[['id','target']].to_csv('xgb_submission.csv', index=False, float_format='%.5f')
'''
# # Data Exploration
#
# First things first, let us explore what we have!
# In[ ]:
df_train.head()
# Saving the **target** variable separately and dropping it from the training set.
# In[ ]:
target = df_train['target']
#df_train = df_train.drop('target', 1)
# ## Analyzing Datatypes
#
# We only have two datatypes in our dataset: **int** and **float**.
# In[ ]:
print(df_train.dtypes.unique())
print(df_train.dtypes.nunique())
print(df_test.dtypes.unique())
print(df_test.dtypes.nunique())
# In[ ]:
pp = pd.value_counts(df_train.dtypes)
pp.plot.bar()
plt.show()
# ## Analyzing Missing Values
# In[ ]:
print (df_train.isnull().values.any())
print (df_test.isnull().values.any())
# However, as mentioned by someone in the comments, "This isn't true!" The missing values have been replaced by -1.
#
# We will replace them using np.nan and see how it is distributed.
#
#
# In[ ]:
#df_train.replace(-1, np.nan)
#df_test.replace(-1, np.nan)
df_train[(df_train == -1)] = np.nan
df_test[(df_test == -1)] = np.nan
print('done')
# Checking for missing values again
# In[ ]:
print (df_train.isnull().values.any())
print (df_test.isnull().values.any())
# Printing list of columns with missing values in both the train and test dataframe:
# In[ ]:
cols_missing_val_train = df_train.columns[df_train.isnull().any()].tolist()
print(cols_missing_val_train)
print('\n')
cols_missing_val_test = df_test.columns[df_test.isnull().any()].tolist()
print(cols_missing_val_test)
# We see that the train dataframe has an extra column with missing values (**ps_car_12**).
#
# ## Visualizing missing values
# In[ ]:
#--- Train dataframe ---
msno.bar(df_train[cols_missing_val_train],figsize=(20,8),color="#19455e",fontsize=18,labels=True,)
# In[ ]:
#--- Test dataframe ---
msno.bar(df_test[cols_missing_val_test],figsize=(20,8),color="#50085e",fontsize=18,labels=True,)
# We can see that the missing values a proportional in both the test and train dataframes.
# In[ ]:
#--- Train dataframe ---
msno.matrix(df_train[cols_missing_val_train],width_ratios=(10,1), figsize=(20,8),color=(0.2,0.2,0.2),fontsize=18,sparkline=True,labels=True)
# In[ ]:
#--- Test dataframe ---
msno.matrix(df_test[cols_missing_val_test],width_ratios=(10,1), figsize=(20,8),color=(0.2,0.2,0.2),fontsize=18,sparkline=True,labels=True)
# We see a similar resemblance of proportional missing values in the train and test dataframes!
# Replacing the missing values to -1.
# In[ ]:
df_train.replace(np.nan, -1, inplace=True)
df_test.replace(np.nan, -1, inplace=True)
# ## Memory Usage
# In[ ]:
#--- memory consumed by train dataframe ---
mem = df_train.memory_usage(index=True).sum()
print("Memory consumed by training set : {} MB" .format(mem/ 1024**2))
print('\n')
#--- memory consumed by test dataframe ---
mem = df_test.memory_usage(index=True).sum()
print("Memory consumed by test set : {} MB" .format(mem/ 1024**2))
# By altering the datatypes we can reduce memory usage:
# In[ ]:
def change_datatype(df):
    """Downcast every integer column of ``df`` in place to the smallest
    signed integer dtype (int8/int16/int32/int64) that fits its values,
    reducing memory usage without changing any value.

    Args:
        df: pandas DataFrame; modified in place.
    """
    # Fix: the original named this `float_cols` although it selects integer
    # columns, and used a misplaced parenthesis — np.min(df[col] >= -128) —
    # that only worked by accident via boolean reduction. Compare explicit
    # bounds instead, computed once per column.
    int_cols = list(df.select_dtypes(include=['int']).columns)
    for col in int_cols:
        col_min = df[col].min()
        col_max = df[col].max()
        if col_min >= -128 and col_max <= 127:
            df[col] = df[col].astype(np.int8)
        elif col_min >= -32768 and col_max <= 32767:
            df[col] = df[col].astype(np.int16)
        elif col_min >= -2147483648 and col_max <= 2147483647:
            df[col] = df[col].astype(np.int32)
        else:
            df[col] = df[col].astype(np.int64)
change_datatype(df_train)
change_datatype(df_test)
# In[ ]:
#--- Converting columns from 'float64' to 'float32' ---
def change_datatype_float(df):
float_cols = list(df.select_dtypes(include=['float']).columns)
for col in float_cols:
df[col] = df[col].astype(np.float32)
change_datatype_float(df_train)
change_datatype_float(df_test)
# Let us check the memory consumed again:
# In[ ]:
#--- memory consumed by train dataframe ---
mem = df_train.memory_usage(index=True).sum()
print("Memory consumed by training set : {} MB" .format(mem/ 1024**2))
print('\n')
#--- memory consumed by test dataframe ---
mem = df_test.memory_usage(index=True).sum()
print("Memory consumed by test set : {} MB" .format(mem/ 1024**2))
# That is memory consumption reduced by **greater than 50%** !!!
# In[ ]:
print(len(df_test.columns))
print(len(df_train.columns))
#print(len(target.columns))
# # Quick Modeling (without any analysis)
# Quick check to make sure the columns are the same in both `train` and `test` data.
# In[ ]:
len(set(df_test.columns) and set(df_train.columns))
# ## Random Forest
# In[ ]:
# Temporarily restore the -1 sentinels to NaN so the per-column median/mean
# statistics ignore missing values, then put the -1 sentinels back.
df_train = df_train.replace(-1, np.NaN)
d_median = df_train.median(axis=0)
d_mean = df_train.mean(axis=0)
df_train = df_train.fillna(-1)
dcol = [c for c in df_train.columns if c not in ['id','target']]
# Interaction feature: product of two existing columns.
df_train['ps_car_13_x_ps_reg_03'] = df_train['ps_car_13'] * df_train['ps_reg_03']
#df['negative_one_vals'] = np.sum((df[dcol]==-1).values, axis=1)
for c in dcol:
    if '_bin' not in c: #standard arithmetic
        # Indicator features: is the value above its column median / mean?
        df_train[c+str('_median_range')] = (df_train[c].values > d_median[c]).astype(np.int)
        df_train[c+str('_mean_range')] = (df_train[c].values > d_mean[c]).astype(np.int)
        # Simple monotone transforms of the raw value.
        df_train[c+str('_sq')] = np.power(df_train[c].values,2).astype(np.float32)
        #df[c+str('_sqr')] = np.square(df[c].values).astype(np.float32)
        df_train[c+str('_log')] = np.log(np.abs(df_train[c].values) + 1)
        df_train[c+str('_exp')] = np.exp(df_train[c].values) - 1
# In[ ]:
change_datatype(df_train)
# In[ ]:
df_train.head()
# In[ ]:
from sklearn.model_selection import train_test_split

# All columns except the identifier and the label are model features.
features= [c for c in df_train.columns.values if c not in ['id', 'target']]
#numeric_features= [c for c in df.columns.values if c not in ['id','text','author','processed']]
#target = 'author'
# Hold out a third of the rows for validation; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(df_train[features], df_train['target'], test_size=0.33, random_state=42)
X_train.head()
# In[ ]:
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.ensemble import GradientBoostingClassifier

# Single-step pipeline; the commented-out lines are leftovers from an
# earlier feature-union experiment and an alternative boosted model.
pipeline = Pipeline([
    #('features',feats),
    ('classifier', RandomForestClassifier(random_state = 42))
    #('classifier', GradientBoostingClassifier(random_state = 42))
])

pipeline.fit(X_train, y_train)
preds = pipeline.predict(X_test)
# Plain accuracy on the held-out split.
np.mean(preds == y_test)
# In[ ]:
pipeline.get_params().keys()
# In[ ]:
from sklearn.model_selection import GridSearchCV

# Small grid over forest size and tree-complexity controls; the pipeline
# step name ('classifier') prefixes each parameter.
hyperparameters = { #'features__text__tfidf__max_df': [0.9, 0.95],
                    #'features__text__tfidf__ngram_range': [(1,1), (1,2)],
                    #'classifier__learning_rate': [0.1, 0.2],
                    'classifier__n_estimators': [20, 30, 50],
                    'classifier__max_depth': [2, 4],
                    'classifier__min_samples_leaf': [2, 4]
                  }
# 3-fold cross-validated search over the pipeline's classifier step.
clf = GridSearchCV(pipeline, hyperparameters, cv = 3)

# Fit and tune model
clf.fit(X_train, y_train)
# In[ ]:
clf.best_params_
# In[ ]:
#refitting on entire training data using best settings
# Fix: the original line here was a bare `clf.refit` attribute access — a
# no-op. GridSearchCV with refit=True (the default) already refits the best
# estimator on the full training data inside fit(), so the dead statement
# is removed; predictions below are unchanged.
preds = clf.predict(X_test)
probs = clf.predict_proba(X_test)
# Accuracy of the tuned model on the held-out split.
np.mean(preds == y_test)
# In[ ]:
# Same missing-value handling and feature engineering as applied to
# df_train earlier, repeated for the test set so train and test share
# identical columns.
# NOTE(review): duplicated logic — a shared helper would keep the two
# transforms from drifting apart.
df_test = df_test.replace(-1, np.NaN)
dt_median = df_test.median(axis=0)
dt_mean = df_test.mean(axis=0)
df_test = df_test.fillna(-1)
dtcol = [c for c in df_test.columns if c not in ['id']]
df_test['ps_car_13_x_ps_reg_03'] = df_test['ps_car_13'] * df_test['ps_reg_03']
#df['negative_one_vals'] = np.sum((df[dcol]==-1).values, axis=1)
for c in dtcol:
    if '_bin' not in c: #standard arithmetic
        df_test[c+str('_median_range')] = (df_test[c].values > dt_median[c]).astype(np.int)
        df_test[c+str('_mean_range')] = (df_test[c].values > dt_mean[c]).astype(np.int)
        df_test[c+str('_sq')] = np.power(df_test[c].values,2).astype(np.float32)
        #df[c+str('_sqr')] = np.square(df[c].values).astype(np.float32)
        df_test[c+str('_log')] = np.log(np.abs(df_test[c].values) + 1)
        df_test[c+str('_exp')] = np.exp(df_test[c].values) - 1
# In[ ]:
change_datatype(df_test)
# In[ ]:
# Re-read the raw test file only to recover the `id` column in its
# original row order.
submission = pd.read_csv('../input/test.csv')
#preprocessing
#test_features= [c for c in submission.columns.values if c not in ['id']]
test_features= [c for c in df_test.columns.values if c not in ['id']]
#submission = processing(submission)
# Class probabilities from the tuned grid-search estimator; DataFrame
# columns are the classifier's class labels (0 and 1).
predictions = clf.predict_proba(df_test[test_features])
preds = pd.DataFrame(data = predictions, columns = clf.best_estimator_.named_steps['classifier'].classes_)
#generating a submission file
# Keep only the positive-class probability: drop the column labeled 0,
# then rename to the required `id,target` header.
result = pd.concat([submission[['id']], preds], axis=1)
result = result.drop(0, axis=1)
result.columns = ['id', 'target']
result.head()
result.to_csv('random_forest.csv', index=False)
# In[ ]:
'''
from sklearn.cross_validation import train_test_split
import xgboost as xgb
X_train = df_train.drop(['id'],axis = 1)
X_id_train = df_train['id'].values
Y_train = target.values
X_test = df_test.drop(['id'], axis=1)
X_id_test = df_test['id'].values
x_train, x_valid, y_train, y_valid = train_test_split(X_train, Y_train, test_size = 0.4, random_state = 1000)
print('Train samples: {} Validation samples: {}'.format(len(x_train), len(x_valid)))
d_train = xgb.DMatrix(x_train, y_train)
d_valid = xgb.DMatrix(x_valid, y_valid)
d_test = xgb.DMatrix(X_test)
params = {}
params['min_child_weight'] = 10.0
params['objective'] = 'binary:logistic'
params['eta'] = 0.02
params['silent'] = True
params['max_depth'] = 9
params['subsample'] = 0.9
params['colsample_bytree'] = 0.9
# Define the gini metric - from https://www.kaggle.com/c/ClaimPredictionChallenge/discussion/703#5897
def gini(actual, pred, cmpcol = 0, sortcol = 1):
assert( len(actual) == len(pred) )
all = np.asarray(np.c_[ actual, pred, np.arange(len(actual)) ], dtype=np.float)
all = all[ np.lexsort((all[:,2], -1*all[:,1])) ]
totalLosses = all[:,0].sum()
giniSum = all[:,0].cumsum().sum() / totalLosses
giniSum -= (len(actual) + 1) / 2.
return giniSum / len(actual)
def gini_normalized(a, p):
return gini(a, p) / gini(a, a)
# Create an XGBoost-compatible metric from Gini
def gini_xgb(preds, dtrain):
labels = dtrain.get_label()
gini_score = gini_normalized(labels, preds)
return [('gini', gini_score)]
watchlist = [(d_train, 'train'), (d_valid, 'valid')]
model = xgb.train(params, d_train, 100, watchlist, early_stopping_rounds=100, feval=gini_xgb, maximize=True, verbose_eval=10)
xgb.plot_importance(model)
fig, ax = plt.subplots(figsize=(12,18))
plt.show()
p_test = model.predict(d_test)
#--- Submission file ---
sub = pd.DataFrame()
sub['id'] = X_id_test
sub['target'] = p_test
sub.to_csv('xgb.csv', index=False)
importance = model.get_fscore(fmap='xgb.fmap')
importance = sorted(importance.items(), key=operator.itemgetter(1))
df = pd.DataFrame(importance, columns=['feature', 'fscore'])
plt.figure()
df.plot()
df.plot(kind='barh', x='feature', y='fscore', legend=False, figsize=(10, 25))
plt.gcf().savefig('features_importance.png')
'''
# # Data Analysis
# ## Splitting columns based on types
# According to the data given to us:
# * features that belong to similar groupings are tagged as such in the feature names (e.g., **ind**, **reg**, **car**, **calc**).
# * feature names include the postfix **bin** to indicate binary features and **cat** to indicate categorical features.
# * feature names without **bin** or **cat** are grouped as **continuous/ordinal** features.
# In[ ]:
#-- List of all columns --
train_cols = df_train.columns.tolist()
#--- binary and categorical features list ---
bin_cols = []
cat_cols = []
#--- continous/ordinal features list ---
cont_ord_cols = []
#--- different feature groupings ---
ind_cols = []
reg_cols = []
car_cols = []
calc_cols = []
# Continuous/ordinal features: every 'ps*' column whose name carries neither
# the 'bin' nor the 'cat' postfix.  NOTE: '&' on the bool operands acts like
# 'and' here (both sides are plain Python bools).
for col in train_cols:
    if (('ps' in str(col)) & ('bin' not in str(col)) & ('cat' not in str(col))):
        cont_ord_cols.append(col)
# Bucket each column by the substrings in its name; a column can land in
# several lists (e.g. 'ps_ind_06_bin' goes to both bin_cols and ind_cols).
for col in train_cols:
    if ('bin' in str(col)):
        bin_cols.append(col)
    if ('cat' in str(col)):
        cat_cols.append(col)
    if ('ind' in str(col)):
        ind_cols.append(col)
    if ('reg' in str(col)):
        reg_cols.append(col)
    if ('car' in str(col)):
        car_cols.append(col)
    if ('calc' in str(col)):
        calc_cols.append(col)
# Columns present in `cont_ord_cols` list have a collection of different types of columns.
#
# So we can divide them into **continuous** and **ordinal** variables based on their data types.
# In[ ]:
# Split the continuous/ordinal columns by dtype: float32 columns are treated
# as continuous, int8/int16 columns as ordinal.
# NOTE(review): this assumes change_datatype() already downcast the dtypes --
# a column left at the default float64/int64 would be silently skipped.
float_cols = []
int_cols = []
for col in cont_ord_cols:
    if (df_train[col].dtype == np.float32):
        float_cols.append(col) #--- continuous variables ---
    elif ((df_train[col].dtype == np.int8) or (df_train[col].dtype == np.int16)):
        int_cols.append(col) #--- ordinal variables ---
# The following snippet confirms that all these variables are **ordinal**, because each has more than 2 unique values.
# In[ ]:
for col in int_cols:
print (df_train[col].nunique())
# Exploring each of the above extracted grouped features individually:
#
# ## Binary features:
#
# Binary features whose single attribute is less than 10% will be collected in a separate list
# In[ ]:
# Mark binary columns whose minority value covers at most 10% of the rows
# (highly skewed features), plotting each value distribution along the way.
cols_to_delete = []
th = 0.1
for col in range(0, len(bin_cols)):
    print (bin_cols[col])
    print (df_train[bin_cols[col]].unique())
    pp = pd.value_counts(df_train[bin_cols[col]])
    for i in range(0, len(pp)):
        # pp is indexed by the observed values (0/1 for binary columns);
        # a value share at or below the threshold marks the whole column.
        if((pp[i]/float(len(df_train))) <= th):
            cols_to_delete.append(bin_cols[col])
    pp.plot.bar()
    plt.show()
# In[ ]:
print(cols_to_delete)
# The above mentioned columns have highly skewed values hence can be dropped from both the training and test set.
# In[ ]:
# Drop the skewed binary columns from both frames, in place.
for col in cols_to_delete:
    df_train.drop([col], axis=1, inplace=True)
    df_test.drop([col], axis=1, inplace=True)
# ## Categorical Features
#
# Exploring the categorical variables:
# In[ ]:
for col in range(0, len(cat_cols)):
print (cat_cols[col])
print (df_train[cat_cols[col]].unique())
pp = pd.value_counts(df_train[cat_cols[col]])
pp.plot.bar()
plt.show()
# From the graphs, only **ps_car_10_cat** is highly skewed hence can be removed from training and test set.
# In[ ]:
'''
cat_cols_to_delete = [ 'ps_car_10_cat']
for col in cat_cols_to_delete:
df_train.drop([col], axis=1, inplace=True)
df_test.drop([col], axis=1, inplace=True)
'''
# ## Continuous/Ordinal Features
#
# Features having different prefixes such as **ind**, **reg**, **car** and **calc**; excluding binary and categorical features.
# In[ ]:
ind_cols_no_bin_cat = []
reg_cols_no_bin_cat = []
car_cols_no_bin_cat = []
calc_cols_no_bin_cat = []
for col in train_cols:
if (('ind' in str(col)) and ('bin' not in str(col)) and ('cat' not in str(col))):
ind_cols_no_bin_cat.append(col)
if (('reg' in str(col)) and ('bin' not in str(col)) and ('cat' not in str(col))):
reg_cols_no_bin_cat.append(col)
if (('car' in str(col)) and ('bin' not in str(col)) and ('cat' not in str(col))):
car_cols_no_bin_cat.append(col)
if (('calc' in str(col)) and ('bin' not in str(col)) and ('cat' not in str(col))):
calc_cols_no_bin_cat.append(col)
# ### Visualizing **ind** features
#
# (Uncomment the following snippets of code to visualzie the various grouped features. They take a long time to load hence I have commented them out)
# In[ ]:
'''
what_col = ind_cols_no_bin_cat
for col in range(0, len(what_col)):
print (what_col[col])
print (df_train[what_col[col]].unique())
pp = pd.value_counts(df_train[what_col[col]])
pp.plot.bar()
plt.show()
'''
# Column **ps_ind_14** is heavily skewed hence can be removed.
# ### Visualizing **reg** features
# In[ ]:
'''
what_col = reg_cols_no_bin_cat
for col in range(0, len(what_col)):
print (what_col[col])
print (df_train[what_col[col]].unique())
pp = pd.value_counts(df_train[what_col[col]])
pp.plot.bar()
plt.show()
'''
# Column **ps_reg_03** does not seem to show anything at all, hence can be removed.
#
# ### Visualizing **car** features
# In[ ]:
'''
what_col = car_cols_no_bin_cat
for col in range(0, len(what_col)):
print (what_col[col])
print (df_train[what_col[col]].unique())
pp = pd.value_counts(df_train[what_col[col]])
pp.plot.bar()
plt.show()
'''
# ### Visualizing **calc** features
# In[ ]:
'''
what_col = calc_cols_no_bin_cat
for col in range(0, len(what_col)):
print (what_col[col])
print (df_train[what_col[col]].unique())
pp = pd.value_counts(df_train[what_col[col]])
pp.plot.bar()
plt.show()
'''
# Columns belonging to type ***calc***:
# * **ps_calc_01**,
# * **ps_calc_02**,
# * **ps_calc_03**
#
# have a uniform distribution, which do not offer anything significant. Hence these can also be removed.
# In[ ]:
''' other_cols_to_delete = ['ps_ind_14', 'ps_calc_01', 'ps_calc_02', 'ps_calc_03', 'ps_reg_03']
for col in other_cols_to_delete:
df_train.drop([col], axis=1, inplace=True)
df_test.drop([col], axis=1, inplace=True)'''
# # Feature Engineering
#
# ### NOTE: ALWAYS REMEMBER TO INCLUDE SAME SET OF FEATURES FOR THE TEST DATA ALSO!!
# In[ ]:
'''
for col1 in int_cols:
for col2 in float_cols:
l_mean =
df_train[col1 + '_' + col2] =
'''
# ## New Binary Features
#
# Here I have included logical AND, OR and XOR operation between every binary feature.
# In[ ]:
train_cols = df_train.columns
bin_cols = df_train.columns[df_train.columns.str.endswith('bin')]
'''
for i in ["X1","X2"]:
for j in ["X2","X3"]:
if i != j:
col_name = i + j
k[col_name + '_OR'] = k[i]|k[j]
k[col_name + '_AND'] = k[i]&k[j]
k[col_name + '_XOR'] = k[i]^k[j]
def second_order(df, c_names):
names_col=[]
pp=0
for i in c_names[:c_names.size-1]:
for j in c_names[pp:c_names.size]:
if i != j:
col_name = i + str('_') + j
df[col_name + '_OR'] = df[i]|df[j]
df[col_name + '_AND'] = df[i]&df[j]
df[col_name + '_XOR'] = df[i]^df[j]
#col_name = ii + str('_and_') + jj
#names_col.append(col_name)
#df[col_name] = df[ii]&df[jj]
pp+=1
return df, names_col
df_train, train_new_cols = second_order(df_train, bin_cols)
df_test, test_new_cols = second_order(df_test, bin_cols)
print(len(df_train.columns))
print(len(df_test.columns))
'''
# ## New Continuous/Ordinal Features (*in progress*)
# In[ ]:
'''
print(len(df_train.columns))
#new_cont_ord_cols = [c for c in df_train.columns if not c.startswith('ps_calc_')]
#new_cont_ord_cols = [c for c in df_train.columns if not c.endswith('bin') ]
for col in no_bin_cat_cols:
#df_train[col + str('_greater_median')] = (df_train[col].values > df_train[col].median()).astype(np.int)
#df_train[col + str('_greater_mean')] = (df_train[col].values > df_train[col].mean()).astype(np.int)
df_train[col + str('_sq')] = np.power(df_train[col].values,2).astype(np.float32)
df_train[col + str('_sqr')] = np.square(df_train[col].values).astype(np.float32)
df_train[col + str('_log')] = np.log(np.abs(df_train[col].values) + 1)
#df_train[col + str('_exp')] = np.exp(df_train[col].values) - 1
#new_cont_ord_test_cols = [c for c in df_test.columns if not c.startswith('ps_calc_')]
for col in no_bin_cat_cols:
#df_test[col + str('_greater_median')] = (df_test[col].values > df_test[col].median()).astype(np.int)
#df_test[col + str('_greater_mean')] = (df_test[col].values > df_test[col].mean()).astype(np.int)
df_test[col + str('_sq')] = np.power(df_test[col].values,2).astype(np.float32)
df_test[col + str('_sqr')] = np.square(df_test[col].values).astype(np.float32)
df_test[col + str('_log')] = np.log(np.abs(df_test[col].values) + 1)
#df_test[col + str('_exp')] = np.exp(df_test[col].values) - 1
'''
# ## New Second Order Continuous/Ordinal Features (*based on Gradient Boosting feature importance*)
# In[ ]:
'''
new_col =['ps_car_12', 'ps_car_14', 'ps_car_15', 'ps_car_13', 'ps_reg_03', 'ps_ind_03', 'ps_ind_15', 'ps_reg_02', 'ps_reg_01', 'ps_calc_02', 'ps_calc_11', 'ps_calc_10']
def new_second_order(df, c_names):
names_col=[]
pp=0
for i in c_names[:len(c_names)-1]:
for j in c_names[pp:len(c_names)]:
if i != j:
col_name = i + str('_*_') + j
df[col_name] = df[i] * df[j]
#col_name = ii + str('_and_') + jj
#names_col.append(col_name)
#df[col_name] = df[ii]&df[jj]
pp+=1
return df, names_col
df_train, train_new_cols = new_second_order(df_train, new_col)
df_test, test_new_cols = new_second_order(df_test, new_col)
'''
# In[ ]:
print(len(df_train.columns))
print(len(df_test.columns))
# ## Correlation
# In[ ]:
'''
sns.set(style="white")
corr = df_train.corr()
f, ax = plt.subplots(figsize=(18, 15))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr, cmap=cmap, vmax=.3, center=0,square=True, linewidths=.5, cbar_kws={"shrink": .5})
plt.show()
'''
# Outrageous! Not even a single **calc** feature seems to have any interest in indulging themselves with anything!! It is better to remove them all!!
# In[ ]:
'''
removed_calc_cols = []
for col in df_train.columns:
if ('calc' in str(col)):
removed_calc_cols.append(col)
#unwanted = train.columns[train.columns.str.startswith('ps_calc_')]
df_train = df_train.drop(removed_calc_cols, axis=1)
df_test = df_test.drop(removed_calc_cols, axis=1)
'''
# In[ ]:
df_train.replace(np.nan, -1, inplace=True)
df_test.replace(np.nan, -1, inplace=True)
print('Done')
# # Modeling
# ## Gradient Boosting
# In[ ]:
'''
X_train = df_train.drop(['id'],axis = 1)
X_id_train = df_train['id'].values
Y_train = target.values
X_test = df_test.drop(['id'], axis=1)
X_id_test = df_test['id'].values
'''
# In[ ]:
'''
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
GBR = GradientBoostingRegressor(n_estimators = 100, learning_rate = 0.02, max_depth=7, random_state = 0, loss='ls')
#GBR = GradientBoostingClassifier(learning_rate = 0.02, n_estimators = 500, max_depth = 9, min_samples_split = 2, min_samples_leaf = 2, max_features = 10, random_state=123)
GBR.fit(X_train, Y_train)
print (GBR)
'''
# In[ ]:
#--- List of important features for Gradient Boosting Regressor ---
'''
features_list = X_train.columns.values
feature_importance = GBR.feature_importances_
sorted_idx = np.argsort(feature_importance)
print(sorted_idx)
'''
# In[ ]:
'''
plt.figure(figsize=(15, 15))
plt.barh(range(len(sorted_idx)), feature_importance[sorted_idx], align='center')
plt.yticks(range(len(sorted_idx)), features_list[sorted_idx])
plt.xlabel('Importance')
plt.title('Feature importances')
plt.draw()
plt.show()
'''
# In[ ]:
#--- Predicting Gradient boost result for test data ---
# y_GBR = GBR.predict(X_test)
# In[ ]:
'''
final = pd.DataFrame()
final['id'] = X_id_test
final['target'] = y_GBR
final.to_csv('Gradient_Boost_1.csv', index=False)
print('DONE!!')
'''
# ## XGBoost
# In[ ]:
import xgboost as xgb
# In[ ]:
def gini(actual, pred, cmpcol = 0, sortcol = 1):
    """Gini coefficient of *pred* as a ranking of *actual*.

    Rows are sorted by descending prediction (ties broken by original
    position) and the cumulative share of positives is compared against
    the diagonal of a random ordering.  ``cmpcol``/``sortcol`` are kept
    for interface compatibility but unused.
    """
    assert len(actual) == len(pred)
    # Columns: actual value, prediction, original position.
    # FIX: dtype=np.float crashed on NumPy >= 1.24 (alias removed);
    # the builtin float is the documented replacement.
    arr = np.asarray(np.c_[actual, pred, np.arange(len(actual))], dtype=float)
    # Sort by prediction descending, stable on original order.  (The local
    # was renamed from 'all', which shadowed the builtin.)
    arr = arr[np.lexsort((arr[:, 2], -1 * arr[:, 1]))]
    total_losses = arr[:, 0].sum()
    gini_sum = arr[:, 0].cumsum().sum() / total_losses
    gini_sum -= (len(actual) + 1) / 2.
    return gini_sum / len(actual)

def gini_normalized(a, p):
    """Gini of predictions *p*, scaled by the best achievable Gini for *a*."""
    return gini(a, p) / gini(a, a)

def gini_xgb(preds, dtrain):
    """XGBoost-compatible eval metric: ('gini', normalized_gini)."""
    labels = dtrain.get_label()
    gini_score = gini_normalized(labels, preds)
    return 'gini', gini_score
# In[ ]:
'''
from sklearn.model_selection import StratifiedKFold
kfold = 5
skf = StratifiedKFold(n_splits=kfold, random_state=42)
'''
# In[ ]:
# XGBoost hyper-parameters (passed straight to xgb.train).
params = {
    'min_child_weight': 10.0,        # min sum of instance weight in a child
    'objective': 'binary:logistic',  # binary classification, probability output
    'max_depth': 7,                  # max tree depth
    'max_delta_step': 1.8,           # cap on each leaf's output step
    'colsample_bytree': 0.4,         # fraction of features sampled per tree
    'subsample': 0.8,                # fraction of rows sampled per tree
    'eta': 0.025,                    # learning rate
    'gamma': 0.65,                   # min loss reduction required to split
    # NOTE(review): xgb.train() takes num_boost_round as a function argument;
    # inside the params dict it is ignored -- confirm intent.
    'num_boost_round' : 700
}
# In[ ]:
'''
for i, (train_index, test_index) in enumerate(skf.split(X_train, Y_train)):
print('[Fold %d/%d]' % (i + 1, kfold))
X_train, X_valid = X_train[train_index], X_train[test_index]
y_train, y_valid = Y_train[train_index], Y_train[test_index]
# Convert our data into XGBoost format
d_train = xgb.DMatrix(X_train, y_train)
d_valid = xgb.DMatrix(X_valid, y_valid)
d_test = xgb.DMatrix(X_test.values)
watchlist = [(d_train, 'train'), (d_valid, 'valid')]
# Train the model! We pass in a max of 1,600 rounds (with early stopping after 70)
# and the custom metric (maximize=True tells xgb that higher metric is better)
mdl = xgb.train(params, d_train, 1600, watchlist, early_stopping_rounds=70, feval=gini_xgb, maximize=True, verbose_eval=100)
print('[Fold %d/%d Prediciton:]' % (i + 1, kfold))
# Predict on our test data
p_test = mdl.predict(d_test)
sub['target'] += p_test/kfold
'''
# ## Random Forest
# In[ ]:
'''
from sklearn.ensemble import RandomForestClassifier
RF = RandomForestClassifier(n_estimators=100, max_depth=8, criterion='entropy', min_samples_split=10, max_features=120, n_jobs=-1, random_state=123, verbose=1, class_weight = "balanced")
RF.fit(X_train, Y_train)
print(RF)
#--- List of important features ---
features_list = X_train.columns.values
feature_importance = RF.feature_importances_
sorted_idx = np.argsort(feature_importance)
print(sorted_idx)
plt.figure(figsize=(15, 15))
plt.barh(range(len(sorted_idx)), feature_importance[sorted_idx], align='center')
plt.yticks(range(len(sorted_idx)), features_list[sorted_idx])
plt.xlabel('Importance')
plt.title('Feature importances')
plt.draw()
plt.show()
Y_pred = RF.predict(X_test)
final = pd.DataFrame()
final['id'] = X_id_test
final['target'] = Y_pred
final.to_csv('RF.csv', index=False)
print('DONE!!')
'''
# In[ ]:
#-- Adaboost ---
'''
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
Ada_R = AdaBoostRegressor(DecisionTreeRegressor(max_depth=7), n_estimators = 400, random_state = 99)
Ada_R.fit(X_train, Y_train)
print (Ada_R)
features_list = X_train.columns.values
feature_importance = Ada_R.feature_importances_
sorted_idx = np.argsort(feature_importance)
print(sorted_idx)
plt.figure(figsize=(15, 15))
plt.barh(range(len(sorted_idx)), feature_importance[sorted_idx], align='center')
plt.yticks(range(len(sorted_idx)), features_list[sorted_idx])
plt.xlabel('Importance')
plt.title('Feature importances')
plt.draw()
plt.show()
#--- Predicting Ada boost result for test data ---
y_Ada = Ada_R.predict(X_test)
final = pd.DataFrame()
final['id'] = X_id_test
final['target'] = y_Ada
final.to_csv('Ada_Boost_1.csv', index=False)
print('DONE!!')
'''
# In[ ]:
'''
from sklearn.cross_validation import train_test_split
import xgboost as xgb
X_train = df_train.drop(['id'],axis = 1)
X_id_train = df_train['id'].values
Y_train = target.values
X_test = df_test.drop(['id'], axis=1)
X_id_test = df_test['id'].values
x_train, x_valid, y_train, y_valid = train_test_split(X_train, Y_train, test_size=0.2, random_state=4242)
print('Train samples: {} Validation samples: {}'.format(len(x_train), len(x_valid)))
d_train = xgb.DMatrix(x_train, y_train)
d_valid = xgb.DMatrix(x_valid, y_valid)
d_test = xgb.DMatrix(X_test)
params = {}
params['min_child_weight'] = 10.0
params['objective'] = 'binary:logistic'
params['eta'] = 0.02
params['silent'] = True
params['max_depth'] = 9
params['subsample'] = 0.9
params['colsample_bytree'] = 0.9
# Define the gini metric - from https://www.kaggle.com/c/ClaimPredictionChallenge/discussion/703#5897
def gini(actual, pred, cmpcol = 0, sortcol = 1):
assert( len(actual) == len(pred) )
all = np.asarray(np.c_[ actual, pred, np.arange(len(actual)) ], dtype=np.float)
all = all[ np.lexsort((all[:,2], -1*all[:,1])) ]
totalLosses = all[:,0].sum()
giniSum = all[:,0].cumsum().sum() / totalLosses
giniSum -= (len(actual) + 1) / 2.
return giniSum / len(actual)
def gini_normalized(a, p):
return gini(a, p) / gini(a, a)
# Create an XGBoost-compatible metric from Gini
def gini_xgb(preds, dtrain):
labels = dtrain.get_label()
gini_score = gini_normalized(labels, preds)
return [('gini', gini_score)]
watchlist = [(d_train, 'train'), (d_valid, 'valid')]
model = xgb.train(params, d_train, 100, watchlist, early_stopping_rounds=100, feval=gini_xgb, maximize=True, verbose_eval=10)
xgb.plot_importance(model)
fig, ax = plt.subplots(figsize=(12,18))
plt.show()
p_test = model.predict(d_test)
#--- Submission file ---
sub = pd.DataFrame()
sub['id'] = X_id_test
sub['target'] = p_test
sub.to_csv('xgb2.csv', index=False)
importance = model.get_fscore(fmap='xgb.fmap')
importance = sorted(importance.items(), key=operator.itemgetter(1))
df = pd.DataFrame(importance, columns=['feature', 'fscore'])
plt.figure()
df.plot()
df.plot(kind='barh', x='feature', y='fscore', legend=False, figsize=(10, 25))
plt.gcf().savefig('features_importance.png')
'''
# ### Can you think of more features? Let me know in the comments!
#
# # STAY TUNED FOR MORE UPDATES !!!
| [
"adithyagirish@berkeley.edu"
] | adithyagirish@berkeley.edu |
9f25304223889a65bb1ac1016b8110a748efbb9d | 4438b60b7095d56c7fc2ee8396ea98ec620f7f51 | /etro.py | 9f2571163bab0f826fcb1b045c10e654888bf408 | [] | no_license | haizi-zh/firenze | 8a53e49a55f0827f8b0179164ed6c1ea9a3b005d | 1129bfd0df6f9d661b4f01ce514595f1eba784c3 | refs/heads/master | 2021-01-18T15:24:33.495830 | 2013-07-13T15:57:10 | 2013-07-13T15:57:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,946 | py | # coding=utf-8
import json
import string
import re
import common as cm
import geosense as gs
__author__ = 'Zephyre'
db = None
def fetch_continents(data):
    """Expand *data* into one request dict per continent.

    The Etro store locator indexes continents 1-4; each returned dict is a
    shallow copy of *data* with the continent index appended to its URL.
    """
    result = []
    # FIX: the original used xrange(), which exists only on Python 2;
    # range() behaves identically here on both Python 2 and 3.
    for i in range(1, 5):
        d = data.copy()
        d['url'] = '%s%d/' % (data['url'], i)
        result.append(d)
    return result
def fetch_countries(data):
    """Scrape the per-country links from a continent page of the Etro
    store locator and return one seed dict per country.

    On download failure the error is recorded via cm.dump() and an empty
    list is returned so the crawl can continue.  (Python 2 module.)
    """
    url = data['url']
    try:
        body = cm.get_data(url)
    except Exception:
        print 'Error occured: %s' % url
        dump_data = {'level': 0, 'time': cm.format_time(), 'data': {'url': url}, 'brand_id': data['brand_id']}
        cm.dump(dump_data)
        return []
    # Locate the <nav class="country-list"> element; bail out if absent.
    m = re.search(ur'<\s*nav\s+class\s*=\s*"country-list"\s*>', body)
    if m is None:
        return []
    # Extract the balanced <nav>...</nav> block.
    sub, start, end = cm.extract_closure(body[m.start():], ur'<nav\b', ur'</nav>')
    result = []
    # Each <li><a href="...">Name</a> entry becomes a country-level seed:
    # group 1 is the link target, group 2 the country name.
    for m in re.findall(ur'<\s*li\s*>\s*<a\s+href\s*=\s*"(.+?)"\s+title=.*?>\s*(.+?)\s*<\s*/\s*a\s*>', sub):
        d = data.copy()
        d['url'] = m[0].strip()
        d['country'] = m[1].strip().upper()
        result.append(d)
    return result
def fetch_stores(data):
    """Collect the individual store-page URLs from a country page.

    Returns one seed dict (copy of *data* with 'url' replaced) per store;
    on download failure the error is dumped and [] is returned.
    """
    url = data['url']
    try:
        body = cm.get_data(url)
    except Exception:
        print 'Error occured: %s' % url
        dump_data = {'level': 0, 'time': cm.format_time(), 'data': {'url': url}, 'brand_id': data['brand_id']}
        cm.dump(dump_data)
        return []
    result = []
    # Each <li class="store"> block links to one store detail page.
    for m in re.findall(ur'<li class="store">.+?<a href="(.+?)".+?</li>', body, re.S):
        d = data.copy()
        d['url'] = m.strip()
        result.append(d)
    return result
def fetch_store_details(data):
url = data['url']
try:
body = cm.get_data(url)
except Exception:
print 'Error occured: %s' % url
dump_data = {'level': 0, 'time': cm.format_time(), 'data': {'url': url}, 'brand_id': data['brand_id']}
cm.dump(dump_data)
return []
entry = cm.init_store_entry(data['brand_id'], data['brandname_e'], data['brandname_c'])
ret = gs.look_up(data['country'], 1)
if ret is not None:
entry[cm.country_e] = ret['name_e']
m = re.search(ur'<span class="type">Address</span>\s*<p>(.+?)</p>', body, re.S)
if m is not None:
addr = cm.reformat_addr(m.group(1))
country, province, city = gs.addr_sense(addr)
if country is not None and entry[cm.country_e] == '':
entry[cm.country_e] = country
if province is not None:
entry[cm.province_e] = province
if city is not None:
entry[cm.city_e] = city
entry[cm.addr_e] = addr
m = re.search(ur'<span class="type">Phone</span>\s*<p>(.+?)</p>', body, re.S)
if m is not None:
entry[cm.tel] = m.group(1)
m = re.search(ur'<span class="type">Opening hours</span>\s*<p>(.+?)</p>', body, re.S)
if m is not None:
entry[cm.hours] = cm.reformat_addr(m.group(1))
m = re.search(ur'<span class="type">You can find</span>\s*<p>(.+?)</p>', body, re.S)
if m is not None:
entry[cm.store_type] = cm.reformat_addr(m.group(1))
m = re.search(ur'google.maps.LatLng\(\s*(-?\d+\.\d+)\s*,\s*(-?\d+\.\d+)', body, re.S)
entry[cm.lat]=string.atof(m.group(1))
entry[cm.lng]=string.atof(m.group(2))
gs.field_sense(entry)
print '(%s / %d) Found store: %s, %s (%s, %s)' % (data['brandname_e'], data['brand_id'],
entry[cm.name_e], entry[cm.addr_e], entry[cm.country_e],
entry[cm.continent_e])
db.insert_record(entry, 'stores')
return [entry]
def fetch(level=1, data=None, user='root', passwd=''):
    """Crawl the Etro store locator and persist every store found.

    Clears any previous rows for the brand, then walks the crawl tree
    from the continent listing down to individual stores.
    """
    def func(data, level):
        """Dispatch one crawl step.

        :param data: seed dict for this step (must contain 'url').
        :param level: 0: continent list; 1: country list; 2: store list;
            3: store details (leaf -- records the store).
        """
        if level == 0:
            # Continent list
            return [{'func': lambda data: func(data, 1), 'data': s} for s in fetch_continents(data)]
        if level == 1:
            # Country list
            return [{'func': lambda data: func(data, 2), 'data': s} for s in fetch_countries(data)]
        if level == 2:
            # Store list
            return [{'func': lambda data: func(data, 3), 'data': s} for s in fetch_stores(data)]
        if level == 3:
            # Store details (leaf node)
            return [{'func': None, 'data': s} for s in fetch_store_details(data)]
        else:
            return []
    # Walk from the root node (continent listing, level 0).
    if data is None:
        data = {'url': 'http://www.etro.com/en_wr/storefinder/get/list/continent/',
                'brand_id': 10127, 'brandname_e': u'Etro', 'brandname_c': u'艾特罗'}
    global db
    db = cm.StoresDb()
    db.connect_db(user=user, passwd=passwd)
    # Remove stale rows for this brand before re-crawling.
    db.execute(u'DELETE FROM %s WHERE brand_id=%d' % ('stores', data['brand_id']))
    results = cm.walk_tree({'func': lambda data: func(data, 0), 'data': data})
    db.disconnect_db()
    return results
return results | [
"haizi.zh@gmail.com"
] | haizi.zh@gmail.com |
b2851bd4f588b8c3675082d85433d7b02b70f58a | 82dca8287b9cc32599404c402240a177f7dfb9f7 | /venv/bin/sqlformat | b9ae5435c634511caba14ee0aff440ee9ac61801 | [] | no_license | kairat3/homework-tutorial | 8abaf2710a76c042701882cb904ea5cbac75f600 | 2babb6ddbd04fd596b0a5fe83e9b7eb1969c24d9 | refs/heads/master | 2023-05-05T14:34:25.121718 | 2021-06-01T09:40:51 | 2021-06-01T09:40:51 | 372,775,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | #!/home/ka/projects/tutorial/venv/bin/python
# -*- coding: utf-8 -*-
# Console-script shim generated by setuptools for the `sqlformat` command:
# it normalises argv[0] and delegates to sqlparse's CLI entry point.
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
    # Strip the Windows wrapper suffixes ('-script.pyw' / '.exe') from
    # argv[0] so usage/help output shows the plain command name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"jvckmiller@gmail.com"
] | jvckmiller@gmail.com | |
1faf061beb78dad3524eae193c52e3e541f3652e | 28dbe47aba287ed94ef7bba734203736bcc06249 | /.history/dmac_20200624221838.py | 1dd8a319dcdd7456874878e6ce201b25c7df7cbf | [] | no_license | ntung88/Trading_Algorithms | 242fd816b19df95e02e9fcd8c5c91c862d2ede40 | d96488b1754e3751f739d9c3f094a8f8dc54a0a9 | refs/heads/master | 2022-11-19T16:04:07.800344 | 2020-07-17T21:14:10 | 2020-07-17T21:14:10 | 276,239,640 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,599 | py | import yfinance as yf
import numpy as np
from scipy.stats import norm
import pandas as pd
from pandasgui import show
from scipy.optimize import minimize, LinearConstraint
import matplotlib.pyplot as plt
def clean_data(data):
    """Return the rows of *data* in which every column is non-NaN."""
    complete_rows = ~np.isnan(data).any(axis=1)
    return data[complete_rows]

def calc_crossovers(sma, lma):
    """Signed crossover events between two moving-average frames.

    Compares the 'Close' columns over the trailing span where the long
    average is fully defined and returns a Series holding +1 where the
    short average moves above the long one and -1 where it drops below;
    all non-crossover points are dropped.
    """
    window = len(clean_data(lma))
    above = (sma['Close'] > lma['Close'])[-window:]
    deltas = above.astype(int).diff()[1:]
    return deltas[deltas != 0]
def profit(data, crossovers):
    """Total gain of a long-only strategy driven by *crossovers*.

    *data* is a price Series and *crossovers* a Series of +1 (buy) / -1
    (sell) events indexed into it.  A leading -1 assumes a position held
    since the first sample; a trailing +1 is valued at the last sample.
    Returns 0 when there are no events.
    """
    if crossovers.empty:
        return 0
    total = 0
    # Leading sell: close a position assumed open at the first price.
    if crossovers.iloc[0] == -1:
        total += data.loc[crossovers.index[0]] - data.iloc[0]
    # Every buy event is valued against the immediately following event.
    for buy_ts, sell_ts in zip(crossovers.index[:-1], crossovers.index[1:]):
        if crossovers.loc[buy_ts] == 1:
            total += data.loc[sell_ts] - data.loc[buy_ts]
    # Trailing buy: position still open at the final price.
    if crossovers.iloc[-1] == 1:
        total += data.iloc[-1] - data.loc[crossovers.index[-1]]
    return total
def optimize(data):
    """Grid-seeded COBYLA search for the best (short, long) window pair.

    Seeds a local minimization of run_analysis (negative strategy profit)
    from a coarse grid of window pairs and keeps the best local optimum.
    Returns (short_window, long_window, minimum_objective).
    """
    # Constraints: long window >= short window, and short window >= 5.
    cons = ({'type': 'ineq', 'fun': lambda x: x[1] - x[0]},
            {'type': 'ineq', 'fun': lambda x: x[0] - 5})
    # 'type':'eq', 'fun': lambda x : max([x[i]-int(x[i]) for i in range(len(x))]),
    short_seeds = range(5, 300, 30)
    long_seeds = range(20, 800, 40)
    # short_seeds = [100]
    # long_seeds = [750]
    minimum = float('inf')
    best_short = 0
    best_long = 0
    for short_seed in short_seeds:
        for long_seed in long_seeds:
            # Only seed feasible pairs (long strictly greater than short).
            if long_seed > short_seed:
                # rhobeg: initial step size; catol=0 forbids constraint slack.
                res = minimize(run_analysis, [short_seed, long_seed], args=(data,), method='COBYLA', constraints=cons, options={'rhobeg': 10.0, 'catol': 0.0})
                if res.fun < minimum:
                    best_short = res.x[0]
                    best_long = res.x[1]
                    minimum = res.fun
    return (int(round(best_short)), int(round(best_long)), minimum)
def run_analysis(periods, data):
    """Negated DMAC profit for the given (short, long) rolling windows.

    *periods* may hold floats (as supplied by scipy optimizers); each is
    rounded to the nearest integer window length.  The profit is negated
    so the value can be handed directly to a minimizer.
    """
    short_window = int(round(periods[0]))
    long_window = int(round(periods[1]))
    short_avg = data.rolling(short_window).mean()
    long_avg = data.rolling(long_window).mean()
    events = calc_crossovers(short_avg, long_avg)
    return -1 * profit(data['Close'], events)
def main():
    """Download price history and visualize a 50/200 DMAC strategy on APA."""
    tickers = 'SPY AAPL MRNA TSLA MMM APA'
    # Full available history, one column group per ticker (network call).
    data = yf.download(tickers, period='max', group_by='ticker')
    dirty = pd.DataFrame(data['APA'])
    # Drop rows with any NaN before computing moving averages.
    frame = clean_data(dirty)
    # periods = optimize(frame)
    # visualize(data, periods[0], periods[1])
    visualize(frame, 50, 200)
def visualize(data, short_period, long_period):
    """Plot prices with buy (green) and sell (red) crossover markers."""
    sma = data.rolling(short_period).mean()
    lma = data.rolling(long_period).mean()
    crossovers = calc_crossovers(sma, lma)
    # +1 events are buys, -1 events are sells.
    buys = pd.DataFrame(crossovers[crossovers == 1.0])
    sells = pd.DataFrame(crossovers[crossovers == -1.0])
    # Scale the +/-1 markers by the closing price so they sit on the curve.
    # NOTE(review): multiplying a DataFrame by a Series aligns on labels --
    # sell markers end up at -price; confirm this is the intended y value.
    plot_sells = sells * data['Close']
    # plot_sells[np.isnan(plot_sells)] = 0
    plot_buys = buys * data['Close']
    # print(len(plot_sells.index), len(plot_sells['Close']))
    # plot_buys[np.isnan(plot_buys)] = 0
    data.plot(color='black')
    # NOTE(review): DataFrame.plot(kind='scatter') expects *column names*
    # for x/y; passing an Index/Series object here likely raises on current
    # pandas -- verify against the pandas version in use.
    plot_sells.plot(kind='scatter', x=plot_sells.index, y=plot_sells['Close'], color='red')
    plot_buys.plot(kind='scatter', x=plot_buys.index, y=plot_buys['Close'], color='green')
    plt.show()
# Run the strategy pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()
"nathantung@Nathans-MacBook-Pro.local"
] | nathantung@Nathans-MacBook-Pro.local |
e7b577c747abdc08c9955fd13104ca6a9f5c9d3c | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_199/1725.py | c78439354107fb7183c695dfd3799280485c6f7d | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | INPUT_FILE = 'A-large.in'
OUTPUT_FILE = 'A-large_out.txt'
def solve(f_in):
l = f_in.readline().strip()
lst = l.split(' ')
k = int(lst[1])
pancakesStr = lst[0]
pancakes = 0
nPancakes = len(pancakesStr)
for i in range(nPancakes):
if pancakesStr[i] == '-':
pancakes = pancakes | (1 << (nPancakes - i - 1))
countFlips = 0
flip = 0
for i in range(k):
flip = flip | (1 << i)
for i in range(nPancakes - k + 1):
if pancakes & (1 << (nPancakes - i - 1)):
countFlips = countFlips + 1
pancakes = pancakes ^ (flip << nPancakes - k - i)
if pancakes == 0:
result = countFlips
else:
result = "IMPOSSIBLE"
return result
with open(INPUT_FILE, 'r') as f:
with open(OUTPUT_FILE, 'w') as f_out:
T = int(f.readline())
for i in range(T):
f_out.write('Case #%d: %s\n' % (i + 1, solve(f)))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
9c9a840d4942d09bbb6213039764fca57b919091 | ca507259c36a4299666f4c064f25832f5c3f45c1 | /test/test_mosaic_dto.py | f7959f7a562463df95275868b61ee3b8768dc54e | [] | no_license | fullcircle23/symbol-openapi-python-client | ae38a2537d1f2eebca115733119c444b79ec4962 | 3728d30eb1b5085a5a5e991402d180fac8ead68b | refs/heads/master | 2022-04-15T06:20:47.821281 | 2020-04-16T02:39:14 | 2020-04-16T02:39:14 | 254,701,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,273 | py | # coding: utf-8
"""
****************************************************************************
Copyright (c) 2016-present,
Jaguar0625, gimre, BloodyRookie, Tech Bureau, Corp. All rights reserved.
This file is part of Catapult.
Catapult is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Catapult is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with Catapult. If not, see <http://www.gnu.org/licenses/>.
****************************************************************************
Catapult REST Endpoints
OpenAPI Specification of catapult-rest 1.0.20.22 # noqa: E501
The version of the OpenAPI document: 0.8.9
Contact: ravi@nem.foundation
NOTE: This file is auto generated by Symbol OpenAPI Generator:
https://github.com/nemtech/symbol-openapi-generator
Do not edit this file manually.
"""
from __future__ import absolute_import
import unittest
import datetime
import symbol_openapi_client
from symbol_openapi_client.models.mosaic_dto import MosaicDTO # noqa: E501
from symbol_openapi_client.rest import ApiException
class TestMosaicDTO(unittest.TestCase):
    """MosaicDTO unit test stubs.

    Auto-generated by the Symbol OpenAPI generator (see the file header);
    do not hand-edit beyond filling in the stubs.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Test MosaicDTO
        include_option is a boolean, when False only required
        params are included, when True both required and
        optional params are included """
        # model = symbol_openapi_client.models.mosaic_dto.MosaicDTO() # noqa: E501
        # NOTE(review): every MosaicDTO field used below is required, so the
        # two branches are currently identical apart from a trailing comma;
        # they are kept separate to match the generator template.
        if include_optional :
            return MosaicDTO(
                id = '0DC67FBE1CAD29E3',
                supply = 123456,
                start_height = 1,
                owner_public_key = 'AC1A6E1D8DE5B17D2C6B1293F1CAD3829EEACF38D09311BB3C8E5A880092DE26',
                owner_address = '9081FCCB41F8C8409A9B99E485E0E28D23BD6304EF7215E01A',
                revision = 1,
                flags = 56,
                divisibility = 56,
                duration = 200
            )
        else :
            return MosaicDTO(
                id = '0DC67FBE1CAD29E3',
                supply = 123456,
                start_height = 1,
                owner_public_key = 'AC1A6E1D8DE5B17D2C6B1293F1CAD3829EEACF38D09311BB3C8E5A880092DE26',
                owner_address = '9081FCCB41F8C8409A9B99E485E0E28D23BD6304EF7215E01A',
                revision = 1,
                flags = 56,
                divisibility = 56,
                duration = 200,
            )

    def testMosaicDTO(self):
        """Test MosaicDTO"""
        # Construction must succeed both with and without optional params.
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"fullcircle2324@gmail.com"
] | fullcircle2324@gmail.com |
e2d5dea60a9718ed889e701aa2ca4e555be2b88d | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /pKyeEDkNqZraqS3rW_18.py | 58da6c5ea31db532d3eaecfa2747e4c6be4dcc37 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41 | py |
def k_to_k(n, k):
return (k**k == n)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
c5f19c607da75bed9b9a36a1a5f11b71b04c0f5e | 637bb3f080ff18001a732d9bf607ef962b09c5dd | /AtividadeURI_01/uri_1012_area.py | 8d96cd42ca3bd54dc511638969355f63eba86ad3 | [] | no_license | magnoazneto/IFPI_Algoritmos | 995296fa22445c57981a1fad43e1ef7a8da83e5e | 3b5e79c79b7a1fb7a08206719fd418fba1b39691 | refs/heads/master | 2022-02-27T10:59:17.123895 | 2019-11-17T13:51:35 | 2019-11-17T13:51:35 | 186,868,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | def main():
entradas = input().split()
A = float(entradas[0])
B = float(entradas[1])
C = float(entradas[2])
tria_retangulo = (A*C) / 2
circulo = 3.14159 * (C**2)
trapezio = ((A+B) * C) / 2
quadrado = B ** 2
retangulo = A * B
print('TRIANGULO: {:.3f}'.format(tria_retangulo))
print('CIRCULO: {:.3f}'.format(circulo))
print('TRAPEZIO: {:.3f}'.format(trapezio))
print('QUADRADO: {:.3f}'.format(quadrado))
print('RETANGULO: {:.3f}'.format(retangulo))
main() | [
"magnoazneto@gmail.com"
] | magnoazneto@gmail.com |
6b25e915fc6c577b588a7392594f156bb81877e5 | 4592d7a83df7c7baeff91e5ef046cde59cf11dd3 | /Python/Python_OOP/debug/debug/urls.py | fdb5d20f9865a49b203e987fda17c210c63f9238 | [] | no_license | gxnews8/CodingDojo | 8136870b2653c6337d418dc03a4754fc66fd8a9e | b81faab167aabd0abad1aecdbf3594d6b43d740d | refs/heads/master | 2021-01-11T08:17:27.539115 | 2017-06-24T20:04:05 | 2017-06-24T20:04:05 | 76,394,057 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | """debug URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
urlpatterns = [
url(r'^', include('apps.first_app.urls')),
]
| [
"gxnews8@gmail.com"
] | gxnews8@gmail.com |
cd01a0671a16b17c6f88a80526d827fcc05fd55a | 52a15d4fabf68bf23a23799312ae40465764908c | /src/changeset/load.py | 56b0f7312eaf95e688e194baaccf8d0d1988df1e | [
"MIT",
"Apache-2.0"
] | permissive | jensl/critic | 2071a1b0600051967323df48f4d3a5656a5d2bb8 | c2d962b909ff7ef2f09bccbeb636333920b3659e | refs/heads/stable/1 | 2022-05-28T03:51:15.108944 | 2018-03-27T18:47:46 | 2018-03-29T15:08:30 | 6,430,552 | 224 | 36 | NOASSERTION | 2023-05-29T15:38:00 | 2012-10-28T18:26:04 | Python | UTF-8 | Python | false | false | 4,926 | py | # -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2012 Jens Lindström, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import diff
import dbutils
import gitutils
def loadChangeset(db, repository, changeset_id, filtered_file_ids=None, load_chunks=True):
return loadChangesets(db, repository,
changesets=[diff.Changeset.fromId(db, repository, changeset_id)],
filtered_file_ids=filtered_file_ids,
load_chunks=load_chunks)[0]
def loadChangesetsForCommits(db, repository, commits, filtered_file_ids=None, load_chunks=True):
commit_ids = dict([(commit.getId(db), commit) for commit in commits])
def getCommit(commit_id):
return commit_ids.get(commit_id) or gitutils.Commit.fromId(db, repository, commit_id)
cursor = db.cursor()
cursor.execute("SELECT id, parent, child FROM changesets WHERE child=ANY (%s) AND type='direct'", (commit_ids.keys(),))
changesets = []
for changeset_id, parent_id, child_id in cursor:
changesets.append(diff.Changeset(changeset_id, getCommit(parent_id), getCommit(child_id), "direct"))
return loadChangesets(db, repository, changesets, filtered_file_ids=filtered_file_ids, load_chunks=load_chunks)
def loadChangesets(db, repository, changesets, filtered_file_ids=None, load_chunks=True):
cursor = db.cursor()
changeset_ids = [changeset.id for changeset in changesets]
filtered_file_ids = list(filtered_file_ids) if filtered_file_ids else None
if filtered_file_ids is None:
cursor.execute("""SELECT changeset, file, path, old_sha1, new_sha1, old_mode, new_mode
FROM fileversions
JOIN files ON (files.id=fileversions.file)
WHERE changeset=ANY (%s)""",
(changeset_ids,))
else:
cursor.execute("""SELECT changeset, file, path, old_sha1, new_sha1, old_mode, new_mode
FROM fileversions
JOIN files ON (files.id=fileversions.file)
WHERE changeset=ANY (%s)
AND file=ANY (%s)""",
(changeset_ids, filtered_file_ids))
files = dict([(changeset.id, {}) for changeset in changesets])
for changeset_id, file_id, file_path, file_old_sha1, file_new_sha1, file_old_mode, file_new_mode in cursor.fetchall():
files[changeset_id][file_id] = diff.File(file_id, file_path,
file_old_sha1, file_new_sha1,
repository,
old_mode=file_old_mode,
new_mode=file_new_mode,
chunks=[])
if load_chunks:
if filtered_file_ids is None:
cursor.execute("""SELECT id, changeset, file, deleteOffset, deleteCount, insertOffset, insertCount, analysis, whitespace
FROM chunks
WHERE changeset=ANY (%s)
ORDER BY file, deleteOffset ASC""",
(changeset_ids,))
else:
cursor.execute("""SELECT id, changeset, file, deleteOffset, deleteCount, insertOffset, insertCount, analysis, whitespace
FROM chunks
WHERE changeset=ANY (%s)
AND file=ANY (%s)
ORDER BY file, deleteOffset ASC""",
(changeset_ids, filtered_file_ids))
for chunk_id, changeset_id, file_id, delete_offset, delete_count, insert_offset, insert_count, analysis, is_whitespace in cursor:
files[changeset_id][file_id].chunks.append(diff.Chunk(delete_offset, delete_count,
insert_offset, insert_count,
id=chunk_id,
is_whitespace=is_whitespace,
analysis=analysis))
for changeset in changesets:
changeset.files = diff.File.sorted(files[changeset.id].values())
return changesets
| [
"jl@opera.com"
] | jl@opera.com |
b9a24f96a2369aaff0a8e0ac012240974a522eba | 9a4bf3864d9e66ba285e2d28d570b920b47d1ae9 | /0886_Possible_Bipartition.py | 855be9f8c40a2796f8b15cc637eb0b4fa00bf602 | [] | no_license | actcheng/leetcode-solutions | bf32044bcd916912c86294d83638be08a27b9d8f | 4bf1a7814b5c76e11242e7933e09c59ede3284a3 | refs/heads/master | 2021-06-28T19:15:30.089116 | 2020-08-25T08:17:48 | 2020-08-25T08:17:48 | 139,708,553 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,317 | py | # Problem 886
# Date completed: 2020/05/27
# 1084 ms (18%)
class Solution:
def possibleBipartition(self, N: int, dislikes: List[List[int]]) -> bool:
rec = collections.defaultdict(set)
for a,b in dislikes:
rec[a].add(b)
rec[b].add(a)
wait = set(rec.keys())
queue = []
groups = [set([]), set([])]
group_dislike = [set([]),set([])]
while queue or wait:
key = queue.pop(0) if queue else wait.pop()
for i in [0,1]:
if key not in group_dislike[i]:
groups[i].add(key)
group_dislike[i].update(rec[key])
if key in wait: wait.remove(key)
for b in rec[key].intersection(wait):
if b in group_dislike[1-i]:
print(b,groups)
return False
groups[1-i].add(b)
group_dislike[1-i].update(rec[b])
queue.extend([val for val in rec[b] if val in wait])
wait.remove(b)
break
if len(group_dislike[0].intersection(group_dislike[1])) > 0: return False
return True
| [
"noreply@github.com"
] | actcheng.noreply@github.com |
cc06a1afd3cf990aab9e8017188708c6770ddf82 | 16b389c8dcace7f7d010c1fcf57ae0b3f10f88d3 | /docs/jnpr_healthbot_swagger/test/test_devicegroup_schema_publish.py | bd0b683c3c81f49498cdd4654c35eff56b402752 | [
"Apache-2.0"
] | permissive | Juniper/healthbot-py-client | e4e376b074920d745f68f19e9309ede0a4173064 | 0390dc5d194df19c5845b73cb1d6a54441a263bc | refs/heads/master | 2023-08-22T03:48:10.506847 | 2022-02-16T12:21:04 | 2022-02-16T12:21:04 | 210,760,509 | 10 | 5 | Apache-2.0 | 2022-05-25T05:48:55 | 2019-09-25T05:12:35 | Python | UTF-8 | Python | false | false | 994 | py | # coding: utf-8
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 1.0.0
Contact: healthbot-hackers@juniper.net
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.devicegroup_schema_publish import DevicegroupSchemaPublish # noqa: E501
from swagger_client.rest import ApiException
class TestDevicegroupSchemaPublish(unittest.TestCase):
"""DevicegroupSchemaPublish unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDevicegroupSchemaPublish(self):
"""Test DevicegroupSchemaPublish"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.devicegroup_schema_publish.DevicegroupSchemaPublish() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"nitinkr@juniper.net"
] | nitinkr@juniper.net |
b1b12a777f9b970ed264a9754df0d10d7c4e5e48 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_014/ch17_2020_04_04_00_30_01_519493.py | 477f8c89a6db4461d60cad420534ed30cf4363cc | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | def eh_bissexto (ano):
if ano % 4 == 0:
return True
elif ano % 100 == 0:
return False | [
"you@example.com"
] | you@example.com |
04598fc33634c577860347eab85306f2b9f11336 | 10e19b5cfd59208c1b754fea38c34cc1fb14fdbe | /desktop/core/ext-py/django-openid-auth-0.5/django_openid_auth/tests/__init__.py | f76af6f1bdfa86e22acaa51e11710b716b03d492 | [
"Apache-2.0",
"BSD-2-Clause"
] | permissive | sarvex/hue | 780d28d032edd810d04e83f588617d1630ec2bef | 6e75f0c4da2f3231e19c57bdedd57fb5a935670d | refs/heads/master | 2023-08-15T21:39:16.171556 | 2023-05-01T08:37:43 | 2023-05-01T08:37:43 | 32,574,366 | 0 | 0 | Apache-2.0 | 2023-09-14T16:55:28 | 2015-03-20T09:18:18 | Python | UTF-8 | Python | false | false | 1,749 | py | # django-openid-auth - OpenID integration for django.contrib.auth
#
# Copyright (C) 2009-2013 Canonical Ltd.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import unittest
from test_views import *
from test_store import *
from test_auth import *
from test_admin import *
def suite():
suite = unittest.TestSuite()
for name in ['test_auth', 'test_store', 'test_views', 'test_admin']:
mod = __import__('%s.%s' % (__name__, name), {}, {}, ['suite'])
suite.addTest(mod.suite())
return suite
| [
"romain@cloudera.com"
] | romain@cloudera.com |
552019b6e9b5e757bfae8e0ece6e89266860f4b9 | d1e540562faabf463788e6ad31c337e2fe329944 | /holland/core/exceptions.py | 4b6356f5b6aa08cfd50a1938c60530cafa77c5e0 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | nwbreneman/holland | 5a675883d8ca037505bf6a9e271a3cc33b0c6902 | 8defe48aafb32388ec2bf1cc76070da98c766d8e | refs/heads/master | 2020-04-09T20:24:13.453638 | 2018-11-27T20:21:14 | 2018-11-27T20:21:14 | 160,572,324 | 0 | 0 | NOASSERTION | 2018-12-10T23:31:07 | 2018-12-05T20:08:38 | Python | UTF-8 | Python | false | false | 325 | py | """
Standard public exceptions that are raised by
the various APIs in holland-core
"""
class ConfigError(Exception):
"""Configuration error"""
pass
class InsufficientSpaceError(Exception):
"""Operation could not complete due to disk space"""
pass
class ArgumentError(Exception):
"""Invalid argument"""
| [
"wdierkes@rackspace.com"
] | wdierkes@rackspace.com |
b3cdc2e5d186896ef3832edbf72fbb38490d134d | 4acfe1899465199ed5f6b40e6261f362a731cc28 | /stock_system/migrations/0037_auto_20210224_0626.py | 7168db6ee4400734b703dd338efd3587c50bbb15 | [] | no_license | Tiilon/cjs_inventory | 13d1304a4aa21b8ffb748fb29fd964795dfc3601 | a9e0b844d885c47355e8d40985f33265517595a8 | refs/heads/master | 2023-03-14T08:58:35.660834 | 2021-03-04T17:29:38 | 2021-03-04T17:29:38 | 340,845,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | # Generated by Django 3.1.7 on 2021-02-23 22:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stock_system', '0036_auto_20210223_1852'),
]
operations = [
migrations.AddField(
model_name='brand',
name='reorder_lvl',
field=models.IntegerField(blank=True, default=0, null=True),
),
migrations.AlterField(
model_name='returns',
name='reason',
field=models.CharField(blank=True, choices=[('Damaged', 'Damaged'), ('Expired', 'Expired'), ('Unwanted', 'Unwanted')], max_length=250, null=True),
),
]
| [
"tiilon42@gmail.com"
] | tiilon42@gmail.com |
6f6ca39857cec502b96273f03e3e4ee7a792ec78 | 7ea54debed6a3acda594adc9c9cb36027ba4842c | /article/migrations/0055_auto_20181011_2133.py | 8f5c3d1135ae3d38276a46d6339e4316356a3d01 | [
"MIT"
] | permissive | higab85/drugsandme | e32df050b0a1fb24c06c53eece50f2e4b9b4f01e | 7db66d9687ac9a04132de94edda364f191d497d7 | refs/heads/master | 2022-12-19T12:12:36.885236 | 2019-08-21T14:50:21 | 2019-08-21T14:50:21 | 153,900,258 | 2 | 1 | MIT | 2022-12-08T02:26:17 | 2018-10-20T11:48:46 | JavaScript | UTF-8 | Python | false | false | 1,195 | py | # Generated by Django 2.0.9 on 2018-10-11 21:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('article', '0054_auto_20181009_1452'),
]
operations = [
migrations.AddField(
model_name='articlepage',
name='search_description_en',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='articlepage',
name='search_description_es',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='articlepage',
name='seo_title_en',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='articlepage',
name='seo_title_es',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='articlepage',
name='slug_en',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='articlepage',
name='slug_es',
field=models.TextField(blank=True),
),
]
| [
"gabh@protonmail.com"
] | gabh@protonmail.com |
1656afc3c3cdaa014e280d54a10859a50df088e0 | c040c1f6c770122d8a71af2339acce77062baab7 | /__main__.py | 2af6206559b64db8476219871dea38d48b76eaa0 | [] | no_license | dipu-bd/ebl-fer-notify | feec71de296279121d63a2532f8706ed6361a83c | 0623db28b98e4a48782ee48fdafc9c83387743e6 | refs/heads/master | 2020-04-19T16:34:13.230353 | 2019-02-03T06:32:13 | 2019-02-03T06:32:13 | 168,308,601 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | from src import main
main()
| [
"dipu.sudipta@gmail.com"
] | dipu.sudipta@gmail.com |
25145269e9924692592b4be1912a1a7332cffae3 | 305a9cade2fd8a69ab363a40389420296eccddd0 | /LogExpConversion.py | 098470645632eee5e7d19ba3cf40459b5d5e055f | [] | no_license | PMiskew/Python3_Small_Projects | 823ee6fe20715e8aa9dbadf87f0091929ac87e58 | 1fd80b81d0fa80d90d894bd88a2039d15d2330ca | refs/heads/master | 2021-01-01T04:27:33.711886 | 2017-07-14T01:54:01 | 2017-07-14T01:54:01 | 97,176,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | import tkinter as tk
class LogExpConversionCalc:
def __init__(self):
self.root = tk.Tk()
self.labLog = tk.Label(self.root, text = "Log")
self.labLog.grid(row = 0, column = 0)
self.entN = tk.Entry(self.root, width = 1)
self.entN.grid(row = 1,column = 1)
self.entX = tk.Entry(self.root,width = 3)
self.entX.grid(row = 0, column = 2)
self.labEqu = tk.Label(self.root, text = "=")
self.labEqu.grid(row = 0, column = 3)
self.entA = tk.Entry(self.root,width = 4)
self.entA.grid(row = 0, column = 4)
self.butSub = tk.Button(self.root,text = "Submit")
self.butSub.grid(row = 2,column = 0,columnspan = 5,sticky = "NESW")
self.root.mainloop()
game = LogExpConversionCalc() | [
"paul.miskew@gmail.com"
] | paul.miskew@gmail.com |
c6617026f963131f4207f1edb60676caddf682d3 | 33110fa5ad8c47e31401769086a985eea1a991c7 | /mmsegmentation/mmseg/core/evaluation/class_names.py | bc7baa7506a561ef4d4b8b3dca20796b177b45bc | [
"Apache-2.0"
] | permissive | UESTC-Liuxin/SKMT | 32bc2781063de1da2a778659e6501762531b15a8 | 377bbe3e5d2777d6c3ccaae7a6c364bd9c85d651 | refs/heads/master | 2023-01-12T19:28:49.340298 | 2020-11-16T03:35:09 | 2020-11-16T03:35:09 | 283,365,017 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,194 | py | import mmcv
######################custom#########################
def skmt_classes():
"""Cityscapes class names for external use."""
return [
'background', 'C', 'GL', 'SAS',
'SAC', 'SUP', 'INF', 'LHF', 'HH',
'SCB', 'SHB', 'TM', 'SUB', 'D',
'GC', 'LHB'
]
def us_classes():
"""Cityscapes class names for external use."""
return [
'background', 'C'
]
def cityscapes_classes():
"""Cityscapes class names for external use."""
return [
'road', 'sidewalk', 'building', 'wall', 'fence', 'pole',
'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky',
'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
'bicycle'
]
def ade_classes():
"""ADE20K class names for external use."""
return [
'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ',
'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth',
'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car',
'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug',
'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe',
'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column',
'signboard', 'chest of drawers', 'counter', 'sand', 'sink',
'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path',
'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door',
'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table',
'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove',
'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar',
'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower',
'chandelier', 'awning', 'streetlight', 'booth', 'television receiver',
'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister',
'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van',
'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything',
'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent',
'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank',
'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake',
'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce',
'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen',
'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass',
'clock', 'flag'
]
def voc_classes():
"""Pascal VOC class names for external use."""
return [
'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor'
]
###############custom##########################
def skmt_palette():
"""skmt palette for external use."""
return [[32, 112, 48], [48, 112, 32], [176, 240, 32], [240, 112, 32],
[112, 112, 32], [240, 112, 160], [176, 112, 160], [176, 240, 160],[48, 112, 160],
[112, 240, 32], [240, 240, 32], [112, 240, 160],[112, 112, 160], [176, 112, 32],
[48, 240, 32], [48, 240, 160]]
def us_palette():
"""skmt palette for external use."""
return [[0,0,0],[255,0,0]]
def cityscapes_palette():
"""Cityscapes palette for external use."""
return [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
[190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0],
[107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60],
[255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100],
[0, 0, 230], [119, 11, 32]]
def ade_palette():
"""ADE20K palette for external use."""
return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
[4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
[230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
[150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
[143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
[0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
[255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
[255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
[255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
[224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
[255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
[6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
[140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
[255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
[255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
[11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
[0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
[255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
[0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
[173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
[255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
[255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
[255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
[0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
[0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
[143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
[8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
[255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
[92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
[163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
[255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
[255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
[10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
[255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
[41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
[71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
[184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
[102, 255, 0], [92, 0, 255]]
def voc_palette():
"""Pascal VOC palette for external use."""
return [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],
[128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0],
[192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128],
[192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0],
[128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]]
dataset_aliases = {
'cityscapes': ['cityscapes'],
'ade': ['ade', 'ade20k'],
'voc': ['voc', 'pascal_voc', 'voc12', 'voc12aug']
}
def get_classes(dataset):
"""Get class names of a dataset."""
alias2name = {}
for name, aliases in dataset_aliases.items():
for alias in aliases:
alias2name[alias] = name
if mmcv.is_str(dataset):
if dataset in alias2name:
labels = eval(alias2name[dataset] + '_classes()')
else:
raise ValueError(f'Unrecognized dataset: {dataset}')
else:
raise TypeError(f'dataset must a str, but got {type(dataset)}')
return labels
def get_palette(dataset):
"""Get class palette (RGB) of a dataset."""
alias2name = {}
for name, aliases in dataset_aliases.items():
for alias in aliases:
alias2name[alias] = name
if mmcv.is_str(dataset):
if dataset in alias2name:
labels = eval(alias2name[dataset] + '_palette()')
else:
raise ValueError(f'Unrecognized dataset: {dataset}')
else:
raise TypeError(f'dataset must a str, but got {type(dataset)}')
return labels
| [
"625092890@qq.com"
] | 625092890@qq.com |
37a002a7f34e0a6477257e835216ae07c4fd12be | bbec348efb79c6588a4cb6bb565c813fe3fe86ad | /pyVpx/integtests.d/nfc/case_2.py | 464ec556a90548885be34a58e7c49cfa7bb79523 | [] | no_license | free-Zen/pvc | 2be60fdc0fd0345039219c802223f987fce3b113 | 8428a84481be319ae739dfbb87715f31810138d9 | refs/heads/master | 2022-02-24T12:13:31.599398 | 2019-10-14T07:49:13 | 2019-10-14T07:49:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,380 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2007 VMware, Inc. All rights reserved. -- VMware Confidential
#
## @file nfc/case_2.py --
#
# TestCase 1 verify nfcsvc protocol operations
# Assumes: TTTVM2 is registered and is not powered on, vmx file is 854 bytes
#
# NfcLib protocol API functions tested using per VM and per Data store tickets
# TCP/IPv4 Connect/disconnect to hostd's nfclib via authd using ticket
# PUT [force]
# GET
# RENAME
# DELETE
#
__author__ = "VMware, Inc"
import pdb
import os
from stat import *
import pyVmomi
import pyVim.nfclib
from case_1 import FixtureNFC
class NfcFunctionTest(FixtureNFC):
def test1_VerifyNfcSessionSetupAndShutdown(self):
self._ds = self._host.GetDS().GetSummary().GetDatastore()
self.failUnless(self._ds is not None, "Using datastore:[%s]" % \
(self._host.GetDS().GetSummary().GetName()))
tkt = self._nfc.FileManagement(self._ds, None)
self.failUnless(tkt is not None)
self.failUnless(isinstance(tkt, pyVmomi.Vim.HostServiceTicket),
"FileManagement() returns: pyVmomi.Vim.HostServiceTicket, got\'%s\'" %
(str(tkt)))
print str(tkt)
nfc = pyVim.nfclib.NfcClient(tkt)
self.failIf(nfc.Connect() is False, "Connecting to nfclib via authd")
self.failIf(nfc.Disconnect() is False, "Dropping connection to nfclib")
newNfc = pyVim.nfclib.NfcClient(tkt)
# @todo presently a second connect will even after ticket is already been used
# self.failIf(newNfc.Connect() is True,
# "Connecting to nfclib via authd with used ticket should fail")
# Verify original get VMX file works
# @post created TTTVM2.vmx to current working directory
def test2_VerifyNfcVmGetOperation(self):
self._vm = self._host.GetVM(self._vmname)
self.failIf(self._vm is None,
"Retrieve a VM named %s on host %s to run tests against" %
(self._vmname,self._host.GetName()))
tkt = self._nfc.GetVmFiles(self._vm._mo, None)
self.failUnless(tkt is not None, "Checking results from GetVmFiles")
self.failUnless(isinstance(tkt, pyVmomi.Vim.HostServiceTicket),
"Expecting GetVmFiles rc = pyVmomi.Vim.HostServiceTicket, got\'%s\'" %
(str(tkt)))
print str(tkt)
nfc = pyVim.nfclib.NfcClient(tkt)
self.failIf(nfc.Connect() is False, "Connecting to nfclib via authd")
fileset = []
local = 0
remote = 1
vmxNames = self.GenerateFullDSPath(self._vmname, ".vmx")
fileset.append((vmxNames[remote], vmxNames[local]))
if os.path.exists(vmxNames[local]):
print "Deleting local file %s" % (vmxNames[local])
os.unlink(vmxNames[local])
self.failUnless(nfc.GetFiles(fileset),
"expecting nfc.GetFiles() to return true");
self.failIf(nfc.Disconnect() is False,
"Dropping connection to nfclib")
# verify the file we received is what we expect
self.failUnless(os.path.exists(vmxNames[local]),
"Checking we got file %s" % \
vmxNames[local])
fsize = 854L
self.failUnless((self.FileSize(vmxNames[local]) == fsize),
"Checking file size is %d" % \
fsize)
# Verify original get VMX file works
# @post created TTTVM2.vmx to current working directory
def test3_VerifyNfcVmPutOperation(self):
self._vm = self._host.GetVM(self._vmname)
self.failIf(self._vm is None,
"Retrieve a VM named %s on host %s to run tests against" %
(self._vmname,self._host.GetName()))
tkt = self._nfc.PutVmFiles(self._vm._mo, None)
self.failUnless(tkt is not None, "Checking results from GetVmFiles")
self.failUnless(isinstance(tkt, pyVmomi.Vim.HostServiceTicket),
"Expecting GetVmFiles rc = pyVmomi.Vim.HostServiceTicket, got\'%s\'" %
(str(tkt)))
print str(tkt)
nfc = pyVim.nfclib.NfcClient(tkt)
self.failIf(nfc.Connect() is False, "Connecting to nfclib via authd")
fileset = []
vmxNames = self.GenerateFullDSPath(self._vmname, ".vmx")
fileset.append(vmxNames)
local = 0
self.failUnless(os.path.exists(vmxNames[local]),
"Checking local file %s exists" % (vmxNames[local]))
self.failUnless(nfc.PutFiles(fileset,nfc.NFC_CREATE_OVERWRITE),
"expecting nfc.PutFiles() to return true");
self.failIf(nfc.Disconnect() is False,
"Dropping connection to nfclib")
# @todo Need to add tests for GetFileWithPassword and
# LocalCopyWithPassword
# rname the vmx file 2x
def test4_VerifyNfcRenameFunction(self):
self._dsName = "[" + self._host.GetDS().GetSummary().GetName() + "]"
self._ds = self._host.GetDS().GetSummary().GetDatastore()
self.failUnless(self._ds is not None, "Using datastore:[%s]" % \
(self._dsName))
tkt = self._nfc.FileManagement(self._ds, None)
self.failUnless(tkt is not None, "Verify ticket is returned")
self.failUnless(isinstance(tkt, pyVmomi.Vim.HostServiceTicket),
"ExpectingFileManagement() to return \
pyVmomi.Vim.HostServiceTicket, got\'%s\'" % \
(str(tkt)))
nfc = pyVim.nfclib.NfcClient(tkt)
self.failIf(nfc.Connect() is False, "Connecting to nfclib via authd")
fileset = []
remote = 1
vmxNames = self.GenerateFullDSPath(self._vmname, ".vmx")
chgStr = "-changed"
newVmxName = vmxNames[remote] + chgStr
fileset.append((vmxNames[remote], newVmxName))
fileset.append((newVmxName, vmxNames[remote]))
self.failUnless(nfc.RenameFiles(fileset),
"expecting nfc.RenameFiles() to return true");
self.failIf(nfc.Disconnect() is False,
"Dropping connection to nfclib")
def test5_VerifyNfcRenameFunction(self):
self._dsName = "[" + self._host.GetDS().GetSummary().GetName() + "]"
self._ds = self._host.GetDS().GetSummary().GetDatastore()
self.failUnless(self._ds is not None, "Using datastore:[%s]" % \
(self._dsName))
tkt = self._nfc.FileManagement(self._ds, None)
self.failUnless(tkt is not None, "Verify ticket is returned")
self.failUnless(isinstance(tkt, pyVmomi.Vim.HostServiceTicket),
"ExpectingFileManagement() to return \
pyVmomi.Vim.HostServiceTicket, got\'%s\'" % \
(str(tkt)))
nfc = pyVim.nfclib.NfcClient(tkt)
self.failIf(nfc.Connect() is False, "Connecting to nfclib via authd")
fileset = []
remote = 1
local = 0
vmxNames = self.GenerateFullDSPath(self._vmname, ".vmx")
fileset.append(vmxNames)
self.failUnless(os.path.exists(vmxNames[local]),
"Checking local file %s exists" % (vmxNames[local]))
self.failUnless(nfc.DeleteFiles([vmxNames[remote]]),
"expecting nfc.DeleteFiles() to return true");
self.failUnless(nfc.PutFiles(fileset),
"expecting nfc.PutFiles() to return true");
self.failIf(nfc.Disconnect() is False,
"Dropping connection to nfclib")
# @pre pathname is regular file
# @return size of pathname
def FileSize(self,
pathname):
sb = os.stat(pathname)
if not S_ISREG(sb[ST_MODE]):
raise Exception, "Expecting a regular file for '%s'" % (pathname)
return sb[ST_SIZE]
# given VM name and file extension, construct filenames for a get/put command
# @return ('vmname.ext', 'DStoreName path/file.ext')  -- (local, remote)
def GenerateFullDSPath(self,
                       vmname,
                       fileExt):
    """Build the (local, remote) path pair for *vmname* with *fileExt*.

    The remote path is derived from the VM's disk backing file: the
    '.vmdk' suffix is replaced with *fileExt*, so the result lives in the
    same datastore directory as the disk.
    """
    cfgExt = fileExt
    localFile = vmname + cfgExt
    # assume disk is vmdk and config file is in same directory
    # NOTE(review): the lookup uses self._vmname rather than the vmname
    # argument -- confirm this is intentional for callers that pass a
    # different name.
    key, backing = self.GetADiskKeyAndDSPath(self._vmname)
    self.failIf(backing is None, "Getting disk backing and id")
    diskFile = backing.GetFileName()
    self.failIf(diskFile is None, "Getting path to disk file")
    diskExt = '.vmdk'
    # Keep everything up to the '.vmdk' suffix, then append cfgExt.
    remoteFile = diskFile[0:diskFile.find(diskExt)] + cfgExt
    return (localFile, remoteFile)
| [
"liuzhen@vmware.com"
] | liuzhen@vmware.com |
78ba1ce80f8ed7fefab1b41579e66895862d173a | ee803c29e9c5216a16a2699854b98c8a6d9760b8 | /dataServer/FlaskDataServer/app/LocalDataServer/DBApi/Conf.py | cd4560affc42bb9db4f034e95e3bf0b2cb952893 | [] | no_license | algo2019/algorithm | c160e19b453bc979853caf903ad96c2fa8078b69 | 3b5f016d13f26acab89b4a177c95a4f5d2dc1ba1 | refs/heads/master | 2022-12-12T17:59:57.342665 | 2019-02-23T07:45:39 | 2019-02-23T07:45:39 | 162,404,028 | 0 | 0 | null | 2022-12-08T01:29:20 | 2018-12-19T08:08:13 | Python | UTF-8 | Python | false | false | 444 | py | DB_PATH = '/Users/renren1/test/LocalData.db'
EXCHANGE_ID = {
'SHFX': {'IM', 'AL', 'AU', 'NI', 'PB', 'CU', 'SN', 'ZN', 'AG', 'BU', 'RB', 'FU', 'HC', 'WR', 'RU'},
'DLFX': {'V', 'B', 'M', 'A', 'Y', 'JD', 'JM', 'J', 'BB', 'PP', 'L', 'I', 'FB', 'C', 'CS', 'P'},
'ZZFX': {'SR', 'CF', 'ZC', 'FG', 'TA', 'MA', 'WH', 'PM', 'R', 'LR', 'JR', 'RS', 'OI', 'RM', 'SF', 'SM', 'RI'},
'CFFEX': {'IF', 'IH', 'IC', 'TF', 'T', 'TT', 'AF', 'EF'}
} | [
"xingwang.zhang@renren-inc.com"
] | xingwang.zhang@renren-inc.com |
544f529cb861e34908c3c8dc63ee19177cfa3629 | 6d05f0521eabec73a53f34c3080e9eaee14253fb | /eden/integration/hg/absorb_test.py | 9393ec1dbd3f226d39680fb89cae9510acf518d0 | [
"BSD-3-Clause"
] | permissive | eamonnkent/eden | 8d44ce7bd75329754fb38c1a815dbcdb65b1a481 | c0a837ec7d1f05903bbc17e707bc9fd427046fd8 | refs/heads/master | 2020-03-27T09:43:46.545906 | 2018-08-27T05:42:20 | 2018-08-27T05:52:34 | 146,367,298 | 0 | 0 | null | 2018-08-27T23:54:15 | 2018-08-27T23:54:15 | null | UTF-8 | Python | false | false | 3,344 | py | #!/usr/bin/env python3
#
# Copyright (c) 2004-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import logging
from .lib.hg_extension_test_base import EdenHgTestCase, hg_test
log = logging.getLogger("eden.test.absorb")
@hg_test
class AbsorbTest(EdenHgTestCase):
    """Integration test for `hg absorb` on an Eden-backed checkout.

    Verifies that working-copy edits are folded back into the draft
    commits that last touched the corresponding lines, while the public
    base commit stays untouched.
    """

    def populate_backing_repo(self, repo):
        """Seed the backing repo with one commit (readme + src/test.c)."""
        repo.write_file("readme.txt", "readme\n")
        repo.write_file(
            "src/test.c",
            """\
start of the file
line 1
line 2
line 3
end of the file
""",
        )
        self.commit1 = repo.commit("Initial commit.")
        # absorb only rewrites draft commits, so make the base commit public.
        repo.hg("phase", "--public", self.commit1)
        log.debug("commit1: %s", self.commit1)

    def test_absorb(self):
        self.assert_status_empty()

        # Update src/test.c in our first draft commit
        self.write_file(
            "src/test.c",
            """\
start of the file
line 1
new line a
line 2
new line b
line 3
end of the file
""",
        )
        self.assert_status({"src/test.c": "M"})
        commit2 = self.repo.commit("new lines in test.c\n")
        self.assert_status_empty()
        log.debug("commit2: %s", commit2)

        # Update src/new.c in our second draft commit
        self.write_file(
            "src/new.c",
            """\
this is a brand new file
with some new contents
last line
""",
        )
        self.hg("add", "src/new.c")
        self.assert_status({"src/new.c": "A"})
        commit3 = self.repo.commit("add new.c\n")
        self.assert_status_empty()
        # Fix: this log line previously used the label "commit2" while
        # logging the commit3 hash, which made debug output misleading.
        log.debug("commit3: %s", commit3)

        # Now modify test.c and new.c in the working copy
        self.write_file(
            "src/test.c",
            """\
start of the file
line 1
new line abc
testing
line 2
new line b
line 3
end of the file
""",
        )
        self.write_file(
            "src/new.c",
            """\
this is a brand new file
with some enhanced new contents
last line
""",
        )
        self.assert_status({"src/new.c": "M", "src/test.c": "M"})
        old_commits = self.repo.log()

        # Run "hg absorb" to fold these changes into their respective commits
        out = self.hg("absorb", "-p")
        log.debug("absorb output:\n%s" % (out,))
        self.assert_status_empty()

        # Verify the results: same number of commits, the public base
        # unchanged, both draft commits rewritten with the expected files
        # and contents.
        new_commits = self.repo.log()
        files_changed = self.repo.log(template="{files}")
        self.assertEqual(len(old_commits), len(new_commits))
        self.assertEqual(old_commits[0], new_commits[0])
        self.assertNotEqual(old_commits[1], new_commits[1])
        self.assertNotEqual(old_commits[2], new_commits[2])
        self.assertEqual(files_changed[0], "readme.txt src/test.c")
        self.assertEqual(files_changed[1], "src/test.c")
        self.assertEqual(files_changed[2], "src/new.c")
        self.assertEqual(
            self.read_file("src/test.c"),
            """\
start of the file
line 1
new line abc
testing
line 2
new line b
line 3
end of the file
""",
        )
        self.assertEqual(
            self.read_file("src/new.c"),
            """\
this is a brand new file
with some enhanced new contents
last line
""",
        )
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
f9fae8b6ca5b4e04fe4f6c0bd5bc1d152c5639c2 | f12770add2dd48d2262612fcb1aaecc4a714b4fb | /django/remoteomd/remoteomd/urls.py | 2e25dece1960a6dc2c60a4198f90325e94284859 | [] | no_license | github188/vending | 3558666b57b37e843e72d194d80f6a8ef5dbc7a4 | ce3e2f1fcbb4d132f7b01a99400d917d7ca174a6 | refs/heads/master | 2021-06-16T09:54:09.214694 | 2017-01-28T01:31:30 | 2017-01-28T01:31:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 969 | py | """localomd URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from localomd.admin import admin_omd
# Route table: the custom admin site is mounted under /admin_omd/, and all
# remaining paths are delegated to the remoteomddata app (namespace "data").
urlpatterns = [
    url(r'^admin_omd/', admin_omd.urls),
    # url(r'^admin/', admin.site.urls),
    # url(r'^web/', include('localomdweb.urls')),
    url(r'^', include('remoteomddata.urls', namespace='data')),
]
| [
"jeson.peng@gmail.com"
] | jeson.peng@gmail.com |
3ff67aa461c8cb9aa6e79fbcf64e60551b102975 | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /snerg/nerf/models.py | a5093ed9d6c9feedfdbc5c59c1bfa8b8424cd489 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 12,651 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Different model implementation plus a general port for all the models."""
from typing import Any, Callable
from flax import linen as nn
from jax import random
import jax.numpy as jnp
from snerg.nerf import model_utils
from snerg.nerf import utils
def get_model(key, example_batch, args):
  """A helper function that wraps around a 'model zoo'."""
  builders = {
      "nerf": construct_nerf,
  }
  build = builders[args.model]
  return build(key, example_batch, args)
class NerfModel(nn.Module):
  """Nerf NN Model with both coarse and fine MLPs."""
  num_coarse_samples: int  # The number of samples for the coarse nerf.
  num_fine_samples: int  # The number of samples for the fine nerf.
  use_viewdirs: bool  # If True, use viewdirs as an input.
  near: float  # The distance to the near plane
  far: float  # The distance to the far plane
  noise_std: float  # The std dev of noise added to raw sigma.
  net_depth: int  # The depth of the first part of MLP.
  net_width: int  # The width of the first part of MLP.
  num_viewdir_channels: int  # The number of extra channels for view-dependence.
  viewdir_net_depth: int  # The depth of the view-dependence MLP.
  viewdir_net_width: int  # The width of the view-dependence MLP.
  net_activation: Callable[Ellipsis, Any]  # MLP activation
  skip_layer: int  # How often to add skip connections.
  num_rgb_channels: int  # The number of RGB channels.
  num_sigma_channels: int  # The number of density channels.
  white_bkgd: bool  # If True, use a white background.
  min_deg_point: int  # The minimum degree of positional encoding for positions.
  max_deg_point: int  # The maximum degree of positional encoding for positions.
  deg_view: int  # The degree of positional encoding for viewdirs.
  lindisp: bool  # If True, sample linearly in disparity rather than in depth.
  rgb_activation: Callable[Ellipsis, Any]  # Output RGB activation.
  sigma_activation: Callable[Ellipsis, Any]  # Output sigma activation.
  legacy_posenc_order: bool  # Keep the same ordering as the original tf code.

  @nn.compact
  def __call__(self, rng_0, rng_1, rays, randomized):
    """Nerf Model.

    Args:
      rng_0: jnp.ndarray, random number generator for coarse model sampling.
      rng_1: jnp.ndarray, random number generator for fine model sampling.
      rays: util.Rays, a namedtuple of ray origins, directions, and viewdirs.
      randomized: bool, use randomized stratified sampling.

    Returns:
      ret: list with one tuple per level (coarse, then fine when
        num_fine_samples > 0):
        (comp_rgb, disp, acc, sigma-or-reg_sigma, features, rgb_residual).
    """
    # Stratified sampling along rays
    key, rng_0 = random.split(rng_0)
    z_vals, coarse_samples = model_utils.sample_along_rays(
        key,
        rays.origins,
        rays.directions,
        self.num_coarse_samples,
        self.near,
        self.far,
        randomized,
        self.lindisp,
    )
    coarse_samples_enc = model_utils.posenc(
        coarse_samples,
        self.min_deg_point,
        self.max_deg_point,
        self.legacy_posenc_order,
    )
    # Construct the "coarse" MLP.  Its RGB head is widened to also carry the
    # view-dependence feature channels.
    coarse_mlp = model_utils.MLP(
        net_depth=self.net_depth,
        net_width=self.net_width,
        net_activation=self.net_activation,
        skip_layer=self.skip_layer,
        num_rgb_channels=self.num_rgb_channels + self.num_viewdir_channels,
        num_sigma_channels=self.num_sigma_channels)

    # Point attribute predictions
    if self.use_viewdirs:
      viewdirs_enc = model_utils.posenc(
          rays.viewdirs,
          0,
          self.deg_view,
          self.legacy_posenc_order,
      )
      raw_features_and_rgb, raw_sigma = coarse_mlp(coarse_samples_enc)
    else:
      raw_rgb, raw_sigma = coarse_mlp(coarse_samples_enc)
    # Add noises to regularize the density predictions if needed
    key, rng_0 = random.split(rng_0)
    raw_sigma = model_utils.add_gaussian_noise(
        key,
        raw_sigma,
        self.noise_std,
        randomized,
    )
    sigma = self.sigma_activation(raw_sigma)
    if self.use_viewdirs:
      coarse_viewdir_mlp = model_utils.MLP(
          net_depth=self.viewdir_net_depth,
          net_width=self.viewdir_net_width,
          net_activation=self.net_activation,
          skip_layer=self.skip_layer,
          num_rgb_channels=self.num_rgb_channels,
          num_sigma_channels=self.num_sigma_channels)
      # Overcomposite the features to get an encoding for the features.
      comp_features, _, _, _ = model_utils.volumetric_rendering(
          raw_features_and_rgb[Ellipsis, self.num_rgb_channels:(
              self.num_rgb_channels + self.num_viewdir_channels)],
          sigma,
          z_vals,
          rays.directions,
          white_bkgd=False,
      )
      features = comp_features[Ellipsis, 0:self.num_rgb_channels]
      diffuse_rgb = self.rgb_activation(
          raw_features_and_rgb[Ellipsis, 0:self.num_rgb_channels])
      comp_rgb, disp, acc, weights = model_utils.volumetric_rendering(
          diffuse_rgb,
          sigma,
          z_vals,
          rays.directions,
          white_bkgd=self.white_bkgd,
      )
      # The view-dependence MLP predicts a per-ray RGB residual from the
      # encoded view direction plus the composited color and features.
      viewdirs_enc_features = jnp.concatenate(
          [viewdirs_enc, comp_rgb, comp_features], axis=-1)
      viewdirs_enc_features = jnp.expand_dims(viewdirs_enc_features, -2)
      raw_comp_rgb_residual, _ = coarse_viewdir_mlp(viewdirs_enc_features)
      output_shape = list(comp_features.shape)
      output_shape[-1] = 3
      raw_comp_rgb_residual = raw_comp_rgb_residual.reshape(output_shape)
      rgb_residual = self.rgb_activation(raw_comp_rgb_residual)
      comp_rgb += rgb_residual
    else:
      rgb = self.rgb_activation(raw_rgb)
      # Volumetric rendering.
      comp_rgb, disp, acc, weights = model_utils.volumetric_rendering(
          rgb,
          sigma,
          z_vals,
          rays.directions,
          white_bkgd=self.white_bkgd,
      )
      features = jnp.zeros_like(comp_rgb)
      rgb_residual = jnp.zeros_like(comp_rgb)
    ret = [
        (comp_rgb, disp, acc, sigma, features, rgb_residual),
    ]
    # Hierarchical sampling based on coarse predictions
    if self.num_fine_samples > 0:
      z_vals_mid = .5 * (z_vals[Ellipsis, 1:] + z_vals[Ellipsis, :-1])
      key, rng_1 = random.split(rng_1)
      z_vals, fine_samples = model_utils.sample_pdf(
          key,
          z_vals_mid,
          weights[Ellipsis, 1:-1],
          rays.origins,
          rays.directions,
          z_vals,
          self.num_fine_samples,
          randomized,
      )
      fine_samples_enc = model_utils.posenc(
          fine_samples,
          self.min_deg_point,
          self.max_deg_point,
          self.legacy_posenc_order,
      )
      # Construct the "fine" MLP.
      fine_mlp = model_utils.MLP(
          net_depth=self.net_depth,
          net_width=self.net_width,
          net_activation=self.net_activation,
          skip_layer=self.skip_layer,
          num_rgb_channels=self.num_rgb_channels + self.num_viewdir_channels,
          num_sigma_channels=self.num_sigma_channels)
      if self.use_viewdirs:
        raw_features_and_rgb, raw_sigma = fine_mlp(fine_samples_enc)
      else:
        raw_rgb, raw_sigma = fine_mlp(fine_samples_enc)
      key, rng_1 = random.split(rng_1)
      raw_sigma = model_utils.add_gaussian_noise(
          key,
          raw_sigma,
          self.noise_std,
          randomized,
      )
      sigma = self.sigma_activation(raw_sigma)
      # Density predicted by the fine MLP at the *coarse* sample locations;
      # returned in place of the fine sigma for the second level.
      _, raw_reg_sigma = fine_mlp(coarse_samples_enc)
      reg_sigma = self.sigma_activation(raw_reg_sigma)
      if self.use_viewdirs:
        fine_viewdir_mlp = model_utils.MLP(
            net_depth=self.viewdir_net_depth,
            net_width=self.viewdir_net_width,
            net_activation=self.net_activation,
            skip_layer=self.skip_layer,
            num_rgb_channels=self.num_rgb_channels,
            num_sigma_channels=self.num_sigma_channels)
        # Overcomposite the features to get an encoding for the features.
        features_and_rgb = self.rgb_activation(raw_features_and_rgb)
        features = features_and_rgb[Ellipsis, self.num_rgb_channels:(
            self.num_rgb_channels + self.num_viewdir_channels)]
        comp_features, _, _, _ = model_utils.volumetric_rendering(
            features,
            sigma,
            z_vals,
            rays.directions,
            white_bkgd=False,
        )
        features = comp_features[Ellipsis, 0:self.num_rgb_channels]
        diffuse_rgb = features_and_rgb[Ellipsis, 0:self.num_rgb_channels]
        comp_rgb, disp, acc, weights = model_utils.volumetric_rendering(
            diffuse_rgb,
            sigma,
            z_vals,
            rays.directions,
            white_bkgd=self.white_bkgd,
        )
        viewdirs_enc_features = jnp.concatenate(
            [viewdirs_enc, comp_rgb, comp_features], axis=-1)
        viewdirs_enc_features = jnp.expand_dims(viewdirs_enc_features, -2)
        raw_comp_rgb_residual, _ = fine_viewdir_mlp(viewdirs_enc_features)
        output_shape = list(comp_features.shape)
        output_shape[-1] = 3
        raw_comp_rgb_residual = raw_comp_rgb_residual.reshape(output_shape)
        rgb_residual = self.rgb_activation(raw_comp_rgb_residual)
        comp_rgb += rgb_residual
      else:
        rgb = self.rgb_activation(raw_rgb)
        # Volumetric rendering.
        comp_rgb, disp, acc, weights = model_utils.volumetric_rendering(
            rgb,
            sigma,
            z_vals,
            rays.directions,
            white_bkgd=self.white_bkgd,
        )
        features = jnp.zeros_like(comp_rgb)
        rgb_residual = jnp.zeros_like(comp_rgb)
      ret.append((comp_rgb, disp, acc, reg_sigma, features, rgb_residual))
    return ret
def construct_nerf(key, example_batch, args):
  """Construct a Neural Radiance Field.

  Args:
    key: jnp.ndarray. Random number generator.
    example_batch: dict, an example of a batch of data.
    args: FLAGS class. Hyperparameters of nerf.

  Returns:
    model: nn.Model. Nerf model with parameters.
    state: flax.Module.state. Nerf model state for stateful parameters.
  """
  net_activation = nn.relu
  rgb_activation = nn.sigmoid
  sigma_activation = nn.relu

  # Assert that rgb_activation always produces outputs in [0, 1], and
  # sigma_activation always produce non-negative outputs.
  # Probe with a wide, symmetric range of values (roughly +/- e^90).
  x = jnp.exp(jnp.linspace(-90, 90, 1024))
  x = jnp.concatenate([-x[::-1], x], 0)

  rgb = rgb_activation(x)
  if jnp.any(rgb < 0) or jnp.any(rgb > 1):
    raise NotImplementedError(
        "Choice of rgb_activation `{}` produces colors outside of [0, 1]"
        .format(args.rgb_activation))

  sigma = sigma_activation(x)
  if jnp.any(sigma < 0):
    raise NotImplementedError(
        "Choice of sigma_activation `{}` produces negative densities".format(
            args.sigma_activation))

  model = NerfModel(
      min_deg_point=args.min_deg_point,
      max_deg_point=args.max_deg_point,
      deg_view=args.deg_view,
      num_coarse_samples=args.num_coarse_samples,
      num_fine_samples=args.num_fine_samples,
      use_viewdirs=args.use_viewdirs,
      near=args.near,
      far=args.far,
      noise_std=args.noise_std,
      white_bkgd=args.white_bkgd,
      net_depth=args.net_depth,
      net_width=args.net_width,
      num_viewdir_channels=args.num_viewdir_channels,
      viewdir_net_depth=args.viewdir_net_depth,
      viewdir_net_width=args.viewdir_net_width,
      skip_layer=args.skip_layer,
      num_rgb_channels=args.num_rgb_channels,
      num_sigma_channels=args.num_sigma_channels,
      lindisp=args.lindisp,
      net_activation=net_activation,
      rgb_activation=rgb_activation,
      sigma_activation=sigma_activation,
      legacy_posenc_order=args.legacy_posenc_order)
  rays = example_batch["rays"]
  key1, key2, key3 = random.split(key, num=3)
  # Initialize parameters on the first example of the batch.
  init_variables = model.init(
      key1,
      rng_0=key2,
      rng_1=key3,
      rays=utils.namedtuple_map(lambda x: x[0], rays),
      randomized=args.randomized)
  return model, init_variables
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
83705db78bfb60ae472f3a82f0c3f249d1980e10 | 340ace07b347ffd35852641591ea702e3bf0c947 | /Objects And Classes/06_inventory.py | a91bf8b7642e04bd617ff0643f0b43c17af565ba | [] | no_license | IvayloValkov/Python_Fundamentals | 8de28bf68d32822e0d442469477d2b2c87ba0f9f | e86cb3e516b4baf7027808128fc105f5b86807c9 | refs/heads/main | 2023-02-24T23:39:54.067798 | 2021-01-17T08:54:27 | 2021-01-17T08:54:27 | 330,347,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | class Inventory:
def __init__(self, capacity):
self.__capacity = capacity
self.items = []
def add_item(self, item):
if len(self.items) < self.__capacity:
self.items.append(item)
else:
return "not enough room in the inventory"
def get_capacity(self):
return self.__capacity
def __repr__(self):
return f"Items: {', '.join(self.items)}.\nCapacity left: {self.__capacity - len(self.items)}"
# Demo: capacity 2, so the third add_item call is rejected.
inventory = Inventory(2)
inventory.add_item("potion")
inventory.add_item("sword")
inventory.add_item("bottle")  # no room left; the returned message is ignored
print(inventory.get_capacity())
print(inventory)
| [
"noreply@github.com"
] | IvayloValkov.noreply@github.com |
1607b39a03fe85418f0255842a8d9ea674b4af8d | 81b57282ccbc000a416382e3a91645a0938febb5 | /dbupgrade/files.py | 3169b028579926c781120b7ffef85e6454c79c19 | [
"MIT"
] | permissive | srittau/dbupgrade | 03ae61f5bd307c289db9d3069b87c5e5fbba934d | 35c704321a39a5319231dd78b1446e836ef3cadc | refs/heads/main | 2023-08-10T07:07:29.811756 | 2023-08-01T11:10:44 | 2023-08-01T11:10:44 | 120,672,859 | 2 | 0 | MIT | 2023-09-01T08:56:39 | 2018-02-07T21:17:50 | Python | UTF-8 | Python | false | false | 1,099 | py | import os.path
from os import listdir
from typing import List
class FileInfo:
    """Metadata describing one SQL upgrade script."""

    def __init__(
        self,
        filename: str,
        schema: str,
        dialect: str,
        version: int,
        api_level: int,
    ) -> None:
        self.filename = filename
        self.schema = schema
        self.dialect = dialect
        self.version = version
        self.api_level = api_level
        # Scripts run inside a transaction unless explicitly disabled later.
        self.transaction = True

    def __lt__(self, other: "FileInfo") -> bool:
        """Order by version; only comparable within one schema/dialect."""
        comparable = (
            self.schema == other.schema and self.dialect == other.dialect
        )
        if not comparable:
            raise TypeError("FileInfos must have the same schema and dialect")
        return self.version < other.version

    def __repr__(self) -> str:
        return (
            f"FileInfo({self.filename!r}, {self.schema!r}, "
            f"{self.dialect!r}, {self.version}, {self.api_level})"
        )
def collect_sql_files(directory: str) -> List[str]:
    """Return the full paths of all *.sql entries directly in *directory*."""
    sql_names = (name for name in listdir(directory) if name.endswith(".sql"))
    return [os.path.join(directory, name) for name in sql_names]
| [
"srittau@rittau.biz"
] | srittau@rittau.biz |
4ec81f004f08fe89ab15068b87fe26ee3763ede5 | 0a973640f0b02d7f3cf9211fcce33221c3a50c88 | /.history/main_20210125143443.py | 0c2d7623c2cd1d1355b799bdf20d3c1708ec119c | [] | no_license | JiajunChen123/IPO_under_review_crawler | 5468b9079950fdd11c5e3ce45af2c75ccb30323c | 031aac915ebe350ec816c05a29b5827fde588567 | refs/heads/main | 2023-02-26T08:23:09.622725 | 2021-02-04T10:11:16 | 2021-02-04T10:11:16 | 332,619,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py |
if __name__ == '__main__':
    # NOTE(review): incomplete editor-history snapshot -- `check` is an
    # undefined bare name and raises NameError when run as a script.
    check
| [
"chenjiajun.jason@outlook.com"
] | chenjiajun.jason@outlook.com |
97409bedd791d4aeea1ef231189b2d17114187b0 | 3a8b0cf0484e06f317923f0b6de08cb8e381b38b | /tests/test_routes.py | b746c6ae758c3e4c8c63eea60047645cc5059385 | [
"MIT"
] | permissive | luiscape/hdx-monitor-sql-collect | 844260b1bfb8152f5a741a76307775e09fd81515 | 3fb7d31363227de160f95d259dee2b0b38da1982 | refs/heads/master | 2021-01-10T07:36:55.642759 | 2015-11-19T15:18:53 | 2015-11-19T15:18:53 | 45,499,253 | 0 | 0 | null | 2015-11-19T15:18:53 | 2015-11-03T22:15:02 | Python | UTF-8 | Python | false | false | 6,468 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Integration tests for testing the application
HTTP routes and methods.
'''
import json
import flask
import unittest
import app.server as Server
class Generic(unittest.TestCase):
    '''
    Set of generic, reusable assertions over an API status payload.
    Instantiate with the decoded JSON object, e.g. ``Generic(result).keys()``.
    '''

    def __init__(self, object, methodName='runTest'):
        # Fix: ``Generic(result)`` previously forwarded the payload as the
        # unittest ``methodName`` constructor argument, which raises before
        # setUp could ever attach it. Store the payload and hand TestCase a
        # valid method name instead.
        super(Generic, self).__init__(methodName)
        self.object = object

    def setUp(self, object):
        '''
        Kept for backwards compatibility: attach *object* as the payload.
        '''
        self.object = object

    def keys(self):
        '''
        Generic test for expected default keys.
        '''
        keys = ['success', 'message', 'endpoint', 'time', 'ETA', 'computations']
        for key in self.object.keys():
            self.assertIn(key, keys)

    def computations(self):
        '''
        Generic test for expected computation keys.
        '''
        computation_keys = ['total', 'completed', 'failed', 'queued', 'progress']
        for key in self.object['computations']:
            self.assertIn(key, computation_keys)

    def types(self):
        '''
        Generic test for the types of data.
        '''
        types = {
            'online': bool,
            'message': str,
            'endpoint': str,
            'time': str,
            'ETA': str,
            'computations': {
                'total': int,
                'completed': int,
                'failed': int,
                'queued': int,
                'progress': float
            }
        }
        for key in self.object.keys():
            expected = types[key]
            # Fix: the old code compared the *key* (always a string) against
            # dict, so nested computation values were never type-checked and
            # a 'computations' entry failed the flat assertIs comparison.
            if isinstance(expected, dict):
                for sub_key, sub_value in self.object[key].items():
                    self.assertIs(type(sub_value), expected[sub_key])
            else:
                self.assertIs(type(self.object.get(key)), expected)
class TestRoutes(unittest.TestCase):
    '''
    Tests for all routes and methods.
    '''
    def setUp(self):
        # Fresh test application + client per test method.
        self.app = Server.createServer('test', debug=False)
        self.client = self.app.test_client()

    #
    # /status
    #
    def test_status_type(self):
        '''
        routes.status: /status endpoint returns a JSON object.
        '''
        response = self.client.get('/status')
        result = json.loads(response.data.decode('utf8'))
        self.assertIs(type(result), type({}))

    def test_status_object(self):
        '''
        routes.status: /status endpoint returns a complete object.
        '''
        response = self.client.get('/status')
        result = json.loads(response.data.decode('utf8'))
        keys = ['online', 'version', 'description', 'repository', 'maintainer', 'ckan']
        for key in result.keys():
            self.assertIn(key, keys)

    # #
    # # /users
    # #
    # def test_users_type(self):
    #     '''
    #     routes: /users endpoint returns a JSON object.
    #     '''
    #     response = self.client.get('/users')
    #     result = json.loads(response.data.decode('utf8'))
    #     assert type(result) == type({})

    # def test_users_object(self):
    #     '''
    #     routes: /users endpoint returns a complete object.
    #     '''
    #     response = self.client.get('/users')
    #     result = json.loads(response.data.decode('utf8'))
    #     generic = Generic(result)
    #     generic.keys()
    #     generic.types()
    #     generic.computations()

    #
    # /revisions
    #
    def test_revisions_type(self):
        '''
        routes.revisions: /revisions endpoint returns a JSON object.
        '''
        response = self.client.get('/revisions')
        result = json.loads(response.data.decode('utf8'))
        # Fix: this previously read ``assertIs(type(result) == type({}))`` --
        # a single boolean argument -- which raises TypeError instead of
        # performing the comparison all sibling tests make.
        self.assertIs(type(result), type({}))

    def test_revisions_object(self):
        '''
        routes.revisions: /revisions endpoint returns a complete object.
        '''
        response = self.client.get('/revisions')
        result = json.loads(response.data.decode('utf8'))
        generic = Generic(result)
        generic.keys()
        generic.types()
        generic.computations()

    #
    # /datasets
    #
    def test_datasets_type(self):
        '''
        routes.datasets: /datasets endpoint returns a JSON object.
        '''
        response = self.client.get('/datasets')
        result = json.loads(response.data.decode('utf8'))
        self.assertIs(type(result), type({}))

    def test_datasets_object(self):
        '''
        routes.datasets: /datasets endpoint returns a complete object.
        '''
        response = self.client.get('/datasets')
        result = json.loads(response.data.decode('utf8'))
        generic = Generic(result)
        generic.keys()
        generic.types()
        generic.computations()

    #
    # /resources
    #
    def test_resources_type(self):
        '''
        routes.resources: /resources endpoint returns a JSON object.
        '''
        response = self.client.get('/resources')
        result = json.loads(response.data.decode('utf8'))
        self.assertIs(type(result), type({}))

    def test_resources_object(self):
        '''
        routes.resources: /resources endpoint returns a complete object.
        '''
        response = self.client.get('/resources')
        result = json.loads(response.data.decode('utf8'))
        generic = Generic(result)
        generic.keys()
        generic.types()
        generic.computations()

    #
    # /countries
    #
    def test_countries_type(self):
        '''
        routes.countries: /countries endpoint returns a JSON object.
        '''
        response = self.client.get('/countries')
        result = json.loads(response.data.decode('utf8'))
        self.assertIs(type(result), type({}))

    def test_countries_object(self):
        '''
        routes.countries: /countries endpoint returns a complete object.
        '''
        response = self.client.get('/countries')
        result = json.loads(response.data.decode('utf8'))
        generic = Generic(result)
        generic.keys()
        generic.types()
        generic.computations()

    #
    # /gallery_items
    #
    def test_gallery_items_type(self):
        '''
        routes:.gallery_items /gallery_items endpoint returns a JSON object.
        '''
        response = self.client.get('/gallery_items')
        result = json.loads(response.data.decode('utf8'))
        self.assertIs(type(result), type({}))

    def test_gallery_items_object(self):
        '''
        routes.gallery_items: /gallery_items endpoint returns a complete object.
        '''
        response = self.client.get('/gallery_items')
        result = json.loads(response.data.decode('utf8'))
        generic = Generic(result)
        generic.keys()
        generic.types()
        generic.computations()

    #
    # /organizations
    #
    def test_organizations_type(self):
        '''
        routes.organizations: /organizations endpoint returns a JSON object.
        '''
        response = self.client.get('/organizations')
        result = json.loads(response.data.decode('utf8'))
        self.assertIs(type(result), type({}))

    def test_organizations_object(self):
        '''
        routes.organizations: /organizations endpoint returns a complete object.
        '''
        response = self.client.get('/organizations')
        result = json.loads(response.data.decode('utf8'))
        generic = Generic(result)
        generic.keys()
        generic.types()
        generic.computations()
| [
"luiscape@gmail.com"
] | luiscape@gmail.com |
a90c2576811f155502fe3355226705ffbabe853a | d48c82c0d69b94227516d8c43a8233e8b7f98732 | /shipping_api_ups/sale.py | c2dbda3cb62aeb878074229b6a384ecb4427adad | [] | no_license | NovaPointGroup/OdooUSA-Stock | f983bdf68ea002585e650c106fead7790de18256 | c4a45f20860efbee5257c11d34f5ef191f4a4fc6 | refs/heads/master | 2020-04-24T23:45:51.751577 | 2015-03-16T14:59:17 | 2015-03-16T14:59:17 | 30,602,066 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,338 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 NovaPoint Group LLC (<http://www.novapointgroup.com>)
# Copyright (C) 2004-2010 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import xml2dic
class res_partner(osv.osv):
_inherit = 'res.partner'
def _method_get(self, cr, uid, context=None):
res = super(res_partner, self)._method_get(cr, uid, context=context)
res.append(('ups.account', 'UPS'))
return res
_columns = {
'address_validation_method': fields.selection(_method_get, 'Address Validation Method', size=32),
}
res_partner()
class sale_order(osv.osv):
_inherit = "sale.order"
def action_ship_create(self, cr, uid, ids, context=None):
pick_obj = self.pool.get('stock.picking')
result = super(sale_order, self).action_ship_create(cr, uid, ids, context=context)
if result:
for sale in self.browse(cr, uid, ids):
if sale.ship_company_code == 'ups':
pick_ids = pick_obj.search(cr, uid, [('sale_id', '=', sale.id), ('picking_type_code', '=', 'outgoing')], context=context)
if pick_ids:
vals = {
'ship_company_code': 'ups',
'logis_company': sale.logis_company and sale.logis_company.id or False,
'shipper': sale.ups_shipper_id and sale.ups_shipper_id.id or False,
'ups_service': sale.ups_service_id and sale.ups_service_id.id or False,
'ups_pickup_type': sale.ups_pickup_type,
'ups_packaging_type': sale.ups_packaging_type and sale.ups_packaging_type.id or False,
'ship_from_address':sale.ups_shipper_id and sale.ups_shipper_id.address and sale.ups_shipper_id.address.id or False,
'shipcharge':sale.shipcharge or False,
'packages_ids': [(0,0, {
'package_type':sale.ups_packaging_type and sale.ups_packaging_type.id or False,
'weight':sale.total_weight_net or False,
})]
}
pick_obj.write(cr, uid, pick_ids, vals)
else:
pick_ids = pick_obj.search(cr, uid, [('sale_id', '=', sale.id), ('picking_type_code', '=', 'outgoing')])
if pick_ids:
pick_obj.write(cr, uid, pick_ids, {'shipper': False, 'ups_service': False}, context=context)
return result
def _get_company_code(self, cr, user, context=None):
res = super(sale_order, self)._get_company_code(cr, user, context=context)
res.append(('ups', 'UPS'))
return res
def onchage_service(self, cr, uid, ids, ups_shipper_id=False, context=None):
vals = {}
service_type_ids = []
if ups_shipper_id:
shipper_obj = self.pool.get('ups.account.shipping').browse(cr, uid, ups_shipper_id)
for shipper in shipper_obj.ups_shipping_service_ids:
service_type_ids.append(shipper.id)
domain = [('id', 'in', service_type_ids)]
return {'domain': {'ups_service_id': domain}}
def onchange_ups_shipper_id(self, cr, uid, ids, ups_shipper_id = False, context=None):
res = {}
service_type_ids = []
if ups_shipper_id:
shipper_obj = self.pool.get('ups.account.shipping').browse(cr, uid, ups_shipper_id)
for shipper in shipper_obj.ups_shipping_service_ids:
service_type_ids.append(shipper.id)
domain = [('id', 'in', service_type_ids)]
if ups_shipper_id:
partner_id = self.pool.get('ups.account.shipping').browse(cr, uid, ups_shipper_id, context=context).partner_id.id
res = {'value': {'transport_id' : partner_id},
'domain': {'ups_service_id': domain}}
return res
def onchange_delivery_method(self, cr, uid, ids, delivery_method, context=None):
res = super(sale_order, self).onchange_delivery_method(cr, uid, ids, delivery_method, context=context)
ups_shipper_ids = []
ups_shipper_id=False
if delivery_method:
deliver_method_obj = self.pool.get('delivery.method').browse(cr, uid, delivery_method, context=context)
if deliver_method_obj.ship_company_code == 'ups':
for shipper in deliver_method_obj.ups_shipping_account_ids:
ups_shipper_ids.append(shipper.id)
if ups_shipper_ids :
ups_shipper_id=ups_shipper_ids[0]
res['value']['ship_company_code'] = deliver_method_obj.ship_company_code
res['value']['sale_account_id'] = deliver_method_obj.ship_account_id.id
res['value']['ups_shipper_id'] = ups_shipper_id
return res
def _method_get(self, cr, uid, context=None):
res = super(sale_order, self)._method_get(cr, uid, context=context)
res.append(('ups.account', 'UPS'))
return res
_columns = {
'payment_method':fields.selection([
('cc_pre_auth', 'Credit Card – PreAuthorized'),
('invoice', 'Invoice'),
('cod', 'COD'),
('p_i_a', 'Pay In Advance'),
('pay_pal', 'Paypal'),
('no_charge', 'No Charge')], 'Payment Method'),
'ship_company_code': fields.selection(_get_company_code, 'Logistic Company', method=True, size=64),
'ups_shipper_id': fields.many2one('ups.account.shipping', 'Shipping Account'),
'ups_service_id': fields.many2one('ups.shipping.service.type', 'Service Type'),
'ups_pickup_type': fields.selection([
('01', 'Daily Pickup'),
('03', 'Customer Counter'),
('06', 'One Time Pickup'),
('07', 'On Call Air'),
('11', 'Suggested Retail Rates'),
('19', 'Letter Center'),
('20', 'Air Service Center'),
], 'Pickup Type'),
'ups_packaging_type': fields.many2one('shipping.package.type', 'Packaging Type'),
'shipping_rates': fields.one2many('shipping.rates.sales', 'sales_id', 'Rate Quotes'),
'status_message': fields.char('Status', size=128, readonly=True),
# From partner address validation
'address_validation_method': fields.selection(_method_get, 'Address Validation Method', size=32),
}
def _get_sale_account(self, cr, uid, context=None):
if context is None:
context = {}
logsitic_obj = self.pool.get('logistic.company')
user_rec = self.pool.get('res.users').browse(cr , uid, uid, context)
logis_company = logsitic_obj.search(cr, uid, [])
if not logis_company:
return False
return logsitic_obj.browse(cr, uid, logis_company[0], context).ship_account_id.id
_defaults = {
'sale_account_id': _get_sale_account,
}
    def get_rate(self, cr, uid, ids, context=None):
        """Request shipping-rate quotes from the UPS Rating service for the
        first sale order in ``ids``.

        Builds an XML ``RatingServiceSelectionRequest`` from the order's UPS
        shipper account and destination address, POSTs it to the test or
        production endpoint, stores every returned rate as a
        ``shipping.rates.sales`` line and writes the cheapest quote back onto
        the order.  UPS-reported errors are saved in ``status_message``.

        NOTE(review): Python 2 only (``print`` statements, ``except E, e``);
        also relies on a module-level ``xml2dic`` helper -- confirm imports.
        """
        sale_obj = self.pool.get('sale.order')
        data = self.browse(cr, uid, ids[0], context=context)
        # sale_obj.write(cr,uid,context.get('active_ids'),{'ups_shipper_id':data.ups_shipper_id.id,
        #                                                  'ups_service_id':data.ups_service_id.id,
        #                                                  'ups_pickup_type':data.ups_pickup_type,
        #                                                  'ups_packaging_type':data.ups_packaging_type.id},context=context)
        if context is None:
            context = {}
        # if not (data['rate_selection'] == 'rate_request' and data['ship_company_code'] == 'ups'):
        #     return super(shipping_rate_wizard, self).get_rate(cr, uid, ids, context)
        # if context.get('active_model', False) == 'sale.order':
        # Gather the shipment parameters; every credential/address field falls
        # back to '' when the related record is missing.
        weight = data.total_weight_net or 0.00
        # invoice = self.pool.get('account.invoice').browse(cr, uid, context['active_id'], context=context)
        # weight = invoice.total_weight_net or 0.00
        receipient_zip = data.partner_id and data.partner_id.zip or ''
        receipient_country_code = data.partner_id.country_id and data.partner_id.country_id.code or ''
        access_license_no = data.ups_shipper_id and data.ups_shipper_id.accesslicensenumber or ''
        user_id = data.ups_shipper_id and data.ups_shipper_id.userid or ''
        password = data.ups_shipper_id and data.ups_shipper_id.password or ''
        pickup_type_ups = data.ups_pickup_type
        shipper_zip = data.ups_shipper_id and data.ups_shipper_id.address and data.ups_shipper_id.address.zip or ''
        shipper_country_code = data.ups_shipper_id and data.ups_shipper_id.address and data.ups_shipper_id.address.country_id and \
                                    data.ups_shipper_id.address.country_id.code or ''
        ups_info_shipper_no = data.ups_shipper_id and data.ups_shipper_id.acc_no or ''
        packaging_type_ups = data.ups_packaging_type.code
        # Pick the test or production Rating endpoint from the delivery method.
        test_mode = False
        test_mode = data.delivery_method and data.delivery_method.test_mode
        if test_mode:
            url = unicode(data.delivery_method.ship_rate_test_web)
            # port = data.logis_company.ship_rate_test_port
        else:
            url = unicode(data.delivery_method.ship_rate_web)
            # port = data.logis_company.ship_rate_port
        # if data.ups_service_id:
        #     request_action ="rate"
        #     request_option ="rate"
        #     service_type_ups = data.ups_service_id and data.ups_service_id.shipping_service_code or ''
        # else:
        # "shop" asks UPS to return quotes for every available service.
        request_action = "shop"
        request_option = "shop"
        service_type_ups = ''
        # url = 'https://wwwcie.ups.com/ups.app/xml/Rate' or 'https://onlinetools.ups.com/ups.app/xml/Rate'
        rate_request = """<?xml version=\"1.0\"?>
        <AccessRequest xml:lang=\"en-US\">
        <AccessLicenseNumber>%s</AccessLicenseNumber>
        <UserId>%s</UserId>
        <Password>%s</Password>
        </AccessRequest>
        <?xml version=\"1.0\"?>
        <RatingServiceSelectionRequest xml:lang=\"en-US\">
        <Request>
        <TransactionReference>
        <CustomerContext>Rating and Service</CustomerContext>
        <XpciVersion>1.0001</XpciVersion>
        </TransactionReference>
        <RequestAction>%s</RequestAction>
        <RequestOption>%s</RequestOption>
        </Request>
        <PickupType>
        <Code>%s</Code>
        </PickupType>
        <Shipment>
        <Shipper>
        <Address>
        <PostalCode>%s</PostalCode>
        <CountryCode>%s</CountryCode>
        </Address>
        <ShipperNumber>%s</ShipperNumber>
        </Shipper>
        <ShipTo>
        <Address>
        <PostalCode>%s</PostalCode>
        <CountryCode>%s</CountryCode>
        <ResidentialAddressIndicator/>
        </Address>
        </ShipTo>
        <ShipFrom>
        <Address>
        <PostalCode>%s</PostalCode>
        <CountryCode>%s</CountryCode>
        </Address>
        </ShipFrom>
        <Service>
        <Code>%s</Code>
        </Service>
        <Package>
        <PackagingType>
        <Code>%s</Code>
        </PackagingType>
        <PackageWeight>
        <UnitOfMeasurement>
        <Code>LBS</Code>
        </UnitOfMeasurement>
        <Weight>%s</Weight>
        </PackageWeight>
        </Package>
        </Shipment>
        </RatingServiceSelectionRequest>""" % (access_license_no, user_id, password,request_action,request_option, pickup_type_ups, shipper_zip,shipper_country_code,
                                               ups_info_shipper_no,receipient_zip, receipient_country_code, shipper_zip, shipper_country_code,
                                               service_type_ups, packaging_type_ups, weight)
        # Drop any quotes from a previous run before storing fresh ones.
        rates_obj = self.pool.get('shipping.rates.sales')
        so = data.id
        rids = rates_obj.search(cr,uid,[('sales_id','=', so )])
        rates_obj.unlink(cr, uid, rids, context)
        serv_obj = self.pool.get('ups.shipping.service.type')
        try:
            print rate_request
            from urllib2 import Request, urlopen, URLError, quote
            request = Request(url.encode('utf-8').strip(), rate_request.encode('utf-8').strip())
            response_text = urlopen(request).read()
            # p.agent_info = u' '.join((agent_contact, agent_telno)).encode('utf-8').strip()
            print response_text
            response_dic = xml2dic.main(response_text)
            # Any ErrorDescription in the response is written to the order's
            # status_message and suppresses rate processing.
            str_error = ''
            for response in response_dic['RatingServiceSelectionResponse'][0]['Response']:
                if response.get('Error'):
                    for item in response['Error']:
                        if item.get('ErrorDescription'):
                            str_error = item['ErrorDescription']
                            self.write(cr, uid, [data.id], {'status_message': "Error : " + item['ErrorDescription'] })
            if not str_error:
                # print response_dic
                amount = None
                ups_service_id = None
                # Get all the return Shipping rates as options
                for response in response_dic['RatingServiceSelectionResponse']:
                    if response.get('RatedShipment'):
                        warning = None
                        vals = {}
                        for val in response['RatedShipment']:
                            if val.get('TotalCharges'):
                                vals['totalcharges'] = float(val['TotalCharges'][1]['MonetaryValue'])
                            if val.get('GuaranteedDaysToDelivery'):
                                vals['daystodelivery'] = val['GuaranteedDaysToDelivery']
                            if val.get('Service'):
                                service_code = val['Service'][0]['Code']
                                service = serv_obj.search(cr,uid,[('shipping_service_code','=',service_code)])
                                vals['service'] =service[0]
                            if val.get('RatedShipmentWarning'):
                                if not warning:
                                    warning = val['RatedShipmentWarning']
                                else:
                                    warning = warning + ", " + val['RatedShipmentWarning']
                        # get the lowest cost shipping rate as default on Sales Order
                        if (amount is None) or amount > vals['totalcharges']:
                            amount = vals['totalcharges']
                            ups_service_id = vals['service']
                            status_mesage = warning
                        vals['ratedshipmentwarning'] = warning
                        vals['sales_id'] = so
                        rates_obj.create(cr,uid,vals,context)
                # Cheapest quote becomes the order's shipping charge/service.
                sale_obj.write(cr,uid,so,{'shipcharge':amount or 0.00,'ups_service_id':ups_service_id,'status_message':warning},context=context)
                return True
            rates_obj.write(cr, uid, context.get('active_ids'), { 'status_message': 'Success!'},context=context)
        except URLError, e:
            if hasattr(e, 'reason'):
                print 'Could not reach the server, reason: %s' % e.reason
            elif hasattr(e, 'code'):
                print 'Could not fulfill the request, code: %d' % e.code
            raise
        # mod, modid = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'shipping_api_ups', 'view_for_shipping_rate_wizard_shipping')
        return True
sale_order()  # instantiate to register the osv model (old OpenERP convention)
class shipping_rates_sales(osv.osv):
    """Rate quote lines returned by the UPS Rating service for a sale order."""
    _name = "shipping.rates.sales"
    _description = "Shipping Rate Estimate Charges"
    _columns = {
        'totalcharges': fields.float('Total Charges'),
        'ratedshipmentwarning': fields.char('Shipment Warning', size=512),
        'sales_id': fields.many2one('sale.order', 'Sales Order', required=True, ondelete='cascade',),
        'daystodelivery': fields.integer('Days to Delivery'),
        'service': fields.many2one('ups.shipping.service.type', 'Shipping Service' ),
    }
    def select_ship_service(self,cr,uid,ids,context=None):
        """Copy the chosen quote onto its sale order and reopen the order form.

        Note: the action dict is returned from inside the loop, so only the
        first selected quote is applied; the trailing ``return True`` is only
        reached when ``ids`` is empty.  (Removed a dead, discarded
        ``self.pool.get('sale.order')`` lookup inside the loop.)
        """
        sale_obj = self.pool.get('sale.order')
        vals = {}
        for service in self.browse(cr, uid, ids, context=context):
            vals['ups_service_id'] = service.service.id
            vals['shipcharge'] = service.totalcharges
            vals['ship_service'] = service.service.description
            sale_obj.write(cr,uid,[service.sales_id.id],vals,context)
            mod, modid = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'sale', 'view_order_form')
            return {
                'name':_("Sale Order"),
                'view_mode': 'form',
                'view_id': modid,
                'view_type': 'form',
                'res_model': 'sale.order',
                'type': 'ir.actions.act_window',
                # 'target':'new',
                # 'nodestroy': True,
                'domain': '[]',
                'res_id': service.sales_id.id,
                'context':context,
            }
        return True
shipping_rates_sales()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"slevenhagen@novapointgroup.com"
] | slevenhagen@novapointgroup.com |
f784c7b34a8ee2d40ec04537960612598627a438 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/32/usersdata/75/10599/submittedfiles/questao2_av1.py | 5570de3c54d4ac0fa8e7dbd035738c5856d048e5 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | py | # -*- coding: utf-8 -*-
from __future__ import division
a=int(input('Digite o primeiro número:'))
b=int(input('Digite o segundo número:'))
c=int(input('Digite o terceiro número:'))
d=int(input('Digite o quarto número:'))
e=int(input('Digite o quinto número:'))
f=int(input('Digite o sexto número:'))
na=int(input('Digite o primeiro número sorteado:'))
nb=int(input('Digite o segundo número sorteado:'))
nc=int(input('Digite o terceiro número sorteado:'))
nd=int(input('Digite o quarto número sorteado:'))
ne=int(input('Digite o quinto número sorteado:'))
nf=int(input('Digite o sexto número sorteado:'))
cont=0
if a==na:
cont=cont+1
if b==na:
cont=cont+1
if c==nc:
cont=cont+1
if d==nd:
cont=cont+1
if e==ne:
cont=cont+1
if f==nf:
cont=cont+1
if cont==3:
print ('terno')
if cont==4:
print ('quadra')
if cont==5:
print ('quina')
if cont==6:
print ('seno')
if (cont!=3) and (cont!=4) and (cont!=5) and (cont!=6):
print ('azar')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
6de0665ab973cf33ab27a8950f3dabcf7953f852 | 3dbbde1aa96fc09e9aab885cf3713e86f3572dec | /gs-vtoi/bin/hachoir-metadata-gtk | 9ef9a81ee82a3aca600d74de4023a47b328d39de | [] | no_license | bopopescu/gs-vtoi | 6223d6dbf47e89292bd0e79e24e5664450e28cf6 | f12b802976d0020179d1b40b0b5e3af5b72d55cc | refs/heads/master | 2022-11-24T16:31:36.804869 | 2018-07-31T08:30:56 | 2018-07-31T08:30:56 | 282,551,982 | 0 | 0 | null | 2020-07-26T01:09:10 | 2020-07-26T01:09:09 | null | UTF-8 | Python | false | false | 3,649 | #!/Users/Sang/OneDrive/Developments/gs-vtoi/gs-vtoi/bin/python
import sys, pygtk, os
pygtk.require('2.0')
import gtk
from hachoir_core.cmd_line import unicodeFilename
from hachoir_parser import createParser
from hachoir_metadata import extractMetadata
from hachoir_metadata.metadata import MultipleMetadata
class Gui:
    """Minimal PyGTK viewer: pick media files and display the metadata that
    hachoir extracts from them in a two-column table."""
    def __init__(self):
        # Top-level window holding a vertical layout: file-selector row on
        # top, metadata table below.  Pack order determines on-screen layout.
        self.main_window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.main_window.set_border_width(5)
        self.main_window.connect("destroy", self._destroy)
        self.main_vbox = gtk.VBox()
        self.select_hbox = gtk.HBox()
        self.select_button = gtk.Button("Select")
        self.select_button.connect("clicked", self._select_clicked)
        self.select_hbox.pack_start(self.select_button, False)
        self.file_combo = gtk.combo_box_new_text()
        self.file_combo.connect("changed", self._file_combo_changed)
        self.select_hbox.pack_start(self.file_combo)
        self.main_vbox.pack_start(self.select_hbox, False)
        self.metadata_table = gtk.Table(1, 1)
        self.metadata_table.attach(gtk.Label("Select a file to view metadata information..."), 0, 1, 0, 1)
        self.main_vbox.pack_start(self.metadata_table)
        self.main_window.add(self.main_vbox)
        self.main_window.show_all()
    def add_file(self, filename):
        # Append a path to the combo box of openable files.
        self.file_combo.append_text(filename)
    def _select_clicked(self, widget):
        # "Select" button callback: let the user pick a file via the stock
        # open dialog and add it to the combo box.
        file_chooser = gtk.FileChooserDialog("Ouvrir..", None,
            gtk.FILE_CHOOSER_ACTION_OPEN,
            (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
            gtk.STOCK_OPEN, gtk.RESPONSE_OK))
        file_chooser.set_default_response(gtk.RESPONSE_OK)
        file_chooser.show()
        reponse = file_chooser.run()
        if reponse == gtk.RESPONSE_OK:
            selected_file = file_chooser.get_filename()
            self.add_file(selected_file)
        file_chooser.destroy()
    def _file_combo_changed(self, widget):
        # Combo selection changed: parse the chosen file with hachoir and
        # rebuild the metadata table from the extraction result.
        self.main_vbox.remove(self.metadata_table)
        real_filename = self.file_combo.get_active_text()
        filename = unicodeFilename(real_filename)
        parser = createParser(filename, real_filename=real_filename)
        metadata = extractMetadata(parser)
        self.metadata_table = gtk.Table(1, 2)
        self.main_vbox.pack_start(self.metadata_table)
        if metadata is None:
            self.metadata_table.attach(gtk.Label("Unknown file format"), 0, 1, 0, 1)
        else:
            # One table row per (description, value) pair; the table is grown
            # incrementally as rows are added.
            total = 1
            for data in sorted(metadata):
                if not data.values:
                    continue
                title = data.description
                for item in data.values:
                    self.metadata_table.resize(total, 2)
                    value = item.text
                    self.metadata_table.attach(gtk.Label(title + ":"), 0, 1, total-1, total)
                    self.metadata_table.attach(gtk.Label(value), 1, 2, total-1, total)
                    total += 1
        self.metadata_table.show_all()
    def _destroy(self, widget, data=None):
        # Window "destroy" handler: leave the GTK main loop.
        gtk.main_quit()
    def main(self):
        # Seed the combo with the files (or directory contents) given on the
        # command line, then enter the GTK main loop.
        has_file = False
        for arg in sys.argv[1:]:
            if os.path.isdir(arg):
                # NOTE(review): ``file`` shadows the Python 2 builtin of the
                # same name; harmless here, but worth renaming.
                for file in os.listdir(arg):
                    path = os.path.join(arg, file)
                    if os.path.isfile(path):
                        self.add_file(path)
                        has_file = True
            elif os.path.isfile(arg):
                self.add_file(arg)
                has_file = True
        if has_file:
            self.file_combo.set_active(0)
        gtk.main()
# Script entry point: build the window and hand control to the GTK main loop.
if __name__ == "__main__":
    Gui().main()
| [
"sy0414@gmail.com"
] | sy0414@gmail.com | |
699d3bfef04aed9ea2fbddc55b9a01bb4b3afd83 | ad4c2aa0398406ccb7e70562560e75fa283ffa1a | /merge-two-binary-trees/merge-two-binary-trees.py | 081371ccd42e4224ccd32f00bbf5ef910c431f4d | [
"Apache-2.0"
] | permissive | kmgowda/kmg-leetcode-python | 427d58f1750735618dfd51936d33240df5ba9ace | 4d32e110ac33563a8bde3fd3200d5804db354d95 | refs/heads/main | 2023-08-22T06:59:43.141131 | 2021-10-16T14:04:32 | 2021-10-16T14:04:32 | 417,841,590 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 880 | py | // https://leetcode.com/problems/merge-two-binary-trees
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def mergeTrees(self, t1, t2):
        """Merge two binary trees into a new tree.

        Overlapping nodes are summed; where only one tree has a node, that
        node's value is used on its own.  Returns the root of a freshly
        allocated merged tree, or None when both inputs are None.

        :type t1: TreeNode
        :type t2: TreeNode
        :rtype: TreeNode
        """
        if t1 is None and t2 is None:
            return None
        merged = TreeNode((t1.val if t1 else 0) + (t2.val if t2 else 0))
        merged.left = self.mergeTrees(t1.left if t1 else None,
                                      t2.left if t2 else None)
        merged.right = self.mergeTrees(t1.right if t1 else None,
                                       t2.right if t2 else None)
        return merged
| [
"keshava.gowda@gmail.com"
] | keshava.gowda@gmail.com |
623081c6b4a86024a7ab4cf1d69a7e46d21600d1 | f131d940b96452441602e8bd687a55d62ea22912 | /models/unet.py | b4e6f1c0d05195078e8fc8f2d43815bfada6cbb9 | [] | no_license | aguilarmg/cs221-final-project | a66e3b9e037de59f83ef5a950106a46430a808c3 | 9506a50614ca1619dc4338e7e2afa02e99becec1 | refs/heads/master | 2020-06-01T07:40:34.281510 | 2020-03-28T12:39:56 | 2020-03-28T12:39:56 | 190,702,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,967 | py | import torch
import torch.nn as nn
from torch.nn import Conv2d as Conv2D
import torch.nn.init as init
import torch.nn.functional as F
import numpy
from torch.nn import Upsample
class Up(nn.Module):
    """Decoder stage: bilinearly upsample the deeper feature map, pad the
    encoder skip connection to match, concatenate both along the channel
    axis and fuse them with a 3x3 conv + batch-norm + ReLU."""

    def __init__(self, channel_in, channel_out):
        super(Up, self).__init__()
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear')
        self.conv = nn.Sequential(
            Conv2D(channel_in, channel_out, kernel_size=3, padding=1),
            nn.BatchNorm2d(channel_out),
            nn.ReLU(inplace=True),
        )

    def forward(self, x1, x2):
        # Double the spatial size of the deeper (decoder-side) activation map.
        decoded = self.upsample(x1)
        # Size mismatch between the upsampled map and the skip connection.
        # NOTE(review): the diffs are taken from dims 2/3 (H then W) but fed
        # to F.pad, whose first pair pads the *last* dim -- equivalent for
        # square maps; verify for non-square inputs.
        diff_h = decoded.size()[2] - x2.size()[2]
        diff_w = decoded.size()[3] - x2.size()[3]
        skip = F.pad(x2, (diff_h // 2, int(diff_h / 2),
                          diff_w // 2, int(diff_w / 2)))
        # Channel-wise concatenation: skip connection first, decoder second.
        merged = torch.cat([skip, decoded], dim=1)
        return self.conv(merged)
class Down(nn.Module):
    """Encoder stage: halve the spatial resolution with 2x2 max pooling, then
    apply a 3x3 conv + batch-norm + ReLU."""

    def __init__(self, channel_in, channel_out):
        super(Down, self).__init__()
        self.conv = nn.Sequential(
            Conv2D(channel_in, channel_out, kernel_size=3, padding=1),
            nn.BatchNorm2d(channel_out),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        # Downsample first (stride-2 max pool), then convolve.
        pooled = F.max_pool2d(x, 2)
        return self.conv(pooled)
class UNet(nn.Module):
    """Small U-Net: a 4-level encoder/decoder with skip connections, ending
    in a 1x1 conv and a channel-wise softmax over the requested classes."""

    def __init__(self, channel_in, classes):
        super(UNet, self).__init__()
        # NOTE(review): the stem is registered under *both* ``input_conv``
        # and ``conv``; the alias is kept so state_dict keys stay compatible.
        self.input_conv = self.conv = nn.Sequential(
            Conv2D(channel_in, 8, kernel_size=3, padding=1),
            nn.BatchNorm2d(8),
            nn.ReLU(inplace=True),
        )
        self.down1 = Down(8, 16)
        self.down2 = Down(16, 32)
        self.down3 = Down(32, 32)
        self.up1 = Up(64, 16)
        self.up2 = Up(32, 8)
        self.up3 = Up(16, 4)
        self.output_conv = nn.Conv2d(4, classes, kernel_size=1)

    def forward(self, x):
        # Encoder path, keeping each level's activations for the skips.
        enc0 = self.input_conv(x)
        enc1 = self.down1(enc0)
        enc2 = self.down2(enc1)
        enc3 = self.down3(enc2)
        # Decoder path: upsample and merge with the matching encoder level.
        dec = self.up1(enc3, enc2)
        dec = self.up2(dec, enc1)
        dec = self.up3(dec, enc0)
        logits = self.output_conv(dec)
        # Per-pixel class probabilities: softmax over the channel axis
        # (numerically identical to the original nn.Softmax2d()).
        return F.softmax(logits, dim=1)
def weights_init(m):
    """Initialise a Conv2d layer in place: Xavier-uniform weights with a
    sqrt(2) gain and a constant bias of 0.1.  Non-conv modules are left
    untouched, so this can be passed to ``Module.apply``.
    """
    if isinstance(m, nn.Conv2d):
        # Use the in-place ``_`` variants; the bare ``xavier_uniform`` /
        # ``constant`` names are deprecated aliases in modern PyTorch.
        init.xavier_uniform_(m.weight, gain=numpy.sqrt(2.0))
        init.constant_(m.bias, 0.1)
| [
"google-dl-platform@googlegroups.com"
] | google-dl-platform@googlegroups.com |
fdc1f3bb98810086e2d180f43f57c1b891d6fd4a | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_cuffs.py | e56d0f9e3b98522f7ec4b6b644bb19718b341852 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
from xai.brain.wordbase.nouns._cuff import _CUFF
#class header
class _CUFFS(_CUFF):
    """Word entry for "cuffs": the plural noun form of "cuff"."""
    def __init__(self):
        _CUFF.__init__(self)
        self.jsondata = {}
        self.basic = "cuff"
        self.specie = 'nouns'
        self.name = "CUFFS"
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
9ad4f8a6c19169efc08b92e84ad63ec201cb44c0 | abfa631a97c1741c693bcf3e3780fcb30dc46003 | /extendtest/manage.py | c6ff57e83088bd906d3a3b6a215c0d4177b009e8 | [] | no_license | python-1901-class-five/Team-Five | b08f1860f3da0165838a5dc375bc7cb12b81125e | 66d23dd9b0d57f2a77271d7cc4ab1d122b16e7ee | refs/heads/master | 2020-05-09T20:19:55.591049 | 2019-06-13T09:04:09 | 2019-06-13T09:04:09 | 181,403,319 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'extendtest.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"hcy@qq.com"
] | hcy@qq.com |
5b81ab2428a480bcbaceb2e72a457fdc4d31ac41 | 63b0fed007d152fe5e96640b844081c07ca20a11 | /ABC/ABC001~ABC099/ABC033/C.py | 0a5b493b4fb1a0d3b7c6093c373952bae7e20536 | [] | no_license | Nikkuniku/AtcoderProgramming | 8ff54541c8e65d0c93ce42f3a98aec061adf2f05 | fbaf7b40084c52e35c803b6b03346f2a06fb5367 | refs/heads/master | 2023-08-21T10:20:43.520468 | 2023-08-12T09:53:07 | 2023-08-12T09:53:07 | 254,373,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | s=input().split('+')
if len(s)==1:
ans=1
for j in range(len(s[0])):
if s[0][j]!='*':
ans*=int(s[0][j])
if ans!=0:
ans=1
else:
ans=0
print(ans)
exit(0)
ans=0
for i in range(len(s)):
if eval(s[i])==0:
continue
ans+=1
print(ans) | [
"ymdysk911@gmail.com"
] | ymdysk911@gmail.com |
76a0a0029a7548dcbbc65c90816c31168f43dbd5 | 4b4c6b19805d52dab9cc232e343a45785fb33c5a | /mininews/managers.py | 12bd3bfe8ddd7a916fcc8dfdcee9823b4eb076f4 | [
"MIT"
] | permissive | marcinn/django-mininews | d5866c11a7947ec5bc724dc1d61ce2b49b2e0177 | 5868fd5978ab0e64a68df576b8f14b4d4caf1bb5 | refs/heads/master | 2023-03-31T11:13:56.729693 | 2021-03-24T00:54:20 | 2021-03-24T00:54:20 | 350,901,546 | 0 | 0 | MIT | 2021-03-24T00:50:16 | 2021-03-24T00:50:16 | null | UTF-8 | Python | false | false | 387 | py | from django.db.models.query import QuerySet
from django.db.models import Q
import datetime
class MininewsQuerySet(QuerySet):
    """QuerySet helpers for objects carrying a status plus an optional
    start/end publication window."""

    def live(self, statuses=None):
        """Return records that are publicly visible today.

        A record is "live" when its status is in *statuses* (default:
        ``['published']``) and today falls inside its optional start/end
        window; a missing bound is treated as open-ended.
        """
        if statuses is None:
            # Avoid a mutable default argument; None keeps callers compatible.
            statuses = ['published']
        today = datetime.date.today()
        return self.filter(status__in=statuses).\
            filter(Q(start__lte=today) | Q(start__isnull=True)).\
            filter(Q(end__gte=today) | Q(end__isnull=True))
| [
"richard@arbee-design.co.uk"
] | richard@arbee-design.co.uk |
336506c6a65cfee6893992a7afd6b8650db9d2c4 | 3aef4825c5f2366f2e551cdfa54b88c034b0b4f4 | /tutorials/2_tensorflow_old/sklearnTUT/sk10_cross_validation3.py | 5818f6b03c21bbb84be7ede221d50d2aee9605d5 | [
"MIT"
] | permissive | wull566/tensorflow_demo | 4a65cbe1bdda7430ab1c3883889501a62258d8a6 | c2c45050867cb056b8193eb53466d26b80b0ec13 | refs/heads/master | 2020-04-06T17:34:05.912164 | 2018-11-15T07:41:47 | 2018-11-15T07:41:48 | 157,665,187 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,155 | py | # View more 3_python 1_tensorflow_new tutorial on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
"""
Please note, this code is only for 3_python 3+. If you are using 3_python 2+, please modify the code accordingly.
"""
from __future__ import print_function
from sklearn.learning_curve import validation_curve
from sklearn.datasets import load_digits
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np
# Load the handwritten-digits toy dataset (features + class labels).
digits = load_digits()
X = digits.data
y = digits.target
# Sweep the SVC gamma hyper-parameter over a log-spaced range.
param_range = np.logspace(-6, -2.3, 5)
train_loss, test_loss = validation_curve(
    SVC(), X, y, param_name='gamma', param_range=param_range, cv=10,
    scoring='mean_squared_error')
# sklearn's *_error scorers are negated ("greater is better"), so flip the
# sign to plot a positive loss.
train_loss_mean = -np.mean(train_loss, axis=1)
test_loss_mean = -np.mean(test_loss, axis=1)
# Training vs cross-validation loss as a function of gamma.
plt.plot(param_range, train_loss_mean, 'o-', color="r",
         label="Training")
plt.plot(param_range, test_loss_mean, 'o-', color="g",
         label="Cross-validation")
plt.xlabel("gamma")
plt.ylabel("Loss")
plt.legend(loc="best")
plt.show()
"vicleo566@163.com"
] | vicleo566@163.com |
f0fccbe1f1d129a6d95be4858240a525c0c23db7 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/aaa/aretp.py | 2ca1f5865928d1d54330991f06f2bb16cd4862ad | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 5,007 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class ARetP(Mo):
    """Abstract AAA record-retention policy (auto-generated Cisco ACI model
    metadata -- regenerate rather than hand-editing the definitions below)."""
    meta = ClassMeta("cobra.model.aaa.ARetP")
    # Class-level metadata: naming, access masks and class hierarchy.
    meta.isAbstract = True
    meta.moClassName = "aaaARetP"
    meta.moClassName = "aaaARetP"
    meta.rnFormat = ""
    meta.category = MoCategory.REGULAR
    meta.label = "Record Retention Policy"
    meta.writeAccessMask = 0x1
    meta.readAccessMask = 0x800000000000001
    meta.isDomainable = False
    meta.isReadOnly = False
    meta.isConfigurable = True
    meta.isDeletable = True
    meta.isContextRoot = False
    meta.childClasses.add("cobra.model.fault.Delegate")
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
    meta.superClasses.add("cobra.model.naming.NamedObject")
    meta.superClasses.add("cobra.model.pol.Obj")
    meta.superClasses.add("cobra.model.pol.Def")
    meta.superClasses.add("cobra.model.condition.RetP")
    meta.concreteSubClasses.add("cobra.model.aaa.CtrlrRetP")
    meta.concreteSubClasses.add("cobra.model.aaa.SwRetP")
    meta.rnPrefixes = [
    ]
    # Managed-object property metadata.
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)
    prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
    prop.label = "Description"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("descr", prop)
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)
    prop = PropMeta("str", "maxSize", "maxSize", 72, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    meta.props.add("maxSize", prop)
    prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("name", prop)
    prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
    prop.label = "Name alias"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 63)]
    prop.regex = ['[a-zA-Z0-9_.-]+']
    meta.props.add("nameAlias", prop)
    prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerKey", prop)
    prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerTag", prop)
    prop = PropMeta("str", "purgeWin", "purgeWin", 73, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(100, 1000)]
    prop.defaultValue = 250
    prop.defaultValueStr = "250"
    meta.props.add("purgeWin", prop)
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)
    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        # ARetP is abstract and has no naming properties of its own.
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
1d83ba7c7e40243158ac207a36dd6c74cb8eb0f3 | d85a26d336fd127fd002e661ac6442bd915bf3ea | /tests/test_settings.py | c8313c5a17705b52a1c26f22481ce03cd2363cd5 | [
"Apache-2.0"
] | permissive | uduse/bonobo | e2f1addee253fc4798ef854dc89a39c7a4deaa36 | 4247f17d381161dfe26a9d294dfa33594338cf38 | refs/heads/master | 2021-08-28T06:42:10.700541 | 2017-12-11T12:50:36 | 2017-12-11T12:50:36 | 113,858,232 | 1 | 0 | null | 2017-12-11T12:50:37 | 2017-12-11T12:47:37 | Python | UTF-8 | Python | false | false | 1,585 | py | import logging
from os import environ
from unittest.mock import patch
import pytest
from bonobo import settings
TEST_SETTING = 'TEST_SETTING'
def test_to_bool():
assert not settings.to_bool('')
assert not settings.to_bool('FALSE')
assert not settings.to_bool('NO')
assert not settings.to_bool('0')
assert settings.to_bool('yup')
assert settings.to_bool('True')
assert settings.to_bool('yes')
assert settings.to_bool('1')
def test_setting():
s = settings.Setting(TEST_SETTING)
assert s.get() is None
with patch.dict(environ, {TEST_SETTING: 'hello'}):
assert s.get() is None
s.clear()
assert s.get() == 'hello'
s = settings.Setting(TEST_SETTING, default='nope')
assert s.get() is 'nope'
with patch.dict(environ, {TEST_SETTING: 'hello'}):
assert s.get() == 'nope'
s.clear()
assert s.get() == 'hello'
def test_default_settings():
settings.clear_all()
assert settings.DEBUG.get() == False
assert settings.PROFILE.get() == False
assert settings.QUIET.get() == False
assert settings.LOGGING_LEVEL.get() == logging._checkLevel('INFO')
with patch.dict(environ, {'DEBUG': 't'}):
settings.clear_all()
assert settings.LOGGING_LEVEL.get() == logging._checkLevel('DEBUG')
settings.clear_all()
def test_check():
settings.check()
with patch.dict(environ, {'DEBUG': 't', 'PROFILE': 't', 'QUIET': 't'}):
settings.clear_all()
with pytest.raises(RuntimeError):
settings.check()
settings.clear_all()
| [
"romain@dorgueil.net"
] | romain@dorgueil.net |
9fec051f4266d18f40cc3b954c62022c834e4c02 | 82f67ea9fbf21d21f26b611cea5ad6047617e1ab | /cvs-projects/build_scripts/utils/zenlib.py | ec69bfbcea2816b1674a0efbd0ddf0cde88ffa6d | [] | no_license | metalsky/mvst | 741a0e8ddc1c43ca28c7b26dc5720e965a70b764 | e1deb593f47c28e0142e66d11ca47fa4af247ed8 | refs/heads/master | 2020-03-19T09:42:59.923445 | 2018-03-02T00:38:58 | 2018-03-02T00:38:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,048 | py | #!/usr/bin/python
#Normal
import sys, traceback, syslog
from types import *
#Pyro
try:
import Pyro.core
except:
logError(traceback.format_exc())
PYROSERVER = 'PYROLOC://overlord:7769/zenbuild'
Pyro.core.initClient(0)
Pyro.config.PYRO_PRINT_REMOTE_TRACEBACK = 1
LOG_FACILITY = syslog.LOG_LOCAL0
def logError(msg):
    # Write *msg* to syslog (local0 facility) under the ZENLIB tag.
    syslog.openlog('ZENLIB', 0, LOG_FACILITY)
    syslog.syslog(syslog.LOG_ERR, msg)
    syslog.closelog()
def regBuild(buildtag, type, ETA=None):
try:
bdmd = Pyro.core.getProxyForURI(PYROSERVER)
bdmd.regBuild(buildtag, type, ETA)
except:
logError(traceback.format_exc())
def updateBuild(buildtag, stage=None, stagePercentComplete=None, percentComplete=None, ETA=None, status=None):
try:
bdmd = Pyro.core.getProxyForURI(PYROSERVER)
bdmd.updateBuild(buildtag,stage, stagePercentComplete, percentComplete, ETA, status)
except:
logError(traceback.format_exc())
def regSubBuild(buildtag, hostname, task, ETA=None):
    """Register a sub-build (per-host task) for an existing build.

    Remote failures are logged to syslog and never propagated.
    """
    try:
        bdmd = Pyro.core.getProxyForURI(PYROSERVER)
        bdmd.regSubBuild(buildtag, hostname, task, ETA)
    # Narrow handler so KeyboardInterrupt/SystemExit are not swallowed.
    except Exception:
        logError(traceback.format_exc())
def updateSubBuild(device, stage=None, stagePercentComplete=None, percentComplete=None, ETA=None, status=None):
    """Forward a sub-build progress update to the remote zenbuild daemon.

    Remote failures are logged to syslog and never propagated.
    """
    try:
        bdmd = Pyro.core.getProxyForURI(PYROSERVER)
        bdmd.updateSubBuild(device, stage, stagePercentComplete, percentComplete, ETA, status)
    # Narrow handler so KeyboardInterrupt/SystemExit are not swallowed.
    except Exception:
        logError(traceback.format_exc())
def completeBuild(buildtag):
    """Mark the given build as complete on the remote zenbuild daemon.

    Remote failures are logged to syslog and never propagated.
    """
    try:
        bdmd = Pyro.core.getProxyForURI(PYROSERVER)
        bdmd.completeBuild(buildtag)
    # Narrow handler so KeyboardInterrupt/SystemExit are not swallowed.
    except Exception:
        logError(traceback.format_exc())
def completeSubBuild(device):
    """Mark the given device's sub-build as complete on the remote daemon.

    Remote failures are logged to syslog and never propagated.
    """
    try:
        bdmd = Pyro.core.getProxyForURI(PYROSERVER)
        bdmd.completeSubBuild(device)
    # Narrow handler so KeyboardInterrupt/SystemExit are not swallowed.
    except Exception:
        logError(traceback.format_exc())
def test():
    """Smoke-test the client helpers against a live zenbuild daemon.

    NOTE(review): uses Python 2 ``print`` statements -- this module is
    Python 2 only.
    """
    regBuild('test666','test build')
    regBuild('foundation_test_1234','foundation')
    regSubBuild('test666', 'node-24', 'arm_iwmmxt_le target apps')
    updateBuild('test666',stage="Build Prep")
    print completeSubBuild('node-24')
    print completeBuild('test666')
def main():
    """Run the smoke test then exit.

    NOTE(review): exits with status 1 even on success -- confirm this is
    intentional before relying on the exit code.
    """
    test()
    sys.exit(1)
# Script entry point: run the smoke test when executed directly.
if __name__=="__main__":
    main()
| [
"njka.github@gmail.com"
] | njka.github@gmail.com |
89412e55408ee7a9d9b91b23e4891741e3ca1a77 | 9d1238fb0e4a395d49a7b8ff745f21476c9d9c00 | /framework/Tests/PAS/PAS/SecuritySettings/MultiplePasswordCheckouts/test_system_level_password_checkout_blank.py | a7f3b714415f8fbea980a615498a546f84534f42 | [] | no_license | jaspalsingh92/TestAutomation-1 | a48ee1d3b73386f1bf8f53328a5b55444238e054 | e631c67255b10f150e0012991fb1474ede904417 | refs/heads/master | 2023-04-18T14:52:08.836221 | 2021-04-07T12:01:07 | 2021-04-07T12:01:07 | 357,175,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,419 | py | import pytest
import logging
from Shared.API.infrastructure import ResourceManager
from Shared.API.redrock import RedrockController
logger = logging.getLogger("test")
lock_tenant = True
@pytest.mark.api
@pytest.mark.pas
def test_system_level_password_checkout_blank(core_session, pas_windows_setup, users_and_roles,
                                              update_tenant_multiple_checkouts):
    """
    C1548 : System level password checkout set to '--'

    Scenario: with the tenant-wide 'Allow multiple password checkouts'
    policy disabled, a second concurrent checkout of the same account
    must fail and the failure must appear in the account activity log.

    :param core_session: Authenticated Centrify Session
    :param pas_windows_setup: Added and return Windows system and Account associated to it.
    :param users_and_roles: Gets user and role on demand.
    """
    system_id, account_id, sys_info, connector_id, user_password = pas_windows_setup()
    user_session = users_and_roles.get_session_for_user("Privileged Access Service Administrator")
    pas_admin = user_session.__dict__['auth_details']
    # Setting 'Allow multiple password checkouts' policy to Uncheck on Global Security Setting page
    result, success = update_tenant_multiple_checkouts(core_session, False)
    assert success, f"Not able to disable 'Allow multiple password checkouts' policy on Global " \
                    f"Security Setting page. API response result: {result}."
    logger.info(f"'Allow multiple password checkouts' policy Unchecked on Global Security Setting page")
    # Assigning 'Checkout' permission to user for Account.
    account_result, account_success = ResourceManager.assign_account_permissions(core_session, 'Naked',
                                                                                 pas_admin['User'],
                                                                                 pas_admin['UserId'], 'User',
                                                                                 account_id)
    assert account_success, f"Assign Checkout permission to account : {account_id} failed. " \
                            f"API response result: {account_result}"
    logger.info(f"'Checkout' permission given to user: {pas_admin['User']} for Account:{account_id}.")
    # Checkout account while logged in as Cloud Admin
    admin_checkout_result, admin_checkout_success = ResourceManager.check_out_password(core_session, 1, account_id)
    assert admin_checkout_result['Password'] == user_password, f"Not able to checkout Account : {account_id}. API " \
                                                               f"response result: {admin_checkout_result} "
    logger.info(f"Account Checkout successful for Account :{account_id}.")
    # Checkout account while logged in as Privileged Access Service Administrator
    # (must fail: the admin already holds the only allowed checkout).
    user_checkout_result, user_checkout_success = ResourceManager.check_out_password(user_session, 1, account_id)
    assert not user_checkout_success, f"Checkout Account successful : {account_id} : API response " \
                                      f"result: {user_checkout_result}"
    logger.info(f"Not able to checkout Account : {account_id}")
    # The refusal must be recorded in the account activity log.
    activity = RedrockController.get_account_activity(user_session, account_id)[0]['Detail']
    assert 'Multiple checkouts not allowed' in activity, f"Checkout Failed activity not found " \
                                                         f"for account : {account_id}. API response result: {activity} "
    logger.info(f"Checkout Failed activity found in Activity for account : {account_id}")
| [
"singh.jaspal92@gmail.com"
] | singh.jaspal92@gmail.com |
49ae7530ff0772b04bb33a96550692f5f9106cbf | c9fde4576216a22e8d5711bbe97adda1aafa2f08 | /model-optimizer/mo/front/common/partial_infer/reduce.py | 74fdd40e9a935d5f5158baaf54e31969ede37593 | [
"Apache-2.0"
] | permissive | dliang0406/dldt | c703d6a837de3f996528fc8a9543f9530b23342c | d9b10abcebafe8b10ba81e09e433de7a366c072c | refs/heads/2018 | 2020-04-03T08:24:47.723353 | 2018-10-29T07:58:05 | 2018-10-29T07:58:05 | 155,132,108 | 3 | 1 | Apache-2.0 | 2019-10-10T08:39:46 | 2018-10-29T01:03:54 | C++ | UTF-8 | Python | false | false | 1,410 | py | """
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
import numpy as np
def tf_reduce_infer(node, op=None):
    """Infer the output shape (and value, when possible) of a TF reduce node.

    ``node.in_node(0)`` holds the data tensor, ``node.in_node(1)`` the
    reduction axes. When ``op`` is given and the input value is known the
    reduction is constant-folded.

    Parameters
    ----------
    node: Node
        the graph node to infer; ``node.keep_dims`` selects whether reduced
        dimensions are kept with size 1 or removed.
    op: callable, default None
        numpy-style reduction taking ``(array, axes_tuple)``.
    """
    input_shape = node.in_node(0).shape
    log.debug("input_shape: {}".format(input_shape))
    axis = node.in_node(1).value
    # Inference needs a known 1-D shape array and known (scalar or 1-D) axes.
    if input_shape is None or axis is None or input_shape.ndim != 1 or axis.ndim > 1:
        return
    output_shape = np.array(input_shape)
    if node.keep_dims:
        # Reduced dimensions are kept with size 1.
        output_shape[axis] = 1
    else:
        output_shape = np.delete(output_shape, axis)
    node.out_node().shape = output_shape
    if op is not None and node.in_node(0).value is not None:
        # Constant folding. NOTE(review): the result is wrapped in a
        # one-element array, which may not match the inferred shape above --
        # confirm downstream expectations.
        node.out_node(0).value = np.array([op(node.in_node(0).value, (*axis,))],
                                          dtype=node.in_node(0).value.dtype) # TODO extend to multi-dimensional axis
        log.debug("value: {}".format(node.out_node(0).value))
| [
"openvino_pushbot@intel.com"
] | openvino_pushbot@intel.com |
7b1c11206207badd5f7d7c507c0be0f6262fa6eb | aad917f794226f917257ce103c295fd85a51ee62 | /categories/models.py | 6ee2075d4d3e0087cdb60858341277d6ddcecc4f | [] | no_license | jgsogo/bb-django-taxonomy-categories | 3fc05d5b3074cd6a469e97b81ad545834438adab | f00b86bcd08857a668cb62ef40caac0c3cba4bb5 | refs/heads/master | 2020-12-20T11:28:03.426634 | 2013-02-02T21:14:01 | 2013-02-02T21:14:01 | 236,059,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,645 | py | #!/usr/bin/env python
# encoding: utf-8
from django.db import models
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext_lazy as _
from taxonomy.models.taxon import BaseTaxon
from taxonomy.managers import TaxonManager
class CategoryManager(TaxonManager):
    """ Manager adding visibility-aware querysets for categories. """

    def choices(self, user=None):
        """ Return the categories visible to ``user``.

        Superusers see every category; everybody else only sees the
        public ones.
        """
        queryset = self.get_query_set().all()
        if user is not None and user.is_superuser:
            # Superusers get to see all categories.
            return queryset
        # Regular (or anonymous) users only see public categories.
        return queryset.filter(public=self.model.PUBLIC)

    def orphan(self, user=None):
        """ Retrieves all visible categories with no parent. """
        return self.choices(user).filter(parent=None)
class Category(BaseTaxon):
    """ Hierarchical category (MPTT taxon) with a public/private flag. """
    # Visibility states used by the ``public`` field and CategoryManager.
    PUBLIC = 0
    PRIVATE = 1
    PUBLICY_CHOICES = ( (PUBLIC, _('public')), (PRIVATE, _('private')),)
    # Display name; the slug is derived from it on save when left blank.
    title = models.CharField(max_length=100)
    slug = models.SlugField()
    description = models.TextField(blank=True, help_text=_(u'Optional'))
    public = models.IntegerField(choices = PUBLICY_CHOICES)
    objects = CategoryManager()
    class MPTTMeta:
        # Children are kept ordered by slug inside the tree.
        order_insertion_by = ['slug']
    class Meta(BaseTaxon.Meta):
        db_table = 'category_tree'
        app_label = 'category_tree'
        verbose_name = _('category')
        verbose_name_plural = _('categories')
        abstract = False
    def get_name(self):
        """ Return the human-readable name of this taxon. """
        return self.title
    def save(self, *args, **kwargs):
        """ Fill in a slug from the title when none was provided. """
        if not len(self.slug.strip()):
            self.slug = slugify(self.title)
        super(Category, self).save(*args, **kwargs)
| [
"jgsogo@gmail.com"
] | jgsogo@gmail.com |
c59afdb801c2ae60596ec009aeb16a6c7eb6379e | c3cd2d040ceb3eabd387281835cacd0967fdbb6a | /web2py/extras/build_web2py/web2py.win_no_console.spec | d6c55d2f3ecd4375adca10507765bc7b3bc23279 | [
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-2.0-only",
"Apache-2.0",
"BSD-2-Clause",
"LicenseRef-scancode-free-unknown"
] | permissive | operepo/smc | cc55338b8b9fbeac78e67397079759965d859b68 | d10e7b7567266e31de73e5b29663577cab119a90 | refs/heads/master | 2022-09-22T07:17:59.970650 | 2022-07-11T00:20:45 | 2022-07-11T00:20:45 | 116,905,452 | 1 | 3 | MIT | 2021-03-09T03:01:37 | 2018-01-10T03:53:08 | Python | UTF-8 | Python | false | false | 2,230 | spec | # -*- mode: python -*-
# PyInstaller spec for the console-less web2py Windows build.
block_cipher = None
# Analysis scans web2py.py and records the modules loaded dynamically at
# runtime (hiddenimports) that static analysis cannot discover.
a = Analysis(['web2py.py'],
             pathex=['.'],
             binaries=[],
             datas=[],
             hiddenimports=['site-packages', 'argparse', 'cgi', 'cgitb', 'code', 'concurrent', 'concurrent.futures',
             'concurrent.futures._base', 'concurrent.futures.process', 'concurrent.futures.thread', 'configparser', 'csv', 'ctypes.wintypes',
             'email.mime', 'email.mime.base', 'email.mime.multipart', 'email.mime.nonmultipart', 'email.mime.text', 'html.parser', 'http.cookies',
             'ipaddress', 'imp', 'json', 'json.decoder', 'json.encoder', 'json.scanner', 'logging.config', 'logging.handlers', 'profile', 'pstats',
             'psycopg2', 'psycopg2._ipaddress', 'psycopg2._json', 'psycopg2._range', 'psycopg2.extensions', 'psycopg2.extras', 'psycopg2.sql',
             'psycopg2.tz', 'pyodbc', 'python-ldap', 'rlcompleter', 'sched', 'site', 'smtplib', 'sqlite3', 'sqlite3.dbapi2', 'sqlite3.dump', 'timeit', 'tkinter',
             'tkinter.commondialog', 'tkinter.constants', 'tkinter.messagebox', 'uuid', 'win32con', 'win32evtlogutil', 'winerror', 'wsgiref',
             'wsgiref.handlers', 'wsgiref.headers', 'wsgiref.simple_server', 'wsgiref.util', 'xml.dom', 'xml.dom.NodeFilter', 'xml.dom.domreg',
             'xml.dom.expatbuilder', 'xml.dom.minicompat', 'xml.dom.minidom', 'xml.dom.pulldom', 'xml.dom.xmlbuilder', 'xmlrpc.server'],
             hookspath=[],
             runtime_hooks=[],
             # gluon (web2py's framework package) ships alongside the exe.
             excludes=['gluon'],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
# PYZ is the compressed archive of pure-Python modules.
pyz = PYZ(a.pure, a.zipped_data,
             cipher=block_cipher)
# EXE: console=False builds a GUI-only (no console window) executable.
exe = EXE(pyz,
          a.scripts,
          [],
          exclude_binaries=True,
          name='web2py_no_console',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          console=False , icon='extras\\icons\\web2py.ico')
# COLLECT gathers the exe plus its binaries/data into the dist folder.
coll = COLLECT(exe,
               a.binaries,
               a.zipfiles,
               a.datas,
               strip=False,
               upx=True,
               name='web2py_no_console')
| [
"ray@cmagic.biz"
] | ray@cmagic.biz |
05e0de1b6de56c36b6858a92f1e58b04ba53ba4e | 1f7d287ef90041e20468513a26a39e1f3d221289 | /Level-2/s15/guvi-L2-s15-py03.py | bd07bcfd56aa7a6e68f256aa01b44c59c5d2dc54 | [] | no_license | ksthacker/python | d787d69f954c0e9b59b0cc96a8b8fc5c0594d8a0 | 3a3775e1b9349e313f8c96ea11eade54a7e9bf54 | refs/heads/master | 2021-04-27T16:32:40.923316 | 2019-08-21T04:50:22 | 2019-08-21T04:50:22 | 122,303,461 | 0 | 17 | null | 2019-10-03T14:59:51 | 2018-02-21T07:09:32 | Python | UTF-8 | Python | false | false | 240 | py | import sys,string, math,itertools
n = int(input())
L1 = [ int(x) for x in input().split()]
L2 = [ int(x) for x in input().split()]
L22 = sorted(L2)
L12 = []
for x in L22 :
k = L2.index(x)
L12.append(L1[k])
print(*L12)
| [
"noreply@github.com"
] | ksthacker.noreply@github.com |
9d64563f089d3521304bb6648f440eb8c1b1e375 | c18a63e2e37712025794bc7d0bb824ca3a8cde51 | /IDserver/ssh_remote/data.py | 3f38241e919d3df484d294e500f8023dcdca543e | [] | no_license | wuqiangchuan/Xproxxx | 9202767573a3f0bfc1b00b6069eaf6ef9bc25907 | 6403bde2bc091faab55cca5ac9fff62b13d6a0cb | refs/heads/master | 2021-01-01T17:48:55.335991 | 2017-07-20T10:00:52 | 2017-07-20T10:00:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | import json
class BaseData(object):
    """Message object whose attributes mirror a JSON payload.

    The JSON object's keys become instance attributes; ``tp`` is the
    conventional message-type discriminator read by :meth:`get_type`.
    """

    def __init__(self, data=""):
        """Build the instance from a JSON string (see :meth:`from_json`)."""
        self.from_json(data)

    def from_json(self, data=""):
        """Replace all attributes with the keys of the decoded JSON object.

        Raises
        ------
        json.JSONDecodeError
            if ``data`` is not valid JSON (including the empty string).
        """
        self.__dict__ = json.loads(data)

    def to_json(self):
        """Serialize the instance attributes back to a JSON string.

        Fix: dump ``self.__dict__`` -- ``json.dumps(self)`` raised
        ``TypeError: Object of type BaseData is not JSON serializable``.
        """
        return json.dumps(self.__dict__)

    def get_type(self):
        """Return the message type discriminator (the ``tp`` attribute)."""
        return self.tp
class ClientData(BaseData):
    """Client-side message: a plain JSON-backed BaseData."""
    def __init__(self, data=""):
        super(ClientData, self).__init__(data)
class ServerData(BaseData):
    """Server-side message wrapping a raw payload.

    NOTE(review): unlike the siblings this does NOT call the base
    constructor, so ``data`` is stored verbatim instead of being parsed
    as JSON -- confirm this asymmetry is intentional.
    """
    def __init__(self, data=""):
        # type discriminator expected by BaseData.get_type()
        self.tp = 'server'
        self.data = data
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
30dc421b2c9a6259ad94cd434a40969e96d7e9d4 | d0f65918f28acea06c6632d59a74554239a13195 | /pynet/plotting/network.py | ba62ec6357d63d480dc5bee690158b862e8dfc57 | [
"LicenseRef-scancode-cecill-b-en"
] | permissive | Duplums/pynet | 0811f2ebd151c4a913af3fa16568f6b0b81bd43a | 7a807ed690929563ce36086eaf0998d0e8856aea | refs/heads/master | 2023-08-09T14:51:08.437781 | 2023-08-04T13:11:02 | 2023-08-04T13:11:02 | 221,240,432 | 0 | 2 | NOASSERTION | 2019-11-12T14:43:25 | 2019-11-12T14:43:24 | null | UTF-8 | Python | false | false | 30,653 | py | # -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2019
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Module that provides tools to display a graph.
"""
# System import
import sys
import os
import time
from pprint import pprint
import tempfile
import weakref
import operator
import tempfile
# Third party import
import torch
import hiddenlayer as hl
from PySide2 import QtCore, QtGui, QtWidgets
from torchviz import make_dot
# Module import
from .graph import Graph, GraphNode
from pynet.plotting.colors import *
def plot_net_rescue(model, shape, outfileroot=None):
    """ Save a PNG file containing the network graph representation.

    Runs a random batch of the given shape through the model and renders
    the resulting autograd graph with torchviz.

    Parameters
    ----------
    model: Net
        the network model.
    shape: list of int
        the shape of a classical input batch dataset.
    outfileroot: str, default None
        the file path without extension.

    Returns
    -------
    outfile: str
        the path to the generated PNG.
    """
    dummy_batch = torch.randn(shape)
    dot = make_dot(model(dummy_batch), params=dict(model.named_parameters()))
    dot.format = "png"
    # Render either next to the requested root or in a fresh temp folder.
    if outfileroot is None:
        dirpath, basename = tempfile.mkdtemp(), "pynet_graph"
    else:
        dirpath = os.path.dirname(outfileroot)
        basename = os.path.basename(outfileroot)
    dot.render(directory=dirpath, filename=basename, view=True)
    return os.path.join(dirpath, basename + ".png")
def plot_net(model, shape, static=True, outfileroot=None):
    """ Save a PDF file containing the network graph representation.

    Sometimes the 'get_trace_graph' pytorch function fails: use the
    'plot_net_rescue' function insteed.

    Parameters
    ----------
    model: Net
        the network model.
    shape: list of int
        the shape of a classical input batch dataset.
    static: bool, default True
        create a static or dynamic view.
    outfileroot: str, default None
        the file path without extension to generate PDF.

    Returns
    -------
    outfile: str
        the path to the generated PDF.
    """
    # Create application (reuse an existing Qt application if any)
    app = QtWidgets.QApplication.instance()
    if app is None:
        app = QtWidgets.QApplication(sys.argv)

    # Create view
    hl_graph = hl.build_graph(model, torch.zeros(shape))
    hl_graph.theme = hl.graph.THEMES["blue"].copy()
    outfile = None
    if outfileroot is not None:
        hl_graph.save(outfileroot)
        outfile = outfileroot + ".pdf"
        if not os.path.isfile(outfile):
            raise ValueError("'{0}' has not been generated.".format(outfile))
    if static:
        # NOTE(review): this inner 'draw' is never called and references an
        # undefined name 'page' -- it would raise NameError if ever invoked.
        def draw(widget, surface):
            page.render(surface)
        # Render the graph to a temporary PNG and show it in a scroll area.
        with tempfile.TemporaryDirectory() as tmpdir:
            tmpfileroot = os.path.join(tmpdir, "graph")
            hl_graph.save(tmpfileroot, format="png")
            tmpfile = tmpfileroot + ".png"
            widget = PDFView(tmpfile)
            view = QtWidgets.QScrollArea()
            view.setWidgetResizable(True)
            view.setWidget(widget)
    else:
        # Rebuild the hiddenlayer graph as an interactive pynet Graph.
        graph = Graph()
        nodes_map = {}
        cnt = 1
        for key, node in hl_graph.nodes.items():
            label = node.title
            if node.caption:
                label += node.caption
            if node.repeat:
                label += str(node.repeat)
            # Prefix with a counter to keep node names unique.
            nodes_map[key] = "{0}-{1}".format(cnt, label)
            cnt += 1
        for key, node in hl_graph.nodes.items():
            graph.add_node(GraphNode(str(nodes_map[key]), node))
        for key1, key2, label in hl_graph.edges:
            # NOTE(review): 'label' is computed but never passed to add_link.
            if isinstance(label, (list, tuple)):
                label = "x".join([str(l or "?") for l in label])
            graph.add_link(str(nodes_map[key1]), str(nodes_map[key2]))
        view = GraphView(graph)

    # Display (blocks until the window is closed)
    view.show()
    app.exec_()

    return outfile
class PDFView(QtWidgets.QWidget):
    """ Widget that displays a rendered graph image inside a label. """

    def __init__(self, path):
        """ Initialize the PDFView class.

        Parameters
        ----------
        path: str
            path of the image file to display.
        """
        super(PDFView, self).__init__()
        self.path = path
        self.label = QtWidgets.QLabel()
        box = QtWidgets.QVBoxLayout(self)
        box.addWidget(self.label)
        # Load the image once and hand it to the label.
        self.pixmap = QtGui.QPixmap(self.path)
        self.label.setPixmap(self.pixmap)
class Control(QtWidgets.QGraphicsPolygonItem):
    """ Create a glyph for each control connection.

    The glyph is a small solid triangle drawn next to a control label;
    its color reflects whether the control is optional.
    """
    def __init__(self, name, height, width, optional, parent=None):
        """ Initilaize the Control class.

        Parameters
        ----------
        name: str
            the control name.
        height, width: int
            the control size.
        optional: bool
            option to color the glyph.
        parent: QGraphicsItem, default None
            the parent graphics item.
        """
        # Inheritance
        super(Control, self).__init__(parent)

        # Class parameters
        self.name = name
        self.optional = optional
        color = self._color(optional)
        self.brush = QtGui.QBrush(QtCore.Qt.SolidPattern)
        self.brush.setColor(color)

        # Set graphic item properties
        self.setAcceptedMouseButtons(QtCore.Qt.LeftButton)

        # Define the widget: a right-pointing triangle spanning the label
        # height (minus a small vertical margin).
        polygon = QtGui.QPolygonF([
            QtCore.QPointF(0, 0), QtCore.QPointF(width, (height - 5) / 2.0),
            QtCore.QPointF(0, height - 5)])
        self.setPen(QtGui.QPen(QtCore.Qt.NoPen))
        self.setPolygon(polygon)
        self.setBrush(self.brush)
        self.setZValue(3)

    def _color(self, optional):
        """ Define the color of a control glyph depending on its status.

        Parameters
        ----------
        optional: bool (mandatory)
            option to color the glyph.

        Returns
        -------
        color: QColor
            the glyph color.
        """
        if optional:
            color = QtCore.Qt.darkGreen
        else:
            color = QtCore.Qt.black
        return color

    def get_control_point(self):
        """ Give the relative location of the control glyph in the parent
        widget.

        Returns
        -------
        position: QPointF
            the control glyph position (its bounding-box center, mapped to
            parent coordinates).
        """
        point = QtCore.QPointF(
            self.boundingRect().size().width() / 2.0,
            self.boundingRect().size().height() / 2.0)
        return self.mapToParent(point)
class Node(QtWidgets.QGraphicsItem):
    """ A box node.

    Draws a titled, gradient-filled box with input/output control glyphs
    and, optionally, an embedded sub-graph view toggled by double click.
    """
    # Gradient color quadruples per style:
    # (active 1, active 2, inactive 1, inactive 2).
    _colors = {
        "default": (RED_1, RED_2, LIGHT_RED_1, LIGHT_RED_2),
        "choice1": (SAND_1, SAND_2, LIGHT_SAND_1, LIGHT_SAND_2),
        "choice2": (DEEP_PURPLE_1, DEEP_PURPLE_2, PURPLE_1, PURPLE_2),
        "choice3": (BLUE_1, BLUE_2, LIGHT_BLUE_1, LIGHT_BLUE_2)
    }

    def __init__(self, name, inputs, outputs, active=True, style=None,
                 graph=None, parent=None):
        """ Initilaize the Node class.

        Parameters
        ----------
        name: string
            a name for the box node.
        inputs: list of str
            the box input controls. If None no input will be created.
        outputs: list of str
            the box output controls. If None no output will be created.
        active: bool, default True)
            a special color will be applied on the node rendering depending
            of this parameter.
        style: string, default None
            the style that will be applied to tune the box rendering.
        graph: Graph, default None
            a sub-graph item.
        parent: QGraphicsItem, default None
            the parent graphics item.
        """
        # Inheritance
        super(Node, self).__init__(parent)

        # Class parameters
        self.style = style or "default"
        self.name = name
        self.graph = graph
        self.inputs = inputs or []
        self.outputs = outputs or []
        self.active = active
        self.input_controls = {}
        self.output_controls = {}
        self.embedded_box = None

        # Set graphic item properties.
        # Fix: with PySide2 (Qt5) the QGraphics* classes live in QtWidgets,
        # not QtGui -- 'QtGui.QGraphicsItem' raises AttributeError.
        self.setFlag(QtWidgets.QGraphicsItem.ItemIsMovable)
        self.setAcceptedMouseButtons(
            QtCore.Qt.LeftButton | QtCore.Qt.RightButton |
            QtCore.Qt.MiddleButton)

        # Define rendering colors: active nodes use the strong gradient
        # pair, inactive ones the light pair.
        bgd_color_indices = [2, 3]
        if self.active:
            bgd_color_indices = [0, 1]
        self.background_brush = self._get_brush(
            *operator.itemgetter(*bgd_color_indices)(self._colors[self.style]))
        self.title_brush = self._get_brush(
            *operator.itemgetter(2, 3)(self._colors[self.style]))

        # Construct the node
        self._build()

    def get_title(self):
        """ Create a title for the node (here simply the node name).
        """
        return self.name

    def _build(self, margin=5):
        """ Create a node reprensenting a box.

        Parameters
        ----------
        margin: int (optional, default 5)
            the default margin.
        """
        # Create a title for the node
        # Fix: QGraphicsTextItem is in QtWidgets with PySide2 (Qt5).
        self.title = QtWidgets.QGraphicsTextItem(self.get_title(), self)
        font = self.title.font()
        font.setWeight(QtGui.QFont.Bold)
        self.title.setFont(font)
        self.title.setPos(margin, margin)
        self.title.setZValue(2)
        self.title.setParentItem(self)

        # Define the default control position: just below the title
        control_position = (
            margin + margin + self.title.boundingRect().size().height())

        # Create the input controls
        for input_name in self.inputs:
            # Create the control representation
            control_glyph, control_text = self._create_control(
                input_name, control_position, is_output=False, margin=margin)
            # Update the class parameters
            self.input_controls[input_name] = (control_glyph, control_text)
            # Update the next control position
            control_position += control_text.boundingRect().size().height()

        # Create the output controls
        for output_name in self.outputs:
            # Create the control representation
            control_glyph, control_text = self._create_control(
                output_name, control_position, is_output=True, margin=margin)
            # Update the class parameters
            self.output_controls[output_name] = (control_glyph, control_text)
            # Update the next control position
            control_position += control_text.boundingRect().size().height()

        # Define the box node background.
        # Fix: QGraphicsRectItem is in QtWidgets with PySide2 (Qt5).
        self.box = QtWidgets.QGraphicsRectItem(self)
        self.box.setBrush(self.background_brush)
        self.box.setPen(QtGui.QPen(QtCore.Qt.NoPen))
        self.box.setZValue(-1)
        self.box.setParentItem(self)
        self.box.setRect(self.contentsRect())

        # Define the title banner drawn on top of the box
        self.box_title = QtWidgets.QGraphicsRectItem(self)
        rect = self.title.mapRectToParent(self.title.boundingRect())
        brect = self.contentsRect()
        brect.setWidth(brect.right() - margin)
        rect.setWidth(brect.width())
        self.box_title.setRect(rect)
        self.box_title.setBrush(self.title_brush)
        self.box_title.setPen(QtGui.QPen(QtCore.Qt.NoPen))
        self.box_title.setParentItem(self)

    def _create_control(self, control_name, control_position, is_output=False,
                        control_width=12, margin=5):
        """ Create a control representation: small glyph and control name.

        Parameters
        ----------
        control_name: str (mandatory)
            the name of the control to render.
        control_position: int (mandatory)
            the position (height) of the control to render.
        is_output: bool (optional, default False)
            an input control glyph is diplayed on the left while an output
            control glyph is displayed on the right.
        control_width: int (optional, default 12)
            the default size of the control glyph.
        margin: int (optional, default 5)
            the default margin.

        Returns
        -------
        control_glyph: Control
            the control glyph item.
        control_text: QGraphicsTextItem
            the associated control text item.
        """
        # Detect if the control is optional
        is_optional = False

        # Create the control representation.
        # Fix: QGraphicsTextItem is in QtWidgets with PySide2 (Qt5).
        control_text = QtWidgets.QGraphicsTextItem(self)
        control_text.setHtml(control_name)
        # Qualify the glyph name with the node name for unambiguous lookup.
        control_name = "{0}:{1}".format(self.name, control_name)
        control_glyph = Control(
            control_name, control_text.boundingRect().size().height(),
            control_width, optional=is_optional, parent=self)
        control_text.setZValue(2)
        control_glyph_width = control_glyph.boundingRect().size().width()
        control_title_width = self.title.boundingRect().size().width()
        control_text.setPos(control_glyph_width + margin, control_position)
        if is_output:
            # Output glyphs are right-aligned with the title width.
            control_glyph.setPos(
                control_title_width - control_glyph_width,
                control_position)
        else:
            control_glyph.setPos(margin, control_position)
        control_text.setParentItem(self)
        control_glyph.setParentItem(self)

        return control_glyph, control_text

    def _get_brush(self, color1, color2):
        """ Create a brush that has a style, a color, a gradient and a texture.

        Parameters
        ----------
        color1, color2: QtGui.QColor (mandatory)
            edge box colors used to define the gradient.
        """
        gradient = QtGui.QLinearGradient(0, 0, 0, 50)
        gradient.setColorAt(0, color1)
        gradient.setColorAt(1, color2)
        return QtGui.QBrush(gradient)

    def contentsRect(self):
        """ Returns the area inside the widget's margins.

        The rectangle is the union of all visible child items, excluding
        the background/title rectangles themselves.

        Returns
        -------
        brect: QRectF
            the bounding rectangle (left, top, right, bottom).
        """
        first = True
        excluded = []
        for name in ("box", "box_title"):
            if hasattr(self, name):
                excluded.append(getattr(self, name))
        for child in self.childItems():
            if not child.isVisible() or child in excluded:
                continue
            item_rect = self.mapRectFromItem(child, child.boundingRect())
            if first:
                first = False
                brect = item_rect
            else:
                if item_rect.left() < brect.left():
                    brect.setLeft(item_rect.left())
                if item_rect.top() < brect.top():
                    brect.setTop(item_rect.top())
                if item_rect.right() > brect.right():
                    brect.setRight(item_rect.right())
                if item_rect.bottom() > brect.bottom():
                    brect.setBottom(item_rect.bottom())
        return brect

    def boundingRect(self):
        """ Returns the bounding rectangle of the given text as it will appear
        when drawn inside the rectangle beginning at the point (x , y ) with
        width w and height h.

        Returns
        -------
        brect: QRectF
            the bounding rectangle (x, y, w, h).
        """
        brect = self.contentsRect()
        brect.setRight(brect.right())
        brect.setBottom(brect.bottom())
        return brect

    def paint(self, painter, option, widget=None):
        # Children paint themselves; the node itself draws nothing.
        pass

    def mouseDoubleClickEvent(self, event):
        """ If a sub-graph is available emit a 'subgraph_clicked' signal.
        """
        if self.graph is not None:
            self.scene().subgraph_clicked.emit(self.name, self.graph,
                                               event.modifiers())
            event.accept()
        else:
            event.ignore()

    def add_subgraph_view(self, graph, margin=5):
        """ Display the a sub-graph box in a node.

        On first call the sub-graph view is created and embedded next to
        the box; subsequent calls simply toggle its visibility.

        Parameters
        ----------
        graph: Graph
            the sub-graph box to display.
        margin: int (optional, default 5)
            the default margin.
        """
        # Create a embedded proxy view
        if self.embedded_box is None:
            view = GraphView(graph)
            proxy_view = EmbeddedSubGraphItem(view)
            view._graphics_item = weakref.proxy(proxy_view)
            proxy_view.setParentItem(self)
            posx = margin + self.box.boundingRect().width()
            proxy_view.setPos(posx, margin)
            self.embedded_box = proxy_view
        # Change visibility property of the embedded proxy view
        else:
            if self.embedded_box.isVisible():
                self.embedded_box.hide()
            else:
                self.embedded_box.show()
class EmbeddedSubGraphItem(QtWidgets.QGraphicsProxyWidget):
    """ Proxy graphics item embedding a sub-graph box view in the scene. """

    def __init__(self, sub_graph_view):
        """ Initialize the EmbeddedSubGraphItem.

        Parameters
        ----------
        sub_graph_view: GraphView
            the sub-graph view.
        """
        super(EmbeddedSubGraphItem, self).__init__()
        # Always show both scrollbars so the embedded view stays navigable.
        for set_policy in (sub_graph_view.setHorizontalScrollBarPolicy,
                           sub_graph_view.setVerticalScrollBarPolicy):
            set_policy(QtCore.Qt.ScrollBarAlwaysOn)
        # Hand the widget over to the proxy.
        self.setWidget(sub_graph_view)
class Link(QtWidgets.QGraphicsPathItem):
    """ A link between boxes, drawn as a cubic Bezier curve. """

    def __init__(self, src_position, dest_position, parent=None):
        """ Initilaize the Link class.

        Parameters
        ----------
        src_position: QPointF (mandatory)
            the source control glyph position.
        dest_position: QPointF (mandatory)
            the destination control glyph position.
        parent: QGraphicsItem, default None
            the parent graphics item.
        """
        # Inheritance
        super(Link, self).__init__(parent)

        # Define the color rendering
        pen = QtGui.QPen()
        pen.setWidth(2)
        pen.setBrush(RED_2)
        pen.setCapStyle(QtCore.Qt.RoundCap)
        pen.setJoinStyle(QtCore.Qt.RoundJoin)
        self.setPen(pen)

        # Draw the link
        self.setPath(self._build_path(src_position, dest_position))
        self.setZValue(0.5)

    @staticmethod
    def _build_path(src_position, dest_position):
        """ Build the cubic Bezier path joining the two control points.

        The two intermediate control points are offset horizontally by
        +/-100 pixels to produce a smooth S-shaped curve. Shared by
        __init__ and update to avoid duplicating the geometry code.
        """
        path = QtGui.QPainterPath()
        path.moveTo(src_position.x(), src_position.y())
        path.cubicTo(src_position.x() + 100, src_position.y(),
                     dest_position.x() - 100, dest_position.y(),
                     dest_position.x(), dest_position.y())
        return path

    def update(self, src_position, dest_position):
        """ Update the link extreme positions.

        Parameters
        ----------
        src_position: QPointF (mandatory)
            the source control glyph position.
        dest_position: QPointF (mandatory)
            the destination control glyph position.
        """
        self.setPath(self._build_path(src_position, dest_position))
class GraphScene(QtWidgets.QGraphicsScene):
""" Define a scene representing a graph.
"""
# Signal emitted when a sub graph has to be open
subgraph_clicked = QtCore.Signal(str, Graph, QtCore.Qt.KeyboardModifiers)
    def __init__(self, graph, parent=None):
        """ Initilaize the GraphScene class.

        Parameters
        ----------
        graph: Graph
            graph to be displayed.
        parent: QWidget, default None)
            parent widget.
        """
        # Inheritance
        super(GraphScene, self).__init__(parent)

        # Class parameters
        self.graph = graph
        # Maps: node name -> Node item, link description -> Link item,
        # node name -> last known scene position.
        self.gnodes = {}
        self.glinks = {}
        self.gpositions = {}

        # Add event to upadate links whenever the scene content changes
        self.changed.connect(self.update_links)
    def update_links(self):
        """ Update the node positions and associated links.

        Called on every scene change: records the current position of each
        Node item and redraws every link between the glyph centers of its
        source and destination controls.
        """
        for node in self.items():
            if isinstance(node, Node):
                self.gpositions[node.name] = node.pos()
        for linkdesc, link in self.glinks.items():
            # Parse the link description
            src_control, dest_control = self.parse_link_description(linkdesc)
            # Get the source and destination nodes/controls
            src_gnode = self.gnodes[src_control[0]]
            dest_gnode = self.gnodes[dest_control[0]]
            src_gcontrol = src_control[1]
            dest_gcontrol = dest_control[1]
            # Update the current link
            src_control_glyph = src_gnode.output_controls[src_gcontrol][0]
            dest_control_glyph = dest_gnode.input_controls[dest_gcontrol][0]
            link.update(
                src_gnode.mapToScene(src_control_glyph.get_control_point()),
                dest_gnode.mapToScene(dest_control_glyph.get_control_point()))
    def draw(self):
        """ Draw the scene representing the graph.

        Creates one box per graph node (a dedicated style flags nodes that
        embed a sub-graph), lays the boxes out automatically when no
        position is known yet, then draws every link.
        """
        # Add the graph graph
        for box_name, box in self.graph._nodes.items():
            # Define the box type and check if we are dealing with a graph
            # box
            if isinstance(box.meta, Graph):
                style = "choice1"
            else:
                style = "choice3"
            # Add the box: one anonymous input/output per connected side.
            self.add_box(
                box_name,
                inputs=[""] * (0 if box.links_from_degree == 0 else 1),
                outputs=[""] * (0 if box.links_to_degree == 0 else 1),
                active=True,
                style=style,
                graph=box.meta)
        # If no node position is defined used an automatic setup
        # based on a graph representation
        if self.gpositions == {}:
            # Scale the layout with the largest box dimension.
            scale = 0.0
            for node in self.gnodes.values():
                scale = max(node.box.boundingRect().width(), scale)
                scale = max(node.box.boundingRect().height(), scale)
            scale *= 4
            box_positions = self.graph.layout(scale=scale)
            for node_name, node_pos in box_positions.items():
                self.gnodes[node_name].setPos(QtCore.QPointF(*node_pos))
        # Create the links between the boxes
        for from_box_name, to_box_name in self.graph._links:
            self.add_link("{0}.->{1}.".format(from_box_name, to_box_name))
def parse_link_description(self, linkdesc):
""" Parse a link description.
Parameters
----------
linkdesc: string (mandatory)
link representation with the source and destination separated
by '->' and control desriptions of the form
'<box_name>.<control_name>' or '<control_name>' for graph
input or output controls.
Returns
-------
src_control: 2-uplet
the source control representation (box_name, control_name).
dest_control: 2-uplet
the destination control representation (box_name, control_name).
"""
# Parse description
srcdesc, destdesc = linkdesc.split("->")
src_control = srcdesc.split(".")
dest_control = destdesc.split(".")
# Deal with graph input and output controls
if len(src_control) == 1:
src_control.insert(0, "inputs")
if len(dest_control) == 1:
dest_control.insert(0, "outputs")
return tuple(src_control), tuple(dest_control)
def add_box(self, name, inputs, outputs, active=True, style=None,
graph=None):
""" Add a box in the graph representation.
Parameters
----------
name: string
a name for the box.
inputs: list of str
the box input controls.
outputs: list of str
the box output controls.
active: bool, default True
a special color will be applied on the box rendering depending
of this parameter.
style: string, default None
the style that will be applied to tune the box rendering.
graph: Graph, default None
the sub-graph item.
"""
# Create the node widget that represents the box
box_node = Node(name, inputs, outputs, active=active, style=style,
graph=graph)
# Update the scene
self.addItem(box_node)
node_position = self.gpositions.get(name)
if node_position is not None:
box_node.setPos(node_position)
self.gnodes[name] = box_node
def add_link(self, linkdesc):
""" Define a link between two nodes in the graph.
Parameters
----------
linkdesc: string (mandatory)
link representation with the source and destination separated
by '->' and control desriptions of the form
'<box_name>.<control_name>' or '<control_name>' for graph
input or output controls.
"""
# Parse the link description
src_control, dest_control = self.parse_link_description(linkdesc)
# Get the source and destination nodes/controls
src_gnode = self.gnodes[src_control[0]]
dest_gnode = self.gnodes[dest_control[0]]
src_gcontrol = src_control[1]
dest_gcontrol = dest_control[1]
# Create the link
src_control_glyph = src_gnode.output_controls[src_gcontrol][0]
dest_control_glyph = dest_gnode.input_controls[dest_gcontrol][0]
glink = Link(
src_gnode.mapToScene(src_control_glyph.get_control_point()),
dest_gnode.mapToScene(dest_control_glyph.get_control_point()))
# Update the scene
self.addItem(glink)
self.glinks[linkdesc] = glink
def keyPressEvent(self, event):
""" Display the graph box positions when the 'p' key is pressed.
"""
super(GraphScene, self).keyPressEvent(event)
if not event.isAccepted() and event.key() == QtCore.Qt.Key_P:
event.accept()
posdict = dict([(key, (value.x(), value.y()))
for key, value in self.gpositions.items()])
pprint(posdict)
def helpEvent(self, event):
""" Display tooltips on controls and links.
"""
item = self.itemAt(event.scenePos())
if isinstance(item, Control):
item.setToolTip("type: {0} - optional: {1}".format(
item.control.__class__.__name__, item.optional))
super(GraphScene, self).helpEvent(event)
class GraphView(QtWidgets.QGraphicsView):
    """ Graph representation (using boxes and arrows).

    Based on Qt QGraphicsView, this can be used as a Qt QWidget.

    Qt signals are emitted:

    * on a double click on a sub-graph box to display the sub-graph. If
      'ctrl' is pressed a new window is created otherwise the view is
      embedded.
    * on the wheel to zoom in or zoom out.
    * on the keyboard 'p' key to display the box node positions.

    Attributes
    ----------
    scene: GraphScene
        the main scene.
    """
    # Signal emitted when a sub graph has to be opened.
    subgraph_clicked = QtCore.Signal(str, Graph, QtCore.Qt.KeyboardModifiers)

    def __init__(self, graph, parent=None):
        """ Initialize the GraphView class.

        Parameters
        ----------
        graph: Graph
            graph to be displayed.
        parent: QWidget, default None
            parent widget.

        Raises
        ------
        Exception
            if 'graph' is not a Graph instance.
        """
        super(GraphView, self).__init__(parent)
        self.scene = None
        if not isinstance(graph, Graph):
            raise Exception("'{0}' is not a valid graph.".format(graph))
        self.set_graph(graph)

    def set_graph(self, graph):
        """ Assigns a new graph to the view.

        Parameters
        ----------
        graph: Graph
            graph to be displayed.
        """
        # Restore stored box positions if the graph carries any.
        if hasattr(graph, "_box_positions"):
            box_positions = dict(
                (box_name, QtCore.QPointF(*box_position))
                for box_name, box_position in graph._box_positions.items())
        else:
            box_positions = {}
        # Create and draw the scene.
        self.scene = GraphScene(graph, self)
        self.scene.gpositions = box_positions
        self.scene.draw()
        # Update the current view.
        self.setWindowTitle("Graph representation")
        self.setScene(self.scene)
        # Try to initialize the current view scale factor.
        # BUG FIX: the original read 'graph.scale' after checking for
        # '_scale', which raised AttributeError whenever '_scale' was set.
        if hasattr(graph, "_scale"):
            self.scale(graph._scale, graph._scale)
        # Define signals.
        self.scene.subgraph_clicked.connect(self.subgraph_clicked)
        self.scene.subgraph_clicked.connect(self.display_subgraph)

    def zoom_in(self):
        """ Zoom the view in by applying a 1.2 zoom factor.
        """
        self.scale(1.2, 1.2)

    def zoom_out(self):
        """ Zoom the view out by applying a 1 / 1.2 zoom factor.
        """
        self.scale(1.0 / 1.2, 1.0 / 1.2)

    def display_subgraph(self, node_name, graph, modifiers):
        """ Event to display the selected sub-graph.

        If 'ctrl' is pressed a new window is created, otherwise the new
        view will be embedded in its parent node box.

        Parameters
        ----------
        node_name: str
            the node name.
        graph: Graph
            the sub-graph box to display.
        modifiers: QtCore.Qt.KeyboardModifiers
            keyboard modifiers active during the click.
        """
        if modifiers & QtCore.Qt.ControlModifier:
            # Open a stand-alone window that is destroyed on close.
            view = GraphView(graph)
            QtCore.QObject.setParent(view, self.window())
            view.setAttribute(QtCore.Qt.WA_DeleteOnClose)
            view.setWindowTitle(node_name)
            view.show()
        else:
            # Embed the sub-graph inside its parent node box.
            node = self.scene.gnodes.get(node_name)
            node.add_subgraph_view(graph)

    def wheelEvent(self, event):
        """ Change the scene zoom factor on wheel events that do not target
        an embedded widget.
        """
        # NOTE(review): with Qt5-style bindings QGraphicsProxyWidget lives in
        # QtWidgets, not QtGui -- confirm the 'QtGui' module imported by this
        # file actually exposes it.
        item = self.itemAt(event.pos())
        if not isinstance(item, QtGui.QGraphicsProxyWidget):
            if event.delta() < 0:
                self.zoom_out()
            else:
                self.zoom_in()
            event.accept()
        else:
            super(GraphView, self).wheelEvent(event)
| [
"antoine.grigis@cea.fr"
] | antoine.grigis@cea.fr |
fb1fff9a82da62a462954d422bee1491b4574f0d | c1f0e38201ef91e772a1ffd3819b58382f7b4dc2 | /partner_bank_iban_convert/models/res_partner_bank.py | 138bfced7485a67c25080adfea1668015a9be059 | [] | no_license | victoralmau/account | f1007f3a645c0d529ad3cf519c0ffed0801c3eea | 522f59d8fb6bd7ae2cc910ac24152eb967a8c142 | refs/heads/master | 2022-12-24T08:33:00.150369 | 2020-04-03T08:34:21 | 2020-04-03T08:34:21 | 284,680,712 | 0 | 0 | null | 2020-08-03T11:21:46 | 2020-08-03T11:21:45 | null | UTF-8 | Python | false | false | 2,485 | py | # -*- coding: utf-8 -*-
import logging
_logger = logging.getLogger(__name__)
from openerp import api, models, fields
from openerp.exceptions import Warning
from datetime import datetime
import requests, json
class ResPartnerBank(models.Model):
    _inherit = 'res.partner.bank'

    @api.one
    def check_iban_convert(self):
        """Convert a plain 20-digit bank account number into an IBAN.

        When the record holds a domestic account number (type 'bank') and
        both the bank code and the account country are known, the IBAN is
        computed through the public openiban.com service and, on success,
        the record is switched to type 'iban'.  The lookup is best-effort:
        network failures are logged and ignored so that record creation and
        writes never block on the external service.
        """
        # All of these fields are required to build the openiban request;
        # guard clauses replace the original six-level nesting.
        if not self.acc_number or self.acc_type != 'bank':
            return
        if not self.bank_id or not self.bank_id.code:
            return
        if not self.acc_country_id or not self.acc_country_id.code:
            return
        # Normalize: trim and drop inner spaces.
        account_number = str(self.acc_number).strip().replace(' ', '')
        if len(account_number) != 20:
            return
        # NOTE(review): str.replace removes *every* occurrence of the bank
        # code, not only a leading one -- kept as-is to preserve the
        # historical behaviour; confirm whether a prefix strip is intended.
        account_number = account_number.replace(self.bank_id.code, '')
        url = 'https://openiban.com/v2/calculate/%s/%s/%s' % (
            self.acc_country_id.code, self.bank_id.code, account_number)
        try:
            # Bound the request so a slow service cannot hang the ORM call.
            response = requests.get(url, timeout=10)
        except requests.RequestException as error:
            _logger.warning('IBAN conversion request failed: %s', error)
            return
        if response.status_code != 200:
            return
        response_json = json.loads(response.text)
        if response_json.get('valid') and response_json.get('iban'):
            # Store the computed IBAN and flag the account accordingly.
            self.acc_number = str(response_json['iban'])
            self.acc_type = 'iban'

    @api.model
    def create(self, values):
        """Create the bank account, then try the IBAN conversion."""
        record = super(ResPartnerBank, self).create(values)
        record.check_iban_convert()
        return record

    @api.one
    def write(self, vals):
        """Write the changes, then re-run the IBAN conversion."""
        result = super(ResPartnerBank, self).write(vals)
        self.check_iban_convert()
        return result
"informatica@arelux.com"
] | informatica@arelux.com |
def solution(name, yearning, photo):
    """Return, for each photo, the total yearning score of the people in it.

    Args:
        name: list of people's names.
        yearning: per-person score, aligned index-by-index with ``name``.
        photo: list of photos, each a list of names appearing in that photo.

    Returns:
        list[int]: one summed score per photo; names without a score add 0.
    """
    # Build the name -> score lookup once instead of scanning per photo;
    # the stray debug print of the table was removed.
    score = dict(zip(name, yearning))
    return [sum(score.get(person, 0) for person in people)
            for people in photo]
"dlfb77@gmail.com"
] | dlfb77@gmail.com |
def query(i, arr, repeat):
    """Return element i of the infinite sequence formed by the prefix `arr`
    followed by `repeat` cycling forever."""
    prefix_len = len(arr)
    if i < prefix_len:
        return arr[i]
    # Past the prefix, wrap around inside the repeating cycle.
    return repeat[(i - prefix_len) % len(repeat)]
def solve():
    """Read one test case from stdin and answer its index queries.

    The sequence starts at n; each step divides by a, b and c in turn and
    collapses every (float) quotient to a single digit: the first decimal
    digit when it is non-zero, otherwise the leading digit of the number.
    Once a value repeats, the sequence splits into a non-repeating prefix
    and a repeating cycle, so query() can answer each index in O(1).
    """
    n = int(input())
    a,b,c = map(int,input().split())
    arr = []
    mem = {}
    while True:
        # Stop as soon as a value repeats: the cycle is closed.
        if n in mem:
            break
        else:
            arr.append(n)
            # 1-based position of n in arr; used below to split the
            # prefix from the repeating part.
            mem[n] = len(arr)
        n = n/a
        # Collapse the quotient: first decimal digit if non-zero,
        # otherwise the leading digit (float str always contains '.').
        s = str(n)
        for i in range(len(s)):
            if s[i] == ".":
                if s[i+1] != '0':
                    n = int(s[i+1])
                else:
                    n = int(s[0])
        arr.append(n)
        n = n/b
        # Same single-digit collapse after dividing by b.
        s = str(n)
        for i in range(len(s)):
            if s[i] == ".":
                if s[i+1] != '0':
                    n = int(s[i+1])
                else:
                    n = int(s[0])
        arr.append(n)
        n = n/c
        # Same single-digit collapse after dividing by c.
        s = str(n)
        for i in range(len(s)):
            if s[i] == ".":
                if s[i+1] != '0':
                    n = int(s[i+1])
                else:
                    n = int(s[0])
    # Split at the first occurrence of the repeated value: everything
    # before it is the prefix, the rest repeats forever.
    new_arr = arr[:mem[n]-1]
    repeat = arr[mem[n]-1:]
    #print(new_arr)
    #print(repeat)
    Q = int(input())
    for q in range(Q):
        i = int(input())
        print(query(i, new_arr, repeat))
if __name__ == '__main__':
    # First stdin line is the number of test cases; solve() consumes the
    # remaining input for each case.
    for t in range(int(input())):
        solve()
| [
"keshav.sm@somaiya.edu"
] | keshav.sm@somaiya.edu |
3e40c4baffc41e0e6c66dcf3225c7b95f25bf744 | 37fef592f365194c28579f95abd222cc4e1243ae | /streamlit/Unbottled/Pages/2_Wine_Explorer.py | 799e5b08ba10636602ad117bd4c472fd5310c68c | [] | no_license | edimaudo/Python-projects | be61e0d3fff63fb7bd00513dbf1401e2c1822cfb | 85d54badf82a0b653587a02e99daf389df62e012 | refs/heads/master | 2023-04-07T03:26:23.259959 | 2023-03-24T12:03:03 | 2023-03-24T12:03:03 | 72,611,253 | 4 | 3 | null | 2022-10-31T18:10:41 | 2016-11-02T06:37:17 | null | UTF-8 | Python | false | false | 1,021 | py | w# Libraries
import streamlit as st
import pandas as pd
import plotly.express as px
@st.cache
def load_data():
    """Load the wine-review CSV (path in the module-level DATA_URL);
    st.cache memoizes the resulting DataFrame across Streamlit reruns."""
    return pd.read_csv(DATA_URL)
# Load data
DATA_URL = "winemag-data_first150k.csv"
df = load_data()

st.title('Unbottled')
st.header("Wine Explorer")

# Sorted option lists for the two select boxes (stringified so NaN and
# mixed types sort cleanly).
country_list = sorted(df['country'].astype('str').unique())
variety_list = sorted(df['variety'].astype('str').unique())

country_choice = st.selectbox("Select a Country", country_list)
variety_choice = st.selectbox("Select a Variety", variety_list)
price_choice = st.slider('Select a Price Range', 0, 2500, 100)
# BUG FIX: the slider default must lie inside [min, max]; the original
# default of 5 is below the minimum of 80 and makes Streamlit raise a
# StreamlitAPIException.
points_choice = st.slider('Select a Points Range', 80, 100, 85)

# BUG FIX: the filters are combined with AND (&); the original used OR (|),
# which returned rows matching *any* single criterion instead of narrowing
# the selection down.
choice_df = df[(df.country == country_choice)
               & (df.variety == variety_choice)
               & df.price.le(price_choice)
               & df.points.le(points_choice)]
choice_df = choice_df[['country', 'variety', 'price', 'points',
                       'description', 'designation', 'winery']]
st.dataframe(choice_df)
| [
"edimaudo@gmail.com"
] | edimaudo@gmail.com |
8ee62c7ae4ce8147c226b824dc7d65eca8972907 | 03a2c1eb549a66cc0cff72857963eccb0a56031d | /leetcode/magic-squares-in-grid.py | 8fe33c0521dd7a3b930269182a7b70defb66cada | [] | no_license | nobe0716/problem_solving | c56e24564dbe3a8b7093fb37cd60c9e0b25f8e59 | cd43dc1eddb49d6b5965419e36db708c300dadf5 | refs/heads/master | 2023-01-21T14:05:54.170065 | 2023-01-15T16:36:30 | 2023-01-15T16:36:30 | 80,906,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,081 | py | from typing import List
class Solution:
def numMagicSquaresInside(self, grid: List[List[int]]) -> int:
def is_magic(x: int, y: int) -> bool:
col_sum, row_sum, dia_sum = [0, 0, 0], [0, 0, 0], [0, 0]
ws = set()
for i in range(x, x + 3):
for j in range(y, y + 3):
v = grid[i][j]
col_sum[j - y] += v
row_sum[i - x] += v
if i - x == j - y:
dia_sum[0] += v
if (i - x) + (j - y) == 2:
dia_sum[1] += v
ws.add(v)
if len(ws) > 1:
return False
return len(set(col_sum) | set(row_sum) | set(dia_sum)) == 1
n, m = len(grid), len(grid[0])
c = 0
for i in range(n - 2):
for j in range(m - 2):
if is_magic(i, j):
c += 1
return c
s = Solution()
assert s.numMagicSquaresInside([[4, 3, 8, 4], [9, 5, 1, 9], [2, 7, 6, 2]]) == 1
| [
"sunghyo.jung@navercorp.com"
] | sunghyo.jung@navercorp.com |
4ab2751ff496c14437c5da2dcae0880daf0f0322 | 9ba71c165fe70e1bba26bd3d6230c321e0aa60ec | /src/python/up_sqllite_cdi.py | 68a056f9cd7cc95957529375e5b6213b626cdf07 | [] | no_license | gabrielreiss/DB_econ | 0977899be20986a0b33937a534e6cbbc9fb767a2 | bcf6de08402b09c0e84f8696b92ef3c0846b3cc1 | refs/heads/master | 2022-08-02T03:55:08.096697 | 2020-05-26T18:57:27 | 2020-05-26T18:57:27 | 266,602,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,171 | py | import os
import sqlalchemy
import argparse
import pandas as pd
# Three dirname() hops climb from this file up to the project root.
BASE_DIR = os.path.dirname(os.path.dirname( os.path.dirname(__file__) ) )
DATA_DIR = os.path.join( BASE_DIR, 'data' )
SQL_DIR = os.path.join( BASE_DIR, 'src', 'sql' )
RESULT_DIR = os.path.join( BASE_DIR, 'resultado' )
print(DATA_DIR)
# Opening the database connection...
# NOTE(review): os.path.join builds a filesystem-style path; on Windows the
# 'sqlite:///' URL would contain backslashes -- confirm this only runs on
# POSIX systems.
str_connection = os.path.join( 'sqlite:///', DATA_DIR, 'DB_econ.db' )
engine = sqlalchemy.create_engine( str_connection )
connection = engine.connect()
# Collect the CSV data files to be loaded.
files_names = [ i for i in os.listdir( DATA_DIR ) if i.endswith('.csv') ]
def data_quality(x):
    """Remove newline and carriage-return characters from string values;
    any non-string value passes through unchanged."""
    if type(x) is str:
        # Drop characters that would corrupt the CSV -> SQL round trip.
        return x.replace("\n", "").replace("\r", "")
    return x
# Load every CSV file into its own table in the database.
for file_name in files_names:
    print(file_name)
    df_tmp = pd.read_csv(os.path.join(DATA_DIR, file_name))
    # Sanitize every column: strip embedded newlines from string cells.
    for column in df_tmp.columns:
        df_tmp[column] = df_tmp[column].apply(data_quality)
    # BUG FIX: the original used i.strip(".csv"), but str.strip removes the
    # *characters* '.', 'c', 's', 'v' from both ends (e.g. 'selic.csv' ->
    # 'elic').  splitext drops exactly the extension.
    base_name = os.path.splitext(file_name)[0]
    table_name = "tb_" + base_name.replace("df_", "")
    df_tmp.to_sql(table_name,
                  connection,
                  if_exists='replace',
                  index=False)
"gabrielreissdecastro@gmail.com"
] | gabrielreissdecastro@gmail.com |
d014b68c6bae72b2251594c82da84f8e6b87fa2e | e70e8f9f5c1b20fe36feab42ad4c2c34fc094069 | /Python/Programming Basics/Simple Calculations/10. Radians to Degrees.py | 65dc1032cef4d0e48e61c7334efb451d6ed7baa1 | [
"MIT"
] | permissive | teodoramilcheva/softuni-software-engineering | 9247ca2032915d8614017a3762d3752b3e300f37 | 98dc9faa66f42570f6538fd7ef186d2bd1d39bff | refs/heads/main | 2023-03-29T15:55:54.451641 | 2021-04-09T18:46:32 | 2021-04-09T18:46:32 | 333,551,625 | 0 | 0 | null | 2021-04-09T18:46:32 | 2021-01-27T20:30:18 | Python | UTF-8 | Python | false | false | 82 | py | import math
rad = float(input())
deg = rad * 180 / math.pi
print(round(deg, 0))
| [
"noreply@github.com"
] | teodoramilcheva.noreply@github.com |
7c3dfcfc469f34534a03b2380bb90d7fa72abae0 | f4c0df92671a9cd021415830e8b7183cc7c6422f | /Play/migrations/0001_initial.py | 4f26e1c683f4634e78ef5bd89154cb9960eafeef | [] | no_license | JorgitoR/DrawSomthing-Django-Python | 322e301e67ff58224fe6d628dde815ca8ed501ca | d6e0997e2ffbf38ca5b1a44c2c9840ac8bf8286d | refs/heads/main | 2023-04-09T16:14:31.152384 | 2021-04-13T22:26:08 | 2021-04-13T22:26:08 | 357,693,300 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | # Generated by Django 3.2 on 2021-04-13 20:27
from django.db import migrations, models
class Migration(migrations.Migration):
    # First migration of the app: it creates the initial schema.
    initial = True

    # No other migrations (from this or any other app) must run first.
    dependencies = [
    ]

    operations = [
        # A 'solucion' row holds an integer 'length' and a 'letras' string
        # of at most 20 characters.
        migrations.CreateModel(
            name='solucion',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('length', models.IntegerField()),
                ('letras', models.CharField(max_length=20)),
            ],
        ),
    ]
| [
"jorgitouribe133@gmail.com"
] | jorgitouribe133@gmail.com |
0596bfd7f19ac3df4ef8e329fb73a591a444828c | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_study.py | c48d7435c9a758b6b9ac596e39c436b127f51674 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py |
#calss header
class _STUDY():
def __init__(self,):
self.name = "STUDY"
self.definitions = [u'the activity of examining a subject in detail in order to discover new information: ', u'a drawing that an artist makes in order to test ideas before starting a painting of the same subject', u'the act of learning about a subject, usually at school or university: ', u'studying or work involving studying: ', u'used in the names of some educational subjects and courses: ', u'a room, especially in a house, used for quiet work such as reading or writing']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
6e1b2983208299a57bab42a21a2cd696e401903c | fd717fe6ca74f6d77210cdd57a8c365d27c5bfc6 | /pychron/monitors/monitor.py | 31a1c54dd066afc976076913098eb1410bb2fb66 | [
"Apache-2.0"
] | permissive | stephen-e-cox/pychron | 1dea0467d904d24c8a3dd22e5b720fbccec5c0ed | 681d5bfe2c13e514859479369c2bb20bdf5c19cb | refs/heads/master | 2021-01-19T15:40:03.663863 | 2016-07-14T14:37:16 | 2016-07-14T14:37:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,193 | py | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import Float
# ============= standard library imports ========================
from threading import Thread, Event
import time
# ============= local library imports ==========================
# from pychron.config_loadable import ConfigLoadable
# from pychron.managers.manager import Manager
from pychron.config_loadable import ConfigLoadable
from pyface.message_dialog import warning
from pychron.core.ui.gui import invoke_in_main_thread
from pychron.paths import paths
class Monitor(ConfigLoadable):
    """
    Runs every ``_fcheck`` method defined on the class from a daemon
    thread, sweeping at ``sample_delay``-second intervals until stopped.
    """
    # Seconds between successive check sweeps (overridden from config).
    sample_delay = Float(5)
    # Owning manager; its error_code is cleared when monitoring starts.
    manager = None
    _monitoring = False
    # Names of check methods that should be skipped.
    _invalid_checks = None
    # threading.Event used to ask the background thread to exit.
    _stop_signal = None
    configuration_dir_name = paths.monitors_dir
    def is_monitoring(self):
        """Return True while the background monitor thread is running."""
        return self._monitoring
    def load(self):
        """
        Read the monitor configuration.

        Returns a truthy value on success; implicitly returns None when no
        configuration could be obtained.
        """
        config = self.get_configuration()
        if config:
            self.set_attribute(config, 'sample_delay',
                               'General', 'sample_delay', cast='float', optional=False)
            self._invalid_checks = []
            return self._load_hook(config)
    def _load_hook(self, *args):
        # Subclass extension point; the default accepts any configuration.
        return True
    def stop(self):
        """
        Signal the background thread to stop after its current sweep.
        """
        if self._stop_signal:
            self._stop_signal.set()
        # self.kill = True
        self.info('Stop monitor')
        self._monitoring = False
    def warning(self, msg):
        """
        override loggable warning to issue a warning dialog
        """
        super(Monitor, self).warning(msg)
        invoke_in_main_thread(warning, None, msg)
    def monitor(self):
        """
        Start the background monitor thread if it is not already running.

        NOTE(review): True is returned even when load() fails and no
        thread is started (and None when already monitoring) -- confirm
        whether the failure branch should return False instead.
        """
        if not self._monitoring:
            self._monitoring = True
            self.info('Starting monitor')
            self._stop_signal = Event()
            if self.load():
                t = Thread(target=self._monitor_)
                t.setDaemon(1)
                t.start()
                return True
            else:
                return True
    def reset_start_time(self):
        """
        Record 'now' as the reference start time for the checks.
        """
        self.start_time = time.time()
    def check(self):
        # True if any enabled check currently reports a problem.
        return any([fi() for fi in self._get_checks()])
    def _get_checks(self):
        # All '_fcheck' methods except those marked invalid.
        return [getattr(self, h) for h in dir(self)
                if '_fcheck' in h and h not in self._invalid_checks]
    def _monitor_(self):
        """
        Body of the background thread: run every check, sleep, repeat
        until the stop signal is set.
        """
        # load before every monitor call so that changes to the config file
        # are incorpoated
        if self.manager is not None:
            # clear error
            self.manager.error_code = None
        self.gntries = 0
        self.reset_start_time()
        # funcs = [getattr(self, h) for h in dir(self)
        #          if '_fcheck' in h and h not in self._invalid_checks]
        stop_signal = self._stop_signal
        while not stop_signal.isSet():
            for fi in self._get_checks():
                fi()
                if stop_signal.isSet():
                    break
            # sleep before running monitor again
            time.sleep(self.sample_delay)
# ============= EOF ====================================
# def _monitor_(self, stop_signal):
# '''
# '''
# #load before every monitor call so that changes to the config file
# #are incorpoated
# self.load()
#
# if self.manager is not None:
# self.gntries = 0
# self.reset_start_time()
# cnt = 0
# while not stop_signal.isSet():
# '''
# double checks executed twice for every check
# '''
# for h in dir(self):
# if '_doublecheck' in h and h not in self._invalid_checks:
# func = getattr(self, h)
# func()
# if stop_signal.isSet():
# break
#
# if cnt % 2 == 0:
# for h in dir(self):
# if '_check' in h and h not in self._invalid_checks:
# func = getattr(self, h)
# func()
# if stop_signal.isSet():
# break
#
# cnt += 1
# if cnt == 100:
# cnt = 0
# #sleep before running monitor again
# time.sleep(self.sample_delay / 2.0)
| [
"jirhiker@gmail.com"
] | jirhiker@gmail.com |
my_dict = {"one": 1, "two": 2, "three": 3, "four": 4}

# Iterate the values view directly instead of re-indexing each key.
for value in my_dict.values():
    print(value)
"bhartikumari20@navgurukul.org"
] | bhartikumari20@navgurukul.org |
7fed4d541de3bcf5193df154b82115797d4c2dd1 | 1521332438d4e711b6fa4af825047a3466925511 | /GuessingGame/guessingGame.py | 9bede277f18ac0b9985398ce5dc0e9f097dfbcd3 | [] | no_license | JakeAttard/Python-2807ICT-NoteBook | df0907bdca9ff10f347498233260c97f41ea783b | 9a38035d467e569b3fb97f5ab114753efc32cecc | refs/heads/master | 2020-04-26T17:33:18.184447 | 2019-11-05T13:04:56 | 2019-11-05T13:04:56 | 173,717,675 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | import random
randomNumber = random.randint(1, 10)

# Guess-the-number loop.  Two fixes over the original: the "play again?"
# prompt is only shown after a round is actually won (it used to fire
# after *every* guess, restarting the game mid-round), and non-numeric
# input no longer crashes the program with a ValueError.
while True:
    player = input("Pick a number from 1 to 10:")
    try:
        player = int(player)
    except ValueError:
        print("Please enter a whole number!")
        continue
    if player < randomNumber:
        print("The number you entered is to low!")
    elif player > randomNumber:
        print("The number you entered is to high!")
    else:
        print("You guess the number. You win!")
        playAgain = input("Do you want to play again? (y/n)")
        if playAgain == "y":
            # Start a fresh round with a new secret number.
            randomNumber = random.randint(1, 10)
        else:
            print("Thankyou for playing!")
            break
"jakeattard18@gmail.com"
] | jakeattard18@gmail.com |
d06c93dc79f19ab2a2e5a94c345f87b70f022eb9 | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-sgw/aliyunsdksgw/request/v20180511/DeleteGatewayBlockVolumesRequest.py | a2c400048585f93262c6d389ea4f5bc52f6e85d2 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 2,010 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksgw.endpoint import endpoint_data
class DeleteGatewayBlockVolumesRequest(RpcRequest):
    """Generated RPC request: delete block volumes from a cloud storage
    gateway (product 'sgw', API version 2018-05-11).  Each get_/set_ pair
    below mirrors one query parameter of the API call."""
    def __init__(self):
        RpcRequest.__init__(self, 'sgw', '2018-05-11', 'DeleteGatewayBlockVolumes','hcs_sgw')
        self.set_method('POST')
        # Resolve the per-region endpoint when the SDK provides mappings.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_IsSourceDeletion(self):
        return self.get_query_params().get('IsSourceDeletion')

    def set_IsSourceDeletion(self,IsSourceDeletion):
        self.add_query_param('IsSourceDeletion',IsSourceDeletion)

    def get_SecurityToken(self):
        return self.get_query_params().get('SecurityToken')

    def set_SecurityToken(self,SecurityToken):
        self.add_query_param('SecurityToken',SecurityToken)

    def get_IndexId(self):
        return self.get_query_params().get('IndexId')

    def set_IndexId(self,IndexId):
        self.add_query_param('IndexId',IndexId)

    def get_GatewayId(self):
        return self.get_query_params().get('GatewayId')

    def set_GatewayId(self,GatewayId):
        self.add_query_param('GatewayId',GatewayId)
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
b0ceda92856ed55a8490f94731a8f267fc40675d | 0d1ad7e5fca72ae767c7ddbe0406eb72e733596c | /smartedukart/urls.py | a435775c5b34ea5513f8ae112901fb76a26b0265 | [
"MIT"
] | permissive | Vishesh-Conbi/Chatbot | d223faa4e2935da3f9abec8821e97c69e18a03d9 | c6a966eb1947064eeffe1a1722d672ca560377bd | refs/heads/main | 2023-03-01T02:25:49.853907 | 2021-02-09T10:22:58 | 2021-02-09T10:22:58 | 337,365,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 973 | py | """quantum URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    # Site root: delegate everything to the 'mainhome' app's URLconf.
    path('',include('mainhome.urls')),
    path('admin/', admin.site.urls),
]
# Serve user-uploaded media through Django (development convenience;
# static() is a no-op when DEBUG is False).
urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
"="
] | = |
21d2e345e0a2621e72779c42482c834df4d1fbd2 | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-mse/aliyunsdkmse/request/v20190531/GetPluginsRequest.py | 2e7bb852993547f26504fbcc5ba4ee96f124d88f | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 2,227 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmse.endpoint import endpoint_data
class GetPluginsRequest(RpcRequest):
    """Generated RPC request: list gateway plugins (product 'mse', API
    version 2019-05-31).  Each get_/set_ pair below mirrors one query
    parameter of the API call."""
    def __init__(self):
        RpcRequest.__init__(self, 'mse', '2019-05-31', 'GetPlugins','mse')
        self.set_method('POST')
        # Resolve the per-region endpoint when the SDK provides mappings.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_GatewayUniqueId(self): # String
        return self.get_query_params().get('GatewayUniqueId')

    def set_GatewayUniqueId(self, GatewayUniqueId):  # String
        self.add_query_param('GatewayUniqueId', GatewayUniqueId)

    def get_EnableOnly(self): # Boolean
        return self.get_query_params().get('EnableOnly')

    def set_EnableOnly(self, EnableOnly):  # Boolean
        self.add_query_param('EnableOnly', EnableOnly)

    def get_Name(self): # String
        return self.get_query_params().get('Name')

    def set_Name(self, Name):  # String
        self.add_query_param('Name', Name)

    def get_AcceptLanguage(self): # String
        return self.get_query_params().get('AcceptLanguage')

    def set_AcceptLanguage(self, AcceptLanguage):  # String
        self.add_query_param('AcceptLanguage', AcceptLanguage)

    def get_Category(self): # Integer
        return self.get_query_params().get('Category')

    def set_Category(self, Category):  # Integer
        self.add_query_param('Category', Category)
| [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
777b484e21a94d9a58c37e2f1454817c52064f88 | 1577e1cf4e89584a125cffb855ca50a9654c6d55 | /pyobjc/pyobjc/pyobjc-framework-InputMethodKit-2.5.1/setup.py | ba422bf4177764d84954db4d4b0ead83c748c644 | [
"MIT"
] | permissive | apple-open-source/macos | a4188b5c2ef113d90281d03cd1b14e5ee52ebffb | 2d2b15f13487673de33297e49f00ef94af743a9a | refs/heads/master | 2023-08-01T11:03:26.870408 | 2023-03-27T00:00:00 | 2023-03-27T00:00:00 | 180,595,052 | 124 | 24 | null | 2022-12-27T14:54:09 | 2019-04-10T14:06:23 | null | UTF-8 | Python | false | false | 1,203 | py | '''
Wrappers for the "InputMethodKit" framework on MacOSX 10.5 or later. The
interfaces in this framework allow you to develop input methods.
These wrappers don't include documentation, please check Apple's documentation
for information on how to use this framework and PyObjC's documentation
for general tips and tricks regarding the translation between Python
and (Objective-)C frameworks
'''
from pyobjc_setup import setup, Extension
import os

# Packaging metadata for the PyObjC InputMethodKit bindings.  The single
# extension module links against the system InputMethodKit framework and is
# rebuilt whenever any of the Modules/_InputMethodKit* sources change.
setup(
    min_os_level='10.5',
    name='pyobjc-framework-InputMethodKit',
    version="2.5.1",
    description = "Wrappers for the framework InputMethodKit on Mac OS X",
    packages = [ "InputMethodKit" ],
    setup_requires = [
        'pyobjc-core>=2.5.1',
    ],
    install_requires = [
        'pyobjc-core>=2.5.1',
        'pyobjc-framework-Cocoa>=2.5.1',
    ],
    ext_modules = [
        Extension("InputMethodKit._InputMethodKit",
            [ "Modules/_InputMethodKit.m" ],
            extra_link_args=["-framework", "InputMethodKit"],
            # List every matching source as a dependency so edits to any of
            # them trigger a rebuild of the extension.
            depends=[
                os.path.join('Modules', fn)
                for fn in os.listdir('Modules')
                if fn.startswith('_InputMethodKit')
            ]
        ),
    ]
)
| [
"opensource@apple.com"
] | opensource@apple.com |
e30b8c178cbce1fead1bc6ca1ff2dd9f37b91ab0 | 2f1c6cbade026d6d48e1be38e7faeabb93a31e22 | /stock_prepicking/__openerp__.py | b66d4a5380ff125d4f862cf33afae9fe33457acb | [] | no_license | OdooBulgaria/odoo-stock-1 | 53f21f6b4d8eb5ed2902b5f74de38fc5fdee23b0 | 3ce7794519a1ad3a4eca32a6c709713039c793b4 | refs/heads/master | 2021-01-11T16:57:40.312964 | 2017-01-18T09:22:18 | 2017-01-18T09:22:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,346 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution, third party addon
# Copyright (C) 2017- Vertel AB (<http://vertel.se>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    # Odoo addon manifest (legacy __openerp__.py format).
    'name': 'Stock Prepicking',
    'version': '0.1',
    'summary': 'Add extra picking step',
    'category': 'stock',
    'description': """
Financed by Dermanord-Svensk Hudvård AB""",
    'author': 'Vertel AB',
    'website': 'http://www.vertel.se',
    # Builds on the multiple-picker stock workflow addon.
    'depends': ['stock_multiple_picker'],
    # Backend views plus the picking page template.
    'data': ['stock_view.xml', 'stock_picking_template.xml'],
    # Client-side QWeb template for the picking widget.
    'qweb': ['static/src/xml/picking.xml'],
    'installable': True,
}
| [
"apollo_zhj@msn.com"
] | apollo_zhj@msn.com |
f1f3b500c8af88016fadb2c242416fe86c0bce21 | 1065ec75d9ee668ffd7aafc6a8de912d7c2cee6f | /addons/script.icechannel.extn.extra.uk/plugins/livetv_uk/bbc_parliament_ltvi.py | d5fcbc1f8759f07f2c949ab8e4afcac6c5a7f8e4 | [] | no_license | bopopescu/kodiprofile | 64c067ee766e8a40e5c148b8e8ea367b4879ffc7 | 7e78640a569a7f212a771aab6a4a4d9cb0eecfbe | refs/heads/master | 2021-06-11T17:16:15.498281 | 2016-04-03T06:37:30 | 2016-04-03T06:37:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,034 | py | '''
Ice Channel
'''
from entertainment.plugnplay.interfaces import LiveTVIndexer
from entertainment.plugnplay import Plugin
from entertainment import common
class bbc_parliament(LiveTVIndexer):
    """Live-TV indexer plugin entry describing the BBC Parliament channel."""
    implements = [LiveTVIndexer]

    display_name = "BBC Parliament"
    name = "bbc_parliament"
    other_names = "bbc_parliament,BBC Parliament"

    # NOTE: these imports execute once at class-creation time and therefore
    # also become class attributes; kept as-is to match the plugin framework.
    import xbmcaddon
    import os
    addon_id = 'script.icechannel.extn.extra.uk'
    addon = xbmcaddon.Addon(addon_id)
    # Channel icon shipped with the addon: resources/images/<name>.png
    img = os.path.join( addon.getAddonInfo('path'), 'resources', 'images', name + '.png' )

    regions = [
        {
            'name':'United Kingdom',
            'img':addon.getAddonInfo('icon'),
            'fanart':addon.getAddonInfo('fanart')
        },
    ]
    languages = [
        {'name':'English', 'img':'', 'fanart':''},
    ]
    genres = [
        {'name':'News', 'img':'', 'fanart':''}
    ]

    # Cleared after the metadata above is built; presumably the framework
    # re-creates the Addon handle lazily — TODO confirm.
    addon = None
| [
"sokasoka@hotmail.com"
] | sokasoka@hotmail.com |
60da94b3d5a56964d1f99e9185da3b986a435a2c | 7c99ea5b1ffe089c97615336daf4b6ceed9a5b00 | /Configurations/HighMass/v7_Full2017/ANlogplot_configuration_em.py | fd4651d3d926308afef03f77ab35c8b698d176ef | [] | no_license | flaviacetorelli/PlotsConfigurations | 948faadba356e1d5e6f546dc11dd8dacfe1c1910 | aa5cf802c86902378617f566186bc638e69f9936 | refs/heads/master | 2022-05-02T00:37:56.070453 | 2022-03-18T10:14:23 | 2022-03-18T10:14:23 | 235,580,894 | 0 | 1 | null | 2020-01-22T13:37:30 | 2020-01-22T13:37:29 | null | UTF-8 | Python | false | false | 920 | py | # example of configuration file
treeName= 'Events'
tag = 'Full2017_em'
# used by mkShape to define output directory for root files
outputDir = 'rootFile_'+tag
# file with TTree aliases
aliasesFile = 'aliases.py'
# file with list of variables
variablesFile = 'variables_forANplot.py'
# file with list of cuts
cutsFile = 'cuts_em.py'
# file with list of samples
samplesFile = 'samples.py'
# file with list of samples
plotFile = 'plot_log_em.py'
# luminosity to normalize to (in 1/fb)
lumi = 41.53
# used by mkPlot to define output directory for plots
# different from "outputDir" to do things more tidy
outputDirPlots = 'plot_'+tag
# used by mkDatacards to define output directory for datacards
outputDirDatacard = 'datacards'
# structure file for datacard
#structureFile = 'structure.py' # Is this even needed still?
# nuisances file for mkDatacards and for mkShape
nuisancesFile = 'nuisances.py'
| [
"dennis.roy@cern.ch"
] | dennis.roy@cern.ch |
8dc78b28e68a52e460a2066b41bc262bb6a51ea6 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /140_gui/pyqt_pyside/examples/PyQt_PySide_book/003_Placing several components in the box/005_Class_QSizePolicy/087_Maximum - toClass.py | a62d182be878b1c769b07b9724d61facdfe71f69 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 676 | py | from PySide import QtCore, QtGui
import sys
class SampleWindow(QtGui.QWidget):
    """Demo window: a framed label constrained by a Maximum size policy."""

    def __init__(self):
        super(SampleWindow, self).__init__()
        self.setWindowTitle("QSizePolicy")
        self.resize(300, 150)
        # With QSizePolicy.Maximum the label may shrink below its size hint
        # but will never grow past it.
        caption = QtGui.QLabel("Текст надписи")
        caption.setFrameStyle(QtGui.QFrame.Box | QtGui.QFrame.Plain)
        caption.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum,
                                                QtGui.QSizePolicy.Maximum))
        push = QtGui.QPushButton("1")
        layout = QtGui.QVBoxLayout()
        layout.addWidget(caption)
        layout.addWidget(push)
        self.setLayout(layout)
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
bd8b5d7a9f75f9e6bb277b66405299801c4562c9 | a35b24c8c3c5bdf861f3cda9396f2fa6795ec929 | /abc/151/A.py | 23cd56bd48f947d327ee7e8669e4cf83569e9086 | [] | no_license | Msksgm/atcoder_msksgm_practice | 92a19e2d6c034d95e1cfaf963aff5739edb4ab6e | 3ae2dcb7d235a480cdfdfcd6a079e183936979b4 | refs/heads/master | 2021-08-18T16:08:08.551718 | 2020-09-24T07:01:11 | 2020-09-24T07:01:11 | 224,743,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | def main():
C = input()
ans = chr(ord(C)+1)
print(ans)
if __name__ == "__main__":
main()
| [
"4419517@ed.tus.ac.jp"
] | 4419517@ed.tus.ac.jp |
6794fead579bf4d1b7bfe5452a1893e0a5b910d7 | cc1b87f9368e96e9b3ecfd5e0822d0037e60ac69 | /telemetry/telemetry/internal/platform/desktop_platform_backend_unittest.py | fe8ed448e677934ec0a9d9f2589cb9bca8531572 | [
"BSD-3-Clause"
] | permissive | CTJyeh/catapult | bd710fb413b9058a7eae6073fe97a502546bbefe | c98b1ee7e410b2fb2f7dc9e2eb01804cf7c94fcb | refs/heads/master | 2020-08-19T21:57:40.981513 | 2019-10-17T09:51:09 | 2019-10-17T18:30:16 | 215,957,813 | 1 | 0 | BSD-3-Clause | 2019-10-18T06:41:19 | 2019-10-18T06:41:17 | null | UTF-8 | Python | false | false | 1,019 | py | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import mock
from telemetry.internal.platform import linux_platform_backend
from telemetry.internal.platform import win_platform_backend
from telemetry.internal.platform import cros_platform_backend
from telemetry.internal.platform import mac_platform_backend
class DesktopPlatformBackendTest(unittest.TestCase):
  """Checks behaviour shared by all desktop platform backends."""

  def testDesktopTagInTypExpectationsTags(self):
    """Every desktop backend must report the 'desktop' typ expectations tag."""
    desktop_backends = [
        linux_platform_backend.LinuxPlatformBackend,
        win_platform_backend.WinPlatformBackend,
        cros_platform_backend.CrosPlatformBackend,
        mac_platform_backend.MacPlatformBackend]
    for db in desktop_backends:
      # Stub out the OS-version probes so each backend can be instantiated
      # without querying the host system.
      with mock.patch.object(db, 'GetOSVersionDetailString', return_value=''):
        with mock.patch.object(db, 'GetOSVersionName', return_value=''):
          self.assertIn('desktop', db().GetTypExpectationsTags())
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
7ceea105afcc67fa44c8cad1a7685eb46fca8ee6 | 214230d0796377be0bfdda286c2c389b92a19555 | /SCTF/2018 Quals/ding_j_max/solver.py | deea8232606f93e63bd58390cfbbbbe41597a98c | [
"Unlicense"
] | permissive | Qwaz/solved-hacking-problem | fa5ebfeb98ec979cf57dac1470a651199f2dc50d | cda0db4888322cce759a7362de88fff5cc79f599 | refs/heads/master | 2023-08-24T03:45:12.481496 | 2023-07-16T12:38:08 | 2023-07-16T12:38:08 | 49,208,719 | 100 | 28 | null | 2022-03-24T00:51:04 | 2016-01-07T14:18:18 | HTML | UTF-8 | Python | false | false | 1,379 | py | import sys
from pwn import *
# SCTF{I_w0u1d_l1k3_70_d3v3l0p_GUI_v3rs10n_n3x7_t1m3}
# Constants found by reverse engineering the dingJMax binary — TODO confirm:
# BREAK_ADDR appears to sit right before the key-input result is consumed,
# and PATCH_ADDR is the 5-byte instruction that is overwritten each frame.
GDB_HEADER = '(gdb) '
BREAK_ADDR = 0x401412
PATCH_ADDR = 0x401415


def gdb_command(cmd):
    """Wait for the gdb prompt, then issue one command to the child gdb."""
    gdb.recvuntil(GDB_HEADER)
    gdb.sendline(cmd)


# Drive gdb itself through pwntools; the target argument comes from argv[1].
gdb = process(['gdb', './dingJMax', sys.argv[1]])
gdb_command('b *0x%x' % BREAK_ADDR)

context.arch = 'amd64'
# Pre-assembled 5-byte `mov eax, <keycode>` stubs; one of these is patched
# over PATCH_ADDR each iteration to fake the corresponding key press.
press_none = asm('mov %eax, 0')
press_d = asm('mov %eax, 0x64')
press_f = asm('mov %eax, 0x66')
press_j = asm('mov %eax, 0x6a')
press_k = asm('mov %eax, 0x6b')

for i in range(42259):
    gdb_command('c')
    # Read the game's timing counter from the current stack frame.
    gdb_command('x/gd ($rbp-0x40)')
    timing = int(gdb.recvline().strip().split()[1])
    code = press_none
    if timing % 20 == 0 and timing // 20 >= 19:
        print timing
        # Look up the note-row string for this beat (table at 0x603280 —
        # presumably an array of char pointers; TODO confirm) and press the
        # lane whose marker is 'o'.
        gdb_command('x/gx 0x%x' % (0x603280 + 8*(timing // 20 - 19)))
        str_addr = int(gdb.recvline().strip().split()[1], 16)
        print '0x%x' % str_addr
        gdb_command('x/s 0x%x' % str_addr)
        keypress = gdb.recvline().strip().split('"')[1]
        print keypress
        try:
            code = [
                press_d,
                press_f,
                press_j,
                press_k,
            ][keypress.index('o')]
        except ValueError:
            # No 'o' marker in this row: keep press_none.
            pass
    assert len(code) == 5
    # Patch the chosen 5-byte stub over the key-read instruction.
    for i in range(5):
        gdb_command('set *(unsigned char*)0x%x = %d' % (PATCH_ADDR + i, ord(code[i])))

gdb.interactive()
| [
"qwazpia@gmail.com"
] | qwazpia@gmail.com |
08b3ed5e21ee24030807a466581e83ed6918823d | 3bc089a77598694aace6b060c3aca5e9bb1e156b | /exercises/1901100254/1001S02E05_string.py | ce51182891da02309452743bc68fdcb04a6192ba | [] | no_license | Lily0905/selfteaching-python-camp | 8a91dc47b707a0e605c0722e7a50c402e3c61968 | bf1b8ea3b064937f650d50e83d98847bfc567bae | refs/heads/master | 2020-07-29T19:56:24.761264 | 2019-09-20T08:46:48 | 2019-09-20T08:46:48 | 209,940,920 | 1 | 0 | null | 2019-09-21T07:06:42 | 2019-09-21T07:06:42 | null | UTF-8 | Python | false | false | 1,720 | py |
text = '''
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambxiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
'''

# Step 2: replace 'better' with 'worse'.  str.replace returns a NEW string
# (str is immutable), so its result must be used or captured; the original
# bare `string.replace(...)` statement was a discarded no-op and was removed.
string = text
print('\n替换better后的结果:', string.replace("better", "worse"))

# Step 3: the exercise asks to drop whole words containing 'ea'; this code
# only masks the letters 'ea' with '__' (NOTE: not quite the same thing).
text1 = text.replace('better', 'worse')
text2 = text1.replace('ea', '__')
print('\n删除含ea的单词的结果:\n', text2)

# Step 4: swap upper/lower case of every letter.
# NOTE: ' '.join applied to a *string* inserts a space between every
# character (text2 is a str, not a list).
text3 = ' '.join(text2)
text4 = text3.swapcase()
print('\n大小写翻转后新字符串text4为:\n', text4)

# Step 5: split into words, sort ascending (a..z) in place, and print.
text5 = text4.split()
text5.sort()
print('\n排列结果如下:\n', text5)
| [
"43633521+liujiayi0042@users.noreply.github.com"
] | 43633521+liujiayi0042@users.noreply.github.com |
aff0f112469e96ffe9c9ca4c1b77779cb217de75 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/portal/v20181001/get_console.py | e0491ed0df58d54c014286ff88faba9f6cd5b828 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 1,891 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetConsoleResult',
'AwaitableGetConsoleResult',
'get_console',
]
@pulumi.output_type
class GetConsoleResult:
    """
    Cloud shell console
    """
    def __init__(__self__, properties=None):
        # Guard against malformed payloads returned by the invoke call.
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)

    @property
    @pulumi.getter
    def properties(self) -> 'outputs.ConsolePropertiesResponse':
        """
        Cloud shell console properties.
        """
        return pulumi.get(self, "properties")
class AwaitableGetConsoleResult(GetConsoleResult):
    """Awaitable variant of GetConsoleResult so callers may ``await`` it."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` marks this method as a generator; awaiting
        # immediately produces a plain GetConsoleResult.
        if False:
            yield self
        return GetConsoleResult(
            properties=self.properties)
def get_console(console_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConsoleResult:
    """
    Use this data source to access information about an existing resource.

    :param str console_name: The name of the console
    :param pulumi.InvokeOptions opts: Options controlling the invoke call.
    """
    __args__ = dict()
    __args__['consoleName'] = console_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Fall back to the SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:portal/v20181001:getConsole', __args__, opts=opts, typ=GetConsoleResult).value

    return AwaitableGetConsoleResult(
        properties=__ret__.properties)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
ecf43cb2b8cf8ecea51db2d3ff7ead25d563ea67 | 70026e9e5a6d9f70057574c749d0ef8b603763f5 | /audits/admin.py | b0e64b5c5137ec5c2e55b741815363af7c6ef548 | [
"MIT"
] | permissive | GreenBankObservatory/nrqz_admin | 96b113e640bfdec221631626114665ef1f684cc5 | c8e0876c3ab7a3feae578fa725dbfd1bdbe52a61 | refs/heads/master | 2023-05-11T16:28:21.731339 | 2023-05-01T14:25:25 | 2023-05-01T14:25:25 | 159,407,852 | 2 | 0 | MIT | 2022-12-14T04:13:35 | 2018-11-27T22:22:48 | Python | UTF-8 | Python | false | false | 307 | py | from django.contrib import admin
from django_import_data.models import (
ModelImportAttempt,
FileImporter,
FileImportAttempt,
)
@admin.register(FileImporter)
class FileImporterAdmin(admin.ModelAdmin):
    """Django-admin page for FileImporter; only the file path is editable."""
    fields = ("file_path",)


# These two models need no customisation; register with the stock ModelAdmin.
admin.site.register([ModelImportAttempt, FileImportAttempt])
| [
"tchamber@nrao.edu"
] | tchamber@nrao.edu |
4e75fe3dfeffd6dd6d9727a0a14677fe8e3f681b | bd10d096a40f6ac88ea4ade678297cb4552626b3 | /core/nginx/config.py | 360ce683f0779e1c31e947f3ddf4d8ce13b88a42 | [
"MIT"
] | permissive | rageOS/Mailu | 26db34b082251673de5e6ff91f4668578bb996ac | 319965a4afa461a3cb63e6cf20100d9d7fe80c48 | refs/heads/master | 2021-08-23T11:38:20.771680 | 2017-12-03T18:37:36 | 2017-12-03T18:37:36 | 113,082,186 | 0 | 0 | null | 2017-12-04T18:57:07 | 2017-12-04T18:57:06 | null | UTF-8 | Python | false | false | 1,036 | py | #!/usr/bin/python
import jinja2
import os
convert = lambda src, dst, args: open(dst, "w").write(jinja2.Template(open(src).read()).render(**args))
args = os.environ.copy()
# Get the first DNS server
with open("/etc/resolv.conf") as handle:
content = handle.read().split()
args["RESOLVER"] = content[content.index("nameserver") + 1]
# TLS configuration
args["TLS"] = {
"cert": ("/certs/cert.pem", "/certs/key.pem"),
"mail": ("/certs/cert.pem", "/certs/key.pem"),
"letsencrypt": ("/certs/letsencrypt/live/mailu/fullchain.pem",
"/certs/letsencrypt/live/mailu/privkey.pem"),
"notls": None
}[args["TLS_FLAVOR"]]
if args["TLS"] and not all(os.path.exists(file_path) for file_path in args["TLS"]):
print("Missing cert or key file, disabling TLS")
args["TLS_ERROR"] = "yes"
# Build final configuration paths
convert("/conf/tls.conf", "/etc/nginx/tls.conf", args)
convert("/conf/nginx.conf", "/etc/nginx/nginx.conf", args)
if os.path.exists("/var/log/nginx.pid"):
os.system("nginx -s reload")
| [
"pierre@jaury.eu"
] | pierre@jaury.eu |
22dc7e14aaa24e3ff8644caf46c78a53ff96d37b | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /GIT-USERS/TOM-Lambda/CSEUFLEX_Intro_Python_GP/equipment.py | 8b4dff5975c361bf9cd8643cdde3ddad059ce7ea | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 676 | py | <<<<<<< HEAD
# make an equipment class with the fields of
# name, price, style and weight
# that inherits from the product class
# (leftover git merge-conflict markers resolved: both branches were identical)
from product import Product


class Equipment(Product):
    """A Product that additionally carries a style and a weight."""

    def __init__(self, name, price, style, weight):
        # Delegate the shared fields to Product; keep the extras here.
        super().__init__(name, price)
        self.style = style
        self.weight = weight

    def __str__(self):
        # Append the equipment-specific details to the base description.
        return super().__str__() + f" comes in {self.style}, {self.weight}"
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
312b2e57144f885c8394f8deee7110fb7f0dddc8 | a6ff5be50b499ffb36294e1e93ce59b138bfe622 | /test/test_integration_event.py | 636dbfb935bca44a3e065c688ab7c799920b2635 | [
"MIT"
] | permissive | MostafaSalah222/talon_one.py | 7221ebc54831dce33f1724fe0856093145d7add8 | f863bb3c2cc5ddc94d9227adcf14947b2ea7db41 | refs/heads/master | 2023-07-27T06:03:43.849686 | 2021-09-07T08:56:41 | 2021-09-07T09:01:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,069 | py | # coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import talon_one
from talon_one.models.integration_event import IntegrationEvent # noqa: E501
from talon_one.rest import ApiException
class TestIntegrationEvent(unittest.TestCase):
    """IntegrationEvent unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Build an IntegrationEvent fixture.

        When ``include_optional`` is true the optional fields are populated
        as well; otherwise only the required ones are set.
        """
        if not include_optional:
            return IntegrationEvent(
                type = '0',
                attributes = None,
            )
        return IntegrationEvent(
            profile_id = '0',
            type = '0',
            attributes = None
        )

    def testIntegrationEvent(self):
        """Test IntegrationEvent"""
        # Exercise both construction paths of the model.
        for with_optional in (False, True):
            self.make_instance(include_optional=with_optional)
if __name__ == '__main__':
unittest.main()
| [
"noreply@github.com"
] | MostafaSalah222.noreply@github.com |
123ace045db3cd03e44cc51df57ed19c1a98d7f3 | 15b12d69ac3123d1562986970ce01d7a47d171de | /SetOperation.py | 34a7d487b53786c9b2b62ea9c17e5f2fef42dc6c | [
"Apache-2.0"
] | permissive | simplymanas/python-learning | 9b67b5a7acfb3a7c2455a7d1fc66203a2b419c37 | 75bc99c0dce211fd1bce5f6ce1155e0f4c71d7d0 | refs/heads/master | 2021-07-11T06:40:24.803589 | 2021-06-20T12:06:02 | 2021-06-20T12:06:02 | 241,769,614 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,103 | py |
# Date: 27th Jun 2020
# Lets learn Set Theory in Python
# Few Operations on Sets
# Let's take two sets
first_set = {11, 21, 31, 41, 51}
second_set = {11, 61, 71, 81, 31}
print('First Set : ' + str(first_set))
print('Second Set : ' + str(second_set))
# The basic operations are:
# 1. Union of Sets
print('\nUNION of the two sets are (Both in first and second)')
print(set(first_set) | set(second_set))
# inbuilt function
print(first_set.union(second_set))
# 2. Intersection of sets
print('\nIntersection of the two sets are (common to both)')
print(set(first_set) & set(second_set))
# inbuilt function
print(first_set.intersection(second_set))
# 3. Difference of two sets
print('\nDifference of the two sets are (in first but not in second) ')
print(set(first_set) - set(second_set))
# inbuilt function
print(first_set.difference(second_set))
# 4. Symmetric difference of two sets
print('\nSymmetric Difference of the two sets are (excluding the common element of both) ')
print(set(first_set) ^ set(second_set))
# inbuilt function
print(first_set.symmetric_difference(second_set))
print() | [
"manas.dash@tesco.com"
] | manas.dash@tesco.com |
d0445a3dd070f62e6c3fd5d3d07c7b2cf5099e81 | 4505ae4b6fee0e32d799f22c32b18f79884daef4 | /src/keras/tests/keras/layers/merge_test.py | 563db757910965b28cf74eb1bc186549f8bb6a59 | [
"MIT",
"Apache-2.0"
] | permissive | lu791019/iii_HA_Image_Recognition_DL | 5cde9c2d0c06f8fe3fb69991b27fda87d42450e1 | d5f56d62af6d3aac1c216ca4ff309db08a8c9072 | refs/heads/master | 2020-08-03T06:56:05.345175 | 2019-09-29T13:20:24 | 2019-09-29T13:20:24 | 211,660,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,542 | py | import pytest
import numpy as np
from numpy.testing import assert_allclose
from keras import layers
from keras import models
from keras import backend as K
from keras.utils.test_utils import layer_test
from keras.layers import merge
def test_merge_add():
    """Add(): output shape, element-wise sum, mask handling and bad inputs."""
    i1 = layers.Input(shape=(4, 5))
    i2 = layers.Input(shape=(4, 5))
    i3 = layers.Input(shape=(4, 5))
    o = layers.add([i1, i2, i3])
    assert o._keras_shape == (None, 4, 5)
    model = models.Model([i1, i2, i3], o)

    add_layer = layers.Add()
    # Call the layer so output_shape below is defined; the returned tensor
    # itself is not needed (the previous unused `o2 =` binding was dropped).
    add_layer([i1, i2, i3])
    assert add_layer.output_shape == (None, 4, 5)

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    x3 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2, x3])
    assert out.shape == (2, 4, 5)
    assert_allclose(out, x1 + x2 + x3, atol=1e-4)

    # All-None masks collapse to None; tensor masks evaluate truthy everywhere.
    assert add_layer.compute_mask([i1, i2, i3], [None, None, None]) is None
    assert np.all(K.eval(add_layer.compute_mask(
        [i1, i2, i3], [K.variable(x1), K.variable(x2), K.variable(x3)])))

    # Test invalid use case: mask must be a list matching the inputs.
    with pytest.raises(ValueError):
        add_layer.compute_mask([i1, i2, i3], x1)
    with pytest.raises(ValueError):
        add_layer.compute_mask(i1, [None, None, None])
    with pytest.raises(ValueError):
        add_layer.compute_mask([i1, i2, i3], [None, None])
def test_merge_subtract():
    """Subtract(): output shape, values, masks and invalid-arity errors."""
    i1 = layers.Input(shape=(4, 5))
    i2 = layers.Input(shape=(4, 5))
    i3 = layers.Input(shape=(4, 5))
    i4 = layers.Input(shape=(3, 5))
    o = layers.subtract([i1, i2])
    assert o._keras_shape == (None, 4, 5)
    model = models.Model([i1, i2], o)

    subtract_layer = layers.Subtract()
    # The binding is unused; the call itself presumably builds the layer so
    # that output_shape below is defined — TODO confirm.
    o2 = subtract_layer([i1, i2])
    assert subtract_layer.output_shape == (None, 4, 5)

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2])
    assert out.shape == (2, 4, 5)
    assert_allclose(out, x1 - x2, atol=1e-4)

    assert subtract_layer.compute_mask([i1, i2], [None, None]) is None
    assert np.all(K.eval(subtract_layer.compute_mask(
        [i1, i2], [K.variable(x1), K.variable(x2)])))

    # Test invalid use case
    with pytest.raises(ValueError):
        subtract_layer.compute_mask([i1, i2], x1)
    with pytest.raises(ValueError):
        subtract_layer.compute_mask(i1, [None, None])
    # Subtract accepts exactly two inputs.
    with pytest.raises(ValueError):
        subtract_layer([i1, i2, i3])
    with pytest.raises(ValueError):
        subtract_layer([i1])
def test_merge_multiply():
    """Multiply(): output shape and element-wise product of three inputs."""
    i1 = layers.Input(shape=(4, 5))
    i2 = layers.Input(shape=(4, 5))
    i3 = layers.Input(shape=(4, 5))
    o = layers.multiply([i1, i2, i3])
    assert o._keras_shape == (None, 4, 5)
    model = models.Model([i1, i2, i3], o)

    mul_layer = layers.Multiply()
    # Binding unused; the call exercises the layer's build path.
    o2 = mul_layer([i1, i2, i3])
    assert mul_layer.output_shape == (None, 4, 5)

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    x3 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2, x3])
    assert out.shape == (2, 4, 5)
    assert_allclose(out, x1 * x2 * x3, atol=1e-4)
def test_merge_average():
    """Average(): output shape and element-wise mean of two inputs."""
    i1 = layers.Input(shape=(4, 5))
    i2 = layers.Input(shape=(4, 5))
    o = layers.average([i1, i2])
    assert o._keras_shape == (None, 4, 5)
    model = models.Model([i1, i2], o)

    avg_layer = layers.Average()
    # Binding unused; the call exercises the layer's build path.
    o2 = avg_layer([i1, i2])
    assert avg_layer.output_shape == (None, 4, 5)

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2])
    assert out.shape == (2, 4, 5)
    assert_allclose(out, 0.5 * (x1 + x2), atol=1e-4)
def test_merge_maximum():
    """Maximum(): output shape and element-wise max of two inputs."""
    i1 = layers.Input(shape=(4, 5))
    i2 = layers.Input(shape=(4, 5))
    o = layers.maximum([i1, i2])
    assert o._keras_shape == (None, 4, 5)
    model = models.Model([i1, i2], o)

    max_layer = layers.Maximum()
    # Binding unused; the call exercises the layer's build path.
    o2 = max_layer([i1, i2])
    assert max_layer.output_shape == (None, 4, 5)

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2])
    assert out.shape == (2, 4, 5)
    assert_allclose(out, np.maximum(x1, x2), atol=1e-4)
def test_merge_minimum():
    """Minimum(): output shape and element-wise min of two inputs."""
    i1 = layers.Input(shape=(4, 5))
    i2 = layers.Input(shape=(4, 5))
    o = layers.minimum([i1, i2])
    assert o._keras_shape == (None, 4, 5)
    model = models.Model([i1, i2], o)

    # Note: the local is named max_layer in the original; it wraps Minimum.
    max_layer = layers.Minimum()
    o2 = max_layer([i1, i2])
    assert max_layer.output_shape == (None, 4, 5)

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2])
    assert out.shape == (2, 4, 5)
    assert_allclose(out, np.minimum(x1, x2), atol=1e-4)
def test_merge_concatenate():
    """Concatenate(axis=1): shapes (incl. unknown dims), values, masks, errors."""
    # Unknown time dimension: the concatenated axis stays unknown.
    i1 = layers.Input(shape=(None, 5))
    i2 = layers.Input(shape=(None, 5))
    o = layers.concatenate([i1, i2], axis=1)
    assert o._keras_shape == (None, None, 5)
    model = models.Model([i1, i2], o)

    # Known dims: 4 + 4 along axis 1 gives 8.
    i1 = layers.Input(shape=(4, 5))
    i2 = layers.Input(shape=(4, 5))
    o = layers.concatenate([i1, i2], axis=1)
    assert o._keras_shape == (None, 8, 5)
    model = models.Model([i1, i2], o)

    concat_layer = layers.Concatenate(axis=1)
    # Binding unused; the call exercises the layer's build path.
    o2 = concat_layer([i1, i2])
    assert concat_layer.output_shape == (None, 8, 5)

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2])
    assert out.shape == (2, 8, 5)
    assert_allclose(out, np.concatenate([x1, x2], axis=1), atol=1e-4)

    # Repeated self-concatenation: axis-1 size grows 2, 4, 8, 16 over the
    # four rounds, ending at 16 copies of the single input value.
    x3 = np.random.random((1, 1, 1))
    nb_layers = 4
    x_i = layers.Input(shape=(None, None))
    x_list = [x_i]
    x = x_i
    for i in range(nb_layers):
        x_list.append(x)
        x = layers.concatenate(x_list, axis=1)
    concat_model = models.Model(x_i, x)
    concat_out = concat_model.predict([x3])
    x3 = np.repeat(x3, 16, axis=1)
    assert concat_out.shape == (1, 16, 1)
    assert_allclose(concat_out, x3)

    assert concat_layer.compute_mask([i1, i2], [None, None]) is None
    assert np.all(K.eval(concat_layer.compute_mask(
        [i1, i2], [K.variable(x1), K.variable(x2)])).reshape(-1))

    # Test invalid use case
    with pytest.raises(ValueError):
        concat_layer.compute_mask([i1, i2], x1)
    with pytest.raises(ValueError):
        concat_layer.compute_mask(i1, [None, None])
    with pytest.raises(ValueError):
        concat_layer.compute_mask([i1, i2], [None])
    with pytest.raises(ValueError):
        concat_layer([i1])
def test_merge_dot():
    """Dot(axes=1): batch-wise inner product, positive and negative axes."""
    i1 = layers.Input(shape=(4,))
    i2 = layers.Input(shape=(4,))
    o = layers.dot([i1, i2], axes=1)
    assert o._keras_shape == (None, 1)
    model = models.Model([i1, i2], o)
    dot_layer = layers.Dot(axes=1)
    # Binding unused; the call exercises the layer's build path.
    o2 = dot_layer([i1, i2])
    assert dot_layer.output_shape == (None, 1)

    x1 = np.random.random((2, 4))
    x2 = np.random.random((2, 4))
    out = model.predict([x1, x2])
    assert out.shape == (2, 1)
    # Expected result: one inner product per sample in the batch.
    expected = np.zeros((2, 1))
    expected[0, 0] = np.dot(x1[0], x2[0])
    expected[1, 0] = np.dot(x1[1], x2[1])
    assert_allclose(out, expected, atol=1e-4)

    # Test with negative tuple of axes.
    o = layers.dot([i1, i2], axes=(-1, -1))
    assert o._keras_shape == (None, 1)
    model = models.Model([i1, i2], o)
    out = model.predict([x1, x2])
    assert out.shape == (2, 1)
    assert_allclose(out, expected, atol=1e-4)
def test_merge_broadcast():
    """Merging inputs of different ranks broadcasts the smaller one.

    Covered cases: static shapes, unknown shapes, and (on TensorFlow only)
    unknown ndim, which is simulated by stubbing out K.ndim.
    """
    # shapes provided
    i1 = layers.Input(shape=(4, 5))
    i2 = layers.Input(shape=(5,))
    ops = [layers.add, layers.maximum]
    for op in ops:
        o = op([i1, i2])
        assert o._keras_shape == (None, 4, 5)
        model = models.Model([i1, i2], o)

        x1 = np.random.random((2, 4, 5))
        x2 = np.random.random((2, 5))
        out = model.predict([x1, x2])
        assert out.shape == (2, 4, 5)

    # shapes not provided
    i1 = layers.Input(shape=(None, None))
    i2 = layers.Input(shape=(None,))
    ops = [layers.add, layers.maximum]
    for op in ops:
        o = op([i1, i2])
        assert o._keras_shape == (None, None, None)
        model = models.Model([i1, i2], o)

        x1 = np.random.random((2, 4, 5))
        x2 = np.random.random((2, 5))
        out = model.predict([x1, x2])
        assert out.shape == (2, 4, 5)

    # ndim not provided
    if K.backend() == 'tensorflow':
        k_ndim = K.ndim
        K.ndim = lambda _: None
        # Restore the monkeypatched K.ndim even if an assertion fails below,
        # so later tests in this module are not silently affected.
        try:
            i1 = layers.Input(shape=(None, None))
            i2 = layers.Input(shape=(None,))
            ops = [layers.add, layers.maximum]
            for op in ops:
                o = op([i1, i2])
                assert o._keras_shape == (None, None, None)
                model = models.Model([i1, i2], o)

                x1 = np.random.random((2, 4, 5))
                x2 = np.random.random((2, 5))
                out = model.predict([x1, x2])
                assert out.shape == (2, 4, 5)
        finally:
            K.ndim = k_ndim
def test_masking_concatenate():
    """Concatenate must propagate masks from mask-producing Embeddings."""
    input1 = layers.Input(shape=(6,))
    input2 = layers.Input(shape=(6,))
    x1 = layers.Embedding(10, 5, input_length=6, mask_zero=True)(input1)
    x2 = layers.Embedding(10, 5, input_length=6, mask_zero=True)(input2)
    x = layers.concatenate([x1, x2])
    x = layers.wrappers.TimeDistributed(layers.Dense(3, activation='softmax'))(x)
    # Building the model is the test itself: it raises if mask propagation
    # through Concatenate/TimeDistributed is broken.
    models.Model(inputs=[input1, input2], outputs=[x])
if __name__ == '__main__':
pytest.main([__file__])
| [
"noreply@github.com"
] | lu791019.noreply@github.com |
08a0c9f6e3366da46ae6a7717e53a2a4c44730be | 91352985ae08a37e916c43c554ecb3efa58ed78a | /ravens/ravens/tasks/defs_cables.py | 31bbef75ac5ee850e7893ae6e54ec13a3fabe589 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | danhernest-rist/google-research | 67614077b00d2d3145ccd5b132301f6a090f8206 | 6a1d89380b819b1ddbc8b2f400228828a80aba9b | refs/heads/master | 2023-09-03T07:26:06.255324 | 2021-10-19T06:28:08 | 2021-10-19T06:28:08 | 311,953,493 | 0 | 0 | Apache-2.0 | 2021-07-28T13:53:39 | 2020-11-11T11:42:00 | Jupyter Notebook | UTF-8 | Python | false | false | 23,620 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A set of tasks that use cables.
Design philosophy is to have a generic CableEnv with common functionality,
and then make new tasks subclass CableEnv with their specific versions of
inits and resets.
"""
import os
import time
import numpy as np
import pybullet as p
from ravens import utils as U
from ravens.tasks import Task
class CableEnv(Task):
  """Superclass for tasks that use a cable.

  Provides the shared machinery: a green target zone, a free cable made of
  rigid beads connected by point-to-point constraints, and a closed cable
  ring. Subclasses override the reset() logic and the cable parameters.
  """

  def __init__(self):
    super().__init__()
    self.ee = 'suction'
    self.primitive = 'pick_place'
    self.max_steps = 11
    # Seconds to let the simulation settle after a reset.
    self._settle_secs = 2
    self._name = None
    # Scaling the zone as needed.
    self.zone_scale = 0.01
    self.zone_length = (20.0 * self.zone_scale)
    self.zone_size = (20.0 * self.zone_scale, 20.0 * self.zone_scale, 0)
    # Target zone and debug marker visibility. Set both False for goal-based
    # tasks.
    self.target_zone_visible = True
    self.target_debug_markers = False
    # Cable-related parameters, can override in subclass.
    self.num_parts = 24
    self.radius = 0.005
    # Length of the fully smoothed cable (includes a sqrt(2) slack factor).
    self.length = 2 * self.radius * self.num_parts * np.sqrt(2)
    self.color_bead = U.COLORS['blue'] + [1]
    self.color_end = U.COLORS['yellow'] + [1]
    # Put cable bead IDs here, so we don't count non cable IDs for targets.
    self.cable_bead_ids = []

  def add_zone(self, env):
    """Adds a green target zone; sets self.zone_pose and returns its ID."""
    zone_template = 'assets/zone/zone-template.urdf'
    replace = {'LENGTH': (self.zone_scale, self.zone_scale)}
    zone_urdf = self.fill_template(zone_template, replace)
    self.zone_pose = self.random_pose(env, self.zone_size)
    zone_id = env.add_object(zone_urdf, self.zone_pose, fixed=True)
    # The filled template is a temp file; remove once loaded into the sim.
    os.remove(zone_urdf)
    return zone_id

  def add_cable(self, env, size_range, info):
    """Add a cable to the env, consisting of rigids beads.

    Add each bead ID to (a) env.objects, (b) object_points, and (c)
    cable_bead_ids. Use (b) because the demonstrator checks it to pick
    the bead farthest from a goal, and it is also used to tally up beads
    within the zone (to compute reward). Use (c) to distinguish between
    bead vs non-bead objects in case we add other items.

    Args:
      env: A ravens environment.
      size_range: Used to indicate the area of the target, so the beads
        avoid spawning there.
      info: Stores relevant stuff, such as for ground-truth targets.
    """
    num_parts = self.num_parts
    radius = self.radius
    length = self.length
    # Add beaded cable.
    distance = length / num_parts
    position, _ = self.random_pose(env, size_range)
    position = np.float32(position)
    part_shape = p.createCollisionShape(p.GEOM_BOX, halfExtents=[radius] * 3)
    part_visual = p.createVisualShape(p.GEOM_SPHERE, radius=radius * 1.5)
    # Iterate through parts and create constraints as needed.
    for i in range(num_parts):
      # Beads are spawned stacked vertically; physics drops them into place.
      position[2] += distance
      parent_frame = (0, 0, distance)
      part_id = p.createMultiBody(
          0.1, part_shape, part_visual, basePosition=position)
      if i > 0:
        # Chain this bead to the previously added object (the prior bead).
        constraint_id = p.createConstraint(
            parentBodyUniqueId=env.objects[-1],
            parentLinkIndex=-1,
            childBodyUniqueId=part_id,
            childLinkIndex=-1,
            jointType=p.JOINT_POINT2POINT,
            jointAxis=(0, 0, 0),
            parentFramePosition=parent_frame,
            childFramePosition=(0, 0, 0))
        p.changeConstraint(constraint_id, maxForce=100)
      # Colors: interior beads use color_bead; the last bead is highlighted.
      if (i > 0) and (i < num_parts - 1):
        p.changeVisualShape(part_id, -1, rgbaColor=self.color_bead)
      elif i == num_parts - 1:
        p.changeVisualShape(part_id, -1, rgbaColor=self.color_end)
      # Add objects in a consistent manner.
      self.cable_bead_ids.append(part_id)
      env.objects.append(part_id)
      self.object_points[part_id] = np.float32(([0], [0], [0]))
      # Get target placing positions for each cable bead, if applicable.
      if (self._name == 'cable-shape' or self._name == 'cable-shape-notarget' or
          self._name == 'cable-line-notarget'):
        # ----------------------------------------------------------- #
        # Here, zone_pose = square_pose, unlike Ravens cable, where the
        # zone_pose is shifted so that its center matches the straight
        # line segment center. For `true_position`, we use `zone_pose`
        # but apply the correct offset to deal with the sides. Note
        # that `length` is the size of a fully smoothed cable, BUT we
        # made a rectangle with each side <= length.
        # ----------------------------------------------------------- #
        lx = info['lengthx']
        ly = info['lengthy']
        r = radius
        if info['nb_sides'] == 1:
          # Here it's just a straight line on the 'lx' side.
          x_coord = lx / 2 - (distance * i)
          y_coord = 0
          true_position = (x_coord - r, y_coord, 0)
        elif info['nb_sides'] == 2:
          # Start from lx side, go 'left' to the pivot point, then on
          # the ly side, go 'upwards' but offset by `i`. For radius
          # offset, I just got this by tuning. XD
          if i < info['cutoff']:
            x_coord = lx / 2 - (distance * i)
            y_coord = -ly / 2
            true_position = (x_coord - r, y_coord, 0)
          else:
            x_coord = -lx / 2
            y_coord = -ly / 2 + (distance * (i - info['cutoff']))
            true_position = (x_coord, y_coord + r, 0)
        elif info['nb_sides'] == 3:
          # Start from positive lx, positive ly, go down to first
          # pivot. Then go left to the second pivot, then up again.
          # For v1, division by two is because we assume BOTH of the
          # 'ly edges' were divided by two.
          v1 = (self.num_parts - info['cutoff']) / 2
          v2 = self.num_parts - v1
          if i < v1:
            x_coord = lx / 2
            y_coord = ly / 2 - (distance * i)
            true_position = (x_coord, y_coord - r, 0)
          elif i < v2:
            x_coord = lx / 2 - (distance * (i - v1))
            y_coord = -ly / 2
            true_position = (x_coord - r, y_coord, 0)
          else:
            x_coord = -lx / 2
            y_coord = -ly / 2 + (distance * (i - v2))
            true_position = (x_coord, y_coord + r, 0)
        elif info['nb_sides'] == 4:
          # I think this is similar to the 2-side case: we start in
          # the same direction and go counter-clockwise.
          v1 = info['cutoff'] / 2
          v2 = num_parts / 2
          v3 = (num_parts + info['cutoff']) / 2
          if i < v1:
            x_coord = lx / 2 - (distance * i)
            y_coord = -ly / 2
            true_position = (x_coord, y_coord, 0)
          elif i < v2:
            x_coord = -lx / 2
            y_coord = -ly / 2 + (distance * (i - v1))
            true_position = (x_coord, y_coord, 0)
          elif i < v3:
            x_coord = -lx / 2 + (distance * (i - v2))
            y_coord = ly / 2
            true_position = (x_coord, y_coord, 0)
          else:
            x_coord = lx / 2
            y_coord = ly / 2 - (distance * (i - v3))
            true_position = (x_coord, y_coord, 0)
        # Map true_position onto the workspace from zone_pose.
        true_position = U.apply(self.zone_pose, true_position)
        # See `cable.py`: just get the places and steps set.
        self.goal['places'][part_id] = (true_position, (0, 0, 0, 1.))
        symmetry = 0
        self.goal['steps'][0][part_id] = (symmetry, [part_id])
        # Debugging target zones.
        if self.target_debug_markers:
          sq_pose = ((true_position[0], true_position[1], 0.002), (0, 0, 0, 1))
          sq_template = 'assets/square/square-template-allsides-blue.urdf'
          replace = {'DIM': (0.003,), 'HALF': (0.003 / 2,)}
          urdf = self.fill_template(sq_template, replace)
          env.add_object(urdf, sq_pose, fixed=True)
          os.remove(urdf)
      else:
        print(f'Warning, env {self._name} will not have goals.')

  def add_cable_ring(self, env, info):
    """Add a cable, but make it connected at both ends to form a ring.

    For consistency, add each `part_id` to various information tracking
    lists and dictionaries (see `add_cable` documentation).

    Args:
      env: A ravens environment.
      info: Stores relevant stuff, such as for ground-truth targets.
    """

    def rad_to_deg(rad):
      return (rad * 180.0) / np.pi

    def get_discretized_rotations(i, num_rotations):
      # counter-clockwise
      theta = i * (2 * np.pi) / num_rotations
      return (theta, rad_to_deg(theta))

    # Bead properties.
    num_parts = self.num_parts
    radius = self.radius
    color = self.color_bead
    # The `ring_radius` (not the bead radius!) has to be tuned somewhat.
    # Try to make sure the beads don't have notable gaps between them.
    ring_radius = info['ring_radius']
    beads = []
    bead_positions_l = []
    # Add beaded cable. Here, `position` is the circle center.
    position = np.float32(info['center_position'])
    part_shape = p.createCollisionShape(p.GEOM_BOX, halfExtents=[radius] * 3)
    part_visual = p.createVisualShape(p.GEOM_SPHERE, radius=radius * 1.5)
    # Iterate through parts and create constraints as needed.
    for i in range(num_parts):
      angle_rad, _ = get_discretized_rotations(i, num_parts)
      px = ring_radius * np.cos(angle_rad)
      py = ring_radius * np.sin(angle_rad)
      bead_position = np.float32([position[0] + px, position[1] + py, 0.01])
      part_id = p.createMultiBody(
          0.1, part_shape, part_visual, basePosition=bead_position)
      p.changeVisualShape(part_id, -1, rgbaColor=color)
      if i > 0:
        parent_frame = bead_position - bead_positions_l[-1]
        constraint_id = p.createConstraint(
            parentBodyUniqueId=beads[-1],
            parentLinkIndex=-1,
            childBodyUniqueId=part_id,
            childLinkIndex=-1,
            jointType=p.JOINT_POINT2POINT,
            jointAxis=(0, 0, 0),
            parentFramePosition=parent_frame,
            childFramePosition=(0, 0, 0))
        p.changeConstraint(constraint_id, maxForce=100)
      # Make a constraint with i=0. Careful with `parent_frame`!
      if i == num_parts - 1:
        parent_frame = bead_positions_l[0] - bead_position
        constraint_id = p.createConstraint(
            parentBodyUniqueId=part_id,
            parentLinkIndex=-1,
            childBodyUniqueId=beads[0],
            childLinkIndex=-1,
            jointType=p.JOINT_POINT2POINT,
            jointAxis=(0, 0, 0),
            parentFramePosition=parent_frame,
            childFramePosition=(0, 0, 0))
        p.changeConstraint(constraint_id, maxForce=100)
      # Track beads.
      beads.append(part_id)
      bead_positions_l.append(bead_position)
      # Add objects in a consistent manner.
      self.cable_bead_ids.append(part_id)
      env.objects.append(part_id)
      self.object_points[part_id] = np.float32((0, 0, 0)).reshape(3, 1)
      if self._name == 'cable-ring' or self._name == 'cable-ring-notarget':
        # We assume the starting position gives us the targets.
        true_position = (bead_position[0], bead_position[1], 0)
        self.goal['places'][part_id] = (true_position, (0, 0, 0, 1.))
        symmetry = 0
        self.goal['steps'][0][part_id] = (symmetry, [part_id])
        # Make the true positions visible if desired.
        if info['targets_visible']:
          sq_pose = ((true_position[0], true_position[1], 0.002), (0, 0, 0, 1))
          sq_template = 'assets/square/square-template-allsides-green.urdf'
          replace = {'DIM': (0.003,), 'HALF': (0.003 / 2,)}
          urdf = self.fill_template(sq_template, replace)
          env.add_object(urdf, sq_pose, fixed=True)
          os.remove(urdf)
      else:
        print(f'Warning, env {self._name} will not have goals.')

  @property
  def circle_area(self):
    """Only applies to cable-ring and cable-ring-notarget."""
    return np.pi * self.ring_radius**2

  @property
  def area_thresh(self):
    """Only applies to cable-ring and cable-ring-notarget.

    Using >= 0.8 might be too hard because moving beads to targets causes
    other beads to move, thus potentially decreasing the area of the
    convex hull of the beads. 0.75 strikes a reasonable balance.
    """
    return 0.75
class CableShape(CableEnv):
  """A single cable, and manipulating to a complex target.

  Application inspiration: moving a cable towards a target is commonly done
  in cases such as knot-tying and rearranging stuff on a surface, and more
  generally it's a common robotics benchmark.

  For now we are using targets based on line segments stacked with each
  other. This means we have to change the normal zone metric because it
  assumes a linear target, but shouldn't be too difficult. Also, because
  this involves just a simple cable, we are going to use the same
  pick_place demonstrator.

  Remember that the UR5 is at zone (0,0,0) and the 'square' is like this:

      |     |      xxxx
      |  o  |      xxxx
      |     |      xxxx
      -------

  where `o` is the center of the robot, and `x`'s represent the workspace
  (horizontal axis is x). Then the 'line' has to fill in the top part of
  the square. Each edge has length `length` in code. We generalize this for
  targets of between 1 and 4 connected line segments. Use `length_x` and
  `length_y` to determine the lengths of the sides. With two sides, we get:

          |        xxxx
        o |        xxxx   length_y
          |        xxxx
      -------
      length_x

  where `length_x + length_y = length`, or one of the original square
  sides. Thus, the square essentially defines where the target can be
  sampled. Also keep in mind that we actually use a rectangle, not a
  square; the square_pose is just used for a pose and sampling bounds.
  """

  def __init__(self):
    super().__init__()
    self.ee = 'suction'
    self.max_steps = 21
    self.metric = 'cable-target'
    self.primitive = 'pick_place'
    self._name = 'cable-shape'
    # Target zone and the debug marker visibility.
    self.target_zone_visible = True
    self.target_debug_markers = False
    # Cable parameters.
    self.num_parts = 24
    self.radius = 0.005
    self.length = 2 * self.radius * self.num_parts * np.sqrt(2)
    # Inclusive range for the number of target segments sampled per episode.
    self.num_sides_low = 2
    self.num_sides_high = 4
    # Parameters for pick_place primitive.
    self.primitive_params = {
        1: {
            'speed': 0.001,
            'delta_z': -0.001,
            'postpick_z': 0.04,
            'preplace_z': 0.04,
            'pause_place': 0.0,
        },
    }
    self.task_stage = 1
    # To see if performance varies as a function of the number of sides.
    self.nb_sides = None

  def reset(self, env):
    """Sample a 1-4 segment target, optionally show it, and spawn the cable."""
    self.total_rewards = 0
    self.object_points = {}
    self.task_stage = 1
    self.cable_bead_ids = []
    # Use this for the built-in pick_place demonstrator in `task.py`.
    self.goal = {'places': {}, 'steps': [{}]}
    # Sample the 'square pose' which is the center of a rectangle.
    square_size = (self.length, self.length, 0)
    square_pose = self.random_pose(env, square_size)
    assert square_pose is not None, 'Cannot sample a pose.'
    # Be careful. We deduce ground-truth pose labels from zone_pose.
    self.zone_pose = square_pose
    zone_range = (self.length / 10, self.length / 10, 0)
    # Sample the number of sides to preserve from the rectangle.
    low, high = self.num_sides_low, self.num_sides_high
    self.nb_sides = nb_sides = np.random.randint(
        low=low, high=high + 1)  # note +1
    template = f'assets/rectangle/rectangle-template-sides-{nb_sides}.urdf'
    if nb_sides == 1:
      # One segment target: a straight cable should be of length `length`.
      lengthx = self.length
      lengthy = 0
      cutoff = 0
    elif nb_sides == 2:
      # Two segment target: length1 + length2 should equal a straight cable.
      cutoff = np.random.randint(0, self.num_parts + 1)
      alpha = cutoff / self.num_parts
      lengthx = self.length * alpha
      lengthy = self.length * (1 - alpha)
    elif nb_sides == 3:
      # Three segment target: remove length1, but need to remove a bit more.
      offset = 4  # avoid 'extremes'
      cutoff = np.random.randint(offset, self.num_parts + 1 - offset)
      alpha = cutoff / self.num_parts
      lengthx = self.length * alpha
      lengthy = (self.length * (1 - alpha)) / 2
    elif nb_sides == 4:
      # Four segment target, divide by two to make the cable 'fit'.
      offset = 4  # avoid 'extremes'
      cutoff = np.random.randint(offset, self.num_parts + 1 - offset)
      alpha = cutoff / self.num_parts
      lengthx = (self.length * alpha) / 2
      lengthy = (self.length * (1 - alpha)) / 2
    # I deduced DIM & HALF from rectangle template through trial & error.
    dim = (lengthx, lengthy)
    half = (dim[1] / 2, dim[0] / 2)
    if self.target_zone_visible:
      replace = {'DIM': dim, 'HALF': half}
      urdf = self.fill_template(template, replace)
      env.add_object(urdf, square_pose, fixed=True)
      os.remove(urdf)
    # Add cable; `info` also carries the geometry used for bead targets.
    info = {
        'nb_sides': nb_sides,
        'cutoff': cutoff,
        'lengthx': lengthx,
        'lengthy': lengthy,
        'DIM': dim,
        'HALF': half,
    }
    self.add_cable(env, size_range=zone_range, info=info)
    env.start()
    time.sleep(self._settle_secs)
    env.pause()
class CableShapeNoTarget(CableShape):
  """CableShape variant with no visible target; goals come from a goal image."""

  def __init__(self):
    super().__init__()
    self._name = 'cable-shape-notarget'
    # Hide both the target zone and the per-bead debug markers.
    self.target_zone_visible = False
    self.target_debug_markers = False

  def reset(self, env, last_info=None):
    """Reset the episode; optionally derive bead targets from `last_info`.

    When generating training data (`main.py` / `generate_goals.py`), the
    superclass reset suffices: bead poses are already recorded into `info`.
    When loading with `load.py` (`self.goal_cond_testing` set), the cable is
    still created randomly, and the 'places' goals are then overridden with
    the bead positions stored in the loaded `last_info`.

    Args:
      env: A ravens environment.
      last_info: Info dict from the final step of a reference episode.
    """
    super().reset(env)
    if self.goal_cond_testing:
      assert last_info is not None
      self.goal['places'] = self._get_goal_info(last_info)

  def _get_goal_info(self, last_info):
    """Map each bead's PyBullet body ID to its goal pose from `last_info`."""
    # Bead bodies receive consecutive IDs starting at 4 (see class docs).
    first_bead = 4
    places = {}
    for id_ in range(first_bead, first_bead + self.num_parts):
      assert id_ in last_info, f'something went wrong with ID={id_}'
      position, _, _ = last_info[id_]
      places[id_] = (position, (0, 0, 0, 1.))
    return places
class CableLineNoTarget(CableShape):
  """Straight-line-only variant of CableShape, with no visible targets."""

  def __init__(self):
    super().__init__()
    self._name = 'cable-line-notarget'
    # Major change: restrict sampling to single-segment (straight) targets.
    self.num_sides_low = 1
    self.num_sides_high = 1
    # Hide the target zone and the per-bead debug markers.
    self.target_zone_visible = False
    self.target_debug_markers = False

  def reset(self, env, last_info=None):
    """See `CableShapeNoTarget.reset()`."""
    super().reset(env)
    if self.goal_cond_testing:
      assert last_info is not None
      self.goal['places'] = self._get_goal_info(last_info)

  def _get_goal_info(self, last_info):
    """See `CableShapeNoTarget._get_goal_info()`."""
    # Bead bodies receive consecutive IDs starting at 4.
    first_bead = 4
    places = {}
    for id_ in range(first_bead, first_bead + self.num_parts):
      assert id_ in last_info, f'something went wrong with ID={id_}'
      position, _, _ = last_info[id_]
      places[id_] = (position, (0, 0, 0, 1.))
    return places
class CableRing(CableEnv):
  """Cable as a ring.

  This differs from CableShape in that (1) the cable is a ring and
  continuously connected, and (2) the target is also a ring.

  We need good parameters for num_parts, radius, and ring_radius. So far I
  like these combinations: (24, 0.005, 0.06), (32, 0.005, 0.075), (36,
  0.005, 0.09)... using 32 parts to the bead is ideal, given that it's the
  same number as what the bag uses. The postpick and preplace should be
  just high enough to let one layer of cables go above another, to avoid
  the demonstrator engaging in back-and-forth actions.
  """

  def __init__(self):
    super().__init__()
    self.metric = 'cable-ring'
    self.max_steps = 21
    self.primitive = 'pick_place'
    self._name = 'cable-ring'
    # Cable parameters. We use ring_radius to determine sampling bounds.
    self.num_parts = 32
    self.radius = 0.005
    self.ring_radius = 0.075
    self.targets_visible = True
    # Parameters for pick_place primitive.
    self.primitive_params = {
        1: {
            'speed': 0.001,
            'delta_z': -0.001,
            'postpick_z': 0.04,
            'preplace_z': 0.04,
            'pause_place': 0.0,
        },
    }
    self.task_stage = 1

  def reset(self, env):
    """Spawn the ring, then perturb it with a random external force."""
    self.total_rewards = 0
    self.object_points = {}
    self.task_stage = 1
    self.cable_bead_ids = []
    # We need this to use the built-in pick_place demonstrator in `task.py`.
    self.goal = {'places': {}, 'steps': [{}]}
    # Sample the center of the ring, increasing size to allow for random force.
    boundary_size = (self.ring_radius * 3, self.ring_radius * 3, 0)
    boundary_pose = self.random_pose(env, boundary_size)
    # Keep only the position; the ring target is rotation-invariant.
    self.zone_pose = (boundary_pose[0], (0, 0, 0, 1))
    # Add cable ring.
    info = {
        'center_position': self.zone_pose[0],
        'ring_radius': self.ring_radius,
        'targets_visible': self.targets_visible,
    }
    self.add_cable_ring(env, info=info)
    # Env must begin before we can apply forces.
    env.start()
    # Add a small force to perturb the cable. Pick a bead at random.
    bead_idx = np.random.randint(len(self.cable_bead_ids))
    bead_id = self.cable_bead_ids[bead_idx]
    fx = np.random.randint(low=-20, high=20 + 1)
    fy = np.random.randint(low=-20, high=20 + 1)
    fz = 40
    # Apply the force repeatedly so the perturbation actually moves the ring.
    for _ in range(20):
      p.applyExternalForce(
          bead_id,
          linkIndex=-1,
          forceObj=[fx, fy, fz],
          posObj=[0, 0, 0],
          flags=p.LINK_FRAME)
    time.sleep(self._settle_secs)
    env.pause()
class CableRingNoTarget(CableRing):
  """Cable as a ring, but no target, so it subclasses CableRing."""

  def __init__(self):
    super().__init__()
    self._name = 'cable-ring-notarget'
    # Hide the per-bead goal markers drawn by add_cable_ring.
    self.targets_visible = False

  # Kept as an explicit (behavior-preserving) override for symmetry with the
  # other no-target task classes, which do extend reset().
  def reset(self, env):  # pylint: disable=useless-super-delegation
    super().reset(env)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
3d858e5db1b1ab965f3a747beeefb92469f199af | 216cbe15186bca5f48d422cb55e059ed72b1f9c8 | /models.py | f0295f6cb8b2c93d12f6851da0eaa22b9d12672a | [] | no_license | mehdidc/modnet | 9bf4d3a713b20279e3c533932d9875f19cfeff00 | 5d17d895ea5f69a2bf8cdb60bb3adc6826a4df10 | refs/heads/master | 2020-04-13T08:42:58.139445 | 2018-12-27T22:46:10 | 2018-12-27T22:46:10 | 163,089,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,540 | py | import torch.nn as nn
import torch
import math
def weight_init(m):
    """Initialize one module in-place; intended for use with `net.apply`.

    Conv2d weights get He/Kaiming normal init (std = sqrt(2 / fan_out));
    BatchNorm2d is set to the identity transform (weight=1, bias=0); Linear
    biases are zeroed (Linear weights keep PyTorch's default init).

    Robustness fix: guard against `None` parameters, which occur for
    `nn.Linear(..., bias=False)` and `nn.BatchNorm2d(..., affine=False)` —
    the original raised AttributeError on those modules.
    """
    if isinstance(m, nn.Conv2d):
        # fan_out = k_h * k_w * out_channels
        n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        m.weight.data.normal_(0, math.sqrt(2. / n))
    elif isinstance(m, nn.BatchNorm2d):
        if m.weight is not None:  # affine=False -> no learnable params
            m.weight.data.fill_(1)
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, nn.Linear):
        if m.bias is not None:  # bias=False leaves m.bias as None
            m.bias.data.zero_()
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with 1-pixel padding."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """Return a bias-free 1x1 (pointwise) convolution."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-conv residual unit (ResNet-18/34 style).

    Computes relu(bn2(conv2(relu(bn1(conv1(x))))) + shortcut), where the
    shortcut is `x` itself or `downsample(x)` when one is supplied.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # Submodule names and creation order are kept stable so that
        # state_dict keys and seeded initialization match existing code.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual unit (ResNet-50+ style).

    The final 1x1 conv expands channels by `expansion`; the optional
    `downsample` module brings the shortcut to the same shape.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # Submodule names and creation order are kept stable so that
        # state_dict keys and seeded initialization match existing code.
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = conv1x1(planes, planes * self.expansion)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class Block(nn.Module):
    """Thin wrapper around one residual stage built by `make_block`.

    Records inplane/outplane so routing layers (ModularNetController) can
    inspect the stage's channel interface.
    """

    def __init__(self, inplanes, outplanes, block_type=BasicBlock, stride=1):
        super().__init__()
        self.inplane = inplanes
        self.outplane = outplanes
        self.block_type = block_type
        self.stride = stride
        # A single residual unit of the requested type.
        self.block = make_block(
            inplanes, outplanes, block_type, 1, stride=stride)

    def forward(self, x):
        return self.block(x)
def make_block(inplanes, outplanes, block, nb_blocks, stride=1):
    """Stack `nb_blocks` residual units into an nn.Sequential.

    The first unit handles the channel/stride change; a 1x1-conv + BN
    projection shortcut is attached to it whenever shapes differ.
    """
    downsample = None
    if stride != 1 or inplanes != outplanes:
        # Projection shortcut: match spatial size and channel count.
        downsample = nn.Sequential(
            conv1x1(inplanes, outplanes, stride),
            nn.BatchNorm2d(outplanes),
        )
    units = [block(inplanes, outplanes, stride, downsample)]
    units.extend(block(outplanes, outplanes) for _ in range(1, nb_blocks))
    return nn.Sequential(*units)
class FC(nn.Module):
    """Classifier head: global average pooling followed by a linear layer."""

    def __init__(self, inplane, outplane=1000):
        super().__init__()
        self.inplane = inplane
        self.outplane = outplane
        self.fc = nn.Linear(inplane, outplane)
        self.avgpool = nn.AdaptiveAvgPool2d(1)

    def forward(self, x):
        # (B, C, H, W) -> (B, C, 1, 1) -> (B, C) -> (B, outplane)
        pooled = self.avgpool(x)
        flat = pooled.view(pooled.size(0), -1)
        return self.fc(flat)
class Controller(nn.Module):
    """Marker base class for layers that route examples between sub-modules.

    ModularNet uses `isinstance(layer, Controller)` to decide which layers
    participate in the E/M assignment procedure.
    """
    pass
class ModularNetController(Controller):
    """Routes each example in a batch through one of several candidate modules.

    A 1x1 conv + global average pool produces one logit per candidate.
    Routing modes:
      * forward: greedy (argmax logit) — used at inference time.
      * forward_E_step: sample assignments from the softmax over logits.
      * forward_M_step: use the stored per-example assignments
        (`cur_assignments`, maintained by ModularNet).

    Changes vs. original: the triplicated per-example routing loop is
    factored into `_route`, logit computation into `_logits`, and an unused
    `device` local was removed from `forward_E_step`.
    """

    def __init__(self, modules):
        super().__init__()
        # Candidates are assumed to share the same channel interface.
        self.inplane = modules[0].inplane
        self.outplane = modules[-1].outplane
        # One logit per candidate module.
        self.controller = nn.Sequential(
            nn.Conv2d(self.inplane, len(modules), kernel_size=1),
            nn.AdaptiveAvgPool2d(1)
        )
        self.components = nn.ModuleList(modules)
        # (nb_examples,) tensor of current assignments; set externally by
        # ModularNet.initialize_assignments / update_assignments.
        self.cur_assignments = None

    def _logits(self, x):
        """Return routing logits of shape (batch, nb_components)."""
        ctl = self.controller(x)
        return ctl.view(ctl.size(0), -1)

    def _route(self, x, decisions):
        """Run each example through its assigned component and re-batch."""
        outs = [self.components[d](x[i:i + 1]) for i, d in enumerate(decisions)]
        return torch.cat(outs, dim=0)

    def forward(self, x):
        # Greedy routing: each example follows its argmax logit.
        _, decisions = self._logits(x).max(dim=1)
        return self._route(x, decisions)

    def forward_E_step(self, x):
        """Sample an assignment per example from the controller's softmax."""
        logits = self._logits(x)
        probs = nn.Softmax(dim=1)(logits)
        decisions = torch.multinomial(probs, 1)[:, 0]
        return self._route(x, decisions), logits, decisions

    def forward_M_step(self, x, indices):
        """Route via the stored assignments for the given example indices."""
        logits = self._logits(x)
        device = next(self.parameters()).device
        decisions = self.cur_assignments[indices].to(device)
        return self._route(x, decisions), logits, decisions
class ModularNet(nn.Module):
    """A layer stack in which Controller layers route examples to components.

    Training alternates:
      * E-step (`forward_E_step` + `update_assignments`): evaluate the
        current assignment plus `nb_trials` sampled routings, and keep the
        most likely one per example.
      * M-step (`forward_M_step` + `M_step_loss`): train modules and
        controllers under the fixed per-example assignments.
    """

    def __init__(self, layers, nb_trials=10):
        super().__init__()
        self.layers = nn.ModuleList(layers)
        # Number of freshly sampled routings per E-step (in addition to the
        # currently stored assignment).
        self.nb_trials = nb_trials

    @property
    def controllers(self):
        """Yield the Controller layers, in network order."""
        for layer in self.layers:
            if isinstance(layer, Controller):
                yield layer

    def initialize_assignments(self, nb_examples):
        """Assign every example a uniformly random component per controller."""
        for ctl in self.controllers:
            ctl.cur_assignments = torch.randint(
                len(ctl.components), (nb_examples,))

    def forward_E_step(self, input, indices):
        """Evaluate the stored assignment plus `nb_trials` sampled routings.

        Args:
            input: Batch of inputs.
            indices: Dataset indices of the batch examples (used to look up
                the stored assignments for trial 0).

        Returns:
            (trial_outputs, trial_decisions) with shapes
            (nb_trials + 1, batch, nb_classes) and
            (nb_trials + 1, nb_controllers, batch).
        """
        trial_outputs = []
        trial_decisions = []
        # Trial 0: current assignment.
        output, _, decisions = self.forward_M_step(input, indices)
        trial_outputs.append(output)
        trial_decisions.append(torch.stack(decisions, dim=0))
        # Remaining trials: assignments sampled from the controllers.
        for _ in range(self.nb_trials):
            x = input
            controller_decisions = []
            for layer in self.layers:
                if isinstance(layer, Controller):
                    x, logits, decisions = layer.forward_E_step(x)
                    controller_decisions.append(decisions)
                else:
                    x = layer(x)
            trial_outputs.append(x)
            controller_decisions = torch.stack(controller_decisions, dim=0)
            trial_decisions.append(controller_decisions)
        # (nb_trials + 1, batch_size, nb_classes)
        trial_outputs = torch.stack(trial_outputs, dim=0)
        # (nb_trials + 1, nb_controllers, batch_size)
        trial_decisions = torch.stack(trial_decisions, dim=0)
        return trial_outputs, trial_decisions

    def update_assignments(self, indices, y_true, outputs, decisions):
        """Keep, per example, the trial assignment with highest likelihood."""
        controllers = [
            layer for layer in self.layers if isinstance(layer, Controller)]
        # (nb_trials + 1, nb_examples) log-likelihood per trial and example.
        p = self.log_likelihood(outputs, y_true)
        # Best trial index per example.
        _, best_trial = p.max(dim=0)
        for i in range(len(indices)):
            example_index = indices[i]
            best_decisions = decisions[best_trial[i]]
            for j, controller in enumerate(controllers):
                controller.cur_assignments[example_index] = best_decisions[j][i]

    def log_likelihood(self, pred, true):
        """Per-trial, per-example log-likelihood of the targets.

        Args:
            pred: (nb_trials, nb_examples, nb_classes) logits.
            true: (nb_examples,) integer class targets.

        Returns:
            (nb_trials, nb_examples) tensor of log-likelihoods.
        """
        nb_trials, nb_examples, nb_classes = pred.size()
        pred_ = pred.view(-1, nb_classes)
        # Broadcast the targets across trials, then flatten to match pred_.
        true_ = true.view(1, -1)
        true_ = true_.expand(pred.size(0), true.size(0))
        true_ = true_.contiguous()
        true_ = true_.view(-1)
        true_ = true_.long()
        # Negative cross-entropy == log-likelihood of the target class.
        prob = -nn.functional.cross_entropy(pred_, true_, reduction='none')
        prob = prob.view(nb_trials, nb_examples)
        return prob

    def forward_M_step(self, x, indices):
        """Forward pass routing via the stored per-example assignments."""
        logits_list = []
        decisions_list = []
        for layer in self.layers:
            if isinstance(layer, Controller):
                x, logits, decisions = layer.forward_M_step(x, indices)
                logits_list.append(logits)
                decisions_list.append(decisions)
            else:
                x = layer(x)
        return x, logits_list, decisions_list

    def M_step_loss(self, logits_list, decisions_list):
        """Cross-entropy making each controller predict its own assignments."""
        loss = 0
        for logits, decisions in zip(logits_list, decisions_list):
            loss += nn.functional.cross_entropy(logits, decisions)
        return loss

    def forward(self, x):
        """Plain inference pass; controllers route greedily via their forward().

        BUG FIX: the original tested `isinstance(x, Controller)` — `x` is an
        activation tensor, never a Controller — and the dead branch called a
        nonexistent `forward_train`. Calling every layer uniformly preserves
        the actual runtime behavior while removing the broken branch.
        """
        for layer in self.layers:
            x = layer(x)
        return x
def simple(nb_colors=3, nb_classes=10):
    """Build the plain (non-modular) baseline: a small ResNet-style stack.

    Args:
        nb_colors: Number of input channels.
        nb_classes: Number of output classes.

    Returns:
        An nn.Sequential with weights re-initialized via `weight_init`.
    """
    stages = [
        Block(nb_colors, 64),
        Block(64, 64),
        Block(64, 128, stride=2),
        Block(128, 128),
        Block(128, 256, stride=2),
        Block(256, 256),
        Block(256, 512, stride=2),
        FC(512, nb_classes),
    ]
    net = nn.Sequential(*stages)
    net.apply(weight_init)
    return net
def modular_simple(nb_colors=3, nb_classes=10):
    """Build the modular counterpart of `simple`: two candidates per stage.

    Each of the three routed stages holds two interchangeable Block
    candidates; a ModularNetController picks one per example.

    Args:
        nb_colors: Number of input channels.
        nb_classes: Number of output classes.

    Returns:
        A ModularNet with weights re-initialized via `weight_init`.
    """
    # All candidate blocks are created first, then the controllers, to keep
    # the module-construction (and thus seeded RNG) order identical to the
    # historical implementation.
    cand_a1, cand_b1 = Block(nb_colors, 64), Block(nb_colors, 64)
    cand_a2, cand_b2 = Block(64, 128, stride=2), Block(64, 128, stride=2)
    cand_a3, cand_b3 = Block(128, 128, stride=2), Block(128, 128, stride=2)
    net = ModularNet([
        ModularNetController([cand_a1, cand_b1]),
        ModularNetController([cand_a2, cand_b2]),
        ModularNetController([cand_a3, cand_b3]),
        FC(128, nb_classes),
    ])
    net.apply(weight_init)
    return net
if __name__ == '__main__':
    # Smoke test: one E-step followed by an assignment update on random data.
    net = modular_simple(nb_classes=2)
    x = torch.rand(5, 3, 32, 32)
    y = torch.Tensor([1, 0, 0, 1, 0])
    net.initialize_assignments(len(x))
    inds = torch.arange(0, len(x))
    # BUG FIX: forward_E_step requires the example indices; the original
    # called it with only `x`, which raised a TypeError at runtime.
    o, d = net.forward_E_step(x, inds)
    for cnt in net.controllers:
        print(cnt.cur_assignments)
    print('Update')
    net.update_assignments(inds, y, o, d)
    for cnt in net.controllers:
        print(cnt.cur_assignments)
| [
"mehdicherti@gmail.com"
] | mehdicherti@gmail.com |
9bb15e41ae62f4d5977caeaa5b592386b4a87562 | 6219e6536774e8eeb4cadc4a84f6f2bea376c1b0 | /scraper/storage_spiders/kemlrocrecom.py | 4b4717cfcc86c97a8622f2359d87ec9eb1eaf697 | [
"MIT"
] | permissive | nguyenminhthai/choinho | 109d354b410b92784a9737f020894d073bea1534 | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | refs/heads/master | 2023-05-07T16:51:46.667755 | 2019-10-22T07:53:41 | 2019-10-22T07:53:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
# Mapping from product fields to XPath selectors on a kemlrocre.com product
# page. Empty strings mean the field is not scraped for this site.
XPATH = {
    'name' : "//div[@class='detai_product_right']/h1",
    'price' : "//div[@class='detai_product_gia']/span",
    'category' : "//h2[@class='dita_name_home']/a",
    'description' : "//div[@class='ibm-active ibm-columns']/div[@class='dita_detail']",
    'images' : "//div[@class='detai_product']/div/div/img/@src",
    'canonical' : "//link[@rel='canonical']/@href",
    'base_url' : "",
    'brand' : ""
}
# Spider identity and crawl scope.
name = 'kemlrocre.com'
allowed_domains = ['kemlrocre.com']
start_urls = ['http://kemlrocre.com/']
tracking_url = ''
# Sitemap crawling is effectively disabled (no real sitemap URLs are given).
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
# Product pages look like /<digits>-<slug>.html and go to parse_item; every
# other .html page is followed for further links via parse.
rules = [
    Rule(LinkExtractor(allow=['/\d+-[a-zA-Z0-9-]+\.html$']), 'parse_item'),
    Rule(LinkExtractor(allow=['/.+\.html'], deny=['/\d+-[a-zA-Z0-9-]+\.html$']), 'parse'),
    #Rule(LinkExtractor(), 'parse_item_and_links'),
]
| [
"nguyenchungthuy.hust@gmail.com"
] | nguyenchungthuy.hust@gmail.com |
aa1f36de03e34eaff8ca75902441da8ade24fd46 | 2106e17ab0e564e8ad0b2c907e47795ec6d4835b | /examples/neural_networks/plot_mlp_alpha.py | f04f3462ba3d1a818977763991e332ac8f655e0d | [
"BSD-3-Clause"
] | permissive | smartscheduling/scikit-learn | 31eca6d5894399f003fcc607224c28831bdf86e8 | f773bb5413bf367167ce265df019c3237096ef49 | refs/heads/master | 2021-01-24T15:44:04.473676 | 2014-10-13T20:42:16 | 2014-10-13T20:42:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,120 | py | """
================================================
Varying regularization in Multi-layer Perceptron
================================================
A comparison of different regularization term 'alpha' values on synthetic
datasets. The plot shows that different alphas yield different decision
functions.
Alpha is a regularization term, or also known as penalty term, that combats
overfitting by constraining the weights' size. Increasing alpha may fix high
variance (a sign of overfitting) by encouraging smaller weights, resulting
in a decision function plot that may appear with lesser curvatures.
Similarly, decreasing alpha may fix high bias (a sign of underfitting) by
encouraging larger weights, potentially resulting in more curvatures in the
decision function plot.
"""
# Print the module docstring when the example is executed.
print(__doc__)
# Author: Issam H. Laradji
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
# NOTE(review): these imports target a pre-0.18 / development scikit-learn:
# `sklearn.cross_validation` was later renamed `model_selection` and
# `MultilayerPerceptronClassifier` became `MLPClassifier` - confirm the
# pinned scikit-learn version before modernising.
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MultilayerPerceptronClassifier
h = .02  # step size of the mesh used to paint the decision surface
# One classifier per regularization strength, log-spaced from 1e-4 to 1e4.
alphas = np.logspace(-4, 4, 5)
names = []
for i in alphas:
    names.append('alpha ' + str(i))
classifiers = []
for i in alphas:
    classifiers.append(MultilayerPerceptronClassifier(alpha=i, random_state=1))
# Build a linearly separable dataset and jitter it with uniform noise.
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
                           random_state=0, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
# Three 2-D binary datasets: moons, circles, and the jittered linear one.
datasets = [make_moons(noise=0.3, random_state=0),
            make_circles(noise=0.2, factor=0.5, random_state=1),
            linearly_separable]
figure = plt.figure(figsize=(17, 9))
i = 1  # running subplot index (row = dataset, column = classifier)
# iterate over datasets
for X, y in datasets:
    # preprocess dataset, split into training and test part
    X = StandardScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # just plot the dataset first (leftmost column has no classifier)
    cm = plt.cm.RdBu
    cm_bright = ListedColormap(['#FF0000', '#0000FF'])
    ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
    # Plot the training points
    ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
    # and testing points (semi-transparent)
    ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xticks(())
    ax.set_yticks(())
    i += 1
    # iterate over classifiers
    for name, clf in zip(names, classifiers):
        ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
        clf.fit(X_train, y_train)
        score = clf.score(X_test, y_test)
        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, m_max]x[y_min, y_max].
        if hasattr(clf, "decision_function"):
            Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
        else:
            Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
        # Plot also the training points
        ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
        # and testing points
        ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
                   alpha=0.6)
        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xticks(())
        ax.set_yticks(())
        ax.set_title(name)
        # Annotate each subplot with the test accuracy (leading 0 stripped).
        ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
                size=15, horizontalalignment='right')
        i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
| [
"issam.laradji@gmail.com"
] | issam.laradji@gmail.com |
f1cde5699db50e9afd62a6633e1d18b2c8f11428 | 9a6ae760c478f136e09c78eeff06770929f93afb | /demo2/polls/migrations/0003_auto_20190705_1417.py | 31d6dc20dd57a17db99af07be3c9ff60fe917e9e | [] | no_license | zzy0371/Py1904 | adb0faa2e29abefe08ed81835573626ce2bcd899 | 47e24a34d49356f64ffdf87bb7e1b7009b215511 | refs/heads/master | 2022-12-11T08:10:06.160540 | 2019-07-18T08:11:33 | 2019-07-18T08:11:33 | 194,625,297 | 0 | 0 | null | 2022-04-22T21:54:48 | 2019-07-01T07:50:54 | JavaScript | UTF-8 | Python | false | false | 377 | py | # Generated by Django 2.2.1 on 2019-07-05 06:17
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration altering polls.Temp.desc.
    # Must be applied after 0002_temp, which introduced the Temp model.
    dependencies = [
        ('polls', '0002_temp'),
    ]
    operations = [
        # Make Temp.desc a nullable CharField capped at 20 characters.
        migrations.AlterField(
            model_name='temp',
            name='desc',
            field=models.CharField(max_length=20, null=True),
        ),
    ]
| [
"496575233@qq.com"
] | 496575233@qq.com |
fb1c05fe450cfdeacced4b7a11ff507c7d783914 | 1f79d9d02810a944c45fc962c62159035c5a2247 | /migrations/versions/37878b76721_.py | b8ea073ef84a3fa65312468c4ebe2d712c125626 | [] | no_license | qsq-dm/mff | 5f17d6ffd1d4742dc46d1367cff35233af08a450 | d7f1e6f3fba95fe0d8ebb8937dda64a17e71f048 | refs/heads/master | 2020-12-29T02:19:29.037394 | 2016-08-01T15:40:42 | 2016-08-01T15:40:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | """empty message
Revision ID: 37878b76721
Revises: 3107ca470fdf
Create Date: 2016-02-27 10:16:36.087236
"""
# revision identifiers, used by Alembic.
revision = '37878b76721'
down_revision = '3107ca470fdf'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply this revision: add a nullable, indexed ``pics_count`` integer
    column to the ``school`` table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('school', sa.Column('pics_count', sa.Integer(), nullable=True))
    op.create_index(op.f('ix_school_pics_count'), 'school', ['pics_count'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the ``pics_count`` index first, then the
    column itself (reverse order of ``upgrade``)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_school_pics_count'), table_name='school')
    op.drop_column('school', 'pics_count')
    ### end Alembic commands ###
| [
"root@localhost"
] | root@localhost |
c3f357915308c406450f3af0a4a588a2ddcb9b30 | 7d667b70c8ae1c8f214b85d613d3a98462af9d0c | /froide/foirequestfollower/tests.py | 68fc03b7be15659bbbca939ec138dc0a38e27a53 | [
"MIT"
] | permissive | handlingar/froide | c57653a87a05fb402c1fe61f0df1ff480391f911 | 5ed80cf6550fb4cbc757029b2c860b53e784eb93 | refs/heads/master | 2021-05-28T18:13:17.573095 | 2015-06-18T13:00:16 | 2015-06-18T13:00:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,564 | py | from __future__ import with_statement
import re
import factory
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.core import mail
from django.contrib.auth import get_user_model
from django.contrib.comments.forms import CommentForm
from django.contrib.comments.models import Comment
from froide.foirequest.models import FoiRequest
from froide.foirequest.tests import factories
from .models import FoiRequestFollower
from .tasks import _batch_update
User = get_user_model()
class FoiRequestFollowerFactory(factory.DjangoModelFactory):
    """factory_boy factory producing confirmed FoiRequestFollower rows."""
    FACTORY_FOR = FoiRequestFollower
    # Each follower gets a freshly created request and user by default.
    request = factory.SubFactory(factories.FoiRequestFactory)
    user = factory.SubFactory(factories.UserFactory)
    email = ''  # logged-in followers are tracked via user, not e-mail
    confirmed = True
class FoiRequestFollowerTest(TestCase):
    """Integration tests for following/unfollowing FOI requests and for the
    follower-notification e-mails produced by ``_batch_update``."""
    def setUp(self):
        # make_world() creates the site, users 'sw'/'dummy' and fixture requests.
        self.site = factories.make_world()
    def test_following(self):
        """A requester cannot follow their own request; another user can
        follow it and later unfollow via the e-mailed confirmation link."""
        req = FoiRequest.objects.all()[0]
        user = User.objects.get(username='sw')
        self.client.login(username='sw', password='froide')
        response = self.client.post(reverse('foirequestfollower-follow',
                kwargs={"slug": req.slug}))
        # Can't follow my own requests
        self.assertEqual(response.status_code, 400)
        followers = FoiRequestFollower.objects.filter(request=req, user=user)
        self.assertEqual(followers.count(), 0)
        self.client.logout()
        user = User.objects.get(username='dummy')
        self.client.login(username='dummy', password='froide')
        response = self.client.post(reverse('foirequestfollower-follow',
                kwargs={"slug": req.slug}))
        self.assertEqual(response.status_code, 302)
        follower = FoiRequestFollower.objects.get(request=req, user=user)
        self.assertEqual(len(mail.outbox), 0)
        # A new postal reply triggers exactly one notification mail.
        req.add_postal_reply.send(sender=req)
        self.assertEqual(len(mail.outbox), 1)
        mes = mail.outbox[0]
        # Extract the unfollow check token from the notification body.
        match = re.search('/%d/(\w+)/' % follower.pk, mes.body)
        check = match.group(1)
        # A wrong check token must not delete the follower.
        response = self.client.get(
            reverse('foirequestfollower-confirm_unfollow',
                kwargs={'follow_id': follower.id,
                    'check': "a" * 32}))
        self.assertEqual(response.status_code, 302)
        follower = FoiRequestFollower.objects.get(request=req, user=user)
        # The correct token removes the follower.
        response = self.client.get(
            reverse('foirequestfollower-confirm_unfollow',
                kwargs={'follow_id': follower.id,
                    'check': check}))
        self.assertEqual(response.status_code, 302)
        try:
            FoiRequestFollower.objects.get(request=req, user=user)
        except FoiRequestFollower.DoesNotExist:
            pass
        else:
            self.assertTrue(False)
    def test_unfollowing(self):
        """Posting to the follow endpoint twice toggles follow/unfollow."""
        req = FoiRequest.objects.all()[0]
        user = User.objects.get(username='dummy')
        self.client.login(username='dummy', password='froide')
        response = self.client.post(reverse('foirequestfollower-follow',
                kwargs={"slug": req.slug}))
        self.assertEqual(response.status_code, 302)
        follower = FoiRequestFollower.objects.filter(request=req, user=user).count()
        self.assertEqual(follower, 1)
        response = self.client.post(reverse('foirequestfollower-follow',
                kwargs={"slug": req.slug}))
        self.assertEqual(response.status_code, 302)
        follower = FoiRequestFollower.objects.filter(request=req, user=user).count()
        self.assertEqual(follower, 0)
    def test_updates(self):
        """A comment on a followed request mails both the requester and the
        follower once ``_batch_update`` runs."""
        mail.outbox = []
        req = FoiRequest.objects.all()[0]
        comment_user = factories.UserFactory()
        user = User.objects.get(username='dummy')
        self.client.login(username='dummy', password='froide')
        response = self.client.post(reverse('foirequestfollower-follow',
                kwargs={"slug": req.slug}))
        self.assertEqual(response.status_code, 302)
        self.client.logout()
        self.client.login(username=comment_user.username, password='froide')
        mes = list(req.messages)[-1]
        d = {
            'name': 'Jim Bob',
            'email': 'jim.bob@example.com',
            'url': '',
            'comment': 'This is my comment',
        }
        f = CommentForm(mes)
        d.update(f.initial)
        self.client.post(reverse("comments-post-comment"), d)
        _batch_update()
        # One mail to the requester, one to the follower.
        self.assertEqual(len(mail.outbox), 2)
        self.assertEqual(mail.outbox[0].to[0], req.user.email)
        self.assertEqual(mail.outbox[1].to[0], user.email)
    def test_updates_avoid(self):
        """Users must not be notified about their own activity, and multiple
        updates for one user are batched into a single mail."""
        mail.outbox = []
        req = FoiRequest.objects.all()[0]
        dummy_user = User.objects.get(username='dummy')
        req2 = factories.FoiRequestFactory.create(
            site=self.site, user=req.user)
        mes = list(req.messages)[-1]
        mes2 = factories.FoiMessageFactory.create(request=req2)
        self.client.login(username=req.user.username, password='froide')
        d = {
            'name': 'Jim Bob',
            'email': 'jim.bob@example.com',
            'url': '',
            'comment': 'This is my comment',
        }
        # The requester commenting on their own request must not mail anyone
        # when requester updates are disabled.
        f = CommentForm(mes)
        d.update(f.initial)
        self.client.post(reverse("comments-post-comment"), d)
        _batch_update(update_requester=False)
        self.assertEqual(len(mail.outbox), 0)
        mail.outbox = []
        self.client.logout()
        # Local helpers: follow a request / comment on a message as a user.
        def do_follow(req, username):
            self.client.login(username=username, password='froide')
            response = self.client.post(reverse('foirequestfollower-follow',
                    kwargs={"slug": req.slug}))
            self.assertEqual(response.status_code, 302)
            self.client.logout()
        def do_comment(mes, username):
            self.client.login(username=username, password='froide')
            f = CommentForm(mes)
            d.update(f.initial)
            self.client.post(
                reverse("comments-post-comment"),
                d
            )
        # 'dummy' follows both requests; comments on both produce one batched
        # digest mail to 'dummy' only.
        do_follow(req, 'dummy')
        do_comment(mes, 'sw')
        do_follow(req2, 'dummy')
        do_comment(mes2, 'sw')
        _batch_update()
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].to[0], dummy_user.email)
        Comment.objects.all().delete()
        mail.outbox = []
        # 'dummy' commenting themselves notifies only the requester.
        do_comment(mes2, 'dummy')
        _batch_update()
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].to[0], req.user.email)
| [
"mail@stefanwehrmeyer.com"
] | mail@stefanwehrmeyer.com |
99206d1556504fc1696b6afc8d4d1e3e31d90434 | 5ef87d7308fd7a6a56c1fa4795e733820144fdac | /dataloader.py | 17f7c33ace3b82ace471b974c9a8a181e6639d91 | [
"Apache-2.0"
] | permissive | pprp/SimpleClassifier | 19bdcdbad5a9f3d3cd6b22f545fa0037fd94f659 | ad6d664364ebdba0efcab7366a75a179995e43cb | refs/heads/master | 2020-09-27T10:41:20.682317 | 2019-12-16T13:43:12 | 2019-12-16T13:43:12 | 226,497,408 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,608 | py | import torch
from config import cfg
from torchvision import transforms, datasets
# part 0: parameter
input_size = cfg.INPUT_SIZE
batch_size = cfg.BATCH_SIZE
# part 1: transforms
train_transforms = transforms.Compose([
transforms.RandomRotation(5),
transforms.RandomResizedCrop(input_size[0]),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((.5, .5, .5), (.5, .5, .5))
])
valid_transforms = transforms.Compose([
transforms.Resize(input_size),
transforms.RandomResizedCrop(input_size[0]),
transforms.ToTensor(),
transforms.Normalize((.5, .5, .5), (.5, .5, .5))
])
# part 2: dataset
train_dataset = datasets.ImageFolder(root=cfg.TRAIN_DATASET_DIR,
transform=train_transforms)
valid_dataset = datasets.ImageFolder(root=cfg.VALID_DATASET_DIR,
transform=valid_transforms)
# part 3: dataloader
train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=1)
valid_dataloader = torch.utils.data.DataLoader(dataset=valid_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=1)
# part 4: test
if __name__ == "__main__":
for image, label in train_dataloader:
print(image.shape, label.shape, len(train_dataloader)) | [
"1115957667@qq.com"
] | 1115957667@qq.com |
1b9b1d16718a056c304343417313209c8d7cde59 | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/cloud/dialogflow/v2/dialogflow-v2-py/google/cloud/dialogflow_v2/services/intents/transports/base.py | 00ca9ac3dc52d11bc4eccb9cbcc32ac4d52a1dbc | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,038 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.dialogflow_v2.types import intent
from google.cloud.dialogflow_v2.types import intent as gcd_intent
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
# Client metadata reported to the API; falls back to an empty ClientInfo
# when the google-cloud-dialogflow distribution cannot be found.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-dialogflow',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
# Detect the installed google-auth version (None if undeterminable); used by
# IntentsTransport._get_scopes_kwargs to pick compatible keyword arguments.
try:
    # google.auth.__version__ was added in 1.26.0
    _GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
    try:  # try pkg_resources if it is available
        _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
    except pkg_resources.DistributionNotFound:  # pragma: NO COVER
        _GOOGLE_AUTH_VERSION = None
class IntentsTransport(abc.ABC):
    """Abstract transport class for Intents.

    Auto-generated GAPIC base class: it resolves credentials/scopes and wraps
    RPC methods; concrete gRPC / gRPC-asyncio subclasses implement the RPC
    properties declared at the bottom.
    """
    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/cloud-platform',
        'https://www.googleapis.com/auth/dialogflow',
    )
    DEFAULT_HOST: str = 'dialogflow.googleapis.com'
    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: ga_credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host
        scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
        # Save the scopes.
        self._scopes = scopes
        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                                credentials_file,
                                **scopes_kwargs,
                                quota_project_id=quota_project_id
                            )
        elif credentials is None:
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
        # If the credentials is service account credentials, then always try to use self signed JWT.
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)
        # Save the credentials.
        self._credentials = credentials
    # TODO(busunkim): This method is in the base transport
    # to avoid duplicating code across the transport classes. These functions
    # should be deleted once the minimum required versions of google-auth is increased.
    # TODO: Remove this function once google-auth >= 1.25.0 is required
    @classmethod
    def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
        """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
        scopes_kwargs = {}
        if _GOOGLE_AUTH_VERSION and (
            packaging.version.parse(_GOOGLE_AUTH_VERSION)
            >= packaging.version.parse("1.25.0")
        ):
            # google-auth >= 1.25.0 understands default_scopes.
            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
        else:
            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
        return scopes_kwargs
    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        self._wrapped_methods = {
            self.list_intents: gapic_v1.method.wrap_method(
                self.list_intents,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get_intent: gapic_v1.method.wrap_method(
                self.get_intent,
                default_timeout=None,
                client_info=client_info,
            ),
            self.create_intent: gapic_v1.method.wrap_method(
                self.create_intent,
                default_timeout=None,
                client_info=client_info,
            ),
            self.update_intent: gapic_v1.method.wrap_method(
                self.update_intent,
                default_timeout=None,
                client_info=client_info,
            ),
            self.delete_intent: gapic_v1.method.wrap_method(
                self.delete_intent,
                default_timeout=None,
                client_info=client_info,
            ),
            self.batch_update_intents: gapic_v1.method.wrap_method(
                self.batch_update_intents,
                default_timeout=None,
                client_info=client_info,
            ),
            self.batch_delete_intents: gapic_v1.method.wrap_method(
                self.batch_delete_intents,
                default_timeout=None,
                client_info=client_info,
            ),
        }
    @property
    def operations_client(self) -> operations_v1.OperationsClient:
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()
    # The properties below declare the RPC surface; subclasses return the
    # concrete (sync or awaitable) callables.
    @property
    def list_intents(self) -> Callable[
            [intent.ListIntentsRequest],
            Union[
                intent.ListIntentsResponse,
                Awaitable[intent.ListIntentsResponse]
            ]]:
        raise NotImplementedError()
    @property
    def get_intent(self) -> Callable[
            [intent.GetIntentRequest],
            Union[
                intent.Intent,
                Awaitable[intent.Intent]
            ]]:
        raise NotImplementedError()
    @property
    def create_intent(self) -> Callable[
            [gcd_intent.CreateIntentRequest],
            Union[
                gcd_intent.Intent,
                Awaitable[gcd_intent.Intent]
            ]]:
        raise NotImplementedError()
    @property
    def update_intent(self) -> Callable[
            [gcd_intent.UpdateIntentRequest],
            Union[
                gcd_intent.Intent,
                Awaitable[gcd_intent.Intent]
            ]]:
        raise NotImplementedError()
    @property
    def delete_intent(self) -> Callable[
            [intent.DeleteIntentRequest],
            Union[
                empty_pb2.Empty,
                Awaitable[empty_pb2.Empty]
            ]]:
        raise NotImplementedError()
    @property
    def batch_update_intents(self) -> Callable[
            [intent.BatchUpdateIntentsRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()
    @property
    def batch_delete_intents(self) -> Callable[
            [intent.BatchDeleteIntentsRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()
__all__ = (
'IntentsTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
c79bbac857dfd71d4ab4da5645e78f296e69721d | 49a15ea9bdbf68575c034f2428ddc5bdc9b897d2 | /mysite/polls/urls.py | 5d787f4eae86d603e7098327aedff2e742373a0f | [] | no_license | chenhanfang/djangotest | 277a23c62cbf6b4d5e336642352e06d16c0238f3 | 96eeb865a4bc51fea345e54108081ae08a150e4f | refs/heads/master | 2020-12-30T18:03:24.955470 | 2017-05-17T09:41:52 | 2017-05-17T09:41:52 | 90,949,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | from django.conf.urls import url
from . import views
app_name = 'polls'
# URL routes for the polls app; names are reversed as 'polls:<name>'.
urlpatterns=[
    url(r'^$',views.index,name='index'),
    url(r'^(?P<question_id>[0-9]+)/$',views.detail,name='detail'),
    url(r'^(?P<question_id>[0-9]+)/results/$',views.results,name='results'),
    url(r'^(?P<question_id>[0-9]+)/vote/$',views.vote,name='vote'),
    # NOTE(review): this pattern reuses the name 'index', making
    # reverse('polls:index') ambiguous (Django resolves to the last
    # registered match), and the unescaped '.' matches any character
    # (e.g. 'latest/Xhtml'). Presumably r'^latest/\.html$' and a distinct
    # name were intended - confirm before changing.
    url(r'^latest/.html$',views.index,name='index')
]
"chenhanfang@zhixunkeji.cn"
] | chenhanfang@zhixunkeji.cn |
cff098258b5b5b7e9de922770351d0c19a9f6d4d | 7d4e8492de331f8bed4ef625132a3c8bb1e44b3e | /src/exceptions/aws_exceptions.py | 90b29576c3400d52412ae100877495c039908dec | [
"ISC"
] | permissive | uk-gov-mirror/dwp.dataworks-behavioural-framework | f6d1d7a94a2b18be659acd444ae8d88615e4a162 | d7c143c0fc0c4ae9e86ece34ccc1a480df1f65ad | refs/heads/master | 2023-04-09T01:09:37.313078 | 2021-04-14T15:43:44 | 2021-04-14T15:43:44 | 356,707,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | class AthenaQueryException(Exception):
"""Used when an exception occurs during an Athena query"""
pass
class HBaseQueryException(Exception):
    """Raised to signal a failure while executing an HBase query."""
| [
"noreply@github.com"
] | uk-gov-mirror.noreply@github.com |
7323ef87e2046adea00e9391a8c96ad2e572d5a9 | fc5734ad9b0dc154b3a36ec2f5d848b3d693473f | /solutions/Trees and Graphs/Graphs/max_area_of_island.py | d75e31146c0bbabbdf6159e062b5edde686ccf2e | [
"MIT"
] | permissive | aimdarx/data-structures-and-algorithms | 8e51ec2144b6e0c413bc7ef0c46aba749fd70a99 | 1659887b843c5d20ee84a24df152fb4f763db757 | refs/heads/master | 2023-08-28T12:00:33.073788 | 2021-11-07T08:31:28 | 2021-11-07T08:31:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,754 | py | """
Max Area of Island:
You are given an m x n binary matrix grid. An island is a group of 1's (representing land) connected 4-directionally (horizontal or vertical.)
You may assume all four edges of the grid are surrounded by water.
The area of an island is the number of cells with a value 1 in the island.
Return the maximum area of an island in grid. If there is no island, return 0.
Example 1:
Input: grid = [[0,0,1,0,0,0,0,1,0,0,0,0,0],[0,0,0,0,0,0,0,1,1,1,0,0,0],[0,1,1,0,1,0,0,0,0,0,0,0,0],[0,1,0,0,1,1,0,0,1,0,1,0,0],[0,1,0,0,1,1,0,0,1,1,1,0,0],[0,0,0,0,0,0,0,0,0,0,1,0,0],[0,0,0,0,0,0,0,1,1,1,0,0,0],[0,0,0,0,0,0,0,1,1,0,0,0,0]]
Output: 6
Explanation: The answer is not 11, because the island must be connected 4-directionally.
Example 2:
Input: grid = [[0,0,0,0,0,0,0,0]]
Output: 0
Example 3:
Input: grid = [[1,1,0],[0,0,0]]
Output: 2
https://leetcode.com/problems/max-area-of-island/
"""
class Solution:
    """Largest 4-connected region of 1s in a binary grid (LC 695)."""

    def maxAreaOfIsland(self, grid):
        """Return the area of the biggest island in *grid*, or 0 if none.

        Scans every cell and flood-fills from each land cell it meets.
        NOTE: visited land cells are cleared to 0 in place, so *grid* is
        consumed by the call.
        """
        best = 0
        for r in range(len(grid)):
            for c in range(len(grid[0])):
                if grid[r][c] == 1:
                    best = max(best, self.areOfIsland(grid, r, c))
        return best

    def areOfIsland(self, grid, row, col):
        """Flood-fill the island containing (row, col) and return its area.

        Out-of-range coordinates and water cells contribute 0.  Uses an
        explicit stack instead of recursion; every visited cell is zeroed
        so it is never counted twice.
        """
        if row < 0 or row >= len(grid) or col < 0 or col >= len(grid[0]) \
                or grid[row][col] != 1:
            return 0
        height, width = len(grid), len(grid[0])
        pending = [(row, col)]
        grid[row][col] = 0  # mark visited
        area = 0
        while pending:
            r, c = pending.pop()
            area += 1
            # Push the four unvisited land neighbours.
            for nr, nc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
                if 0 <= nr < height and 0 <= nc < width and grid[nr][nc] == 1:
                    grid[nr][nc] = 0
                    pending.append((nr, nc))
        return area
| [
"noreply@github.com"
] | aimdarx.noreply@github.com |
314d7d7f762ea47030a3ad526c45bdbd5b548225 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_0825+428/sdB_pg_0825+428_lc.py | 6a60c6b876ec9ccf2d2ed0b809b4bb5139d1047f | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | from gPhoton.gAperture import gAperture
def main():
    # Single hard-coded gPhoton extraction for the sdB star PG 0825+428:
    # NUV band, 30 s time bins, aperture radius 0.00555556 deg (= 20"),
    # annulus 0.005972227-0.0103888972 deg (= 21.5"-37.4"; presumably for
    # background estimation - see gPhoton gAperture docs), and gaps longer
    # than 1000 s split the lightcurve. Output goes to the fixed CSV path.
    gAperture(band="NUV", skypos=[127.239792,42.678167], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_pg_0825+428/sdB_pg_0825+428_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
c45a634d4051faca587362c4c6d02f814a05afe5 | 260133e46c0c88fd20f2ed18309c5f46508b7fb9 | /opengever/base/monkey/patches/action_info.py | e473fd4111bb55218ed7b680d6aba6c87f420f45 | [] | no_license | robertmuehsig/opengever.core | 4180fbea1436fade9b33232a293b0d43ebfc6c51 | 63b3747793d5b824c56eb3659987bb361d25d8d8 | refs/heads/master | 2020-09-08T14:55:00.340222 | 2019-11-08T10:16:02 | 2019-11-08T10:16:02 | 221,163,734 | 0 | 0 | null | 2019-11-12T08:08:59 | 2019-11-12T08:08:54 | null | UTF-8 | Python | false | false | 1,603 | py | from opengever.base.monkey.patching import MonkeyPatch
from Products.CMFCore.utils import _checkPermission
class PatchActionInfo(MonkeyPatch):
    """We patch the _checkPermissions() method of the ActionInfo object
    in order to also consider our 'file_actions' category one that should
    have its actions' permissions checked on the context.
    Without this, the permissions would be checked on the Plone Site instead.
    """
    def __call__(self):
        # Replacement for ActionInfo._checkPermissions; identical to the
        # upstream CMFCore version except for the 'file' category branch
        # marked below.
        def _checkPermissions(self, ec):
            """ Check permissions in the current context.
            """
            category = self['category']
            object = ec.contexts['object']
            if object is not None and ( category.startswith('object') or
                                        category.startswith('workflow') or
                                        category.startswith('file') or  # <-- patched
                                        category.startswith('document') ):
                context = object
            else:
                folder = ec.contexts['folder']
                if folder is not None and category.startswith('folder'):
                    context = folder
                else:
                    context = ec.contexts['portal']
            for permission in self._permissions:
                if _checkPermission(permission, context):
                    return True
            return False
        # Imported here (not at module level) so the patch target is only
        # resolved at the moment the patch is applied.
        from Products.CMFCore.ActionInformation import ActionInfo
        # NOTE(review): presumably this flag tells the MonkeyPatch machinery
        # not to rewrite existing references to the patched attribute -
        # confirm against opengever.base.monkey.patching.
        locals()['__patch_refs__'] = False
        self.patch_refs(ActionInfo, '_checkPermissions', _checkPermissions)
| [
"lukas.graf@4teamwork.ch"
] | lukas.graf@4teamwork.ch |
9f1ce72275c26f6742415bae951f4b266f69a90a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03711/s673158923.py | 24d0cec24c92d3886a0f6f29f107232db5bf4165 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | l = [4, 6, 9, 11]
x, y = map(int, input().split())
# NOTE(review): `l` (defined above as [4, 6, 9, 11]) presumably lists the
# 30-day months; month 2 (February) is in a group of its own and every
# other month has 31 days - confirm against the original problem statement.
if x == 2 or y == 2:
    # NOTE(review): if x == y == 2 this prints 'No'; presumably the input
    # guarantees x != y - confirm the problem constraints.
    print('No')
elif x in l and y in l:
    # both in the 30-day group
    print('Yes')
elif x not in l and y not in l:
    # both in the remaining (31-day) group
    print('Yes')
else:
    print('No')
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.