blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e50542f228d6759629bd4a34572c3eb303e86f74 | 3f1a6ad5c0c9015653206f45f5395d006b663a14 | /Q Learning/MTM/ARVIND/Run 9/GlobalVariables.py | 3e2d5879ffdeb20c3f0fd1e851beb570cc01e9b0 | [] | no_license | ciddhijain/Results | 075c9c51bf8bebefdf3ca87f66a50c9c39d8461e | a3ce350885264a6bf5c277bd811ad1a9931ced18 | refs/heads/master | 2021-01-10T19:23:17.764638 | 2015-07-03T14:51:48 | 2015-07-03T14:51:48 | 37,335,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,572 | py | __author__ = 'Ciddhi'
from datetime import timedelta, datetime
databaseName = 'QL_ARVIND' # This is database name to which connection is made
userName = 'root' # This is the user name for database connection
password = 'controljp' # This is the password for database connection
dbHost = '127.0.0.1' # This is host address for database connection
dbPort = '3306' # This is port for database connection
dbConnector = 'mysqlconnector' # This is the connector string to be used, depending upon python package
startDate = datetime(2014, 12, 1).date() # This is the start of training period
endDate = datetime(2015, 4, 10).date() # This is the end of training period
rankingDays = 15 # This is the number of days for which ranking is done
initializationDays = 15 # This is the number of days for which q_matrix is initilaized
liveDays = 15 # This is the number of days for which live trading is done
logFileName = "QLearningARVIND"
maxProcesses = 3 # This is the number of maximum processes
#-----------------------------------------------------------------------------------------------------------------------------------------
# These variables need to contain list values
alpha = [0.2, 0.4, 0.5, 0.6, 0.8] # This defines the weightage to long trades as compared to short trades while constructing reward matrix
gamma = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8] # This defines the weightage of old data as compared to latest observations of reward matrix
#maxGreedyLevel = [5] # This defines the number of future states for which reward is to be maximized in construction of q matrix
#individualFactor = [8] # This defines the factor of total asset which is to be allocated to each strategy
zeroRange = [0.002, 0.006, 0.01] # This determines the spread between states 0, 1, 2
factorLevelPairs = [[5, 12], [10, 12], [10, 8]]
#------------------------------------------------------------------------------------------------------------------------------------------
dummyIndividualId = -1 # This is to keep a track of max total capital that is invested in the portfolio
unitQty = 250000 # This is the amount of each decrement in asset
hourWindow = 1 # This is the window after which re-allocation is done
maxTotalAsset = 10000000 # This is the total asset deployed
trainingFactor = 2
trainingMaxTotalAsset = maxTotalAsset*trainingFactor # This is the total asset deployed while training
dummyPerformance = -50000
performanceMonthlyOutfileNameBase = 'arvind mtm monthly 15 15 15'
performanceOutfileNameBase = 'arvind mtm 15 15 15'
latestIndividualTableBase = "latest_individual_table"
trainingTradesheetTableBase = "training_tradesheet_data_table"
trainingAssetTableBase = "training_asset_allocation_table"
rankingTableBase = "ranking_table"
dailyAssetTableBase = "asset_daily_allocation_table"
newTradesheetTableBase = "tradesheet_data_table"
assetTableBase = "asset_allocation_table"
qMatrixTableBase = "q_matrix_table"
reallocationTableBase = "reallocation_table"
performanceTableBase = "performance_table"
rankingWalkforwardTableBase = "ranking_walkforward_table"
dailyMtmTableBase = "daily_mtm_table"
| [
"ciddhijain@gmail.com"
] | ciddhijain@gmail.com |
cac0bf55441435485f57281a5edd5505e43adbd5 | 53db22afe7b2dc8344dd2ab0636644109708128f | /clab/nninit/__init__.py | f4854ab3143e64ac2d0a2683aa9e6858db9ca291 | [
"Apache-2.0"
] | permissive | Erotemic/clab | 5b5c11b2e4d39642f071c560e0e7caf3397c372d | 89af79816d219cbecefefa209c0f6dc1fe390375 | refs/heads/master | 2018-10-09T18:04:31.762389 | 2018-08-13T14:23:38 | 2018-08-13T14:23:38 | 110,250,463 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | """
python -c "import ubelt._internal as a; a.autogen_init('clab.nninit')"
python -m clab
"""
# flake8: noqa
from clab.nninit import base
from clab.nninit import lsuv
from clab.nninit.base import (HeNormal, KaimingNormal, KaimingUniform, NoOp,
Orthogonal, Pretrained, VGG16, apply_initializer,
constant, he_normal, he_uniform, init_he_normal,
kaiming_normal, kaiming_uniform,
load_partial_state, normal, orthogonal, shock,
shock_he, sparse, trainable_layers, uniform,
xavier_normal, xavier_uniform,)
from clab.nninit.lsuv import (LSUV, Orthonormal, svd_orthonormal,)
| [
"crallj@rpi.edu"
] | crallj@rpi.edu |
9c198d82430f363233c68b084eddf9cf7586ae1e | d61183674ed7de0de626490cfba77d67c298d1be | /py_scripts/bench_plot_neighbors.py | 54b896c2395f7e0598e4cd69e6dd4c51357c11ad | [] | no_license | Giannos-G/python_dataset | bc670a53143d92cf781e88dee608da38b0e63886 | 18e24cbef16ada1003a3e15a2ed2a3f995f25e46 | refs/heads/main | 2023-07-25T20:24:31.988271 | 2021-09-09T10:31:41 | 2021-09-09T10:31:41 | 363,489,911 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,405 | py | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
    """Return an (N, D) data matrix for benchmarking.

    'dense' yields reproducible uniform random data (seeded); 'digits'
    slices the scikit-learn digits set, keeping the D columns with the
    largest values in the first sample. Any other name raises ValueError.
    """
    if dataset == 'dense':
        # Fixed seed so repeated benchmark runs see identical data.
        np.random.seed(0)
        return np.random.random((N, D))
    if dataset == 'digits':
        X, _ = datasets.load_digits(return_X_y=True)
        # Reorder columns by descending value in the first sample, then crop.
        return X[:, np.argsort(X[0])[::-1]][:N, :D]
    raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
                      Drange=2 ** np.arange(7),
                      krange=2 ** np.arange(10),
                      N=1000,
                      D=64,
                      k=5,
                      leaf_size=30,
                      dataset='digits'):
    """Benchmark and plot nearest-neighbors build/query times.

    Sweeps each of N (sample count), D (dimension), and k (neighbors)
    in turn while holding the other two at their fiducial values, timing
    tree construction and k-neighbors queries for the 'kd_tree', 'brute',
    and 'ball_tree' algorithms, then renders three stacked bar charts
    (construction time in red, query time in blue, log-scaled y axis).

    Parameters mirror the sweep ranges (Nrange, Drange, krange), the
    fiducial values (N, D, k), the tree leaf_size, and the dataset name
    passed to get_data.  The figure is drawn but not shown or saved.
    """
    algorithms = ('kd_tree', 'brute', 'ball_tree')
    fiducial_values = {'N': N,
                       'D': D,
                       'k': k}
    #------------------------------------------------------------
    # varying N: time build and query for each sample count in Nrange
    N_results_build = {alg: np.zeros(len(Nrange))
                       for alg in algorithms}
    N_results_query = {alg: np.zeros(len(Nrange))
                       for alg in algorithms}
    for i, NN in enumerate(Nrange):
        print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
        X = get_data(NN, D, dataset)
        for algorithm in algorithms:
            # n_neighbors cannot exceed the number of samples.
            nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
                                              algorithm=algorithm,
                                              leaf_size=leaf_size)
            t0 = time()
            nbrs.fit(X)
            t1 = time()
            nbrs.kneighbors(X)
            t2 = time()
            N_results_build[algorithm][i] = (t1 - t0)
            N_results_query[algorithm][i] = (t2 - t1)
    #------------------------------------------------------------
    # varying D: time build and query for each dimensionality in Drange
    D_results_build = {alg: np.zeros(len(Drange))
                       for alg in algorithms}
    D_results_query = {alg: np.zeros(len(Drange))
                       for alg in algorithms}
    for i, DD in enumerate(Drange):
        print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
        X = get_data(N, DD, dataset)
        for algorithm in algorithms:
            nbrs = neighbors.NearestNeighbors(n_neighbors=k,
                                              algorithm=algorithm,
                                              leaf_size=leaf_size)
            t0 = time()
            nbrs.fit(X)
            t1 = time()
            nbrs.kneighbors(X)
            t2 = time()
            D_results_build[algorithm][i] = (t1 - t0)
            D_results_query[algorithm][i] = (t2 - t1)
    #------------------------------------------------------------
    # varying k: time build and query for each neighbor count in krange
    k_results_build = {alg: np.zeros(len(krange))
                       for alg in algorithms}
    k_results_query = {alg: np.zeros(len(krange))
                       for alg in algorithms}
    # BUGFIX: previously used DD (the leftover loop variable from the D
    # sweep) instead of the fiducial D; it only worked by accident because
    # Drange's last value happened to equal the default D.
    X = get_data(N, D, dataset)
    for i, kk in enumerate(krange):
        print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
        for algorithm in algorithms:
            nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
                                              algorithm=algorithm,
                                              leaf_size=leaf_size)
            t0 = time()
            nbrs.fit(X)
            t1 = time()
            nbrs.kneighbors(X)
            t2 = time()
            k_results_build[algorithm][i] = (t1 - t0)
            k_results_query[algorithm][i] = (t2 - t1)
    plt.figure(figsize=(8, 11))
    # One subplot per swept quantity: stacked bars of build + query time.
    for (sbplt, vals, quantity,
         build_time, query_time) in [(311, Nrange, 'N',
                                      N_results_build,
                                      N_results_query),
                                     (312, Drange, 'D',
                                      D_results_build,
                                      D_results_query),
                                     (313, krange, 'k',
                                      k_results_build,
                                      k_results_query)]:
        ax = plt.subplot(sbplt, yscale='log')
        plt.grid(True)
        tick_vals = []
        tick_labels = []
        # Baseline for the log-scale bars: smallest power of ten at or
        # below the fastest build time, so every bar has positive height.
        bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
                               for alg in algorithms])
        for i, alg in enumerate(algorithms):
            # Each algorithm gets its own cluster of bars along the x axis.
            xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
            width = 0.8
            c_bar = plt.bar(xvals, build_time[alg] - bottom,
                            width, bottom, color='r')
            q_bar = plt.bar(xvals, query_time[alg],
                            width, build_time[alg], color='b')
            tick_vals += list(xvals + 0.5 * width)
            tick_labels += ['%i' % val for val in vals]
            plt.text((i + 0.02) / len(algorithms), 0.98, alg,
                     transform=ax.transAxes,
                     ha='left',
                     va='top',
                     bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
        plt.ylabel('Time (s)')
        ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
        ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
        for label in ax.get_xticklabels():
            label.set_rotation(-90)
            label.set_fontsize(10)
        title_string = 'Varying %s' % quantity
        # Annotate the fixed (fiducial) values of the two non-swept knobs.
        descr_string = ''
        for s in 'NDk':
            if s == quantity:
                pass
            else:
                descr_string += '%s = %i, ' % (s, fiducial_values[s])
        descr_string = descr_string[:-2]
        plt.text(1.01, 0.5, title_string,
                 transform=ax.transAxes, rotation=-90,
                 ha='left', va='center', fontsize=20)
        plt.text(0.99, 0.5, descr_string,
                 transform=ax.transAxes, rotation=-90,
                 ha='right', va='center')
    plt.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
    plt.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
                  'upper right')
if __name__ == '__main__':
    # Benchmark both datasets back to back; interactive display stays off.
    for ds in ('digits', 'dense'):
        barplot_neighbors(dataset=ds)
    #plt.show()
| [
"giannos.gavrielides@gmail.com"
] | giannos.gavrielides@gmail.com |
fb518482a9258267dfecca4ed56546ae95d000cf | 03b98276b252aa0e94142b65fd4b9dcd0a4d12ee | /Project.py | 4d44581f8e3cc0aa66a52956f2641de9b4be4d85 | [] | no_license | FrancescoSaverioZuppichini/scientists-keypoints-classification | b2c0ec30f5f7067d3809955f26a2f0cb051cdc2a | d1c4b6010e8c0f13271cd3304a137febf927c51d | refs/heads/main | 2023-03-02T07:44:40.767193 | 2021-01-31T15:56:12 | 2021-01-31T15:56:12 | 334,368,509 | 0 | 0 | null | 2021-01-31T15:56:13 | 2021-01-30T08:49:52 | Jupyter Notebook | UTF-8 | Python | false | false | 518 | py | from dataclasses import dataclass
from pathlib import Path
@dataclass
class Project:
"""
This class represents our project. It stores useful information about the structure, e.g. paths.
"""
base_dir: Path = Path(__file__).parents[0]
data_dir: Path = base_dir / 'dataset'
checkpoint_dir: Path = base_dir / 'checkpoint'
def __post_init__(self):
# create the directories if they don't exist
self.data_dir.mkdir(exist_ok=True)
self.checkpoint_dir.mkdir(exist_ok=True) | [
"zuppif@usi.ch"
] | zuppif@usi.ch |
a56e5646913bda05dbd46183d6ccfa161a785312 | 7a0f7ce00528b103c7b8b501f1e8333fc3a0836c | /Class Based Views/CBV/CBV/CBV_app/urls.py | e9a5006b2991b23bdefa7672692ccb6ffcd53603 | [] | no_license | StefanDimitrovDimitrov/DjangoRESTFramework | 605ea044f4039d5bb8017ffe2f33c54ea0ebacc0 | 076cac204d2b5b1a278b68d3883394d5dcc2c040 | refs/heads/main | 2023-07-15T01:55:52.550274 | 2021-08-27T16:34:40 | 2021-08-27T16:34:40 | 382,106,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | from django.urls import path
from CBV.CBV_app.views import CBVList, Details, Create
urlpatterns = [
path('', CBVList.as_view(), name='CBVList'),
path('details/<int:pk>', Details.as_view(), name='details'),
path('create', Create.as_view(), name='create')
] | [
"54206891+StefanDimitrovDimitrov@users.noreply.github.com"
] | 54206891+StefanDimitrovDimitrov@users.noreply.github.com |
e9441712b4248e5f710a43bc2f7edd338493dca2 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/5250fabed22b4ae590e1fc2c10ff7469.py | 1d05c10071c4c66d5b3f7de258699ffdc3b7ceb5 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 465 | py | #
# Skeleton file for the Python "Bob" exercise.
#
def hey(what):
    """Reply to a remark the way Bob, a lackadaisical teenager, would.

    Silence gets an annoyed retort, shouting (letters present and all
    uppercase) gets told to chill, questions get a noncommittal 'Sure.',
    and anything else gets 'Whatever.'.
    """
    remark = what.strip()
    if not remark:
        return 'Fine. Be that way!'
    # Shouting: at least one letter, and uppercasing changes nothing.
    has_letters = any(ch.isalpha() for ch in remark)
    if has_letters and remark == remark.upper():
        return 'Whoa, chill out!'
    if remark.endswith('?'):
        return 'Sure.'
    return 'Whatever.'
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
9f455b8429dea02b5edeba4e91d0ff72aa48f3b9 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayDataDataserviceHellobikeLogSyncResponse.py | da91fbf326c81f03835b551c7221a1f8723c99fd | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 742 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayDataDataserviceHellobikeLogSyncResponse(AlipayResponse):
    """Response wrapper for the alipay.data.dataservice.hellobike.log.sync API.

    Exposes a single field, ``biz_code``, extracted from the gateway payload.
    """

    def __init__(self):
        super(AlipayDataDataserviceHellobikeLogSyncResponse, self).__init__()
        self._biz_code = None

    @property
    def biz_code(self):
        """Business code returned by the gateway, or None if absent."""
        return self._biz_code

    @biz_code.setter
    def biz_code(self, value):
        self._biz_code = value

    def parse_response_content(self, response_content):
        """Parse the raw payload via the base class and pull out biz_code."""
        parsed = super(AlipayDataDataserviceHellobikeLogSyncResponse, self).parse_response_content(response_content)
        # Only assign when present so the default None is left untouched.
        if 'biz_code' in parsed:
            self.biz_code = parsed['biz_code']
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
8b3bc7cf95133c671e3c410fa6d4ef40fc0d8f7b | 3e54ca2ad9146c1eaeee8a2497483187d660289c | /Python/string1.py | 4f1ba32b7966d9f8b643b2e1c9244efd57f1459a | [] | no_license | ParvathyGS/My-learnings | 438e096dc45d73ac5808a7b77316317dd90942ec | 653dac3dc3b46803ab89f07b7c14435f15b3164f | refs/heads/master | 2023-01-13T02:53:58.223103 | 2020-08-09T08:09:08 | 2020-08-09T08:09:08 | 227,995,341 | 0 | 1 | null | 2023-01-07T20:48:22 | 2019-12-14T09:09:53 | HTML | UTF-8 | Python | false | false | 46 | py | a = "my Python"
print(len(a))
#print(a[-5:-2]) | [
"gsparvathy1989@gmail.com"
] | gsparvathy1989@gmail.com |
861e56f9b51658633be995a3553d7ed2a0aa0d18 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nikola.py | 79a8ef8c28112a7172aa37e667777afa2136de90 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 65 | py | ii = [('LeakWTI2.py', 2), ('LeakWTI3.py', 2), ('LeakWTI4.py', 1)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
8a01fbab7ddd08d936daca4f2b151f92b88385c5 | e9ff112a590a2707e66c518328ba71a4d964846a | /train_scripts/train_c2i.py | f980e7fb7c32ab9e357e2dc4a9e4acdc8522b720 | [
"MIT"
] | permissive | n644t031/fastMRI-kspace | 60a6ca9679ede25f0db89f174647a8451a578331 | 4c484b3183e9f06838b5ee108af283611c2e1e77 | refs/heads/master | 2022-08-30T17:19:23.105996 | 2020-05-24T13:55:40 | 2020-05-24T13:55:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,068 | py | import torch
from torch import nn, optim
from pathlib import Path
from utils.run_utils import initialize, save_dict_as_json, get_logger, create_arg_parser
from utils.data_loaders import create_prefetch_data_loaders
from train.subsample import RandomMaskFunc, UniformMaskFunc
from data.input_transforms import PreProcessCMG
from data.output_transforms import PostProcessCMG
from train.new_model_trainers.img_only import ModelTrainerIMG
# from models.att_unet import UNet
from models.unet_no_norm import UNet
from metrics.new_1d_ssim import SSIMLoss, LogSSIMLoss
def train_cmg_to_img(args):
    """Set up and run one complex-image-to-image (C2I) training run.

    Builds checkpoint/log directories keyed by args.train_method and a fresh
    run name, picks the device, constructs the mask function, the pre/post
    processing transforms, data loaders, loss, U-Net model, optimizer and
    LR scheduler, then hands everything to ModelTrainerIMG.train_model().
    All peripheral objects are stashed back onto ``args`` for convenience.
    """
    # Creating checkpoint and logging directories, as well as the run name.
    # Layout: <ckpt_root>/<train_method>/<run_name>/ (and same under log_root).
    ckpt_path = Path(args.ckpt_root)
    ckpt_path.mkdir(exist_ok=True)
    ckpt_path = ckpt_path / args.train_method
    ckpt_path.mkdir(exist_ok=True)
    run_number, run_name = initialize(ckpt_path)
    ckpt_path = ckpt_path / run_name
    ckpt_path.mkdir(exist_ok=True)
    log_path = Path(args.log_root)
    log_path.mkdir(exist_ok=True)
    log_path = log_path / args.train_method
    log_path.mkdir(exist_ok=True)
    log_path = log_path / run_name
    log_path.mkdir(exist_ok=True)
    logger = get_logger(name=__name__)
    # Assignment inside running code appears to work.
    # Device selection: requested GPU if available, otherwise CPU.
    if (args.gpu is not None) and torch.cuda.is_available():
        device = torch.device(f'cuda:{args.gpu}')
        logger.info(f'Using GPU {args.gpu} for {run_name}')
    else:
        device = torch.device('cpu')
        logger.info(f'Using CPU for {run_name}')
    # Saving peripheral variables and objects in args to reduce clutter and make the structure flexible.
    args.run_number = run_number
    args.run_name = run_name
    args.ckpt_path = ckpt_path
    args.log_path = log_path
    args.device = device
    # Persist the full run configuration next to the logs for reproducibility.
    save_dict_as_json(vars(args), log_dir=log_path, save_name=run_name)
    # k-space undersampling mask: random or uniform spacing.
    if args.random_sampling:
        mask_func = RandomMaskFunc(args.center_fractions, args.accelerations)
    else:
        mask_func = UniformMaskFunc(args.center_fractions, args.accelerations)
    # Train transform augments and uses random seeds; val transform is deterministic.
    input_train_transform = PreProcessCMG(mask_func, args.challenge, device, augment_data=args.augment_data,
                                          use_seed=False, crop_center=args.crop_center)
    input_val_transform = PreProcessCMG(mask_func, args.challenge, device, augment_data=False,
                                        use_seed=True, crop_center=args.crop_center)
    output_train_transform = PostProcessCMG(challenge=args.challenge, residual_acs=args.residual_acs)
    output_val_transform = PostProcessCMG(challenge=args.challenge, residual_acs=args.residual_acs)
    # DataLoaders
    train_loader, val_loader = create_prefetch_data_loaders(args)
    # Image-domain loss; alternatives kept below as commented history.
    losses = dict(
        img_loss=LogSSIMLoss(filter_size=7).to(device)
        # img_loss=SSIMLoss(filter_size=7).to(device=device)
        # img_loss=nn.L1Loss()
    )
    # model = UNet(
    #     in_chans=30, out_chans=30, chans=args.chans, num_pool_layers=args.num_pool_layers, num_groups=args.num_groups,
    #     negative_slope=args.negative_slope, use_residual=args.use_residual, interp_mode=args.interp_mode,
    #     use_ca=args.use_ca, reduction=args.reduction, use_gap=args.use_gap, use_gmp=args.use_gmp).to(device)
    # 30 channels in/out: presumably 15 coils x (real, imag) — TODO confirm.
    model = UNet(in_chans=30, out_chans=30, chans=args.chans, num_pool_layers=args.num_pool_layers,
                 num_depth_blocks=args.num_depth_blocks, use_residual=args.use_residual, use_ca=args.use_ca,
                 reduction=args.reduction, use_gap=args.use_gap, use_gmp=args.use_gmp).to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.init_lr)
    # Step-wise LR decay at the epochs listed in args.lr_red_epochs.
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_red_epochs, gamma=args.lr_red_rate)
    trainer = ModelTrainerIMG(args, model, optimizer, train_loader, val_loader, input_train_transform,
                              input_val_transform, output_train_transform, output_val_transform, losses, scheduler)
    try:
        trainer.train_model()
    except KeyboardInterrupt:
        # Close the TensorBoard writer cleanly on manual interruption.
        trainer.writer.close()
        logger.warning('Closing summary writer due to KeyboardInterrupt.')
if __name__ == '__main__':
    # Guard: this script must be launched from the project root so that
    # relative paths (./logs, ./checkpoints) resolve correctly.
    project_name = 'fastMRI-kspace'
    assert Path.cwd().name == project_name, f'Current working directory set at {Path.cwd()}, not {project_name}!'

    # Run configuration, grouped roughly by how often each knob changes.
    settings = dict(
        # Variables that almost never change.
        challenge='multicoil',
        data_root='/media/veritas/D/FastMRI',
        log_root='./logs',
        ckpt_root='./checkpoints',
        batch_size=1,  # This MUST be 1 for now.
        save_best_only=True,
        smoothing_factor=8,

        # Variables that occasionally change.
        center_fractions=[0.08, 0.04],
        accelerations=[4, 8],
        random_sampling=True,
        num_pool_layers=5,
        verbose=False,
        use_gt=True,
        augment_data=True,
        crop_center=True,

        # Model specific parameters.
        train_method='C2I',  # Weighted semi-k-space to complex-valued image.
        # num_groups=16,  # Maybe try 16 now since chans is 64.
        chans=64,
        num_depth_blocks=1,
        # negative_slope=0.1,
        # interp_mode='nearest',
        use_residual=True,
        residual_acs=False,

        # TensorBoard related parameters.
        max_images=8,  # Maximum number of images to save.
        shrink_scale=1,  # Scale to shrink output image size.

        # Channel Attention.
        use_ca=False,
        reduction=8,
        use_gap=False,
        use_gmp=False,

        # Learning rate scheduling.
        lr_red_epochs=[20, 25],
        lr_red_rate=0.1,

        # Variables that change frequently.
        use_slice_metrics=True,
        num_epochs=30,
        gpu=0,  # Set to None for CPU mode.
        num_workers=4,
        init_lr=2E-4,
        max_to_keep=1,
        prev_model_ckpt='',
        sample_rate_train=1,
        start_slice_train=0,
        sample_rate_val=1,
        start_slice_val=0,
    )
    # Settings are converted to argparse defaults, then parsed and executed.
    arguments = create_arg_parser(**settings).parse_args()
    train_cmg_to_img(arguments)
| [
"veritas9872@gmail.com"
] | veritas9872@gmail.com |
2c562bda934b4f62ab5ded428de590c8313dcc52 | 0b9e884be78ecc22a44a94e2c1cabefd637b9ed0 | /Python_Talk/mpt-master/ch12/guess_letter.py | 7c0efa9b99992a2bcda705aca8885edea8980335 | [] | no_license | marcovnyc/penguin-code | 6ba3faa5f21186918e2d08f5a0fcacebb2697e56 | a0c1f91219ff74a8bb8e9fd3375b03b667056b54 | refs/heads/master | 2021-12-22T15:04:26.002512 | 2021-12-16T04:01:40 | 2021-12-16T04:01:40 | 7,264,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,798 | py | import turtle as t
from random import choice
# --- Board setup: 600x500 window, no visible cursor, manual screen updates ---
# Set up the board
t.setup(600,500)
t.hideturtle()
t.tracer(False)
t.bgcolor("lavender")
t.title("Guess the Word Game in Turtle Graphics")
# Define a variable to count how many guesses left
# NOTE(review): score is displayed once but never decremented or re-drawn
# during play, so "guesses left" stays at 6 — confirm whether that's intended.
score = 6
# Create a second turtle to show guesses left
left = t.Turtle()
left.up()
left.hideturtle()
left.goto(-290,200)
left.write(f"guesses left: {score}", font = ('Arial',20,'normal'))
# Put incorrect guesses on top
t.up()
t.goto(-290,150)
t.write("incorrect guesses:", font = ('Arial',20,'normal'))
# Draw four blank underscores at the bottom, one per letter of the word.
for x in range(4):
    t.goto(-275+150*x,-200)
    t.down()
    t.goto(-175+150*x,-200)
    t.up()
t.update()
# Candidate words (all exactly four letters); pick one at random.
words = ['that', 'with', 'have', 'this', 'will', 'your',
         'from', 'they', 'know', 'want', 'been',
         'good', 'much', 'some', 'time']
word = choice(words)
# Letters guessed that are not in the word (drawn along the top row).
missed = []
# Game loop: read single-letter guesses from the console until "done".
while True:
    # Take written input (lowercased so guesses match the word list).
    inp = input("What's your guess?\n").lower()
    # Stop the loop if you key in "done"
    if inp == "done":
        break
    # Check if the letter is in the word
    elif inp in list(word):
        # If yes, write it at every position where it occurs.
        for w in range(4):
            if inp == list(word)[w]:
                t.goto(-250+150*w,-190)
                t.write(inp, font = ('Arial',60,'normal'))
    # If the letter is not in the word, show it at the top
    else:
        missed.append(inp)
        t.goto(-290+80*len(missed),60)
        t.write(inp, font = ('Arial',60,'normal'))
    # Manual redraw (tracer is off), once per guess.
    t.update()
# Close the turtle window; Terminator fires if it was already closed by hand.
try:
    t.bye()
except t.Terminator:
    print('exit turtle')
"penguin@penguin.com"
] | penguin@penguin.com |
55442bd8026d0af90bc25fa86a863d5ebe49cfbc | 9bbf429d2c2e2f20345d613a719cf01e8f9a0bff | /userprofile/views.py | 560c9d6dd350e3921d0fd10212558efa8c14f642 | [] | no_license | sandglasscao/ENU | f78f8a8dfaf3263587885b0622ab6d3182012375 | e3c26fd57f8ef582da576e1cc28b7eb42562c706 | refs/heads/master | 2021-01-23T05:19:03.175439 | 2017-04-14T09:24:22 | 2017-04-14T09:24:22 | 86,297,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,585 | py | import base64
from django.http import HttpResponse
import json
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.status import HTTP_200_OK
from rest_framework.generics import (
ListAPIView,
ListCreateAPIView,
RetrieveUpdateDestroyAPIView,
)
from rest_framework.pagination import PageNumberPagination
from rest_framework.views import APIView
from rest_framework.viewsets import ReadOnlyModelViewSet, ModelViewSet
from rest_framework.permissions import (
AllowAny,
IsAuthenticated,
IsAuthenticatedOrReadOnly,
)
from django.contrib.auth.models import User
from .models import (
Profile,
Pquestion,
Address,
Freight,
Notification,
Taxpayer,
)
from metadata.models import (NiceWeidcode,
AddressCode,
)
from metadata.serializers import (NiceWeidcodeSerializer, )
from .serializers import (
UserSerializer,
PasswordSerializer,
RegisterSerializer,
ProfileSerializer,
PquestionSerializer,
AddressSerializer,
FreightSerializer,
NotificationSerializer,
TaxpayerSerializer,
)
from utility.views import NewFreightNo
class JSONResponse(HttpResponse):
    """
    An HttpResponse whose body is the given data rendered as JSON.
    """
    def __init__(self, data, **kwargs):
        # Render with DRF's JSONRenderer so serializer output types are handled.
        content = JSONRenderer().render(data)
        # Force the content type regardless of what the caller passed.
        kwargs['content_type'] = 'application/json'
        super(JSONResponse, self).__init__(content, **kwargs)
class UserListApiView(ListAPIView):
    """Open, read-only list of every user account."""
    permission_classes = [AllowAny]
    queryset = User.objects.all()
    serializer_class = UserSerializer

    def get(self, request, *args, **kwargs):
        # BUGFIX: a queryset holds many objects, so the serializer needs
        # many=True; without it DRF treats the queryset as a single instance
        # and attribute lookups fail when rendering .data.
        serializer = UserSerializer(self.get_queryset(), many=True)
        return Response(serializer.data, status=HTTP_200_OK)
class ProfileApiView(APIView):
    """Read any user's profile by username; update the caller's own profile."""
    # permission_classes = (IsAuthenticatedOrReadOnly,)
    serializer_class = ProfileSerializer

    def get(self, request, username):
        """Return the profile of the user identified by ``username``.

        Responds 404 when the user or profile does not exist (previously an
        unhandled DoesNotExist bubbled up as an HTTP 500).
        """
        try:
            user = User.objects.get(username=username)
            profile = Profile.objects.get(user=user)
        except (User.DoesNotExist, Profile.DoesNotExist):
            return Response({'detail': 'profile not found'}, status=404)
        serializer = ProfileSerializer(profile)
        return Response(serializer.data, status=HTTP_200_OK)

    def put(self, request, *args, **kwargs):
        """Replace the authenticated user's profile with the submitted data."""
        profile = request.user.profile
        serializer = ProfileSerializer(profile, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=200)
        return Response(serializer.errors, status=400)
class UserRegisterApiView(APIView):
    """Open endpoint that registers a new user account."""
    permission_classes = [AllowAny]
    serializer_class = RegisterSerializer

    def post(self, request, *args, **kwargs):
        # Validate first; reject with field errors before creating anything.
        serializer = RegisterSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=400)
        serializer.save()
        return Response(serializer.data, status=201)
class CheckCellApiView(APIView):
    """Check whether a cellphone number is already tied to any profile."""
    permission_classes = [AllowAny]
    serializer_class = ProfileSerializer

    # query: POST {"cell": "..."} -> list of matching profiles (if any).
    def post(self, request, *args, **kwargs):
        cellphone = request.data['cell']
        profiles = Profile.objects.filter(cell=cellphone)
        # NOTE(review): when nothing matches, this returns the .data of an
        # UNBOUND serializer (a dict of empty field defaults) rather than an
        # empty list — confirm clients actually expect that shape.
        serializer = ProfileSerializer()
        if profiles.count() >= 1:
            serializer = ProfileSerializer(profiles, many=True)
        return Response(serializer.data, status=200)
class ChangePwdApiView(APIView):
    """Let the authenticated user change their own password."""
    permission_classes = [IsAuthenticated]
    serializer_class = PasswordSerializer

    def put(self, request, *args, **kwargs):
        # The target user is always the authenticated caller; the payload
        # only carries the password fields, validated by PasswordSerializer.
        serializer = PasswordSerializer(request.user, data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=400)
        serializer.save()
        return Response(serializer.data, status=201)
#
# class PquestionListApiView(ListAPIView):
# # permission_classes = [IsAuthenticated]
# serializer_class = PquestionSerializer
#
# def get_queryset(self):
# user = self.request.user
# return Pquestion.objects.filter(owner=user)
#
#
# class PquestionApiView(APIView):
# serializer_class = PquestionSerializer
#
# def post(self, request, *args, **kwargs):
# serializer = PquestionSerializer(data=request.data)
# if serializer.is_valid():
# serializer.save(owner=request.user)
# return Response(serializer.data, status=201)
# return Response(serializer.errors, status=400)
class PquestionLC(ListCreateAPIView):
    """List the caller's security questions, or create a new one they own."""
    permission_classes = (IsAuthenticatedOrReadOnly,)
    serializer_class = PquestionSerializer

    def get_queryset(self):
        # Users only ever see their own questions.
        return Pquestion.objects.filter(owner=self.request.user)

    def post(self, request, *args, **kwargs):
        serializer = PquestionSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=400)
        # Ownership is taken from the session, never from the payload.
        serializer.save(owner=request.user)
        return Response(serializer.data, status=201)
class PquestionRUD(RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete one security question addressed by id."""
    queryset = Pquestion.objects.all()
    serializer_class = PquestionSerializer
    permission_classes = (IsAuthenticatedOrReadOnly,)
    lookup_field = 'id'
# class AddressCreateApiView(CreateAPIView):
# serializer_class = AddressSerializer
#
# def post(self, request, *args, **kwargs):
# serializer = AddressSerializer(data=request.data)
# data_country = AddressCode.objects.get(pk=request.data['country'])
# data_province = AddressCode.objects.get(pk=request.data['province'])
# data_city = AddressCode.objects.get(pk=request.data['city'])
# data_district = AddressCode.objects.get(pk=request.data['district'])
# if serializer.is_valid():
# serializer.save(owner=request.user,
# country=data_country,
# province=data_province,
# city=data_city,
# district=data_district)
# return Response(serializer.data, status=201)
# return Response(serializer.errors, status=400)
#
#
# class AddressDeleteApiView(DestroyAPIView):
# # permission_classes = [AllowAny]
# def delete(self, request, addressid):
# addressid = self.kwargs.get('addressid')
# address = Address.objects.filter(id=addressid).delete()
# if address:
# return Response({"count": "1", "address": address}, status=200)
# return Response({"error": "system error"}, status=500)
#
# class AddressListApiView(ListAPIView):
# # permission_classes = [AllowAny]
# serializer_class = AddressSerializer
#
# def get_queryset(self):
# user = self.request.user
# return Address.objects.filter(owner=user)
#
# class AddressRUDView(RetrieveUpdateDestroyAPIView):
# permission_classes = (IsAuthenticatedOrReadOnly,)
# serializer_class = AddressSerializer
# queryset = Address.objects.all()
# lookup_field = 'id'
#
#
# class AddressListCreateView(ListCreateAPIView):
# permission_classes = (IsAuthenticatedOrReadOnly,)
# serializer_class = AddressSerializer
#
# def post(self, request, *args, **kwargs):
# serializer = AddressSerializer(data=request.data)
# data_country = AddressCode.objects.get(pk=request.data['country'])
# data_province = AddressCode.objects.get(pk=request.data['province'])
# data_city = AddressCode.objects.get(pk=request.data['city'])
# data_district = AddressCode.objects.get(pk=request.data['district'])
# if serializer.is_valid():
# serializer.save(owner=request.user,
# country=data_country,
# province=data_province,
# city=data_city,
# district=data_district)
# return Response(serializer.data, status=201)
# return Response(serializer.errors, status=400)
#
# def get_queryset(self):
# user = self.request.user
# return Address.objects.filter(owner=user)
class AddressViewSet(ModelViewSet):
    """Full CRUD over the requesting user's own addresses."""
    permission_classes = (IsAuthenticatedOrReadOnly,)
    serializer_class = AddressSerializer

    # Region fields that arrive in the payload as AddressCode primary keys.
    REGION_FIELDS = ('country', 'province', 'city', 'district')

    def get_queryset(self):
        # Scope every action to addresses the caller owns.
        return Address.objects.filter(owner=self.request.user)

    def create(self, request, *args, **kwargs):
        return super(AddressViewSet, self).create(request, *args, **kwargs)

    def perform_create(self, serializer):
        # Resolve each region pk from the request into its AddressCode row,
        # then attach ownership and the resolved regions on save.
        regions = {
            name: AddressCode.objects.get(pk=self.request.data[name])
            for name in self.REGION_FIELDS
        }
        serializer.save(owner=self.request.user, **regions)
class StandardPagination(PageNumberPagination):
page_size = 5
page_size_query_param = 'page_size'
max_page_size = 1000
class NiceWeidcodeList(ListAPIView):
permission_classes = (IsAuthenticatedOrReadOnly,)
serializer_class = NiceWeidcodeSerializer
pagination_class = StandardPagination
def get_queryset(self):
weidcode = self.kwargs['weidcode']
return NiceWeidcode.objects.filter(weidcode__contains=weidcode)
#
# class ConsignorDetail(RetrieveAPIView):
# permission_classes = (IsAuthenticatedOrReadOnly,)
#
# def get(self, request, *args, **kwargs):
# weidcode = self.kwargs.get('weidcode')
# profile = Profile.objects.get(user__username=weidcode)
# data = {}
# if profile:
# data['weidcode'] = weidcode
# data['cell'] = profile.cell
# if profile.address:
# data['address'] = profile.address.no
# else:
# data['address'] = ''
# else:
# data['weidcode'] = ''
# data['cell'] = ''
# data['address'] = ''
#
# return Response(data, status=HTTP_200_OK)
# class DeliveryOptList(ListAPIView):
# permission_classes = (IsAuthenticatedOrReadOnly,)
# serializer_class = MetaTypeSerializer
#
# def get_queryset(self):
# language = settings.LANGUAGE_CODE
# enumTypes = ['1008', '1010']
# return MetaType.objects.filter(type__in=enumTypes, language=language)
class FreightRUDView(RetrieveUpdateDestroyAPIView):
permission_classes = (IsAuthenticatedOrReadOnly,)
serializer_class = FreightSerializer
queryset = Freight.objects.all()
# def post(self, request, *args, **kwargs):
# data = request.data
# freightS = data.pop('freight')[0]
# freight = json.loads(freightS)
#
# if freight.get('hasCheck'):
# self.__resetlst__(which='gross_check', source=data, target=freight)
#
# freight['freight_no'] = NewFreightNo.next_freight_no()
# freight['status'] = 0
# serializer = FreightSerializer(data=freight)
# if serializer.is_valid():
# serializer.save(owner=request.user)
# return Response(serializer.data, status=201)
# return Response(serializer.errors, status=400)
#
# def __resetlst__(self, which, source, target):
# check = target.get(which)
# if check:
# reciept_lst = check.get('receipt_lst')
#
# for item in reciept_lst:
# pic = which + '_' + str(item.get('seq_no'))
# item['pic'] = source.get(pic)
#
# target[which] = check
class FreightListCreateView(ListCreateAPIView):
permission_classes = (IsAuthenticatedOrReadOnly,)
serializer_class = FreightSerializer
def get_queryset(self):
return Freight.objects.filter(owner=self.request.user)
def post(self, request, *args, **kwargs):
data = request.data
freightS = data.pop('freight')[0]
freight = json.loads(freightS)
if freight.get('hasCheck'):
self.__resetlst__(which='gross_check', source=data, target=freight)
freight['freight_no'] = NewFreightNo.next_freight_no()
freight['status'] = 0
serializer = FreightSerializer(data=freight)
if serializer.is_valid():
serializer.save(owner=request.user)
return Response(serializer.data, status=201)
return Response(serializer.errors, status=400)
def __resetlst__(self, which, source, target):
check = target.get(which)
if check:
reciept_lst = check.get('receipt_lst')
for item in reciept_lst:
pic = which + '_' + str(item.get('seq_no'))
item['pic'] = source.get(pic)
target[which] = check
class ShipperList(ListAPIView):
permission_classes = (IsAuthenticatedOrReadOnly,)
serializer_class = UserSerializer
def get_queryset(self):
return User.objects.filter(username__startswith='5')
class NotificationViewSet(ModelViewSet):
permission_classes = (IsAuthenticatedOrReadOnly,)
serializer_class = NotificationSerializer
def get_queryset(self):
return Notification.objects.filter(owner=self.request.user)
class TaxpayerViewSet(ModelViewSet):
permission_classes = (IsAuthenticatedOrReadOnly,)
serializer_class = TaxpayerSerializer
pagination_class = StandardPagination
def get_queryset(self):
return Taxpayer.objects.filter(owner=self.request.user)
def create(self, request, *args, **kwargs):
data = request.data
taxpayer = json.loads(data.get('taxpayer'))
invoicepic = data.get('invoicepic', None)
if invoicepic:
self.__base64toimg__(invoicepic)
taxpayer['invoicepic'] = invoicepic
for key in taxpayer:
request.data[key] = taxpayer[key]
return super(TaxpayerViewSet, self).create(request, *args, **kwargs)
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
def update(self, request, *args, **kwargs):
data = request.data
taxpayer = json.loads(data.get('taxpayer', None))
invoicepic = ('invoicepic' in data.keys()) and data.pop('invoicepic')[0]
if invoicepic:
self.__base64toimg__(invoicepic)
taxpayer['invoicepic'] = invoicepic
elif 'invoicepic' in taxpayer.keys():
taxpayer.pop('invoicepic')
for key in taxpayer:
request.data[key] = taxpayer[key]
return super(TaxpayerViewSet, self).update(request, *args, **kwargs)
def __base64toimg__(self, base64File):
oldlines = base64File.readlines()
newline = base64.decodebytes(oldlines[0])
base64File.seek(0)
base64File.truncate()
base64File.writelines([newline])
class TaxpayerList(ListAPIView):
permission_classes = (IsAuthenticatedOrReadOnly,)
serializer_class = TaxpayerSerializer
pagination_class = StandardPagination
def get_queryset(self):
title = self.kwargs['title']
return Taxpayer.objects.filter(title__icontains=title)
| [
"root@localhost"
] | root@localhost |
57b897d6707a263911f478ca1cd987fc2d776816 | ece0d321e48f182832252b23db1df0c21b78f20c | /engine/2.80/scripts/addons/cycles/version_update.py | 178f1162568f5957f10cb3cb550f6f789469f98b | [
"Unlicense",
"GPL-3.0-only",
"Font-exception-2.0",
"GPL-3.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain-disclaimer",
"Bitstream-Vera",
"LicenseRef-scancode-blender-2010",
"LGPL-2.1-or-later",
... | permissive | byteinc/Phasor | 47d4e48a52fa562dfa1a2dbe493f8ec9e94625b9 | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | refs/heads/master | 2022-10-25T17:05:01.585032 | 2019-03-16T19:24:22 | 2019-03-16T19:24:22 | 175,723,233 | 3 | 1 | Unlicense | 2022-10-21T07:02:37 | 2019-03-15T00:58:08 | Python | UTF-8 | Python | false | false | 15,886 | py | #
# Copyright 2011-2014 Blender Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# <pep8 compliant>
import bpy
import math
from bpy.app.handlers import persistent
def foreach_notree_node(nodetree, callback, traversed):
if nodetree in traversed:
return
traversed.add(nodetree)
for node in nodetree.nodes:
callback(node)
if node.bl_idname == 'ShaderNodeGroup':
foreach_notree_node(node.node_tree, callback, traversed)
def foreach_cycles_node(callback):
traversed = set()
for material in bpy.data.materials:
if material.node_tree:
foreach_notree_node(
material.node_tree,
callback,
traversed,
)
for world in bpy.data.worlds:
if world.node_tree:
foreach_notree_node(
world.node_tree,
callback,
traversed,
)
for light in bpy.data.lights:
if light.node_tree:
foreach_notree_node(
light.node_tree,
callback,
traversed,
)
def displacement_node_insert(material, nodetree, traversed):
if nodetree in traversed:
return
traversed.add(nodetree)
for node in nodetree.nodes:
if node.bl_idname == 'ShaderNodeGroup':
displacement_node_insert(material, node.node_tree, traversed)
# Gather links to replace
displacement_links = []
for link in nodetree.links:
if (
link.to_node.bl_idname == 'ShaderNodeOutputMaterial' and
link.from_node.bl_idname != 'ShaderNodeDisplacement' and
link.to_socket.identifier == 'Displacement'
):
displacement_links.append(link)
# Replace links with displacement node
for link in displacement_links:
from_node = link.from_node
from_socket = link.from_socket
to_node = link.to_node
to_socket = link.to_socket
nodetree.links.remove(link)
node = nodetree.nodes.new(type='ShaderNodeDisplacement')
node.location[0] = 0.5 * (from_node.location[0] + to_node.location[0])
node.location[1] = 0.5 * (from_node.location[1] + to_node.location[1])
node.inputs['Scale'].default_value = 0.1
node.inputs['Midlevel'].default_value = 0.0
nodetree.links.new(from_socket, node.inputs['Height'])
nodetree.links.new(node.outputs['Displacement'], to_socket)
def displacement_nodes_insert():
traversed = set()
for material in bpy.data.materials:
if material.node_tree:
displacement_node_insert(material, material.node_tree, traversed)
def displacement_principled_nodes(node):
if node.bl_idname == 'ShaderNodeDisplacement':
if node.space != 'WORLD':
node.space = 'OBJECT'
if node.bl_idname == 'ShaderNodeBsdfPrincipled':
if node.subsurface_method != 'RANDOM_WALK':
node.subsurface_method = 'BURLEY'
def square_roughness_node_insert(material, nodetree, traversed):
if nodetree in traversed:
return
traversed.add(nodetree)
roughness_node_types = {
'ShaderNodeBsdfAnisotropic',
'ShaderNodeBsdfGlass',
'ShaderNodeBsdfGlossy',
'ShaderNodeBsdfRefraction'}
# Update default values
for node in nodetree.nodes:
if node.bl_idname == 'ShaderNodeGroup':
square_roughness_node_insert(material, node.node_tree, traversed)
elif node.bl_idname in roughness_node_types:
roughness_input = node.inputs['Roughness']
roughness_input.default_value = math.sqrt(max(roughness_input.default_value, 0.0))
# Gather roughness links to replace
roughness_links = []
for link in nodetree.links:
if link.to_node.bl_idname in roughness_node_types and \
link.to_socket.identifier == 'Roughness':
roughness_links.append(link)
# Replace links with sqrt node
for link in roughness_links:
from_node = link.from_node
from_socket = link.from_socket
to_node = link.to_node
to_socket = link.to_socket
nodetree.links.remove(link)
node = nodetree.nodes.new(type='ShaderNodeMath')
node.operation = 'POWER'
node.location[0] = 0.5 * (from_node.location[0] + to_node.location[0])
node.location[1] = 0.5 * (from_node.location[1] + to_node.location[1])
nodetree.links.new(from_socket, node.inputs[0])
node.inputs[1].default_value = 0.5
nodetree.links.new(node.outputs['Value'], to_socket)
def square_roughness_nodes_insert():
traversed = set()
for material in bpy.data.materials:
if material.node_tree:
square_roughness_node_insert(material, material.node_tree, traversed)
def mapping_node_order_flip(node):
"""
Flip euler order of mapping shader node
"""
if node.bl_idname == 'ShaderNodeMapping':
rot = node.rotation.copy()
rot.order = 'ZYX'
quat = rot.to_quaternion()
node.rotation = quat.to_euler('XYZ')
def vector_curve_node_remap(node):
"""
Remap values of vector curve node from normalized to absolute values
"""
if node.bl_idname == 'ShaderNodeVectorCurve':
node.mapping.use_clip = False
for curve in node.mapping.curves:
for point in curve.points:
point.location.x = (point.location.x * 2.0) - 1.0
point.location.y = (point.location.y - 0.5) * 2.0
node.mapping.update()
def custom_bake_remap(scene):
"""
Remap bake types into the new types and set the flags accordingly
"""
bake_lookup = (
'COMBINED',
'AO',
'SHADOW',
'NORMAL',
'UV',
'EMIT',
'ENVIRONMENT',
'DIFFUSE_DIRECT',
'DIFFUSE_INDIRECT',
'DIFFUSE_COLOR',
'GLOSSY_DIRECT',
'GLOSSY_INDIRECT',
'GLOSSY_COLOR',
'TRANSMISSION_DIRECT',
'TRANSMISSION_INDIRECT',
'TRANSMISSION_COLOR',
'SUBSURFACE_DIRECT',
'SUBSURFACE_INDIRECT',
'SUBSURFACE_COLOR')
diffuse_direct_idx = bake_lookup.index('DIFFUSE_DIRECT')
cscene = scene.cycles
# Old bake type
bake_type_idx = cscene.get("bake_type")
if bake_type_idx is None:
cscene.bake_type = 'COMBINED'
return
# File doesn't need versioning
if bake_type_idx < diffuse_direct_idx:
return
# File needs versioning
bake_type = bake_lookup[bake_type_idx]
cscene.bake_type, end = bake_type.split('_')
if end == 'DIRECT':
scene.render.bake.use_pass_indirect = False
scene.render.bake.use_pass_color = False
elif end == 'INDIRECT':
scene.render.bake.use_pass_direct = False
scene.render.bake.use_pass_color = False
elif end == 'COLOR':
scene.render.bake.use_pass_direct = False
scene.render.bake.use_pass_indirect = False
def ambient_occlusion_node_relink(material, nodetree, traversed):
if nodetree in traversed:
return
traversed.add(nodetree)
for node in nodetree.nodes:
if node.bl_idname == 'ShaderNodeAmbientOcclusion':
node.samples = 1
node.only_local = False
node.inputs['Distance'].default_value = 0.0
elif node.bl_idname == 'ShaderNodeGroup':
ambient_occlusion_node_relink(material, node.node_tree, traversed)
# Gather links to replace
ao_links = []
for link in nodetree.links:
if link.from_node.bl_idname == 'ShaderNodeAmbientOcclusion':
ao_links.append(link)
# Replace links
for link in ao_links:
from_node = link.from_node
to_socket = link.to_socket
nodetree.links.remove(link)
nodetree.links.new(from_node.outputs['Color'], to_socket)
def ambient_occlusion_nodes_relink():
traversed = set()
for material in bpy.data.materials:
if material.node_tree:
ambient_occlusion_node_relink(material, material.node_tree, traversed)
@persistent
def do_versions(self):
if bpy.context.preferences.version <= (2, 78, 1):
prop = bpy.context.preferences.addons[__package__].preferences
system = bpy.context.preferences.system
if not prop.is_property_set("compute_device_type"):
# Device might not currently be available so this can fail
try:
if system.legacy_compute_device_type == 1:
prop.compute_device_type = 'OPENCL'
elif system.legacy_compute_device_type == 2:
prop.compute_device_type = 'CUDA'
else:
prop.compute_device_type = 'NONE'
except:
pass
# Init device list for UI
prop.get_devices()
# We don't modify startup file because it assumes to
# have all the default values only.
if not bpy.data.is_saved:
return
# Clamp Direct/Indirect separation in 270
if bpy.data.version <= (2, 70, 0):
for scene in bpy.data.scenes:
cscene = scene.cycles
sample_clamp = cscene.get("sample_clamp", False)
if (sample_clamp and
not cscene.is_property_set("sample_clamp_direct") and
not cscene.is_property_set("sample_clamp_indirect")):
cscene.sample_clamp_direct = sample_clamp
cscene.sample_clamp_indirect = sample_clamp
# Change of Volume Bounces in 271
if bpy.data.version <= (2, 71, 0):
for scene in bpy.data.scenes:
cscene = scene.cycles
if not cscene.is_property_set("volume_bounces"):
cscene.volume_bounces = 1
# Caustics Reflective/Refractive separation in 272
if bpy.data.version <= (2, 72, 0):
for scene in bpy.data.scenes:
cscene = scene.cycles
if (cscene.get("no_caustics", False) and
not cscene.is_property_set("caustics_reflective") and
not cscene.is_property_set("caustics_refractive")):
cscene.caustics_reflective = False
cscene.caustics_refractive = False
# Euler order was ZYX in previous versions.
if bpy.data.version <= (2, 73, 4):
foreach_cycles_node(mapping_node_order_flip)
if bpy.data.version <= (2, 76, 5):
foreach_cycles_node(vector_curve_node_remap)
# Baking types changed
if bpy.data.version <= (2, 76, 6):
for scene in bpy.data.scenes:
custom_bake_remap(scene)
# Several default changes for 2.77
if bpy.data.version <= (2, 76, 8):
for scene in bpy.data.scenes:
cscene = scene.cycles
# Samples
if not cscene.is_property_set("samples"):
cscene.samples = 10
# Preview Samples
if not cscene.is_property_set("preview_samples"):
cscene.preview_samples = 10
# Filter
if not cscene.is_property_set("filter_type"):
cscene.pixel_filter_type = 'GAUSSIAN'
# Tile Order
if not cscene.is_property_set("tile_order"):
cscene.tile_order = 'CENTER'
for light in bpy.data.lights:
clight = light.cycles
# MIS
if not clight.is_property_set("use_multiple_importance_sampling"):
clight.use_multiple_importance_sampling = False
for mat in bpy.data.materials:
cmat = mat.cycles
# Volume Sampling
if not cmat.is_property_set("volume_sampling"):
cmat.volume_sampling = 'DISTANCE'
if bpy.data.version <= (2, 76, 9):
for world in bpy.data.worlds:
cworld = world.cycles
# World MIS Samples
if not cworld.is_property_set("samples"):
cworld.samples = 4
# World MIS Resolution
if not cworld.is_property_set("sample_map_resolution"):
cworld.sample_map_resolution = 256
if bpy.data.version <= (2, 76, 10):
for scene in bpy.data.scenes:
cscene = scene.cycles
if cscene.is_property_set("filter_type"):
if not cscene.is_property_set("pixel_filter_type"):
cscene.pixel_filter_type = cscene.filter_type
if cscene.filter_type == 'BLACKMAN_HARRIS':
cscene.filter_type = 'GAUSSIAN'
if bpy.data.version <= (2, 78, 2):
for scene in bpy.data.scenes:
cscene = scene.cycles
if not cscene.is_property_set("light_sampling_threshold"):
cscene.light_sampling_threshold = 0.0
if bpy.data.version <= (2, 79, 0):
for scene in bpy.data.scenes:
cscene = scene.cycles
# Default changes
if not cscene.is_property_set("aa_samples"):
cscene.aa_samples = 4
if not cscene.is_property_set("preview_aa_samples"):
cscene.preview_aa_samples = 4
if not cscene.is_property_set("blur_glossy"):
cscene.blur_glossy = 0.0
if not cscene.is_property_set("sample_clamp_indirect"):
cscene.sample_clamp_indirect = 0.0
if bpy.data.version <= (2, 79, 1) or \
(bpy.data.version >= (2, 80, 0) and bpy.data.version <= (2, 80, 3)):
displacement_nodes_insert()
if bpy.data.version <= (2, 79, 2):
for mat in bpy.data.materials:
cmat = mat.cycles
if not cmat.is_property_set("displacement_method"):
cmat.displacement_method = 'BUMP'
foreach_cycles_node(displacement_principled_nodes)
if bpy.data.version <= (2, 79, 3) or \
(bpy.data.version >= (2, 80, 0) and bpy.data.version <= (2, 80, 4)):
# Switch to squared roughness convention
square_roughness_nodes_insert()
if bpy.data.version <= (2, 80, 15):
# Copy cycles hair settings to internal settings
for part in bpy.data.particles:
cpart = part.get("cycles", None)
if cpart:
part.shape = cpart.get("shape", 0.0)
part.root_radius = cpart.get("root_width", 1.0)
part.tip_radius = cpart.get("tip_width", 0.0)
part.radius_scale = cpart.get("radius_scale", 0.01)
part.use_close_tip = cpart.get("use_closetip", True)
if bpy.data.version <= (2, 79, 4) or \
(bpy.data.version >= (2, 80, 0) and bpy.data.version <= (2, 80, 18)):
for world in bpy.data.worlds:
cworld = world.cycles
# World MIS
if not cworld.is_property_set("sampling_method"):
if cworld.get("sample_as_light", True):
cworld.sampling_method = 'MANUAL'
else:
cworld.sampling_method = 'NONE'
ambient_occlusion_nodes_relink()
if bpy.data.version <= (2, 79, 6) or \
(bpy.data.version >= (2, 80, 0) and bpy.data.version <= (2, 80, 41)):
# Change default to bump again.
for mat in bpy.data.materials:
cmat = mat.cycles
if not cmat.is_property_set("displacement_method"):
cmat.displacement_method = 'DISPLACEMENT'
| [
"admin@irradiate.net"
] | admin@irradiate.net |
ee7d13e00cccdc88d49ad555a163e514fa7a1276 | aad51b0ea59c38b23ed419e10b86c44aa947f117 | /156/rrhood.py | cec768264e83a157a3edbe67b33fa6193bb99896 | [] | no_license | berubejd/PyBites | 3a1d7144f59f67a0996dbe224b69bc0b6da439d6 | 439446e8b67612a603713723b2d4a021677341d2 | refs/heads/master | 2021-07-14T14:03:51.819347 | 2020-11-01T14:59:02 | 2020-11-01T14:59:02 | 221,087,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,346 | py | #!/usr/bin/env python3.8
CHARACTERS = ['Red Riding Hood',
# we're omitting 'mother' here for simplicity
# (= substring grandmother)
('Grandmother', 'Grandma', 'Granny'),
'wolf', 'woodsman']
text = """
Once upon a time, there was a little girl who lived in a village near the forest. Whenever she went out, the little girl wore a red riding cloak, so everyone in the village called her Little Red Riding Hood.
One morning, Little Red Riding Hood asked her mother if she could go to visit her grandmother as it had been awhile since they'd seen each other.
"That's a good idea," her mother said. So they packed a nice basket for Little Red Riding Hood to take to her grandmother.
When the basket was ready, the little girl put on her red cloak and kissed her mother goodbye.
"Remember, go straight to Grandma's house," her mother cautioned. "Don't dawdle along the way and please don't talk to strangers! The woods are dangerous."
"Don't worry, mommy," said Little Red Riding Hood, "I'll be careful."
But when Little Red Riding Hood noticed some lovely flowers in the woods, she forgot her promise to her mother. She picked a few, watched the butterflies flit about for awhile, listened to the frogs croaking and then picked a few more.
Little Red Riding Hood was enjoying the warm summer day so much, that she didn't notice a dark shadow approaching out of the forest behind her...
Suddenly, the wolf appeared beside her.
"What are you doing out here, little girl?" the wolf asked in a voice as friendly as he could muster.
"I'm on my way to see my Grandma who lives through the forest, near the brook," Little Red Riding Hood replied.
Then she realized how late she was and quickly excused herself, rushing down the path to her Grandma's house.
The wolf, in the meantime, took a shortcut...
The wolf, a little out of breath from running, arrived at Grandma's and knocked lightly at the door.
"Oh thank goodness dear! Come in, come in! I was worried sick that something had happened to you in the forest," said Grandma thinking that the knock was her granddaughter.
The wolf let himself in. Poor Granny did not have time to say another word, before the wolf gobbled her up!
The wolf let out a satisfied burp, and then poked through Granny's wardrobe to find a nightgown that he liked. He added a frilly sleeping cap, and for good measure, dabbed some of Granny's perfume behind his pointy ears.
A few minutes later, Red Riding Hood knocked on the door. The wolf jumped into bed and pulled the covers over his nose. "Who is it?" he called in a cackly voice.
"It's me, Little Red Riding Hood."
"Oh how lovely! Do come in, my dear," croaked the wolf.
When Little Red Riding Hood entered the little cottage, she could scarcely recognize her Grandmother.
"Grandmother! Your voice sounds so odd. Is something the matter?" she asked.
"Oh, I just have touch of a cold," squeaked the wolf adding a cough at the end to prove the point.
"But Grandmother! What big ears you have," said Little Red Riding Hood as she edged closer to the bed.
"The better to hear you with, my dear," replied the wolf.
"But Grandmother! What big eyes you have," said Little Red Riding Hood.
"The better to see you with, my dear," replied the wolf.
"But Grandmother! What big teeth you have," said Little Red Riding Hood her voice quivering slightly.
"The better to eat you with, my dear," roared the wolf and he leapt out of the bed and began to chase the little girl.
Almost too late, Little Red Riding Hood realized that the person in the bed was not her Grandmother, but a hungry wolf.
She ran across the room and through the door, shouting, "Help! Wolf!" as loudly as she could.
A woodsman who was chopping logs nearby heard her cry and ran towards the cottage as fast as he could.
He grabbed the wolf and made him spit out the poor Grandmother who was a bit frazzled by the whole experience, but still in one piece."Oh Grandma, I was so scared!" sobbed Little Red Riding Hood, "I'll never speak to strangers or dawdle in the forest again."
"There, there, child. You've learned an important lesson. Thank goodness you shouted loud enough for this kind woodsman to hear you!"
The woodsman knocked out the wolf and carried him deep into the forest where he wouldn't bother people any longer.
Little Red Riding Hood and her Grandmother had a nice lunch and a long chat.
"""
def make_character_index(text=text, characters=CHARACTERS):
"""Return a dict with keys are characters (lowercased) and values
the lines they appear in sorted order.
Matches should be case insensitive.
If a character has multiple synonyms
- e.g. ('Grandmother', 'Grandma', 'Granny') -
then return the former as key.
"""
from collections import defaultdict
index = defaultdict(list)
line_number = 0
for line in text.lower().strip().splitlines():
line_number += 1
for character in characters:
if isinstance(character, str):
character = (character,)
for syn in character:
if syn.lower() in line:
if not line_number in index[character[0].lower()]:
index[character[0].lower()].append(line_number)
return index
print(make_character_index())
| [
"berubejd@gmail.com"
] | berubejd@gmail.com |
59ab9c4e25ae4f4d32c497ff085285a02ce8eae2 | 5b86d752424e303a5115ded25892776d3781bddf | /tools/forecast_data_select.py | c4ceec80ef4eccac6ff982ff5fac063d8bf44513 | [] | no_license | GlassyWing/weather_predict_torch | 3318e441768ebbaaac408cdd3c48133f7bc03495 | 0cbd0af4b787bb8624d65b9e31970161e29d05a2 | refs/heads/master | 2020-05-17T08:04:28.473337 | 2019-05-04T13:54:27 | 2019-05-04T13:54:27 | 183,597,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | import pandas as pd
if __name__ == '__main__':
weather = pd.read_csv("../data/weather.csv")
weather = weather[(weather['year'] == 2017) & (weather['month'] == 5) & (weather['county'] == '仁和')][::-1]
weather.to_csv("../data/forecast_test.csv", index=False)
| [
"1490215053@qq.com"
] | 1490215053@qq.com |
beb9bd4c2cd0d682ed67c40ad31749fdf94ea87d | 432ea480327c3e0ce37d605d1c4ac29a8b653853 | /src/visions/backends/python/types/file.py | 2886a3bce3435a710a17540736818199fafd230b | [
"BSD-4-Clause",
"BSD-2-Clause"
] | permissive | dylan-profiler/visions | 3f7f99b06cc8a7b90cb4df988dbbec6c329a8e0a | a0b55bbf95e6efe001195e4b497358d6283966b5 | refs/heads/develop | 2022-11-27T01:17:01.735418 | 2022-10-30T10:44:37 | 2022-10-30T10:44:37 | 227,633,867 | 188 | 23 | NOASSERTION | 2022-10-05T23:06:31 | 2019-12-12T15:09:01 | Python | UTF-8 | Python | false | false | 246 | py | import pathlib
from typing import Sequence
from visions.types.file import File
@File.contains_op.register
def file_contains(sequence: Sequence, state: dict) -> bool:
return all(isinstance(p, pathlib.Path) and p.exists() for p in sequence)
| [
"sfbbrugman@gmail.com"
] | sfbbrugman@gmail.com |
c9acd1718298d2ba3853839da68e37e9e068d22a | 9ff1058a0500be499fd3de9ec0beccd697d5273c | /lib/Diffusion/ModelFitting/DTI.py | 554d74a9c538ad0850a5a97c89f04f1900470348 | [] | no_license | jrussell9000/NeuroScripts | 93f53c7d38c1d51fdc0cf39096e0996daee887cf | e41558754bd36385f94934333cb39a6500abfd9f | refs/heads/master | 2021-06-09T20:30:59.956137 | 2021-04-08T18:45:39 | 2021-04-08T18:45:39 | 151,635,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,090 | py | import string, os, sys, subprocess, shutil, time
import numpy as np
import nibabel as nib
import dipy.reconst.dti as dti
from dipy.denoise.noise_estimate import estimate_sigma
from dipy.core.gradients import gradient_table
from dipy.io import read_bvals_bvecs
from dipy.reconst.dti import fractional_anisotropy
def fit_dti_dipy(input_dwi, input_bval, input_bvec, output_dir, fit_type='', mask='', bmax='', mask_tensor='T'):
if not os.path.exists(output_dir):
os.mkdir(output_dir)
img = nib.load(input_dwi)
data = img.get_data()
bvals, bvecs = read_bvals_bvecs(input_bval, input_bvec)
if mask != '':
mask_data = nib.load(mask).get_data()
aff = img.get_affine()
sform = img.get_sform()
qform = img.get_qform()
if bmax != "":
jj = np.where(bvals >= bmax)
bvals = np.delete(bvals, jj)
bvecs = np.delete(bvecs, jj, 0)
data = np.delete(data, jj , axis=3)
values = np.array(bvals)
ii = np.where(values == bvals.min())[0]
b0_average = np.mean(data[:,:,:,ii], axis=3)
gtab = gradient_table(bvals, bvecs)
if fit_type == 'RESTORE':
sigma = estimate_sigma(data)
#calculate the average sigma from the b0's
sigma = np.mean(sigma[ii])
dti_model = dti.TensorModel(gtab, fit_method='RESTORE', sigma=sigma)
if mask != '':
dti_fit = dti_model.fit(data, mask_data)
else:
dti_fit = dti_model.fit(data)
elif fit_type != 'RESTORE' and fit_type != '':
dti_model = dti.TensorModel(gtab, fit_method=fit_type)
if mask != '':
dti_fit = dti_model.fit(data, mask_data)
else:
dti_fit = dti_model.fit(data)
else:
dti_model = dti.TensorModel(gtab)
if mask != '':
dti_fit = dti_model.fit(data, mask_data)
else:
dti_fit = dti_model.fit(data)
estimate_data = dti_fit.predict(gtab, S0=b0_average)
residuals = np.absolute(data - estimate_data)
evecs = dti_fit.evecs.astype(np.float32)
evals = dti_fit.evals.astype(np.float32)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
#Define output imgs
output_evecs = output_dir + '/dti_eigenvectors.nii.gz'
output_tensor = output_dir + '/dti_tensor.nii.gz'
dti_tensor_spd = output_dir + '/dti_tensor_spd.nii.gz'
output_tensor_norm = output_dir + '/dti_tensor_norm.nii.gz'
dti_tensor_spd_masked = output_dir + '/dti_tensor_spd_masked.nii.gz'
norm_mask = output_dir + '/norm_mask.nii.gz'
output_V1 = output_dir + '/dti_V1.nii.gz'
output_V2 = output_dir + '/dti_V2.nii.gz'
output_V3 = output_dir + '/dti_V3.nii.gz'
output_L1 = output_dir + '/dti_L1.nii.gz'
output_L2 = output_dir + '/dti_L2.nii.gz'
output_L3 = output_dir + '/dti_L3.nii.gz'
output_fa = output_dir + '/dti_FA.nii.gz'
output_md = output_dir + '/dti_MD.nii.gz'
output_rd = output_dir + '/dti_RD.nii.gz'
output_ad = output_dir + '/dti_AD.nii.gz'
output_res = output_dir + '/dti_residuals.nii.gz'
evecs_img = nib.Nifti1Image(evecs, img.get_affine(), img.header)
nib.save(evecs_img, output_evecs)
dti_V1 = evecs[:,:,:,:,0]
V1_img = nib.Nifti1Image(dti_V1,aff,img.header)
V1_img.set_sform(sform)
V1_img.set_qform(qform)
nib.save(V1_img, output_V1)
dti_V2 = evecs[:,:,:,:,1]
V2_img = nib.Nifti1Image(dti_V2,aff,img.header)
V2_img.set_sform(sform)
V2_img.set_qform(qform)
nib.save(V2_img, output_V2)
dti_V3 = evecs[:,:,:,:,2]
V3_img = nib.Nifti1Image(dti_V3,aff,img.header)
V3_img.set_sform(sform)
V3_img.set_qform(qform)
nib.save(V3_img, output_V3)
dti_L1 = evals[:,:,:,0]
L1_img = nib.Nifti1Image(dti_L1,aff,img.header)
L1_img.set_sform(sform)
L1_img.set_qform(qform)
nib.save(L1_img, output_L1)
dti_L2 = evals[:,:,:,1]
L2_img = nib.Nifti1Image(dti_L2,aff,img.header)
L2_img.set_sform(sform)
L2_img.set_qform(qform)
nib.save(L2_img, output_L2)
dti_L3 = evals[:,:,:,2]
L3_img = nib.Nifti1Image(dti_L3,aff,img.header)
L3_img.set_sform(sform)
L3_img.set_qform(qform)
nib.save(L3_img, output_L3)
res_img = nib.Nifti1Image(residuals.astype(np.float32), aff,img.header)
res_img.set_sform(sform)
res_img.set_qform(qform)
nib.save(res_img, output_res)
os.chdir(output_dir)
os.system('TVFromEigenSystem -basename dti -type FSL -out ' + output_tensor)
os.system('TVtool -in ' + output_tensor + ' -scale 1000.00 -out ' + output_tensor)
os.system('rm -rf dti_V* dti_L*')
#Create the SPD
os.system('TVtool -in ' + output_tensor + ' -spd -out ' + dti_tensor_spd)
if mask_tensor == 'T':
os.system('TVtool -in ' + dti_tensor_spd + ' -norm -out ' + output_tensor_norm)
os.system('BinaryThresholdImageFilter ' + output_tensor_norm + ' ' + norm_mask + ' 0.01 3.0 1 0')
os.system('TVtool -in ' + dti_tensor_spd + ' -mask ' + norm_mask + ' -out ' + dti_tensor_spd_masked)
os.system('TVEigenSystem -in ' + dti_tensor_spd_masked + ' -type FSL')
#Calculate Eigenvectors and Eigenvalues, FA, MD, RD, AD
os.system('TVtool -in ' + dti_tensor_spd_masked + ' -fa -out ' + output_fa)
os.system('TVtool -in ' + dti_tensor_spd_masked + ' -rd -out ' + output_rd)
os.system('TVtool -in ' + dti_tensor_spd_masked + ' -ad -out ' + output_ad)
os.system('TVtool -in ' + dti_tensor_spd_masked + ' -tr -out ' + output_md)
os.system('fslmaths ' + output_md + ' -div 3.0 ' + output_md)
else:
#Calculate FA, MD, RD, AD
os.system('TVEigenSystem -in ' + dti_tensor_spd + ' -type FSL')
os.system('TVtool -in ' + dti_tensor_spd + ' -fa -out ' + output_fa)
os.system('TVtool -in ' + dti_tensor_spd + ' -rd -out ' + output_rd)
os.system('TVtool -in ' + dti_tensor_spd + ' -ad -out ' + output_ad)
os.system('TVtool -in ' + dti_tensor_spd + ' -tr -out ' + output_md)
os.system('fslmaths ' + output_md + ' -div 3.0 ' + output_md)
def fit_dti_mrtrix(input_dwi, input_bval, input_bvec, output_dir, mask='', bmax=''):
    """Fit a diffusion tensor with MRtrix3's dwi2tensor and export parameter maps.

    Writes dti_{tensor,V1..V3,L1..L3,FA,MD,RD,AD}.nii.gz into output_dir
    (created if missing).  ``mask`` optionally restricts the fit; ``bmax``,
    when given, removes every volume whose b-value is >= bmax before fitting.
    Relies on the MRtrix3 command-line tools being on PATH.
    """
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    # Destination paths for the tensor and all derived maps.
    output_tensor = output_dir + '/dti_tensor.nii.gz'
    output_V1 = output_dir + '/dti_V1.nii.gz'
    output_V2 = output_dir + '/dti_V2.nii.gz'
    output_V3 = output_dir + '/dti_V3.nii.gz'
    output_L1 = output_dir + '/dti_L1.nii.gz'
    output_L2 = output_dir + '/dti_L2.nii.gz'
    output_L3 = output_dir + '/dti_L3.nii.gz'
    output_fa = output_dir + '/dti_FA.nii.gz'
    output_md = output_dir + '/dti_MD.nii.gz'
    output_rd = output_dir + '/dti_RD.nii.gz'
    output_ad = output_dir + '/dti_AD.nii.gz'
    if bmax!='':
        # Drop high-b shells: delete every volume with b >= bmax from the
        # data, bvals and bvecs, then write reduced copies to tmp_* files.
        img = nib.load(input_dwi)
        data = img.get_data()
        bvals, bvecs = read_bvals_bvecs(input_bval, input_bvec)
        aff = img.get_affine()
        sform = img.get_sform()
        qform = img.get_qform()
        jj = np.where(bvals >= bmax)
        bvals = np.delete(bvals, jj)
        bvecs = np.delete(bvecs, jj, 0)
        data = np.delete(data, jj , axis=3)
        #Save the dwi data
        tmp_dwi_img = nib.Nifti1Image(data,aff,img.header)
        tmp_dwi_img.set_sform(sform)
        tmp_dwi_img.set_qform(qform)
        nib.save(tmp_dwi_img, output_dir+'/tmp_dwi.nii.gz')
        # FSL-style gradient files: bvecs are written transposed (3 rows).
        np.savetxt(output_dir+'/tmp_bvals.bval', bvals, fmt='%i')
        np.savetxt(output_dir+'/tmp_bvecs.bvec', np.transpose(bvecs), fmt='%.5f')
        #Run the tensor fitting using MRTRIX:
        command = 'dwi2tensor -fslgrad ' + output_dir+'/tmp_bvecs.bvec ' + output_dir+'/tmp_bvals.bval ' + output_dir+'/tmp_dwi.nii.gz ' + output_tensor
    else:
        command = 'dwi2tensor -fslgrad ' + input_bvec + ' ' + input_bval + ' ' + input_dwi + ' ' + output_tensor
    if mask!='':
        os.system(command+' -mask ' + mask)
    else:
        os.system(command)
    #Write out the parameters
    os.system('tensor2metric -adc ' + output_md + ' ' + output_tensor)
    os.system('tensor2metric -fa ' + output_fa + ' ' + output_tensor)
    os.system('tensor2metric -ad ' + output_ad + ' ' + output_tensor)
    os.system('tensor2metric -rd ' + output_rd + ' ' + output_tensor)
    # Per-eigenvalue (-value) and per-eigenvector (-vector) maps, selected
    # with -num 1..3.
    os.system('tensor2metric -value ' + output_L1 + ' -num 1 ' + output_tensor)
    os.system('tensor2metric -value ' + output_L2 + ' -num 2 ' + output_tensor)
    os.system('tensor2metric -value ' + output_L3 + ' -num 3 ' + output_tensor)
    os.system('tensor2metric -vector ' + output_V1 + ' -num 1 ' + output_tensor)
    os.system('tensor2metric -vector ' + output_V2 + ' -num 2 ' + output_tensor)
    os.system('tensor2metric -vector ' + output_V3 + ' -num 3 ' + output_tensor)
    os.system('rm -rf ' + output_dir + '/tmp*')
def fit_dti_camino(input_dwi, input_bval, input_bvec, output_dir, fit_type='', mask='', bmax=''):
    """Fit a diffusion tensor with Camino and export eigen/scalar maps via DTI-TK.

    ``fit_type`` selects the Camino model: 'RESTORE' (robust fit with a noise
    sigma), 'WLLS' (ldt_wtd), 'NLLS' (nldt_pos), otherwise the default linear
    fit (ldt).  ``mask`` is passed to modelfit as -bgmask.
    NOTE(review): the ``bmax`` parameter is accepted but never used here.
    """
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    #First create temporary camino style data
    camino_dwi = output_dir + '/tmp_dwi.Bfloat'
    camino_scheme = output_dir + '/tmp_dwi.scheme'
    camino_tensor = output_dir + '/tmp_dti.Bfloat'
    os.system('image2voxel -4dimage ' + input_dwi + ' -outputfile ' + camino_dwi)
    os.system('fsl2scheme -bvecfile ' + input_bvec + ' -bvalfile ' + input_bval + ' > ' + camino_scheme)
    if fit_type == 'RESTORE':
        # RESTORE needs a noise level: average the estimated sigma over the
        # minimum-b-value entries.
        data = nib.load(input_dwi).get_data()
        bvals, bvecs = read_bvals_bvecs(input_bval, input_bvec)
        values = np.array(bvals)
        ii = np.where(values == bvals.min())[0]
        sigma = estimate_sigma(data)
        sigma = np.mean(sigma[ii])
        #FIT TENSOR
        os.system('modelfit -inputfile ' + camino_dwi + ' -schemefile ' + camino_scheme + ' -model restore -sigma ' + str(sigma) + ' -bgmask ' + mask + ' -outputfile ' + camino_tensor)
    elif fit_type == 'WLLS':
        os.system('modelfit -inputfile ' + camino_dwi + ' -schemefile ' + camino_scheme + ' -model ldt_wtd -bgmask ' + mask + ' -outputfile ' + camino_tensor)
    elif fit_type == 'NLLS':
        os.system('modelfit -inputfile ' + camino_dwi + ' -schemefile ' + camino_scheme + ' -model nldt_pos -bgmask ' + mask + ' -outputfile ' + camino_tensor)
    else:
        os.system('modelfit -inputfile ' + camino_dwi + ' -schemefile ' + camino_scheme + ' -model ldt -bgmask ' + mask + ' -outputfile ' + camino_tensor)
    #Convert the data back to NIFTI
    # NOTE(review): no '/' between output_dir and 'dti_', unlike every other
    # path in this function -- confirm whether output_dir must end in '/'.
    output_root = output_dir + 'dti_'
    os.system('dt2nii -inputfile ' + camino_tensor + ' -gzip -inputdatatype double -header ' + input_dwi + ' -outputroot ' + output_root)
    #Define the output file paths
    output_tensor = output_dir + '/dti_tensor.nii.gz'
    output_tensor_spd = output_dir + '/dti_tensor_spd.nii.gz'
    output_tensor_norm = output_dir + '/dti_tensor_norm.nii.gz'
    norm_mask = output_dir + '/norm_mask.nii.gz'
    output_tensor_spd_masked = output_dir + '/dti_tensor_spd_masked.nii.gz'
    output_V1 = output_dir + '/dti_V1.nii.gz'
    output_V2 = output_dir + '/dti_V2.nii.gz'
    output_V3 = output_dir + '/dti_V3.nii.gz'
    output_L1 = output_dir + '/dti_L1.nii.gz'
    output_L2 = output_dir + '/dti_L2.nii.gz'
    output_L3 = output_dir + '/dti_L3.nii.gz'
    output_fa = output_dir + '/dti_FA.nii.gz'
    output_md = output_dir + '/dti_MD.nii.gz'
    output_rd = output_dir + '/dti_RD.nii.gz'
    output_ad = output_dir + '/dti_AD.nii.gz'
    # NOTE(review): output_res is defined but never written by this function.
    output_res = output_dir + '/dti_residuals.nii.gz'
    # Scale the tensor by 1e9, force it symmetric-positive-definite, then mask
    # voxels whose tensor norm lies outside [0.01, 3.0].
    os.system('TVtool -in ' + output_root + 'dt.nii.gz -scale 1e9 -out ' + output_tensor)
    os.system('TVtool -in ' + output_tensor + ' -spd -out ' + output_tensor_spd)
    os.system('TVtool -in ' + output_tensor_spd + ' -norm -out ' + output_tensor_norm)
    os.system('BinaryThresholdImageFilter ' + output_tensor_norm + ' ' + norm_mask + ' 0.01 3.0 1 0')
    os.system('TVtool -in ' + output_tensor_spd + ' -mask ' + norm_mask + ' -out ' + output_tensor_spd_masked)
    os.system('TVFromEigenSystem -basename dti -type FSL -out ' + output_tensor_spd_masked)
    #Calculate FA, MD, RD, AD
    os.system('TVtool -in ' + output_tensor_spd_masked + ' -fa -out ' + output_fa)
    os.system('TVtool -in ' + output_tensor_spd_masked + ' -rd -out ' + output_rd)
    os.system('TVtool -in ' + output_tensor_spd_masked + ' -ad -out ' + output_ad)
    os.system('TVtool -in ' + output_tensor_spd_masked + ' -tr -out ' + output_md)
    # MD = trace / 3
    os.system('fslmaths ' + output_md + ' -div 3.00 ' + output_md)
    #Output the eigenvectors and eigenvalues
    os.system('TVEigenSystem -in ' + output_tensor_spd_masked + ' -type FSL')
    dti_basename=nib.filename_parser.splitext_addext(output_tensor_spd_masked)[0]
    os.system('mv ' + dti_basename + '_V1.nii.gz ' + output_V1)
    os.system('mv ' + dti_basename + '_V2.nii.gz ' + output_V2)
    os.system('mv ' + dti_basename + '_V3.nii.gz ' + output_V3)
    os.system('mv ' + dti_basename + '_L1.nii.gz ' + output_L1)
    os.system('mv ' + dti_basename + '_L2.nii.gz ' + output_L2)
    os.system('mv ' + dti_basename + '_L3.nii.gz ' + output_L3)
    #Clean up files
    os.system('rm -rf ' + dti_basename +'_[V,L]* ' + output_dir + '/tmp*')
def fit_fwdti_dipy(input_dwi, input_bval, input_bvec, output_dir, fit_method='', mask=''):
    """Fit DIPY's free-water-eliminated DTI model and save its parameter maps.

    Writes fwe_dti_{eigenvalues,eigenvectors,FA,MD,AD,RD,F}.nii.gz into
    output_dir (created if missing).

    Parameters:
        input_dwi:  path to the 4D DWI NIfTI image.
        input_bval, input_bvec:  FSL-style gradient files.
        output_dir: directory receiving the fwe_dti_* outputs.
        fit_method: DIPY fit method name; defaults to 'WLS' when empty.
        mask:       optional brain-mask NIfTI restricting the voxel-wise fit.
    """
    import dipy.reconst.fwdti as fwdti
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    if fit_method == '':
        fit_method = 'WLS'
    img = nib.load(input_dwi)
    data = img.get_data()
    bvals, bvecs = read_bvals_bvecs(input_bval, input_bvec)
    gtab = gradient_table(bvals, bvecs)
    fwdti_model = fwdti.FreeWaterTensorModel(gtab, fit_method)
    if mask != '':
        # Restrict the fit to voxels inside the supplied mask.
        mask_data = nib.load(mask).get_data()
        fwdti_fit = fwdti_model.fit(data, mask_data)
    else:
        fwdti_fit = fwdti_model.fit(data)

    def _save_map(array, output_path):
        # Save one parameter map with the input's affine/header, then make the
        # on-disk orientation canonical with fslreorient2std.
        out_img = nib.Nifti1Image(array.astype(np.float32), img.get_affine(), img.header)
        nib.save(out_img, output_path)
        os.system('fslreorient2std ' + output_path + ' ' + output_path)

    _save_map(fwdti_fit.evals, output_dir + '/fwe_dti_eigenvalues.nii.gz')
    _save_map(fwdti_fit.evecs, output_dir + '/fwe_dti_eigenvectors.nii.gz')
    _save_map(fwdti_fit.fa, output_dir + '/fwe_dti_FA.nii.gz')
    _save_map(fwdti_fit.md, output_dir + '/fwe_dti_MD.nii.gz')
    _save_map(fwdti_fit.ad, output_dir + '/fwe_dti_AD.nii.gz')
    _save_map(fwdti_fit.rd, output_dir + '/fwe_dti_RD.nii.gz')
    _save_map(fwdti_fit.f, output_dir + '/fwe_dti_F.nii.gz')
| [
"jrussell9000@gmail.com"
] | jrussell9000@gmail.com |
dc1fc261559951e65219266aec5cf9ca732ba352 | e33e414418be93aa0fb19c38b82b221ed8826460 | /intel-sds-proto/vsm_configure_guide/packages/vsmclient/python-vsmclient/vsmclient/v1/shell.py | 2ad1dd39c641f63f9d8a0249f9753cdf8077a782 | [
"Apache-2.0",
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | wisererik/proposals | 69e4caaf89d7838c14b18328dc261b6c914748bf | 9db7413983df9341d1796f2acba7202d36f31278 | refs/heads/master | 2021-05-03T23:12:39.496346 | 2018-12-22T04:02:46 | 2018-12-22T04:02:46 | 120,399,059 | 0 | 0 | null | 2018-02-06T03:54:13 | 2018-02-06T03:54:13 | null | UTF-8 | Python | false | false | 21,438 | py | # Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import os
import sys
import time
from vsmclient import exceptions
from vsmclient import utils
def _poll_for_status(poll_fn, obj_id, action, final_ok_states,
                     poll_period=5, show_progress=True):
    """Block while an action is being performed, periodically printing
    progress.

    poll_fn(obj_id) is called every poll_period seconds; the loop ends when
    the object's status reaches one of final_ok_states or becomes "error".
    NOTE: this is Python 2 code (bare print statements).
    """
    def print_progress(progress):
        # '\r' rewrites the current console line in place with new progress.
        if show_progress:
            msg = ('\rInstance %(action)s... %(progress)s%% complete'
                   % dict(action=action, progress=progress))
        else:
            msg = '\rInstance %(action)s...' % dict(action=action)
        sys.stdout.write(msg)
        sys.stdout.flush()
    # Emit a blank line before the in-place progress updates start.
    print
    while True:
        obj = poll_fn(obj_id)
        status = obj.status.lower()
        # Missing/None progress is treated as 0%.
        progress = getattr(obj, 'progress', None) or 0
        if status in final_ok_states:
            print_progress(100)
            print "\nFinished"
            break
        elif status == "error":
            print "\nError %(action)s instance" % locals()
            break
        else:
            print_progress(progress)
            time.sleep(poll_period)
def _find_vsm(cs, vsm):
    """Resolve a vsm reference (ID) to a vsm object via the vsms manager."""
    manager = cs.vsms
    return utils.find_resource(manager, vsm)
def _find_vsm_snapshot(cs, snapshot):
    """Resolve a snapshot reference (ID) via the vsm_snapshots manager."""
    manager = cs.vsm_snapshots
    return utils.find_resource(manager, snapshot)
def _find_backup(cs, backup):
    """Resolve a backup reference (ID) via the backups manager."""
    manager = cs.backups
    return utils.find_resource(manager, backup)
def _print_vsm(vsm):
    """Dump all of a vsm's raw fields to stdout."""
    info = vsm._info
    utils.print_dict(info)
def _print_vsm_snapshot(snapshot):
    """Dump all of a snapshot's raw fields to stdout."""
    info = snapshot._info
    utils.print_dict(info)
def _translate_keys(collection, convert):
for item in collection:
keys = item.__dict__.keys()
for from_key, to_key in convert:
if from_key in keys and to_key not in keys:
setattr(item, to_key, item._info[from_key])
def _translate_vsm_keys(collection):
    """Expose camelCase vsm fields under their snake_case names."""
    pairs = [('displayName', 'display_name'), ('vsmType', 'vsm_type')]
    _translate_keys(collection, pairs)
def _translate_vsm_snapshot_keys(collection):
    """Expose camelCase snapshot fields under their snake_case names."""
    pairs = [('displayName', 'display_name'), ('vsmId', 'vsm_id')]
    _translate_keys(collection, pairs)
def _extract_metadata(args):
metadata = {}
for metadatum in args.metadata:
# unset doesn't require a val, so we have the if/else
if '=' in metadatum:
(key, value) = metadatum.split('=', 1)
else:
key = metadatum
value = None
metadata[key] = value
return metadata
@utils.arg(
    '--all-tenants',
    dest='all_tenants',
    metavar='<0|1>',
    nargs='?',
    type=int,
    const=1,
    default=0,
    help='Display information from all tenants (Admin only).')
@utils.arg(
    '--all_tenants',
    nargs='?',
    type=int,
    const=1,
    help=argparse.SUPPRESS)
@utils.arg(
    '--display-name',
    metavar='<display-name>',
    default=None,
    help='Filter results by display-name')
@utils.arg(
    '--status',
    metavar='<status>',
    default=None,
    help='Filter results by status')
@utils.service_type('vsm')
def do_list(cs, args):
    """List all the vsms."""
    # The ALL_TENANTS environment variable overrides the CLI flag.
    all_tenants = int(os.environ.get("ALL_TENANTS", args.all_tenants))
    search_opts = {
        'all_tenants': all_tenants,
        'display_name': args.display_name,
        'status': args.status,
    }
    vsms = cs.vsms.list(search_opts=search_opts)
    _translate_vsm_keys(vsms)
    # Create a list of servers to which the vsm is attached
    for vol in vsms:
        servers = [s.get('server_id') for s in vol.attachments]
        setattr(vol, 'attached_to', ','.join(map(str, servers)))
    utils.print_list(vsms, ['ID', 'Status', 'Display Name',
                            'Size', 'Volume Type', 'Bootable', 'Attached to'])
@utils.arg('vsm', metavar='<vsm>', help='ID of the vsm.')
@utils.service_type('vsm')
def do_show(cs, args):
    """Show details about a vsm."""
    # Resolve the CLI argument to a vsm object, then dump all of its fields.
    vsm = _find_vsm(cs, args.vsm)
    _print_vsm(vsm)
@utils.arg('size',
           metavar='<size>',
           type=int,
           help='Size of vsm in GB')
@utils.arg(
    '--snapshot-id',
    metavar='<snapshot-id>',
    default=None,
    help='Create vsm from snapshot id (Optional, Default=None)')
@utils.arg(
    '--snapshot_id',
    help=argparse.SUPPRESS)
@utils.arg(
    '--source-volid',
    metavar='<source-volid>',
    default=None,
    help='Create vsm from vsm id (Optional, Default=None)')
@utils.arg(
    '--source_volid',
    help=argparse.SUPPRESS)
@utils.arg(
    '--image-id',
    metavar='<image-id>',
    default=None,
    help='Create vsm from image id (Optional, Default=None)')
@utils.arg(
    '--image_id',
    help=argparse.SUPPRESS)
@utils.arg(
    '--display-name',
    metavar='<display-name>',
    default=None,
    help='Volume name (Optional, Default=None)')
@utils.arg(
    '--display_name',
    help=argparse.SUPPRESS)
@utils.arg(
    '--display-description',
    metavar='<display-description>',
    default=None,
    help='Volume description (Optional, Default=None)')
@utils.arg(
    '--display_description',
    help=argparse.SUPPRESS)
@utils.arg(
    '--vsm-type',
    metavar='<vsm-type>',
    default=None,
    help='Volume type (Optional, Default=None)')
@utils.arg(
    '--vsm_type',
    help=argparse.SUPPRESS)
@utils.arg(
    '--availability-zone',
    metavar='<availability-zone>',
    default=None,
    help='Availability zone for vsm (Optional, Default=None)')
@utils.arg(
    '--availability_zone',
    help=argparse.SUPPRESS)
@utils.arg('--metadata',
           type=str,
           nargs='*',
           metavar='<key=value>',
           help='Metadata key=value pairs (Optional, Default=None)',
           default=None)
@utils.service_type('vsm')
def do_create(cs, args):
    """Add a new vsm."""
    # --metadata accepts key=value pairs; parse them only when supplied.
    vsm_metadata = None
    if args.metadata is not None:
        vsm_metadata = _extract_metadata(args)
    vsm = cs.vsms.create(args.size,
                         args.snapshot_id,
                         args.source_volid,
                         args.display_name,
                         args.display_description,
                         args.vsm_type,
                         availability_zone=args.availability_zone,
                         imageRef=args.image_id,
                         metadata=vsm_metadata)
    # Echo the newly created vsm's fields back to the user.
    _print_vsm(vsm)
@utils.arg('vsm', metavar='<vsm>', help='ID of the vsm to delete.')
@utils.service_type('vsm')
def do_delete(cs, args):
    """Remove a vsm."""
    target = _find_vsm(cs, args.vsm)
    target.delete()
@utils.arg('vsm', metavar='<vsm>', help='ID of the vsm to delete.')
@utils.service_type('vsm')
def do_force_delete(cs, args):
    """Attempt forced removal of a vsm, regardless of its state."""
    target = _find_vsm(cs, args.vsm)
    target.force_delete()
@utils.arg('vsm', metavar='<vsm>', help='ID of the vsm to rename.')
@utils.arg('display_name', nargs='?', metavar='<display-name>',
           help='New display-name for the vsm.')
@utils.arg('--display-description', metavar='<display-description>',
           help='Optional vsm description. (Default=None)',
           default=None)
@utils.service_type('vsm')
def do_rename(cs, args):
    """Rename a vsm."""
    # Only forward the fields the user actually supplied.
    kwargs = {}
    if args.display_name is not None:
        kwargs['display_name'] = args.display_name
    if args.display_description is not None:
        kwargs['display_description'] = args.display_description
    _find_vsm(cs, args.vsm).update(**kwargs)
@utils.arg('vsm',
           metavar='<vsm>',
           help='ID of the vsm to update metadata on.')
@utils.arg('action',
           metavar='<action>',
           choices=['set', 'unset'],
           help="Actions: 'set' or 'unset'")
@utils.arg('metadata',
           metavar='<key=value>',
           nargs='+',
           default=[],
           help='Metadata to set/unset (only key is necessary on unset)')
@utils.service_type('vsm')
def do_metadata(cs, args):
    """Set or Delete metadata on a vsm."""
    vsm = _find_vsm(cs, args.vsm)
    metadata = _extract_metadata(args)
    if args.action == 'set':
        cs.vsms.set_metadata(vsm, metadata)
    elif args.action == 'unset':
        # Deleting metadata only requires the key names.
        cs.vsms.delete_metadata(vsm, metadata.keys())
@utils.arg(
    '--all-tenants',
    dest='all_tenants',
    metavar='<0|1>',
    nargs='?',
    type=int,
    const=1,
    default=0,
    help='Display information from all tenants (Admin only).')
@utils.arg(
    '--all_tenants',
    nargs='?',
    type=int,
    const=1,
    help=argparse.SUPPRESS)
@utils.arg(
    '--display-name',
    metavar='<display-name>',
    default=None,
    help='Filter results by display-name')
@utils.arg(
    '--status',
    metavar='<status>',
    default=None,
    help='Filter results by status')
@utils.arg(
    '--vsm-id',
    metavar='<vsm-id>',
    default=None,
    help='Filter results by vsm-id')
@utils.service_type('vsm')
def do_snapshot_list(cs, args):
    """List all the snapshots."""
    # The ALL_TENANTS environment variable overrides the CLI flag.
    all_tenants = int(os.environ.get("ALL_TENANTS", args.all_tenants))
    search_opts = {
        'all_tenants': all_tenants,
        'display_name': args.display_name,
        'status': args.status,
        'vsm_id': args.vsm_id,
    }
    snapshots = cs.vsm_snapshots.list(search_opts=search_opts)
    # Expose snake_case aliases expected by the column list below.
    _translate_vsm_snapshot_keys(snapshots)
    utils.print_list(snapshots,
                     ['ID', 'Volume ID', 'Status', 'Display Name', 'Size'])
@utils.arg('snapshot', metavar='<snapshot>', help='ID of the snapshot.')
@utils.service_type('vsm')
def do_snapshot_show(cs, args):
    """Show details about a snapshot."""
    found = _find_vsm_snapshot(cs, args.snapshot)
    _print_vsm_snapshot(found)
@utils.arg('vsm_id',
           metavar='<vsm-id>',
           help='ID of the vsm to snapshot')
@utils.arg('--force',
           metavar='<True|False>',
           help='Optional flag to indicate whether '
           'to snapshot a vsm even if it\'s '
           'attached to an instance. (Default=False)',
           default=False)
@utils.arg(
    '--display-name',
    metavar='<display-name>',
    default=None,
    help='Optional snapshot name. (Default=None)')
@utils.arg(
    '--display_name',
    help=argparse.SUPPRESS)
@utils.arg(
    '--display-description',
    metavar='<display-description>',
    default=None,
    help='Optional snapshot description. (Default=None)')
@utils.arg(
    '--display_description',
    help=argparse.SUPPRESS)
@utils.service_type('vsm')
def do_snapshot_create(cs, args):
    """Add a new snapshot."""
    snapshot = cs.vsm_snapshots.create(args.vsm_id,
                                       args.force,
                                       args.display_name,
                                       args.display_description)
    # Echo the new snapshot's fields back to the user.
    _print_vsm_snapshot(snapshot)
@utils.arg('snapshot_id',
           metavar='<snapshot-id>',
           help='ID of the snapshot to delete.')
@utils.service_type('vsm')
def do_snapshot_delete(cs, args):
    """Remove a snapshot."""
    target = _find_vsm_snapshot(cs, args.snapshot_id)
    target.delete()
@utils.arg('snapshot', metavar='<snapshot>', help='ID of the snapshot.')
@utils.arg('display_name', nargs='?', metavar='<display-name>',
           help='New display-name for the snapshot.')
@utils.arg('--display-description', metavar='<display-description>',
           help='Optional snapshot description. (Default=None)',
           default=None)
@utils.service_type('vsm')
def do_snapshot_rename(cs, args):
    """Rename a snapshot."""
    # Only forward the fields the user actually supplied.
    kwargs = {}
    if args.display_name is not None:
        kwargs['display_name'] = args.display_name
    if args.display_description is not None:
        kwargs['display_description'] = args.display_description
    _find_vsm_snapshot(cs, args.snapshot).update(**kwargs)
def _print_vsm_type_list(vtypes):
    """Print a two-column (ID, Name) table of vsm types."""
    columns = ['ID', 'Name']
    utils.print_list(vtypes, columns)
def _print_type_and_extra_specs_list(vtypes):
    """Print vsm types with their extra_specs rendered per type."""
    columns = ['ID', 'Name', 'extra_specs']
    formatters = {'extra_specs': _print_type_extra_specs}
    utils.print_list(vtypes, columns, formatters)
@utils.service_type('vsm')
def do_type_list(cs, args):
    """Print a list of available 'vsm types'."""
    available = cs.vsm_types.list()
    _print_vsm_type_list(available)
@utils.service_type('vsm')
def do_extra_specs_list(cs, args):
    """Print a list of current 'vsm types and extra specs' (Admin Only)."""
    available = cs.vsm_types.list()
    _print_type_and_extra_specs_list(available)
@utils.arg('name',
           metavar='<name>',
           help="Name of the new vsm type")
@utils.service_type('vsm')
def do_type_create(cs, args):
    """Create a new vsm type."""
    created = cs.vsm_types.create(args.name)
    _print_vsm_type_list([created])
@utils.arg('id',
           metavar='<id>',
           help="Unique ID of the vsm type to delete")
@utils.service_type('vsm')
def do_type_delete(cs, args):
    """Delete a specific vsm type"""
    # Deletion is by type ID only; no confirmation is requested.
    cs.vsm_types.delete(args.id)
@utils.arg('vtype',
           metavar='<vtype>',
           help="Name or ID of the vsm type")
@utils.arg('action',
           metavar='<action>',
           choices=['set', 'unset'],
           help="Actions: 'set' or 'unset'")
@utils.arg('metadata',
           metavar='<key=value>',
           nargs='*',
           default=None,
           help='Extra_specs to set/unset (only key is necessary on unset)')
@utils.service_type('vsm')
def do_type_key(cs, args):
    """Set or unset extra_spec for a vsm type."""
    # (Fixed: the docstring was previously quoted as "...""" -- it only
    # worked through accidental implicit string concatenation.)
    vtype = _find_vsm_type(cs, args.vtype)
    if args.metadata is not None:
        keypair = _extract_metadata(args)
        if args.action == 'set':
            vtype.set_keys(keypair)
        elif args.action == 'unset':
            # Unset only needs the key names.
            vtype.unset_keys(keypair.keys())
def do_endpoints(cs, args):
    """Discover endpoints that get returned from the authenticate services"""
    catalog = cs.client.service_catalog.catalog
    for service in catalog['access']['serviceCatalog']:
        # Print the first endpoint of each catalog entry, titled by service.
        utils.print_dict(service['endpoints'][0], service['name'])
def do_credentials(cs, args):
    """Show user credentials returned from auth"""
    access = cs.client.service_catalog.catalog['access']
    utils.print_dict(access['user'], "User Credentials")
    utils.print_dict(access['token'], "Token")
_quota_resources = ['vsms', 'snapshots', 'gigabytes']
def _quota_show(quotas):
    """Pretty-print the known quota resources of a quota-set object."""
    shown = {}
    for name in _quota_resources:
        # Missing resources are shown as None rather than raising.
        shown[name] = getattr(quotas, name, None)
    utils.print_dict(shown)
def _quota_update(manager, identifier, args):
    """Send to the manager any quota values that are present on args."""
    updates = dict(
        (name, getattr(args, name))
        for name in _quota_resources
        if getattr(args, name, None) is not None
    )
    # Skip the API call entirely when the user supplied no quota values.
    if updates:
        manager.update(identifier, **updates)
@utils.arg('tenant', metavar='<tenant_id>',
           help='UUID of tenant to list the quotas for.')
@utils.service_type('vsm')
def do_quota_show(cs, args):
    """List the quotas for a tenant."""
    quota_set = cs.quotas.get(args.tenant)
    _quota_show(quota_set)
@utils.arg('tenant', metavar='<tenant_id>',
           help='UUID of tenant to list the default quotas for.')
@utils.service_type('vsm')
def do_quota_defaults(cs, args):
    """List the default quotas for a tenant."""
    defaults = cs.quotas.defaults(args.tenant)
    _quota_show(defaults)
@utils.arg('tenant', metavar='<tenant_id>',
           help='UUID of tenant to set the quotas for.')
@utils.arg('--vsms',
           metavar='<vsms>',
           type=int, default=None,
           help='New value for the "vsms" quota.')
@utils.arg('--snapshots',
           metavar='<snapshots>',
           type=int, default=None,
           help='New value for the "snapshots" quota.')
@utils.arg('--gigabytes',
           metavar='<gigabytes>',
           type=int, default=None,
           help='New value for the "gigabytes" quota.')
@utils.service_type('vsm')
def do_quota_update(cs, args):
    """Update the quotas for a tenant."""
    # Only the quota flags the user supplied are sent (see _quota_update).
    _quota_update(cs.quotas, args.tenant, args)
@utils.arg('class_name', metavar='<class>',
           help='Name of quota class to list the quotas for.')
@utils.service_type('vsm')
def do_quota_class_show(cs, args):
    """List the quotas for a quota class."""
    quota_set = cs.quota_classes.get(args.class_name)
    _quota_show(quota_set)
@utils.arg('class_name', metavar='<class>',
           help='Name of quota class to set the quotas for.')
@utils.arg('--vsms',
           metavar='<vsms>',
           type=int, default=None,
           help='New value for the "vsms" quota.')
@utils.arg('--snapshots',
           metavar='<snapshots>',
           type=int, default=None,
           help='New value for the "snapshots" quota.')
@utils.arg('--gigabytes',
           metavar='<gigabytes>',
           type=int, default=None,
           help='New value for the "gigabytes" quota.')
@utils.service_type('vsm')
def do_quota_class_update(cs, args):
    """Update the quotas for a quota class."""
    # Only the quota flags the user supplied are sent (see _quota_update).
    _quota_update(cs.quota_classes, args.class_name, args)
@utils.service_type('vsm')
def do_absolute_limits(cs, args):
    """Print a list of absolute limits for a user"""
    absolute = cs.limits.get().absolute
    utils.print_list(absolute, ['Name', 'Value'])
@utils.service_type('vsm')
def do_rate_limits(cs, args):
    """Print a list of rate limits for a user"""
    rate = cs.limits.get().rate
    utils.print_list(rate,
                     ['Verb', 'URI', 'Value', 'Remain', 'Unit',
                      'Next_Available'])
def _print_type_extra_specs(vol_type):
    """Return a type's extra specs, or "N/A" when the type no longer exists."""
    try:
        specs = vol_type.get_keys()
    except exceptions.NotFound:
        return "N/A"
    return specs
def _find_vsm_type(cs, vtype):
    """Resolve a vsm type reference (name or ID) to a type object."""
    manager = cs.vsm_types
    return utils.find_resource(manager, vtype)
@utils.arg('vsm_id',
           metavar='<vsm-id>',
           help='ID of the vsm to upload to an image')
@utils.arg('--force',
           metavar='<True|False>',
           help='Optional flag to indicate whether '
           'to upload a vsm even if it\'s '
           'attached to an instance. (Default=False)',
           default=False)
@utils.arg('--container-format',
           metavar='<container-format>',
           help='Optional type for container format '
           '(Default=bare)',
           default='bare')
@utils.arg('--disk-format',
           metavar='<disk-format>',
           help='Optional type for disk format '
           '(Default=raw)',
           default='raw')
@utils.arg('image_name',
           metavar='<image-name>',
           help='Name for created image')
@utils.service_type('vsm')
def do_upload_to_image(cs, args):
    """Upload vsm to image service as image."""
    vsm = _find_vsm(cs, args.vsm_id)
    # Delegates to the vsm object; container/disk formats default to
    # bare/raw via the CLI flags above.
    vsm.upload_to_image(args.force,
                        args.image_name,
                        args.container_format,
                        args.disk_format)
@utils.arg('vsm', metavar='<vsm>',
           help='ID of the vsm to backup.')
@utils.arg('--container', metavar='<container>',
           help='Optional Backup container name. (Default=None)',
           default=None)
@utils.arg('--display-name', metavar='<display-name>',
           help='Optional backup name. (Default=None)',
           default=None)
@utils.arg('--display-description', metavar='<display-description>',
           help='Optional backup description. (Default=None)',
           default=None)
@utils.service_type('vsm')
def do_backup_create(cs, args):
    """Creates a backup."""
    # Fire-and-forget: the created backup is not printed back to the user.
    cs.backups.create(args.vsm,
                      args.container,
                      args.display_name,
                      args.display_description)
@utils.arg('backup', metavar='<backup>', help='ID of the backup.')
@utils.service_type('vsm')
def do_backup_show(cs, args):
    """Show details about a backup."""
    backup = _find_backup(cs, args.backup)
    # Copy the raw fields and hide the hypermedia 'links' entry.
    details = dict(backup._info)
    details.pop('links', None)
    utils.print_dict(details)
@utils.service_type('vsm')
def do_backup_list(cs, args):
    """List all the backups."""
    backups = cs.backups.list()
    utils.print_list(backups,
                     ['ID', 'Volume ID', 'Status', 'Name', 'Size',
                      'Object Count', 'Container'])
@utils.arg('backup', metavar='<backup>',
           help='ID of the backup to delete.')
@utils.service_type('vsm')
def do_backup_delete(cs, args):
    """Remove a backup."""
    target = _find_backup(cs, args.backup)
    target.delete()
@utils.arg('backup', metavar='<backup>',
           help='ID of the backup to restore.')
@utils.arg('--vsm-id', metavar='<vsm-id>',
           help='Optional ID of the vsm to restore to.',
           default=None)
@utils.service_type('vsm')
def do_backup_restore(cs, args):
    """Restore a backup."""
    # When --vsm-id is omitted, the service picks the restore target.
    cs.restores.restore(args.backup, args.vsm_id)
| [
"huangzhipeng@huawei.com"
] | huangzhipeng@huawei.com |
4b6960d22383d7c69b3c5e10c4b64ac23a1c83e8 | 230553326780c93f60d552a95d50018025724b4b | /py-code/迭代器.py | 0fa3e53eaa24b154fab2aaba25a75e2ac9777164 | [] | no_license | g-lyc/PRACTICE | 55108dbeb75893e4e6631ce3968420f1da0266ef | 492f72a1c14b53982ada478890e6a5487a67c46e | refs/heads/master | 2022-08-21T17:33:29.682865 | 2022-08-15T02:58:17 | 2022-08-15T02:58:17 | 51,586,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | #coding:utf-8
import os
import sys
# Every generator is an iterator, but not every iterator is a generator.
l = [1,2,3,4,5]  # an iterable object, not an iterator
d = iter(l)  # equivalent to l.__iter__()
print(d)  # e.g. <listiterator object at 0x0000000004DA5518>
# What is an iterator?
# It must satisfy two conditions: 1) it has an iter() method, 2) it has a next() method
# A for loop does three things internally:
# 1. call the iterable's iter method to obtain an iterator object
# 2. repeatedly call that iterator's next method
# 3. handle StopIteration to terminate the loop
# Iterator: yields values one at a time
# Iterable: an object from which an iterator can be obtained
| [
"309080979@qq.com"
] | 309080979@qq.com |
4d0e9ebb114de82496a8cb4dacaa905abe0ef68d | e6579d0c32041b307b14d63d36ad733918dc8925 | /Tareas/Tarea 3/tarea3_nroalumno_seccion_n/PRUEBAS/mena.py | 0fef4d796a28b2f2fe78e20721659121d4c55931 | [] | no_license | javierlopeza/IIC1103-2014-2 | 23bba4f33686e5779868f4f36ec05af4e322e8be | 2be90d0d03d1a03d7194613c117d788f12e4451a | refs/heads/master | 2021-01-19T14:41:32.475187 | 2017-04-13T16:10:32 | 2017-04-13T16:10:32 | 88,182,695 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,305 | py | import instaintro_gui
import copy
#################################
#################################
#################################
def girar_imagen(matriz):
    """Return a new matrix: matriz rotated 90 degrees clockwise.

    Column j of the input, read from the bottom row upward, becomes
    row j of the result.  The input matrix is not modified.
    """
    filas = len(matriz)
    columnas = len(matriz[0])
    girada = []
    for col in range(columnas):
        nueva_fila = [matriz[fila][col] for fila in range(filas - 1, -1, -1)]
        girada.append(nueva_fila)
    return girada
#################################
#################################
#################################
def escala_grises(matriz):
    """Convert an RGB pixel matrix to a grayscale matrix.

    Each pixel (r, g, b) becomes a tuple repeating the mean of its three
    channels; channel values may be numbers or numeric strings.  Returns a
    new matrix and leaves the input untouched.  (Fixed: the previous
    version made two full deepcopies of the input only to overwrite every
    cell of one of them.)
    """
    grises = []
    for fila in matriz:
        nueva_fila = []
        for pixel in fila:
            promedio = (int(pixel[0]) + int(pixel[1]) + int(pixel[2])) / 3
            nueva_fila.append((promedio, promedio, promedio))
        grises.append(nueva_fila)
    return grises
#################################
#################################
#################################
#MULTIPLICACION DE TUPLAS POR ESCALAR (equivalente a escalar por vector)
def mult_tupla(e,tupla):
t1=int(tupla[0])*e
t2=int(tupla[1])*e
t3=int(tupla[2])*e
r=(t1,t2,t3)
return (r)
#SUMA DE TUPLAS
def suma_tupla(f1,f2,f3,f4,f5,f6,f7,f8,f9):
d1=int(f1[0])+int(f2[0])+int(f3[0])+int(f4[0])+int(f5[0])+int(f6[0])+int(f7[0])+int(f8[0])+int(f9[0])
d2=int(f1[1])+int(f2[1])+int(f3[1])+int(f4[1])+int(f5[1])+int(f6[1])+int(f7[1])+int(f8[1])+int(f9[1])
d3=int(f1[2])+int(f2[2])+int(f3[2])+int(f4[2])+int(f5[2])+int(f6[2])+int(f7[2])+int(f8[2])+int(f9[2])
r=(d1,d2,d3)
return (r)
#SUMA DE 2 TUPLAS
def suma_2_tupla(t1,t2):
d1=int(t1[0])+int(t2[0])
d2=int(t1[1])+int(t2[1])
d3=int(t1[2])+int(t2[2])
r=(d1,d2,d3)
return (r)
#TUPLA ELEVADO A POTENCIA
def pot_tupla(e,tupla):
t1=(tupla[0])**e
t2=(tupla[1])**e
t3=(tupla[2])**e
r=((t1),(t2),(t3))
return (r)
def gxy(a, b, c, d, e, f, g, h, ii, A):
    """Convolve the grayscale version of image A with the 3x3 kernel

        ((a, b, c),
         (d, e, f),
         (g, h, ii))

    Neighbours that fall outside the image contribute zero (zero padding),
    matching the original's `cero` terms at the borders.

    Fix: the original enumerated one branch per border configuration and
    missed the cases where two opposite borders overflow at the same time
    (images only one pixel wide or one pixel tall), which left NV1..NV9
    undefined and raised NameError.  The padded-convolution loop below
    covers every configuration uniformly.
    """
    kernel = ((a, b, c), (d, e, f), (g, h, ii))
    BN = escala_grises(A)
    Ap = copy.deepcopy(A)
    n_filas = len(A)
    n_columnas = len(A[0])
    for i in range(n_filas):
        for j in range(n_columnas):
            # Accumulate the weighted sum channel by channel; components are
            # truncated to int exactly like mult_tupla/suma_tupla did.
            t1 = t2 = t3 = 0
            for di in (-1, 0, 1):
                for dj in (-1, 0, 1):
                    ni = i + di
                    nj = j + dj
                    if 0 <= ni < n_filas and 0 <= nj < n_columnas:
                        coef = kernel[di + 1][dj + 1]
                        pixel = BN[ni][nj]
                        t1 += int(pixel[0]) * coef
                        t2 += int(pixel[1]) * coef
                        t3 += int(pixel[2]) * coef
            Ap[i][j] = (t1, t2, t3)
    return Ap
def bordes(A):
    # Sobel edge detection: horizontal and vertical gradients, combined as
    # sqrt(gx^2 + gy^2) per channel, then normalised so no channel > 255.
    print("Procesando bordes...")
    G = copy.deepcopy(A)
    gx = gxy(-1, 0, 1, -2, 0, 2, -1, 0, 1, A)
    gy = gxy(1, 2, 1, 0, 0, 0, -1, -2, -1, A)
    n_filas = len(A)
    n_columnas = len(A[0])
    for i in range(n_filas):
        for j in range(n_columnas):
            cuadrados = suma_2_tupla(pot_tupla(2, gx[i][j]), pot_tupla(2, gy[i][j]))
            G[i][j] = pot_tupla(1 / 2, cuadrados)
    H = copy.deepcopy(G)
    # Normalisation: scale each pixel down when any channel exceeds 255
    # (the 255 floor makes this a no-op for pixels already in range).
    for i in range(n_filas):
        for j in range(n_columnas):
            t1, t2, t3 = G[i][j]
            nt = max(t1, t2, t3, 255) / 255
            H[i][j] = (t1 / nt, t2 / nt, t3 / nt)
    return H
#################################
#################################
#################################
#Funcion que corta la imagen original dejandola con cantidades filas y columnas multiplos de 5.
def cortar5x5(original):
    """Return a copy of the pixel matrix cropped so that both the row and
    column counts are multiples of 5 (excess rows/columns at the bottom and
    right are dropped).

    Improvements over the original: an empty image no longer raises
    IndexError, and only the surviving region is copied instead of
    deep-copying the whole image and then discarding the excess.
    """
    if not original:
        return []
    filas5 = (len(original) // 5) * 5
    columnas5 = (len(original[0]) // 5) * 5
    # Copy only the region that survives the crop.
    return [copy.deepcopy(fila[:columnas5]) for fila in original[:filas5]]
#Funcion que calcula la diferencia entre 2 pixeles
def diferencia_colores(p1, p2):
    # Squared Euclidean distance between two pixels in RGB space.
    return ((p1[0] - p2[0]) ** 2
            + (p1[1] - p2[1]) ** 2
            + (p1[2] - p2[2]) ** 2)
#Calculo de la diferencia entre cada porcion con todos los mosaicos y se determina cual mosaico es el más similar.
def mosaicos(original, lista_mosaicos):
    """Replace every 5x5 tile of *original* with the most similar mosaic.

    Similarity is the sum of squared colour differences (diferencia_colores)
    over the 25 pixel pairs of a tile; *original* must already be cropped to
    multiples of 5 (see cortar5x5).

    Improvements: track the running minimum instead of building and sorting a
    list of (total, index) pairs per tile, and drop the original's unused
    variable `total_chico`.  Tie-breaking is unchanged: the lowest mosaic
    index wins.
    """
    cambiada = copy.deepcopy(original)
    n_filas = len(original)
    n_columnas = len(original[0])
    for i in range(0, n_filas, 5):
        print("Procesando mosaicos...")
        porcentaje = (int(i) / int(n_filas)) * 100
        print((str(porcentaje) + "000")[0:4] + "%")
        for j in range(0, n_columnas, 5):
            mosaico_util = 0
            mejor_total = None
            for m, mosaico in enumerate(lista_mosaicos):
                # Total squared difference between this tile and mosaic m.
                total = 0
                for n in range(0, 5):
                    for nn in range(0, 5):
                        total += diferencia_colores(cambiada[i + n][j + nn],
                                                    mosaico[n][nn])
                if mejor_total is None or total < mejor_total:
                    mejor_total = total
                    mosaico_util = m
            # Stamp the winning mosaic over the tile.
            for t in range(0, 5):
                for tt in range(0, 5):
                    cambiada[i + t][j + tt] = lista_mosaicos[mosaico_util][t][tt]
    return cambiada
#################################
#################################
#################################
def tarea():
    # Main event loop: wait for a button click in the GUI and run the
    # matching image filter on the currently loaded photo.
    while True:
        click = instaintro_gui.esperar_click()
        if click == "salir":
            print("Click en salir")
            instaintro_gui.salir()
        elif click == "girar":
            print("Click en girar")
            matriz_pixeles = instaintro_gui.obtener_pixeles()
            if matriz_pixeles is None:
                print("No hay foto")
            else:
                instaintro_gui.actualizar_imagen(girar_imagen(matriz_pixeles))
                print("Imagen girada")
        elif click == "gris":
            print("Click en gris")
            matriz_pixeles = copy.deepcopy(instaintro_gui.obtener_pixeles())
            if matriz_pixeles is None:
                print("No hay foto")
            else:
                instaintro_gui.actualizar_imagen(escala_grises(matriz_pixeles))
                print("Imagen transformada a escala de grises")
        elif click == "bordes":
            print("Click en bordes")
            matriz_pixeles = instaintro_gui.obtener_pixeles()
            if matriz_pixeles is None:
                print("No hay foto")
            else:
                instaintro_gui.actualizar_imagen(bordes(matriz_pixeles))
                print("Mostrando bordes de la imagen")
        elif click == "mosaico":
            print("Click en mosaico")
            matriz_pixeles = instaintro_gui.obtener_pixeles()
            if matriz_pixeles is None:
                print("No hay foto")
            else:
                matriz_cortada = cortar5x5(matriz_pixeles)
                lista_mosaicos = instaintro_gui.obtener_imagenes_mosaico()
                matriz_mosaicos = mosaicos(matriz_cortada, lista_mosaicos)
                instaintro_gui.actualizar_imagen(matriz_mosaicos)
                print("Imagen en mosaicos")
# Build the GUI application, register tarea() as its event-loop program and
# hand control over to the interface.
app = instaintro_gui.Application("tarea")
app.title('InstaIntro')
app.loadProgram(tarea)
app.start()
| [
"jilopez8@uc.cl"
] | jilopez8@uc.cl |
2b27287a81ce34c6bdfd3b3077d0ad39cac86b10 | 1f4204f903657884d9cccfd44b19ecb531b59ded | /test_settings/66_1_3_200.py | c083982ec3213706b72bcdccb9b5721887a8d8a3 | [] | no_license | fmcc/StylometricAnalyser | 795a8e4abe264ee18ab3bcb34bd128bcd06ac5ca | e86305a63c95d8b533cab4a3be0010c2fee0ff14 | refs/heads/master | 2021-01-23T08:38:44.961082 | 2013-08-31T20:23:36 | 2013-08-31T20:23:36 | 11,097,508 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 540 | py |
import os
DB_PATH = os.getcwd() + '/database/greek_texts.db'  # SQLite corpus database
LOGGING = True       # enable console logging
DB_LOGGING = False   # enable logging of database operations
NGRAM_WORDS = False  # presumably True = word n-grams, False = character n-grams -- TODO confirm against the analyser
NGRAM_LENGTHS = {
    'MIN': 1,
    'MAX': 3
}
NO_SPACES = True     # NOTE(review): looks like spaces are stripped before n-gram building -- confirm
RESTRICT_VECTOR_SPACE = 200  # cap on the feature/vector space size -- TODO confirm exact semantics
# If selected, texts will be divided according to their original top-level
# divisions (Books etc., 'div1' in Perseus's TEI XML).
USE_ORIGINAL_DIVISIONS = False
# If USE_ORIGINAL_DIVISIONS is False, the text will be divided into chunks of
# the length defined here. If 0, the text will not be divided.
DIVISION_LENGTH = 5000
| [
"finlaymccourt@gmail.com"
] | finlaymccourt@gmail.com |
db000f476282e2536b72f3be77dc90f773225eb7 | 83544ef94ce2c1a05b6028ae2ce58ef8acfb6fa8 | /pmca-console.spec | 78c3eff77d7b1c5610639372fbb7074d05cd63eb | [
"MIT"
] | permissive | ma1co/Sony-PMCA-RE | 9ae44c5b09580d62e860c3acff24bd1fac28a31e | a82f5baaa8e9c3d9f28f94699e860fb2e48cc8e0 | refs/heads/master | 2023-08-07T07:54:13.763912 | 2022-08-18T12:46:04 | 2022-08-18T12:46:04 | 35,510,548 | 1,788 | 228 | MIT | 2022-11-05T06:45:01 | 2015-05-12T20:18:25 | Python | UTF-8 | Python | false | false | 177 | spec | # Run `pyinstaller pmca-console.spec` to generate an executable
input = 'pmca-console.py'
output = 'pmca-console'
console = True
with open('build.spec') as f:
exec(f.read())
| [
"ma1co@users.noreply.github.com"
] | ma1co@users.noreply.github.com |
2fa2a7be091417df1d030e2f52c95d5fe49f5951 | 25d4d1a1a1e536c23fce4eef19c56b3337606ada | /virtual/bin/flask | 1f71e29d8801fae92f51869bc979e7821176f981 | [] | no_license | thepsalmist/Movie_Watchlist | b357f7e94a0744dd4dffac5501ea250cc4214d17 | d8e15268d4430a97276ddbeebfccd9391737d30c | refs/heads/master | 2020-06-05T22:54:03.394642 | 2019-06-18T15:36:31 | 2019-06-18T15:36:31 | 192,567,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | #!/home/thealchemist/Desktop/moringa-school-projects/Watchlist/virtual/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
# Console-script entry point generated by the packaging tooling: strip a
# trailing "-script.py(w)"/".exe" suffix from argv[0] so Flask's CLI sees a
# clean program name, then delegate to flask.cli.main.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"xf.xavierfrank@gmail.com"
] | xf.xavierfrank@gmail.com | |
20ed4c2c7261e9b4442d61d4810acd5d0b5743e6 | 385ab972316b41cb0643f1050f9220b8eaeb4647 | /findDigits.py | 41ec66dee85addf385ce1e7b73749ac75dcd3cb8 | [] | no_license | Rpratik13/HackerRank | 09174c9b331e25cec33848a80e9109800cdbc894 | 38b9a39261bfb3b2fc208ad1e3d8a485585b419a | refs/heads/master | 2020-03-22T05:24:03.516086 | 2020-01-31T16:08:19 | 2020-01-31T16:08:19 | 139,563,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | def findDigits(n):
    # Count how many decimal digits of n evenly divide n itself.
    temp = n
    count = 0
    while temp!=0:
        d = temp%10          # current (least significant) digit
        if d == 0:
            # Digit 0 can never divide n; skip it to avoid ZeroDivisionError.
            temp = temp//10
            continue
        if n%d==0:
            count+=1
        temp = temp//10
    return count
# Read the number of test cases, then one integer per line, and print the
# count of self-dividing digits for each.
t = int(input())
for t_itr in range(t):
    n = int(input())
    result = findDigits(n)
    print(result)
"r.pratik013@gmail.com"
] | r.pratik013@gmail.com |
a277c31b1f5861dc5d0a8cf2c169ac2f24ea9e6b | 2c143ba64032f65c7f7bf1cbd567a1dcf13d5bb1 | /树/验证二叉树.py | 52f08e37e653a6186347565b6ffa4d6440299a4a | [] | no_license | tx991020/MyLeetcode | 5b6121d32260fb30b12cc8146e44e6c6da03ad89 | cfe4f087dfeb258caebbc29fc366570ac170a68c | refs/heads/master | 2020-04-09T21:43:41.403553 | 2019-03-27T18:54:35 | 2019-03-27T18:54:35 | 160,611,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | '''
给定一个二叉树,判断其是否是一个有效的二叉搜索树。
假设一个二叉搜索树具有如下特征:
节点的左子树只包含小于当前节点的数。
节点的右子树只包含大于当前节点的数。
所有左子树和右子树自身必须也是二叉搜索树。
示例 1:
输入:
2
/ \
1 3
输出: true
示例 2:
输入:
5
/ \
1 4
/ \
3 6
输出: false
解释: 输入为: [5,1,4,null,null,3,6]。
根节点的值为 5 ,但是其右子节点值为 4 。
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def isValidBST(self, root):
        """
        :type root: TreeNode
        :rtype: bool
        """
        recorrido = self.inorder(root)
        # A tree is a valid BST exactly when its inorder traversal is
        # strictly increasing (equal neighbours mean duplicates: invalid).
        return all(x < y for x, y in zip(recorrido, recorrido[1:]))
    def inorder(self, root):
        # Iterative inorder traversal (left, node, right).
        valores = []
        pila = []
        nodo = root
        while pila or nodo is not None:
            while nodo is not None:
                pila.append(nodo)
                nodo = nodo.left
            nodo = pila.pop()
            valores.append(nodo.val)
            nodo = nodo.right
        return valores
| [
"wudi@hetao101.com"
] | wudi@hetao101.com |
60662f03bae36268062d3d500b1a75f366f385cd | 91824d746654fe12881b4fc3b55c553aae0d22ac | /py/fizz-buzz.py | 94d8d1eecae70b8c48ce696fb08d2f16527ea5db | [
"Apache-2.0"
] | permissive | ckclark/leetcode | a1a173c67a36a3256b198f853fcd3d15aa5abbb7 | 844c6f18d06dcb397db76436e5f4b8ddcb1beddc | refs/heads/master | 2021-01-15T08:14:43.368516 | 2020-02-14T07:25:05 | 2020-02-14T07:30:10 | 42,386,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | import sys
class Solution(object):
    def fizzBuzz(self, n):
        """
        :type n: int
        :rtype: List[str]

        Return ['1', '2', 'Fizz', '4', 'Buzz', ...] for 1..n: multiples of 3
        map to 'Fizz', of 5 to 'Buzz', of both to 'FizzBuzz'.
        """
        ret = []
        # range() instead of the Python-2-only xrange(): works on both
        # Python 2 and Python 3 with identical results.
        for i in range(1, n + 1):
            if i % 15 == 0:
                ret.append('FizzBuzz')
            elif i % 3 == 0:
                ret.append('Fizz')
            elif i % 5 == 0:
                ret.append('Buzz')
            else:
                ret.append(str(i))
        return ret
| [
"clark.ck@gmail.com"
] | clark.ck@gmail.com |
f44a761d585356507a3b6096569ea06aed626c93 | 7c06ff01f631cac78aa7c47f2af8237d140eab72 | /maa/atlas/merge_predictors.py | e85de84c26171debac2e611d21587571add61a24 | [
"MIT"
] | permissive | iosonofabio/maa | 563df2e61ee23b128e20c60740761a0a75706ea4 | 072892e1cb7b8e48e9ffc335d57d508bf1362f7e | refs/heads/master | 2020-03-30T15:06:15.457548 | 2018-10-02T23:51:27 | 2018-10-02T23:51:27 | 151,348,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,373 | py | # vim: fdm=indent
'''
author: Fabio Zanini
date: 22/11/17
content: Merge scattering and antibody-based predictors.
'''
# Modules
import os
import sys
import argparse
import yaml
import numpy as np
import pandas as pd
from scipy.stats import spearmanr
from sklearn.externals import joblib
import json
import matplotlib.pyplot as plt
import seaborn as sns
# Functions
class CombinedClassifier:
    """Combine several binary classifiers into one by AND/OR voting.

    Each underlying classifier votes positive where its prediction is > 0;
    predict() returns +1 where the combined vote is positive and -1 elsewhere.
    """

    def __init__(self, classifiers, logic='AND'):
        self.classifiers = classifiers
        self.logic = logic

    def predict(self, Xs):
        # Seed the vote with the first classifier, then fold in the rest
        # according to the configured combination logic.
        combined = (self.classifiers[0].predict(Xs[0]) > 0)
        if len(self.classifiers) > 1:
            if self.logic not in ('AND', 'OR'):
                raise ValueError('Combination logic not understood: {}'.format(self.logic))
            use_and = (self.logic == 'AND')
            for other, X in zip(self.classifiers[1:], Xs[1:]):
                vote = (other.predict(X) > 0)
                combined = (combined & vote) if use_and else (combined | vote)
        # Map boolean votes to the conventional +1/-1 labels.
        return combined * 2 - 1
def parse_biolegend():
    """Load the Biolegend antibody table and return it with boolean marks.

    Side effects (first run only, when the 'GeneName' column is missing):
    backs up the TSV, maps each antibody's 'Specificity' to a gene symbol via
    the conversion table, inserts the 'GeneName' column and rewrites the TSV
    on disk.

    Returns a DataFrame whose columns from the 4th onwards are booleans
    (True where the vendor table contained the '•' marker).
    """
    fn = '../../data/ab_vendors/Biolegend.tsv'
    df = pd.read_csv(fn, sep='\t')
    if 'GeneName' not in df.columns:
        # One-time migration: keep a backup before rewriting the file.
        df.to_csv(fn+'.bak', sep='\t', index=False)
        from collections import Counter
        fn_conv = '../../data/ab_vendors/Biolegend_markers_conversion.tsv'
        df_conv = pd.read_csv(fn_conv, sep='\t')
        # The conversion table must map each target name unambiguously.
        n_entries = Counter(df_conv['Input'].values)
        multiples = [k for (k, v) in n_entries.items() if v > 1]
        if len(multiples):
            print('Multiple entries:', multiples)
            raise ValueError('Some antibody target names have multiple entries')
        df_conv.set_index('Input', inplace=True)
        df['GeneName'] = ''
        # Insert GeneName as the third column.
        newcols = df.columns[:2].tolist() + ['GeneName'] + df.columns[2:].tolist()
        df = df.loc[:, newcols]
        for k, datum in df.iterrows():
            if datum['Specificity'] in df_conv.index:
                df.loc[k, 'GeneName'] = df_conv.loc[datum['Specificity'], 'Symbol']
        df.to_csv(fn, sep='\t', index=False)
        print('New file saved to file')
    df.iloc[:, 3:] = (df.iloc[:, 3:] == '•')
    return df
def plot_classifier_antibody(X, y, clf, ax=None):
    '''Plot SVM classifier

    X: (n_cells, 1 or 2) feature matrix; clf must expose decision_function.
    y: binary identity labels (0/1) per cell.
    ax: optional matplotlib Axes; a new figure is created when None.
    Returns the Axes used.
    '''
    if ax is None:
        fig, ax = plt.subplots()
    # Two translucent colours: index 0 = negative cells, 1 = positive cells.
    colors = plt.cm.Paired([0.0, 1.0])
    colors[0, -1] = 0.4
    colors[1, -1] = 0.7
    c = np.zeros((len(y), 4))
    c[y == 0] = colors[0]
    c[y == 1] = colors[1]
    if X.shape[1] == 1:
        # 1D case: swarm plot plus the decision threshold and the +/-0.5
        # decision-function levels, found by scalar minimisation.
        # NOTE(review): bounds=[0, 6] assumes features lie in [0, 6] -- confirm.
        from scipy.optimize import minimize_scalar
        def fun(x, offset):
            return (clf.decision_function([[x]])[0] - offset)**2
        discr = minimize_scalar(fun, args=(0,), bounds=[0, 6]).x
        dis_low = minimize_scalar(fun, args=(-0.5,), bounds=[0, 6]).x
        dis_high = minimize_scalar(fun, args=(+0.5,), bounds=[0, 6]).x
        df = pd.DataFrame([X[:, 0], y], index=['x', 'identity']).T
        sns.swarmplot(
            x='x', y='identity', data=df, ax=ax,
            orient='h',
            alpha=0.7,
        )
        ax.axvline(discr)
        ax.axvline(dis_low, ls='--')
        ax.axvline(dis_high, ls='--')
    else:
        # 2D case: scatter the cells and shade the decision regions on a
        # 200x200 grid, with contours at decision levels -0.5, 0, +0.5.
        ax.scatter(
            X[:, 0], X[:, 1],
            color=c,
            zorder=10,
            s=20)
        x_min = X[:, 0].min()
        x_max = X[:, 0].max()
        y_min = X[:, 1].min()
        y_max = X[:, 1].max()
        XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
        Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
        # Put the result into a color plot
        Z = Z.reshape(XX.shape)
        ax.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired, alpha=0.05)
        ax.contour(XX, YY, Z, colors=['k', 'k', 'k'],
                   linestyles=['--', '-', '--'], levels=[-.5, 0, .5])
    return ax
def plot_classifier_scattering(X, y, clf, ax=None):
    '''Plot SVM classifier

    This function's body was a line-for-line duplicate of
    plot_classifier_antibody, so it now delegates to it instead; the separate
    name is kept for call-site clarity (scattering vs antibody features).
    Returns the Axes used.
    '''
    return plot_classifier_antibody(X, y, clf, ax=ax)
# Script
if __name__ == '__main__':
    # Command-line interface: which tissues/cell types to process, and whether
    # to persist the combined classifiers or the website JSON.
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('tissues', nargs='+',
                        help='tissues to study')
    parser.add_argument('--cell-types', nargs='+', required=True,
                        help='Limit to some cell types')
    parser.add_argument('--subtissue', default=None,
                        help='Limit to a subtissue. To split by subtissue use "all"')
    parser.add_argument('--save', action='store_true',
                        help='Store to file instead of showing')
    parser.add_argument('--save-website', action='store_true',
                        help='Save result for the website JSON)')
    parser.add_argument('--combination-logic', default='AND',
                        choices=['AND', 'OR'],
                        help='Combination logic between scattering and antibodies')
    args = parser.parse_args()
    # Get the list of commercially available antibodies
    ab_comm = []
    # Biolegend
    ab_comm_table = parse_biolegend()
    ab_unique = np.unique(ab_comm_table.dropna(subset=['GeneName'], axis=0)['GeneName'])
    ab_comm.append(ab_unique)
    # TODO: other vendors
    if len(ab_comm):
        ab_comm = np.unique(np.concatenate(ab_comm))
    # For every requested cell type, load both the scattering-based and the
    # antibody-based classifier (model + training data + metadata), combine
    # them with the chosen AND/OR logic, and score the combination.
    for tissue in args.tissues:
        classifiers = []
        for cell_type in args.cell_types:
            print(cell_type)
            clfs = {}
            for clf_type in ('scattering', 'antibodies'):
                if args.subtissue is not None:
                    fn_glb = '../../data/classifiers/{:}_{:}_{:}_{:}'.format(
                        tissue.lower(),
                        args.subtissue.lower(),
                        cell_type.replace(' ', '_'),
                        clf_type,
                    )
                else:
                    fn_glb = '../../data/classifiers/{:}_{:}_{:}'.format(
                        tissue.lower(),
                        cell_type.replace(' ', '_'),
                        clf_type,
                    )
                fn_model = fn_glb+'.model.pickle'
                fn_train = fn_glb+'.train.npz'
                fn_meta = fn_glb+'.metadata.json'
                fn_bundle = fn_glb+'.tar.gz'
                # Metadata (JSON) + fitted model (joblib) + training data (npz).
                with open(fn_meta, 'r') as f:
                    classifier = json.load(f)
                clf = joblib.load(fn_model)
                classifier['classifier'] = clf
                train = np.load(fn_train)
                classifier['X'] = train['X']
                classifier['y'] = train['y']
                classifier['cellnames'] = train['cellnames']
                clfs[clf_type] = classifier
            classifiers.append(clfs)
            # Combine the classifiers
            # Align both training sets on the cells they have in common.
            cells_common = np.intersect1d(
                clfs['scattering']['cellnames'],
                clfs['antibodies']['cellnames'],
            )
            Xs = pd.DataFrame(
                data=clfs['scattering']['X'],
                index=clfs['scattering']['cellnames'],
            ).loc[cells_common].values
            ys = pd.Series(
                data=clfs['scattering']['y'],
                index=clfs['scattering']['cellnames'],
            ).loc[cells_common].values
            clas = clfs['scattering']['classifier']
            Xa = pd.DataFrame(
                data=clfs['antibodies']['X'],
                index=clfs['antibodies']['cellnames'],
            ).loc[cells_common].values
            ya = pd.Series(
                data=clfs['antibodies']['y'],
                index=clfs['antibodies']['cellnames'],
            ).loc[cells_common].values
            claa = clfs['antibodies']['classifier']
            if (ys != ya).any():
                raise ValueError('The true cell identity should be the same!')
            cus = CombinedClassifier(
                classifiers=[clas, claa],
                logic=args.combination_logic)
            # Confusion-matrix entries of the combined prediction.
            lab_pos = (cus.predict([Xs, Xa]) > 0)
            lab_neg = ~lab_pos
            act_pos = ya > 0
            act_neg = ~act_pos
            true_pos = (lab_pos & act_pos).sum()
            false_neg = (lab_neg & act_pos).sum()
            false_pos = (lab_pos & act_neg).sum()
            true_neg = (lab_neg & act_neg).sum()
            precision = 1.0 * true_pos / (true_pos + false_pos)
            prevalence = ya.mean()
            enrichment = precision / prevalence
            # sensitivity aka recall
            recall = 1.0 * true_pos / ya.sum()
            specificity = 1.0 * true_neg / (~ya).sum()
            clfs['combined'] = {
                'Xs': Xs,
                'Xa': Xa,
                'y': ya,
                'cellnames': cells_common,
                'xlabel': clfs['scattering']['xlabel'],
                'ylabel': clfs['scattering']['ylabel'],
                'genes': clfs['antibodies']['genes'],
                'true_pos': true_pos,
                'false_pos': false_pos,
                'false_neg': false_neg,
                'true_neg': true_neg,
                'precision': precision,
                'prevalence': prevalence,
                'enrichment': enrichment,
                'recall': recall,
                'specificity': specificity,
                'cell type': cell_type,
                'tissue': tissue,
                'precision+recall': precision + recall,
                'combination_logic': args.combination_logic,
                'classifier': cus,
            }
    # One figure per cell type: scattering classifier (left panel), antibody
    # classifier (right panel), combined performance in the figure suptitle.
    # NOTE(review): `classifiers` here only holds the last tissue processed
    # above -- confirm the script is meant to be run with a single tissue.
    for clfs in classifiers:
        fig, axs = plt.subplots(1, 2, figsize=(9, 4))
        # Scattering
        ax = axs[0]
        d = clfs['scattering']
        X = d['X']
        y = d['y']
        clf = d['classifier']
        plot_classifier_scattering(X, y, clf, ax=ax)
        ax.set_xlabel(d['xlabel'])
        ax.set_ylabel(d['ylabel'])
        ax.grid(False)
        ax.set_title(
            '{:s}: p={:.0%}→{:.0%} ({:.1f}x), r={:.0%}'.format(
                d['cell type'], d['prevalence'], d['precision'], d['enrichment'], d['recall']),
            fontsize=9)
        # Antibodies
        ax = axs[1]
        d = clfs['antibodies']
        clf = d['classifier']
        if len(d['genes']) == 2:
            g1, g2 = d['genes']
        else:
            g1 = d['genes'][0]
            g2 = None
        X = d['X']
        y = d['y']
        plot_classifier_antibody(X, y, clf, ax=ax)
        # A '*' marks genes with a commercially available antibody.
        xlabel = 'log10 expression of {:}'.format(g1)
        if g1 in ab_comm:
            xlabel += '*'
        ax.set_xlabel(xlabel)
        if g2 is not None:
            ylabel = 'log10 expression of {:}'.format(g2)
            if g2 in ab_comm:
                ylabel += '*'
        else:
            ylabel = ''
        ax.set_ylabel(ylabel)
        ax.grid(False)
        ax.set_title(
            '{:s}: p={:.0%}→{:.0%} ({:.1f}x), r={:.0%}'.format(
                d['cell type'], d['prevalence'], d['precision'], d['enrichment'], d['recall']),
            fontsize=9)
        d = clfs['combined']
        fig.suptitle(
            '{:s}: p={:.0%}→{:.0%} ({:.1f}x), r={:.0%}'.format(
                d['cell type'], d['prevalence'], d['precision'], d['enrichment'], d['recall']),
            fontsize=9)
        plt.tight_layout(rect=(0, 0, 1, 0.96))
if args.save:
import tarfile
import json
fields = (
'tissue',
'precision',
'recall',
'enrichment',
'prevalence',
'specificity',
'cell type',
'xlabel',
'ylabel',
'genes',
'combination_logic',
)
for clfs in classifiers:
if args.subtissue is not None:
fn_glb = '../../data/classifiers/{:}_{:}_{:}_combined'.format(
tissue.lower(),
args.subtissue.lower(),
classifier['cell type'].replace(' ', '_'),
)
else:
fn_glb = '../../data/classifiers/{:}_{:}_combined'.format(
tissue.lower(),
classifier['cell type'].replace(' ', '_'),
)
fn_model = fn_glb+'.model.pickle'
fn_meta = fn_glb+'.metadata.json'
fn_bundle = fn_glb+'.tar.gz'
# Save classifier
clf = clfs['combined']['classifier']
joblib.dump(clf, fn_model)
# Save metadata
meta = {k: clfs['combined'][k] for k in fields}
with open(fn_meta, 'wt') as f:
json.dump(meta, f)
# Bundle up
with tarfile.open(fn_bundle, 'w:gz') as f:
f.add(fn_model, arcname=os.path.basename(fn_model))
f.add(fn_meta, arcname=os.path.basename(fn_meta))
    # Export each combined classifier as JSON for the facsweb visualisation:
    # raw data points, axis labels/limits, and sampled decision boundaries.
    if args.save_website:
        from sklearn.externals import joblib
        import json
        for clfs in classifiers:
            clf = clfs['combined']
            fn = '{:}/university/postdoc/facsweb/app/facsweb/shared/static/data/merged_predictor_{:}_{:}.json'.format(
                os.getenv('HOME'),
                clf['tissue'],
                clf['cell type'].replace(' ', '_'),
            )
            d = {}
            d['tissue'] = clf['tissue']
            d['cell type'] = clf['cell type']
            d['data'] = {}
            d['data']['scattering'] = clf['Xs'].tolist()
            d['data']['antibodies'] = clf['Xa'].tolist()
            d['data']['identity'] = clf['y'].tolist()
            d['data']['cellnames'] = clf['cellnames'].tolist()
            d['data']['scattering_axis_labels'] = [clf['xlabel'], clf['ylabel']]
            d['data']['antibody_axis_labels'] = clf['genes']
            d['data']['xlim_scattering'] = [0, clf['Xs'][:, 0].max() * 1.]
            d['data']['ylim_scattering'] = [
                np.floor(clf['Xs'][:, 1].min()),
                np.ceil(clf['Xs'][:, 1].max()),
            ]
            d['data']['xlim_antibodies'] = [
                np.floor(clf['Xa'][:, 0].min()),
                np.ceil(clf['Xa'][:, 0].max()),
            ]
            d['data']['ylim_antibodies'] = [
                np.floor(clf['Xa'][:, 1].min()),
                np.ceil(clf['Xa'][:, 1].max()),
            ]
            d['models'] = {
                'combined': {},
                'scattering': {},
                'antibodies': {},
            }
            d['models']['combined']['precision'] = clf['precision']
            d['models']['combined']['recall'] = clf['recall']
            d['models']['combined']['logic'] = clf['combination_logic']
            # Find roots of the classifiers
            # Sample each sub-classifier's decision function on a 500x500 grid
            # and keep the grid points near levels 0 and +/-0.25 as decision
            # boundary samples ('Xs'/'Xa' is selected via clfname[0]).
            for clfname in ('scattering', 'antibodies'):
                clfi = clfs[clfname]
                xlim = [clf['X'+clfname[0]][:, 0].min(), clf['X'+clfname[0]][:, 0].max()]
                ylim = [clf['X'+clfname[0]][:, 1].min(), clf['X'+clfname[0]][:, 1].max()]
                xx = np.linspace(xlim[0], xlim[1], 500)
                yy = np.linspace(ylim[0], ylim[1], 500)
                xv, yv = np.meshgrid(xx, yy)
                grid = np.vstack([xv.ravel(), yv.ravel()]).T
                dec = clfi['classifier'].decision_function(grid)
                roots = grid[np.abs(dec) < 0.02]
                d['models'][clfname]['roots'] = roots.tolist()
                roots_pos = grid[np.abs(dec - 0.25) < 0.02]
                d['models'][clfname]['roots_pos'] = roots_pos.tolist()
                roots_neg = grid[np.abs(dec + 0.25) < 0.02]
                d['models'][clfname]['roots_neg'] = roots_neg.tolist()
                d['models'][clfname]['precision'] = clfi['precision']
                d['models'][clfname]['recall'] = clfi['recall']
            with open(fn, 'w') as f:
                json.dump(d, f)
            print('Saved to file: {:}'.format(fn))
plt.ion()
plt.show()
| [
"fabio.zanini@fastmail.fm"
] | fabio.zanini@fastmail.fm |
40018adf13b332b79d6fb3be3827cb089212a630 | 610dedfb6e21d297e8cdbcba599a4e564bd785cb | /EstruturaDeRepeticao/estruturaderepeticao-29.py | 5e9e0dce39884add04858ba5c6bdaa07905a59e9 | [] | no_license | zumbipy/PythonExercicios | f7b2ddf2376b9ecb2aedc77531e3571dc746a12b | 7a17b78cf927a2889b93238542e90e00810c43e6 | refs/heads/master | 2021-01-23T10:43:47.997462 | 2018-07-22T14:58:44 | 2018-07-22T14:58:44 | 93,086,120 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,182 | py | # Telegram: @ZumbiPy __ _ ___
# /_ / __ ____ _ / / (_) _ \__ __
# / /_/ // / ' \/ _ \/ / ___/ // /
# /___/\_,_/_/_/_/_.__/_/_/ \_, /
# E-mail: zumbipy@gmail.com /___/
"""
29 - O Sr. Manoel Joaquim possui uma grande loja de artigos de R$ 1,99,
com cerca de 10 caixas. Para agilizar o cálculo de quanto cada cliente
deve pagar ele desenvolveu um tabela que contém o número de itens que o
cliente comprou e ao lado o valor da conta. Desta forma a atendente do
caixa precisa apenas contar quantos itens o cliente está levando e olhar
na tabela de preços. Você foi contratado para desenvolver o programa que
monta esta tabela de preços, que conterá os preços de 1 até 50 produtos,
conforme o exemplo abaixo:
Lojas Quase Dois - Tabela de preços
1 - R$ 1.99
2 - R$ 3.98
...
50 - R$ 99.50
"""
# ================================================================================
# Logica e variavel.
# ================================================================================
print("Lojas Quase Dois - Tabela de preços")
for produtos in range(1, 51):
valor = 1.99 * produtos
print("{:>2} - R$ {:.2f}".format(produtos, valor))
| [
"zumbipy@gmail.com"
] | zumbipy@gmail.com |
ae3a0a0301704c185845a2ce5ed9681188f3d08e | 9dee94907e6456a4af9855d358693923c17b4e0d | /0015_3Sum.py | 159dafa89798c41df6a1700328fa7950dc93e7eb | [] | no_license | chien-wei/LeetCode | e215915a8103e56f182040dacc9fb0d6996c86ec | 0d6f414e7610fedb2ec4818ecf88d51aa69e1355 | refs/heads/master | 2021-05-13T14:48:22.891100 | 2019-08-20T05:52:59 | 2019-08-20T05:52:59 | 116,749,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | class Solution:
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
res = []
nums.sort()
self.nSum(nums, 0, 3, [], res)
return res
    def nSum(self, nums, target, n, result, results):
        """Append to *results* every unique combination of n values from the
        sorted list *nums* summing to *target*, each prefixed with *result*.
        """
        if len(nums) < n or n < 2:
            return
        if n == 2:
            # Base case: classic two-pointer scan over the sorted list.
            l, r = 0, len(nums)-1
            while r > l:
                if nums[l] + nums[r] == target:
                    results.append(result + [nums[l], nums[r]])
                    l += 1
                    r -= 1
                    # Skip duplicate values on both sides to avoid repeats.
                    while r > l and nums[l] == nums[l-1]:
                        l += 1
                    while r > l and nums[r] == nums[r+1]:
                        r -= 1
                elif nums[l] + nums[r] < target:
                    l += 1
                else:
                    r -= 1
        else:
            # Recursive case: fix nums[i], search (n-1)-sums in the tail.
            for i in range(len(nums)-n+1):
                # Prune: stop once n copies of the smallest candidate already
                # overshoot the target, or n copies of the largest value
                # cannot reach it (the latter is loop-invariant).
                if target < nums[i] * n or target > nums[-1] * n:
                    break
                # Skip duplicate leading values so combinations stay unique.
                if i == 0 or i > 0 and nums[i-1] != nums[i]:
                    self.nSum(nums[i+1:], target - nums[i], n-1, result+[nums[i]], results)
            return
"chien-wei@outlook.com"
] | chien-wei@outlook.com |
d2029e11c506652cd6919211d3bae4de432c9fb8 | ba2a05f20454bda428f140634bc602699f164fc4 | /00.SSAFY/1.first-semester/07_django/API/api/settings.py | 871671aa9e43287bfe37e62f0f3a9b4c9fa4751f | [] | no_license | snowink1137/TIL | 734da402e99afa52f1af4ef996a6b274b1bcce0b | 9e9c78eb0c892affc88e2d46e143cef98af743fb | refs/heads/master | 2023-01-08T18:26:34.311579 | 2021-11-14T11:04:22 | 2021-11-14T11:04:22 | 162,255,934 | 0 | 0 | null | 2023-01-07T11:09:09 | 2018-12-18T08:32:44 | Jupyter Notebook | UTF-8 | Python | false | false | 3,183 | py | """
Django settings for api project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o+kn&$uu)urs-*1*6z_bj5#_q1ps$uws7fi$nx*asvmnv)!sbd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party: dev utilities, Django REST framework and Swagger API docs.
    'django_extensions',
    'rest_framework',
    'rest_framework_swagger',
    # Local apps.
    'movie_api',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'api.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = False  # naive local datetimes (Asia/Seoul) are stored in the DB
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"snowink1137@gmail.com"
] | snowink1137@gmail.com |
965d835cd6e4e9bf535d757bd971a6e7081fe8bc | 8a82a83655f118208692e55d7804d9fa480ad4b6 | /book/apress/Beginning.Python.Visualization.Crafting.Visual.Transformation.Scripts/Chapter08/src/exponential.py | f18cc92e1d379da0067ba78d35d4cd6e229f293e | [] | no_license | xenron/sandbox-da-python | 0814159da9a91923e4b66c5e40057e381f765e96 | ab8f1c0d57fdc6006355f613012b84165068c315 | refs/heads/master | 2020-04-12T05:41:33.182110 | 2016-12-14T22:57:33 | 2016-12-14T22:57:33 | 60,324,979 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | from pylab import *
# number of data points
N = 100
start = 0
end = 2
# Random ground-truth parameters for the synthetic data set.
A = rand()
B = rand()
# The model is y = exp(B)*exp(A*x) = exp(A*x + B); taking logs gives the
# straight line log(y) = A*x + B, which polyfit can fit with degree 1.
# (The plot labels therefore report exp(B) as the multiplicative constant.)
x = linspace(start, end, N)
y = exp(A*x+B)
y += randn(N)/5  # additive Gaussian noise; small relative to y >= 1, so log(y) below stays defined in practice
# linear regression
p = polyfit(x, log(y), 1)
figure()
title(r'Linear regression with polyfit(), $y=Be^{Ax}$')
plot(x, y, 'o',
     label='Measured data; A=%.2f, B=%.2f' % (A, exp(B)))
plot(x, exp(polyval(p, x)), '-',
     label='Linear regression; A=%.2f, B=%.2f' % (p[0], exp(p[1])))
legend(loc='best')
show()
"xenron@outlook.com"
] | xenron@outlook.com |
9ec945cdec71b73d69ddf26100e64e454674c0b9 | ad53f1fabc89a9f451351fff292b03c3aac69615 | /class_code/algorithms/rl_pg/pg.py | 1b6d4a293ce0bf591b4589d53cf31d048fa470ff | [] | no_license | anatu/Stanford-CME241-W2020 | 9a64d0b68dec84232f5dbf68b191246cd7fb8d29 | 73f4088ed25bebe1925005b3bc4ace4b3660ef73 | refs/heads/master | 2020-12-07T04:54:40.207475 | 2020-03-16T21:53:49 | 2020-03-16T21:53:49 | 232,638,280 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,288 | py | from typing import Mapping, Callable, Sequence, Tuple
from algorithms.opt_base import OptBase
from processes.mdp_rep_for_rl_pg import MDPRepForRLPG
from algorithms.func_approx_spec import FuncApproxSpec
from func_approx.func_approx_base import FuncApproxBase
import numpy as np
from utils.generic_typevars import S, A
from utils.standard_typevars import VFType, QFType
from utils.standard_typevars import PolicyType
class PolicyGradient(OptBase):
    """Policy-gradient control for an MDP given only a sampling model.

    Trains a parametric stochastic policy (one function approximator per
    policy-distribution parameter, held in ``pol_fa``) either with episodic
    REINFORCE or with an actor-critic TD(lambda) scheme. ``vf_fa`` is the
    state-value critic and ``qvf_fa`` the action-value approximator.
    """

    def __init__(
        self,
        mdp_rep_for_rl_pg: MDPRepForRLPG,
        reinforce: bool,
        batch_size: int,
        num_batches: int,
        num_action_samples: int,
        max_steps: int,
        actor_lambda: float,
        critic_lambda: float,
        score_func: Callable[[A, Sequence[float]], Sequence[float]],
        sample_actions_gen_func: Callable[[Sequence[float], int], Sequence[A]],
        fa_spec: FuncApproxSpec,
        pol_fa_spec: Sequence[FuncApproxSpec]
    ) -> None:
        """
        :param mdp_rep_for_rl_pg: sampling-model view of the MDP (init-state
            generator, state/reward generator, terminal test, gamma).
        :param reinforce: True -> episodic REINFORCE; False -> TD(lambda)
            actor-critic.
        :param batch_size: episodes per policy-parameter update.
        :param num_batches: number of policy-parameter updates.
        :param num_action_samples: samples used to average the stochastic
            policy into a deterministic action.
        :param max_steps: hard cap on episode length.
        :param actor_lambda: eligibility-trace decay for the actor.
        :param critic_lambda: eligibility-trace decay for the critic.
        :param score_func: gradient of log pi(a|s) w.r.t. the policy's
            distribution parameters.
        :param sample_actions_gen_func: samples actions given distribution
            parameters.
        :param fa_spec: spec for the V and Q function approximators.
        :param pol_fa_spec: one spec per policy-distribution parameter.
        """
        self.mdp_rep: MDPRepForRLPG = mdp_rep_for_rl_pg
        self.reinforce: bool = reinforce
        self.batch_size: int = batch_size
        self.num_batches: int = num_batches
        self.num_action_samples: int = num_action_samples
        self.max_steps: int = max_steps
        self.actor_lambda: float = actor_lambda
        self.critic_lambda: float = critic_lambda
        self.score_func: Callable[[A, Sequence[float]], Sequence[float]] =\
            score_func
        self.sample_actions_gen_func: Callable[[Sequence[float], int], Sequence[A]] =\
            sample_actions_gen_func
        self.vf_fa: FuncApproxBase = fa_spec.get_vf_func_approx_obj()
        self.qvf_fa: FuncApproxBase = fa_spec.get_qvf_func_approx_obj()
        self.pol_fa: Sequence[FuncApproxBase] =\
            [s.get_vf_func_approx_obj() for s in pol_fa_spec]

    def get_value_func(self, pol_func: PolicyType) -> VFType:
        """Fit the state-value critic to `pol_func` and return its evaluator.

        Runs num_batches * batch_size episodes; after each episode updates
        vf_fa with an eligibility-trace (TD(lambda)) loss gradient averaged
        over the episode's steps.
        """
        mo = self.mdp_rep
        for _ in range(self.num_batches * self.batch_size):
            state = mo.init_state_gen_func()
            steps = 0
            terminate = False
            states = []
            targets = []
            while not terminate:
                action = pol_func(state)(1)[0]
                next_state, reward = mo.state_reward_gen_func(
                    state,
                    action
                )
                # One-step bootstrapped target: r + gamma * V(s').
                target = reward + mo.gamma * self.vf_fa.get_func_eval(next_state)
                states.append(state)
                targets.append(target)
                steps += 1
                terminate = steps >= self.max_steps or\
                    mo.terminal_state_func(state)
                state = next_state
            self.vf_fa.update_params_from_gradient(
                [g / len(states) for g in
                 self.vf_fa.get_el_tr_sum_loss_gradient(
                     states,
                     targets,
                     mo.gamma * self.critic_lambda
                 )
                 ]
            )
        return self.vf_fa.get_func_eval

    # noinspection PyShadowingNames
    def get_act_value_func(self, pol_func: PolicyType) -> QFType:
        """Fit the action-value approximator to `pol_func` and return Q.

        Returns a curried Q: state -> (action -> value), evaluated with
        qvf_fa.
        """
        mo = self.mdp_rep
        for _ in range(self.num_batches * self.batch_size):
            state = mo.init_state_gen_func()
            steps = 0
            terminate = False
            states_actions = []
            targets = []
            while not terminate:
                action = pol_func(state)(1)[0]
                next_state, reward = mo.state_reward_gen_func(
                    state,
                    action
                )
                # NOTE(review): the target bootstraps off vf_fa's V(s');
                # vf_fa is not trained inside this method, so callers are
                # expected to have fit it first (e.g. via get_value_func).
                target = reward + mo.gamma * self.vf_fa.get_func_eval(next_state)
                states_actions.append((state, action))
                targets.append(target)
                steps += 1
                terminate = steps >= self.max_steps or \
                    mo.terminal_state_func(state)
                state = next_state
            # BUG FIX: the gradient is computed from qvf_fa and this method
            # returns qvf_fa evaluations, so qvf_fa (not vf_fa) must receive
            # the parameter update; previously vf_fa was updated and the
            # returned Q-function was never trained.
            self.qvf_fa.update_params_from_gradient(
                [g / len(states_actions) for g in
                 self.qvf_fa.get_el_tr_sum_loss_gradient(
                     states_actions,
                     targets,
                     mo.gamma * self.critic_lambda
                 )
                 ]
            )
        return lambda s: lambda a, s=s: self.qvf_fa.get_func_eval((s, a))

    def get_policy_as_policy_type(self) -> PolicyType:
        """Wrap the current parametric policy as state -> (n -> n sampled actions)."""
        def pol(s: S) -> Callable[[int], Sequence[A]]:
            # noinspection PyShadowingNames
            def gen_func(samples: int, s=s) -> Sequence[A]:
                return self.sample_actions_gen_func(
                    [f.get_func_eval(s) for f in self.pol_fa],
                    samples
                )
            return gen_func
        return pol

    def get_path(
        self,
        start_state: S
    ) -> Sequence[Tuple[S, Sequence[float], A, float]]:
        """Roll out one episode under the current policy.

        Returns a list of (state, policy-distribution parameters, sampled
        action, reward), truncated at max_steps or a terminal state.
        """
        res = []
        state = start_state
        steps = 0
        terminate = False
        while not terminate:
            pdf_params = [f.get_func_eval(state) for f in self.pol_fa]
            action = self.sample_actions_gen_func(pdf_params, 1)[0]
            next_state, reward = self.mdp_rep.state_reward_gen_func(state, action)
            res.append((
                state,
                pdf_params,
                action,
                reward
            ))
            steps += 1
            terminate = steps >= self.max_steps or\
                self.mdp_rep.terminal_state_func(state)
            state = next_state
        return res

    def get_optimal_reinforce_func(self) -> PolicyType:
        """Optimize the policy with episodic REINFORCE.

        For each batch, accumulates discounted-return-weighted score
        gradients over batch_size episodes, then applies the averaged
        gradient to each policy parameter's approximator.
        """
        mo = self.mdp_rep
        sc_func = self.score_func
        for _ in range(self.num_batches):
            pol_grads = [
                [np.zeros_like(layer) for layer in this_pol_fa.params]
                for this_pol_fa in self.pol_fa
            ]
            for _ in range(self.batch_size):
                states = []
                disc_return_scores = []
                return_val = 0.
                init_state = mo.init_state_gen_func()
                this_path = self.get_path(init_state)
                # Walk the episode backwards to accumulate returns.
                for i, (s, pp, a, r) in enumerate(this_path[::-1]):
                    i1 = len(this_path) - i - 1  # forward time index of this step
                    states.append(s)
                    return_val = return_val * mo.gamma + r
                    disc_return_scores.append(
                        [return_val * mo.gamma ** i1 * x for x in sc_func(a, pp)]
                    )
                pg_arr = np.vstack(disc_return_scores)
                for i, pp_fa in enumerate(self.pol_fa):
                    # Negated: the approximators minimize, we want ascent.
                    this_pol_grad = pp_fa.get_sum_objective_gradient(
                        states,
                        - pg_arr[:, i]
                    )
                    for j in range(len(pol_grads[i])):
                        pol_grads[i][j] += this_pol_grad[j]
            for i, pp_fa in enumerate(self.pol_fa):
                pp_fa.update_params_from_gradient(
                    [pg / self.batch_size for pg in pol_grads[i]]
                )
        return self.get_policy_as_policy_type()

    def get_optimal_tdl_func(self) -> PolicyType:
        """Optimize the policy with a TD(lambda) actor-critic.

        Per episode: computes TD errors against the vf_fa critic, updates the
        critic with critic_lambda eligibility traces, and accumulates
        TD-error-weighted score gradients (actor_lambda traces) that are
        applied to the policy once per batch.
        """
        mo = self.mdp_rep
        sc_func = self.score_func
        for _ in range(self.num_batches):
            pol_grads = [
                [np.zeros_like(layer) for layer in this_pol_fa.params]
                for this_pol_fa in self.pol_fa
            ]
            for _ in range(self.batch_size):
                gamma_pow = 1.
                states = []
                deltas = []
                disc_scores = []
                init_state = mo.init_state_gen_func()
                this_path = self.get_path(init_state)
                for i, (s, pp, a, r) in enumerate(this_path):
                    # Bootstrap from the next state's value (0 at episode end).
                    fut_return = mo.gamma * self.vf_fa.get_func_eval(this_path[i + 1][0])\
                        if i < len(this_path) - 1 else 0.
                    delta = r + fut_return - self.vf_fa.get_func_eval(s)
                    states.append(s)
                    deltas.append(delta)
                    disc_scores.append([gamma_pow * x for x in sc_func(a, pp)])
                    gamma_pow *= mo.gamma
                self.vf_fa.update_params_from_gradient(
                    self.vf_fa.get_el_tr_sum_objective_gradient(
                        states,
                        np.power(mo.gamma, np.arange(len(states))),
                        - np.array(deltas),
                        mo.gamma * self.critic_lambda
                    )
                )
                pg_arr = np.vstack(disc_scores)
                for i, pp_fa in enumerate(self.pol_fa):
                    this_pol_grad = pp_fa.get_el_tr_sum_objective_gradient(
                        states,
                        pg_arr[:, i],
                        - np.array(deltas),
                        mo.gamma * self.actor_lambda
                    )
                    for j in range(len(pol_grads[i])):
                        pol_grads[i][j] += this_pol_grad[j]
            for i, pp_fa in enumerate(self.pol_fa):
                pp_fa.update_params_from_gradient(
                    [pg / self.batch_size for pg in pol_grads[i]]
                )
        return self.get_policy_as_policy_type()

    def get_optimal_stoch_policy_func(self) -> PolicyType:
        """Dispatch to REINFORCE or TD(lambda) per the `reinforce` flag."""
        return self.get_optimal_reinforce_func() if self.reinforce \
            else self.get_optimal_tdl_func()

    def get_optimal_det_policy_func(self) -> Callable[[S], A]:
        """Collapse the optimized stochastic policy to a deterministic one
        by averaging num_action_samples sampled actions component-wise."""
        papt = self.get_optimal_stoch_policy_func()

        def opt_det_pol_func(s: S) -> A:
            return tuple(np.mean(
                papt(s)(self.num_action_samples),
                axis=0
            ))

        return opt_det_pol_func
if __name__ == '__main__':
    # Demo: solve a small 3-state, 2-action MDP with the TD(lambda)
    # actor-critic (reinforce_val = False) and compare against the exact
    # DP optimum computed from the refined MDP model.
    from processes.mdp_refined import MDPRefined
    from func_approx.dnn_spec import DNNSpec
    from numpy.random import binomial
    # state -> action -> next_state -> (probability, reward)
    mdp_refined_data = {
        1: {
            (10,): {1: (0.3, 9.2), 2: (0.6, 4.5), 3: (0.1, 5.0)},
            (-10,): {2: (0.3, -0.5), 3: (0.7, 2.6)}
        },
        2: {
            (10,): {1: (0.3, 9.8), 2: (0.6, 6.7), 3: (0.1, 1.8)},
            (-10,): {1: (0.3, 19.8), 2: (0.6, 16.7), 3: (0.1, 1.8)},
        },
        3: {
            (10,): {3: (1.0, 0.0)},
            (-10,): {3: (1.0, 0.0)}
        }
    }
    gamma_val = 0.9
    mdp_ref_obj1 = MDPRefined(mdp_refined_data, gamma_val)
    mdp_rep_obj = mdp_ref_obj1.get_mdp_rep_for_rl_pg()
    reinforce_val = False
    num_batches_val = 1000
    batch_size_val = 10
    num_action_samples_val = 100
    max_steps_val = 100
    actor_lambda_val = 0.95
    critic_lambda_val = 0.95
    learning_rate_val = 0.1
    # One-hot state features for the three states.
    state_ff = [
        lambda s: 1. if s == 1 else 0.,
        lambda s: 1. if s == 2 else 0.,
        lambda s: 1. if s == 3 else 0.
    ]
    fa_spec_val = FuncApproxSpec(
        state_feature_funcs=state_ff,
        sa_feature_funcs=[(lambda x, f=f: f(x[0])) for f in state_ff],
        dnn_spec=DNNSpec(
            neurons=[2],
            hidden_activation=DNNSpec.relu,
            hidden_activation_deriv=DNNSpec.relu_deriv,
            output_activation=DNNSpec.identity,
            output_activation_deriv=DNNSpec.identity_deriv
        ),
        learning_rate=learning_rate_val
    )
    # Policy network outputs a sigmoid: the Bernoulli probability of action (10,).
    pol_fa_spec_val = [FuncApproxSpec(
        state_feature_funcs=state_ff,
        sa_feature_funcs=[(lambda x, f=f: f(x[0])) for f in state_ff],
        dnn_spec=DNNSpec(
            neurons=[2],
            hidden_activation=DNNSpec.relu,
            hidden_activation_deriv=DNNSpec.relu_deriv,
            output_activation=DNNSpec.sigmoid,
            output_activation_deriv=DNNSpec.sigmoid_deriv
        ),
        learning_rate=learning_rate_val
    )]
    # Score function: d/dp log pi(a|p) for a Bernoulli over the two actions.
    # noinspection PyPep8
    this_score_func = lambda a, p: [1. / p[0] if a == (10,) else 1. / (p[0] - 1.)]
    # noinspection PyPep8
    sa_gen_func = lambda p, n: [((10,) if x == 1 else (-10,)) for x in binomial(1, p[0], n)]
    pg_obj = PolicyGradient(
        mdp_rep_for_rl_pg=mdp_rep_obj,
        reinforce=reinforce_val,
        num_batches=num_batches_val,
        batch_size=batch_size_val,
        num_action_samples=num_action_samples_val,
        max_steps=max_steps_val,
        actor_lambda=actor_lambda_val,
        critic_lambda=critic_lambda_val,
        score_func=this_score_func,
        sample_actions_gen_func=sa_gen_func,
        fa_spec=fa_spec_val,
        pol_fa_spec=pol_fa_spec_val
    )
    # Fixed reference policy (unused below; kept for experimentation).
    def policy_func(i: int) -> Mapping[Tuple[int], float]:
        if i == 1:
            ret = {(10,): 0.4, (-10,): 0.6}
        elif i == 2:
            ret = {(10,): 0.7, (-10,): 0.3}
        elif i == 3:
            ret = {(-10,): 1.0}
        else:
            raise ValueError
        return ret
    # print("Printing DP vf for a policy")
    # from processes.policy import Policy
    # true_vf_for_pol = mdp_ref_obj1.get_value_func_dict(Policy(
    #     {s: policy_func(s) for s in {1, 2, 3}}
    # ))
    # print(true_vf_for_pol)
    #
    # # this_qf = adp_pg_obj.get_act_value_func_fa(policy_func)
    # this_vf = adp_pg_obj.get_value_func_fa(policy_func)
    # print("Printing vf for a policy")
    # print(this_vf(1))
    # print(this_vf(2))
    # print(this_vf(3))
    tol_val = 1e-6
    true_opt = mdp_ref_obj1.get_optimal_policy(tol=tol_val)
    print("Printing DP Opt Policy")
    print(true_opt)
    true_vf = mdp_ref_obj1.get_value_func_dict(true_opt)
    print("Printing DP Opt VF")
    print(true_vf)
    opt_det_polf = pg_obj.get_optimal_det_policy_func()
    # noinspection PyShadowingNames
    def opt_polf(s: S, opt_det_polf=opt_det_polf) -> Mapping[A, float]:
        return {opt_det_polf(s): 1.0}
    print("Printing Opt Policy")
    print(opt_polf(1))
    print(opt_polf(2))
    print(opt_polf(3))
    # Evaluate the learned policy's value function for comparison with DP.
    opt_vf = pg_obj.get_value_func(pg_obj.get_policy_as_policy_type())
    print("Printing Opt VF")
    print(opt_vf(1))
    print(opt_vf(2))
    print(opt_vf(3))
| [
"natu.anand@gmail.com"
] | natu.anand@gmail.com |
d97edee00b89ffead3177b443913cfa0885b63a8 | 7a0b7552bbf24dcaab5f981adc7077a642aee6ac | /week9/todo/main/urls.py | d791ddc723dfcc0b135aa8d30cebdac0fc8c2b2a | [] | no_license | Aigerimmsadir/BFDjango | b20f731796fa9df7ec021bc7a293def35df55e01 | a850713d24f50b8b70dd9f8036f77e76174f3c4e | refs/heads/master | 2020-03-27T20:29:20.543884 | 2018-11-24T13:07:39 | 2018-11-24T13:07:39 | 147,072,816 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | from django.urls import path
from . import views
# URL routes for the todo app; the view callables live in main/views.py.
# NOTE(review): the '1/' prefix looks like a hard-coded todo-list id -- confirm
# whether it should be a <int:list_id> path converter instead.
urlpatterns = [
    path('',views.home, name='home'),
    path('home', views.index, name='index'),
    path('1/completed/', views.completed_tasks,name='completed'),
    path('1/incompleted/', views.incompleted_tasks,name="incompleted"),
    path('1/creation_order/', views.creation_order,name='creation'),
    path('1/due_order/', views.due_order,name='due_order'),
    path('1/delete_task/<int:task_id>', views.delete_task,name="delete_task"),
    path('1/delete_list/', views.delete_list,name="delete_list"),
    path('1/task_done/<int:task_id>', views.task_done,name="task_done"),
    path('1/add_task/', views.add_task,name="add_task"),
    path('1/update_task/<int:task_id>',views.update_task, name='update_task')
]
"noreply@github.com"
] | Aigerimmsadir.noreply@github.com |
2087b62bd5e686dd6b4bf8754acc7ff7fd2c6367 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /110_concurrency_parallelism/001_asynchronous/_exercises/templates/Async Techniques and Examples in Python/09-built-on-asyncio/the_trio/prod_trio.py | a60443d598ffd2f0e29e3ab260bc0c29cf3ba3cb | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,178 | py | # ______ d..
# ______ co..
# ______ ra..
# ______ tr..
#
#
# ? ___ main
# t0 _ d_t_.d_t_.n..
# print(co__.F__.W.. + "App started.", f.._T..
#
# data _ tr__.Q.. capacity_10
#
# w__ tr__.m_o_a.. 5
# ? w__ tr__.op_n.. __ nursery:
# ?.s_s.. g_d.. 20, d.. n.._'Prod 1')
# ?.s_s.. g_d.. 20, d.. n.._'Prod 2')
# ?.s_s.. p.. 40 d.. n.._'Consumer')
#
# dt _ d_t_.d_t_.n.. - t0
# print(co__.F__.W.. + *App exiting, total time: |;,.2_ sec. .f..(
# ?.t_s.. f.._T..
#
#
# ? ___ generate_data(num ? data tr__.Q..
# ___ idx __ ra.. 1 ? + 1
# item _ i..*i..
# ? d__.p.. ? d_t_.d_t_.n..
#
# print(co__.F__.Y.. + _* -- generated item |? f.._T..
# ? tr__.sleep(ra__.ra.. + .5
#
#
# ? ___ process_data num ? data tr__.Q..
# processed _ 0
# w__ ? < ?
# item _ ? d__.g..
#
# ? +_ 1
# value _ ? 0
# t _ ? 1
# dt _ d_t_.d_t_.n.. - t
#
# print(co__.F__.C.. +
# * +++ Processed value @ after |;,.2_ sec. .f..(
# v.. ?.t_s.. f.._T..
# ? tr__.s.. .5
#
#
# __ _________ __ ________
# tr__.r.. ?
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
b6d87b84ce43f9cb8bd1f04a831ea7708ceeba1c | e111233b264eb57467ca12562a3f1d91155f0d18 | /그외/08 4주/잔다.py | 2a35ed2ab0d0d254a8b3aec4a8a422780ee8f63a | [] | no_license | 01090841589/ATM | d440a112a47937d11f4c4d8df6817a76971c0888 | 44fa856a033d15c9281d2597f1b67ee5cec09934 | refs/heads/master | 2020-07-05T13:24:35.080738 | 2019-08-29T08:33:24 | 2019-08-29T08:33:24 | 202,659,466 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,547 | py | def cal(a, b, eq):
    # Tail of cal(a, b, eq): apply the operator character to the two operands.
    # Returns None for any operator other than '+' or '*'.
    if eq == '+':
        return a + b
    elif eq == '*':
        return a * b
# In-stack precedence for the shunting-yard conversion: '*' binds tighter
# than '+', and '(' is lowest so a precedence comparison never pops it.
isp = {'+': 1, '*': 2, '(': 0}
nums = ['0', '1', '2', '3', '4' ,'5', '6', '7', '8', '9']
# 10 fixed test cases: each supplies an expression-length line followed by
# the single-digit infix expression itself.
for tc in range(1, 11):
    N = int(input())
    arith = input()
    word = []   # output queue: the expression converted to postfix
    stack = []  # operator stack
    for letter in arith:
        if letter in nums:
            word.append(int(letter))
        else:
            if letter == '(':
                stack.append('(')
            elif letter == ')':
                # Pop operators until the matching '(' (which is discarded).
                while True:
                    tmp = stack.pop()
                    if tmp == '(':
                        break
                    word.append(tmp)
            else:
                if stack:
                    if isp[stack[-1]] > isp[letter]:
                        # Pop strictly-higher-precedence operators first.
                        # Strict '>' (instead of '>=') changes grouping of
                        # equal-precedence runs, but '+' and '*' are
                        # associative so the numeric result is unaffected.
                        tmp2 = stack[-1]
                        while isp[tmp2] > isp[letter]:
                            word.append(stack.pop())
                            if not stack:
                                break
                            tmp2 = stack[-1]
                        stack.append(letter)
                    else:
                        stack.append(letter)
                else:
                    stack.append(letter)
    # Flush any remaining operators into the postfix queue.
    for i in range(len(stack)):
        word.append(stack.pop())
    result = 0
    cal_stack = []
    # Evaluate the postfix expression with an operand stack.
    for letter in word:
        if type(letter) == int:
            cal_stack.append(letter)
        else:
            # Operands are popped in reverse order, which is harmless for
            # the commutative '+' and '*'.
            result = cal(cal_stack.pop(), cal_stack.pop(), letter)
            cal_stack.append(result)
    print('#{} {}'.format(tc, result))
"chanchanhwan@naver.com"
] | chanchanhwan@naver.com |
b6c8eb3848575a5dc835a5dab447e1c9cb28d2ec | 33524b5c049f934ce27fbf046db95799ac003385 | /2017/Turtule/lesson_7_Циклы___for/triangle.py | 02ce25a3cada3cb1377c90f019353af48c74f888 | [] | no_license | mgbo/My_Exercise | 07b5f696d383b3b160262c5978ad645b46244b70 | 53fb175836717493e2c813ecb45c5d5e9d28dd23 | refs/heads/master | 2022-12-24T14:11:02.271443 | 2020-10-04T04:44:38 | 2020-10-04T04:44:38 | 291,413,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py |
# -*- coding: utf-8 -*-
import turtle
import time
def write(data):
    # Draw `data` as text at the module-level turtle `t`'s current position.
    t.write(data,font = ("Arial",14,"normal"))
def triangle(size,cover):
    # Draw a triangle with the module-level turtle `t`, starting and ending
    # at the current position: one side of length `size` along the current
    # heading, a second side rotated `cover` degrees to the right, then the
    # outline is closed by returning through the first endpoint.
    #a0 = t.heading()
    p0 = t.pos()          # remember the starting vertex
    t.fd(size)
    p1 = t.pos()          # far vertex of the first side
    t.fd(-size)
    t.rt(cover)
    t.fd(size)
    t.goto(p1)            # close the triangle
    t.goto(p0)            # return to the starting vertex
    #t.seth(a0)
def Poligon(size,n):
    # Draw n triangles fanned around the starting point, each rotated by a
    # further 360/n degrees, producing a polygon-like rosette of "radius"
    # size/2. Uses the module-level turtle `t` via triangle().
    ang = 0
    for _ in range(n):
        t.seth(ang)
        triangle(size/2,360/n)
        #t.fd(size)
        ang += 360/n
        #time.sleep(2)
# Script entry: configure the shared turtle used by the helpers above,
# then draw one figure.
t = turtle.Turtle()
t.shape("turtle")
t.color('green')
t.width(2)
#t.seth(45)
triangle(100,90)
#Poligon(200,10)
turtle.done()  # keep the window open until the user closes it
| [
"mgbo433@gmail.com"
] | mgbo433@gmail.com |
ca2c1dda952a2077996a03d398489c4346274ca8 | c11123ce1e86f8306dcc3bf5d017dbfa8bb1d515 | /Medium/Combinations.py | a8b946a2426e2c441fd19569d5b149a0c76f041d | [] | no_license | uathena1991/Leetcode | 7e606c68a51ed09e6e6a9fad327b24066e92d0c4 | e807ae43a0a253deaa6c9ed1c592fa3a14a6cab8 | refs/heads/master | 2021-05-01T15:21:25.568729 | 2019-10-13T14:33:30 | 2019-10-13T14:33:30 | 74,910,747 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | class Solution(object):
def combine(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[List[int]]
"""
if k == 0:
return [[]]
else:
return [prev + [i] for i in range(1,n+1) for prev in self.combine(i-1,k-1)]
# Quick manual check (note: Python 2 print statement).
a = Solution()
print a.combine(4,1)
"xiaoli.he@rutgers.edu"
] | xiaoli.he@rutgers.edu |
614ea8295be05bbd12c8e9489763947b7e63dea8 | eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429 | /data/output/550_original | 1f725987700a582ef6c283c92616fa22721d44aa | [] | no_license | bopopescu/pythonanalyzer | db839453bde13bf9157b76e54735f11c2262593a | 8390a0139137574ab237b3ff5fe8ea61e8a0b76b | refs/heads/master | 2022-11-22T02:13:52.949119 | 2019-05-07T18:42:52 | 2019-05-07T18:42:52 | 282,079,884 | 0 | 0 | null | 2020-07-23T23:46:09 | 2020-07-23T23:46:08 | null | UTF-8 | Python | false | false | 1,545 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreatePolicyVersionRequest(RpcRequest):
    """RPC request wrapper for RAM CreatePolicyVersion (API version 2015-05-01).

    Each getter/setter below reads/writes one query parameter of the request.
    """
    def __init__(self):
        RpcRequest.__init__(self, 'Ram', '2015-05-01', 'CreatePolicyVersion')
        self.set_protocol_type('https');
    def get_PolicyName(self):
        # Name of the policy the new version is added to.
        return self.get_query_params().get('PolicyName')
    def set_PolicyName(self,PolicyName):
        self.add_query_param('PolicyName',PolicyName)
    def get_PolicyDocument(self):
        # JSON policy document for the new version.
        return self.get_query_params().get('PolicyDocument')
    def set_PolicyDocument(self,PolicyDocument):
        self.add_query_param('PolicyDocument',PolicyDocument)
    def get_SetAsDefault(self):
        # Whether the new version becomes the policy's default version.
        return self.get_query_params().get('SetAsDefault')
    def set_SetAsDefault(self,SetAsDefault):
        self.add_query_param('SetAsDefault',SetAsDefault)
"rares.begu@gmail.com"
] | rares.begu@gmail.com | |
0e10bb5c19776e865b5ad07011a5366bb37011b1 | 72b1d8b44520d1757d379d8013eb3912b005bef3 | /ml/text/experiment/triplet.py | fb287d3ed7426ca6b5e23c8fc9202116298de1f2 | [] | no_license | joshuaNewman10/ml | 14d8d5821bd952e77272b740cf05cef69ebee383 | 3ec43868004d421814f8e056205e77a2b8cb92dc | refs/heads/master | 2021-04-03T06:29:33.655495 | 2018-09-17T19:03:40 | 2018-09-17T19:03:40 | 124,795,304 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,156 | py | from keras import Input, Model, backend as K
from keras.layers import Lambda, Concatenate
def triplet_model(encoder, input_shape):
    """Build a Keras triplet network around a shared `encoder`.

    Takes (anchor, related, unrelated) inputs, embeds each with the same
    encoder, and outputs the two Euclidean embedding distances
    [d(anchor, related), d(anchor, unrelated)] concatenated per sample.
    """
    x_anchor = Input(shape=input_shape, name='anchor')
    x_related = Input(shape=input_shape, name='related')
    x_unrelated = Input(shape=input_shape, name='unrelated')
    # The same encoder instance is applied three times, so its weights are
    # shared across the branches.
    h_anchor = encoder(x_anchor)
    h_related = encoder(x_related)
    h_unrelated = encoder(x_unrelated)
    related_dist = Lambda(euclidean_distance, name='pos_dist')([h_anchor, h_related])
    unrelated_dist = Lambda(euclidean_distance, name='neg_dist')([h_anchor, h_unrelated])
    inputs = [x_anchor, x_related, x_unrelated]
    distances = Concatenate()([related_dist, unrelated_dist])
    model = Model(inputs=inputs, outputs=distances)
    return model
def triplet_loss(_, y_pred):
    """Margin triplet loss: mean(max(0, d_pos^2 - d_neg^2 + margin)).

    y_pred[:, 0] is the anchor-related distance and y_pred[:, 1] the
    anchor-unrelated distance; the true-label argument is unused.
    """
    margin = K.constant(1)
    return K.mean(K.maximum(K.constant(0), K.square(y_pred[:, 0]) - K.square(y_pred[:, 1]) + margin))
def triplet_accuracy(_, y_pred):
    """Fraction of triplets where the related pair is closer than the unrelated pair."""
    return K.mean(y_pred[:, 0] < y_pred[:, 1])
def euclidean_distance(vectors):
    """Row-wise Euclidean distance between a pair of batched tensors.

    `vectors` is a pair (x, y); the squared differences are summed over
    axis 1 and clamped to K.epsilon() before the sqrt so the gradient at
    zero distance stays finite.
    """
    x, y = vectors
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
"josh@teambanjo.com"
] | josh@teambanjo.com |
77425a6f9bac27b78c6d3c2f6bff38e7d5474ddd | 1adf769cf9234f9b6c619f808d2723b99451d679 | /rusentrel/rusentrel_ds/mi_att/att_hidden_z_yang.py | beb3090e90ef24dd693da28831f87b381a9f8486 | [
"MIT"
] | permissive | DAVMARROS/attitude-extraction-with-attention-and-ds | 4e85fa154ead0cd9499aaedf5d752ac565f37b92 | fb8e9d0d9488363738a88c4c447c7a8cb3e2ec1d | refs/heads/master | 2023-02-09T04:56:24.090380 | 2020-12-30T10:09:34 | 2020-12-30T10:09:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | #!/usr/bin/python
import sys
sys.path.append('../../../')
from arekit.contrib.networks.multi.configurations.att_self import AttSelfOverSentencesConfig
from arekit.contrib.networks.multi.architectures.att_self import AttSelfOverSentences
from rusentrel.mi_names import AttSelfOverInstancesModelNames
from rusentrel.rusentrel_ds.mi.att_hidden_z_yang import run_testing_mi_att_hidden_zyang
if __name__ == "__main__":
    # Run the multi-instance attention experiment with the self-attention
    # (over sentences) architecture/config pair defined above.
    run_testing_mi_att_hidden_zyang(
        model_names_classtype=AttSelfOverInstancesModelNames,
        network_classtype=AttSelfOverSentences,
        config_classtype=AttSelfOverSentencesConfig
    )
| [
"kolyarus@yandex.ru"
] | kolyarus@yandex.ru |
1e65865dbb74cecc72da259e7421679ad6d1116e | 81e081bd18fcf4f648f50722b59c55a581306346 | /1300_K번째 수/s1.py | 1eade0d05ad15fbb779c4f21e93bcc0c01783911 | [] | no_license | Ysh096/baekjoon | 5b50ceb70c0e5a1b095dbee0542056cb31a8edaf | 54943a00e11ae4926208e51e7488cc63e6da525d | refs/heads/master | 2023-08-05T15:13:17.884639 | 2021-10-10T12:24:52 | 2021-10-10T12:24:52 | 340,568,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | # 메모리 초과
import sys
sys.stdin = open('input.txt')
# Brute force for BOJ 1300: build the full N x N multiplication table and
# pick the k-th smallest entry. (Known to exceed the memory limit for large
# N on the judge; kept as a reference solution.)
N = int(input())
k = int(input())
B = []
for i in range(1, N+1):
    for j in range(1, N+1):
        B.append(i*j)
B.sort()
# BUG FIX: k is 1-based in the problem statement while the sorted list is
# 0-based, so the k-th smallest value is B[k-1] (previously B[k] printed
# the (k+1)-th smallest).
print(B[k-1])
| [
"skk7541@gmail.com"
] | skk7541@gmail.com |
b65fdf1255eb415d579598bcc480d9c998e09e75 | d3188257a2e62627744876fd17d36db7c0f1ffab | /chat_chat/chat/models.py | 82b2f0f5d5804163ae24cfdac910300fdbd5bcb8 | [
"MIT"
] | permissive | junngo/django-chat | 8e01ebc2d18f93d87c2a104703274cebe75b6a92 | 24a4ec17ade348186ab4cdaeecb60f6b69d5dce2 | refs/heads/master | 2022-11-11T17:15:46.455637 | 2020-07-02T06:14:40 | 2020-07-02T06:14:40 | 266,116,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | from django.contrib.auth import get_user_model
from django.db import models
# Create your models here.
User = get_user_model()
class Contact(models.Model):
    # A user's chat profile, holding that user's friend list.
    # NOTE(review): the FKs reference get_user_model() resolved at import
    # time; Django recommends settings.AUTH_USER_MODEL for user FKs --
    # confirm app-loading order is safe in this project.
    user = models.ForeignKey(
        User, related_name='me', on_delete=models.CASCADE)
    friends = models.ManyToManyField(
        User, related_name='friends', blank=True)
    def __str__(self):
        return self.user.username
class Message(models.Model):
    # A single chat message authored by `contact`.
    contact = models.ForeignKey(
        Contact, related_name='messages', on_delete=models.CASCADE)
    message = models.TextField()  # message body
    timestamp = models.DateTimeField(auto_now_add=True)  # set once, on creation
    def __str__(self):
        # Shown in admin/debug output: the author's username, not the text.
        return self.contact.user.username
class Room(models.Model):
    # A chat room: the contacts taking part and the messages posted in it.
    participants = models.ManyToManyField(
        Contact, related_name='rooms', blank=True)
    messages = models.ManyToManyField(Message, blank=True)
    def __str__(self):
        return "{}".format(self.pk)
    @property
    def group_name(self):
        """
        Returns the Channels Group name that sockets should subscribe to to get sent
        messages as they are generated.
        """
        return "room-%s" % self.id
| [
"myeongjun.ko@gmail.com"
] | myeongjun.ko@gmail.com |
996202efa4c0dd475918988a5b19e75ba8fb54dc | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-ice/aliyunsdkice/request/v20201109/ListPackageJobsRequest.py | e6443d218280892d7a6d0901902c850f1f9912e1 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 2,617 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkice.endpoint import endpoint_data
class ListPackageJobsRequest(RpcRequest):
	"""RPC request wrapper for ICE ListPackageJobs (API version 2020-11-09).

	Each getter/setter below reads/writes one query parameter of the request.
	"""
	def __init__(self):
		RpcRequest.__init__(self, 'ICE', '2020-11-09', 'ListPackageJobs','ice')
		self.set_method('POST')
		# Attach region/endpoint resolution data when the SDK core supports it.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
	def get_NextPageToken(self): # String
		# Pagination cursor returned by the previous call.
		return self.get_query_params().get('NextPageToken')
	def set_NextPageToken(self, NextPageToken): # String
		self.add_query_param('NextPageToken', NextPageToken)
	def get_JobId(self): # String
		return self.get_query_params().get('JobId')
	def set_JobId(self, JobId): # String
		self.add_query_param('JobId', JobId)
	def get_PageSize(self): # Integer
		return self.get_query_params().get('PageSize')
	def set_PageSize(self, PageSize): # Integer
		self.add_query_param('PageSize', PageSize)
	def get_EndOfCreateTime(self): # String
		# Upper bound of the job-creation-time filter.
		return self.get_query_params().get('EndOfCreateTime')
	def set_EndOfCreateTime(self, EndOfCreateTime): # String
		self.add_query_param('EndOfCreateTime', EndOfCreateTime)
	def get_OrderBy(self): # String
		return self.get_query_params().get('OrderBy')
	def set_OrderBy(self, OrderBy): # String
		self.add_query_param('OrderBy', OrderBy)
	def get_StartOfCreateTime(self): # String
		# Lower bound of the job-creation-time filter.
		return self.get_query_params().get('StartOfCreateTime')
	def set_StartOfCreateTime(self, StartOfCreateTime): # String
		self.add_query_param('StartOfCreateTime', StartOfCreateTime)
	def get_Status(self): # String
		return self.get_query_params().get('Status')
	def set_Status(self, Status): # String
		self.add_query_param('Status', Status)
| [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
6d29c3ea7b9ad0d135d444d81d34f3035c44a725 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-drs/huaweicloudsdkdrs/v5/model/cloud_vpc_info.py | fe8771b44358314ee19f1d2be9fcc5b5edab143e | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 6,950 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CloudVpcInfo:
    """
    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    sensitive_list = []
    openapi_types = {
        'vpc_id': 'str',
        'subnet_id': 'str',
        'security_group_id': 'str'
    }
    attribute_map = {
        'vpc_id': 'vpc_id',
        'subnet_id': 'subnet_id',
        'security_group_id': 'security_group_id'
    }
    def __init__(self, vpc_id=None, subnet_id=None, security_group_id=None):
        """CloudVpcInfo
        The model defined in huaweicloud sdk
        :param vpc_id: ID of the VPC where the database instance resides. To obtain it: Method 1: log in to the VPC console and find the VPC ID on the VPC details page. Method 2: call the VPC service API to list VPCs.
        :type vpc_id: str
        :param subnet_id: ID of the subnet where the database instance resides. To obtain it: Method 1: log in to the VPC console, click the subnet under the VPC, and find the network ID on the subnet details page. Method 2: call the VPC service API to list subnets.
        :type subnet_id: str
        :param security_group_id: ID of the security group that the database instance belongs to. To obtain it: Method 1: log in to the VPC console and find the security group ID on the security group details page. Method 2: call the VPC service API to list security groups.
        :type security_group_id: str
        """
        self._vpc_id = None
        self._subnet_id = None
        self._security_group_id = None
        self.discriminator = None
        self.vpc_id = vpc_id
        self.subnet_id = subnet_id
        if security_group_id is not None:
            self.security_group_id = security_group_id
    @property
    def vpc_id(self):
        """Gets the vpc_id of this CloudVpcInfo.
        ID of the VPC where the database instance resides. To obtain it: Method 1: log in to the VPC console and find the VPC ID on the VPC details page. Method 2: call the VPC service API to list VPCs.
        :return: The vpc_id of this CloudVpcInfo.
        :rtype: str
        """
        return self._vpc_id
    @vpc_id.setter
    def vpc_id(self, vpc_id):
        """Sets the vpc_id of this CloudVpcInfo.
        ID of the VPC where the database instance resides. To obtain it: Method 1: log in to the VPC console and find the VPC ID on the VPC details page. Method 2: call the VPC service API to list VPCs.
        :param vpc_id: The vpc_id of this CloudVpcInfo.
        :type vpc_id: str
        """
        self._vpc_id = vpc_id
    @property
    def subnet_id(self):
        """Gets the subnet_id of this CloudVpcInfo.
        ID of the subnet where the database instance resides. To obtain it: Method 1: log in to the VPC console, click the subnet under the VPC, and find the network ID on the subnet details page. Method 2: call the VPC service API to list subnets.
        :return: The subnet_id of this CloudVpcInfo.
        :rtype: str
        """
        return self._subnet_id
    @subnet_id.setter
    def subnet_id(self, subnet_id):
        """Sets the subnet_id of this CloudVpcInfo.
        ID of the subnet where the database instance resides. To obtain it: Method 1: log in to the VPC console, click the subnet under the VPC, and find the network ID on the subnet details page. Method 2: call the VPC service API to list subnets.
        :param subnet_id: The subnet_id of this CloudVpcInfo.
        :type subnet_id: str
        """
        self._subnet_id = subnet_id
    @property
    def security_group_id(self):
        """Gets the security_group_id of this CloudVpcInfo.
        ID of the security group that the database instance belongs to. To obtain it: Method 1: log in to the VPC console and find the security group ID on the security group details page. Method 2: call the VPC service API to list security groups.
        :return: The security_group_id of this CloudVpcInfo.
        :rtype: str
        """
        return self._security_group_id
    @security_group_id.setter
    def security_group_id(self, security_group_id):
        """Sets the security_group_id of this CloudVpcInfo.
        ID of the security group that the database instance belongs to. To obtain it: Method 1: log in to the VPC console and find the security group ID on the security group details page. Method 2: call the VPC service API to list security groups.
        :param security_group_id: The security_group_id of this CloudVpcInfo.
        :type security_group_id: str
        """
        self._security_group_id = security_group_id
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Sensitive attributes are masked in the dict representation.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CloudVpcInfo):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
52cb29e9a1839ab9795cee5589a2043ec6281f7f | ab5634154a80272c051701597d4d8694ffdf367e | /parse_iperf.py | b71b56c1118a6384cfc3b604c7c8b8de56d03be3 | [] | no_license | WiperHung/CN-HW1 | 19ebf76767e480bc0593c559d2e7a55a4e1df604 | 6fe990a725f4c691ebefef58c7d5ca109057892d | refs/heads/main | 2023-01-07T16:53:54.759976 | 2020-11-01T12:24:48 | 2020-11-01T12:24:48 | 309,062,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,004 | py | #!/usr/bin/python
# Parse the iperf.txt files and generate new files that can be used for plotting
# throughput vs time
from argparse import ArgumentParser
import sys
import os

parser = ArgumentParser(description="iperfParser")
parser.add_argument('--n',
                    type=int,
                    help="Number of hosts",
                    default=5)
parser.add_argument('--dir', '-d',
                    help="Directory to store outputs",
                    required=True)
args = parser.parse_args()

# Each host i becomes active 20s after the previous one; pad the plot file
# with zero-throughput samples outside the host's active window so every
# output covers the full 0-300s experiment timeline.
for i in range(1, args.n):
    # `with` guarantees the files are flushed and closed (the originals
    # were never closed, so plot files could be truncated on exit).
    with open("%s/iperf%d.txt" % (args.dir, i), "r") as fi:
        # Skip iperf's 6-line header and its trailing summary line.
        samples = fi.readlines()[6:-1]
    with open("%s/iperf%d-plot.txt" % (args.dir, i), "w+") as fo:
        # Zero padding before this host starts transmitting.
        for t in range(20 * (i - 1)):
            fo.write("%d,0 \n" % t)
        t = 20 * (i - 1)
        # One "time,throughput" row per iperf interval report; the
        # second-to-last column of each report line is the bandwidth value.
        for line in samples:
            word = line.split()
            fo.write("%d,%s \n" % (t, word[len(word) - 2]))
            t = t + 1
        # Zero padding after the host's samples run out, up to t=300.
        for t in range(t, 300):
            fo.write("%d,0 \n" % t)
"="
] | = |
6f394e0be9cc0906075814aae0d91b66285baf6c | 37b30edf9f643225fdf697b11fd70f3531842d5f | /chrome/browser/ash/DEPS | 0d81be3a8ee2bc6201ddbef230e6374ba0959fd3 | [
"BSD-3-Clause"
] | permissive | pauladams8/chromium | 448a531f6db6015cd1f48e7d8bfcc4ec5243b775 | bc6d983842a7798f4508ae5fb17627d1ecd5f684 | refs/heads/main | 2023-08-05T11:01:20.812453 | 2021-09-17T16:13:54 | 2021-09-17T16:13:54 | 407,628,666 | 1 | 0 | BSD-3-Clause | 2021-09-17T17:35:31 | 2021-09-17T17:35:30 | null | UTF-8 | Python | false | false | 2,036 | include_rules = [
# //chrome/browser/ash is conceptually part of "ash". See the "Lacros:
# ChromeOS source code directory migration" design doc at
# https://docs.google.com/document/d/1g-98HpzA8XcoGBWUv1gQNr4rbnD5yfvbtYZyPDDbkaE
"+ash",
"+chrome/browser/image_decoder",
# TODO(ananta): Remove this when we move files which display UI in
# chrome/browser/chromeos to chrome/browser/ui/views/chromeos
# crbug.com/728877
"+chrome/browser/ui/views/chrome_layout_provider.h",
"+chrome/services/keymaster/public",
"+chrome/services/wilco_dtc_supportd/public",
"+components/account_manager_core",
"+components/app_restore",
"+components/guest_os",
"+components/services/app_service/public",
"+cros",
"+dbus",
"+device/bluetooth",
"+media/audio/sounds", # For system sounds
"+media/base/media_switches.h", # For media command line switches.
"+media/mojo/mojom", # For platform verification mojom interface.
"+remoting/host/it2me", # For CRD host in remote command
"+remoting/protocol", # For CRD host in remote command
"+services/device/public",
"+services/metrics/public",
"+services/network",
"+services/tracing/public",
"+services/viz/public/mojom",
]
# Per-file overrides (keyed by filename regex) that grant extra includes
# beyond the directory-wide include_rules above.
specific_include_rules = {
  # Dependencies specific for fuzz targets and other fuzzing-related code.
  ".*fuzz.*": [
    "+third_party/libFuzzer/src/utils",  # This contains FuzzedDataProvider.
  ],
  "assistant_util_unittest\.cc": [
    "+ui/events/devices/device_data_manager.h",
  ],
  "child_status_collector_browsertest.cc": [
    "+mojo/core/embedder/embedder.h",
  ],
  "device_status_collector_browsertest.cc": [
    "+mojo/core/embedder/embedder.h",
  ],
  "event_rewriter_unittest\.cc": [
    "+ui/events/devices/device_data_manager.h",
  ],
  "external_protocol_dialog\.cc": [
    "+chrome/browser/ui/views/external_protocol_dialog.h",
  ],
  "file_manager_browsertest_base.cc": [
    "+chrome/browser/ui/views/extensions/extension_dialog.h",
    "+chrome/browser/ui/views/select_file_dialog_extension.h",
  ],
}
| [
"chromium-scoped@luci-project-accounts.iam.gserviceaccount.com"
] | chromium-scoped@luci-project-accounts.iam.gserviceaccount.com | |
d6e40d4131a3a4c0cb562486feb1ce61eda97c54 | 6cd3355ee8286f810cd5df28baa62e5cacfd1a75 | /Advent of Code 2022/Day 1 2022/Day1Q1 2022.py | 65bafe7968e4987d44d26ffd17e5f7f44862bba1 | [] | no_license | Brian-Mascitello/Advent-of-Code | 8d8290ff8dde236a5e21e33b1a1eba05a9c8f269 | f32566fc7b30d5b83c21e8f38b50f0b37bc11135 | refs/heads/master | 2022-12-17T17:15:26.993538 | 2022-12-06T05:43:37 | 2022-12-06T05:43:37 | 112,885,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py | """
Author: Brian Mascitello
Date: 12/2/2022
Websites: https://adventofcode.com/2022/day/1
Info: --- Day 1: Calorie Counting ---
"""
def get_data(input_text):
    """Read the puzzle input file and return its entire contents as a string."""
    with open(input_text) as handle:
        return handle.read()
def main():
    """Solve AoC 2022 day 1 part 1: print the largest per-elf calorie total.

    The input lists one calorie value per line; a blank line separates one
    elf's inventory from the next.
    """
    data = get_data('Day1Q1 2022 Input.txt')

    # Accumulate calorie totals keyed by elf index; a blank line moves on
    # to the next elf. dict.get() replaces the duplicated "seen before /
    # first time" branches and the non-idiomatic `in elf_dict.keys()` test.
    elf_dict = {}
    elf_number = 1
    for line in data.splitlines():
        stripped_line = line.strip()
        if stripped_line == '':
            elf_number += 1
        else:
            elf_dict[elf_number] = elf_dict.get(elf_number, 0) + int(stripped_line)

    most_calories = max(elf_dict.values())
    print(f'The most calories an elf is carrying is {most_calories}.')
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
| [
"bmascitello@gmail.com"
] | bmascitello@gmail.com |
fbd78eba6586daa821c801b32bf9a6b63338a9f9 | 1e1cb0103d4dd15bc462962047d00f6d1349e0c5 | /boss/pages/test_page/base_page.py | cc887b80426d6b9811ab7d4f41a05c4303655065 | [] | no_license | fangmeng1991/BossCode | 9b93ae4ceaf050d4f081a2863aacb52114fafabb | 7cef7f8b3aa9d97f30a4d3cdadadfd23d8d52795 | refs/heads/master | 2020-07-23T03:58:51.259561 | 2019-09-11T01:18:20 | 2019-09-11T01:18:20 | 207,439,253 | 0 | 0 | null | 2019-09-11T01:18:21 | 2019-09-10T01:34:54 | Python | UTF-8 | Python | false | false | 5,756 | py | # coding=utf-8
import time
from selenium.common.exceptions import NoSuchElementException
import os.path
from common.logger import Logger
# create a logger instance
logger = Logger(logger="BasePage").getlog()
class BasePage(object):
    """
    Base class for all page objects. Every page inherits from this class,
    which wraps the common Selenium WebDriver operations (navigation,
    element lookup, input, clicks, screenshots, logging).
    """
    def __init__(self, driver):
        # Selenium WebDriver instance shared by all page operations.
        self.driver = driver
    # Quit the browser and end the test session.
    def quit_browser(self):
        self.driver.quit()
    # Browser "forward" navigation.
    def forward(self):
        self.driver.forward()
        logger.info("Click forward on current page.")
    # Browser "back" navigation.
    def back(self):
        self.driver.back()
        logger.info("Click back on current page.")
    # Implicit wait: applies to all subsequent element lookups.
    def wait(self, seconds):
        self.driver.implicitly_wait(seconds)
        logger.info("wait for %d seconds." % seconds)
    # Close the current browser window.
    def close(self):
        try:
            self.driver.close()
            logger.info("Closing and quit the browser.")
        except NameError as e:
            # NOTE(review): selenium failures raise WebDriverException, not
            # NameError — this handler likely never fires; confirm intent.
            logger.error("Failed to quit the browser with %s" % e)
    # Take a screenshot and save it under the project's log folder.
    def get_windows_img(self):
        """
        The file_path is hard-coded here: screenshots are saved directly to
        the /boss/logs/picture/ folder under the project root, named by a
        minute-resolution timestamp.
        """
        file_path = os.path.dirname(os.path.abspath('.')) + '/boss/logs/picture/'
        rq = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))
        screen_name = file_path + rq + '.png'
        try:
            self.driver.get_screenshot_as_file(screen_name)
            logger.info("Had take screenshot and save to folder : /boss/logs/picture/")
        except NameError as e:
            logger.error("Failed to take screenshot! %s" % e)
            # NOTE(review): retrying inside the except block can recurse
            # indefinitely if screenshots keep failing — confirm.
            self.get_windows_img()
    # Locate an element from a "strategy=>value" selector string.
    def find_element(self, selector):
        """
        The selector string is split on "=>" rather than "=". Pages define
        locators such as:
        submit_btn = "id=>su"
        login_lnk = "xpath => //*[@id='u1']/a[7]"  # Baidu home login link
        Splitting on a bare "=" would break, because many xpath expressions
        themselves contain "=" and would be cut apart, corrupting the locator.
        A selector without "=>" is treated as a plain id.
        :param selector: "strategy=>value" locator string (or a bare id)
        :return: the located WebElement ('' if no branch assigned one)
        """
        element = ''
        if '=>' not in selector:
            return self.driver.find_element_by_id(selector)
        # NOTE(review): the split keeps surrounding whitespace, so
        # "xpath => ..." yields selector_by == "xpath " (trailing space),
        # which matches no branch below and raises NameError — confirm.
        selector_by = selector.split('=>')[0]
        selector_value = selector.split('=>')[1]
        if selector_by == "i" or selector_by == 'id':
            try:
                element = self.driver.find_element_by_id(selector_value)
                logger.info("Had find the element \' %s \' successful "
                            "by %s via value: %s " % (element.text, selector_by, selector_value))
            except NoSuchElementException as e:
                logger.error("NoSuchElementException: %s" % e)
                self.get_windows_img() # take screenshot
        elif selector_by == "n" or selector_by == 'name':
            element = self.driver.find_element_by_name(selector_value)
        elif selector_by == "c" or selector_by == 'class_name':
            element = self.driver.find_element_by_class_name(selector_value)
        elif selector_by == "l" or selector_by == 'link_text':
            element = self.driver.find_element_by_link_text(selector_value)
        elif selector_by == "p" or selector_by == 'partial_link_text':
            element = self.driver.find_element_by_partial_link_text(selector_value)
        elif selector_by == "t" or selector_by == 'tag_name':
            element = self.driver.find_element_by_tag_name(selector_value)
        elif selector_by == "x" or selector_by == 'xpath':
            try:
                element = self.driver.find_element_by_xpath(selector_value)
                logger.info("Had find the element \' %s \' successful "
                            "by %s via value: %s " % (element.text, selector_by, selector_value))
            except NoSuchElementException as e:
                logger.error("NoSuchElementException: %s" % e)
                self.get_windows_img()
        elif selector_by == "s" or selector_by == 'selector_selector':
            element = self.driver.find_element_by_css_selector(selector_value)
        else:
            raise NameError("Please enter a valid type of targeting elements.")
        return element
    # Type text into an input box (clearing it first).
    def type(self, selector, text):
        el = self.find_element(selector)
        el.clear()
        try:
            el.send_keys(text)
            logger.info("Had type \' %s \' in inputBox" % text)
        except NameError as e:
            # NOTE(review): send_keys raises WebDriverException, not
            # NameError — this handler likely never fires; confirm intent.
            logger.error("Failed to type in input box with %s" % e)
            self.get_windows_img()
    # Clear a text box.
    def clear(self, selector):
        el = self.find_element(selector)
        try:
            el.clear()
            logger.info("Clear text in input box before typing.")
        except NameError as e:
            logger.error("Failed to clear in input box with %s" % e)
            self.get_windows_img()
    # Click an element.
    def click(self, selector):
        el = self.find_element(selector)
        try:
            el.click()
            logger.info("The element \' %s \' was clicked." % el.text)
        except NameError as e:
            logger.error("Failed to click the element with %s" % e)
    # Get the current page title.
    def get_page_title(self):
        logger.info("Current page title is %s" % self.driver.title)
        return self.driver.title
    @staticmethod
    def sleep(seconds):
        # Hard sleep; prefer wait()/implicit waits where possible.
        time.sleep(seconds)
        logger.info("Sleep for %d seconds" % seconds)
"123@qq.com"
] | 123@qq.com |
f2d0097a4cdaa88deee9b0d582ac0bdba358b426 | e1950865f000adc926f228d84131e20b244b48f6 | /python/Array/Grid.py | 5fd43c30ed681fb3091576a860777c929f97caf1 | [] | no_license | manastole03/Programming-practice | c73859b13392a6a1036f557fa975225672fb1e91 | 2889dc94068b8d778f6b0cf516982d7104fa2318 | refs/heads/master | 2022-12-06T07:48:47.237014 | 2020-08-29T18:22:59 | 2020-08-29T18:22:59 | 281,708,273 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | w=int(input('Enter width of grid: '))
# Fixed the user-facing prompt typo: "heightof" -> "height of".
h = int(input('Enter height of grid: '))

# Print an h-row by w-column grid of dashes (w is read earlier in the script).
for i in range(h):
    for j in range(w):
        print('-', end=' ')
    print()
| [
"noreply@github.com"
] | manastole03.noreply@github.com |
8a3ae94a5be0277d4287ad3cf4c4378be3250295 | c39566ee9b2e9825f8b9cca5d04a97ee123ab0d4 | /src/search/urls.py | 21fb622c1b35bfd9cd9195f671ddc540b0150072 | [] | no_license | bekbossyn/ecommerce_old | 58a3c0fd4ecea139272baa39f6bbe57fa46751f6 | 2da1ed7e231d932a5a894183ded79eee6ce65497 | refs/heads/master | 2021-09-07T16:44:25.320360 | 2018-02-26T07:51:07 | 2018-02-26T07:51:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | from django.conf.urls import url
from .views import (
SearchProductView
)
# Route the app root ("") to the product search listing view.
urlpatterns = [
    url(r'^$', SearchProductView.as_view(), name="list"),
]
| [
"bekbossyn.kassymkhan@gmail.com"
] | bekbossyn.kassymkhan@gmail.com |
47733d507b71fe40aba19be739e9709a96d240c8 | 99e44f844d78de330391f2b17bbf2e293bf24b1b | /pytorch/test/test_c10d_spawn.py | 8004ed4d2206cd6e7530dfcc840d4efbbdf9f5df | [
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | raghavnauhria/whatmt | be10d57bcd6134dd5714d0c4058abd56a1b35a13 | c20483a437c82936cb0fb8080925e37b9c4bba87 | refs/heads/master | 2022-12-04T05:39:24.601698 | 2019-07-22T09:43:30 | 2019-07-22T09:43:30 | 193,026,689 | 0 | 1 | MIT | 2022-11-28T17:50:19 | 2019-06-21T03:48:20 | C++ | UTF-8 | Python | false | false | 7,670 | py | import sys
import tempfile
import unittest
import torch
import torch.distributed as c10d
import torch.multiprocessing as mp
from common_cuda import TEST_MULTIGPU
from common_utils import TestCase, load_tests, run_tests
from common_utils import NO_MULTIPROCESSING_SPAWN
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# Bail out of the whole module early when the environment cannot run these
# tests: the c10d distributed backend must be compiled in, and the "spawn"
# multiprocessing start method must be available on this platform.
if not c10d.is_available():
    print('c10d not available, skipping tests')
    sys.exit(0)
if NO_MULTIPROCESSING_SPAWN:
    print('spawn not available, skipping tests')
    sys.exit(0)
# True when the NCCL process-group backend is not compiled into this build;
# used by skipIf decorators below.
NO_NCCL = not hasattr(c10d, "ProcessGroupNCCL")
class ProcessGroupShareTensorTest(TestCase):
    """Verify that CUDA tensors shared with spawned worker processes can be
    used in c10d collectives (broadcast/allreduce/reduce/allgather) on the
    Gloo and NCCL backends."""
    # Number of spawned worker processes (one GPU each).
    world_size = 2
    @classmethod
    def opts(cls, threads=2):
        # Gloo options pinned to the loopback interface with a short
        # timeout so a wedged collective fails the test quickly.
        opts = c10d.ProcessGroupGloo.Options()
        opts.devices = [c10d.ProcessGroupGloo.create_tcp_device(interface="lo")]
        opts.timeout = 5.0
        opts.threads = threads
        return opts
    @classmethod
    def _init_pg_gloo(cls, rank, filename, world_size):
        # Workers rendezvous through a FileStore backed by `filename`.
        store = c10d.FileStore(filename, world_size)
        return c10d.ProcessGroupGloo(
            store, rank, world_size, ProcessGroupShareTensorTest.opts())
    @classmethod
    def _init_pg_nccl(cls, rank, filename, world_size):
        store = c10d.FileStore(filename, world_size)
        return c10d.ProcessGroupNCCL(store, rank, world_size)
    def _test_multiprocess(self, f, shared_tensors, init_pg, n_output):
        """Spawn world_size workers running `f`, then collect n_output
        (rank, expected, actual) triples from each worker and compare them.

        `c2p` carries results child -> parent; `p2c` carries the shutdown
        token parent -> child so workers keep shared tensors alive until
        the parent has finished checking.
        """
        ws = self.world_size
        # file store will delete the test file on destruction
        file = tempfile.NamedTemporaryFile(delete=False)
        ctx = mp.get_context('spawn')
        c2p = ctx.Queue(2)
        p2c = ctx.Queue(2)
        ps = []
        for i in range(ws):
            p = ctx.Process(
                target=f,
                args=(i, file.name, shared_tensors, ws, init_pg, c2p, p2c))
            p.start()
            ps.append(p)
        for _ in range(ws * n_output):
            pid, expected, result = c2p.get()
            self.assertEqual(
                expected,
                result,
                (
                    "Expect rank {} to receive tensor {} but got {}."
                ).format(pid, expected, result)
            )
        for _ in range(ws):
            p2c.put(0)
        for p in ps:
            p.join(2)
    # Why classmethod? multiprocessing cannot pickle TestCase subclass when in
    # spawn mode. See https://bugs.python.org/issue33884.
    @classmethod
    def _test_broadcast_process(
            cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c):
        pg = init_pg(rank, filename, world_size)
        xs = [shared_tensors[rank]]
        pg.broadcast(xs).wait()
        # Inputs are ones*rank, so every rank should end up with rank 0's
        # all-zeros tensor after the broadcast.
        c2p.put((rank, torch.zeros(2, 2), xs[0].to("cpu")))
        p2c.get()
    @unittest.skipIf(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
    def test_shared_broadcast_gloo(self):
        self._test_multiprocess(
            ProcessGroupShareTensorTest._test_broadcast_process,
            [torch.ones(2, 2).to(i) * i for i in range(self.world_size)],
            ProcessGroupShareTensorTest._init_pg_gloo,
            1)
    @unittest.skipIf(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
    @unittest.skipIf(NO_NCCL, "NCCL needed")
    def test_shared_broadcast_nccl(self):
        self._test_multiprocess(
            ProcessGroupShareTensorTest._test_broadcast_process,
            [torch.ones(2, 2).to(i) * i for i in range(self.world_size)],
            ProcessGroupShareTensorTest._init_pg_nccl,
            1)
    @classmethod
    def _test_allreduce_process(
            cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c):
        pg = init_pg(rank, filename, world_size)
        xs = [shared_tensors[rank]]
        # Both ranks contribute ones, so the SUM is ones*2 everywhere.
        pg.allreduce(xs, op=c10d.ReduceOp.SUM).wait()
        c2p.put((rank, torch.ones(2, 2) * 2, xs[0].to("cpu")))
        p2c.get()
    @unittest.skipIf(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
    def test_shared_allreduce_gloo(self):
        self._test_multiprocess(
            ProcessGroupShareTensorTest._test_allreduce_process,
            [torch.ones(2, 2).to(i) for i in range(self.world_size)],
            ProcessGroupShareTensorTest._init_pg_gloo,
            1)
    @unittest.skipIf(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
    @unittest.skipIf(NO_NCCL, "NCCL needed")
    def test_shared_allreduce_nccl(self):
        self._test_multiprocess(
            ProcessGroupShareTensorTest._test_allreduce_process,
            [torch.ones(2, 2).to(i) for i in range(self.world_size)],
            ProcessGroupShareTensorTest._init_pg_nccl,
            1)
    @classmethod
    def _test_reduce_process(
            cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c):
        pg = init_pg(rank, filename, world_size)
        x = shared_tensors[rank]
        pg.reduce(x, root=0, op=c10d.ReduceOp.SUM).wait()
        # Only the root (rank 0) holds the reduced sum; other ranks keep
        # their original ones tensor.
        if rank == 0:
            c2p.put((rank, torch.ones(2, 2) * 2, x.to("cpu")))
        else:
            c2p.put((rank, torch.ones(2, 2), x.to("cpu")))
        p2c.get()
    @unittest.skipIf(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
    @unittest.skipIf(NO_NCCL, "NCCL needed")
    def test_shared_reduce_nccl(self):
        self._test_multiprocess(
            ProcessGroupShareTensorTest._test_reduce_process,
            [torch.ones(2, 2).to(i) for i in range(self.world_size)],
            ProcessGroupShareTensorTest._init_pg_nccl,
            1)
    @classmethod
    def _test_allgather_process(
            cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c):
        pg = init_pg(rank, filename, world_size)
        xs = [shared_tensors[rank]]
        ys = [[torch.zeros_like(xs[0]) for i in range(world_size)]]
        pg.allgather(ys, xs).wait()
        # Slot i of the gathered output should hold rank i's ones*i tensor.
        for i in range(world_size):
            c2p.put((rank, torch.ones(2, 2) * i, ys[0][i].to("cpu")))
        p2c.get()
    @unittest.skipIf(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
    def test_shared_allgather_gloo(self):
        self._test_multiprocess(
            ProcessGroupShareTensorTest._test_allgather_process,
            [torch.ones(2, 2).to(i) * i for i in range(self.world_size)],
            ProcessGroupShareTensorTest._init_pg_gloo,
            self.world_size)
    @unittest.skipIf(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
    @unittest.skipIf(NO_NCCL, "NCCL needed")
    def test_shared_allgather_nccl(self):
        self._test_multiprocess(
            ProcessGroupShareTensorTest._test_allgather_process,
            [torch.ones(2, 2).to(i) * i for i in range(self.world_size)],
            ProcessGroupShareTensorTest._init_pg_nccl,
            self.world_size)
    @classmethod
    def _test_allgather_chunk_process(
            cls, rank, filename, shared_tensor, world_size, init_pg, c2p, p2c):
        pg = init_pg(rank, filename, world_size)
        # Each rank gathers its own row-chunk of one shared tensor; the
        # gathered result should reassemble the original chunks in order.
        chunks = torch.chunk(shared_tensor, world_size, dim=0)
        x = chunks[rank]
        ys = [torch.zeros_like(x) for _ in range(world_size)]
        pg.allgather(ys, x).wait()
        c2p.put((rank, chunks[0].to("cpu"), ys[0].to("cpu")))
        c2p.put((rank, chunks[1].to("cpu"), ys[1].to("cpu")))
        p2c.get()
    @unittest.skipIf(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
    def test_shared_allgather_chunk_gloo(self):
        self._test_multiprocess(
            ProcessGroupShareTensorTest._test_allgather_chunk_process,
            torch.tensor(range(4)).reshape(2, 2),
            ProcessGroupShareTensorTest._init_pg_gloo,
            self.world_size)
# Standard PyTorch test-module entry point.
if __name__ == '__main__':
    run_tests()
| [
"rnauhria@gmail.com"
] | rnauhria@gmail.com |
c441939749afe4e4ef03ce98b11f3db08171ecab | e393789a7b3e7cb50e3c6192843490b313004b51 | /interpreter/BSL_Expr/Variable.py | 589afb8d5d5da005c961709a99a6d7100c72597f | [] | no_license | migeed-z/Lisp_interpreter | 5e9694279169a924c864c017ff65e59fa25c0242 | 5992627e1cff299c5f3ed14064e499b9296fbc5f | refs/heads/master | 2021-01-19T03:39:15.280390 | 2016-07-27T17:40:58 | 2016-07-27T17:40:58 | 36,232,933 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | import DirPaths
from BSLExpr import BSLExpr
from BSLError import BSLError
from Global_Scope import foo
class Variable(BSLExpr):
    """
    To represent a class of Variables (a named reference resolved against
    local definitions first, then the global scope).
    """
    def __init__(self, name):
        """
        :param name: String representing the name of the variable
        """
        self.name = name

    def eval_internal(self, defs):
        """Resolve this variable: local `defs` first, then the global scope.

        Uses explicit `is None` checks (instead of truthiness) so values
        such as 0, False or "" bound to a variable still count as defined.
        Raises BSLError when the name is bound in neither scope.
        """
        val = defs.get(self.name)
        if val is not None:
            return val
        val = foo.getter().get(self.name)
        if val is not None:
            return val
        raise BSLError('Variable not defined')

    def equals(self, other):
        # Kept for API compatibility; delegates to __eq__ so both
        # equality entry points always agree.
        return self.__eq__(other)

    def __eq__(self, other):
        return isinstance(other, Variable) and self.name == other.name

    def __str__(self):
        return '%s(%s)' % ('Variable', self.name)
"migeed.z@outlook.com"
] | migeed.z@outlook.com |
d01a80ae4f6132dc1b95215bb5c4bd2e8dd5965d | 2f4184af31121fd31b397d5b529b795774b30856 | /backend/users/migrations/0002_auto_20200103_1043.py | afc8436f26bbd91b9e44b910a9d5eeb0c2462314 | [] | no_license | crowdbotics-apps/mobileappdeploy-dev-1434 | c0944b53808fbb02ff664a8f2bd012648fc9e352 | 5a9bcdda33a473174be7f0446e4109c1d8d2c383 | refs/heads/master | 2022-03-28T11:20:26.124765 | 2020-01-15T10:40:11 | 2020-01-15T10:40:11 | 231,558,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | # Generated by Django 2.2.9 on 2020-01-03 10:43
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: make User.name optional
    # (nullable and allowed to be blank in forms).
    dependencies = [
        ("users", "0001_initial"),
    ]
    operations = [
        migrations.AlterField(
            model_name="user",
            name="name",
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
cb78e9bfef4bda7c3de7f5f4cd7cb8b0170a5eac | bf5935cecf1b65461b1de9a67a013f0b03c3d1d0 | /boutique/migrations/0050_wishlist_product.py | 26d0cd8c9c65dd268439f466a6ceae86ac5b44bc | [] | no_license | meriemay/Shop | 53287aab6382163e6069130c8e5304ed7ffd0e3b | f5d44193e030c4ef9b5cf27896754767beaee3ef | refs/heads/master | 2021-01-18T17:50:49.688779 | 2017-08-29T14:34:34 | 2017-08-29T14:34:34 | 100,497,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-28 08:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: add the many-to-many
    # Wishlist.product relation.
    dependencies = [
        ('boutique', '0049_auto_20170825_1255'),
    ]
    operations = [
        migrations.AddField(
            model_name='wishlist',
            name='product',
            # NOTE(review): null=True has no effect on ManyToManyField
            # (Django warns with fields.W340); harmless, but could be
            # dropped from the model in a follow-up migration.
            field=models.ManyToManyField(blank=True, null=True, to='boutique.Product'),
        ),
    ]
| [
"you@example.com"
] | you@example.com |
1e40d32b4f8bdbc7f4d4153d6bba77e34b2f1ed3 | dfe2a52a1c36a28a8bf85af7efd42380d980b773 | /virtual/lib/python3.6/site-packages/django/contrib/contenttypes/fields.py | 42e06aa119e88a4867ce051566cddcf7503040c8 | [
"MIT"
] | permissive | virginiah894/Instagram-clone | 2c2a15d89fcdb25b22bd60428cf84a01f3bd553c | 4d8abe7bafefae06a0e462e6a47631c2f8a1d361 | refs/heads/master | 2022-12-10T06:56:21.105357 | 2020-01-07T14:14:50 | 2020-01-07T14:14:50 | 229,394,540 | 3 | 0 | MIT | 2022-12-08T03:23:40 | 2019-12-21T07:41:19 | Python | UTF-8 | Python | false | false | 26,251 | py | from collections import defaultdict
from django.contrib.contenttypes.models import ContentType
from django.core import checks
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, models, router, transaction
from django.db.models import DO_NOTHING
from django.db.models.base import ModelBase, make_foreign_order_accessors
from django.db.models.fields.mixins import FieldCacheMixin
from django.db.models.fields.related import (
ForeignObject, ForeignObjectRel, ReverseManyToOneDescriptor,
lazy_related_operation,
)
from django.db.models.query_utils import PathInfo
from django.utils.functional import cached_property
class GenericForeignKey(FieldCacheMixin):
    """
    Provide a generic many-to-one relation through the ``content_type`` and
    ``object_id`` fields.
    This class also doubles as an accessor to the related object (similar to
    ForwardManyToOneDescriptor) by adding itself as a model attribute.
    """
    # Field flags
    auto_created = False
    concrete = False
    editable = False
    hidden = False
    is_relation = True
    many_to_many = False
    many_to_one = True
    one_to_many = False
    one_to_one = False
    related_model = None
    remote_field = None
    def __init__(self, ct_field='content_type', fk_field='object_id', for_concrete_model=True):
        # Names of the two concrete model fields backing this virtual field.
        self.ct_field = ct_field
        self.fk_field = fk_field
        # When True, proxy models resolve to their concrete model's
        # ContentType; when False each proxy gets its own.
        self.for_concrete_model = for_concrete_model
        self.editable = False
        self.rel = None
        self.column = None
    def contribute_to_class(self, cls, name, **kwargs):
        # Register as a private (non-column) field and install this object
        # as the descriptor attribute on the model class.
        self.name = name
        self.model = cls
        cls._meta.add_field(self, private=True)
        setattr(cls, name, self)
    def get_filter_kwargs_for_object(self, obj):
        """See corresponding method on Field"""
        return {
            self.fk_field: getattr(obj, self.fk_field),
            self.ct_field: getattr(obj, self.ct_field),
        }
    def get_forward_related_filter(self, obj):
        """See corresponding method on RelatedField"""
        return {
            self.fk_field: obj.pk,
            self.ct_field: ContentType.objects.get_for_model(obj).pk,
        }
    def __str__(self):
        # Dotted "app.Model.field" path, e.g. "myapp.TaggedItem.content_object".
        model = self.model
        app = model._meta.app_label
        return '%s.%s.%s' % (app, model._meta.object_name, self.name)
    def check(self, **kwargs):
        # System checks: field name plus the two backing fields.
        errors = []
        errors.extend(self._check_field_name())
        errors.extend(self._check_object_id_field())
        errors.extend(self._check_content_type_field())
        return errors
    def _check_field_name(self):
        # Trailing underscores are reserved by the ORM's query syntax.
        if self.name.endswith("_"):
            return [
                checks.Error(
                    'Field names must not end with an underscore.',
                    obj=self,
                    id='fields.E001',
                )
            ]
        else:
            return []
    def _check_object_id_field(self):
        # The fk_field name must refer to a real field on the model.
        try:
            self.model._meta.get_field(self.fk_field)
        except FieldDoesNotExist:
            return [
                checks.Error(
                    "The GenericForeignKey object ID references the "
                    "nonexistent field '%s'." % self.fk_field,
                    obj=self,
                    id='contenttypes.E001',
                )
            ]
        else:
            return []
    def _check_content_type_field(self):
        """
        Check if field named `field_name` in model `model` exists and is a
        valid content_type field (is a ForeignKey to ContentType).
        """
        try:
            field = self.model._meta.get_field(self.ct_field)
        except FieldDoesNotExist:
            return [
                checks.Error(
                    "The GenericForeignKey content type references the "
                    "nonexistent field '%s.%s'." % (
                        self.model._meta.object_name, self.ct_field
                    ),
                    obj=self,
                    id='contenttypes.E002',
                )
            ]
        else:
            if not isinstance(field, models.ForeignKey):
                return [
                    checks.Error(
                        "'%s.%s' is not a ForeignKey." % (
                            self.model._meta.object_name, self.ct_field
                        ),
                        hint=(
                            "GenericForeignKeys must use a ForeignKey to "
                            "'contenttypes.ContentType' as the 'content_type' field."
                        ),
                        obj=self,
                        id='contenttypes.E003',
                    )
                ]
            elif field.remote_field.model != ContentType:
                return [
                    checks.Error(
                        "'%s.%s' is not a ForeignKey to 'contenttypes.ContentType'." % (
                            self.model._meta.object_name, self.ct_field
                        ),
                        hint=(
                            "GenericForeignKeys must use a ForeignKey to "
                            "'contenttypes.ContentType' as the 'content_type' field."
                        ),
                        obj=self,
                        id='contenttypes.E004',
                    )
                ]
            else:
                return []
    def get_cache_name(self):
        # Cache key used by FieldCacheMixin for the resolved related object.
        return self.name
    def get_content_type(self, obj=None, id=None, using=None):
        # Resolve a ContentType either from a model instance (routed to the
        # instance's database) or from a raw content-type id.
        if obj is not None:
            return ContentType.objects.db_manager(obj._state.db).get_for_model(
                obj, for_concrete_model=self.for_concrete_model)
        elif id is not None:
            return ContentType.objects.db_manager(using).get_for_id(id)
        else:
            # Defensive: callers must pass exactly one of `obj` or `id`.
            raise Exception("Impossible arguments to GFK.get_content_type!")
    def get_prefetch_queryset(self, instances, queryset=None):
        """Fetch the related objects for `instances` for prefetch_related().

        Returns the standard 6-tuple Django expects: fetched objects, a key
        function for the fetched side, one for the instance side, a
        single-valued flag, the cache name, and whether to cache.
        """
        if queryset is not None:
            raise ValueError("Custom queryset can't be used for this lookup.")
        # For efficiency, group the instances by content type and then do one
        # query per model
        fk_dict = defaultdict(set)
        # We need one instance for each group in order to get the right db:
        instance_dict = {}
        ct_attname = self.model._meta.get_field(self.ct_field).get_attname()
        for instance in instances:
            # We avoid looking for values if either ct_id or fkey value is None
            ct_id = getattr(instance, ct_attname)
            if ct_id is not None:
                fk_val = getattr(instance, self.fk_field)
                if fk_val is not None:
                    fk_dict[ct_id].add(fk_val)
                    instance_dict[ct_id] = instance
        ret_val = []
        for ct_id, fkeys in fk_dict.items():
            instance = instance_dict[ct_id]
            ct = self.get_content_type(id=ct_id, using=instance._state.db)
            ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys))
        # For doing the join in Python, we have to match both the FK val and the
        # content type, so we use a callable that returns a (fk, class) pair.
        def gfk_key(obj):
            ct_id = getattr(obj, ct_attname)
            if ct_id is None:
                return None
            else:
                model = self.get_content_type(id=ct_id,
                                              using=obj._state.db).model_class()
                return (model._meta.pk.get_prep_value(getattr(obj, self.fk_field)),
                        model)
        return (
            ret_val,
            lambda obj: (obj.pk, obj.__class__),
            gfk_key,
            True,
            self.name,
            True,
        )
    def __get__(self, instance, cls=None):
        # Descriptor protocol: class access returns the field itself.
        if instance is None:
            return self
        # Don't use getattr(instance, self.ct_field) here because that might
        # reload the same ContentType over and over (#5570). Instead, get the
        # content type ID here, and later when the actual instance is needed,
        # use ContentType.objects.get_for_id(), which has a global cache.
        f = self.model._meta.get_field(self.ct_field)
        ct_id = getattr(instance, f.get_attname(), None)
        pk_val = getattr(instance, self.fk_field)
        # Validate any cached object against the current ct/pk values so a
        # stale cache entry is refetched rather than returned.
        rel_obj = self.get_cached_value(instance, default=None)
        if rel_obj is not None:
            if ct_id != self.get_content_type(obj=rel_obj, using=instance._state.db).id:
                rel_obj = None
            else:
                pk = rel_obj._meta.pk
                # If the primary key is a remote field, use the referenced
                # field's to_python().
                pk_to_python = pk.target_field.to_python if pk.remote_field else pk.to_python
                if pk_to_python(pk_val) != rel_obj._get_pk_val():
                    rel_obj = None
                else:
                    return rel_obj
        if ct_id is not None:
            ct = self.get_content_type(id=ct_id, using=instance._state.db)
            try:
                rel_obj = ct.get_object_for_this_type(pk=pk_val)
            except ObjectDoesNotExist:
                # A dangling generic FK resolves to None (and caches it).
                pass
        self.set_cached_value(instance, rel_obj)
        return rel_obj
    def __set__(self, instance, value):
        # Assigning an object updates both backing fields and the cache;
        # assigning None clears them.
        ct = None
        fk = None
        if value is not None:
            ct = self.get_content_type(obj=value)
            fk = value.pk
        setattr(instance, self.ct_field, ct)
        setattr(instance, self.fk_field, fk)
        self.set_cached_value(instance, value)
class GenericRel(ForeignObjectRel):
    """
    Relation descriptor used by GenericRelation to record how the generic
    relation maps back to its target model.
    """
    def __init__(self, field, to, related_name=None, related_query_name=None, limit_choices_to=None):
        # No reverse accessor is created unless a related_query_name was
        # supplied ('+' disables it), and deleting through the generic
        # relation itself must be a no-op at this level.
        accessor_name = related_query_name or '+'
        super().__init__(
            field,
            to,
            related_name=accessor_name,
            related_query_name=related_query_name,
            limit_choices_to=limit_choices_to,
            on_delete=DO_NOTHING,
        )
class GenericRelation(ForeignObject):
"""
Provide a reverse to a relation created by a GenericForeignKey.
"""
# Field flags
auto_created = False
many_to_many = False
many_to_one = False
one_to_many = True
one_to_one = False
rel_class = GenericRel
    def __init__(self, to, object_id_field='object_id', content_type_field='content_type',
                 for_concrete_model=True, related_query_name=None, limit_choices_to=None, **kwargs):
        """Set up the reverse generic relation to model `to`.

        `object_id_field`/`content_type_field` name the backing fields of
        the GenericForeignKey on the target model. The field is forced to
        be blank, non-editable and non-serialized since it has no column.
        """
        kwargs['rel'] = self.rel_class(
            self, to,
            related_query_name=related_query_name,
            limit_choices_to=limit_choices_to,
        )
        kwargs['blank'] = True
        kwargs['on_delete'] = models.CASCADE
        kwargs['editable'] = False
        kwargs['serialize'] = False
        # This construct is somewhat of an abuse of ForeignObject. This field
        # represents a relation from pk to object_id field. But, this relation
        # isn't direct, the join is generated reverse along foreign key. So,
        # the from_field is object_id field, to_field is pk because of the
        # reverse join.
        super().__init__(to, from_fields=[object_id_field], to_fields=[], **kwargs)
        self.object_id_field_name = object_id_field
        self.content_type_field_name = content_type_field
        self.for_concrete_model = for_concrete_model
def check(self, **kwargs):
errors = super().check(**kwargs)
errors.extend(self._check_generic_foreign_key_existence())
return errors
def _is_matching_generic_foreign_key(self, field):
"""
Return True if field is a GenericForeignKey whose content type and
object id fields correspond to the equivalent attributes on this
GenericRelation.
"""
return (
isinstance(field, GenericForeignKey) and
field.ct_field == self.content_type_field_name and
field.fk_field == self.object_id_field_name
)
def _check_generic_foreign_key_existence(self):
target = self.remote_field.model
if isinstance(target, ModelBase):
fields = target._meta.private_fields
if any(self._is_matching_generic_foreign_key(field) for field in fields):
return []
else:
return [
checks.Error(
"The GenericRelation defines a relation with the model "
"'%s.%s', but that model does not have a GenericForeignKey." % (
target._meta.app_label, target._meta.object_name
),
obj=self,
id='contenttypes.E004',
)
]
else:
return []
    def resolve_related_fields(self):
        # The join maps the target model's object_id column back to this
        # model's primary key (the reverse of a normal FK join).
        self.to_fields = [self.model._meta.pk.name]
        return [(self.remote_field.model._meta.get_field(self.object_id_field_name), self.model._meta.pk)]
def _get_path_info_with_parent(self, filtered_relation):
"""
Return the path that joins the current model through any parent models.
The idea is that if you have a GFK defined on a parent model then we
need to join the parent model first, then the child model.
"""
# With an inheritance chain ChildTag -> Tag and Tag defines the
# GenericForeignKey, and a TaggedItem model has a GenericRelation to
# ChildTag, then we need to generate a join from TaggedItem to Tag
# (as Tag.object_id == TaggedItem.pk), and another join from Tag to
# ChildTag (as that is where the relation is to). Do this by first
# generating a join to the parent model, then generating joins to the
# child models.
path = []
opts = self.remote_field.model._meta.concrete_model._meta
parent_opts = opts.get_field(self.object_id_field_name).model._meta
target = parent_opts.pk
path.append(PathInfo(
from_opts=self.model._meta,
to_opts=parent_opts,
target_fields=(target,),
join_field=self.remote_field,
m2m=True,
direct=False,
filtered_relation=filtered_relation,
))
# Collect joins needed for the parent -> child chain. This is easiest
# to do if we collect joins for the child -> parent chain and then
# reverse the direction (call to reverse() and use of
# field.remote_field.get_path_info()).
parent_field_chain = []
while parent_opts != opts:
field = opts.get_ancestor_link(parent_opts.model)
parent_field_chain.append(field)
opts = field.remote_field.model._meta
parent_field_chain.reverse()
for field in parent_field_chain:
path.extend(field.remote_field.get_path_info())
return path
def get_path_info(self, filtered_relation=None):
opts = self.remote_field.model._meta
object_id_field = opts.get_field(self.object_id_field_name)
if object_id_field.model != opts.model:
return self._get_path_info_with_parent(filtered_relation)
else:
target = opts.pk
return [PathInfo(
from_opts=self.model._meta,
to_opts=opts,
target_fields=(target,),
join_field=self.remote_field,
m2m=True,
direct=False,
filtered_relation=filtered_relation,
)]
def get_reverse_path_info(self, filtered_relation=None):
opts = self.model._meta
from_opts = self.remote_field.model._meta
return [PathInfo(
from_opts=from_opts,
to_opts=opts,
target_fields=(opts.pk,),
join_field=self,
m2m=not self.unique,
direct=False,
filtered_relation=filtered_relation,
)]
def value_to_string(self, obj):
qs = getattr(obj, self.name).all()
return str([instance.pk for instance in qs])
def contribute_to_class(self, cls, name, **kwargs):
kwargs['private_only'] = True
super().contribute_to_class(cls, name, **kwargs)
self.model = cls
setattr(cls, self.name, ReverseGenericManyToOneDescriptor(self.remote_field))
# Add get_RELATED_order() and set_RELATED_order() to the model this
# field belongs to, if the model on the other end of this relation
# is ordered with respect to its corresponding GenericForeignKey.
if not cls._meta.abstract:
def make_generic_foreign_order_accessors(related_model, model):
if self._is_matching_generic_foreign_key(model._meta.order_with_respect_to):
make_foreign_order_accessors(model, related_model)
lazy_related_operation(make_generic_foreign_order_accessors, self.model, self.remote_field.model)
def set_attributes_from_rel(self):
pass
def get_internal_type(self):
return "ManyToManyField"
def get_content_type(self):
"""
Return the content type associated with this field's model.
"""
return ContentType.objects.get_for_model(self.model,
for_concrete_model=self.for_concrete_model)
def get_extra_restriction(self, where_class, alias, remote_alias):
field = self.remote_field.model._meta.get_field(self.content_type_field_name)
contenttype_pk = self.get_content_type().pk
cond = where_class()
lookup = field.get_lookup('exact')(field.get_col(remote_alias), contenttype_pk)
cond.add(lookup, 'AND')
return cond
def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
"""
Return all objects related to ``objs`` via this ``GenericRelation``.
"""
return self.remote_field.model._base_manager.db_manager(using).filter(**{
"%s__pk" % self.content_type_field_name: ContentType.objects.db_manager(using).get_for_model(
self.model, for_concrete_model=self.for_concrete_model).pk,
"%s__in" % self.object_id_field_name: [obj.pk for obj in objs]
})
class ReverseGenericManyToOneDescriptor(ReverseManyToOneDescriptor):
"""
Accessor to the related objects manager on the one-to-many relation created
by GenericRelation.
In the example::
class Post(Model):
comments = GenericRelation(Comment)
``post.comments`` is a ReverseGenericManyToOneDescriptor instance.
"""
@cached_property
def related_manager_cls(self):
return create_generic_related_manager(
self.rel.model._default_manager.__class__,
self.rel,
)
def create_generic_related_manager(superclass, rel):
"""
Factory function to create a manager that subclasses another manager
(generally the default manager of a given model) and adds behaviors
specific to generic relations.
"""
class GenericRelatedObjectManager(superclass):
def __init__(self, instance=None):
super().__init__()
self.instance = instance
self.model = rel.model
content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(
instance, for_concrete_model=rel.field.for_concrete_model)
self.content_type = content_type
self.content_type_field_name = rel.field.content_type_field_name
self.object_id_field_name = rel.field.object_id_field_name
self.prefetch_cache_name = rel.field.attname
self.pk_val = instance.pk
self.core_filters = {
'%s__pk' % self.content_type_field_name: content_type.id,
self.object_id_field_name: self.pk_val,
}
def __call__(self, *, manager):
manager = getattr(self.model, manager)
manager_class = create_generic_related_manager(manager.__class__, rel)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def __str__(self):
return repr(self)
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
db = self._db or router.db_for_read(self.model, instance=self.instance)
return queryset.using(db).filter(**self.core_filters)
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
queryset = super().get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super().get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {
'%s__pk' % self.content_type_field_name: self.content_type.id,
'%s__in' % self.object_id_field_name: {obj.pk for obj in instances}
}
# We (possibly) need to convert object IDs to the type of the
# instances' PK in order to match up instances:
object_id_converter = instances[0]._meta.pk.to_python
return (
queryset.filter(**query),
lambda relobj: object_id_converter(getattr(relobj, self.object_id_field_name)),
lambda obj: obj.pk,
False,
self.prefetch_cache_name,
False,
)
def add(self, *objs, bulk=True):
db = router.db_for_write(self.model, instance=self.instance)
def check_and_update_obj(obj):
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected, got %r" % (
self.model._meta.object_name, obj
))
setattr(obj, self.content_type_field_name, self.content_type)
setattr(obj, self.object_id_field_name, self.pk_val)
if bulk:
pks = []
for obj in objs:
if obj._state.adding or obj._state.db != db:
raise ValueError(
"%r instance isn't saved. Use bulk=False or save "
"the object first." % obj
)
check_and_update_obj(obj)
pks.append(obj.pk)
self.model._base_manager.using(db).filter(pk__in=pks).update(**{
self.content_type_field_name: self.content_type,
self.object_id_field_name: self.pk_val,
})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
check_and_update_obj(obj)
obj.save()
add.alters_data = True
def remove(self, *objs, bulk=True):
if not objs:
return
self._clear(self.filter(pk__in=[o.pk for o in objs]), bulk)
remove.alters_data = True
def clear(self, *, bulk=True):
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.delete()` creates its own atomic block which
# contains the `pre_delete` and `post_delete` signal handlers.
queryset.delete()
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
obj.delete()
_clear.alters_data = True
def set(self, objs, *, bulk=True, clear=False):
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs, bulk=bulk)
else:
old_objs = set(self.using(db).all())
new_objs = []
for obj in objs:
if obj in old_objs:
old_objs.remove(obj)
else:
new_objs.append(obj)
self.remove(*old_objs)
self.add(*new_objs, bulk=bulk)
set.alters_data = True
def create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super().using(db).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super().using(db).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super().using(db).update_or_create(**kwargs)
update_or_create.alters_data = True
return GenericRelatedObjectManager
| [
"virgyperry@gmail.com"
] | virgyperry@gmail.com |
9f381d38ffe18786f175c6900088a7afe97ad4c0 | eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429 | /data/output/375_processed | 681f703fc07d0c89061c242532b1892f7665f4f2 | [] | no_license | bopopescu/pythonanalyzer | db839453bde13bf9157b76e54735f11c2262593a | 8390a0139137574ab237b3ff5fe8ea61e8a0b76b | refs/heads/master | 2022-11-22T02:13:52.949119 | 2019-05-07T18:42:52 | 2019-05-07T18:42:52 | 282,079,884 | 0 | 0 | null | 2020-07-23T23:46:09 | 2020-07-23T23:46:08 | null | UTF-8 | Python | false | false | 958 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that list xcode_settings are flattened before being exported to the
environment.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
foo("This test is currently disabled: https://crbug.com/483696.")
sys.foo(0)
test = TestGyp.foo(formats=['ninja', 'make', 'xcode'])
CHDIR = 'non-strs-flattened-to-env'
INFO_PLIST_PATH = 'Test.app/Contents/Info.plist'
test.foo('test.gyp', chdir=CHDIR)
test.foo('test.gyp', test.ALL, chdir=CHDIR)
info_plist = test.foo(INFO_PLIST_PATH, chdir=CHDIR)
test.foo(info_plist)
test.foo(info_plist, '''\
\t<key>My Variable</key>
\t<string>some expansion</string>''')
test.foo(info_plist, '''\
\t<key>CFlags</key>
\t<string>-fstack-protector-all -fno-strict-aliasing -DS="A Space"</string>''')
test.foo()
| [
"rares.begu@gmail.com"
] | rares.begu@gmail.com | |
05d60c2656e98a7fcf8f29e8cc2fdaf391b8b3c8 | 24a13b07623ce41e57ea4df1fcce0befb14d3406 | /model/densenet201/model34_val3.py | b5f3c8ab8fdd3b376f4e844f5fc00d552e6f292e | [
"MIT"
] | permissive | shorxp/jd-fashion | 5f37e6c2235200944869e9a0da4d741c89d63b9e | 817f693672f418745e3a4c89a0417a3165b08130 | refs/heads/master | 2021-09-22T18:40:13.030601 | 2018-09-13T13:50:05 | 2018-09-13T13:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,298 | py | """
以model1为原型,新增real crop
"""
import math
import os
import queue
import time
import keras
from keras.layers import Dense, BatchNormalization, Activation
import config
from util import data_loader
from util import keras_util
from util.keras_util import KerasModelConfig
model_config = KerasModelConfig(k_fold_file="1.txt",
model_path=os.path.abspath(__file__),
image_resolution=224,
data_type=[config.DATA_TYPE_ORIGINAL],
label_up_sampling=[100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
label_position=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
label_color_augment=[0, 3, 6, 12],
downsampling=0.5,
train_batch_size=[16, 16, 16],
val_batch_size=256,
predict_batch_size=256,
epoch=[1, 4, 10],
lr=[0.0005, 0.00005, 0.000005],
data_visualization=True,
tta_flip=True,
freeze_layers=[-1, 0.6, 5])
def get_model(freeze_layers=-1, lr=0.01, output_dim=1, weights="imagenet"):
base_model = keras.applications.DenseNet201(include_top=False, weights=weights,
input_shape=model_config.image_shape, pooling="avg")
x = base_model.output
x = Dense(256, use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
predictions = Dense(units=output_dim, activation='sigmoid')(x)
model = keras.Model(inputs=base_model.input, outputs=predictions)
if freeze_layers == -1:
print("freeze all basic layers, lr=%f" % lr)
for layer in base_model.layers:
layer.trainable = False
else:
if freeze_layers < 1:
freeze_layers = math.floor(len(base_model.layers) * freeze_layers)
for layer in range(freeze_layers):
base_model.layers[layer].train_layer = False
print("freeze %d basic layers, lr=%f" % (freeze_layers, lr))
model.compile(loss="binary_crossentropy",
optimizer=keras.optimizers.Adam(lr=lr))
# model.summary()
print("basic model have %d layers" % len(base_model.layers))
return model
def train():
evaluate_queue = queue.Queue()
evaluate_task = keras_util.EvaluateTask(evaluate_queue)
evaluate_task.setDaemon(True)
evaluate_task.start()
checkpoint = keras_util.EvaluateCallback(model_config, evaluate_queue)
start = time.time()
model_config.save_log("####### start train model")
init_stage = model_config.get_init_stage()
model_config.save_log("####### init stage is %d" % init_stage)
for i in range(init_stage, len(model_config.epoch)):
model_config.save_log("####### lr=%f, freeze layers=%2f epoch=%d" % (
model_config.lr[i], model_config.freeze_layers[i], model_config.epoch[i]))
clr = keras_util.CyclicLrCallback(base_lr=model_config.lr[i], max_lr=model_config.lr[i] * 5,
step_size=model_config.get_steps_per_epoch(i) / 2)
train_flow = data_loader.KerasGenerator(model_config=model_config,
featurewise_center=True,
featurewise_std_normalization=True,
width_shift_range=0.15,
height_shift_range=0.1,
horizontal_flip=True,
real_transform=True,
rescale=1. / 256).flow_from_files(model_config.train_files, mode="fit",
target_size=model_config.image_size,
batch_size=
model_config.train_batch_size[i],
shuffle=True,
label_position=model_config.label_position)
if i == 0:
model_config.save_log("####### initial epoch is 0, end epoch is %d" % model_config.epoch[i])
model = get_model(freeze_layers=model_config.freeze_layers[i], lr=model_config.lr[i],
output_dim=len(model_config.label_position))
model.fit_generator(generator=train_flow,
steps_per_epoch=model_config.get_steps_per_epoch(i),
epochs=model_config.epoch[i],
workers=16,
verbose=1,
callbacks=[checkpoint, clr])
else:
model = get_model(freeze_layers=model_config.freeze_layers[i], output_dim=len(model_config.label_position),
lr=model_config.lr[i], weights=None)
if i == init_stage:
model_config.save_log("####### load weight file: %s" % model_config.get_weights_path(model_config.initial_epoch))
model.load_weights(model_config.get_weights_path(model_config.initial_epoch))
model_config.save_log("####### initial epoch is %d, end epoch is %d" % (
model_config.initial_epoch, model_config.epoch[i]))
model.fit_generator(generator=train_flow,
steps_per_epoch=model_config.get_steps_per_epoch(i),
epochs=model_config.epoch[i],
initial_epoch=model_config.initial_epoch,
workers=16,
verbose=1,
callbacks=[checkpoint, clr])
else:
model_config.save_log("####### load weight file: %s" % model_config.get_weights_path(model_config.epoch[i - 1]))
model.load_weights(model_config.get_weights_path(model_config.epoch[i - 1]))
model_config.save_log(
"####### initial epoch is %d, end epoch is %d" % (model_config.epoch[i - 1], model_config.epoch[i]))
model.fit_generator(generator=train_flow,
steps_per_epoch=model_config.get_steps_per_epoch(i),
epochs=model_config.epoch[i],
initial_epoch=model_config.epoch[i - 1],
workers=16,
verbose=1,
callbacks=[checkpoint, clr])
model_config.save_log("####### train model spend %d seconds" % (time.time() - start))
model_config.save_log("####### train model spend %d seconds average" % ((time.time() - start) / model_config.epoch[-1]))
| [
"13658247573@163.com"
] | 13658247573@163.com |
fb9de33dcb80016f6158f209087b5ae75fde255b | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/detection/GFocalV2/mmdet/models/detectors/__init__.py | ed9cd584a6fe22550b3a4223bcf5df0888522c82 | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 1,718 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .atss import ATSS
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .cornernet import CornerNet
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .nasfcos import NASFCOS
from .paa import PAA
from .point_rend import PointRend
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .single_stage import SingleStageDetector
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'FastRCNN', 'FasterRCNN', 'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade',
'RetinaNet', 'FCOS', 'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector',
'FOVEA', 'FSAF', 'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA',
'YOLOV3', 'YOLACT', 'VFNet'
]
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
2e630d4a8372fcdc894eec1be1721f1cfc21972e | d3e6d6555b0314936902727af36de2f1b7432bf8 | /subsets-ii/subsets-ii.py | 333efee762db057e3ce4afb4fc9b0facc37e122e | [] | no_license | fly2rain/LeetCode | 624b1e06e1aa3174dfb5c81834b58cc8fd7ad073 | 4ddb5a051c6e2051f016a675fd2f5d566c800c2a | refs/heads/master | 2021-01-18T03:12:22.402044 | 2015-12-28T04:31:19 | 2015-12-28T04:31:19 | 85,842,050 | 0 | 1 | null | 2017-03-22T15:05:20 | 2017-03-22T15:05:19 | null | UTF-8 | Python | false | false | 1,312 | py |
class Solution(object):
def subsetsWithDup(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
if not nums:
return [[]]
def sort_counter():
d = {}
for n in nums:
d[n] = d.get(n, 0) + 1
skey = sorted(d.keys())
return skey, d
skey, d = sort_counter()
def ksubsets(k, start, end):
if k == 0:
return [[]]
elif k == 1:
return [[skey[i]] for i in range(start, end+1)]
elif start == end:
if d[skey[start]] >= k:
return [[skey[start]] * k]
else:
return []
else:
ret = []
pivot = skey[start]
for j in range(1+min(d[pivot], k)):
ret += [[pivot] * j + l for l in ksubsets(k-j, start+1, end)]
return ret
return reduce(lambda x, y: x + y,
(ksubsets(i, 0, len(skey)-1) for i in range(len(nums)+1)), [])
if __name__ == '__main__':
# print Solution().subsetsWithDup([1,2,2,4])
# print Solution().subsetsWithDup([1,2,2])
l = Solution().subsetsWithDup([1,2,2,3,3,4,4,4,6])
print l
| [
"xuzheng1111@gmail.com"
] | xuzheng1111@gmail.com |
53f4d7f167a7e4f1ec37b499541df8bc66a49962 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_014/ch9_2020_03_02_19_38_03_083205.py | 14b91cb4207b8842a88d632bf6a05132a10f8131 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | def calcula_volume_da_esfera (R, π):
v = 4/3 * (π * R**3)
return v
R = 2
π = 3,14
print (calcula_volume_da_esfera) | [
"you@example.com"
] | you@example.com |
b801d42c1ee4294fcc6d73cda3b9f82fd3ad45e1 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/278/72086/submittedfiles/testes.py | c60d08e3c84dcd4c77f3b9a3deb528914f847acd | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | a = 30
b = 5
c = 10
if a<b:
print"comando 1"
else :
if a<c:
print"comando 2"
else:
if b<c:
print"comando 3"
print"pronto!" | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
a5765a7098151bdad915d3383c81270bd45cf115 | f34dc191304f0c54527948aa7b7123fd6efe85b9 | /connect.py | e0c1778b8defe4e2ecb40504dc48d2b47a36deae | [] | no_license | sujith1919/groza | b3fc4641de48423da9a219c33e390ea2c4915687 | 5b68e052266d5307a0058d7031b3b20c4a1b9bcb | refs/heads/master | 2023-02-28T03:09:51.568592 | 2021-02-02T16:34:49 | 2021-02-02T16:34:49 | 335,353,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | #!/usr/bin/python
import psycopg2
from config import config
def connect():
""" Connect to the PostgreSQL database server """
conn = None
try:
# read connection parameters
params = config()
# connect to the PostgreSQL server
print('Connecting to the PostgreSQL database...')
conn = psycopg2.connect(**params)
# create a cursor
cur = conn.cursor()
# execute a statement
#print('PostgreSQL database version:')
cur.execute('SELECT version()')
cur.execute('SELECT * from LIFEBOAT')
# display the PostgreSQL database server version
db_version = cur.fetchall()
print(db_version)
# close the communication with the PostgreSQL
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
print('Database connection closed.')
if __name__ == '__main__':
connect()
print("lol")
| [
"jayarajan.sujith@oracle.com"
] | jayarajan.sujith@oracle.com |
9f493953956592c94d7fa5dfb412e0a3d595dd40 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /rFK7WftrcrEu6rbu8_20.py | 478746c1b355f82bace8cd65b2e14e906b2b1d3e | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,736 | py | """
Given a **Binary Search Tree** (BST) implementation, complete the traverse
function which is present in the BST class. Here you have to perform the
level-order traversal on BST which is another term for **Breadth First
Traversal**.
### Examples
traverse() ➞ [10, 4, 20, 1, 5]
10
/ \
4 20
/ \
1 5
traverse() ➞ [100, 70, 200, 34, 80, 300]
100
/ \
70 200
/ \ \
34 80 300
### Notes
Make sure you don't modify the code that is already in the **Code** tab. Only
complete the `traverse()` function and return an array.
"""
from collections import deque
# Please don't modify the code below the traverse function is in BST class
# Node class
class Node:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
# BST Class
class BST:
def __init__(self):
self.head = None
def insert(self, data):
new_node = Node(data)
if self.head == None:
self.head = new_node
else:
current = self.head
while True:
if data > current.data and current.right:
current = current.right
elif data < current.data and current.left:
current = current.left
elif data > current.data:
current.right = new_node
break
else:
current.left = new_node
break
return self.head
def traverse(self):
res = []
q = deque()
if self.head:
q.append(self.head)
while q:
cur = q.popleft()
res.append(cur.data)
for child in [cur.left, cur.right]:
if child:
q.append(child)
return res
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
25d146cca323bb4a46251eff73137c8e26b8acdf | 795c40a44498636d7462ceff7bec97244d05e6d9 | /nappy/rdf.py | cc525eb741f4a1529cba8c58b55662cb2c527a2b | [
"MIT"
] | permissive | ryokbys/nap | cb663f6c1df7333c8d18d6403707628c56cc6ed2 | 4159fe0e8fe5a637110e0a0545210a540be8a13e | refs/heads/master | 2023-06-10T05:23:10.671151 | 2023-05-07T06:00:28 | 2023-05-07T06:00:28 | 20,047,602 | 33 | 4 | null | null | null | null | UTF-8 | Python | false | false | 25,424 | py | #!/usr/bin/env python
"""
Calculate the radial distribution function (RDF) from files.
It takes statistical averaging over atoms in a file and over files.
Usage:
{0:s} [options] INFILE [INFILE...]
Options:
-h, --help Show this help message and exit.
-d DR Width of the bin. [default: 0.1]
-r,--rmax RMAX
Cutoff radius of radial distribution. [default: 5.0]
--rmin RMIN
Minimum radius to be considered. [default: 0.0]
--gsmear=SIGMA
Width of Gaussian smearing, zero means no smearing. [default: 0]
--nnmax=NNMAX
Max num of neighbors when counting neighbors. [default: 100]
-o OUT Name of file to be written in addition to out.rdf if specified. [default: None]
--specorder=SPECORDER
Order of species separated by comma, like, --specorder=W,H. [default: None]
  --out4fp Flag to write out in general fp.py format. [default: False]
--pairs PAIRS
Pairs to be extracted, available only if out4fp is specified.
hyphen-connected, comma separated, e.g.) Li-O,P-O [default: None]
--skip=NSKIP
Skip first NSKIP steps from the statistics. [default: 0]
--no-pairwise
Not to take averaging by pairwise.
--plot Plot figures. [default: False]
--SQ Calc and output S(Q) converted from RDF to out.sq
--qmax QMAX Cutoff wavenumber. [default: 20.0]
--qmin QMIN Shortest wavenumber. [default: 0.7]
--scatter-length LENGTHS
Scattering lengths of corresponding species. [default: None]
--fortran Try using fortran function for computing RDF.
"""
from __future__ import print_function
import os,sys
from datetime import datetime
import numpy as np
import copy
from docopt import docopt
import nappy
from nappy.gaussian_smear import gsmear
from nappy.common import get_key
__author__ = "Ryo KOBAYASHI"
__version__ = "230107"
def norm(vector):
    """Return the Euclidean (L2) norm of *vector*.

    Parameters
    ----------
    vector : iterable of float
        Components of the vector.

    Returns
    -------
    float
        sqrt of the sum of squared components.
    """
    #...Single idiomatic pass instead of a manual accumulation loop;
    #...also avoids shadowing the function name with a local variable.
    return np.sqrt(sum(e*e for e in vector))
def rdf_of_atom(ia,nsys,rmax=5.0,dr=0.1,sigma=0):
    """
    Compute the radial distribution function (RDF) centered on atom-ia.

    Parameters
    ----------
    ia : int
        Index of the central atom.
    nsys : nappy system object
        System providing scaled positions, species IDs and neighbor search.
    rmax : float
        Cutoff radius of the distribution.
    dr : float
        Bin width; the number of bins is int(rmax/dr).
    sigma : int or float
        Width of Gaussian smearing; 0 means no smearing.

    Returns
    -------
    rd : np.ndarray, shape (nr,)
        Bin-center radii, dr*ir + dr/2.
    rdfi : np.ndarray, shape (nspcs+1, nr)
        Row 0 is the total RDF of atom-ia; row j (1 <= j <= nspcs) is the
        partial RDF with respect to species j.
    """
    #...Radial points (bin centers)
    nr = int(rmax/dr)
    rd = np.array([dr*ir+dr/2 for ir in range(nr)],)
    nspcs = len(nsys.specorder)
    #...ndri[0,:] counts all neighbors, ndri[j,:] only those of species j
    ndri = np.zeros((nspcs+1,nr),dtype=float)
    spos = nsys.get_scaled_positions()
    sids = nsys.atoms.sid
    natm = nsys.num_atoms()
    pi = spos[ia]
    hmat = nsys.get_hmat()
    #...Histogram of neighbor distances of atom-ia
    for ja in nsys.neighbors_of(ia,rcut=rmax):
        pj = spos[ja]
        jsid = sids[ja]
        pij = pj -pi
        pij = pij -np.round(pij)  # minimum-image convention
        vij = np.dot(hmat,pij)
        rij2 = np.dot(vij,vij)
        rij = np.sqrt(rij2)
        #...Clamp to the last bin to avoid an out-of-range index
        #...when rij happens to equal rmax (consistent with rdf()).
        ir = min(int(rij/dr),nr-1)
        ndri[0,ir] += 1.0
        ndri[jsid,ir] += 1.0
    #...Normalize to get raw RDF(ia)
    #.....Total RDF(ia); the ir=0 bin is skipped since r=0 there
    tmp = 4.0 *np.pi *(natm-1) *dr
    for ir in range(1,nr):
        r = dr *ir
        ndri[0,ir] /= tmp*r*r
    #.....Species-decomposed RDF(ia)
    natms = [ float(natm) ]
    for isp in range(1,nspcs+1):
        natms.append(float(nsys.num_atoms(isp)))
    vol = nsys.get_volume()
    isid = sids[ia]
    tmp0 = 4.0 *np.pi *dr /vol
    for jsid in range(1,nspcs+1):
        nj = natms[jsid]
        if jsid == isid:
            #...Exclude the central atom from its own species count
            tmp = tmp0 *(nj-1)
        else:
            tmp = tmp0 *nj
        for ir in range(1,nr):
            r = dr *ir
            ndri[jsid,ir] /= tmp*r*r
    #...Gaussian smearing
    if sigma != 0:
        rdfi = np.zeros(ndri.shape)
        #...Total
        rdfi[0,:] = gsmear(rd,ndri[0,:],sigma)
        #...Species-decomposed
        for jsid in range(1,nspcs+1):
            rdfi[jsid,:] = gsmear(rd,ndri[jsid,:],sigma)
    else:
        #...BUGFIX: previously an all-zero array was returned when sigma==0;
        #...return the un-smeared histogram instead.
        rdfi = ndri
    return rd,rdfi
def rdf_desc_of(ia,nsys,rmax=5.0,dr=0.1):
    """
    RDF descriptor of a given atom-ia.

    The descriptor holds, for every species, the position and height of
    the 1st and 2nd peaks of the smeared partial RDF around atom-ia.
    For example, in a Mg-Si-O 3-component system an atom gets:
      R1(Mg), H1(Mg), R2(Mg), H2(Mg), R1(Si), H1(Si), R2(Si), H2(Si),
      R1(O), H1(O), R2(O), H2(O)
    Thus the dimension of the RDF descriptor of an N-component system is
    4N; entries of peaks that are not found are left as 0.0.

    Returns
    -------
    rdf_desci : np.ndarray, shape (4*nspcs,)
    """
    rd,rdfi = rdf_of_atom(ia,nsys,rmax=rmax,dr=dr,sigma=2)
    nspcs = len(nsys.specorder)
    nr = len(rd)
    rdf_desci = np.zeros(4*nspcs,dtype=float)
    for jsp in range(1,nspcs+1):
        signs = np.zeros(len(rd),dtype=int)
        rdfij = rdfi[jsp,:]
        #...Sign of the central-difference slope at each interior point.
        #...BUGFIX: the loop must stop at nr-2; the previous bound (nr)
        #...made rdfij[ir+1] raise IndexError at ir == nr-1.
        for ir in range(1,nr-1):
            diff = (rdfij[ir+1]-rdfij[ir-1])
            if diff < 0.0:
                signs[ir] = -1
            else:
                signs[ir] = +1
        found_1st = False
        for ir in range(1,nr):
            #...A sign change of the slope marks an extremum
            if signs[ir] *signs[ir-1] < 0:
                if not found_1st:
                    rdf_desci[(jsp-1)*4 +0] = rd[ir]
                    rdf_desci[(jsp-1)*4 +1] = rdfij[ir]
                    found_1st = True
                elif signs[ir-1] > 0: # 2nd peak (a maximum)
                    rdf_desci[(jsp-1)*4 +2] = rd[ir]
                    rdf_desci[(jsp-1)*4 +3] = rdfij[ir]
                    break
    return rdf_desci
def rdf_desc(nsys,rmax=5.0,dr=0.1,progress=False):
    """
    Build the per-atom RDF descriptor matrix of the given system.

    Row ia of the returned (natm, 4*nspcs) array is the descriptor of
    atom ia as produced by rdf_desc_of().  When *progress* is True a
    short status line is printed roughly every 5% of the atoms.
    """
    import sys
    n_atoms = nsys.num_atoms()
    n_species = len(nsys.specorder)
    descriptors = np.zeros((n_atoms,4*n_species),dtype=float)
    #...Report roughly 20 times over the whole loop (at least every atom)
    report_every = max(int(n_atoms/20),1)
    for i_atom in range(n_atoms):
        if progress and i_atom % report_every == 0:
            print('ia/natm = {0:8d}/{1:8d}'.format(i_atom,n_atoms))
            sys.stdout.flush()
        descriptors[i_atom,:] = rdf_desc_of(i_atom,nsys,rmax=rmax,dr=dr)
    return descriptors
def read_rdf(fname='out.rdf'):
    """
    Read RDF data from a file.

    The file is expected to contain comment lines starting with '#' and
    data lines of the form:  r  g(r)  [g_ij(r) ...]  (same format as the
    write_normal function).

    Parameters
    ----------
    fname : str
        Name of the RDF file to read.

    Returns
    -------
    rs : np.ndarray, shape (nr,)
        Radial points (first column).
    rdfs : np.ndarray, shape (nr, nd)
        RDF values (remaining columns), one row per radial point.
    """
    with open(fname,'r') as f:
        lines = f.readlines()
    #...Keep only non-comment, non-blank data lines
    dlines = [ l for l in lines if not l.startswith('#') and l.strip() ]
    nr = len(dlines)
    #...BUGFIX: was len(line[-1].split())-1, i.e. the last *character* of
    #...the last seen line; use the last data line instead.
    nd = len(dlines[-1].split()) -1
    rdfs = np.zeros((nr,nd),dtype=float)
    rs = np.zeros(nr,dtype=float)
    #...BUGFIX: the row index was never incremented (every row overwrote
    #...row 0) and the data variable was misspelled ("data" vs "dat").
    for ir,line in enumerate(dlines):
        dat = line.split()
        rs[ir] = float(dat[0])
        rdfs[ir,:] = [ float(x) for x in dat[1:] ]
    return rs,rdfs
def rdf(nsys0,nspcs,dr,nr,rmax0,pairwise=False,rmin=0.0,
        nnmax=100,fortran=False,mask=None):
    """
    Compute the RDF of the given system.

    The radial range covered is [rmin, rmin+dr*nr], split into NR bins of
    width DR.  The position of a bin is defined as the center of the bin,
    r_i = rmin + DR*(i+0.5).

    Parameters
    ----------
    nsys0 : nappy system object
        System to analyze (deep-copied; not modified).
    nspcs : int
        Number of species.
    dr : float
        Bin width.
    nr : int
        Number of bins.
    rmax0 : float
        Nominal cutoff radius; the effective cutoff is recomputed below
        as rmin + dr*nr so that all NR bins are covered.
    pairwise : bool
        If True, partial RDFs are normalized per species pair; otherwise
        every entry shares the total-RDF normalization.
    rmin : float
        Minimum radius to be considered.
    nnmax : int
        Max number of neighbors per atom for the pair list.
    fortran : bool
        If True, try the compiled backend first and fall back to python.
    mask : list or None
        Not supported yet (kept for interface compatibility).

    Returns
    -------
    rd : np.ndarray, shape (nr,)
        Bin-center radii.
    nadr : np.ndarray, shape (nspcs+1, nspcs+1, nr)
        nadr[0,0,:] is the total RDF; nadr[i,j,:] with 1-based species
        IDs (i <= j) are the partial RDFs.
    """
    natm0= nsys0.num_atoms()
    #...NOTE: mask is not available now
    # if mask:
    #     if type(mask) != list:
    #         raise TypeError('mask is not list.')
    #     if len(mask) != natm0:
    #         raise ValueError('len(mask) != len(nsys0)')
    rmax = rmin +dr*nr # Use corrected rmax to cover regions of NR bins
    r2max = rmax*rmax
    nsys = copy.deepcopy(nsys0)
    #...Expand the cell if it is too small for the cutoff sphere
    n1,n2,n3= nsys.get_expansion_num(3.0*rmax)
    if not (n1==1 and n2==1 and n3==1):
        print(' Extend system by {0:d}x{1:d}x{2:d}'.format(n1,n2,n3))
        nsys.repeat(n1,n2,n3)
    natm = len(nsys)
    #...natms[0] is the total number of atoms; natms[i] that of species i
    natms = [ float(natm) ]
    for ispcs in range(1,nspcs+1):
        natms.append(float(nsys.num_atoms(ispcs)))
    vol= nsys.get_volume()
    hmat = nsys.get_hmat()
    # Since an access to pandas DataFrame is much slower than that to numpy array,
    # use numpy arrays in the most time consuming part.
    poss = nsys.get_scaled_positions()
    sids = np.array(nsys.atoms.sid,)
    natm = len(nsys)
    if fortran:
        #...Try the compiled backend; any failure falls through to python
        try:
            # import nappy.pmd.mods as pmods
            import nappy.pmd.pmd_wrapper as pw
            hmati = nsys.get_hmat_inv()
            tags = nsys.get_tags()
            iprint = 0
            l1st = True
            # lspr = pmods.pairlist.mk_lspr_sngl(natm,nnmax,tags,
            #                                    poss.T,rmax,hmat,hmati,
            #                                    iprint,l1st)
            # rd,rdfs= pmods.distfunc.calc_rdf(tags,hmat,poss.T,rmax,rmin,
            #                                  lspr,iprint,l1st,pairwise,
            #                                  nspcs,nr)
            rd,rdfs= pw.wrap_calc_rdf(poss.T,tags,hmat,hmati,rmax,rmin,l1st,
                                      pairwise,nr,nspcs)
            return rd, rdfs.T
        except Exception as e:
            print(' Since failed to use the fortran routines, use python instead...')
            print(e)
            pass
    #...Bin-center radii
    rd= np.array([ rmin +dr*(ir+0.5) for ir in range(nr) ])
    nadr= np.zeros((nspcs+1,nspcs+1,nr),dtype=float)
    ndr = np.zeros((nspcs+1,nspcs+1,nr),dtype=float)
    nsys.make_pair_list(rcut=rmax,nnmax=nnmax)
    hmat = nsys.get_hmat()
    #...Histogram all neighbor distances
    for ia in range(natm):
        isid = sids[ia]
        # ndr[:,:] = 0.0
        pi = poss[ia]
        for ja in nsys.neighbors_of(ia,):
            jsid = sids[ja]
            pj = poss[ja]
            #...Minimum-image vector in scaled coordinates
            pij = pj -pi -np.round(pj-pi)
            vij = np.dot(hmat,pij)
            dij2 = np.dot(vij,vij)
            dij = np.sqrt(dij2)
            rrdr= (dij-rmin)/dr
            if rrdr < 0.0:
                continue
            #...Clamp to the last bin so dij==rmax does not overflow
            ir = min(int(rrdr),nr-1)
            if ir < 0:
                print('Something is wrong: ir<0 ')
                print(ia,ja,dij,rrdr,ir)
                raise
            # ndr[0,0,ir] += 1.0
            # ndr[isid,jsid,ir] += 1.0
            nadr[0,0,ir] += 1.0
            nadr[isid,jsid,ir] += 1.0
        # for ir in range(nr):
        #     nadr[:,:,ir] += ndr[:,:,ir]
    #...normalize
    if pairwise:
        #...Total
        tmp = 4.0 *np.pi *natms[0]*(natms[0]-1)/vol *dr
        for ir in range(nr):
            # r= rmin +dr*(ir+0.5)
            r = rd[ir]
            nadr[0,0,ir] /= tmp*r*r
        #...Pairwise normalization, one factor per species pair (i <= j)
        for isid in range(1,nspcs+1):
            ni = natms[isid]
            if ni == 0: continue
            for jsid in range(isid,nspcs+1):
                nj = natms[jsid]
                if nj == 0: continue
                tmp = 4.0*np.pi*dr /vol
                if isid == jsid:
                    if ni == 1: continue
                    tmp *= ni*(ni-1)
                else:
                    tmp *= ni*nj
                for ir in range(nr):
                    # r= dr *(ir-0.5)
                    r = rd[ir]
                    nadr[isid,jsid,ir] /= tmp*r*r
    else:
        #...Every entry shares the total-RDF normalization factor
        tmp = 4.0 *np.pi *natms[0]*(natms[0]-1)/vol *dr
        for ir in range(nr):
            # r= dr *(ir -0.5)
            r = rd[ir]
            nadr[:,:,ir] /= tmp*r*r
    return rd,nadr
def rdf_average(infiles,specorder,dr=0.1,rmin=0.0,rmax=3.0,pairwise=False,nnmax=100,fortran=False):
    """
    Average RDFs over several structure files.

    Parameters
    ----------
    infiles : list of str
        Structure files readable by nappy.io.read; must be non-empty.
    specorder : list of str
        Species names; length defines the number of species.
    dr, rmin, rmax : bin width and radial bounds.
    pairwise, nnmax, fortran : forwarded to rdf().

    Returns
    -------
    rd : bin-center radii (from the last file processed).
    agr : averaged RDF array of shape (nspcs+1, nspcs+1, nr).

    Raises
    ------
    ValueError
        If infiles is empty, or if rdf() returns wrongly-shaped radii.
    NOTE: a missing file still prints an error and calls sys.exit(),
    preserving the original command-line behavior.
    """
    # BUGFIX: with an empty infiles list the original fell through the loop
    # and raised UnboundLocalError on `rd`; fail with a clear message instead.
    if not infiles:
        raise ValueError('infiles must contain at least one file.')
    nspcs = len(specorder)
    tiny = 1.0e-8
    # +tiny guards against float round-off dropping the last bin.
    nr = int((rmax-rmin+tiny)/dr)  #+1 , no need to add 1
    agr = np.zeros((nspcs+1,nspcs+1,nr),dtype=float)
    nsum = 0
    for infname in infiles:
        if not os.path.exists(infname):
            print("[Error] File, {0}, does not exist !!!".format(infname))
            sys.exit()
        nsys = nappy.io.read(fname=infname,specorder=specorder)
        print(' File =',infname)
        rd,gr = rdf(nsys,nspcs,dr,nr,rmax,rmin=rmin,
                    pairwise=pairwise,nnmax=nnmax,fortran=fortran)
        if rd.shape[-1] != nr:
            raise ValueError('The shape of radius data is wrong.')
        agr += gr
        nsum += 1
    agr /= nsum
    return rd,agr
def gr_to_SQ(rs, gr, rho, qmin=0.7, qmax=20.0, nq=100):
    """
    Fourier-transform a radial distribution function g(r) into the static
    structure factor S(Q) by trapezoidal integration:
        S(Q) = 1 + 4*pi*rho * integral[ (g(r)-1) * sin(Qr)/(Qr) * r^2 dr ]

    Parameters
    ----------
    rs, gr : arrays of radii and g(r) values (same length).
    rho : number density of the system.
    qmin, qmax, nq : Q-grid bounds and size; qmin must be positive.

    Returns
    -------
    qs, sq : arrays of length nq with the Q values and S(Q).
    """
    npts = len(rs)
    r_lo = min(rs)
    r_hi = max(rs)
    qs = np.zeros(nq)
    sq = np.zeros(nq)
    dq = (qmax - qmin) / nq
    dr = (r_hi - r_lo) / npts
    for iq in range(nq):
        q = iq * dq + qmin
        qs[iq] = q
        if q < 1.0e-15:
            raise ValueError('qmin should not be 0.')
        integral = 0.0
        # Trapezoid rule over consecutive radius pairs; skip r ~ 0 to
        # avoid dividing by zero in sin(qr)/(qr).
        for j in range(1, npts):
            r_prev = rs[j - 1]
            r_cur = rs[j]
            if abs(r_prev) < 1.0e-15:
                continue
            f_prev = (gr[j - 1] - 1.0) * np.sin(q * r_prev) / (q * r_prev) * r_prev * r_prev
            f_cur = (gr[j] - 1.0) * np.sin(q * r_cur) / (q * r_cur) * r_cur * r_cur
            integral += 0.5 * (f_prev + f_cur) * dr
        sq[iq] = 1.0 + 4.0 * np.pi * rho * integral
    return qs, sq
def SQ_to_gr(qs, sq, rho, rmin=0.0, rmax=5.0, nr=100):
    """
    Inverse-transform a static structure factor S(Q) back to g(r) by
    trapezoidal integration:
        g(r) = 1 + 1/(2*pi^2*r*rho) * integral[ (S(Q)-1) * sin(Qr) * Q dQ ]

    Parameters
    ----------
    qs, sq : arrays of Q values and S(Q) (same length).
    rho : number density of the system.
    rmin, rmax, nr : r-grid bounds and size.

    Returns
    -------
    rs, gr : arrays of length nr with the radii and g(r); g(0) is set to 0.
    """
    npts = len(qs)
    q_lo = min(qs)
    q_hi = max(qs)
    rs = np.zeros(nr)
    gr = np.zeros(nr)
    dq = (q_hi - q_lo) / npts
    dr = (rmax - rmin) / nr
    for ir in range(nr):
        r = ir * dr + rmin
        rs[ir] = r
        # At r ~ 0 the prefactor 1/r diverges; define g(0) = 0.
        if r < 1.0e-15:
            gr[ir] = 0.0
            continue
        integral = 0.0
        for j in range(1, npts):
            q_cur = qs[j]
            q_prev = qs[j - 1]
            if q_prev < 1.0e-15:
                continue
            f_prev = (sq[j - 1] - 1.0) * np.sin(q_prev * r) * q_prev
            f_cur = (sq[j] - 1.0) * np.sin(q_cur * r) * q_cur
            integral += 0.5 * (f_prev + f_cur) * dq
        gr[ir] = 1.0 + 1.0 / (2.0 * np.pi**2 * r * rho) * integral
    return rs, gr
def write_rdf_normal(fname, specorder, nspcs, rd, agr, nr):
    """
    Write averaged RDF data as a gnuplot-friendly table.

    Column 1 is the bin-center radius, column 2 the total (all-all) RDF,
    and the remaining columns the species-pair RDFs for i <= j pairs, in
    the order announced by the '#' header line.

    Parameters
    ----------
    fname : output file name.
    specorder : list of species names (length >= nspcs).
    nspcs : number of species.
    rd : radii array of length nr.
    agr : RDF array of shape (nspcs+1, nspcs+1, nr).
    nr : number of radial bins to write.
    """
    with open(fname, 'w') as f:
        # Header: describe every column.
        f.write('# 1:{0:10s} 2:all-all, '.format('rd[i],'))
        col = 2
        for isid in range(1, nspcs + 1):
            si = specorder[isid - 1]
            for jsid in range(isid, nspcs + 1):
                sj = specorder[jsid - 1]
                col += 1
                f.write(' {0:d}:{1:s}-{2:s}, '.format(col, si, sj))
        f.write('\n')
        # One row per radial bin.
        for i in range(nr):
            f.write(' {0:10.4f} {1:13.5e}'.format(rd[i], agr[0, 0, i]))
            for isid in range(1, nspcs + 1):
                for jsid in range(isid, nspcs + 1):
                    f.write(' {0:12.4e}'.format(agr[isid, jsid, i]))
            f.write('\n')
    return None
def write_rdf_out4fp(fname,specorder,nspcs,agr,nr,rmax,pairs=None,rmin=0.0,nperline=6):
    """
    Write out RDF data in general fp.py format.

    The file starts with '#' comment lines (timestamp, command line,
    selected pairs, r-range), then one line with the number of data and
    their weight, then the flattened RDF values, nperline per line.

    Parameters
    ----------
    fname : output file name.
    specorder : list of species names (for labeling pairs).
    nspcs : number of species (unused here but kept for a uniform API).
    agr : RDF array of shape (nspcs+1, nspcs+1, nr).
    nr : number of radial bins.
    rmax, rmin : radial bounds recorded in the header.
    pairs : list/tuple of (isid, jsid) sid pairs to write; sid 0 means
        "All". When None, only the total RDF agr[0,0,:] is written.
    nperline : int
        Number of data in a line. [default: 6]
    """
    # Idiom fixes: `pairs != None` -> `pairs is not None` (PEP 8 E711) and
    # isinstance() instead of a `type(...) not in (...)` membership test.
    if pairs is not None:
        if not isinstance(pairs, (list, tuple)):
            raise TypeError('pairs must be list or tuple.')
        # Flatten the selected pair curves back-to-back.
        ndat = nr * len(pairs)
        data = np.zeros(ndat)
        n = 0
        for pair in pairs:
            isid, jsid = pair
            for i in range(nr):
                data[n] = agr[isid, jsid, i]
                n += 1
    else:
        print('Since pairs are not specified, use total RDF instead.')
        ndat = nr
        data = np.zeros(ndat)
        n = 0
        for i in range(nr):
            data[n] = agr[0, 0, i]
            n += 1
    with open(fname, 'w') as f:
        cmd = ' '.join(s for s in sys.argv)
        f.write('# Output at {0:s} from,\n'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
        f.write('# {0:s}\n'.format(cmd))
        if pairs is not None:
            f.write('# RDF for pairs: ')
            for pair in pairs:
                isid, jsid = pair
                if isid == 0:
                    si = 'All'
                else:
                    si = specorder[pair[0] - 1]
                if jsid == 0:
                    sj = 'All'
                else:
                    sj = specorder[pair[1] - 1]
                f.write(' {0:s}-{1:s},'.format(si, sj))
            f.write('\n')
        f.write('# rmin,rmax, nr = {0:.3f}, {1:.3f}, {2:d}\n'.format(rmin, rmax, nr))
        f.write('#\n')
        #...Num of data, weight for the data
        f.write(' {0:6d} {1:7.3f}\n'.format(ndat, 1.0))
        # Emit the data nperline values per line.
        j0 = 0
        while True:
            f.write(' '.join('{0:12.4e}'.format(data[j])
                             for j in range(j0, j0 + nperline) if j < ndat))
            f.write('\n')
            j0 += nperline
            if j0 >= ndat:
                break
    return None
def write_sq_normal(fname, qs, sq):
    """
    Write S(Q) data in a plain, gnuplot-readable two-column format:
    a two-line '#' header followed by "Q  S(Q)" rows.

    Parameters
    ----------
    fname : output file name.
    qs, sq : arrays of Q values and S(Q) values of equal length.
    """
    with open(fname, 'w') as f:
        f.write('# S(Q) computed in rdf.py\n')
        f.write('# Q, S(Q)\n')
        for i in range(len(qs)):
            f.write(' {0:10.4f} {1:10.5f}\n'.format(qs[i], sq[i]))
    return None
def write_sq_out4fp(fname, qs, sq, nperline=6):
    """
    Write S(Q) data in the out4fp format used by fp.py:
    '#' header lines, one line with the data count and weight, then the
    S(Q) values nperline per line (Q values are not written).

    Parameters
    ----------
    fname : output file name.
    qs, sq : arrays of Q values and S(Q) values (qs only sets the count).
    nperline : number of values per data line.
    """
    ndat = len(qs)
    with open(fname, 'w') as f:
        f.write('# S(Q) computed in rdf.py\n')
        f.write('# Output S(Q) in out4fp format.\n')
        f.write('#\n')
        #...Num of data, weight for the data
        f.write(' {0:6d} {1:7.3f}\n'.format(ndat, 1.0))
        # Emit the values in fixed-width chunks of nperline.
        start = 0
        while True:
            row = sq[start:start + nperline]
            f.write(' '.join('{0:12.4e}'.format(v) for v in row))
            f.write('\n')
            start += nperline
            if start >= ndat:
                break
    return None
def plot_figures(specorder,rd,agr):
    """
    Save RDF plots to PNG files.

    Writes graph_rdf_total.png with the total RDF; for multi-species
    systems, additionally writes graph_rdfs.png with an upper-triangular
    grid of per-pair RDF subplots.

    Parameters
    ----------
    specorder : list of species names.
    rd : radii array.
    agr : RDF array of shape (nspcs+1, nspcs+1, nr); index 0 is the total.
    """
    import matplotlib.pyplot as plt
    import seaborn as sns
    sns.set(context='talk',style='ticks')
    nspcs = len(specorder)
    plt.figure(figsize=(8,6))
    x = rd
    y = agr[0,0,:]
    plt.plot(x,y,'r-',label='Total RDF')
    plt.xlabel('Distance (A)')
    plt.ylabel('RDF')
    plt.legend()
    plt.savefig("graph_rdf_total.png", format='png', dpi=300, bbox_inches='tight')
    # Single species: the total RDF is the only curve; nothing more to plot.
    if nspcs == 1:
        return
    plt.clf()
    fig, axes = plt.subplots(nspcs,nspcs,figsize=(15,10),sharex=True)
    for i in range(nspcs):
        isp = i + 1
        for j in range(nspcs):
            jsp = j + 1
            # Only the upper triangle (i <= j) carries data; blank the rest.
            if j < i:
                axes[i,j].axis('off')
                continue
            ax = axes[i,j]
            y = agr[isp,jsp,:]
            ax.plot(x,y,'r-')
            # ax.set_title('{0:d}-{1:d}'.format(isp,jsp))
            ax.text(0.05, 0.8, '{0:s}-{1:s}'.format(specorder[i],specorder[j]),
                    transform=ax.transAxes, ha="left")
            # Label only the diagonal/first subplot to reduce clutter.
            if isp==jsp:
                ax.set_xlabel('Distance (A)')
            if isp==1 and jsp==1:
                ax.set_ylabel('RDF')
    plt.savefig("graph_rdfs.png", format='png', dpi=300, bbox_inches='tight')
    return
def nbplot(nsys,dr=0.1,rmin=0.0,rmax=5.0,nnmax=200,pairs=None,sigma=0):
    """
    Plot RDFs of given nsys on the jupyter notebook.
    pairs option should be in the form of list of species pair list, e.g., (('Si','Si'),('Si','O')).

    Parameters
    ----------
    nsys : system object with a specorder attribute.
    dr, rmin, rmax : bin width and radial bounds for the RDF.
    nnmax : neighbor-list capacity passed to rdf().
    pairs : optional iterable of (species, species) name pairs to plot;
        when falsy, all i<=j pairs are plotted.
    sigma : Gaussian smearing width (in bins); 0 disables smearing.
    """
    # JPY_PARENT_PID is set by the Jupyter kernel; refuse to run elsewhere.
    if not 'JPY_PARENT_PID' in os.environ:
        raise Exception('This routine must be called on jupyter notebook.')
    import matplotlib.pyplot as plt
    # Seaborn is optional; styling only.
    try:
        import seaborn as sns
        sns.set(context='talk',style='ticks')
    except:
        pass
    nspcs = len(nsys.specorder)
    try:
        nr = int((rmax-rmin)/dr) #+1
        rd,gr= rdf(nsys,nspcs,dr,nr,rmax,rmin=rmin,nnmax=nnmax)
    except:
        # NOTE(review): this bare except hides the real cause of the
        # failure — consider re-raising with `from` or logging the error.
        raise Exception('rdf(..) failed.')
    if sigma > 0:
        #...Smearing of total RDF
        grt= gsmear(rd,gr[0,0,:],sigma)
        gr[0,0,:] = grt[:]
        #...Smearing of inter-species RDF
        for isid in range(1,nspcs+1):
            for jsid in range(isid,nspcs+1):
                grt= gsmear(rd,gr[isid,jsid,:],sigma)
                gr[isid,jsid,:] = grt[:]
    plt.figure(figsize=(8,6))
    x = rd
    if not pairs:
        # Plot every unique species pair (i <= j).
        for i in range(nspcs):
            isp = i +1
            spi = nsys.specorder[i]
            for j in range(nspcs):
                jsp = j +1
                spj = nsys.specorder[j]
                if j<i:
                    continue
                y = gr[isp,jsp,:]
                plt.plot(x,y,label='{0:s}-{1:s}'.format(spi,spj))
    else: # only specified pairs are plotted in addition to total RDF
        for p in pairs:
            spi,spj = p
            try:
                # Species names -> 1-based sid indices into gr.
                isp = nsys.specorder.index(spi) +1
                jsp = nsys.specorder.index(spj) +1
            except:
                raise ValueError('No such species or pairs.')
            y = gr[isp,jsp,:]
            plt.plot(x,y,label='{0:s}-{1:s}'.format(spi,spj))
    #...Total RDF
    y = gr[0,0,:]
    plt.plot(x,y,'r--',label='Total RDF')
    plt.xlabel('Distance ($\mathrm{\AA}$)')
    plt.ylabel('RDF')
    plt.legend(bbox_to_anchor=(1.05,1))
    plt.show()
    return None
def main():
    """
    Command-line entry point.

    Parses docopt arguments, averages RDFs over the input files, optionally
    converts the total RDF to S(Q), writes out.rdf (and out.sq / ofname),
    and optionally plots the results.
    """
    args = docopt(__doc__.format(os.path.basename(sys.argv[0])), version=__version__)
    infiles = args['INFILE']
    dr = float(args['-d'])
    rmax = float(args['--rmax'])
    rmin = float(args['--rmin'])
    sigma = int(args['--gsmear'])
    nnmax = int(args['--nnmax'])
    ofname = args['-o']
    # Grow the neighbor-list capacity with the cutoff radius so large rmax
    # values do not overflow the fixed-size neighbor lists.
    if nnmax < int(rmax**3):
        newnnmax = int(rmax**3)
        print(' nnmax is updated from {0:d} to {1:d} according to rmax.'.format(nnmax, newnnmax))
        nnmax = newnnmax
    if ofname == 'None':
        ofname = None
    specorder = [x for x in args['--specorder'].split(',')]
    if specorder == ['None']:
        specorder = []
    plot = args['--plot']
    nskip = int(args['--skip'])
    SQ = args['--SQ']
    if SQ:
        qmax = float(args['--qmax'])
        qmin = float(args['--qmin'])
        lscatter = [float(x) for x in args['--scatter-length'].split(',')]
        if len(lscatter) != len(specorder):
            raise ValueError('--scatter-length is not set correctly.')
    out4fp = args['--out4fp']
    fortran = args['--fortran']
    if out4fp and ofname is None:
        raise ValueError("Output file name must be specified with option -o.")
    if out4fp and not SQ:
        # out4fp RDF output needs per-pair curves: force pairwise
        # normalization and resolve species names to sid pairs (sid 0 = All).
        pairwise = True
        pairs0 = args['--pairs'].split(',')
        pairs = []
        for pair in pairs0:
            spi, spj = pair.split('-')
            try:
                isid = specorder.index(spi) + 1
            except ValueError:
                isid = 0
            try:
                jsid = specorder.index(spj) + 1
            except ValueError:
                jsid = 0
            # Store pairs with isid <= jsid, matching agr's upper triangle.
            if jsid < isid:
                isid, jsid = jsid, isid
            pairs.append((isid, jsid))
    else:
        no_pairwise = args['--no-pairwise']
        pairwise = not no_pairwise
        pairs = None
    nspcs = len(specorder)
    if nspcs < 1:
        raise ValueError('--specorder must be set.')
    if len(infiles) > 1:
        infiles.sort(key=get_key, reverse=True)
        del infiles[:nskip]
    if len(infiles) < 1:
        raise ValueError('No input files to be processed.')
    print(' Number of files to be processed: ', len(infiles))
    tiny = 1.0e-8
    nr = int((rmax-rmin+tiny)/dr)  #+1
    rd, agr = rdf_average(infiles, specorder, dr=dr, rmin=rmin, rmax=rmax,
                          pairwise=pairwise, nnmax=nnmax, fortran=fortran)
    if sigma != 0:
        #...Smearing of total RDF
        agrt = gsmear(rd, agr[0, 0, :], sigma)
        agr[0, 0, :] = agrt[:]
        #...Smearing of inter-species RDF
        for isid in range(1, nspcs+1):
            for jsid in range(isid, nspcs+1):
                agrt = gsmear(rd, agr[isid, jsid, :], sigma)
                agr[isid, jsid, :] = agrt[:]
    if SQ:
        nsys = nappy.io.read(infiles[0])
        rho = float(nsys.num_atoms()) / nsys.get_volume()
        if nspcs > 1:
            #...Redefine total RDF as weighted sum of g_{ij}(r) in case of
            # multiple species, weighting by concentrations and scattering
            # lengths: w_ij = c_i*c_j*b_i*b_j/<b>.
            # NOTE(review): conventional Faber-Ziman weights divide by
            # <b>**2 — confirm the single power of bmean is intended.
            natms = [float(nsys.num_atoms())]
            cs = [1.0]
            for ispcs in range(1, nspcs+1):
                natms.append(float(nsys.num_atoms(ispcs)))
                cs.append(natms[ispcs]/natms[0])
            bmean = 0.0
            for isid in range(1, nspcs+1):
                bi = lscatter[isid-1]
                ci = cs[isid]
                bmean += ci*bi
            agr[0, 0, :] = 0.0
            for isid in range(1, nspcs+1):
                bi = lscatter[isid-1]
                ci = cs[isid]
                for jsid in range(isid, nspcs+1):
                    bj = lscatter[jsid-1]
                    cj = cs[jsid]
                    wij = ci*cj*bi*bj/bmean
                    # Off-diagonal pairs appear twice (ij and ji).
                    if isid == jsid:
                        agr[0, 0, :] += agr[isid, jsid, :] * wij
                    else:
                        agr[0, 0, :] += 2.0*agr[isid, jsid, :] * wij
        # BUGFIX: qmin was previously hard-coded to 0.7 here, silently
        # ignoring the parsed --qmin option.
        qs, sqs = gr_to_SQ(rd, agr[0, 0, :], rho, qmin=qmin, qmax=qmax, nq=200)
    #...Regardless of ofname, write out.rdf in normal format
    write_rdf_normal('out.rdf', specorder, nspcs, rd, agr, nr)
    if SQ:
        write_sq_out4fp('out.sq', qs, sqs)
    #...Format of output (named by ofname) depends on out4fp
    if ofname is not None:
        if out4fp:
            write_rdf_out4fp(ofname, specorder, nspcs, agr, nr, rmax, pairs=pairs, rmin=rmin)
        else:
            write_rdf_normal(ofname, specorder, nspcs, rd, agr, nr)
    if plot:
        plot_figures(specorder, rd, agr)
        print('')
        print(' RDF graphes are plotted.')
        if nspcs == 1:
            print(' Check graph_rdf_total.png')
        else:
            print(' Check graph_rdf_total.png and graph_rdfs.png')
    else:
        print(' Check out.rdf with gnuplot, like')
        print(" gnuplot> plot 'out.rdf' us 1:2 w l")
        print('')
    if ofname is not None:
        print(" In addition to out.rdf, {0:s} is also written.".format(ofname))
    print('')
    return None
if __name__ == "__main__":
    # Entry point when executed as a command-line script.
    main()
| [
"ryo.kbys@gmail.com"
] | ryo.kbys@gmail.com |
59123f8e441542d1a5ab991404a47831d48ce76d | 5ebb37aa988cbff11eb7d537298f5caec32bf79b | /docs/source/with_sidebar/conf.py | 7230a34f5dd17848d3c6fe8fae9b85251e68dfda | [] | no_license | gabrielfalcao/sphinx-bulma-theme | 07468b853d648d0c3fdf2df75f325e89c2d26d4f | 90f04d0a964d1d35b17ea3efc6e00d1692a8d96b | refs/heads/master | 2020-03-13T13:52:02.093165 | 2018-04-30T04:55:58 | 2018-04-30T04:55:58 | 131,147,099 | 6 | 0 | null | 2019-12-20T17:50:05 | 2018-04-26T11:41:16 | CSS | UTF-8 | Python | false | false | 5,075 | py | # -*- coding: utf-8 -*-
import sys
# pathlib2 is the Python 2 backport; fall back to the stdlib pathlib on 3.x.
try:
    from pathlib2 import Path
except ImportError:
    from pathlib import Path
# Make the repository root importable so the in-tree theme package is found
# without installation.
project_path = Path(__file__).absolute().parent.joinpath('../../..')
sys.path.insert(0, project_path.as_posix())
import sphinx_bulma_theme  # noqa
project = 'Sphinx Bulma Theme'
copyright = '2018, Gabriel Falcao'
author = 'Gabriel Falcao'
# Version/release are taken from the theme package so docs track the code.
version = sphinx_bulma_theme.version
release = version
needs_sphinx = '1.7.1'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosectionlabel',
    'sphinx.ext.coverage',
    'sphinx.ext.doctest',
    'sphinx.ext.githubpages',
    'sphinx.ext.ifconfig',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
    'sphinxcontrib.httpdomain',
    'sphinxcontrib.mermaid',
    'sphinx-jsonschema',
]
# Use the bulma theme from the local package (this build exercises the
# sidebar configuration).
html_theme = "bulma"
html_theme_path = [sphinx_bulma_theme.get_html_theme_path()]
html_theme_options = {
    'show_topbar': False,
    'logo_path': 'logo.png',
    'analytics_id': None,
    'breadcrumbs_at_top': True,
    'canonical_url': None,
    'collapse_navigation': False,
    'content_margin_left': None,
    'content_padding_left': None,
    'content_padding_top': None,
    'display_version': True,
    'logo_only': False,
    'navigation_depth': 4,
    'prev_next_buttons_location': 'bottom',
    'sidebar_class': 'has-text-dark',
    'sidebar_container_class': 'is-3',
    'sidebar_right': None,
    'sidebar_style': None,
    'sticky_navigation': True,
}
html_static_path = ['_static']
pygments_style = 'friendly'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'SphinxBulmaThemedoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'SphinxBulmaTheme.tex', 'Sphinx Bulma Theme Documentation',
     'Gabriel Falcão', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'sphinxbulmatheme', 'Sphinx Bulma Theme Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'SphinxBulmaTheme', 'Sphinx Bulma Theme Documentation',
     author, 'SphinxBulmaTheme', 'One line description of project.',
     'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3/', None),
    'sphinx': ('http://www.sphinx-doc.org/en/master', None),
}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| [
"gabriel@nacaolivre.org"
] | gabriel@nacaolivre.org |
7d2e9e462d909fa1e72fc960b036bbe98ba5cdca | 42a9a9ae2951a32bf181afd9edbcf7e6716dbcc8 | /server.py | e5cfec6b6537ca36b0b484d62fdbdf47f7709e23 | [] | no_license | mariiagracheva/flask | 0608eee43e9ded1296e25862ea38494e438ba4fe | aa02b37f5c1619f732e6d21d8995553bed2ffd44 | refs/heads/master | 2021-01-11T17:22:47.997442 | 2017-01-23T04:14:57 | 2017-01-23T04:14:57 | 79,769,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,654 | py | from flask import Flask, request, render_template, flash, redirect
from flask_debugtoolbar import DebugToolbarExtension
import jinja2
import locale
app = Flask(__name__)
# Required to use Flask sessions and the debug toolbar
# NOTE(review): hard-coded secret key is fine for a classroom demo, but
# must come from configuration/environment in any real deployment.
app.secret_key = "ABC"
# Job choices for the application form's drop-down; the leading empty
# string renders a blank default option.
JOBS = ['', 'Software Engineer', 'QA Engineer', 'Product Manager']
# YOUR ROUTES GO HERE
@app.route("/")
def start():
    """Render the home page from templates/index.html."""
    template_name = "index.html"
    return render_template(template_name)
@app.route("/application-form")
def form():
    """Render the job application form, feeding the module-level JOBS
    list into the template's job drop-down."""
    context = {"jobs": JOBS}
    return render_template("application-form.html", **context)
@app.route("/application-success", methods=["POST"])
def success():
    """Handle a submitted application form.

    Re-renders the form when any field is missing or blank; otherwise
    shows a summary page with the salary formatted as US currency.
    """
    firstname = request.form.get('firstname')
    lastname = request.form.get('lastname')
    raw_salary = request.form.get('salary')
    job = request.form.get('job')

    # BUGFIX: validate BEFORE parsing. The original called
    # int(request.form.get('salary')) first, so a blank salary raised
    # ValueError instead of re-rendering the form — and the subsequent
    # `salary == ""` check compared the already-formatted currency string,
    # which could never be empty. `not x` also catches missing fields
    # (None) in addition to empty strings.
    if not firstname or not lastname or not raw_salary or not job:
        return render_template("application-form.html", jobs=JOBS)

    # en_US locale so locale.currency renders "$1,234.00"-style output.
    locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
    salary = locale.currency(int(raw_salary), symbol=True, grouping=True)
    return render_template("application-response.html",
                           firstname=firstname,
                           lastname=lastname,
                           salary=salary,
                           job=job)
if __name__ == "__main__":
    # We have to set debug=True here, since it has to be True at the
    # point that we invoke the DebugToolbarExtension
    app.debug = True
    # Use the DebugToolbar
    DebugToolbarExtension(app)
    # NOTE(review): 0.0.0.0 with debug=True exposes the Werkzeug debugger
    # on all interfaces — acceptable only inside a dev VM.
    app.run(host="0.0.0.0")
| [
"vagrant@vagrant.vm"
] | vagrant@vagrant.vm |
3443fafba6cbda0baf4361654caae167bcb39623 | 92137962a84e724df31b63367854349a875f1c43 | /tapis_cli/commands/taccapis/v2/files/formatters.py | 4655b555819846a123c2173baea50413542a73f3 | [
"BSD-3-Clause"
] | permissive | TACC-Cloud/tapis-cli | e3a26e79a20d1ada4cb2dc9ef204cae3e385bfe7 | d34e8635d3dbacc8276cf52b6bae04caacd655de | refs/heads/main | 2023-04-08T14:47:27.707885 | 2022-02-13T17:43:26 | 2022-02-13T17:43:26 | 203,083,094 | 11 | 3 | BSD-3-Clause | 2022-04-01T20:23:23 | 2019-08-19T02:21:28 | Python | UTF-8 | Python | false | false | 1,439 | py | """Formatters customized for system records and listings
"""
from tapis_cli.commands.taccapis.formatters import (TaccApisCommandBase,
TaccApisFormatOne,
TaccApisFormatMany)
from tapis_cli.clients.services.mixins import ParserExtender
from tapis_cli.utils import humanize_bytes
from .models import File
# Public API of this module: the concrete formatter classes only
# (FilesBase is an internal shared base).
__all__ = [
    'FilesFormatOne', 'FilesFormatMany', 'FilesHistoryFormatMany',
    'FilesPemsFormatMany'
]
class FilesBase(TaccApisCommandBase):
    """Shared base for files formatters; binds the files service id type
    and adds humanized rendering of sizes in table output."""
    # Identifier type for files records, taken from the File model.
    service_id_type = File.service_id_type

    def render_extended_parser_value(self,
                                     key,
                                     value,
                                     parsed_args,
                                     formatter=None):
        """Post-process one (key, value) pair before display.

        For table output with --ls-humanized, byte counts ('length',
        'size') are rendered via humanize_bytes; timestamps pass through
        unchanged. All other cases return the pair as-is.
        """
        if formatter == 'table':
            # Timestamps are currently passed through even when humanized
            # output is requested (presumably a placeholder for date
            # humanization — TODO confirm).
            if key in ('lastModified',
                       'lastUpdated') and parsed_args.ls_humanized:
                return key, value
            if key in ('length', 'size') and parsed_args.ls_humanized:
                return key, humanize_bytes(value)
            # NOTE(review): any other key in table mode (or table output
            # without --ls-humanized) falls through and returns None
            # implicitly — confirm callers expect that rather than
            # (key, value).
        else:
            return key, value
class FilesFormatOne(FilesBase, TaccApisFormatOne):
    # Single-record formatter for files; all behavior comes from the bases.
    pass
class FilesFormatMany(FilesBase, TaccApisFormatMany):
    # Multi-record formatter for files listings; behavior from the bases.
    pass
class FilesHistoryFormatMany(FilesBase, TaccApisFormatMany):
    # Multi-record formatter for files history output; behavior from bases.
    pass
class FilesPemsFormatMany(FilesBase, TaccApisFormatMany):
    # Multi-record formatter for files permissions output; behavior from bases.
    pass
| [
"vaughn@tacc.utexas.edu"
] | vaughn@tacc.utexas.edu |
e53ecec904462ec1004ff50bab6c0f5e0314dfe2 | a140fe192fd643ce556fa34bf2f84ddbdb97f091 | /.history/quiz08_20200709143734.py | a40bb14de63318c006975bfed6d4709c4dd80323 | [] | no_license | sangha0719/py-practice | 826f13cb422ef43992a69f822b9f04c2cb6d4815 | 6d71ce64bf91cc3bccee81378577d84ba9d9c121 | refs/heads/master | 2023-03-13T04:40:55.883279 | 2021-02-25T12:02:04 | 2021-02-25T12:02:04 | 342,230,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py | # Quiz) 주어진 코드를 활용하여 부동산 프로그램을 작성하시오.
# (출력 예제)
# 총 3대의 매물이 있습니다.
# 강남 아파트 매매 10억 2010년
# 마포 오피스텔 전세 5억 2007년
# 송파 빌라 월세 500/50 2000년
# [코드]
class House:
    """A single real-estate listing (location, type, deal, price, year)."""

    def __init__(self, location, house_type, deal_type, price, completion_year):
        # Store every field under the same name callers expect.
        (self.location, self.house_type, self.deal_type,
         self.price, self.completion_year) = (location, house_type,
                                              deal_type, price,
                                              completion_year)

    def show_detail(self):
        """Print the listing on one line, fields separated by spaces."""
        fields = (self.location, self.house_type, self.deal_type,
                  self.price, self.completion_year)
        print(*fields)
# Build the three sample listings and print them all.
# BUGFIX: the list was created as `house` but appended to as `houses`
# (NameError at runtime), and `len(house)` counted the wrong (empty) list.
houses = []
house1 = House("강남", "아파트", "매매", "10억", "2010년")
house2 = House("마포", "오피스텔", "전세", "5억", "2007년")
house3 = House("송파", "빌라", "월세", "500/50", "2000년")
houses.append(house1)
houses.append(house2)
houses.append(house3)
# BUGFIX: the format string was missing the counter suffix "대" required
# by the expected output above ("총 3대의 매물이 있습니다.").
print("총 {0}대의 매물이 있습니다.".format(len(houses)))
for house in houses:
    # BUGFIX: was `house.show_()`, which does not exist on House.
    house.show_detail()
| [
"sangha0719@gmail.com"
] | sangha0719@gmail.com |
bb2663ce1944d79d1b3a417b9622565af2486c22 | d458c71e75274914ace770e0a0d21af9d4e8f450 | /config/urls.py | 1a0dba76a81e5dd607ceea614e662ad62ad9dc76 | [
"MIT"
] | permissive | robertatakenaka/greetings | c5464e1d3802454c9c62a029340f51363a720f43 | e0537ddb8a942c985a15bbf9a30ef1c49d93a757 | refs/heads/master | 2021-08-11T13:06:12.963439 | 2017-11-01T11:15:52 | 2017-11-01T11:15:52 | 109,040,213 | 0 | 0 | null | 2017-11-01T11:15:53 | 2017-10-31T19:00:33 | Python | UTF-8 | Python | false | false | 1,549 | py | from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
# URL routing for the project: static pages, admin, user management, and
# allauth account flows, plus media file serving in development.
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, admin.site.urls),
    # User management
    url(r'^users/', include('greetings.users.urls', namespace='users')),
    url(r'^accounts/', include('allauth.urls')),
    # Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
        url(r'^500/$', default_views.server_error),
    ]
    # Mount django-debug-toolbar only when it is installed/enabled.
    if 'debug_toolbar' in settings.INSTALLED_APPS:
        import debug_toolbar
        urlpatterns = [
            url(r'^__debug__/', include(debug_toolbar.urls)),
        ] + urlpatterns
| [
"roberta.takenaka@scielo.org"
] | roberta.takenaka@scielo.org |
baf719f820c2c4aaa3822e20f63646d0cbdc1bfa | 5ba903c7907124c287e46019df0d30533f270a8c | /detector_caffe/detect.py | b861f3ba14f8534b076322d94573179e58869512 | [] | no_license | Jeffin-Studios/tracker | 2e9295651fec020489a9e55b9f2da5971754870b | 98daeaddecc6a82cfa851d6841a355d40026b02d | refs/heads/master | 2020-03-28T22:09:29.819043 | 2018-09-18T01:14:54 | 2018-09-18T01:14:54 | 149,209,024 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,474 | py | # Uses opencv DNN module (only works with caffe and toch models, not tensorflow yet)
# usage: python detect1.py --prototxt MobileNetSSD_deploy.prototxt.txt --model MobileNetSSD_deploy.caffemodel
# import the necessary packages
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import cv2
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True,
	help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
	help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.2,
	help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
# initialize the list of class labels MobileNet SSD was trained to
# detect, then generate a set of bounding box colors for each class
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
	"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
	"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
	"sofa", "train", "tvmonitor"]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
# initialize the video stream, allow the cammera sensor to warmup,
# and initialize the FPS counter
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)
fps = FPS().start()
# loop over the frames from the video stream
while True:
	# grab the frame from the threaded video stream and resize it
	# to a maximum width of 1000 pixels (width below)
	frame = vs.read()
	frame = imutils.resize(frame, width=1000)
	# grab the frame dimensions and convert it to a blob
	# (300x300 is the input size this MobileNet-SSD model was trained on)
	(h, w) = frame.shape[:2]
	blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
		0.007843, (300, 300), 127.5)
	# pass the blob through the network and obtain the detections and
	# predictions
	net.setInput(blob)
	detections = net.forward()
	# loop over the detections
	for i in np.arange(0, detections.shape[2]):
		# extract the confidence (i.e., probability) associated with
		# the prediction
		confidence = detections[0, 0, i, 2]
		# filter out weak detections by ensuring the `confidence` is
		# greater than the minimum confidence
		if confidence > args["confidence"]:
			# extract the index of the class label from the
			# `detections`, then compute the (x, y)-coordinates of
			# the bounding box for the object
			idx = int(detections[0, 0, i, 1])
			box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
			(startX, startY, endX, endY) = box.astype("int")
			# draw the prediction on the frame
			label = "{}: {:.2f}%".format(CLASSES[idx],
				confidence * 100)
			cv2.rectangle(frame, (startX, startY), (endX, endY),
				COLORS[idx], 2)
			# keep the label inside the frame when the box touches the top
			y = startY - 15 if startY - 15 > 15 else startY + 15
			cv2.putText(frame, label, (startX, y),
				cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
	# show the output frame
	cv2.imshow("Frame", frame)
	key = cv2.waitKey(1) & 0xFF
	# if the `q` key was pressed, break from the loop
	if key == ord("q"):
		break
	# update the FPS counter
	fps.update()
# stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
"richardbao419@gmail.com"
] | richardbao419@gmail.com |
e799f2913c67398be21fad9121cfa8058a2b5525 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_168/101.py | 7a20f29104dbb68ecb80573705f74c0418c596af | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,307 | py | DIRECTIONS = {
'^': (-1, 0),
'>': (0, 1),
'<': (0, -1),
'v': (1, 0)
}
def print_map(grid):
    """Print the grid one row per line (debug helper; Python 2 syntax)."""
    for row in grid:
        print ''.join(row)
def get_arrows(grid, row, col):
    """Follow the arrow chain starting at (row, col).

    Returns the list of arrow coordinates visited before the walk leaves
    the grid, or None when the starting cell is empty ('.') or the walk
    enters a cycle.

    NOTE(review): a chain that steps onto a '.' cell raises KeyError on
    the DIRECTIONS lookup; the callers appear to assume arrows only ever
    point at other arrows or off the grid — TODO confirm.
    """
    if grid[row][col] == '.':
        return None
    n_rows = len(grid)
    n_cols = len(grid[0])
    visited = set()
    chain = []
    while 0 <= row < n_rows and 0 <= col < n_cols:
        if (row, col) in visited:
            # Revisiting a cell means the arrows form a loop.
            return None
        cell = grid[row][col]
        if cell != '.':
            chain.append((row, col))
        visited.add((row, col))
        d_row, d_col = DIRECTIONS[cell]
        row += d_row
        col += d_col
    return chain
def solve(grid):
    """Count the minimum number of arrows that must be rewritten so every
    chain ends at an arrow pointing at another arrow, or return
    'IMPOSSIBLE' when an isolated arrow has no other arrow in any of its
    four directions.
    """
    R = len(grid)
    C = len(grid[0])
    # Longest chain ending at each terminal arrow position.
    chains = {}
    for row in range(R):
        for col in range(C):
            arrows = get_arrows(grid, row, col)
            if arrows is None:
                continue
            if arrows[-1] not in chains:
                chains[arrows[-1]] = arrows
            else:
                if len(arrows) > len(chains[arrows[-1]]):
                    chains[arrows[-1]] = arrows
    count = 0
    # COMPAT FIX: dict.items() instead of the Python-2-only .iteritems();
    # items() behaves identically here on both Python 2 and 3.
    for _, chain in chains.items():
        if len(chain) == 1:
            # A lone arrow: it can only be redirected if some other arrow
            # exists along one of the four axis directions.
            found = False
            for direction in DIRECTIONS.values():
                row, col = chain[0]
                row += direction[0]
                col += direction[1]
                while 0 <= row < len(grid) and 0 <= col < len(grid[0]):
                    if grid[row][col] != '.':
                        count += 1
                        found = True
                        break
                    row += direction[0]
                    col += direction[1]
                if found:
                    break
            if not found:
                return 'IMPOSSIBLE'
        else:
            # A chain of two or more arrows: only its last arrow needs
            # rewriting.
            count += 1
    return count
# Driver: read the Code Jam input file and print one result line per case.
# NOTE: this script targets Python 2 — `print` is used as a statement.
# NOTE(review): input_file is never closed; harmless for a one-shot script.
input_file = open('a-large.in')
cases = int(input_file.readline().strip())
case = 0
while case < cases:
    case += 1
    # Each case: a line "R C" followed by R rows of the grid.
    R, C = [int(x) for x in input_file.readline().split()]
    grid = []
    for row in range(R):
        grid.append(list(input_file.readline().strip()))
    print "Case #{}: {}".format(case, solve(grid))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
713de2ef75b55a1d9819d4043f76d345658aeb8f | f889bc01147869459c0a516382e7b95221295a7b | /swagger_client/models/body_124.py | bcbd3e061f3cd63f829a301297edc59e8275681b | [] | no_license | wildatheart/magento2-api-client | 249a86f5c0289743f8df5b0324ccabd76f326512 | e6a707f85b37c6c3e4ef3ff78507a7deb8f71427 | refs/heads/master | 2021-07-14T16:01:17.644472 | 2017-10-18T13:33:08 | 2017-10-18T13:33:08 | 107,412,121 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,018 | py | # coding: utf-8
"""
Magento Community
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Body124(object):
    """Swagger model for a request body carrying a single sales rule.

    NOTE: originally generated by the swagger code generator; hand-edited
    only to replace the ``six.iteritems`` helper with ``dict.items()``,
    which behaves identically on both Python 2 and Python 3.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    swagger_types = {
        'rule': 'SalesRuleDataRuleInterface'
    }

    attribute_map = {
        'rule': 'rule'
    }

    def __init__(self, rule=None):
        """Build a Body124.

        :param rule: the sales rule payload (required; the ``rule`` setter
            rejects ``None``).
        """
        self._rule = None
        self.rule = rule

    @property
    def rule(self):
        """Get the rule of this Body124.

        :return: The rule of this Body124.
        :rtype: SalesRuleDataRuleInterface
        """
        return self._rule

    @rule.setter
    def rule(self, rule):
        """Set the rule of this Body124.

        :param rule: The rule of this Body124.
        :raises ValueError: if *rule* is None (the field is mandatory).
        """
        if rule is None:
            raise ValueError("Invalid value for `rule`, must not be `None`")

        self._rule = rule

    def to_dict(self):
        """Return the model properties as a dict (recursively serialized)."""
        result = {}

        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize each element that knows how to serialize itself.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize dict values that know how to serialize themselves.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are Body124 instances with equal state."""
        if not isinstance(other, Body124):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
| [
"sander@wildatheart.eu"
] | sander@wildatheart.eu |
cae2e93d4f1c4ad91584500d90a9c52a83bf4e10 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnagricultur.py | c0b1980cf1368dc0f24e5b11ec4cdc40d59cb041 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 1,733 | py | ii = [('EmerRN.py', 2), ('CookGHP3.py', 4), ('LyelCPG2.py', 4), ('MarrFDI.py', 1), ('RogePAV2.py', 2), ('CoolWHM2.py', 2), ('GodwWSL2.py', 3), ('ChanWS.py', 1), ('SadlMLP.py', 64), ('WilbRLW.py', 1), ('WilbRLW4.py', 5), ('RennJIT.py', 3), ('AubePRP2.py', 3), ('MartHSI2.py', 72), ('LeakWTI2.py', 1), ('WilkJMC3.py', 6), ('LeakWTI3.py', 10), ('PettTHE.py', 4), ('MarrFDI3.py', 10), ('PeckJNG.py', 12), ('GellWPT.py', 1), ('AdamWEP.py', 3), ('WilbRLW2.py', 3), ('ClarGE2.py', 15), ('GellWPT2.py', 1), ('WilkJMC2.py', 10), ('SeniNSP.py', 38), ('LyttELD.py', 1), ('GrimSLE.py', 1), ('CookGHP2.py', 2), ('KiddJAE.py', 3), ('AdamHMM.py', 2), ('CoolWHM.py', 2), ('MarrFDI2.py', 1), ('CrokTPS.py', 2), ('ClarGE.py', 11), ('BuckWGM.py', 6), ('IrviWVD.py', 1), ('LyelCPG.py', 3), ('GilmCRS.py', 5), ('DaltJMA.py', 1), ('WestJIT2.py', 1), ('DibdTRL2.py', 2), ('LandWPA2.py', 1), ('WadeJEB.py', 31), ('GodwWLN.py', 1), ('LeakWTI4.py', 10), ('LeakWTI.py', 10), ('MedwTAI2.py', 1), ('BachARE.py', 82), ('HowiWRL2.py', 13), ('MereHHB.py', 1), ('WilkJMC.py', 7), ('MartHRW.py', 1), ('MackCNH.py', 11), ('WestJIT.py', 9), ('BabbCEM.py', 5), ('FitzRNS4.py', 6), ('CoolWHM3.py', 4), ('FitzRNS.py', 2), ('ThomGLG.py', 2), ('StorJCC.py', 30), ('LewiMJW.py', 1), ('MackCNH2.py', 30), ('JacoWHI2.py', 17), ('SomeMMH.py', 1), ('HaliTBC.py', 9), ('WilbRLW3.py', 1), ('AinsWRR2.py', 2), ('JacoWHI.py', 19), ('ClarGE3.py', 24), ('MartHRW2.py', 2), ('DibdTRL.py', 3), ('FitzRNS2.py', 9), ('MartHSI.py', 48), ('EvarJSP.py', 
11), ('DwigTHH.py', 7), ('SadlMLP2.py', 14), ('BowrJMM2.py', 4), ('LyelCPG3.py', 1), ('BowrJMM3.py', 2), ('BeckWRE.py', 1), ('TaylIF.py', 1), ('WordWYR.py', 1), ('KeigTSS.py', 1), ('KirbWPW.py', 5), ('WaylFEP.py', 35), ('ClarGE4.py', 95)] | [
"varunwachaspati@gmail.com"
] | varunwachaspati@gmail.com |
267f0e68ab5b078ea706bdaf249440c2397dcdc5 | f0a4ba1f1f941092e68e4b1ef9cff0d3852199ef | /Do_it!/6.정렬 알고리즘/1.버블정렬/교환과정출력.py | 31042799e3d6bd884b811ed00421b53226ffdbd8 | [] | no_license | lsb530/Algorithm-Python | d41ddd3ca7675f6a69d322a4646d75801f0022b2 | a48c6df50567c9943b5d7218f874a5c0a85fcc6d | refs/heads/master | 2023-06-18T04:36:09.221769 | 2021-06-28T16:49:35 | 2021-06-28T16:49:35 | 367,775,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,458 | py | # 버블 정렬 알고리즘 구현(정렬 과정을 출력)
"""비교하는 두 원소 사이에 교환할 경우 +를, 교환하지 않을 경우 -를 출력합니다."""
from typing import MutableSequence
def bubble_sort_verbose(a: MutableSequence) -> None:
    """Bubble sort *a* in place (ascending), printing every comparison.

    User-facing output stays in Korean; '+' marks a compared pair that will
    be swapped, '-' a pair left as is.
    """
    ccnt = 0  # number of comparisons
    scnt = 0  # number of swaps
    n = len(a)
    for i in range(n - 1):
        print(f'패스 {i + 1}')  # banner: "pass i+1"
        for j in range(n - 1, i, -1):
            # Show the array, marking the pair (a[j-1], a[j]) being compared.
            for m in range(0, n - 1):
                print(f'{a[m]:2}' + (' ' if m != j - 1 else
                                     ' +' if a[j - 1] > a[j] else ' -'), end='')
            print(f'{a[n - 1]:2}')
            ccnt += 1
            if a[j - 1] > a[j]:
                # Out of order: swap the adjacent elements.
                scnt += 1
                a[j - 1], a[j] = a[j], a[j - 1]
        # Print the array state at the end of this pass.
        for m in range(0, n - 1):
            print(f'{a[m]:2}', end=' ')
        print(f'{a[n - 1]:2}')
    print(f'비교를 {ccnt}번 했습니다.')  # "made ccnt comparisons"
    print(f'교환을 {scnt}번 했습니다.')  # "made scnt swaps"
if __name__ == '__main__':
    print('버블 정렬을 수행합니다.')  # announce: "performing bubble sort"
    num = int(input('원소 수를 입력하세요 : '))  # prompt for the element count
    x = [None] * num  # create a list holding num elements
    for i in range(num):
        x[i] = int(input(f'x[{i}]: '))  # read each element from the user
    bubble_sort_verbose(x)  # sort list x in place, printing each step
    print('오름차순으로 정렬했습니다.')  # "sorted in ascending order"
    for i in range(num):
        print(f'x[{i}] = {x[i]}')
| [
"lsb530@naver.com"
] | lsb530@naver.com |
84a9a4d1098814f15e297fd445f8f8ab8b0c8e2c | ef7c458371a2293dc438efc088312d0cf4eb56e8 | /misc/steve hack day/d3.py | c3a80c444efb8350acf3c2ca011cef3ff6255618 | [] | no_license | agvania/Sefaria-Data | efd5c7ea4c07fb1967ab415e7ffe7094bed7486b | 4ae83072d915f42fef891ef9e442ce21fe089b64 | refs/heads/master | 2021-01-15T16:11:13.666382 | 2016-06-23T00:45:05 | 2016-06-23T00:45:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,439 | py | # -*- coding: utf-8 -*-
__author__ = 'stevenkaplan'
import urllib
import urllib2
from urllib2 import URLError, HTTPError
import json
import pdb
import os
import sys
import re
import csv
p = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, p)
sys.path.insert(0, '../Match/')
from match import Match
os.environ['DJANGO_SETTINGS_MODULE'] = "sefaria.settings"
from local_settings import *
from functions import *
from sefaria.sheets import save_sheet
sys.path.insert(0, SEFARIA_PROJECT_PATH)
from sefaria.model import *
from sefaria.model.schema import AddressTalmud
all_books = library.all_index_records()
'''
for each commentary, grab LinkSet
'''
books = ["Genesis", "Exodus", "Leviticus", "Numbers", "Deuteronomy"]
commentaries = ["Abarbanel",
"Abravanel",
"Baal HaTurim",
"Chizkuni",
"Daat Zkenim",
"Haamek Davar",
"Ibn Ezra",
"Ikar Siftei Hachamim",
"Kitzur Baal Haturim",
"Kli Yakar",
"Malbim",
"Malbim Beur Hamilot",
"Metzudat David",
"Metzudat Zion",
"Or HaChaim",
"Penei Dovid",
"Rabbeinu Bachya",
"Rabbeinu Chananel",
"Radak",
"Ralbag",
"Ramban",
"Rashbam",
"Rashi",
"Saadia Gaon",
"Sepher Torat Elohim",
"Sforno",
"Shadal",
"Torah Temimah",
"Tiferet Yisrael",
"Toldot Aharon",
"Akeidat Yitzchak",
"Meshech Hochma",
"Shney Luchot HaBrit"
]
# Count, for every Torah segment, how many of the tracked commentaries link
# to it; write the tallies to CSV and build a source sheet from the most
# commented-on passages.
dict_refs = {}
probs = open('probs.txt', 'w')  # log of links whose commentary could not be matched
max_count = 0                   # highest number of distinct commentators seen (was `max`, shadowed builtin)
top_refs = []                   # refs with at least 13 distinct commentators
csvf = open('d3_data.csv', 'w')
csvwriter = csv.writer(csvf, delimiter=';')
csvwriter.writerow(["Ref", "Number"])
for book in books:
    book = library.get_index(book)
    refs = book.all_segment_refs()
    for ref in refs:
        commentators = []  # distinct tracked commentaries linking to this ref
        for link in LinkSet(ref).array():
            # if there is time make sure three parshanut ones are included
            # as they dont have commentary type
            if link.contents()['type'] == 'commentary':
                which_one = link.contents()['refs']
                if which_one[0].find(' on ') >= 0:
                    this_commentary = which_one[0].split(" on ")[0]
                elif which_one[1].find(' on ') >= 0:
                    this_commentary = which_one[1].split(" on ")[0]
                else:
                    continue
                if this_commentary in commentaries:
                    if this_commentary not in commentators:
                        commentators.append(this_commentary)
                else:
                    probs.write(str(link.contents()['refs']) + '\n\n')
        num_commentators = len(commentators)  # was `sum`, shadowed builtin
        if num_commentators > max_count:
            max_count = num_commentators
        if num_commentators >= 13:
            top_refs.append(ref)
        dict_refs[ref] = num_commentators
        csvwriter.writerow([str(ref).replace(' ', '_'), str(dict_refs[ref])])
csvf.close()
probs.close()  # bug fix: previously left open
print(max_count)
sheet = {
    "title": "Chumash Passages Most Commented On",
    "sources": [{"ref": ref.normal()} for ref in top_refs],
    "options": {"numbered": 1, "divineNames": "noSub"}
}
save_sheet(sheet, 1)
| [
"skaplan@brandeis.edu"
] | skaplan@brandeis.edu |
10620def303fa66435ffdd9215daf5bb3d45a01c | 4b379051aa3430eb2d8931f6055772731dcb199d | /512-Python_основы_и_применение/24468/stepik-512_24468-step3.py | deb1564bfd57d4325f1ccf6ff748ead36988e5cf | [] | no_license | dmikos/stepikCourse | 1416614ef51a4352374f37e86e3211c3b42cbaf6 | 3faeabfdc56cac597fb6b1495e7bb38a7f2a6816 | refs/heads/master | 2021-01-12T17:06:37.720050 | 2016-11-21T14:37:20 | 2016-11-21T14:37:20 | 69,057,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | x = [
("Guido", "van", "Rossum"),
("Haskell", "Curry"),
("John", "Backus")
]
x.sort(key=lambda name: len(" ".join(name)))
print(x)
| [
"dkostinov@gmail.com"
] | dkostinov@gmail.com |
570bca61bcb134fafb423856198d3af8acfd0527 | a1f2df675cfc595b15f1ca9390b7517989f2d4e0 | /testCase/organizations/testUpdateOrganization.py | 046acc5a93ef195d917a8110bb48969b3853b4b2 | [] | no_license | GGGYB/crm | d4def2f1abc89451e1c4b11b89ef100a842ed745 | 61932466dd0ac299adc661383d506389d5f0f8e7 | refs/heads/master | 2022-04-15T21:29:48.539311 | 2020-04-14T10:23:41 | 2020-04-14T10:23:41 | 255,575,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,783 | py | from bs4 import BeautifulSoup
import json
import requests
import random
import datetime
import re
import copy
from decimal import Decimal as D
from commons import common
from commons.const import const
class UpdateOrganization:
    """Test helper that edits a company's profile through the web UI.

    Scrapes the organization edit page for the available staff-size and
    industry options, picks random values and submits the update form.
    """

    def __init__(self, cookie, csrf):
        self.common = common.Common(cookie, csrf)
        self.base_url = const.BASE_URL
        self.base_url2 = const.SIGN_IN_BASE_URL
        self.csrf = csrf
        self.cookie = cookie
        self.response = ''
        self.announcements_id = []
        self.staff_size_list = []
        self.organization_industry = []
        self.organization_id = []
        self.params = ''

    def get_staff_size(self):
        """Fetch the edit page; return (staff_size, industry, organization_id).

        staff_size and industry are chosen at random from the page's option
        lists; organization_id is the hidden id field's first value.
        (Original comment: "get the staff-size list and industry list".)
        """
        url = self.base_url + '/user_center/organization/edit'
        response = self.common.get_html(url, '获取公司信息的页面')
        soup = BeautifulSoup(response.text, 'html.parser')
        staff_size = soup.findAll(attrs={'id': 'organization_client_attributes_staff_size'})
        self.staff_size_list = re.findall(r"value=\"(.*?)\">", str(staff_size))
        organization_industry = soup.findAll(attrs={'id': 'organization_client_attributes_industry'})
        self.organization_industry = re.findall(r"value=\"(.*?)\">", str(organization_industry))
        id = soup.findAll(attrs={"id": "organization_client_attributes_id"})
        self.organization_id = re.findall(r'value=\"(.*?)"', str(id))
        i = self.common.get_random_int(len(self.staff_size_list) - 1)
        n = self.common.get_random_int(len(self.organization_industry) - 1)
        return self.staff_size_list[i], self.organization_industry[n], self.organization_id[0]

    def update_organization(self):
        """Submit the organization edit form with randomly chosen values.

        Bug fix: the original called ``get_staff_size()`` three times (three
        HTTP round trips, each re-randomizing the choices); the page is now
        fetched once and the returned triple unpacked.
        """
        url = self.base_url + '/user_center/organization'
        staff_size, organization_industry, organization_id = self.get_staff_size()
        body = {
            'utf8': '✓',
            '_method': 'patch',
            'authenticity_token': self.csrf,
            'attachment_id': '',
            'organization[client_attributes][shorter_name]': '234465',
            'organization[client_attributes][industry]': organization_industry,
            'organization[client_attributes][province_id]': '9',
            'organization[client_attributes][city_id]': '73',
            'organization[client_attributes][district_id]': '732',
            'organization[client_attributes][address_detail]': '1232',
            'organization[client_attributes][staff_size]': staff_size,
            'organization[client_attributes][id]': organization_id,
        }
        self.response = self.common.post_response_json(url, body, '编辑了公司信息')
| [
"nlnongling@163.com"
] | nlnongling@163.com |
70cd969e64eb3781764a99c34ea91ea7f1ff765c | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /105_network/_exercises/templates/Python Network Programming/Section 4 Python 3 Network Hacking/25. Build an SMTP Server Username Enumerator.py | 83e1773403b7d8eecf7fc35c8393a9feb0953587 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 2,144 | py | # ______ so.., ___, a_p_
# ____ d_t_ ______ d_t_
#
#
# ___ scan ip users
# ___
# s _ ?.? ?.A.. ?.S..
# s.c.. ip 25
# rsp _ s.r.. 1024
# s.s.. _"HELO friend\n")
# rsp _ s.r.. 1024
# __ _"250" no. __ ?
# print("[!] Something went wrong, exiting.")
# ___.e.. 0
# s.s.. _"MAIL FROM:nice@guy.com\n")
# rsp _ s.r.. 1024
# __ _"250" no. __ ?
# print("[!] Something went wrong, exiting.")
# ___.e.. 0
# ___ user __ u..
# s.s.. _"RCPT TO:" + ?.rs...en.. + _"\n")
# rsp _ s.r.. 1024
# __ _"250" __ rsp
# print("[+] Valid: " + ?.rs..
# s.s.. _)"QUIT\n"
# s.c..
# ______ E.. __ err
# print(st. ?
#
#
# ___ main args
# start _ d_t_.n..
# print("==================================================")
# print("Started @ " + st. s..
# print("==================================================")
# w__ o..(?.w__.li.. __ fle
# usr _ # list
# __ ?.b.. !_ 0:
# ___ user __ f..
# __ le. ? + 1 !_ ?.b..
# u__.ap.. ?
# ____
# u__.ap.. ?
# s.. ?.i. u..
# de. u..|;
# __ le. u.. > 0
# s.. ?.i. u..
# ____ # No batches
# s.. ?.i. f..
# stop _ d_t_.n..
# print("==================================================")
# print("Duration: " + st. s.. - s..
# print("Completed @ " + st. s..
# print("==================================================")
#
#
# __ _______ __ _______
# parser _ a_p_.A_P..
# ?.a_a.. "ip" a.._"store" h.._"smtp host address")
# ?.a_a.. "wordlist" a.._"store" h.._"wordlist of usernames")
# ?.a_a.. "-b" "--batch" a.._"store" n.._'?' c.._10
# d.._0 h.._"attempts per connection" ty.._in.
#
# __ le. ___.a.. 2; __ 0 # Show help if required arg not included
# ?.p_h..
# ?.e..
#
# args _ ?.p_a.. # Declare argumnets object to args
# m.. ? | [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
77a65bd2b0809b86da8ca73174e6de833fe41f84 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-mrsp.0/mrsp_ut=3.5_rd=0.5_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=88/sched.py | 260c6e748e206e0e7bc567fd6fd3308a1768c01f | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | -S 0 -X RUN -Q 0 -L 2 111 400
-S 0 -X RUN -Q 0 -L 2 61 200
-S 3 -X RUN -Q 1 -L 1 59 175
-S 3 -X RUN -Q 1 -L 1 49 200
-S 2 -X RUN -Q 2 -L 1 47 150
-S 2 -X RUN -Q 2 -L 1 41 175
-S 1 -X RUN -Q 3 -L 1 39 125
-S 1 -X RUN -Q 3 -L 1 38 125
-S 4 35 125
-S 4 32 125
-S 4 31 200
-S 4 29 200
-S 4 22 150
-S 5 19 175
-S 5 8 100
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
656fa1aa409f51a7b609208dc999e0ec2acb948f | 4229a406a83a573dc357c1144cae7c5aad6f673b | /trestle/transforms/implementations/osco.py | 7c64e73849e8aa13bfab384882cbd1647df6a632 | [
"Apache-2.0"
] | permissive | xee5ch/compliance-trestle | dbc0647fe18e1164a75bcfdc4d38687df14e3247 | 969c10eceb73202d2b7856bac598f9b11afc696e | refs/heads/main | 2023-09-02T17:21:35.659432 | 2021-11-17T00:01:27 | 2021-11-17T00:01:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,680 | py | # -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2021 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Facilitate OSCAL-OSCO transformation."""
import json
import logging
from typing import Dict, List
from ruamel.yaml import YAML
from trestle.oscal.profile import Profile
from trestle.transforms.results import Results
from trestle.transforms.transformer_factory import FromOscalTransformer
from trestle.transforms.transformer_factory import ResultsTransformer
from trestle.transforms.utils.osco_helper import ResultsMgr
logger = logging.getLogger(__name__)
class OscoTransformer(ResultsTransformer):
    """Transform OSCO data (xml, json or yaml) into OSCAL Results."""

    def __init__(self) -> None:
        """Initialize."""
        self._results_mgr = ResultsMgr(self.get_timestamp())

    @property
    def analysis(self) -> List[str]:
        """Analysis."""
        return self._results_mgr.analysis

    def _package_results(self) -> Results:
        """Wrap the accumulated result into a Results list."""
        results = Results()
        results.__root__.append(self._results_mgr.result)
        return results

    def _ingest_xml(self, blob: str) -> Results:
        """Ingest xml data; return None if the blob is not xml."""
        if not blob.startswith('<?xml'):
            return None
        self._results_mgr.ingest_xml(blob)
        return self._package_results()

    def _ingest_json(self, blob: str) -> Results:
        """Ingest json data; return None if the blob is not valid json."""
        try:
            # ? configmaps or auditree data
            jdata = json.loads(blob)
            # https://docs.openshift.com/container-platform/3.7/rest_api/api/v1.ConfigMap.html#Get-api-v1-namespaces-namespace-configmaps-name
            if 'kind' in jdata.keys() and jdata['kind'] == 'ConfigMapList' and 'items' in jdata.keys():
                items = jdata['items']
                for item in items:
                    if 'data' in item.keys():
                        data = item['data']
                        if 'results' in data:
                            resource = item
                            self._results_mgr.ingest(resource)
            # https://github.com/ComplianceAsCode/auditree-arboretum/blob/main/arboretum/kubernetes/fetchers/fetch_cluster_resource.py
            else:
                for key in jdata.keys():
                    for group in jdata[key]:
                        for cluster in jdata[key][group]:
                            if 'resources' in cluster:
                                for resource in cluster['resources']:
                                    self._results_mgr.ingest(resource)
        except json.decoder.JSONDecodeError:
            return None
        return self._package_results()

    def _ingest_yaml(self, blob: str) -> Results:
        """Ingest yaml data."""
        # The original wrapped this in `except Exception as e: raise e`,
        # which is a no-op re-raise; parse errors still propagate unchanged.
        yaml = YAML(typ='safe')
        resource = yaml.load(blob)
        self._results_mgr.ingest(resource)
        return self._package_results()

    def transform(self, blob: str) -> Results:
        """Transform the blob into a Results.

        The expected blob is a string that is one of:
            - data from OpenShift Compliance Operator (json, yaml, xml)
            - data from Auditree OSCO fetcher/check (json)
        """
        # Try each format in turn; the first ingester that accepts wins.
        results = self._ingest_xml(blob)
        if results is None:
            results = self._ingest_json(blob)
        if results is None:
            results = self._ingest_yaml(blob)
        return results
class ProfileToOscoTransformer(FromOscalTransformer):
    """Transform an OSCAL Profile into an OSCO TailoredProfile document."""

    def __init__(
        self,
        extends='ocp4-cis-node',
        api_version='compliance.openshift.io/v1alpha1',
        kind='TailoredProfile',
        name='customized-tailored-profile',
        namespace='openshift-compliance',
    ) -> None:
        """Initialize with OSCO resource defaults, overridable per instance."""
        self._extends = extends
        self._api_version = api_version
        self._kind = kind
        self._name = name
        self._namespace = namespace

    def transform(self, profile: Profile) -> str:
        """Transform the Profile into a OSCO yaml."""
        # set values
        set_values = self._get_set_values(profile)
        # spec
        spec = {
            'description': self._get_metadata_prop_value(profile, 'profile_mnemonic', self._name),
            'extends': self._get_metadata_prop_value(profile, 'base_profile_mnemonic', self._extends),
            'title': profile.metadata.title,
            'setValues': set_values,
        }
        disable_rules = self._get_disable_rules(profile)
        if len(disable_rules) > 0:
            spec['disableRules'] = disable_rules
        # yaml data
        ydata = {
            'apiVersion': self._api_version,
            'kind': self._kind,
            'metadata': {
                'name': self._get_metadata_prop_value(profile, 'profile_mnemonic', self._name),
                'namespace': self._namespace,
            },
            'spec': spec,
        }
        return json.dumps(ydata)

    def _get_set_values(self, profile) -> List[Dict]:
        """Extract set_parameter name/value pairs from profile.

        Robustness fix: tolerate a profile without modify/set_parameters,
        mirroring the None handling already done in _get_disable_rules.
        """
        set_values = []
        if profile.modify is not None and profile.modify.set_parameters is not None:
            for set_parameter in profile.modify.set_parameters:
                name = set_parameter.param_id
                parameter_value = set_parameter.values[0]
                value = parameter_value.__root__
                rationale = self._get_rationale_for_set_value()
                set_value = {'name': name, 'value': value, 'rationale': rationale}
                set_values.append(set_value)
        return set_values

    def _get_metadata_prop_value(self, profile, name, default_) -> str:
        """Extract metadata prop or else default if not present."""
        if profile.metadata.props is not None:
            for prop in profile.metadata.props:
                if prop.name == name:
                    return prop.value
        logger.info(f'using default: {name} = {default_}')
        return default_

    def _get_disable_rules(self, profile) -> List[str]:
        """Extract disabled rules."""
        value = []
        if profile.imports is not None:
            for item in profile.imports:
                if item.exclude_controls is not None:
                    for control in item.exclude_controls:
                        if control.with_ids is not None:
                            for with_id in control.with_ids:
                                name = with_id.__root__
                                rationale = self._get_rationale_for_disable_rule()
                                entry = {'name': name, 'rationale': rationale}
                                value.append(entry)
        return value

    def _get_rationale_for_set_value(self) -> str:
        """Rationale for set value."""
        return 'not determinable from specification'

    def _get_rationale_for_disable_rule(self) -> str:
        """Rationale for disable rule."""
        return 'not determinable from specification'
| [
"noreply@github.com"
] | xee5ch.noreply@github.com |
2458bd10820179534d5d1799a8f740ad985c965e | e55aacec5de90c52b9cb30742924bfffc584027d | /Implementation/Sock_Merchant.py | 4ce479a4de0eb4afa84ca17c6cf885526d745464 | [] | no_license | JonathanWu1120/Hackerrank_Algorithms | 5016a66d516c7a63033aee7f8c2aaa396b7ecdd6 | b53abe5e678a5ac11485068340df2c2a122370f4 | refs/heads/master | 2021-01-19T06:22:50.009873 | 2017-04-06T19:22:21 | 2017-04-06T19:22:21 | 87,459,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | #!/bin/python3
import sys
# Sock Merchant: count how many complete pairs of matching sock colors exist.
n = int(input().strip())  # declared number of socks (unused beyond parsing)
c = [int(c_temp) for c_temp in input().strip().split(' ')]
# Each distinct color contributes floor(occurrences / 2) complete pairs.
# Replaces the original destructive count/remove loop (which also shadowed
# its outer loop variable `i`); the printed result is identical.
count = sum(c.count(x) // 2 for x in set(c))
print(count)
| [
"jwu166@binghamton.edu"
] | jwu166@binghamton.edu |
258895ade985a2bbe61e97b2d592fb53965ddf4f | 143e8939ac1033912195eb7e6b99f9d06ec908da | /dash/views.py | fd180cab200fdcca4ee3c06fd206eccd18f67823 | [
"BSD-3-Clause"
] | permissive | gvsurenderreddy/routerdash | 63a4c011ede20005f465cb2e9fcd1411c82d0aeb | 321030438008d317d46432e777191dfd4ad1c3d6 | refs/heads/master | 2021-01-18T09:21:26.250338 | 2014-01-03T18:05:40 | 2014-01-03T18:05:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,093 | py | import json
from django.shortcuts import render
from django.http import HttpResponse
from django.conf import settings
from .calculations import get_speeds, get_devices
def json_response(data):
    """Serialize *data* to JSON and return it with the JSON content type."""
    payload = json.dumps(data)
    return HttpResponse(payload, content_type="application/json")
def human_speed(raw_speed):
    """Format a speed in bytes/s as 'X.XX Mb/s'; return '...' when unknown."""
    if raw_speed is None:
        return "..."
    megabits = raw_speed / 125000.0  # 125000 bytes/s == 1 Mb/s
    return "%.02f Mb/s" % megabits
def home(request):
    """Render the dashboard landing page."""
    return render(request, "dashboard.html")
def ajax_speeds(request):
    """Return the external interface's rx/tx speeds as JSON.

    Response shape: [rx, tx, rx_human, tx_human]; the raw values may be
    None, in which case the human strings are "..." (see human_speed).
    """
    rx, tx = get_speeds(settings.EXTERNAL_INTERFACE)
    rx_string = human_speed(rx)
    tx_string = human_speed(tx)
    return json_response([rx, tx, rx_string, tx_string])
def ajax_devices(request):
    """Return the bridge interface's devices, sorted by name, as JSON.

    Human-readable rx/tx speed strings are added only when the raw speed
    keys are present on the device dict.
    """
    devices = get_devices(settings.BRIDGE_INTERFACE)
    response = []
    for device in devices.values():
        if "rx_speed" in device:
            device["rx_speed_human"] = human_speed(device["rx_speed"])
        if "tx_speed" in device:
            device["tx_speed_human"] = human_speed(device["tx_speed"])
        response.append(device)
    return json_response(sorted(response, key=lambda x: x['name']))
| [
"andrew@aeracode.org"
] | andrew@aeracode.org |
b8ce5561d98ed3ece5cdb19ca80579b5e232d69b | baffcef29e33658138c43ef358d7399ab3ea2c0d | /WORKFLOWS/Tools/NEC/NAL/nal-dashboard/nec_portal/dashboards/project/service/urls.py | 1b37e4e206fb65118cf9ebe5adc24feaf15b3061 | [
"Apache-2.0"
] | permissive | openmsa/NO | aa7d4ff000875bfcff0baee24555ec16becdb64e | 24df42ee3927415b552b5e5d7326eecd04ebca61 | refs/heads/master | 2020-03-09T23:21:09.657439 | 2019-03-29T06:29:07 | 2019-03-29T06:29:07 | 129,056,267 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# COPYRIGHT (C) NEC CORPORATION 2017
#
from django.conf.urls import patterns
from django.conf.urls import url
from nec_portal.dashboards.project.service import views
# URL stub parameterized by the action suffix; captures group_id.
# NOTE(review): name is misspelled ("SERVISE" for "SERVICE") — kept as is
# since a doc-only change must not rename identifiers.
SERVISE = r'^(?P<group_id>[^/]+)/%s$'

# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8
# and removed in 1.10; this module targets an older Django release.
urlpatterns = patterns(
    '',
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^create/$', views.CreateView.as_view(), name='create'),
    url(SERVISE % 'detail', views.DetailView.as_view(), name='detail'),
    url(SERVISE % '(?P<update_type>[^/]+)/update', views.UpdateView.as_view(),
        name='update'),
)
| [
"ofa@ubiqube.com"
] | ofa@ubiqube.com |
5dc8894ebdb159580f4ba7c27c73bc5fd01c9294 | 14956dbed8ae4fba1d65b9829d9405fcf43ac698 | /Cyber Security/Capture the Flag Competitions/2020/Houseplant CTF 2020/Reversing/LEMON/pass2.py | 844c42e58d01e8397b7e1d91e91d5e8f21a17e00 | [] | no_license | Hackin7/Programming-Crappy-Solutions | ae8bbddad92a48cf70976cec91bf66234c9b4d39 | ffa3b3c26a6a06446cc49c8ac4f35b6d30b1ee0f | refs/heads/master | 2023-03-21T01:21:00.764957 | 2022-12-28T14:22:33 | 2022-12-28T14:22:33 | 201,292,128 | 12 | 7 | null | 2023-03-05T16:05:34 | 2019-08-08T16:00:21 | Roff | UTF-8 | Python | false | false | 2,040 | py | def checkpass():
userinput = input("Enter the password: ")
if userinput[0:4] == "rtcp":
if userinput[10:13] == "tHi":
if userinput[22:25] == "cuR":
if userinput[4:7] == "{y3":
if userinput[16:19] == "1nT":
if userinput[7:10] == "4H_":
if userinput[13:16] == "S_a":
if userinput[19:22] == "_sE":
if userinput [25:27] == "3}":
return True
else:
return False
def main():
access = checkpass()
if access == True:
print("Unlocked. The flag is the password.")
print("b-but i wunna show off my catswpeak uwu~... why wont you let me do my nya!!\noh well... good luck with the rest of the ctf :/\nbut I WANT TO SPWEAK CATGIRL NEXT TIME SO YOU BETTER LET ME >:(")
exit()
else:
print("Incorrect password!")
print("sowwy but now you gunnu have to listen to me spweak in cat giwrl speak uwu~")
catmain()
def catmain():
access = catcheckpass()
if access == True:
print("s-senpai... i unwocked it fowr you.. uwu~")
print("t-the fwlag is... the password.. nya!")
exit()
else:
print("sowwy but that wasnt quite rwight nya~")
catmain()
def catcheckpass():
userinput = input("pwease enter youwr password... uwu~ nya!!: ")
if userinput[0:4] == "rtcp":
if userinput[10:13] == "tHi":
if userinput[22:25] == "cuR":
if userinput[4:7] == "{y3":
if userinput[16:19] == "1nT":
if userinput[7:10] == "4H_":
if userinput[13:16] == "S_a":
if userinput[19:22] == "_sE":
if userinput [25:27] == "3}":
return True
else:
return False
access = False
main()
| [
"zunmun@gmail.com"
] | zunmun@gmail.com |
2e2d9059217f681eb729dbc014c5415f834c556b | 47e86e60f6239e0f8bf42aeecd8c8a0e8ac50578 | /izi_pos_report_birt/models/rpt_pos_revenue_customer_by_product_and_service_group.py | f9e4ff29a753dbf245b53fd939eba7cc996ab0ac | [] | no_license | HoangDinhHoi/DATN | 0a8d12c031253c7fe21321c4cf493ead3f71ca56 | 09157d6c79a779a701fc01727db8dcd04323dc1d | refs/heads/master | 2020-09-14T02:28:29.528907 | 2020-01-14T07:55:17 | 2020-01-14T07:55:17 | 222,985,616 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,956 | py | # -*- coding: utf-8 -*-
__author__ = "HoiHD"
from odoo import models, fields, api
from odoo.exceptions import ValidationError
import odoo.tools.config as config
class ReportRevenueCustomerAccordingToProductAndServiceGroup(models.TransientModel):
    """Wizard that opens the BIRT "customer revenue by product and service
    group" report. The Vietnamese strings below are user-facing labels and
    must stay unchanged."""

    _name = 'rpt.revenue.customer.product.service.group'
    _description = 'Báo cáo doanh thu của khách hàng theo nhóm sản phẩm và dịch vụ'

    # Branch filter, restricted to branches the current user belongs to.
    branch_id = fields.Many2one('res.branch', string='Branch',
                                domain=lambda self: [('id', 'in', self.env.user.branch_ids.ids)])
    date_from = fields.Date(string='Date from')
    date_to = fields.Date(string='Date to')
    is_export_excel = fields.Boolean(default=False, string='Export to Excel')

    @api.multi
    def action_report(self):
        """Build and return the client action that opens the BIRT report.

        Translated from the original Vietnamese docstring: "customer revenue
        report by product and service group" (dated 11/06/2019, 9:01 AM).

        :return: an ``ir.actions.client`` dict pointing at the BIRT viewer.
        :raises ValidationError: when ``birt_url`` is missing from the config.
        """
        birt_url = config['birt_url'] or '0'
        if birt_url == '0':
            # Message says: "birt_url is not configured!"
            raise ValidationError("Chưa cấu hình birt_url!")
        date_from = self.date_from.strftime('%d/%m/%Y')
        date_to = self.date_to.strftime('%d/%m/%Y')
        report_name = "rpt_pos_revenue_customer_by_product_and_service_group.rptdesign"
        # BIRT report parameters, passed through as query-string fragments.
        param_str = {
            '&date_from': date_from,
            '&date_to': date_to,
            '&branch_id': str(self.branch_id.id if self.branch_id else 0),
        }
        birt_link = birt_url + report_name
        if self.is_export_excel:
            birt_link += '&__format=xlsx'  # ask BIRT for an xlsx download
        return {
            "type": "ir.actions.client",
            'name': 'Báo cáo doanh thu của khách hàng theo nhóm sản phẩm và dịch vụ',
            'tag': 'BirtViewerActionCurrent',
            'target': 'self',
            'context': {
                'birt_link': birt_link,
                'payload_data': param_str,
            }
        }
| [
"hoanghoihust@gmail.com"
] | hoanghoihust@gmail.com |
4c76b68a37d3fb42da48112d9d0d6e4f7407f257 | fdf531435b0a4d771083bab78f5a2f91b2ec1b28 | /Trees/Trees III (BST)/5. K DISTANCE.py | aab590fa8927baedd0c1519b90987bb627ad68fc | [] | no_license | srajsonu/100DaysOfCode | d556cf4c8491d2bea2bf6c17cc4410f64ae71829 | b25ff694a04a16bd2bdd33cf5bb84f9cbe5f3af6 | refs/heads/main | 2023-03-22T22:48:11.030576 | 2021-03-18T18:50:00 | 2021-03-18T18:50:00 | 325,747,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | class Solution:
def lcs(self, root, B, aux):
if not root:
return 0
l = self.lcs(root.left, B, aux + [root.val])
r = self.lcs(root.right, B, aux + [root.val])
cnt = 0
for i in aux:
if abs(i - root.val) <= B:
cnt += 1
cnt += (l + r)
return cnt
def solve(self, A, B):
return self.lcs(A, B, [])
| [
"srajsonu02@gmail.com"
] | srajsonu02@gmail.com |
156ad1f2936e53c9ed0efabc77d0de0a24482fe4 | 3b93f91703a36f8ec8fd1767e719f3e3523ab6f1 | /MyNao/demo_0526.py/online_beat_extract.py | 1d4ac37dd69496ebb86a00f1d64d6c4cef3a7f9e | [] | no_license | SutirthaChakraborty/Real-Time-Music-Driven-Dancing-Robot | 66655b1ed1d000499096295587c9c902a636b688 | 98ab75e8ab199a56f1e80854a891fcf4425dd042 | refs/heads/master | 2023-02-13T01:57:56.185295 | 2021-01-04T19:12:33 | 2021-01-04T19:12:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,346 | py | import numpy as np
import os
import time
from madmom.features.beats import DBNBeatTrackingProcessor, RNNBeatProcessor
from madmom.models import BEATS_LSTM
from madmom.processors import IOProcessor, process_online
from madmom.io import write_beats
def beat_extractor(queue_beat):
    """Track beats online with madmom and publish the newest beat time.

    Each detected beat timestamp replaces whatever is currently stored in
    *queue_beat*, so the consumer always reads only the most recent beat.
    Blocks inside process_online (presumably capturing the default audio
    input, since infile is None — confirm against the madmom docs).
    """
    # madmom configuration: 100 fps analysis, online mode, single-frame
    # steps, tempo constrained to the 60-170 BPM range.
    kwargs = dict(
        fps = 100,
        correct = True,
        infile = None,
        outfile = None,
        max_bpm = 170,
        min_bpm = 60,
        #nn_files = [BEATS_LSTM[0]],
        transition_lambda = 100,
        num_frames = 1,
        online = True,
        verbose = 0
    )
    def beat_callback(beats, output=None):
        # Invoked by the output processor with the beats detected so far.
        if len(beats) > 0:
            # Do something with the beat (for now, just print the array to stdout)
            if not queue_beat.empty():
                _ = queue_beat.get()  # discard the stale beat first
            queue_beat.put(beats[0])
            print(beats)
            #print('Process to write betas: %s' % os.getpid())
    # RNN activation extraction feeding a DBN beat tracker; the callback
    # is chained after the tracker as a second output stage.
    in_processor = RNNBeatProcessor(**kwargs)
    beat_processor = DBNBeatTrackingProcessor(**kwargs)
    out_processor = [beat_processor, beat_callback]
    processor = IOProcessor(in_processor, out_processor)
    process_online(processor, **kwargs)
def beat_simulator(queue_beat):
t1=time.time()
while True:
time.sleep(60/120)
t=time.time()-t1
if not queue_beat.empty():
_ = queue_beat.get()
queue_beat.put(t) | [
"zhaojw1998@outlook.com"
] | zhaojw1998@outlook.com |
39297cd1c0e6a9b2bb905e03f0217212d334a1ae | 7e76a72a67596ca230f83b4654615734f8d93414 | /kreddit/mainapp/migrations/0003_auto_20160324_1930.py | 564cc0ecc22e099763b9eb4fa1d560b3d6fe60af | [] | no_license | kjmullen/reddit-remake | dc6d7893034769664bea63f84437c9c02a4e95a5 | e49314d29f752fdb48406c03ed992446415fd3ce | refs/heads/master | 2020-12-28T17:30:08.117132 | 2016-03-31T05:19:03 | 2016-03-31T05:19:03 | 54,669,553 | 0 | 0 | null | 2016-03-24T19:43:38 | 2016-03-24T19:43:38 | null | UTF-8 | Python | false | false | 430 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-25 02:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0002_post_url'),
]
operations = [
migrations.AlterField(
model_name='post',
name='url',
field=models.URLField(null=True),
),
]
| [
"kevinkozzik@gmail.com"
] | kevinkozzik@gmail.com |
adcb16ed9299cb9b835cd7ecd2e76c1ef88cbb49 | a9c0daa4a7b9a4d7341afcab270c5b5debb8c13f | /env/lib/python3.6/site-packages/ebcli/__init__.py | bc9e527698d03b59bda58a57cc898f4e3a39d984 | [] | no_license | phamcong/alienator-plf | bad8c4e003fd189c43243b31ef2b975b6f154754 | ea65628af66fbca51f2248ceb4ba93f858dbddce | refs/heads/master | 2022-11-26T01:28:38.286261 | 2017-11-07T15:12:08 | 2017-11-07T15:12:08 | 109,412,097 | 0 | 1 | null | 2020-07-25T23:43:17 | 2017-11-03T15:30:22 | JavaScript | UTF-8 | Python | false | false | 615 | py | #!/usr/bin/env python
# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
__version__ = '3.7.7'
| [
"ccuong.ph@gmail.com"
] | ccuong.ph@gmail.com |
b605521381459e6001f9c7ffc3296f38b9f52f25 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.5/tests/regressiontests/admin_changelist/models.py | 701939d82a7195f395c9757908cea7cccb537313 | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.5/tests/regressiontests/admin_changelist/models.py | [
"ron.y.kagan@gmail.com"
] | ron.y.kagan@gmail.com |
ab00aee802d3683a271cb6fc3e219e1ab5e30668 | ab3e72ca2d146055f2966cecd03e077c84b318e3 | /mlcomp/worker/sync.py | 9f2e14cce4761faa66e31012d99e8074b59ba62c | [
"Apache-2.0"
] | permissive | megachester/mlcomp | 69ec01b0fbf55489b860a64c0fde19e39a8d45b4 | 8d30ba0a52e225144533e68295b71acb49e3c68a | refs/heads/master | 2021-02-18T03:58:17.820726 | 2020-03-05T08:28:19 | 2020-03-05T08:28:19 | 245,157,392 | 0 | 0 | Apache-2.0 | 2020-03-05T12:27:58 | 2020-03-05T12:27:58 | null | UTF-8 | Python | false | false | 6,265 | py | import os
import socket
import time
import traceback
import subprocess
from os.path import join
from typing import List
from mlcomp import FILE_SYNC_INTERVAL
from mlcomp.db.core import Session
from mlcomp.db.enums import ComponentType
from mlcomp.db.models import Computer, TaskSynced
from mlcomp.db.providers import ComputerProvider, \
TaskSyncedProvider, DockerProvider, ProjectProvider
from mlcomp.utils.logging import create_logger
from mlcomp.utils.misc import now
from mlcomp.utils.io import yaml_load, yaml_dump
def sync_directed(
session: Session, source: Computer, target: Computer,
ignore_folders: List
):
current_computer = socket.gethostname()
end = ' --perms --chmod=777 --size-only'
logger = create_logger(session, __name__)
for folder, excluded in ignore_folders:
if len(excluded) > 0:
excluded = excluded[:]
for i in range(len(excluded)):
excluded[i] = f'--exclude {excluded[i]}'
end += ' ' + ' '.join(excluded)
source_folder = join(source.root_folder, folder)
target_folder = join(target.root_folder, folder)
if current_computer == source.name:
command = f'rsync -vhru -e ' \
f'"ssh -p {target.port} -o StrictHostKeyChecking=no" ' \
f'{source_folder}/ ' \
f'{target.user}@{target.ip}:{target_folder}/ {end}'
elif current_computer == target.name:
command = f'rsync -vhru -e ' \
f'"ssh -p {source.port} -o StrictHostKeyChecking=no" ' \
f'{source.user}@{source.ip}:{source_folder}/ ' \
f'{target_folder}/ {end}'
else:
command = f'rsync -vhru -e ' \
f'"ssh -p {target.port} -o StrictHostKeyChecking=no" ' \
f' {source_folder}/ ' \
f'{target.user}@{target.ip}:{target_folder}/ {end}'
command = f'ssh -p {source.port} ' \
f'{source.user}@{source.ip} "{command}"'
logger.info(command, ComponentType.WorkerSupervisor, current_computer)
subprocess.check_output(command, shell=True)
def copy_remote(
session: Session, computer_from: str, path_from: str, path_to: str
):
provider = ComputerProvider(session)
src = provider.by_name(computer_from)
host = socket.gethostname()
if host != computer_from:
c = f'scp -P {src.port} {src.user}@{src.ip}:{path_from} {path_to}'
else:
f'cp {path_from} {path_to}'
subprocess.check_output(c, shell=True)
return os.path.exists(path_to)
class FileSync:
session = Session.create_session(key='FileSync')
logger = create_logger(session, 'FileSync')
def sync_manual(self, computer: Computer, provider: ComputerProvider):
"""
button sync was clicked manually
"""
if not computer.meta:
return
meta = yaml_load(computer.meta)
if 'manual_sync' not in meta:
return
manual_sync = meta['manual_sync']
project_provider = ProjectProvider(self.session)
docker_provider = DockerProvider(self.session)
dockers = docker_provider.get_online()
project = project_provider.by_id(manual_sync['project'])
for docker in dockers:
if docker.computer == computer.name:
continue
source = provider.by_name(docker.computer)
ignore_folders = [
[join('models', project.name), []]
]
sync_directed(self.session, target=computer, source=source,
ignore_folders=ignore_folders)
del meta['manual_sync']
computer.meta = yaml_dump(meta)
provider.update()
def sync(self):
hostname = socket.gethostname()
try:
provider = ComputerProvider(self.session)
task_synced_provider = TaskSyncedProvider(self.session)
computer = provider.by_name(hostname)
sync_start = now()
if FILE_SYNC_INTERVAL == 0:
time.sleep(1)
else:
self.sync_manual(computer, provider)
computers = provider.all_with_last_activtiy()
computers = [
c for c in computers
if (now() - c.last_activity).total_seconds() < 10
]
computers_names = {c.name for c in computers}
for c, project, tasks in task_synced_provider.for_computer(
computer.name):
if c.sync_with_this_computer:
if c.name not in computers_names:
self.logger.info(f'Computer = {c.name} '
f'is offline. Can not sync',
ComponentType.WorkerSupervisor,
hostname)
continue
if c.syncing_computer:
continue
ignore_folders = [
[join('models', project.name), []]
]
computer.syncing_computer = c.name
provider.update()
sync_directed(self.session, c, computer,
ignore_folders)
for t in tasks:
task_synced_provider.add(
TaskSynced(computer=computer.name, task=t.id)
)
time.sleep(FILE_SYNC_INTERVAL)
computer.last_synced = sync_start
computer.syncing_computer = None
provider.update()
except Exception as e:
if Session.sqlalchemy_error(e):
Session.cleanup('FileSync')
self.session = Session.create_session(key='FileSync')
self.logger = create_logger(self.session, 'FileSync')
self.logger.error(
traceback.format_exc(), ComponentType.WorkerSupervisor,
hostname
)
| [
"lightsanweb@yandex.ru"
] | lightsanweb@yandex.ru |
2c6136ca3eed03141ace8fce8413f3ad06aafd6b | 2aec9c5e8c72b731d3abf22f2a407fe09c1cde09 | /ZhihuTopicRedis/ZhihuTopicRedis/items.py | fb602cdbc16750b70dda7c86a8886b6a5e3cd084 | [] | no_license | jiangyg/ZWFproject | 8b24cc34970ae0a9c2a2b0039dc527c83a5862b5 | aa35bc59566d92721f23d2dd00b0febd268ac2dd | refs/heads/master | 2020-09-26T17:01:00.229380 | 2019-11-15T13:16:21 | 2019-11-15T13:16:21 | 226,297,631 | 0 | 1 | null | 2019-12-06T09:55:37 | 2019-12-06T09:55:36 | null | UTF-8 | Python | false | false | 705 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ZhihutopicItem(scrapy.Item):
# 需要查询的关键字
kw = scrapy.Field()
# 标题(话题的标题)
title = scrapy.Field()
# 话题logo
avatar_url = scrapy.Field()
# 描述(对于这段话题的描述)
description = scrapy.Field()
# 关注人数
followers_count = scrapy.Field()
# 搜索的ID
id_no = scrapy.Field()
# 问题数量
questions_count = scrapy.Field()
# 精华数量
top_answer_count = scrapy.Field()
# 话题链接
topic_url = scrapy.Field()
| [
"34021500@qq.com"
] | 34021500@qq.com |
1ee252bb5e4033586bb5ff466c8fb70e388f411a | a3cc7286d4a319cb76f3a44a593c4a18e5ddc104 | /lib/googlecloudsdk/third_party/appengine/proto/ProtocolBuffer.py | 7ac4d7897917425de96ae8fe8ff352d7d4a476a6 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | jordanistan/Google-Cloud-SDK | f2c6bb7abc2f33b9dfaec5de792aa1be91154099 | 42b9d7914c36a30d1e4b84ae2925df7edeca9962 | refs/heads/master | 2023-09-01T01:24:53.495537 | 2023-08-22T01:12:23 | 2023-08-22T01:12:23 | 127,072,491 | 0 | 1 | NOASSERTION | 2023-08-22T01:12:24 | 2018-03-28T02:31:19 | Python | UTF-8 | Python | false | false | 40,045 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2002, Google Inc.
import array
import httplib
import re
import struct
try:
  # NOTE(user): Using non-google-style import to workaround a zipimport_tinypar
  # issue for zip files embedded in par files. See http://b/13811096
  import googlecloudsdk.third_party.appengine.proto.proto1 as proto1
except ImportError:
  # proto1 is unavailable (missing deps / strange env such as GAE): define
  # local placeholder exception classes with the same names so the rest of
  # this module and its callers can still raise/catch them.
  class ProtocolBufferDecodeError(Exception): pass
  class ProtocolBufferEncodeError(Exception): pass
  class ProtocolBufferReturnError(Exception): pass
else:
  # proto1 imported fine: re-export its exception classes so isinstance()
  # checks interoperate with code that uses proto1 directly.
  ProtocolBufferDecodeError = proto1.ProtocolBufferDecodeError
  ProtocolBufferEncodeError = proto1.ProtocolBufferEncodeError
  ProtocolBufferReturnError = proto1.ProtocolBufferReturnError

# Public names exported by this module.
__all__ = ['ProtocolMessage', 'Encoder', 'Decoder',
           'ExtendableProtocolMessage',
           'ProtocolBufferDecodeError',
           'ProtocolBufferEncodeError',
           'ProtocolBufferReturnError']

# Matches an absolute http(s) URL, splitting it into (scheme, host, path).
# Used by ProtocolMessage.sendCommand to follow HTTP 302 redirects.
URL_RE = re.compile('^(https?)://([^/]+)(/.*)$')
class ProtocolMessage:
  """
  The parent class of all protocol buffers.

  NOTE: the methods that unconditionally raise NotImplementedError are
  reimplemented by the subclasses of this class.
  Subclasses are automatically generated by tools/protocol_converter.

  Encoding methods can raise ProtocolBufferEncodeError if a value for an
  integer or long field is too large, or if any required field is not set.
  Decoding methods can raise ProtocolBufferDecodeError if they couldn't
  decode correctly, or the decoded message doesn't have all required fields.
  """

  #####################################
  #  methods you should use           #
  #####################################

  def __init__(self, contents=None):
    """Construct a new protocol buffer, with optional starting contents
    in binary protocol buffer format."""
    raise NotImplementedError

  def Clear(self):
    """Erases all fields of protocol buffer (& resets to defaults
    if fields have defaults)."""
    raise NotImplementedError

  def IsInitialized(self, debug_strs=None):
    """returns true iff all required fields have been set."""
    raise NotImplementedError

  def Encode(self):
    """Returns a string representing the protocol buffer object.

    Prefers the C++-backed _CEncode(); falls back to the pure-Python
    Encoder when the subclass does not provide a C++ implementation.
    """
    try:
      return self._CEncode()
    except (NotImplementedError, AttributeError):
      e = Encoder()
      self.Output(e)
      return e.buffer().tostring()

  def SerializeToString(self):
    """Same as Encode(), but has same name as proto2's serialize function."""
    return self.Encode()

  def SerializePartialToString(self):
    """Returns a string representing the protocol buffer object.

    Same as SerializeToString() but does not enforce required fields are set.
    """
    try:
      return self._CEncodePartial()
    except (NotImplementedError, AttributeError):
      e = Encoder()
      self.OutputPartial(e)
      return e.buffer().tostring()

  def _CEncode(self):
    """Call into C++ encode code.

    Generated protocol buffer classes will override this method to
    provide C++-based serialization.  If a subclass does not
    implement this method, Encode() will fall back to
    using pure-Python encoding.
    """
    raise NotImplementedError

  def _CEncodePartial(self):
    """Same as _CEncode, except does not encode missing required fields."""
    raise NotImplementedError

  def ParseFromString(self, s):
    """Reads data from the string 's'.

    Raises a ProtocolBufferDecodeError if, after successfully reading
    in the contents of 's', this protocol message is still not initialized."""
    self.Clear()
    self.MergeFromString(s)

  def ParsePartialFromString(self, s):
    """Reads data from the string 's'.
    Does not enforce required fields are set."""
    self.Clear()
    self.MergePartialFromString(s)

  def MergeFromString(self, s):
    """Adds in data from the string 's'.

    Raises a ProtocolBufferDecodeError if, after successfully merging
    in the contents of 's', this protocol message is still not initialized."""
    self.MergePartialFromString(s)
    dbg = []
    if not self.IsInitialized(dbg):
      raise ProtocolBufferDecodeError, '\n\t'.join(dbg)

  def MergePartialFromString(self, s):
    """Merges in data from the string 's'.
    Does not enforce required fields are set."""
    try:
      self._CMergeFromString(s)
    except (NotImplementedError, AttributeError):
      # If we can't call into C++ to deserialize the string, use
      # the (much slower) pure-Python implementation.
      a = array.array('B')
      a.fromstring(s)
      d = Decoder(a, 0, len(a))
      self.TryMerge(d)

  def _CMergeFromString(self, s):
    """Call into C++ parsing code to merge from a string.

    Does *not* check IsInitialized() before returning.

    Generated protocol buffer classes will override this method to
    provide C++-based deserialization.  If a subclass does not
    implement this method, MergeFromString() will fall back to
    using pure-Python parsing.
    """
    raise NotImplementedError

  def __getstate__(self):
    """Return the pickled representation of the data inside protocol buffer,
    which is the same as its binary-encoded representation (as a string)."""
    return self.Encode()

  def __setstate__(self, contents_):
    """Restore the pickled representation of the data inside protocol buffer.
    Note that the mechanism underlying pickle.load() does not call __init__."""
    self.__init__(contents=contents_)

  def sendCommand(self, server, url, response, follow_redirects=1,
                  secure=0, keyfile=None, certfile=None):
    """posts the protocol buffer to the desired url on the server
    and puts the return data into the protocol buffer 'response'

    NOTE: The underlying socket raises the 'error' exception
    for all I/O related errors (can't connect, etc.).

    If 'response' is None, the server's PB response will be ignored.

    The optional 'follow_redirects' argument indicates the number
    of HTTP redirects that are followed before giving up and raising an
    exception.  The default is 1.

    If 'secure' is true, HTTPS will be used instead of HTTP.  Also,
    'keyfile' and 'certfile' may be set for client authentication.
    """
    data = self.Encode()
    if secure:
      if keyfile and certfile:
        conn = httplib.HTTPSConnection(server, key_file=keyfile,
                                       cert_file=certfile)
      else:
        conn = httplib.HTTPSConnection(server)
    else:
      conn = httplib.HTTPConnection(server)
    conn.putrequest("POST", url)
    conn.putheader("Content-Length", "%d" %len(data))
    conn.endheaders()
    conn.send(data)
    resp = conn.getresponse()
    # Follow at most 'follow_redirects' HTTP 302 responses, recursing with a
    # decremented budget; the Location header is split by URL_RE.
    if follow_redirects > 0 and resp.status == 302:
      m = URL_RE.match(resp.getheader('Location'))
      if m:
        protocol, server, url = m.groups()
        return self.sendCommand(server, url, response,
                                follow_redirects=follow_redirects - 1,
                                secure=(protocol == 'https'),
                                keyfile=keyfile,
                                certfile=certfile)
    if resp.status != 200:
      raise ProtocolBufferReturnError(resp.status)
    if response is not None:
      response.ParseFromString(resp.read())
    return response

  def sendSecureCommand(self, server, keyfile, certfile, url, response,
                        follow_redirects=1):
    """posts the protocol buffer via https to the desired url on the server,
    using the specified key and certificate files, and puts the return
    data int othe protocol buffer 'response'.

    See caveats in sendCommand.

    You need an SSL-aware build of the Python2 interpreter to use this command.
    (Python1 is not supported).  An SSL build of python2.2 is in
    /home/build/buildtools/python-ssl-2.2 .  An SSL build of python is
    standard on all prod machines.

    keyfile: Contains our private RSA key
    certfile: Contains SSL certificate for remote host

    Specify None for keyfile/certfile if you don't want to do client auth.
    """
    return self.sendCommand(server, url, response,
                            follow_redirects=follow_redirects,
                            secure=1, keyfile=keyfile, certfile=certfile)

  def __str__(self, prefix="", printElemNumber=0):
    """Returns nicely formatted contents of this protocol buffer."""
    raise NotImplementedError

  def ToASCII(self):
    """Returns the protocol buffer as a human-readable string."""
    return self._CToASCII(ProtocolMessage._SYMBOLIC_FULL_ASCII)

  def ToShortASCII(self):
    """Returns the protocol buffer as an ASCII string.

    The output is short, leaving out newlines and some other niceties.
    Defers to the C++ ProtocolPrinter class in SYMBOLIC_SHORT mode.
    """
    return self._CToASCII(ProtocolMessage._SYMBOLIC_SHORT_ASCII)

  # Output-format levels passed to _CToASCII.  Note that these must be
  # consistent with the ProtocolPrinter::Level C++ enum.
  _NUMERIC_ASCII = 0
  _SYMBOLIC_SHORT_ASCII = 1
  _SYMBOLIC_FULL_ASCII = 2

  def _CToASCII(self, output_format):
    """Calls into C++ ASCII-generating code.

    Generated protocol buffer classes will override this method to provide
    C++-based ASCII output.
    """
    raise NotImplementedError

  def ParseASCII(self, ascii_string):
    """Parses a string generated by ToASCII() or by the C++ DebugString()
    method, initializing this protocol buffer with its contents.  This method
    raises a ValueError if it encounters an unknown field.
    """
    raise NotImplementedError

  def ParseASCIIIgnoreUnknown(self, ascii_string):
    """Parses a string generated by ToASCII() or by the C++ DebugString()
    method, initializing this protocol buffer with its contents.  Ignores
    unknown fields.
    """
    raise NotImplementedError

  def Equals(self, other):
    """Returns whether or not this protocol buffer is equivalent to another.
    This assumes that self and other are of the same type.
    """
    raise NotImplementedError

  def __eq__(self, other):
    """Implementation of operator ==."""
    # If self and other are of different types we return NotImplemented, which
    # tells the Python interpreter to try some other methods of measuring
    # equality before finally performing an identity comparison.  This allows
    # other classes to implement custom __eq__ or __ne__ methods.
    # See http://docs.sympy.org/_sources/python-comparisons.txt
    if other.__class__ is self.__class__:
      return self.Equals(other)
    return NotImplemented

  def __ne__(self, other):
    """Implementation of operator !=."""
    # We repeat code for __ne__ instead of returning "not (self == other)"
    # so that we can return NotImplemented when comparing against an object of
    # a different type.
    # See http://bugs.python.org/msg76374 for an example of when __ne__ might
    # return something other than the Boolean opposite of __eq__.
    if other.__class__ is self.__class__:
      return not self.Equals(other)
    return NotImplemented

  #####################################
  #  methods power-users might want   #
  #####################################

  def Output(self, e):
    """write self to the encoder 'e'.

    Raises ProtocolBufferEncodeError if required fields are missing."""
    dbg = []
    if not self.IsInitialized(dbg):
      raise ProtocolBufferEncodeError, '\n\t'.join(dbg)
    self.OutputUnchecked(e)
    return

  def OutputUnchecked(self, e):
    """write self to the encoder 'e', don't check for initialization."""
    raise NotImplementedError

  def OutputPartial(self, e):
    """write self to the encoder 'e', don't check for initialization and
    don't assume required fields exist."""
    raise NotImplementedError

  def Parse(self, d):
    """reads data from the Decoder 'd'."""
    self.Clear()
    self.Merge(d)
    return

  def Merge(self, d):
    """merges data from the Decoder 'd'.

    Raises ProtocolBufferDecodeError if required fields are missing after
    the merge."""
    self.TryMerge(d)
    dbg = []
    if not self.IsInitialized(dbg):
      raise ProtocolBufferDecodeError, '\n\t'.join(dbg)
    return

  def TryMerge(self, d):
    """merges data from the Decoder 'd'."""
    raise NotImplementedError

  def CopyFrom(self, pb):
    """copy data from another protocol buffer"""
    if (pb == self): return
    self.Clear()
    self.MergeFrom(pb)

  def MergeFrom(self, pb):
    """merge data from another protocol buffer"""
    raise NotImplementedError

  #####################################
  #  helper methods for subclasses    #
  #####################################

  def lengthVarInt32(self, n):
    """Returns the number of bytes needed to varint-encode the int32 'n'."""
    return self.lengthVarInt64(n)

  def lengthVarInt64(self, n):
    """Returns the number of bytes needed to varint-encode the int64 'n'."""
    if n < 0:
      return 10     # ceil(64/7): negative values always take the full width
    result = 0
    while 1:
      result += 1
      n >>= 7
      if n == 0:
        break
    return result

  def lengthString(self, n):
    """Returns the encoded size of a string field whose payload is n bytes:
    the varint length prefix plus the payload itself."""
    return self.lengthVarInt32(n) + n

  def DebugFormat(self, value):
    """Default debug formatting: plain str() conversion."""
    return "%s" % value

  def DebugFormatInt32(self, value):
    """Formats an int32 for debug output; large magnitudes print as hex."""
    if (value <= -2000000000 or value >= 2000000000):
      return self.DebugFormatFixed32(value)
    return "%d" % value

  def DebugFormatInt64(self, value):
    """Formats an int64 for debug output; large magnitudes print as hex."""
    if (value <= -20000000000000 or value >= 20000000000000):
      return self.DebugFormatFixed64(value)
    return "%d" % value

  def DebugFormatString(self, value):
    """Formats a string for debug output as a double-quoted literal."""
    # For now we only escape the bare minimum to insure interoperability
    # and redability. In the future we may want to mimick the c++ behavior
    # more closely, but this will make the code a lot more messy.
    def escape(c):
      o = ord(c)
      if o == 10: return r"\n"   # optional escape
      if o == 39: return r"\'"   # optional escape

      if o == 34: return r'\"'   # necessary escape
      if o == 92: return r"\\"   # necessary escape

      if o >= 127 or o < 32: return "\\%03o" % o # necessary escapes
      return c
    return '"' + "".join([escape(c) for c in value]) + '"'

  def DebugFormatFloat(self, value):
    """Formats a float for debug output with a trailing 'f'."""
    return "%ff" % value

  def DebugFormatFixed32(self, value):
    """Formats a fixed32 as unsigned hex (negatives wrap modulo 2**32)."""
    if (value < 0): value += (1L<<32)
    return "0x%x" % value

  def DebugFormatFixed64(self, value):
    """Formats a fixed64 as unsigned hex (negatives wrap modulo 2**64)."""
    if (value < 0): value += (1L<<64)
    return "0x%x" % value

  def DebugFormatBool(self, value):
    """Formats a bool for debug output as 'true' or 'false'."""
    if value:
      return "true"
    else:
      return "false"
# Field type codes; these must match Proto::Type and
# net/proto/protocoltype.proto.
TYPE_DOUBLE = 1
TYPE_FLOAT = 2
TYPE_INT64 = 3
TYPE_UINT64 = 4
TYPE_INT32 = 5
TYPE_FIXED64 = 6
TYPE_FIXED32 = 7
TYPE_BOOL = 8
TYPE_STRING = 9
TYPE_GROUP = 10
TYPE_FOREIGN = 11

# Maps a field type code to the ProtocolMessage.DebugFormat* method used to
# render an extension value of that type in debug output.
_TYPE_TO_DEBUG_STRING = {
    TYPE_INT32: ProtocolMessage.DebugFormatInt32,
    TYPE_INT64: ProtocolMessage.DebugFormatInt64,
    TYPE_UINT64: ProtocolMessage.DebugFormatInt64,
    TYPE_FLOAT: ProtocolMessage.DebugFormatFloat,
    TYPE_STRING: ProtocolMessage.DebugFormatString,
    TYPE_FIXED32: ProtocolMessage.DebugFormatFixed32,
    TYPE_FIXED64: ProtocolMessage.DebugFormatFixed64,
    TYPE_BOOL: ProtocolMessage.DebugFormatBool }
# users of protocol buffers usually won't need to concern themselves
# with either Encoders or Decoders.
class Encoder:
  """Serializes values into the protocol buffer wire format.

  Bytes are accumulated in an array.array('B'); callers retrieve the result
  via buffer().  Most users of protocol buffers won't need to use this class
  directly.
  """

  # Wire-format types (the low 3 bits of a field tag).
  NUMERIC = 0
  DOUBLE = 1
  STRING = 2
  STARTGROUP = 3
  ENDGROUP = 4
  FLOAT = 5
  MAX_TYPE = 6

  def __init__(self):
    # Output accumulator of unsigned bytes.
    self.buf = array.array('B')
    return

  def buffer(self):
    """Returns the underlying array.array('B') of encoded bytes."""
    return self.buf

  def put8(self, v):
    """Appends one unsigned byte; raises if v is out of [0, 2**8)."""
    if v < 0 or v >= (1<<8): raise ProtocolBufferEncodeError, "u8 too big"
    self.buf.append(v & 255)
    return

  def put16(self, v):
    """Appends a little-endian unsigned 16-bit value."""
    if v < 0 or v >= (1<<16): raise ProtocolBufferEncodeError, "u16 too big"
    self.buf.append((v >> 0) & 255)
    self.buf.append((v >> 8) & 255)
    return

  def put32(self, v):
    """Appends a little-endian unsigned 32-bit value."""
    if v < 0 or v >= (1L<<32): raise ProtocolBufferEncodeError, "u32 too big"
    self.buf.append((v >> 0) & 255)
    self.buf.append((v >> 8) & 255)
    self.buf.append((v >> 16) & 255)
    self.buf.append((v >> 24) & 255)
    return

  def put64(self, v):
    """Appends a little-endian unsigned 64-bit value."""
    if v < 0 or v >= (1L<<64): raise ProtocolBufferEncodeError, "u64 too big"
    self.buf.append((v >> 0) & 255)
    self.buf.append((v >> 8) & 255)
    self.buf.append((v >> 16) & 255)
    self.buf.append((v >> 24) & 255)
    self.buf.append((v >> 32) & 255)
    self.buf.append((v >> 40) & 255)
    self.buf.append((v >> 48) & 255)
    self.buf.append((v >> 56) & 255)
    return

  def putVarInt32(self, v):
    """Appends a signed 32-bit value as a varint (negatives take 10 bytes).

    Profiling has shown this code to be very performance critical
    so we duplicate code, go for early exits when possible, etc.

    VarInt32 gets more unrolling because VarInt32s are far and away
    the most common element in protobufs (field tags and string
    lengths), so they get more attention.  They're also more
    likely to fit in one byte (string lengths again), so we
    check and bail out early if possible.
    """
    buf_append = self.buf.append  # cache attribute lookup
    # Fast path: value fits in a single varint byte (no continuation bit).
    if v & 127 == v:
      buf_append(v)
      return
    if v >= 0x80000000 or v < -0x80000000:  # python2.4 doesn't fold constants
      raise ProtocolBufferEncodeError, "int32 too big"
    if v < 0:
      # Negative values are sign-extended to 64 bits (two's complement).
      v += 0x10000000000000000
    while True:
      bits = v & 127
      v >>= 7
      if v:
        bits |= 128  # continuation bit: more bytes follow
      buf_append(bits)
      if not v:
        break
    return

  def putVarInt64(self, v):
    """Appends a signed 64-bit value as a varint."""
    buf_append = self.buf.append
    if v >= 0x8000000000000000 or v < -0x8000000000000000:
      raise ProtocolBufferEncodeError, "int64 too big"
    if v < 0:
      v += 0x10000000000000000
    while True:
      bits = v & 127
      v >>= 7
      if v:
        bits |= 128
      buf_append(bits)
      if not v:
        break
    return

  def putVarUint64(self, v):
    """Appends an unsigned 64-bit value as a varint."""
    buf_append = self.buf.append
    if v < 0 or v >= 0x10000000000000000:
      raise ProtocolBufferEncodeError, "uint64 too big"
    while True:
      bits = v & 127
      v >>= 7
      if v:
        bits |= 128
      buf_append(bits)
      if not v:
        break
    return

  # TODO: should we make sure that v actually has no more precision than
  # float (so it comes out exactly as it goes in)?  Probably not -
  # users expect their value to be rounded, and they would be
  # annoyed if we forced them do it themselves.
  def putFloat(self, v):
    """Appends a little-endian IEEE-754 single-precision float (4 bytes)."""
    a = array.array('B')
    a.fromstring(struct.pack("<f", v))
    self.buf.extend(a)
    return

  def putDouble(self, v):
    """Appends a little-endian IEEE-754 double-precision float (8 bytes)."""
    a = array.array('B')
    a.fromstring(struct.pack("<d", v))
    self.buf.extend(a)
    return

  def putBoolean(self, v):
    """Appends a boolean as a single byte, 1 or 0."""
    if v:
      self.buf.append(1)
    else:
      self.buf.append(0)
    return

  def putPrefixedString(self, v):
    """Appends a varint length prefix followed by the string bytes."""
    # This change prevents corrupted encoding an YouTube, where
    # our default encoding is utf-8 and unicode strings may occasionally be
    # passed into ProtocolBuffers.
    v = str(v)
    self.putVarInt32(len(v))
    self.buf.fromstring(v)
    return

  def putRawString(self, v):
    """Appends the raw string bytes with no length prefix."""
    self.buf.fromstring(v)

  # Maps a field type code to the Encoder method that writes that type.
  _TYPE_TO_METHOD = {
      TYPE_DOUBLE: putDouble,
      TYPE_FLOAT: putFloat,
      TYPE_FIXED64: put64,
      TYPE_FIXED32: put32,
      TYPE_INT32: putVarInt32,
      TYPE_INT64: putVarInt64,
      TYPE_UINT64: putVarUint64,
      TYPE_BOOL: putBoolean,
      TYPE_STRING: putPrefixedString }

  # Encoded size in bytes for the fixed-width field types.
  _TYPE_TO_BYTE_SIZE = {
      TYPE_DOUBLE: 8,
      TYPE_FLOAT: 4,
      TYPE_FIXED64: 8,
      TYPE_FIXED32: 4,
      TYPE_BOOL: 1 }
class Decoder:
  """Deserializes values from the protocol buffer wire format.

  Reads from a slice [idx, limit) of an array of unsigned bytes.  Raises
  ProtocolBufferDecodeError with message "truncated" when the input ends
  early and "corrupted" when the bytes are malformed.
  """

  def __init__(self, buf, idx, limit):
    # buf: array of unsigned bytes; idx: current read position;
    # limit: one past the last readable byte.
    self.buf = buf
    self.idx = idx
    self.limit = limit
    return

  def avail(self):
    """Returns the number of bytes remaining to be read."""
    return self.limit - self.idx

  def buffer(self):
    """Returns the underlying byte array."""
    return self.buf

  def pos(self):
    """Returns the current read position."""
    return self.idx

  def skip(self, n):
    """Advances the read position by n bytes."""
    if self.idx + n > self.limit: raise ProtocolBufferDecodeError, "truncated"
    self.idx += n
    return

  def skipData(self, tag):
    """Skips past one field value whose wire type is taken from 'tag'.

    Groups are skipped recursively; the closing ENDGROUP tag must carry the
    same field number as the opening STARTGROUP tag.
    """
    t = tag & 7                     # tag format type (low 3 bits)
    if t == Encoder.NUMERIC:
      self.getVarInt64()
    elif t == Encoder.DOUBLE:
      self.skip(8)
    elif t == Encoder.STRING:
      n = self.getVarInt32()
      self.skip(n)
    elif t == Encoder.STARTGROUP:
      while 1:
        t = self.getVarInt32()
        if (t & 7) == Encoder.ENDGROUP:
          break
        else:
          self.skipData(t)
      if (t - Encoder.ENDGROUP) != (tag - Encoder.STARTGROUP):
        raise ProtocolBufferDecodeError, "corrupted"
    elif t == Encoder.ENDGROUP:
      # A bare ENDGROUP with no matching STARTGROUP is invalid.
      raise ProtocolBufferDecodeError, "corrupted"
    elif t == Encoder.FLOAT:
      self.skip(4)
    else:
      raise ProtocolBufferDecodeError, "corrupted"

  # These are all unsigned gets.
  def get8(self):
    """Reads one unsigned byte."""
    if self.idx >= self.limit: raise ProtocolBufferDecodeError, "truncated"
    c = self.buf[self.idx]
    self.idx += 1
    return c

  def get16(self):
    """Reads a little-endian unsigned 16-bit value."""
    if self.idx + 2 > self.limit: raise ProtocolBufferDecodeError, "truncated"
    c = self.buf[self.idx]
    d = self.buf[self.idx + 1]
    self.idx += 2
    return (d << 8) | c

  def get32(self):
    """Reads a little-endian unsigned 32-bit value."""
    if self.idx + 4 > self.limit: raise ProtocolBufferDecodeError, "truncated"
    c = self.buf[self.idx]
    d = self.buf[self.idx + 1]
    e = self.buf[self.idx + 2]
    f = long(self.buf[self.idx + 3])
    self.idx += 4
    return (f << 24) | (e << 16) | (d << 8) | c

  def get64(self):
    """Reads a little-endian unsigned 64-bit value."""
    if self.idx + 8 > self.limit: raise ProtocolBufferDecodeError, "truncated"
    c = self.buf[self.idx]
    d = self.buf[self.idx + 1]
    e = self.buf[self.idx + 2]
    f = long(self.buf[self.idx + 3])
    g = long(self.buf[self.idx + 4])
    h = long(self.buf[self.idx + 5])
    i = long(self.buf[self.idx + 6])
    j = long(self.buf[self.idx + 7])
    self.idx += 8
    return ((j << 56) | (i << 48) | (h << 40) | (g << 32) | (f << 24)
            | (e << 16) | (d << 8) | c)

  def getVarInt32(self):
    """Reads a varint and returns it as a signed 32-bit value.

    getVarInt32 gets different treatment than other integer getter
    functions due to the much larger number of varInt32s and also
    varInt32s that fit in one byte.  See the comment at putVarInt32.
    """
    b = self.get8()
    # Fast path: single-byte varint (no continuation bit set).
    if not (b & 128):
      return b

    result = long(0)
    shift = 0

    while 1:
      result |= (long(b & 127) << shift)
      shift += 7
      if not (b & 128):
        if result >= 0x10000000000000000L:  # (1L << 64):
          raise ProtocolBufferDecodeError, "corrupted"
        break
      if shift >= 64: raise ProtocolBufferDecodeError, "corrupted"
      b = self.get8()

    # Reinterpret the 64-bit two's-complement value as signed, then check
    # that it fits in 32 bits.
    if result >= 0x8000000000000000L:  # (1L << 63)
      result -= 0x10000000000000000L  # (1L << 64)
    if result >= 0x80000000L or result < -0x80000000L:  # (1L << 31)
      raise ProtocolBufferDecodeError, "corrupted"
    return result

  def getVarInt64(self):
    """Reads a varint and returns it as a signed 64-bit value."""
    result = self.getVarUint64()
    if result >= (1L << 63):
      result -= (1L << 64)
    return result

  def getVarUint64(self):
    """Reads a varint and returns it as an unsigned 64-bit value."""
    result = long(0)
    shift = 0
    while 1:
      if shift >= 64: raise ProtocolBufferDecodeError, "corrupted"
      b = self.get8()
      result |= (long(b & 127) << shift)
      shift += 7
      if not (b & 128):
        if result >= (1L << 64): raise ProtocolBufferDecodeError, "corrupted"
        return result
    return result                   # make pychecker happy

  def getFloat(self):
    """Reads a little-endian IEEE-754 single-precision float (4 bytes)."""
    if self.idx + 4 > self.limit: raise ProtocolBufferDecodeError, "truncated"
    a = self.buf[self.idx:self.idx+4]
    self.idx += 4
    return struct.unpack("<f", a)[0]

  def getDouble(self):
    """Reads a little-endian IEEE-754 double-precision float (8 bytes)."""
    if self.idx + 8 > self.limit: raise ProtocolBufferDecodeError, "truncated"
    a = self.buf[self.idx:self.idx+8]
    self.idx += 8
    return struct.unpack("<d", a)[0]

  def getBoolean(self):
    """Reads a boolean byte; only 0 and 1 are valid encodings."""
    b = self.get8()
    if b != 0 and b != 1: raise ProtocolBufferDecodeError, "corrupted"
    return b

  def getPrefixedString(self):
    """Reads a varint length prefix followed by that many string bytes."""
    length = self.getVarInt32()
    if self.idx + length > self.limit:
      raise ProtocolBufferDecodeError, "truncated"
    r = self.buf[self.idx : self.idx + length]
    self.idx += length
    return r.tostring()

  def getRawString(self):
    """Reads all remaining bytes as a string."""
    r = self.buf[self.idx:self.limit]
    self.idx = self.limit
    return r.tostring()

  # Maps a field type code to the Decoder method that reads that type.
  _TYPE_TO_METHOD = {
      TYPE_DOUBLE: getDouble,
      TYPE_FLOAT: getFloat,
      TYPE_FIXED64: get64,
      TYPE_FIXED32: get32,
      TYPE_INT32: getVarInt32,
      TYPE_INT64: getVarInt64,
      TYPE_UINT64: getVarUint64,
      TYPE_BOOL: getBoolean,
      TYPE_STRING: getPrefixedString }
#####################################
# extensions #
#####################################
class ExtensionIdentifier(object):
__slots__ = ('full_name', 'number', 'field_type', 'wire_tag', 'is_repeated',
'default', 'containing_cls', 'composite_cls', 'message_name')
def __init__(self, full_name, number, field_type, wire_tag, is_repeated,
default):
self.full_name = full_name
self.number = number
self.field_type = field_type
self.wire_tag = wire_tag
self.is_repeated = is_repeated
self.default = default
class ExtendableProtocolMessage(ProtocolMessage):
def HasExtension(self, extension):
"""Checks if the message contains a certain non-repeated extension."""
self._VerifyExtensionIdentifier(extension)
return extension in self._extension_fields
def ClearExtension(self, extension):
"""Clears the value of extension, so that HasExtension() returns false or
ExtensionSize() returns 0."""
self._VerifyExtensionIdentifier(extension)
if extension in self._extension_fields:
del self._extension_fields[extension]
def GetExtension(self, extension, index=None):
"""Gets the extension value for a certain extension.
Args:
extension: The ExtensionIdentifier for the extension.
index: The index of element to get in a repeated field. Only needed if
the extension is repeated.
Returns:
The value of the extension if exists, otherwise the default value of the
extension will be returned.
"""
self._VerifyExtensionIdentifier(extension)
if extension in self._extension_fields:
result = self._extension_fields[extension]
else:
if extension.is_repeated:
result = []
elif extension.composite_cls:
result = extension.composite_cls()
else:
result = extension.default
if extension.is_repeated:
result = result[index]
return result
def SetExtension(self, extension, *args):
"""Sets the extension value for a certain scalar type extension.
Arg varies according to extension type:
- Singular:
message.SetExtension(extension, value)
- Repeated:
message.SetExtension(extension, index, value)
where
extension: The ExtensionIdentifier for the extension.
index: The index of element to set in a repeated field. Only needed if
the extension is repeated.
value: The value to set.
Raises:
TypeError if a message type extension is given.
"""
self._VerifyExtensionIdentifier(extension)
if extension.composite_cls:
raise TypeError(
'Cannot assign to extension "%s" because it is a composite type.' %
extension.full_name)
if extension.is_repeated:
if (len(args) != 2):
raise TypeError(
'SetExtension(extension, index, value) for repeated extension '
'takes exactly 3 arguments: (%d given)' % len(args))
index = args[0]
value = args[1]
self._extension_fields[extension][index] = value
else:
if (len(args) != 1):
raise TypeError(
'SetExtension(extension, value) for singular extension '
'takes exactly 3 arguments: (%d given)' % len(args))
value = args[0]
self._extension_fields[extension] = value
def MutableExtension(self, extension, index=None):
"""Gets a mutable reference of a message type extension.
For repeated extension, index must be specified, and only one element will
be returned. For optional extension, if the extension does not exist, a new
message will be created and set in parent message.
Args:
extension: The ExtensionIdentifier for the extension.
index: The index of element to mutate in a repeated field. Only needed if
the extension is repeated.
Returns:
The mutable message reference.
Raises:
TypeError if non-message type extension is given.
"""
self._VerifyExtensionIdentifier(extension)
if extension.composite_cls is None:
raise TypeError(
'MutableExtension() cannot be applied to "%s", because it is not a '
'composite type.' % extension.full_name)
if extension.is_repeated:
if index is None:
raise TypeError(
'MutableExtension(extension, index) for repeated extension '
'takes exactly 2 arguments: (1 given)')
return self.GetExtension(extension, index)
if extension in self._extension_fields:
return self._extension_fields[extension]
else:
result = extension.composite_cls()
self._extension_fields[extension] = result
return result
def ExtensionList(self, extension):
"""Returns a mutable list of extensions.
Raises:
TypeError if the extension is not repeated.
"""
self._VerifyExtensionIdentifier(extension)
if not extension.is_repeated:
raise TypeError(
'ExtensionList() cannot be applied to "%s", because it is not a '
'repeated extension.' % extension.full_name)
if extension in self._extension_fields:
return self._extension_fields[extension]
result = []
self._extension_fields[extension] = result
return result
def ExtensionSize(self, extension):
"""Returns the size of a repeated extension.
Raises:
TypeError if the extension is not repeated.
"""
self._VerifyExtensionIdentifier(extension)
if not extension.is_repeated:
raise TypeError(
'ExtensionSize() cannot be applied to "%s", because it is not a '
'repeated extension.' % extension.full_name)
if extension in self._extension_fields:
return len(self._extension_fields[extension])
return 0
def AddExtension(self, extension, value=None):
"""Appends a new element into a repeated extension.
Arg varies according to the extension field type:
- Scalar/String:
message.AddExtension(extension, value)
- Message:
mutable_message = AddExtension(extension)
Args:
extension: The ExtensionIdentifier for the extension.
value: The value of the extension if the extension is scalar/string type.
The value must NOT be set for message type extensions; set values on
the returned message object instead.
Returns:
A mutable new message if it's a message type extension, or None otherwise.
Raises:
TypeError if the extension is not repeated, or value is given for message
type extensions.
"""
self._VerifyExtensionIdentifier(extension)
if not extension.is_repeated:
raise TypeError(
'AddExtension() cannot be applied to "%s", because it is not a '
'repeated extension.' % extension.full_name)
if extension in self._extension_fields:
field = self._extension_fields[extension]
else:
field = []
self._extension_fields[extension] = field
# Composite field
if extension.composite_cls:
if value is not None:
raise TypeError(
'value must not be set in AddExtension() for "%s", because it is '
'a message type extension. Set values on the returned message '
'instead.' % extension.full_name)
msg = extension.composite_cls()
field.append(msg)
return msg
# Scalar and string field
field.append(value)
def _VerifyExtensionIdentifier(self, extension):
if extension.containing_cls != self.__class__:
raise TypeError("Containing type of %s is %s, but not %s."
% (extension.full_name,
extension.containing_cls.__name__,
self.__class__.__name__))
def _MergeExtensionFields(self, x):
for ext, val in x._extension_fields.items():
if ext.is_repeated:
for i in xrange(len(val)):
if ext.composite_cls is None:
self.AddExtension(ext, val[i])
else:
self.AddExtension(ext).MergeFrom(val[i])
else:
if ext.composite_cls is None:
self.SetExtension(ext, val)
else:
self.MutableExtension(ext).MergeFrom(val)
def _ListExtensions(self):
result = [ext for ext in self._extension_fields.keys()
if (not ext.is_repeated) or self.ExtensionSize(ext) > 0]
result.sort(key = lambda item: item.number)
return result
def _ExtensionEquals(self, x):
extensions = self._ListExtensions()
if extensions != x._ListExtensions():
return False
for ext in extensions:
if ext.is_repeated:
if self.ExtensionSize(ext) != x.ExtensionSize(ext): return False
for e1, e2 in zip(self.ExtensionList(ext),
x.ExtensionList(ext)):
if e1 != e2: return False
else:
if self.GetExtension(ext) != x.GetExtension(ext): return False
return True
def _OutputExtensionFields(self, out, partial, extensions, start_index,
end_field_number):
"""Serialize a range of extensions.
To generate canonical output when encoding, we interleave fields and
extensions to preserve tag order.
Generated code will prepare a list of ExtensionIdentifier sorted in field
number order and call this method to serialize a specific range of
extensions. The range is specified by the two arguments, start_index and
end_field_number.
The method will serialize all extensions[i] with i >= start_index and
extensions[i].number < end_field_number. Since extensions argument is sorted
by field_number, this is a contiguous range; the first index j not included
in that range is returned. The return value can be used as the start_index
in the next call to serialize the next range of extensions.
Args:
extensions: A list of ExtensionIdentifier sorted in field number order.
start_index: The start index in the extensions list.
end_field_number: The end field number of the extension range.
Returns:
The first index that is not in the range. Or the size of extensions if all
the extensions are within the range.
"""
def OutputSingleField(ext, value):
out.putVarInt32(ext.wire_tag)
if ext.field_type == TYPE_GROUP:
if partial:
value.OutputPartial(out)
else:
value.OutputUnchecked(out)
out.putVarInt32(ext.wire_tag + 1) # End the group
elif ext.field_type == TYPE_FOREIGN:
if partial:
out.putVarInt32(value.ByteSizePartial())
value.OutputPartial(out)
else:
out.putVarInt32(value.ByteSize())
value.OutputUnchecked(out)
else:
Encoder._TYPE_TO_METHOD[ext.field_type](out, value)
size = len(extensions)
for ext_index in xrange(start_index, size):
ext = extensions[ext_index]
if ext.number >= end_field_number:
# exceeding extension range end.
return ext_index
if ext.is_repeated:
for i in xrange(len(self._extension_fields[ext])):
OutputSingleField(ext, self._extension_fields[ext][i])
else:
OutputSingleField(ext, self._extension_fields[ext])
return size
def _ParseOneExtensionField(self, wire_tag, d):
number = wire_tag >> 3
if number in self._extensions_by_field_number:
ext = self._extensions_by_field_number[number]
if wire_tag != ext.wire_tag:
# wire_tag doesn't match; discard as unknown field.
return
if ext.field_type == TYPE_FOREIGN:
length = d.getVarInt32()
tmp = Decoder(d.buffer(), d.pos(), d.pos() + length)
if ext.is_repeated:
self.AddExtension(ext).TryMerge(tmp)
else:
self.MutableExtension(ext).TryMerge(tmp)
d.skip(length)
elif ext.field_type == TYPE_GROUP:
if ext.is_repeated:
self.AddExtension(ext).TryMerge(d)
else:
self.MutableExtension(ext).TryMerge(d)
else:
value = Decoder._TYPE_TO_METHOD[ext.field_type](d)
if ext.is_repeated:
self.AddExtension(ext, value)
else:
self.SetExtension(ext, value)
else:
# discard unknown extensions.
d.skipData(wire_tag)
def _ExtensionByteSize(self, partial):
size = 0
for extension, value in self._extension_fields.items():
ftype = extension.field_type
tag_size = self.lengthVarInt64(extension.wire_tag)
if ftype == TYPE_GROUP:
tag_size *= 2 # end tag
if extension.is_repeated:
size += tag_size * len(value)
for single_value in value:
size += self._FieldByteSize(ftype, single_value, partial)
else:
size += tag_size + self._FieldByteSize(ftype, value, partial)
return size
def _FieldByteSize(self, ftype, value, partial):
size = 0
if ftype == TYPE_STRING:
size = self.lengthString(len(value))
elif ftype == TYPE_FOREIGN or ftype == TYPE_GROUP:
if partial:
size = self.lengthString(value.ByteSizePartial())
else:
size = self.lengthString(value.ByteSize())
elif ftype == TYPE_INT64 or \
ftype == TYPE_UINT64 or \
ftype == TYPE_INT32:
size = self.lengthVarInt64(value)
else:
if ftype in Encoder._TYPE_TO_BYTE_SIZE:
size = Encoder._TYPE_TO_BYTE_SIZE[ftype]
else:
raise AssertionError(
'Extension type %d is not recognized.' % ftype)
return size
def _ExtensionDebugString(self, prefix, printElemNumber):
res = ''
extensions = self._ListExtensions()
for extension in extensions:
value = self._extension_fields[extension]
if extension.is_repeated:
cnt = 0
for e in value:
elm=""
if printElemNumber: elm = "(%d)" % cnt
if extension.composite_cls is not None:
res += prefix + "[%s%s] {\n" % \
(extension.full_name, elm)
res += e.__str__(prefix + " ", printElemNumber)
res += prefix + "}\n"
else:
if extension.composite_cls is not None:
res += prefix + "[%s] {\n" % extension.full_name
res += value.__str__(
prefix + " ", printElemNumber)
res += prefix + "}\n"
else:
if extension.field_type in _TYPE_TO_DEBUG_STRING:
text_value = _TYPE_TO_DEBUG_STRING[
extension.field_type](self, value)
else:
text_value = self.DebugFormat(value)
res += prefix + "[%s]: %s\n" % (extension.full_name, text_value)
return res
@staticmethod
def _RegisterExtension(cls, extension, composite_cls=None):
extension.containing_cls = cls
extension.composite_cls = composite_cls
if composite_cls is not None:
extension.message_name = composite_cls._PROTO_DESCRIPTOR_NAME
actual_handle = cls._extensions_by_field_number.setdefault(
extension.number, extension)
if actual_handle is not extension:
raise AssertionError(
'Extensions "%s" and "%s" both try to extend message type "%s" with '
'field number %d.' %
(extension.full_name, actual_handle.full_name,
cls.__name__, extension.number))
| [
"jordan.robison@gmail.com"
] | jordan.robison@gmail.com |
7588cf29b7c6192559f88f22f8cdbfc8c949a6e1 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_035/ch60_2020_04_27_20_18_54_214980.py | 97e2bc41ce7240c34dd8c65e762505a340707549 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | def eh_palindromo(string):
contador = True
e = 0
while contador:
if string[e]==len(string)-e:
contador = True
e =+ 1
else:
contador = False
return contador | [
"you@example.com"
] | you@example.com |
567ec3b3318af7a4312913b815332dea5f0b7607 | 770f6e034a5482d4999b00b182e616f84d95ffdf | /testing/cross_language/json_test.py | 73554eb235f32a0d0092484a2ccf85f03f5de77e | [
"Apache-2.0"
] | permissive | ptylll/tink | f2f274bcb8b6d8449e25ef975e60bff8945d3406 | eafd9283b1d1da1dfc08c5297c101cd4b2d530c5 | refs/heads/master | 2022-11-27T14:55:51.381202 | 2020-08-03T14:39:04 | 2020-08-03T14:39:37 | 284,962,074 | 1 | 0 | Apache-2.0 | 2020-08-04T11:41:22 | 2020-08-04T11:41:21 | null | UTF-8 | Python | false | false | 3,559 | py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cross-language tests for JSON serialization."""
from absl.testing import absltest
from absl.testing import parameterized
from tink.proto import tink_pb2
from util import supported_key_types
from util import testing_servers
def setUpModule():
testing_servers.start('json')
def tearDownModule():
testing_servers.stop()
def _keyset_proto(keyset: bytes) -> tink_pb2.Keyset:
keyset_proto = tink_pb2.Keyset()
keyset_proto.ParseFromString(keyset)
# We sort the keys, since we want keysets to be considered equal even if the
# keys are in different order.
keyset_proto.key.sort(key=lambda k: k.key_id)
return keyset_proto
def _is_equal_keyset(keyset1: bytes, keyset2: bytes) -> bool:
"""Checks if two keyset are equal, and have the exact same keydata.value."""
# Keydata.value are serialized protos. This serialization is usually not
# deterministic, as it is a unsorted list of key value pairs.
# But since JSON serialization does not change keyset.value, we can simply
# require these values to be exactly the same in this test. In other tests,
# this might be too strict.
return _keyset_proto(keyset1) == _keyset_proto(keyset2)
class JsonTest(parameterized.TestCase):
def test_is_equal_keyset(self):
keyset1 = tink_pb2.Keyset()
key11 = keyset1.key.add()
key11.key_id = 21
key12 = keyset1.key.add()
key12.key_id = 42
keyset2 = tink_pb2.Keyset()
key21 = keyset2.key.add()
key21.key_id = 42
key22 = keyset2.key.add()
key22.key_id = 21
self.assertTrue(_is_equal_keyset(keyset1.SerializeToString(),
keyset2.SerializeToString()))
def test_is_not_equal_keyset(self):
keyset1 = tink_pb2.Keyset()
key11 = keyset1.key.add()
key11.key_id = 21
key12 = keyset1.key.add()
key12.key_id = 42
keyset2 = tink_pb2.Keyset()
key3 = keyset2.key.add()
key3.key_id = 21
self.assertFalse(_is_equal_keyset(keyset1.SerializeToString(),
keyset2.SerializeToString()))
def assertEqualKeyset(self, keyset1: bytes, keyset2: bytes):
if not _is_equal_keyset(keyset1, keyset2):
self.fail('these keysets are not equal: \n%s\n \n%s\n'
% (_keyset_proto(keyset1), _keyset_proto(keyset2)))
@parameterized.parameters(
supported_key_types.test_cases(supported_key_types.ALL_KEY_TYPES))
def test_to_from_json(self, key_template_name, supported_langs):
self.assertNotEmpty(supported_langs)
key_template = supported_key_types.KEY_TEMPLATE[key_template_name]
# Take the first supported language to generate the keyset.
keyset = testing_servers.new_keyset(supported_langs[0], key_template)
for to_lang in supported_langs:
json_keyset = testing_servers.keyset_to_json(to_lang, keyset)
for from_lang in supported_langs:
keyset2 = testing_servers.keyset_from_json(from_lang, json_keyset)
self.assertEqualKeyset(keyset, keyset2)
if __name__ == '__main__':
absltest.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
33e78c202f9b437a7c135e49ee2046362470f244 | 32a4c816b70964a0aa1f0370b51e57c93ad0b7be | /finance/helpers/transfers.py | 563db7d32b2043bbe5d1b1e30317224f1c32efb2 | [] | no_license | mithro/timsfinance | 0cba66a9c60b3b7d35e45e2d0304fc2221306951 | fd65bdd77d28ba203bf741f72bb6e102fe3cc8e5 | refs/heads/importer-rewrite | 2020-12-24T17:08:54.281576 | 2013-07-19T08:18:02 | 2013-07-19T08:18:02 | 2,638,754 | 1 | 1 | null | 2013-07-09T05:08:10 | 2011-10-24T19:53:14 | Python | UTF-8 | Python | false | false | 1,977 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: set ts=4 sw=4 et sts=4 ai:
import datetime
from finance import models
from finance.helpers import base
class Transfers(base.Helper):
"""
The linker looks for transactions which should be linked together, such as
transfers between accounts.
"""
# Descriptions which match the following should be looked at
# Anything with "PAYMENT" in it
# Anything with "TRANSFER" in it
TRANSFERS = ("PAYMENT", "PMNT", "TRANSFER", "Direct Debit")
def __init__(self, *args, **kw):
base.Helper.__init__(self, *args, **kw)
self.category = models.Category.objects.get(category_id='transfer')
def associate(self, a, b):
return base.Helper.associate(self, a, b, relationship="TRANSFER")
def handle(self, account, trans):
for desc_match in self.TRANSFERS:
if desc_match in trans.imported_description.upper():
break
else:
return
print
print trans
# If this already had reference set, then done
related = trans.related_transactions(relationship="TRANSFER")
if len(related) > 0:
print " ", related
return
# First attempt to find a transaction 7 days either way with the exact same amount
q = models.Transaction.objects.all(
).filter(imported_entered_date__gt=trans.imported_entered_date-datetime.timedelta(days=7)
).filter(imported_entered_date__lt=trans.imported_entered_date+datetime.timedelta(days=7)
).filter(imported_amount__exact=-trans.imported_amount
)
if len(q) == 1:
r = self.associate(trans, q[0])
print " Exact: ", r
r.save()
trans.primary_category = self.category
trans.save()
q[0].primary_category = self.category
q[0].save()
else:
print " Exact: ", q
| [
"mithro@mithis.com"
] | mithro@mithis.com |
4ea9e978a025b97ef33a8e055d8368b67c18594c | f12ec753dc42958714343b5aa8a11a83f7fe5e1a | /roboticstoolbox/robot/Link.py | 1c5f87aa12d6b47dede69c9ea7f0280730f2b7f2 | [
"MIT"
] | permissive | RPellowski/robotics-toolbox-python | 6bedecccbcf14740c3aa65823b5bab8ba3ade93b | 837b33ebbca1347126a611edb83131e284dd065b | refs/heads/master | 2023-01-01T04:53:23.117118 | 2020-10-12T05:25:47 | 2020-10-12T05:25:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | class Link:
def __init__(self, mesh=None):
# TODO fix the path
self.mesh = mesh
| [
"peter.i.corke@gmail.com"
] | peter.i.corke@gmail.com |
514eecee3ad930fa6d0385a5613b59f1500b339b | acbbcc2daff7538534604e512215a84b2f8e11ba | /Cluster/OMapWordlist.py | 33b2bba6fc02b0ff96c6c2c527f93658e1f4b0ff | [] | no_license | ZhuJiahui/MicroblogDataStreamCompress | 7cfed72b8cb51c171f0d82c243baf62fbef0df55 | a040a6ebb7b449591f79fef8e71f40719ee78c0b | refs/heads/master | 2016-09-06T05:40:51.017616 | 2014-04-06T07:17:16 | 2014-04-06T07:17:16 | 18,484,462 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,584 | py | # -*- coding: utf-8 -*-
'''
Created on 2014年1月5日
@author: ZhuJiahui506
'''
import os
import numpy as np
from Reflect import reflect_vsm_to_wordlist
from TextToolkit import quick_write_list_to_text
import time
def map_word_list(read_directory1, read_directory2, write_filename):
#文件总数
file_number = np.sum([len(files) for root, dirs, files in os.walk(read_directory1)])
result = []
for i in range(file_number):
word_list = []
f = open(read_directory2 + '/' + str(i + 1) + '.txt')
line = f.readline()
while line:
word_list.append(line.strip())
line = f.readline()
f.close()
vsm = np.loadtxt(read_directory1 + '/' + str(i + 1) + '.txt')
vsm = vsm.T
for each in vsm:
result.append(" ".join(reflect_vsm_to_wordlist(each, word_list)))
quick_write_list_to_text(result, write_filename)
if __name__ == '__main__':
start = time.clock()
now_directory = os.getcwd()
root_directory = os.path.dirname(now_directory) + '/'
read_directory1 = 'D:/Local/DataStreamMining/dataset/cluster/topics_data22/original_cluster_center'
read_directory2 = 'D:/Local/DataStreamMining/dataset/cluster/topics_data22/original_merge_wordlist'
write_filename = 'D:/Local/DataStreamMining/dataset/cluster/topics_data22/cluster_text_result.txt'
map_word_list(read_directory1, read_directory2, write_filename)
print 'Total time %f seconds' % (time.clock() - start)
print 'Complete !!!'
| [
"zhujiahui@outlook.com"
] | zhujiahui@outlook.com |
36934efddf72964da5dcd53a82fea8b6a5f1761b | 295efbd5b466d246ff51c3849cea5ff637df6211 | /model1.py | 3bf46b97fdf59bacd284ffebaeefbf6c9db673bc | [] | no_license | allenjianzhe/mip_heuristic | 75b15ce9d9735fdc0f5381bfef8cded4d5348a12 | 1365b63b2b3a3814b271e3bb95fb6671486e84fc | refs/heads/master | 2020-09-26T08:41:57.244645 | 2016-10-12T22:43:09 | 2016-10-12T22:43:09 | 66,394,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,955 | py | #file name 060915.py for MIP1
import sys
#sys.path.append('C:/gurobi562/win64/python27/lib/gurobipy')
sys.path.append('C:/gurobi604/win32/python27/lib/gurobipy') # for apple
from gurobipy import *
from read052815_apple import *
import time
start_time=time.clock()
m = Model('MIP')
M=1000000000
F=600000
def MIP1(m,customer,arc_C):
####################################################################################################
#decision variable X: binary variable. X[customer,i,i+1,m,k]
X = {}
for row in customer:
for i in nodes:
if i == 2:
continue
else:
for k in modes:
for s in departure:
X[int(row[0]),i,k,s]=m.addVar(obj=int(row[2])*arc_trans_cost[int(row[0]),i,k],vtype=GRB.BINARY,name='X_%s_%s_%s_%s'%(int(row[0]),i,k,s))
m.update()
#decision variable y: binary variable of 3PL
global y
global F
for row in customer:
for i in nodes:
y[int(row[0]),i]=m.addVar(obj=int(row[2])*F,vtype=GRB.BINARY,name='y_%s_%s'%(int(row[0]),i))
m.update()
#decision variable: arrive time at each node
t={}
for row in customer:
for i in nodes:
t[int(row[0]),i]=m.addVar(obj=0,vtype='C',name='nodeTime_%s_%s'%(int(row[0]),i))
#decision variable:Time tardiness of customer
T={}
for row in customer:
T[int(row[0])]=m.addVar(obj=int(row[2])*int(row[4]),vtype='C',name='Tardiness_%s'%(int(row[0])))
m.update()
####################################################################################################
#Constraint 3.2 for each customer, each link, only one plan can be selected
for row in customer:
for i in nodes:
if i==2:
continue
else:
expr = LinExpr()
for k in modes:
for s in departure:
expr.addTerms(1.0,X[int(row[0]),i,k,s])
expr.add(y[int(row[0]),i])
m.addConstr(expr, GRB.EQUAL, 1,name='One_%s_%s_%s_%s'%(int(row[0]),i,k,s))
m.update()
####################################################################################################
#constraint 3.4 arc capacity
for k in modes:
for s in departure:
for i in nodes:
if i==2:
continue
else:
expr = LinExpr()
for row in customer:
expr.addTerms(int(row[2]),X[int(row[0]),i,k,s])
expr.addConstant(-1*arc_C[i,k,s])
m.addConstr(expr,GRB.LESS_EQUAL, 0,'arcCapacity_%s_%s_%s_%s'%(int(row[0]),i,k,s))
m.update()
#constraint 3.5 time constraint One
for row in customer:
for i in nodes:
if i==2:
continue
else:
expr = LinExpr()
for k in modes:
for s in departure:
expr.addTerms(dT[i,k,s]+trans_time[i,k],X[int(row[0]),i,k,s])
expr.add(-1*y[int(row[0]),i]*M)
m.addConstr(expr,GRB.LESS_EQUAL,t[int(row[0]),i],name='timeConstr1_%s_%s'%(int(row[0]),i))
m.update()
#definition of T
for row in customer:
for k in modes:
for s in departure:
if X[int(row[0]),1,k,s]>0:
if t[int(row[0]),1]>DD[int(row[0])]:
m.addConstr(T[int(row[0])],GRB.EQUAL,t[int(row[0]),1]-DD[int(row[0])],name='Tardiness_%s'%(int(row[0])) )
m.update()
m.__data=X,y,t,T
return m
| [
"allenjianzhe@yahoo.com"
] | allenjianzhe@yahoo.com |
522c35efeef1b14d4c9cd6776ee2646f511fcc33 | b55f70755712b26688b80a8ba3806a4124fbcd11 | /LinkedList/reverse_linkedlist.py | dc10bda1403d739325203fbdc3e6d560945d7f20 | [] | no_license | Shanshan-IC/Algorithm_Python | a44703a0f33370c47e3e55af70aadeae08d5a1a5 | ace23976d2f1f51141498c4c4ea6bca0039b233f | refs/heads/master | 2021-09-08T07:16:59.576674 | 2018-03-08T09:24:01 | 2018-03-08T09:24:01 | 114,254,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | ''''
http://www.lintcode.com/zh-cn/problem/reverse-linked-list/
'''
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
"""
@param: head: n
@return: The new head of reversed linked list.
"""
def reverse(self, head):
cur = None
while head:
tmp = head.next
head.next = cur
cur = head
head = tmp
return cur
| [
"shanshan.fu15@imperial.ac.uk"
] | shanshan.fu15@imperial.ac.uk |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.