blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
843a9896e1ada59e40bd827a2af39e3d13effdba | 9c7581c3b862174878a5e71609f94b3e5a2de5c9 | /CursoEmVideo/Aula22/ex109/titulo.py | 3136d4230f599c8c02894ae254a298636a7cf2f6 | [
"MIT"
] | permissive | lucashsouza/Desafios-Python | 6d9fdc3500e0d01ce9a75201fc4fe88469928170 | abb5b11ebdfd4c232b4f0427ef41fd96013f2802 | refs/heads/master | 2020-06-21T16:49:32.884025 | 2019-07-23T01:23:07 | 2019-07-23T01:23:07 | 143,765,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | def titulo(mensagem):
print()
print('~' * (len(mensagem)+1))
print(mensagem)
print('~' * (len(mensagem)+1))
print()
| [
"noreply@github.com"
] | lucashsouza.noreply@github.com |
7f0113de4aed2785b90b4ba6789bc0a244c1ed09 | 2daa3894e6d6929fd04145100d8a3be5eedbe21c | /tests/artificial/transf_sqrt/trend_constant/cycle_5/ar_12/test_artificial_1024_sqrt_constant_5_12_0.py | 9a4bfcc628edeec08dcd54acf3df24173d35377f | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Henri-Lo/pyaf | a1f73a0cc807873bd7b79648fe51de9cfd6c126a | 08c968425d85dcace974d90db7f07c845a0fe914 | refs/heads/master | 2021-07-01T12:27:31.600232 | 2017-09-21T11:19:04 | 2017-09-21T11:19:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
dataset = tsds.generate_random_TS(N = 1024 , FREQ = 'D', seed = 0, trendtype = "constant", cycle_length = 5, transform = "sqrt", sigma = 0.0, exog_count = 0, ar_order = 12);
art.process_dataset(dataset); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
8d529d30512884f3d3f678345f7f2c07b0ef5615 | 650076fb94a086e15bdaa5bd2f51ce72df42dce4 | /test/functional/rpc_signrawtransaction.py | 3395fe5a26b85a2f2336d0ede4d5f0e73aa5c243 | [
"MIT"
] | permissive | c0de0x/ErosCore | 548075fe85c46e2bb3946f94361689dbad692da8 | a71767f7ee7105dc83973aac8ac60903b69459c9 | refs/heads/master | 2022-11-25T14:35:59.091923 | 2020-07-30T14:38:39 | 2020-07-30T14:38:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,422 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test transaction signing using the signrawtransaction RPC."""
from test_framework.test_framework import ErosTestFramwork
from test_framework.util import *
class SignRawTransactionsTest(ErosTestFramwork):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def successful_signing_test(self):
"""Create and sign a valid raw transaction with one input.
Expected results:
1) The transaction has a complete set of signatures
2) No script verification error occurred"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'}
]
outputs = {'xwMWGTnBNUmGxMm8vfAdbL45bWXyVTYctd': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys)
# 1) The transaction has a complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], True)
# 2) No script verification error occurred
assert 'errors' not in rawTxSigned
def script_verification_error_test(self):
"""Create and sign a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.
Expected results:
3) The transaction has no complete set of signatures
4) Two script verification errors occurred
5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")
6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7},
# Missing scriptPubKey
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1},
]
scripts = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7,
'scriptPubKey': 'badbadbadbad'}
]
outputs = {'xwMWGTnBNUmGxMm8vfAdbL45bWXyVTYctd': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys)
# 3) The transaction has no complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], False)
# 4) Two script verification errors occurred
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
# 5) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
# 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)
assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])
def run_test(self):
self.successful_signing_test()
self.script_verification_error_test()
if __name__ == '__main__':
SignRawTransactionsTest().main()
| [
"60665036+ErosCore@users.noreply.github.com"
] | 60665036+ErosCore@users.noreply.github.com |
14bcbf43eb9678724906eec240db3f26cfa53cd3 | 2a68b03c923119cc747c4ffcc244477be35134bb | /Algorithm/DFS/dice.py | b52118987a96576b1be898e542042e09aed51736 | [] | no_license | QitaoXu/Lintcode | 0bce9ae15fdd4af1cac376c0bea4465ae5ea6747 | fe411a0590ada6a1a6ae1166c86c585416ac8cda | refs/heads/master | 2020-04-24T20:53:27.258876 | 2019-09-24T23:54:59 | 2019-09-24T23:54:59 | 172,259,064 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,420 | py | class Solution:
def findTarget(self, dices, sides, target):
print("\nnum of dices = %d, num of sides = %d, target = %d" %(dices, sides, target))
results = []
combination = []
found = set()
self.dfs(dices, 0, sides, combination, target, results, found)
return results
def dfs(self, dices, start_index, sides, combination, target, results, found):
if start_index == dices:
if target == 0:
#
# filter duplicates and
# handle corner case like [4] is not a valid combination
# when dices = 2, sides = 4, target = 4
#
if tuple(sorted(combination)) not in found and len(combination) == dices:
results.append(combination.copy())
found.add(tuple(sorted(combination)))
return
for i in range(start_index, dices):
if target <= 0:
return
for side in range(1, sides + 1):
combination.append(side)
self.dfs(dices, i + 1, sides, combination, target - side, results, found)
combination.pop()
solution = Solution()
print(solution.findTarget(3, 5, 10))
print(solution.findTarget(2, 3, 4))
print(solution.findTarget(2, 4, 4))
print(solution.findTarget(3, 4, 4)) | [
"jeremyxuqitao@outlook.com"
] | jeremyxuqitao@outlook.com |
b41ea6ae823b3495b06bde530884580ce3d476c5 | f4e69d05d4bea5198f5bd15c968562fac654c88e | /test/test_certificates_api.py | 593599eedf46635c5eac7c1269f77d51ecbbe987 | [] | no_license | krezreb/openapi-client-otoroshi | 2877ae9230b1ca29024880994420101a232cb906 | 0dafc780777857b9a0d0d8264e215bd6e0557224 | refs/heads/master | 2023-05-06T07:23:45.988523 | 2021-05-27T13:00:18 | 2021-05-27T13:00:18 | 371,374,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,969 | py | """
Otoroshi Admin API
Admin API of the Otoroshi reverse proxy # noqa: E501
The version of the OpenAPI document: 1.5.0-alpha.14
Contact: oss@maif.fr
Generated by: https://openapi-generator.tech
"""
import unittest
import openapi_client
from openapi_client.api.certificates_api import CertificatesApi # noqa: E501
class TestCertificatesApi(unittest.TestCase):
"""CertificatesApi unit test stubs"""
def setUp(self):
self.api = CertificatesApi() # noqa: E501
def tearDown(self):
pass
def test_otoroshi_controllers_adminapi_certificates_controller_bulk_create_action(self):
"""Test case for otoroshi_controllers_adminapi_certificates_controller_bulk_create_action
Create multiple Certs at the same time # noqa: E501
"""
pass
def test_otoroshi_controllers_adminapi_certificates_controller_bulk_delete_action(self):
"""Test case for otoroshi_controllers_adminapi_certificates_controller_bulk_delete_action
Delete multiple Certs at the same time # noqa: E501
"""
pass
def test_otoroshi_controllers_adminapi_certificates_controller_bulk_patch_action(self):
"""Test case for otoroshi_controllers_adminapi_certificates_controller_bulk_patch_action
Update (using json-patch) multiple Certs at the same time # noqa: E501
"""
pass
def test_otoroshi_controllers_adminapi_certificates_controller_bulk_update_action(self):
"""Test case for otoroshi_controllers_adminapi_certificates_controller_bulk_update_action
Update multiple Certs at the same time # noqa: E501
"""
pass
def test_otoroshi_controllers_adminapi_certificates_controller_create_action(self):
"""Test case for otoroshi_controllers_adminapi_certificates_controller_create_action
Creates a Cert # noqa: E501
"""
pass
def test_otoroshi_controllers_adminapi_certificates_controller_delete_entity_action(self):
"""Test case for otoroshi_controllers_adminapi_certificates_controller_delete_entity_action
Deletes a specific Cert using its id # noqa: E501
"""
pass
def test_otoroshi_controllers_adminapi_certificates_controller_find_all_entities_action(self):
"""Test case for otoroshi_controllers_adminapi_certificates_controller_find_all_entities_action
Find all possible Certs entities # noqa: E501
"""
pass
def test_otoroshi_controllers_adminapi_certificates_controller_find_entity_by_id_action(self):
"""Test case for otoroshi_controllers_adminapi_certificates_controller_find_entity_by_id_action
Find a specific Cert using its id # noqa: E501
"""
pass
def test_otoroshi_controllers_adminapi_certificates_controller_patch_entity_action(self):
"""Test case for otoroshi_controllers_adminapi_certificates_controller_patch_entity_action
Updates (using json-patch) a specific Cert using its id # noqa: E501
"""
pass
def test_otoroshi_controllers_adminapi_certificates_controller_renew_cert(self):
"""Test case for otoroshi_controllers_adminapi_certificates_controller_renew_cert
Renew a certificates with the same attributes as the original one # noqa: E501
"""
pass
def test_otoroshi_controllers_adminapi_certificates_controller_update_entity_action(self):
"""Test case for otoroshi_controllers_adminapi_certificates_controller_update_entity_action
Updates a specific Cert using its id # noqa: E501
"""
pass
def test_otoroshi_controllers_adminapi_templates_controller_initiate_certificate(self):
"""Test case for otoroshi_controllers_adminapi_templates_controller_initiate_certificate
Creates a new Certificate from a template # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"josephbeeson@gmail.com"
] | josephbeeson@gmail.com |
f2898fb3b0248f525810044f3f4e5a5ce1ec680b | 79acd6922037d309857d95fb4a633788525a0535 | /infrastructure/ansible/roles/dataset_loader/files/selection.set.py | aacaf48079e3a85eeea16a62f5685d58c4c8b3ab | [
"Apache-2.0",
"MPL-2.0",
"BSD-3-Clause",
"CC0-1.0",
"Artistic-2.0",
"CC-BY-SA-4.0",
"MIT",
"ISC",
"BSD-2-Clause",
"OFL-1.1",
"JSON",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later",
"WTFPL",
"LGPL-2.0-or-later",
"X11"
] | permissive | apache/trafficcontrol | 86da3a526e5e0d533547969274cd30d74636d922 | e835435e47175f84a04234d15183ab7b61cc2825 | refs/heads/master | 2023-08-31T08:02:25.363164 | 2023-08-30T23:00:43 | 2023-08-30T23:00:43 | 67,198,520 | 811 | 438 | Apache-2.0 | 2023-09-14T18:12:55 | 2016-09-02T07:00:06 | Go | UTF-8 | Python | false | false | 1,333 | py | #!/usr/bin/python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script is used to provide a round-robin merging of two lists
import sys
import json
if len(sys.argv) < 3 or len(sys.argv) > 4:
print("{}")
sys.exit(0)
cdn_csv_list = sys.argv[1].split(',')
fqdn_csv_list = sys.argv[2].split(',')
option = ''
if len(sys.argv) == 4:
option = sys.argv[3]
cdn_csv_list.sort()
fqdn_csv_list.sort()
step_size = len(cdn_csv_list)
out_list_normal = {}
for i, val in enumerate(cdn_csv_list):
sublist = fqdn_csv_list[i:]
out_list_normal[val] = ','.join(sublist[::step_size])
out_list_denormal = {}
for val, csvlist in out_list_normal.items():
for i in csvlist.split(','):
if i != "":
out_list_denormal[i] = val
if option == 'denormalize':
print(json.dumps(out_list_denormal))
else:
print(json.dumps(out_list_normal))
| [
"noreply@github.com"
] | apache.noreply@github.com |
2414522bbf49b310f6773608c43006d53b555cb4 | 47eaf898a430209658df7973ea6b9b266014aa86 | /cont-attn/train.py | 36e92112ec1187dc91c2b61f7e629f47c2c0f336 | [] | no_license | dhruvramani/language-robotics | 89d4ffc376757207f85c73e5d6a06bd8301507e0 | 54e1db11cb5bbcfa3a3ea60ad42d5a572f1b5fb7 | refs/heads/master | 2023-05-11T21:33:04.894238 | 2020-11-17T04:59:44 | 2020-11-17T04:59:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,749 | py | import os
import sys
import torch
import numpy as np
import torch.nn.functional as F
#from tqdm import tqdm # TODO : Remove TQDM
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from torch.autograd import Variable
from models import *
from models import get_similar_traj
# NOTE : If in future, you operate on bigger hardwares - move to PyTorch Lightning
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def train(config):
print("Device - ", device)
tensorboard_writer = SummaryWriter(logdir=config.tensorboard_path)
deg = config.deg()
vobs_dim, dof_dim = deg.obs_space[deg.vis_obv_key], deg.obs_space[deg.dof_obv_key]
act_dim = deg.action_space
if config.use_visual_obv:
percept = PerceptionModule(visual_obv_dim=vobs_dim, dof_obv_dim=dof_dim, state_dim=config.vis_emb_dim)
dof_dim += config.vis_emb_dim
if config.model == 'basic_attn':
attn_module = BasicAttnModel(dof_dim, act_dim).to(device)
elif config.model == 'rl_transformer':
attn_module = RLTransformerEncoder(dof_dim, act_dim).to(device)
params = list(attn_module.parameters())
if config.use_visual_obv:
params += list(percept.parameters())
print("Number of parameters : {}".format(len(params)))
optimizer = torch.optim.Adam(params, lr=config.learning_rate)
mse_loss = torch.nn.MSELoss()
if(config.save_graphs):
tensorboard_writer.add_graph(attn_module)
if(config.resume):
if config.use_visual_obv:
percept.load_state_dict(torch.load(os.path.join(config.models_save_path, 'percept.pth')))
attn_module.load_state_dict(torch.load(os.path.join(config.models_save_path, '{}.pth'.format(config.model))))
optimizer.load_state_dict(torch.load(os.path.join(config.models_save_path, 'optimizer.pth')))
print("Run : `tensorboard --logdir={} --host '0.0.0.0' --port 6006`".format(config.tensorboard_path))
dataloader = deg.get_instruct_dataloader if config.use_lang_search else deg.get_traj_dataloader
dataloader = dataloader(batch_size=config.batch_size, shuffle=True, num_workers=config.num_workers)
max_step_size = len(dataloader.dataset)
for epoch in range(config.max_epochs): #tqdm(, desc="Check Tensorboard"):
if config.use_lang_search:
loss_avg = 0.0
for i, instruct_traj in enumerate(dataloader):
support_trajs = get_similar_traj(config, deg, instruct_traj)
support_trajs = {key : support_trajs[key].float().to(device) for key in support_trajs.keys()}
key_dof_obs, key_actions = support_trajs[deg.dof_obv_key], support_trajs['action']
key_batch_size, key_seq_len = key_dof_obs.shape[0], key_dof_obs.shape[1]
if config.use_visual_obv:
key_vis_obs = support_trajs[deg.vis_obv_key].reshape(key_seq_len * key_batch_size, -1)
key_vis_obs = key_vis_obs.reshape(-1, vobs_dim[2], vobs_dim[0], vobs_dim[1])
key_dof_obs = key_dof_obs.reshape(key_seq_len * key_batch_size, -1)
key_dof_obs = percept(key_vis_obs, key_dof_obs)
key_dof_obs = key_dof_obs.reshape(key_seq_len, key_batch_size, -1)
key_actions = key_actions.reshape(key_seq_len, key_batch_size, -1)
query_traj = {key : instruct_traj[key].float().to(device) for key in instruct_traj.keys()
if key in [deg.vis_obv_key, deg.dof_obv_key, 'action']}
query_dof_obs, query_actions = query_traj[deg.dof_obv_key], query_traj['action']
query_batch_size, query_seq_len = query_dof_obs.shape[0], query_dof_obs.shape[1]
if config.use_visual_obv:
query_vis_obs = query_traj[deg.vis_obv_key].reshape(query_seq_len * query_batch_size, -1)
query_vis_obs = query_vis_obs.reshape(-1, vobs_dim[2], vobs_dim[0], vobs_dim[1])
query_dof_obs = query_dof_obs.reshape(query_seq_len * query_batch_size, -1)
query_dof_obs = percept(query_vis_obs, query_dof_obs)
query_dof_obs = query_dof_obs.reshape(query_seq_len, query_batch_size, -1)
query_actions = query_actions.reshape(query_seq_len, query_batch_size, -1)
# NOTE - Might have to debug here
nopeak_mask = np.triu(np.ones((1, query_seq_len, query_seq_len)), k=1).astype('uint8')
nopeak_mask = Variable(torch.from_numpy(nopeak_mask) == 0)
preds = attn_module(curr_state=query_dof_obs, state_set=key_dof_obs, action_set=key_actions, mask=nopeak_mask)
loss = mse_loss(preds, query_actions)
loss_avg += loss
tensorboard_writer.add_scalar('lang_{}_{}_loss'.format(config.model, "visual" if config.use_visual_obv else "state"), loss, epoch * max_step_size + i)
loss.backward()
optimizer.step()
if int(i % config.save_interval_steps) == 0:
if config.use_visual_obv:
torch.save(percept.state_dict(), os.path.join(config.models_save_path, 'percept.pth'))
torch.save(attn_module.state_dict(), os.path.join(config.models_save_path, '{}.pth'.format(config.model)))
torch.save(optimizer.state_dict(), os.path.join(config.models_save_path, 'optimizer.pth'))
else:
# NOTE - This is for testing purposes only, remove in release.
for i, trajectory in enumerate(dataloader):
trajectory = {key : trajectory[key].float().to(device) for key in trajectory.keys()}
dof_obs, actions = trajectory[deg.dof_obv_key], trajectory['action']
batch_size, seq_len = dof_obs.shape[0], dof_obs.shape[1]
# NOTE : using ^ instead of config.batch_size coz diff. no. of samples possible from data.
if config.use_visual_obv:
vis_obs = trajectory[deg.key_vis_obs].reshape(seq_len * batch_size, -1)
dof_obs = dof_obs.reshape(seq_len * batch_size, -1)
dof_obs = percept(vis_obs, dof_obs)
dof_obs = dof_obs.reshape(seq_len, batch_size, -1)
actions = actions.reshape(seq_len, batch_size, -1)
state_set = dof_obs.reshape(seq_len * batch_size, 1, -1).repeat(1, batch_size, 1)
action_set = actions.reshape(seq_len * batch_size, 1, -1).repeat(1, batch_size, 1)
preds = attn_module(curr_state=dof_obs, state_set=state_set, action_set=action_set)
optimizer.zero_grad()
loss = mse_loss(preds, actions)
tensorboard_writer.add_scalar('{}_loss'.format(config.model), loss, epoch * max_step_size + i)
loss.backward()
optimizer.step()
if int(i % config.save_interval_steps) == 0:
if config.use_visual_obv:
torch.save(percept.state_dict(), os.path.join(config.models_save_path, 'percept.pth'))
torch.save(attn_module.state_dict(), os.path.join(config.models_save_path, '{}.pth'.format(config.model)))
torch.save(optimizer.state_dict(), os.path.join(config.models_save_path, 'optimizer.pth'))
print("Epoch {} | Loss : {}".format(epoch, loss_avg / len(dataloader.dataset)))
if __name__ == '__main__':
from model_config import get_model_args
config = get_model_args()
torch.manual_seed(config.seed)
train(config) | [
"dhruvramani98@gmail.com"
] | dhruvramani98@gmail.com |
dfae5c44fb06d9b66de15c95505e1082411d9afd | 8a84375dac5e6b33215d20e12e0c197aeaa6e83d | /pymoji/__init__.py | c0ca5b641eab739a4af72ee91afc1f188a417cb5 | [
"Apache-2.0"
] | permissive | michaeljoseph/pymoji | 5579af089cabf1784c656e7fddf9d20f9e6f5d6a | 4bf26babc7b968d9a753907d4db5402cfd5c6d63 | refs/heads/master | 2021-01-01T18:12:37.805141 | 2013-12-09T10:42:24 | 2013-12-09T10:42:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | """Emits HTML from emoji"""
__author__ = 'Michael Joseph'
__email__ = 'michaeljoseph@gmail.com'
__url__ = 'https://github.com/michaeljoseph/pymoji'
__version__ = '0.0.1'
from .emoji import emoji
def pymoji(text):
single_word = len(text.split(' ')) < 2
first_and_last_dont_match = text[0] != text[-1:]
first_character_is_colon = text[0] != ':'
if first_and_last_dont_match and first_character_is_colon and single_word:
text = ':%s:' % text
return emoji(text)
| [
"michaeljoseph+github@gmail.com"
] | michaeljoseph+github@gmail.com |
bb1867412159c6486be01f2224049f1100599ae6 | 2fdea85db7be2d39e52191b5aa444150d5a8e995 | /apps/hbase/src/hbase/hbase_site.py | d4abf7edb18cea66212055a2ad05a2290bf0f813 | [
"Apache-2.0"
] | permissive | bazaarvoice/hue | a464cd28bb181a9977095b05cff31a6c50859bde | 9aa150b0b48e90f236335d49904fef5e49b0d41d | refs/heads/master | 2023-03-18T15:11:29.540137 | 2020-06-16T17:13:37 | 2020-06-17T03:22:15 | 108,895,807 | 3 | 0 | Apache-2.0 | 2020-06-17T03:23:02 | 2017-10-30T19:03:18 | Python | UTF-8 | Python | false | false | 2,525 | py | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import logging
import os.path
from hadoop import confparse
from desktop.lib.security_util import get_components
from hbase.conf import HBASE_CONF_DIR, USE_DOAS
LOG = logging.getLogger(__name__)
SITE_PATH = None
SITE_DICT = None
_CNF_HBASE_THRIFT_KERBEROS_PRINCIPAL = 'hbase.thrift.kerberos.principal'
_CNF_HBASE_AUTHENTICATION = 'hbase.security.authentication'
_CNF_HBASE_IMPERSONATION_ENABLED = 'hbase.thrift.support.proxyuser'
_CNF_HBASE_USE_THRIFT_HTTP = 'hbase.regionserver.thrift.http'
_CNF_HBASE_USE_THRIFT_SSL = 'hbase.thrift.ssl.enabled'
def reset():
global SITE_DICT
SITE_DICT = None
def get_conf():
if SITE_DICT is None:
_parse_site()
return SITE_DICT
def get_server_principal():
principal = get_conf().get(_CNF_HBASE_THRIFT_KERBEROS_PRINCIPAL, None)
components = get_components(principal)
if components is not None:
return components[0]
def get_server_authentication():
return get_conf().get(_CNF_HBASE_AUTHENTICATION, 'NOSASL').upper()
def is_impersonation_enabled():
return get_conf().get(_CNF_HBASE_IMPERSONATION_ENABLED, 'FALSE').upper() == 'TRUE' or USE_DOAS.get()
def is_using_thrift_http():
return get_conf().get(_CNF_HBASE_USE_THRIFT_HTTP, 'FALSE').upper() == 'TRUE' or USE_DOAS.get()
def is_using_thrift_ssl():
return get_conf().get(_CNF_HBASE_USE_THRIFT_SSL, 'FALSE').upper() == 'TRUE'
def _parse_site():
global SITE_DICT
global SITE_PATH
SITE_PATH = os.path.join(HBASE_CONF_DIR.get(), 'hbase-site.xml')
try:
data = file(SITE_PATH, 'r').read()
except IOError, err:
if err.errno != errno.ENOENT:
LOG.error('Cannot read from "%s": %s' % (SITE_PATH, err))
return
data = ""
SITE_DICT = confparse.ConfParse(data)
| [
"romain@cloudera.com"
] | romain@cloudera.com |
5033de95c14e1bda42f174d71402c54e3ecbfec5 | b3b68efa404a7034f0d5a1c10b281ef721f8321a | /Scripts/simulation/restaurants/restaurant_utils.py | 227f85a7e1f4623d9c874840e4e304d6fdefbce7 | [
"Apache-2.0"
] | permissive | velocist/TS4CheatsInfo | 62195f3333076c148b2a59f926c9fb5202f1c6fb | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | refs/heads/main | 2023-03-08T01:57:39.879485 | 2021-02-13T21:27:38 | 2021-02-13T21:27:38 | 337,543,310 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,195 | py | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\restaurants\restaurant_utils.py
# Compiled at: 2018-10-01 20:40:25
# Size of source mod 2**32: 5998 bytes
from protocolbuffers import Restaurant_pb2
from business.business_enums import BusinessType
from event_testing.resolver import SingleSimResolver
from restaurants.restaurant_tuning import RestaurantTuning, get_restaurant_zone_director
from tunable_multiplier import TunableMultiplier
import services
class RestaurantUtils:
MEAL_COST_MULTIPLIERS = TunableMultiplier.TunableFactory(description='\n Multipliers used to change the value of things in a menu and for the\n overall cost of the meal.\n \n If any member of the party meets the requirement of the multiplier then\n the multiplier is applied once. The benefit will not be applied for \n each Sim in the group that meets the multiplier tests.\n ')
def get_chef_situation(chef_sim=None):
situation_manager = services.get_zone_situation_manager()
if chef_sim is not None:
situations = situation_manager.get_situations_sim_is_in(chef_sim)
else:
situations = situation_manager.running_situations()
for situation in situations:
if type(situation) is RestaurantTuning.CHEF_SITUATION:
return situation
if RestaurantTuning.HOME_CHEF_SITUATION_TAG in situation.tags:
return situation
def get_waitstaff_situation(waitstaff_sim=None):
situation_manager = services.get_zone_situation_manager()
if waitstaff_sim is not None:
situations = situation_manager.get_situations_sim_is_in(waitstaff_sim)
else:
situations = situation_manager.running_situations()
for situation in situations:
if type(situation) is RestaurantTuning.WAITSTAFF_SITUATION:
return situation
def get_menu_message(menu_map, group_sim_ids, chef_order=False, daily_special_ids_map=None, is_recommendation=False):
show_menu_message = Restaurant_pb2.ShowMenu()
menu = Restaurant_pb2.Menu()
active_household = services.active_household()
if active_household is not None:
holiday_multiplier = active_household.holiday_tracker.get_active_holiday_business_price_multiplier(BusinessType.RESTAURANT)
else:
holiday_multiplier = 1.0
tested_meal_cost_multiplier = tested_cost_multipliers_for_group(group_sim_ids)
for course_enum, recipes in menu_map:
course_item = menu.courses.add()
course_item.course_tag = course_enum
daily_special_ids = daily_special_ids_map.get(course_enum, None) if daily_special_ids_map else None
for recipe in recipes:
recipe_item = course_item.items.add()
recipe_item.recipe_id = recipe.guid64
is_daily_special = recipe.guid64 == daily_special_ids
recipe_item.item_type = 1 if is_daily_special else 0
price = recipe.restaurant_base_price
price *= holiday_multiplier
price *= tested_meal_cost_multiplier
if is_daily_special:
price *= RestaurantTuning.DAILY_SPECIAL_DISCOUNT
else:
zone_director = get_restaurant_zone_director()
if zone_director:
business_manager = services.business_service().get_business_manager_for_zone()
if business_manager is not None:
price = business_manager.get_value_with_markup(price)
else:
price *= RestaurantTuning.UNOWNED_RESTAURANT_PRICE_MULTIPLIER
recipe_item.price_override = int(price)
show_menu_message.menu = menu
show_menu_message.sim_ids.extend(group_sim_ids)
show_menu_message.chef_order = chef_order
show_menu_message.recommend_order = is_recommendation
return show_menu_message
def food_on_table_gen(table_id):
slot_types = {
RestaurantTuning.TABLE_FOOD_SLOT_TYPE, RestaurantTuning.TABLE_DRINK_SLOT_TYPE}
object_manager = services.object_manager()
table = object_manager.get(table_id)
if table is None:
return
for table_part in table.parts:
for runtime_slot in table_part.get_runtime_slots_gen(slot_types=slot_types):
yield from runtime_slot.children
if False:
yield None
def tested_cost_multipliers_for_group(group_sim_ids):
cost_multiplier = RestaurantUtils.MEAL_COST_MULTIPLIERS.base_value
sim_info_manager = services.sim_info_manager()
group_sim_info_resolvers = {}
for sim_id in group_sim_ids:
sim_info = sim_info_manager.get(sim_id)
if sim_info is not None:
group_sim_info_resolvers[sim_info] = SingleSimResolver(sim_info)
for multiplier in RestaurantUtils.MEAL_COST_MULTIPLIERS.multipliers:
for sim_info, resolver in group_sim_info_resolvers.items():
if multiplier.tests.run_tests(resolver):
cost_multiplier *= multiplier.multiplier
break
return cost_multiplier | [
"cristina.caballero2406@gmail.com"
] | cristina.caballero2406@gmail.com |
6b30db7207514a2684ce861ee9668aafabb830eb | 77941c4e6d28e45039f880cfd55e0a7c9b25e1be | /jax_dft/jax_dft/losses_test.py | a425dcc3b79d334a6609c0f0d9e729c3be18aafa | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | ritesh861/google-research | e29c7ba836a91454eec9a1d39e1af62dc6e4860e | 5d901d6895cc254a911a3cdc97487f04487f44ed | refs/heads/master | 2022-12-31T17:54:24.150450 | 2020-10-23T02:12:53 | 2020-10-23T02:18:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,743 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for jax_dft.losses."""
from absl.testing import absltest
from jax.config import config
import jax.numpy as jnp
import numpy as np
from jax_dft.jax_dft import losses
# Set the default dtype as float64
config.update('jax_enable_x64', True)
class LossesTest(absltest.TestCase):
  """Unit tests for the loss functions in jax_dft.losses."""

  def test_trajectory_mse_wrong_predict_ndim(self):
    """trajectory_mse rejects a `predict` array with fewer than 2 dims."""
    with self.assertRaisesRegex(
        ValueError,
        'The size of the shape of predict should be '
        'greater or equal to 2, got 1'):
      losses.trajectory_mse(
          target=jnp.array([[0.2, 0.2, 0.2, 0.2], [0.6, 0.6, 0.6, 0.6]]),
          predict=jnp.array([0.6, 0.6, 0.6, 0.6]),
          discount=1.)
  def test_trajectory_mse_wrong_predict_target_ndim_difference(self):
    """trajectory_mse requires predict.ndim == target.ndim + 1."""
    with self.assertRaisesRegex(
        ValueError,
        'The size of the shape of predict should be greater than '
        'the size of the shape of target by 1, '
        r'but got predict \(2\) and target \(2\)'):
      losses.trajectory_mse(
          target=jnp.array([[0.2, 0.2, 0.2, 0.2], [0.6, 0.6, 0.6, 0.6]]),
          predict=jnp.array([[0.2, 0.2, 0.2, 0.2], [0.6, 0.6, 0.6, 0.6]]),
          discount=1.)
  def test_density_mse(self):
    """mean_square_error on batched density arrays."""
    self.assertAlmostEqual(
        float(losses.mean_square_error(
            target=jnp.array([[0.2, 0.2, 0.2, 0.2], [0.6, 0.6, 0.6, 0.6]]),
            predict=jnp.array([[0.4, 0.5, 0.2, 0.3], [0.6, 0.6, 0.6, 0.6]]))),
        # ((
        #     (0.4 - 0.2) ** 2 + (0.5 - 0.2) ** 2
        #     + (0.2 - 0.2) ** 2 + (0.3 - 0.2) ** 2
        # ) / 4 + 0) / 2 = 0.0175
        0.0175)
  def test_energy_mse(self):
    """mean_square_error on scalar (energy) targets."""
    self.assertAlmostEqual(
        float(losses.mean_square_error(
            target=jnp.array([[0.2, 0.6]]),
            predict=jnp.array([[0.4, 0.7]]))),
        # ((0.4 - 0.2) ** 2 + (0.7 - 0.6) ** 2) / 2 = 0.025
        0.025)
  def test_get_discount_coefficients(self):
    """Coefficients decay backwards from 1. by powers of the discount."""
    np.testing.assert_allclose(
        losses._get_discount_coefficients(num_steps=4, discount=0.8),
        [0.512, 0.64, 0.8, 1.])
  def test_trajectory_mse_on_density(self):
    """Discounted trajectory MSE over per-step density predictions."""
    self.assertAlmostEqual(
        float(losses.trajectory_mse(
            target=jnp.array([[0.2, 0.2, 0.2, 0.2], [0.6, 0.6, 0.6, 0.6]]),
            predict=jnp.array([
                [[0.4, 0.5, 0.2, 0.3],
                [0.3, 0.3, 0.2, 0.2],
                [0.3, 0.3, 0.3, 0.2]],
                [[0.6, 0.6, 0.6, 0.6],
                [0.6, 0.6, 0.6, 0.5],
                [0.6, 0.6, 0.6, 0.6]]]),
            discount=0.6)),
        # First sample in the batch:
        # (
        #     (0.4 - 0.2) ** 2 + (0.5 - 0.2) ** 2
        #     + (0.2 - 0.2) ** 2 + (0.3 - 0.2) ** 2
        # ) / 4 * 0.6 * 0.6
        # + (
        #     (0.3 - 0.2) ** 2 + (0.3 - 0.2) ** 2
        #     + (0.2 - 0.2) ** 2 + (0.2 - 0.2) ** 2
        # ) / 4 * 0.6
        # + (
        #     (0.3 - 0.2) ** 2 + (0.3 - 0.2) ** 2
        #     + (0.3 - 0.2) ** 2 + (0.2 - 0.2) ** 2
        # ) / 4 = 0.0231
        # Second sample in the batch:
        # (
        #     (0.6 - 0.6) ** 2 + (0.6 - 0.6) ** 2
        #     + (0.6 - 0.6) ** 2 + (0.6 - 0.6) ** 2
        # ) / 4 * 0.6 * 0.6
        # + (
        #     (0.6 - 0.6) ** 2 + (0.6 - 0.6) ** 2
        #     + (0.6 - 0.6) ** 2 + (0.5 - 0.6) ** 2
        # ) / 4 * 0.6
        # + (
        #     (0.6 - 0.6) ** 2 + (0.6 - 0.6) ** 2
        #     + (0.6 - 0.6) ** 2 + (0.6 - 0.6) ** 2
        # ) / 4 = 0.0015
        # Loss:
        # (0.0231 + 0.0015) / 2 = 0.0123
        0.0123)
  def test_trajectory_mse_on_energy(self):
    """Discounted trajectory MSE over per-step scalar predictions."""
    self.assertAlmostEqual(
        float(losses.trajectory_mse(
            target=jnp.array([0.2, 0.6]),
            predict=jnp.array([[0.4, 0.3, 0.2], [0.7, 0.7, 0.7]]),
            discount=0.6)),
        # First sample in the batch:
        # ((0.4 - 0.2) ** 2 * 0.6 * 0.6
        #     + (0.3 - 0.2) ** 2 * 0.6 + (0.2 - 0.2) ** 2) = 0.0204
        # Second sample in the batch:
        # ((0.7 - 0.6) ** 2 * 0.6 * 0.6
        #     + (0.7 - 0.6) ** 2 * 0.6 + (0.7 - 0.6) ** 2) = 0.0196
        # Loss:
        # (0.0204 + 0.0196) / 2 = 0.02
        0.02)
if __name__ == '__main__':
absltest.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
a6cf2f9283eb95bbbcd982e940d290cc39549bec | 475e2fe71fecddfdc9e4610603b2d94005038e94 | /Amazon/RepeatedSubstringPattern.py | 493dc5e2605eec318b294b395ed8cf35491aa97f | [] | no_license | sidhumeher/PyPractice | 770473c699aab9e25ad1f8b7b7cd8ad05991d254 | 2938c14c2e285af8f02e2cfc7b400ee4f8d4bfe0 | refs/heads/master | 2021-06-28T20:44:50.328453 | 2020-12-15T00:51:39 | 2020-12-15T00:51:39 | 204,987,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py | '''
Created on Dec 10, 2020
@author: sidteegela
'''
def repeatedSubstringPattern(s) -> bool:
    """Return True if `s` can be built by repeating a proper substring.

    Uses the classic doubling trick: `s` equals some pattern repeated k >= 2
    times iff `s` occurs inside (s + s) with the first and last characters
    removed (trimming destroys the two trivial occurrences at offsets 0 and
    len(s), so any remaining occurrence implies a period < len(s)).

    The previous frequency-counting approach was incorrect: equal character
    counts are necessary but not sufficient, e.g. 'ab' and 'aabb' have equal
    counts yet are not repetitions of a shorter pattern.

    Time: O(n) average (CPython's two-way substring search); Space: O(n)
    for the doubled copy.
    """
    # The empty string has no proper repeating unit.
    if not s:
        return False
    return s in (s + s)[1:-1]
if __name__ == '__main__':
    # Ad-hoc demo: print the result for a few sample inputs.
    for sample in ('abab', 'aba', 'abcabcabcabc'):
        print(repeatedSubstringPattern(sample))
| [
"sidhumeher@yahoo.co.in"
] | sidhumeher@yahoo.co.in |
8b534fd770aad5f4bd416754cc830b08843ce337 | 1f76f04e44f9e65a96e02ef1314cdd60f4b5e934 | /blog/migrations/0001_initial.py | b8513a6184d7a181cedd022179d80c5d2e6d592a | [] | no_license | eclipsical/blog | a37f0d0248a41c6cf0612c28685e24658aa41ccf | 7d45242d58ac84393d29e862b5b5d7482003ae92 | refs/heads/master | 2020-03-30T19:05:45.994484 | 2018-10-05T02:21:07 | 2018-10-05T02:21:07 | 151,527,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | # Generated by Django 2.1.2 on 2018-10-04 05:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the blog `Post` table."""
    # First migration of this app; depends only on the (swappable) user model.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                # created_date defaults to "now"; published_date stays empty
                # until the post is actually published.
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                # Deleting the author user cascades to their posts.
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"me@me.com"
] | me@me.com |
3d2364b9ada6318447f8017a3da3808db99cbf44 | 93fce31a2308e23bf36926a0d2967369bd20fefa | /1.10_select_column.py | 3cb9ce96553dfc49597b9c8fe2f91d4a2072040b | [] | no_license | mucollabo/pandasForML | 6e2cc98bc33c346a0f20ba9ec326af503d4d0076 | 24303f0da1271c98717be52a21ba32e435d3851e | refs/heads/master | 2023-08-20T08:27:53.078243 | 2021-10-27T12:22:15 | 2021-10-27T12:22:15 | 291,989,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 891 | py | import pandas as pd
# Build a DataFrame from the exam-score dict and store it in df.
# (Keys are Korean column labels: 이름=name, 수학=math, 영어=English,
#  음악=music, 체육=PE -- they are runtime data, left untranslated.)
exam_data = {'이름':['서준', '우현', '인아'],
            '수학':[90, 80, 70],
            '영어':[98, 89, 95],
            '음악':[85, 95, 100],
            '체육':[100, 90, 90]}
df = pd.DataFrame(exam_data)
print(df)
print(type(df))
print('\n')
# Select only the '수학' (math) column; single brackets yield a Series.
math1 = df['수학']
print(math1)
print(type(math1))
print('\n')
# Select the '영어' (English) column via attribute access (also a Series).
english = df.영어
print(english)
print(type(english))
# Select the '음악' (music) and '체육' (PE) columns; a list of labels
# yields a DataFrame.
music_gym = df[['음악', '체육']]
print(music_gym)
print(type(music_gym))
print('\n')
# Select '수학' again, but with double brackets: a one-column DataFrame.
math2 = df[['수학']]
print(math2)
print(type(math2))
"mucollabo@gmail.com"
] | mucollabo@gmail.com |
28e42ccfbb3a1ccbfdf2ba32bab2ab46def4b7a0 | c109de66500f74d2527c83feb0343179c1af3568 | /mar19/pm3.py | 47f1073a4281f31c9f7d8fde22e1f71186636857 | [] | no_license | DUanalytics/python20 | aadf3ce6bb149ce8fde146972222875911fa8bda | 4b4e2e29851c550533033a039ae8175da65728cd | refs/heads/master | 2021-04-08T09:56:09.798921 | 2020-04-21T12:04:59 | 2020-04-21T12:04:59 | 248,764,764 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 969 | py | #Topic:
#-----------------------------
# Process-mining demo with pm4py: import a CSV event log, convert it to an
# event log object, export it back to CSV, then discover and visualise a
# Petri net with the alpha miner.
# libraries
import os  # BUG FIX: os.path.join is used below but `os` was never imported,
           # which raised a NameError at run time.
from pm4py.algo.discovery.alpha import factory as alpha_miner
from pm4py.objects.log.importer.xes import factory as importer
from pm4py.visualization.petrinet import factory as visualizer
from pm4py.objects.log.importer.csv import factory as csv_importer
# Read the raw CSV as a stream of events.
event_stream = csv_importer.import_event_stream( os.path.join("pmdata/", "running-example.csv") )
event_stream  # no-op expression (leftover from interactive use)
event_stream_length = len(event_stream)
print(event_stream_length)
for event in event_stream: print(event)
# Convert the flat event stream into a case-grouped event log.
from pm4py.objects.conversion.log import factory as conversion_factory
log = conversion_factory.apply(event_stream)
# Export the (unconverted) stream back to CSV.
from pm4py.objects.log.exporter.csv import factory as csv_exporter
csv_exporter.export(event_stream, "data/outputFile1.csv")
#log = importer.apply('pmdata/running-example.xes')
# Alpha miner: discover a Petri net plus initial/final markings, then render.
net, initial_marking, final_marking = alpha_miner.apply(log)
gviz = visualizer.apply(net, initial_marking, final_marking)
visualizer.view(gviz)
"dup1966@gmail.com"
] | dup1966@gmail.com |
3cc507441b6d0ecb6bc00051f647692ccfb593ae | 3b9bf497cd29cea9c24462e0411fa8adbfa6ba60 | /leetcode/Problems/915--Partition-Array-into-Disjoint-Intervals-Medium.py | ad0e3e8fc948c8a58b3d68af1c55d3bf29f8cc00 | [] | no_license | niteesh2268/coding-prepation | 918823cb7f4965bec096ec476c639a06a9dd9692 | 19be0766f6b9c298fb32754f66416f79567843c1 | refs/heads/master | 2023-01-02T05:30:59.662890 | 2020-10-17T13:12:34 | 2020-10-17T13:12:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | class Solution:
def partitionDisjoint(self, A: List[int]) -> int:
maxVals = [A[0]]
minVals = [0 for _ in range(len(A))]
for i in range(1, len(A)):
val = max(maxVals[-1], A[i])
maxVals.append(val)
minVals[-1] = A[-1]
for i in range(len(A)-2, -1, -1):
minVals[i] = min(minVals[i+1], A[i])
# print(maxVals, minVals)
for i in range(len(A)):
if maxVals[i] <= minVals[i+1]:
return i+1
return 1 | [
"akulajayaprakash@gmail.com"
] | akulajayaprakash@gmail.com |
25706eb5c23ab48d24561809b3144d55ad0064c2 | 4094eff8c1777e5bc1c412f18cb1e905ab1de302 | /tests/layout_tests/__init__.py | 31a9cfa80e04414bca678a8b62f75042178aba99 | [
"BSD-3-Clause"
] | permissive | Lunderberg/python-prompt-toolkit | fcbfed4cb0e94c1032916b2766bb635079db54d6 | 7456156e4bdbd3c5ec9e95c78546e6323b77e74f | refs/heads/master | 2020-12-28T22:22:32.082247 | 2015-10-08T14:32:38 | 2015-10-08T14:32:38 | 43,893,510 | 1 | 0 | null | 2015-10-08T14:20:04 | 2015-10-08T14:20:02 | Python | UTF-8 | Python | false | false | 2,074 | py | from __future__ import unicode_literals
#from prompt_toolkit.layout.utils import fit_tokens_in_size
from pygments.token import Token
import unittest
#class FitTokensInSizeTest(unittest.TestCase):
# def setUp(self):
# self.tokens = [(Token, 'Hello world'), (Token, '\n'), (Token, 'line2')]
#
# def test_1(self):
# result = fit_tokens_in_size(self.tokens, width=5, height=3, default_token=Token)
#
# self.assertEqual(result, [
# [(Token, u'H'), (Token, u'e'), (Token, u'l'), (Token, u'l'), (Token, u'o')],
# [(Token, u'l'), (Token, u'i'), (Token, u'n'), (Token, u'e'), (Token, u'2')],
# [(Token, u' ')],
# ])
#
# def test_2(self):
# result = fit_tokens_in_size(self.tokens, width=3, height=3, default_token=Token)
#
# self.assertEqual(result, [
# [(Token, u'H'), (Token, u'e'), (Token, u'l')],
# [(Token, u'l'), (Token, u'i'), (Token, u'n')],
# [(Token, u' ')],
# ])
#
# def test_3(self):
# result = fit_tokens_in_size(self.tokens, width=3, height=2, default_token=Token)
#
# self.assertEqual(result, [
# [(Token, u'H'), (Token, u'e'), (Token, u'l')],
# [(Token, u'l'), (Token, u'i'), (Token, u'n')],
# ])
#
# def test_4(self):
# result = fit_tokens_in_size(self.tokens, width=3, height=1, default_token=Token)
#
# self.assertEqual(result, [
# [(Token, u'H'), (Token, u'e'), (Token, u'l')],
# ])
#
# def test_5(self):
# result = fit_tokens_in_size(self.tokens, width=15, height=4, default_token=Token)
#
# self.assertEqual(result, [
# [(Token, u'H'), (Token, u'e'), (Token, u'l'), (Token, u'l'), (Token, u'o'), (Token, u' '),
# (Token, u'w'), (Token, u'o'), (Token, u'r'), (Token, u'l'), (Token, u'd'), (Token, u' ')],
# [(Token, u'l'), (Token, u'i'), (Token, u'n'), (Token, u'e'), (Token, u'2'), (Token, u' ')],
# [(Token, u' ' * 15)],
# [(Token, u' ' * 15)],
# ])
| [
"jonathan@slenders.be"
] | jonathan@slenders.be |
76bf8415808dbc0b27091908445074c49baab840 | c6f97d8a8c9f50d494f6e4dbcdd824cd63133a95 | /main/forms.py | 2d2769f579e28302a75be19c653b1de44d40e8fd | [] | no_license | Pavlenkovv/taxi | baa1d9add4fc167191f3fa68e218d0992263c2f0 | 9e645fc0a2fa75f4aa1f604b355919f23047baae | refs/heads/main | 2023-04-10T20:43:41.373294 | 2021-04-21T19:52:49 | 2021-04-21T19:52:49 | 359,902,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | from django import forms
import re
class OrderForm(forms.Form):
    """Taxi ride order form: customer contact details plus pickup/drop-off
    addresses and a pickup time. Labels are user-facing Ukrainian strings."""
    customer_name = forms.CharField(max_length=70, required=True, label="Ім'я")
    customer_phone = forms.CharField(max_length=30, required=True, label='Телефон')
    address_from = forms.CharField(max_length=200, required=True, label='Звідки їхати')
    address_to = forms.CharField(max_length=200, required=True, label='Куди їхати')
    in_what_time = forms.TimeField(required=True, label='Коли їхати')
    def clean_customer_name(self):
        """Allow only Cyrillic letters, spaces, hyphens and apostrophes."""
        customer_name = self.cleaned_data["customer_name"].strip()
        # \u0400-\u0527 spans the Cyrillic block plus most of the Cyrillic
        # Supplement; any character outside that set (besides space, '-' and
        # the apostrophe) makes validation fail.
        if re.search(r"[^\u0400-\u0527 \-\']", customer_name, flags=re.IGNORECASE) is not None:
            raise forms.ValidationError("Name should have cyrillic characters only")
        return customer_name
    def clean_customer_phone(self):
        """Require a Ukrainian number of the exact form +380(XX)XXX-XX-XX."""
        customer_phone = self.cleaned_data["customer_phone"].strip()
        if re.search(r"^\+380\(\d{2}\)\d{3}\-\d{2}\-\d{2}$", customer_phone) is None:
            raise forms.ValidationError("Phone should be in +380(ХХ)ХХХ-ХХ-ХХ format")
        return customer_phone
"pavlenko.vyacheslav@gmail.com"
] | pavlenko.vyacheslav@gmail.com |
39494e67be7df7d189cb475268b7807d2c2b24e0 | b2f84608cc28c492430e972028fa0e178865c78c | /source_py2/test_combi/__init__.py | 93535bef676360daad2ef5825782c0dab25df153 | [
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | cool-RR/combi | 54efa752403a4acb6933475102702e43de93c81d | 9c5c143a792ffd8fb38b6470f926268c8bacbc31 | refs/heads/master | 2021-09-23T10:02:52.984204 | 2021-09-18T08:45:57 | 2021-09-18T08:45:57 | 25,787,956 | 24 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,095 | py | # Copyright 2009-2017 Ram Rachum.
# This program is distributed under the MIT license.
import sys
try:
import pathlib
except:
from combi._python_toolbox.third_party import pathlib
def __bootstrap():
    '''
    Add needed packages in repo to path if we can't find them.

    This adds `combi`'s root folder to `sys.path` if it can't
    currently be imported.
    '''
    import os
    import sys
    import imp  # NOTE: `imp` is deprecated (removed in Python 3.12); kept
                # because this file also targets Python 2.

    def exists(module_name):
        '''
        Return whether a module by the name `module_name` exists.

        This seems to be the best way to carefully import a module.

        Currently implemented for top-level packages only. (i.e. no dots.)

        Doesn't support modules imported from a zip file.
        '''
        assert '.' not in module_name
        try:
            imp.find_module(module_name)
        except ImportError:
            return False
        else:
            return True

    if not exists('combi'):
        # BUG FIX: the old code did `pathlib(__file__)`, i.e. it called the
        # *module* object imported at the top of this file, which raises
        # TypeError. `pathlib.Path` is the class that must be instantiated.
        combi_candidate_path = pathlib.Path(__file__).parent.parent.absolute()
        # BUG FIX: sys.path entries must be plain strings, not Path objects
        # (older interpreters ignore non-string entries).
        sys.path.append(str(combi_candidate_path))

__bootstrap()
| [
"ram@rachum.com"
] | ram@rachum.com |
fc2e24a083446166a3a346474bce6f3981dec982 | 4a72b43463a9dbc661583d5d0ee264430909dc08 | /dinesh/urls.py | f71d675a3394f2777cd4886963a853f350ae62be | [] | no_license | parvatiandsons2/dinesh | 50da144428572668409a74eac99eac63518d9876 | efab648bdd6af896e6e45a28946754486f6c59e5 | refs/heads/master | 2023-03-30T20:16:42.423599 | 2021-04-08T05:01:46 | 2021-04-08T05:01:46 | 350,943,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py |
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
# Route table: the Django admin at /admin/ and the `website` app at the root.
# The `static(...)` suffix serves uploaded media from MEDia_ROOT-backed URLs;
# this only takes effect with DEBUG=True -- in production the web server
# should serve media files itself.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('website.urls'))
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"parvatiandsons2@gmail.com"
] | parvatiandsons2@gmail.com |
c51d78ade22a78303932a2159fdb50ace55e9012 | 06bb8e09d8b078707aba33d727876c9f3f24b882 | /class/object_init.py | 167747ecc7c19645d1948140af11607f859d741e | [] | no_license | 82seongkyum/python_lecture | 8a698fdee42d9e110d61a5623afc8ca6dca52411 | f24f684eb440400243b57ea432495493e53f6879 | refs/heads/main | 2023-08-25T20:15:20.653686 | 2021-11-12T09:04:50 | 2021-11-12T09:04:50 | 426,548,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | class Car:
    # Class-level defaults; __init__ shadows them with instance attributes.
    name = ""
    speed = 0
    def __init__(self, name, speed):
        # Initialize a car with its display name and current speed.
        self.name = name
        self.speed = speed
    def getName(self):
        # NOTE(review): this bare print() only emits a blank line -- it looks
        # like leftover debug/spacing output rather than intentional behavior.
        print()
        return self.name
    def getSpeed(self):
        # Same blank-line print as getName (presumably console spacing).
        print()
        return self.speed
"you@example.com"
] | you@example.com |
363dbc525fdce119551e489406287b849a979d1f | cdc0e3f0725519f526e61fe00e42393c59c0b05c | /src/nsf/transforms/base_test.py | 496ccc5760a2da07ba8b90ee34f1532fea69030c | [] | no_license | conormdurkan/lfi | e525dddd2d4c43065e9586f4a61d55c77591399e | c3919c251084763e305f99df3923497a130371a2 | refs/heads/master | 2021-01-01T21:47:52.650312 | 2020-02-11T12:13:29 | 2020-02-11T12:13:29 | 239,355,208 | 41 | 14 | null | 2020-02-11T13:04:24 | 2020-02-09T18:44:18 | Python | UTF-8 | Python | false | false | 5,176 | py | """Tests for the basic transform definitions."""
import unittest
import numpy as np
import torch
from nsf.transforms import base
from nsf.transforms import standard
from nsf.transforms.transform_test import TransformTest
class CompositeTransformTest(TransformTest):
    """Checks that chaining scalar affine transforms composes their scales.

    2.0 * 1 * (-0.25) == -0.5, so the composite must match a single
    AffineScalarTransform(scale=-0.5) in both directions."""
    def test_forward(self):
        """Composite forward pass equals the equivalent single transform."""
        batch_size = 10
        shape = [2, 3, 4]
        inputs = torch.randn(batch_size, *shape)
        transforms = [
            standard.AffineScalarTransform(scale=2.0),
            standard.IdentityTransform(),
            standard.AffineScalarTransform(scale=-0.25),
        ]
        composite = base.CompositeTransform(transforms)
        reference = standard.AffineScalarTransform(scale=-0.5)
        outputs, logabsdet = composite(inputs)
        outputs_ref, logabsdet_ref = reference(inputs)
        self.assert_tensor_is_good(outputs, [batch_size] + shape)
        self.assert_tensor_is_good(logabsdet, [batch_size])
        self.assertEqual(outputs, outputs_ref)
        self.assertEqual(logabsdet, logabsdet_ref)
    def test_inverse(self):
        """Composite inverse pass equals the equivalent single transform."""
        batch_size = 10
        shape = [2, 3, 4]
        inputs = torch.randn(batch_size, *shape)
        transforms = [
            standard.AffineScalarTransform(scale=2.0),
            standard.IdentityTransform(),
            standard.AffineScalarTransform(scale=-0.25),
        ]
        composite = base.CompositeTransform(transforms)
        reference = standard.AffineScalarTransform(scale=-0.5)
        outputs, logabsdet = composite.inverse(inputs)
        outputs_ref, logabsdet_ref = reference.inverse(inputs)
        self.assert_tensor_is_good(outputs, [batch_size] + shape)
        self.assert_tensor_is_good(logabsdet, [batch_size])
        self.assertEqual(outputs, outputs_ref)
        self.assertEqual(logabsdet, logabsdet_ref)
class MultiscaleCompositeTransformTest(TransformTest):
    """Tests for MultiscaleCompositeTransform (split-and-transform chains)."""
    def create_transform(self, shape, split_dim=1):
        """Build a 4-level multiscale transform of scalar affine stages;
        each add_transform returns the shape fed to the next stage."""
        mct = base.MultiscaleCompositeTransform(num_transforms=4, split_dim=split_dim)
        for transform in [
            standard.AffineScalarTransform(scale=2.0),
            standard.AffineScalarTransform(scale=4.0),
            standard.AffineScalarTransform(scale=0.5),
            standard.AffineScalarTransform(scale=0.25),
        ]:
            shape = mct.add_transform(transform, shape)
        return mct
    def test_forward(self):
        """Forward output is flattened to [batch, prod(shape)]."""
        batch_size = 5
        for shape in [(32, 4, 4), (64,), (65,)]:
            with self.subTest(shape=shape):
                inputs = torch.ones(batch_size, *shape)
                transform = self.create_transform(shape)
                outputs, logabsdet = transform(inputs)
                self.assert_tensor_is_good(outputs, [batch_size] + [np.prod(shape)])
                self.assert_tensor_is_good(logabsdet, [batch_size])
    def test_forward_bad_shape(self):
        """A shape too small to split 4 times is rejected at build time."""
        shape = (8,)
        with self.assertRaises(ValueError):
            transform = self.create_transform(shape)
    def test_forward_bad_split_dim(self):
        """split_dim beyond the input rank is rejected."""
        batch_size = 5
        shape = [32]
        inputs = torch.randn(batch_size, *shape)
        with self.assertRaises(ValueError):
            transform = self.create_transform(shape, split_dim=2)
    def test_inverse_not_flat(self):
        """inverse() only accepts flattened (2-D) inputs."""
        batch_size = 5
        shape = [32, 4, 4]
        inputs = torch.randn(batch_size, *shape)
        transform = self.create_transform(shape)
        with self.assertRaises(ValueError):
            transform.inverse(inputs)
    def test_forward_inverse_are_consistent(self):
        """Round-tripping flattened inputs recovers them."""
        batch_size = 5
        for shape in [(32, 4, 4), (64,), (65,), (21,)]:
            with self.subTest(shape=shape):
                transform = self.create_transform(shape)
                inputs = torch.randn(batch_size, *shape).view(batch_size, -1)
                self.assert_forward_inverse_are_consistent(transform, inputs)
class InverseTransformTest(TransformTest):
    """InverseTransform of scale=-2.0 must behave like scale=-0.5
    (forward and inverse swapped relative to the wrapped transform)."""
    def test_forward(self):
        """forward() of the wrapper equals forward() of the reciprocal scale."""
        batch_size = 10
        shape = [2, 3, 4]
        inputs = torch.randn(batch_size, *shape)
        transform = base.InverseTransform(standard.AffineScalarTransform(scale=-2.0))
        reference = standard.AffineScalarTransform(scale=-0.5)
        outputs, logabsdet = transform(inputs)
        outputs_ref, logabsdet_ref = reference(inputs)
        self.assert_tensor_is_good(outputs, [batch_size] + shape)
        self.assert_tensor_is_good(logabsdet, [batch_size])
        self.assertEqual(outputs, outputs_ref)
        self.assertEqual(logabsdet, logabsdet_ref)
    def test_inverse(self):
        """inverse() of the wrapper equals inverse() of the reciprocal scale."""
        batch_size = 10
        shape = [2, 3, 4]
        inputs = torch.randn(batch_size, *shape)
        transform = base.InverseTransform(standard.AffineScalarTransform(scale=-2.0))
        reference = standard.AffineScalarTransform(scale=-0.5)
        outputs, logabsdet = transform.inverse(inputs)
        outputs_ref, logabsdet_ref = reference.inverse(inputs)
        self.assert_tensor_is_good(outputs, [batch_size] + shape)
        self.assert_tensor_is_good(logabsdet, [batch_size])
        self.assertEqual(outputs, outputs_ref)
        self.assertEqual(logabsdet, logabsdet_ref)
if __name__ == "__main__":
unittest.main()
| [
"conormdurkan@gmail.com"
] | conormdurkan@gmail.com |
0d86ce0ae62e08aae4be88b40a18e089e43ca219 | a262edcef5d1def670103b849eef9cf203510a40 | /tp_analysis/preprocessing/matrix_operation.py | 1c25ff7fcc74e75b2d0c362819425f7c774816e6 | [] | no_license | clarkwkw/GEStatProj | 19439fc50e673cf77444b1513bb5ac9c71fd0022 | 45f9ca393ae179827906c21c1cd758621cfbf4ce | refs/heads/master | 2020-04-03T22:38:38.823677 | 2018-05-22T07:50:53 | 2018-05-22T07:50:53 | 59,290,283 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,574 | py | import json
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.decomposition import PCA, TruncatedSVD, IncrementalPCA, SparsePCA
import textbook
import nltk.stem
stemmer = nltk.stem.SnowballStemmer('english')
class StemmedTfidfVectorizer(TfidfVectorizer):
	"""TfidfVectorizer whose analyzer Snowball-stems every produced token."""
	def build_analyzer(self):
		# Wrap the parent analyzer so each emitted token is stemmed.
		analyzer = super(StemmedTfidfVectorizer, self).build_analyzer()
		return lambda doc: ([stemmer.stem(w) for w in analyzer(doc)])
class StemmedCountVectorizer(CountVectorizer):
	"""CountVectorizer whose analyzer Snowball-stems every produced token."""
	def build_analyzer(self):
		# Wrap the parent analyzer so each emitted token is stemmed.
		analyzer = super(StemmedCountVectorizer, self).build_analyzer()
		return lambda doc: ([stemmer.stem(w) for w in analyzer(doc)])
def batch_data(series, batch_count):
	"""Split `series` into `batch_count` consecutive slices whose lengths
	differ by at most one; earlier batches receive the extra elements."""
	total = len(series)
	base, remainder = divmod(total, batch_count)
	batches = []
	lower = 0
	for batch_idx in range(batch_count):
		# The first `remainder` batches each take one extra element.
		upper = lower + base + (1 if batch_idx < remainder else 0)
		upper = min(upper, total)
		batches.append(series[lower:upper])
		lower = upper
	return batches
def by_predefined_words(train_texts, valid_texts = [], words = None, force_dense = True):
	"""Vectorize texts as term counts over a fixed vocabulary.

	Falls back to the top-30 textbook vocabulary when `words` is None.
	Returns (train_matrix, valid_matrix, words); the matrices are densified
	unless `force_dense` is False.
	"""
	if words is None:
		words = textbook.getTopVocabs("all", 30)
	# Map each word to its column index in the output matrices.
	vocabs = {word: column for column, word in enumerate(words)}
	vectorizer = CountVectorizer(vocabulary = vocabs)
	train_matrix = vectorizer.transform(train_texts)
	valid_matrix = vectorizer.transform(valid_texts)
	if force_dense:
		train_matrix = train_matrix.todense()
		valid_matrix = valid_matrix.todense()
	return (train_matrix, valid_matrix, words)
def normalize(train_matrix, valid_matrix = None, norm_info = None):
	"""Standardize each column of train_matrix (and valid_matrix) in place.

	When `norm_info` is given, its stored per-column mean/std are applied;
	otherwise the statistics are computed from train_matrix. Constant
	(zero-std) columns are replaced by the constant 0.5.

	Returns (train_matrix, valid_matrix, norm_dict) where norm_dict maps
	'total' to the column count and "0", "1", ... to {"mean", "std"}.

	BUG FIX: statistics are now stored under *string* keys ("%d" % i) to
	match how they are read back (norm_info["%d" % i]) and how they
	serialize to JSON. Previously they were written under int keys, so a
	returned dict could not be reused as `norm_info` without a JSON round
	trip (it raised KeyError).
	"""
	norm_dict = {'total': train_matrix.shape[1]}
	if norm_info is not None:
		n_cols = norm_info["total"]
	else:
		n_cols = train_matrix.shape[1]
	for i in range(n_cols):
		key = "%d" % i
		if norm_info is not None:
			mean = norm_info[key]["mean"]
			std = norm_info[key]["std"]
		else:
			mean = np.mean(train_matrix[:, i])
			std = np.std(train_matrix[:, i])
		norm_dict[key] = {"mean": mean, "std": std}
		if std != 0:
			train_matrix[:, i] = (train_matrix[:, i] - mean)/std
			if valid_matrix is not None:
				valid_matrix[:, i] = (valid_matrix[:, i] - mean)/std
		else:
			# Degenerate (constant) column: map it to the constant 0.5.
			train_matrix[:, i] = 0.5
			if valid_matrix is not None:
				valid_matrix[:, i] = 0.5
	return train_matrix, valid_matrix, norm_dict
# Perform 3 steps to generate training/ validating texts:
# 1. Construct the bag of words
# Parameters:
# ngram_rng: tuple, the lower and uppper bound of the length of a ngram
# words_src: "textbook"/"samples" / list of strings, the source to consider
# tb_chs: list of textbook chapters/ None, when words_src = "textbook", the chapters of textbook to consider
# selection: None/ "tfidf"/ "idf", strategy to select the bag of words
# select_top, select_bottom: integer, no. of words to select according the top/ bottom values of selection strategy
#
# 2. Dimensionality reduction
# Parameters:
# reduction: None/ "pca"/ "lsa"/ "ipca", strategy for dimensionality reduction
# reduce_n_attr: integer, desired no. of dimensions after reduction
#
# 3. Normalization
# Parameters:
# normalize_flag: boolean, if set to true, columns will be normalized to 0 mean and variance 1
#
# Other parameters:
# save_dir: string/ None, save preprocessing settings to the specified directory if not None
def preprocess(train_texts, valid_texts = [], normalize_flag = False, ngram_rng = (1,1), words_src = None, tb_chs = None, selection = None, select_top = 0, select_bottom = 0, reduction = None, reduce_n_attr = None, stem_words = False, savedir = None):
	"""Turn raw texts into feature matrices: bag-of-words construction and
	selection, optional dimensionality reduction, optional normalization.

	See the parameter description in the comment block above this function.
	Returns (train_matrix, valid_matrix, words).
	"""
	vectorizer, vect_texts = None, None
	# Step 1: construct the bag of words.
	if type(words_src) is list:
		# An explicit word list bypasses fitting and selection entirely.
		train_matrix, valid_matrix, words = by_predefined_words(train_texts, valid_texts, words_src)
	else:
		if words_src == "textbook":
			vect_texts = textbook.getOrderedText(chs = tb_chs)
			# NOTE(review): these branches look inverted relative to
			# `stem_words` (the stemming vectorizer is built when
			# stem_words is False) -- preserved as-is; confirm intent.
			if stem_words:
				vectorizer = textbook.getTfidfVectorizer(ngram_rng, chs = tb_chs)
			else:
				vectorizer = StemmedTfidfVectorizer(ngram_range = ngram_rng, stop_words = 'english')
				vectorizer.fit(textbook.getOrderedText(tb_chs))
		elif words_src == "samples":
			vect_texts = train_texts
			if stem_words:
				vectorizer = StemmedTfidfVectorizer(ngram_range = ngram_rng, stop_words = 'english')
			else:
				vectorizer = TfidfVectorizer(ngram_range = ngram_rng, stop_words = 'english')
			vectorizer.fit(train_texts)
		elif isinstance(words_src, TfidfVectorizer):
			# A pre-fitted vectorizer may be passed in directly.
			vectorizer = words_src
		else:
			raise Exception("Unexpected value for 'words_src'")
		if selection == "tfidf":
			# Tf-idf scores taken from the first row of the reference texts.
			tfidf_matrix = vectorizer.transform(vect_texts).toarray()[0]
		# Collect (word, score, column-index) triples for ranking.
		tuples = []
		for vocab in vectorizer.vocabulary_:
			index = vectorizer.vocabulary_[vocab]
			if selection == "idf":
				score = vectorizer.idf_[index]
			elif selection == "tfidf":
				score = tfidf_matrix[index]
			elif selection is None:
				score = vectorizer.idf_[index]
			else:
				raise Exception("Unexpected selection type")
			tuples.append((vocab, score, index))
		selected_tuples = []
		if selection is None or select_top + select_bottom >= len(tuples):
			selected_tuples = tuples
		elif select_top + select_bottom > 0:
			# Keep the highest- and lowest-scoring words.
			tuples = sorted(tuples, key = lambda x: x[1], reverse = True)
			selected_tuples = tuples[0:select_top] + tuples[(len(tuples)-select_bottom):]
		else:
			raise Exception("Must specify 'select_top'/'select_bottom' when 'selection' is not None")
		selected_words = [tup[0] for tup in selected_tuples]
		# LSA (TruncatedSVD) works on sparse input, so skip densifying then.
		train_matrix, valid_matrix, words = by_predefined_words(train_texts, valid_texts, selected_words, force_dense = reduction not in ["lsa"])
	# Step 2: optional dimensionality reduction.
	pca_components, norm_info = None, None
	reductor = None
	if reduction is not None:
		if reduction == "pca":
			reductor = PCA(n_components = reduce_n_attr)
		elif reduction == "lsa":
			reductor = TruncatedSVD(n_components = reduce_n_attr)
		elif reduction == "ipca":
			reductor = IncrementalPCA(n_components = reduce_n_attr)
		else:
			raise Exception("Unexpected reduction strategy '%s'"%reduction)
		train_matrix = reductor.fit_transform(train_matrix)
		valid_matrix = reductor.transform(valid_matrix)
		pca_components = reductor.components_
	# Step 3: optional per-column normalization.
	if normalize_flag:
		train_matrix, valid_matrix, norm_info = normalize(train_matrix, valid_matrix)
	# Persist the preprocessing settings so they can be replayed later.
	if savedir is not None:
		preprocess = {
			"words": words,
			"pca": reduction is not None
		}
		if normalize_flag:
			preprocess["norm_info"] = norm_info
		with open(savedir+'/preprocess.json', "w") as f:
			f.write(json.dumps(preprocess, indent = 4))
		if reduction is not None:
			np.save(savedir+"/pca.npy", pca_components)
	return train_matrix, valid_matrix, words
| [
"clarkwkw@yahoo.com.hk"
] | clarkwkw@yahoo.com.hk |
5d0899b6f77856ea65209c4696f20e4176cf521f | 855ff14a494aa47e2ab4c09a58468d99c1eb92f5 | /tests/test_download.py | a663ff12d797820386a8173687d6de5fd601d990 | [
"MIT"
] | permissive | nnnyt/EduData | 9ece284f5143d84ec9eb483fcac3747e2a1fcfba | 1827f12167a68f15776cd303ce550814633f1256 | refs/heads/master | 2020-07-06T00:56:32.627104 | 2019-08-17T05:14:12 | 2019-08-17T05:14:12 | 202,838,340 | 0 | 0 | MIT | 2019-08-17T05:11:15 | 2019-08-17T05:11:15 | null | UTF-8 | Python | false | false | 516 | py | # coding: utf-8
# create by tongshiwei on 2019/7/2
import time
import pytest
from EduData import get_data
from EduData.download_data import url_dict
def test_download(tmp_path):
    """Download every known dataset, then check that re-downloading without
    `override` raises FileExistsError and that override=True re-downloads."""
    # Initial download of every dataset.
    for dataset_url in url_dict:
        get_data(dataset_url, tmp_path, override=True)
        time.sleep(1)
    # A second download without override must refuse to clobber files.
    for dataset_url in url_dict:
        with pytest.raises(FileExistsError):
            get_data(dataset_url, tmp_path, override=False)
        time.sleep(1)
    # With override=True the existing files are replaced.
    for dataset_url in url_dict:
        get_data(dataset_url, tmp_path, override=True)
        time.sleep(1)
| [
"tongsw@mail.ustc.edu.cn"
] | tongsw@mail.ustc.edu.cn |
20f76be639652910aa375130dcb4ae19041a2617 | 23b0203547fdcba7065afba855abb0a3d6d79bc4 | /util/perp.py | 80e33b350269aa15a7fc1a9bb6554e39530a279c | [] | no_license | jielaizhang/archangel | 4336c13fbb67452e5eef868cd7debcb2e5e86a7c | a28cfb509f9ae5e0b0cbd8b25285772da2658c44 | refs/heads/master | 2021-01-17T18:10:53.541526 | 2016-10-28T23:03:22 | 2016-10-28T23:03:22 | 71,165,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | #!/usr/bin/env python
def perp(m, b, x, y):
    """Signed perpendicular distance from the line y = m*x + b to (x, y).

    The magnitude is the usual point-to-line distance; the sign is positive
    when the foot of the perpendicular lies at or left of x, negative
    otherwise.
    """
    if m == 0.:
        # Horizontal line: the foot is directly below/above the point.
        foot_x = x
    else:
        # Intersect the line with the perpendicular through (x, y).
        intercept = y + x / m
        foot_x = (intercept - b) / (m + 1. / m)
    foot_y = m * foot_x + b
    distance = ((foot_x - x) ** 2 + (foot_y - y) ** 2) ** 0.5
    return distance if foot_x <= x else -distance
| [
"zhang.jielai@gmail.com"
] | zhang.jielai@gmail.com |
2133a820156c371ee986d90af631a6a95f55dabd | 4cdc9ba739f90f6ac4bcd6f916ba194ada77d68c | /剑指offer/第五遍/17.打印从1到最大的n位数.py | bfaf04ffc577d04086babfe256bb04a2bb399b4e | [] | no_license | leilalu/algorithm | bee68690daf836cc5807c3112c2c9e6f63bc0a76 | 746d77e9bfbcb3877fefae9a915004b3bfbcc612 | refs/heads/master | 2020-09-30T15:56:28.224945 | 2020-05-30T03:28:39 | 2020-05-30T03:28:39 | 227,313,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,595 | py | """
输入数字 n,按顺序打印出从 1 到最大的 n 位十进制数。比如输入 3,则打印出 1、2、3 一直到最大的 3 位数 999。
示例 1:
输入: n = 1
输出: [1,2,3,4,5,6,7,8,9]
说明:
用返回一个整数列表来代替打印
n 为正整数
"""
"""
最大的n位数可能会超出存储范围,因此需要用字符串来保存这些数字,并且模拟加法运算
打印数字时要从第一个非0数字开始
当第一位数字发生进位时,模拟加法结束
"""
class Solution1:
    """Enumerate 1..10**n - 1 by simulating decimal addition on a digit
    string (the classic big-number approach that avoids integer overflow)."""
    def printNumbers(self, n):
        """Return [1, 2, ..., 10**n - 1]; [] when n < 1."""
        # First validate the input.
        if n < 1:
            return []
        res = []
        num = ['0'] * n
        while not self.Increment(num):
            # Record the current number, converting the digit string to int.
            number = int(self.PrintNum(num))
            res.append(number)
        return res
    def Increment(self, num):
        """Add 1 to the digit string `num` in place; return True when the
        most-significant digit overflows (enumeration finished)."""
        circle = 0
        length = len(num) # n
        isOverflow = False
        for i in range(length-1, -1, -1):
            sumValue = circle + int(num[i]) # digit plus incoming carry
            # The least-significant digit additionally receives the +1.
            if i == length-1:
                sumValue += 1
            # Check whether this digit carries over.
            if sumValue >= 10:
                # Carry out of the most-significant digit: we are done.
                if i == 0:
                    isOverflow = True
                    break
                else:
                    sumValue -= 10
                    circle = 1
                    num[i] = str(sumValue)
            else:
                num[i] = str(sumValue)
                break
        return isOverflow
    def PrintNum(self, num):
        """Return `num` as a string with leading zeros stripped
        (implicitly None when all digits are zero)."""
        for i in range(len(num)):
            if num[i] != '0':
                return ''.join(num[i:])
class Solution2:
    """Enumerate 1..10**n - 1 by recursively fixing one digit at a time."""
    def printNumbers(self, n):
        """Return [1, 2, ..., 10**n - 1]; [] when n < 1."""
        if n < 1:
            return []
        self.res = []
        digits = ['0'] * n
        for first in range(10):
            digits[0] = str(first)
            self.printNumbersCore(digits, n, 0)
        # The very first recorded entry corresponds to the all-zero string,
        # which PrintNum maps to None; counting starts at 1, so drop it.
        return self.res[1:]
    def printNumbersCore(self, num, length, index):
        """Recursively enumerate all digits after position `index`,
        recording each fully-specified number."""
        if index == length - 1:
            self.res.append(self.PrintNum(num))
            return
        for digit in range(10):
            num[index + 1] = str(digit)
            self.printNumbersCore(num, length, index + 1)
    def PrintNum(self, num):
        """Convert the digit list to an int, skipping leading zeros
        (implicitly None when every digit is zero)."""
        for pos, digit in enumerate(num):
            if digit != '0':
                return int(''.join(num[pos:]))
if __name__ == '__main__':
    # Smoke test: for n = 1 this prints [1, 2, ..., 9].
    n = 1
    res = Solution2().printNumbers(n)
    print(res)
| [
"244492644@qq.com"
] | 244492644@qq.com |
cc4581dcb239c15cbc2e561069528a340c92b34d | f48f9798819b12669a8428f1dc0639e589fb1113 | /programming/misc/googlemock/actions.py | 0e8cff036c9e369f402327537b8b77901e62731c | [] | no_license | vdemir/PiSiPackages-pardus-2011-devel | 781aac6caea2af4f9255770e5d9301e499299e28 | 7e1867a7f00ee9033c70cc92dc6700a50025430f | refs/heads/master | 2020-12-30T18:58:18.590419 | 2012-03-12T03:16:34 | 2012-03-12T03:16:34 | 51,609,831 | 1 | 0 | null | 2016-02-12T19:05:41 | 2016-02-12T19:05:40 | null | UTF-8 | Python | false | false | 1,082 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2011 TUBITAK/BILGEM
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
def setup():
    """Prepare the googlemock source tree and run the configure stage."""
    # Remove bundled gtest and use the system one.
    # acx_pthread.m4 is needed in configure stage, so keep it.
    shelltools.move("gtest", "gtest.bak")
    shelltools.makedirs("gtest/m4")
    shelltools.copy("gtest.bak/m4/acx_pthread.m4", "gtest/m4")
    shelltools.unlinkDir("gtest.bak")
    autotools.autoreconf("-vfi")
    autotools.configure("--disable-static")
    # Strip rpath hardcoding out of the generated libtool script.
    # (The original comment mentioned speexenc/speexdec -- it was copied
    # from another package; the sed commands themselves are generic.)
    pisitools.dosed("libtool", "^hardcode_libdir_flag_spec=.*", "hardcode_libdir_flag_spec=\"\"")
    pisitools.dosed("libtool", "^runpath_var=LD_RUN_PATH", "runpath_var=DIE_RPATH_DIE")
def build():
    """Compile the package with make."""
    autotools.make()
def install():
    """Install build artifacts into the package image and ship the docs."""
    autotools.install()
    pisitools.dodoc("CHANGES", "COPYING", "CONTRIBUTORS", "README")
| [
"kaptan@pisipackages.org"
] | kaptan@pisipackages.org |
3e2ffe9fb08e3de17a22adf567ea3bae5d89cad3 | a814debee728e59a7a10d8c12b92c1f3ee97e19d | /Atividade/cadastro_alunos.py | 3549954447d09ca6bc18697b41ec82898a0ea902 | [] | no_license | PedroVitor1995/Algoritmo-ADS-2016.1 | 0ee034d2f03b29d3c8177fb3402f7aeae08d07cf | 8e3b6dfb0db188b9f5d68dcb8619f6636883ab89 | refs/heads/master | 2021-01-01T15:51:56.636502 | 2017-07-19T13:47:36 | 2017-07-19T13:47:36 | 81,328,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,784 | py | def main():
    # Text-menu loop for the student registry.
    # NOTE(review): this file is Python 2 -- `input` evaluates the typed
    # text (hence the comparisons against the ints 1/2/3/0) and
    # `raw_input` reads plain strings.
    menu = (' 1 - Cadastrar \n 2 - Listar \n 3 - Remover \n 0 - Sair \n Opcao: ')
    alunos = []  # registry: list of dicts with keys 'Nome', 'Idade', 'Sexo'
    while True:
        opcao = input(menu)
        if opcao == 1:
            cadastrar(alunos)
            print ('Aluno cadastrado com sucesso!')
        elif opcao == 2:
            listar(alunos)
        elif opcao == 3:
            remover(alunos)
        elif opcao == 0:
            print('Saindo do programa')
            # ask for confirmation before actually leaving the loop
            reposta = raw_input('Dejesa realmente sair S/N: ')
            if reposta == 'N' or reposta == 'n':
                continue
            else:
                break
        else:
            print ('Opcao invalida.')
    print ('Programa finalizado.')
def cadastrar(alunos):
    # Prompt for one student's data and append it to `alunos`.
    # Returns the (mutated) list.  Python 2: `input` evaluates, so the
    # age is read as a number; `raw_input` reads raw strings.
    aluno = {}
    aluno['Nome'] = raw_input('Nome: ')
    while True:
        aluno['Idade'] = input('Idade: ')
        if aluno['Idade'] > 0:  # reject zero/negative ages
            break
        else:
            continue
    while True:
        aluno['Sexo'] = raw_input('Sexo M/F: ')
        # accept only F/M, in either case
        if aluno['Sexo'] == 'F' or aluno['Sexo'] == 'M' or aluno['Sexo'] == 'f' or aluno['Sexo'] == 'm':
            break
        else:
            continue
    alunos.append(aluno)
    return alunos
def listar(alunos):
    # Print a header with the count, then one registered student per line.
    # (Python 2 print statement on the last line.)
    print ('Alunos Cadastrados (%d)') % len(alunos)
    for i in range(len(alunos)):
        print alunos[i]
def remover(alunos):
    # Remove one student by name; when the name is not unique, ask for the
    # age as a tie-breaker.
    listar(alunos)
    nome = raw_input('Digite o nome do aluno que deseja remover: ')
    quantidade = 0
    # count how many students share the given name
    for i in range(len(alunos)):
        if alunos[i]['Nome'] == nome:
            quantidade += 1
    if quantidade == 1:
        # unique match: delete the first (only) occurrence
        for i in range(len(alunos)):
            if alunos[i]['Nome'] == nome:
                del alunos[i]
                print ('Aluno %s removido com sucesso!')%(nome)
                break
    else:
        # zero or several matches: disambiguate by age.
        # NOTE(review): with zero matches this still prompts for an age and
        # removes nothing -- confirm that is the intended behaviour.
        idade = input('Digite a idade do aluno que dejesa remover: ')
        for i in range(len(alunos)):
            if alunos[i]['Nome'] == nome and alunos[i]['Idade'] == idade:
                del alunos[i]
                print ('Aluno %s com idade %d removido com sucesso!') % (nome,idade)
                break
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | PedroVitor1995.noreply@github.com |
c0fdef125f38ff9c8ea632183c45e314e09b6c90 | 874cb9539283a5dc2616f3c5ae6ca852a63291ed | /classLaneLineHistory.py | ab77f1ddc45c7d4dfef990769231a7b827d77de0 | [] | no_license | redherring2141/ac_lane_detection | 494ddd89ab41a6e63032325127d63f6aee6f4478 | 132da04ac7becd4d8f7750936272bcbaea56d975 | refs/heads/master | 2022-11-05T00:42:32.124816 | 2020-06-22T02:50:57 | 2020-06-22T02:50:57 | 250,525,327 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,906 | py | import math
import numpy as np
from classLaneLine import LaneLine, create_queue
class LaneLineHistory:
    """Rolling history of fitted lane-line polynomials.

    Keeps the last ``queue_depth`` LaneLine fits and exposes a smoothed
    (element-wise averaged) second-degree polynomial.  New fits that stray
    too far from the smoothed curve at the probe x-positions are rejected.
    """
    def __init__(self, queue_depth=2, test_points=[50, 300, 500, 700], poly_max_deviation_distance=150):
        # NOTE(review): mutable default for test_points is shared across
        # instances; harmless as long as it is only read.
        self.lane_lines = create_queue(queue_depth)  # bounded FIFO of LaneLine fits
        self.smoothed_poly = None                    # mean of stored coefficients, set on first append
        self.test_points = test_points               # x positions where candidate curves are compared
        self.poly_max_deviation_distance = poly_max_deviation_distance  # max |y| gap allowed at a probe
    def append(self, lane_line, force=False):
        """Try to add ``lane_line`` to the history; return True when accepted.

        The first fit (or any fit with ``force=True``) is always accepted.
        Otherwise the candidate and the smoothed polynomial are evaluated at
        ``test_points``; the candidate is rejected when any gap exceeds
        ``poly_max_deviation_distance``.
        """
        if len(self.lane_lines) == 0 or force:
            self.lane_lines.append(lane_line)
            self.get_smoothed_polynomial()
            return True
        # evaluate both second-degree polynomials at the probe x positions
        test_y_smooth = np.asarray(list(map(lambda x: self.smoothed_poly[0] * x**2 + self.smoothed_poly[1] * x + self.smoothed_poly[2], self.test_points)))
        test_y_new = np.asarray(list(map(lambda x: lane_line.polynomial_coeff[0] * x**2 + lane_line.polynomial_coeff[1] * x + lane_line.polynomial_coeff[2], self.test_points)))
        dist = np.absolute(test_y_smooth - test_y_new)
        #dist = np.absolute(self.smoothed_poly - lane_line.polynomial_coeff)
        #dist_max = np.absolute(self.smoothed_poly * self.poly_max_deviation_distance)
        max_dist = dist[np.argmax(dist)]
        if max_dist > self.poly_max_deviation_distance:
            print("**** MAX DISTANCE BREACHED ****")
            print("y_smooth={0} - y_new={1} - distance={2} - max-distance={3}".format(test_y_smooth, test_y_new, max_dist, self.poly_max_deviation_distance))
            return False
        self.lane_lines.append(lane_line)
        self.get_smoothed_polynomial()
        return True
    def get_smoothed_polynomial(self):
        """Recompute and return the element-wise mean of all stored coefficients."""
        all_coeffs = np.asarray(list(map(lambda lane_line: lane_line.polynomial_coeff, self.lane_lines)))
        self.smoothed_poly = np.mean(all_coeffs, axis=0)
        return self.smoothed_poly
"redherring2141@kaist.ac.kr"
] | redherring2141@kaist.ac.kr |
8702836fc9279ef168fe2ff273935ae450062b85 | c1e8ddcfda2586ddc6be93ff60a77428150d6921 | /DiegoRocha/NCtBkjKq.py | c0769eb652b1b3dae72bae2d8f527197b1334206 | [] | no_license | weltonvaz/PythonBR | 740fde6aa0040d13100005669f1a011f52573580 | f2bf033d692aee3f79ff1ec2644799cb7f3f5585 | refs/heads/master | 2020-05-09T20:53:06.416836 | 2017-06-15T21:05:38 | 2017-06-15T21:05:38 | 27,550,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,615 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def toByteArray(s, withLen):
    """Pack *s* into a bytearray, optionally prefixed by a single length byte.

    Accepts bytes/bytearray directly; str input is UTF-8 encoded first, so
    the function also works on Python 3 (the original called
    ``bytearray.extend`` on a str, which raises TypeError there).

    Args:
        s: payload as str, bytes or bytearray.
        withLen: when truthy, prepend the payload length as one byte.
    Raises:
        Exception: when the payload is longer than 255 bytes (the length
        prefix must fit in a single byte).
    """
    payload = s.encode('utf-8') if isinstance(s, str) else s
    if len(payload) > 255:
        raise Exception('String precisa ter menos que 255 caracteres')
    out = bytearray()
    if withLen:
        out.append(len(payload))
    out.extend(payload)
    return out
# 64-character alphabet for the custom base64-style encoder below.
# Note this is NOT the standard base64 alphabet: it starts with '+', '-',
# then digits, uppercase and lowercase letters.
BIN_TO_ASC = [
    '+', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8',
    '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',
    'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',
    'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f',
    'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
    'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'
]
def toString(_bytes):
    """Encode a byte sequence as text using BIN_TO_ASC, base64-style (3 bytes -> 4 chars).

    NOTE(review): when len(_bytes) is not a multiple of 3 a zero byte is
    appended but ``_len`` is not updated, so the padding byte is never read
    and the final partial group appears to be emitted incompletely --
    verify the tail handling against the matching decoder before relying
    on it for non-multiple-of-3 inputs.
    """
    buf = []
    try:
        _len = len(_bytes)
    except:
        # fall back for objects exposing .size instead of __len__
        _len = _bytes.size
    flush = False
    if (_len % 3 != 0):
        _bytes.append(0)  # padding byte (see NOTE above: the loop still stops at _len)
        flush = True
    counter = last = 0
    for i in range(_len):
        # i < _len always holds inside range(_len), so this is just b & 0xFF
        b = _bytes[i] & 0xFF if i < _len else 0
        if counter == 0:
            # top 6 bits of byte 1
            buf.append(BIN_TO_ASC[b >> 2])
            last = b
        elif counter == 1:
            # low 2 bits of byte 1 + top 4 bits of byte 2
            buf.append(BIN_TO_ASC[((last & 0x03) << 4) | ((b & 0xF0) >> 4)])
            last = b
        elif counter == 2:
            # low 4 bits of byte 2 + top 2 bits of byte 3, then low 6 bits of byte 3
            buf.append(BIN_TO_ASC[((last & 0x0F) << 2) | ((b & 0xC0) >> 6)])
            if ( not (flush and i == _len - 1)):
                buf.append(BIN_TO_ASC[b & 0x3F])
            last = 0
        else:
            pass
        counter = (counter+1) % 3
    return ''.join(buf)
print (toString(146)) | [
"weltonvaz@gmail.com"
] | weltonvaz@gmail.com |
6a8a20770c65bc69e42feab1370ccc2a82883911 | 5ef6b1140f4fe7467595f6f49eeb3ec211424317 | /test/busbooking.py | 676654475e7bdc1d4e4d41316a176d8b48cbf0d2 | [] | no_license | publiccoding/prog_ln | d0ddc5ea69be6de6d2c3da0535f924985fcd2585 | 9c3210d177000d0a05cc9a0f1f281cebb8785adb | refs/heads/master | 2021-09-25T04:15:31.109539 | 2018-10-18T01:04:41 | 2018-10-18T01:04:41 | 117,082,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,071 | py |
import sys
#arg1=sys.argv[1]
# Register user for bus booking and store the data in userdata.txt, or
# validate a login against that file.
# Lines are stored as " <username> = <password> \n", i.e. with spaces
# around the '='.
reg = input("Enter your options :\nUser Registartion -> 1 \nUser Login -> 2\n" )
if reg == "1":
    # Registration: append one "username = password" line.
    uname = input("Enter your username")
    pwd = input("Enter your password ")
    userloginData = f' {uname} = {pwd} \n'
    with open ('userdata.txt','a') as file:
        file.write(userloginData)
# User login by validating username and password
elif reg == "2":
    username = input("Enter login username")
    password = input("Enter password of the user")
    logindata = {}
    with open('userdata.txt','r') as file:
        userdata = file.readlines()
    lines = [line.strip().split('=') for line in userdata]
    for pair in lines:
        # BUGFIX: the stored lines keep spaces around '=', so the raw split
        # pieces ("user " / " pass") never matched the typed credentials and
        # login could never succeed.  Strip each piece before comparing.
        logindata[pair[0].strip()] = pair[1].strip()
    if username in logindata:
        if logindata[username] == password:
            print("login successfully")
            exit(0)
    print("Login unsuccessfull")
    exit(1)
| [
"thimmarayan.krishnappa@gmail.com"
] | thimmarayan.krishnappa@gmail.com |
70ef33ff15c709e33a7c4f4f2936bf571dc0ec70 | a8062308fb3bf6c8952257504a50c3e97d801294 | /problems/N391_Perfect_Rectangle.py | 044dd2bf7f6161f54a015998313d9f3a11947085 | [] | no_license | wan-catherine/Leetcode | 650d697a873ad23c0b64d08ad525bf9fcdb62b1b | 238995bd23c8a6c40c6035890e94baa2473d4bbc | refs/heads/master | 2023-09-01T00:56:27.677230 | 2023-08-31T00:49:31 | 2023-08-31T00:49:31 | 143,770,000 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,443 | py | import collections
"""
1. sum of the area of all small rectangles equals to the final large rectangle's area
2. all lbnodes and runodes should not be duplicated
3. the four nodes for the final rectangle should only show one time
"""
class Solution(object):
    def isRectangleCover(self, rectangles):
        """
        :type rectangles: List[List[int]]
        :rtype: bool

        The rectangles tile one perfect rectangle exactly when:
          1. no two of them share a bottom-left or a top-right corner,
          2. their areas sum to the bounding rectangle's area, and
          3. each of the four bounding corners appears exactly once while
             every other corner point is shared by at least two rectangles.
        """
        corner_counts = collections.Counter()
        bottom_lefts = set()
        top_rights = set()
        total_area = 0
        for x1, y1, x2, y2 in rectangles:
            # duplicated bottom-left / top-right corners mean an overlap
            if (x1, y1) in bottom_lefts or (x2, y2) in top_rights:
                return False
            bottom_lefts.add((x1, y1))
            top_rights.add((x2, y2))
            total_area += (x2 - x1) * (y2 - y1)
            corner_counts.update([(x1, y1), (x2, y2), (x1, y2), (x2, y1)])
        # lexicographic min/max of the corner tuples, as in the original
        lower_left = min(bottom_lefts)
        upper_right = max(top_rights)
        upper_left = (lower_left[0], upper_right[1])
        lower_right = (upper_right[0], lower_left[1])
        bounding_area = (upper_right[0] - lower_left[0]) * (upper_right[1] - lower_left[1])
        if bounding_area != total_area:
            return False
        outer = (lower_left, upper_right, upper_left, lower_right)
        if any(corner_counts[corner] != 1 for corner in outer):
            return False
        # any other corner seen exactly once marks a notch or an overhang
        return all(count != 1 or point in outer
                   for point, count in corner_counts.items())
| [
"rarry2012@gmail.com"
] | rarry2012@gmail.com |
dead213f276db1180f44d4940987bca1e0b1b23b | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/390/usersdata/317/78498/submittedfiles/ex1.py | 3bd112ad4b74a11944a5f2b00b8a9a4df4aca8e1 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | # -*- coding: utf-8 -*-
from __future__ import division
# Read the three coefficients of a*x**2 + b*x + c once, as integers.
# (The original prompted twice -- the first set was read as raw strings
# and then discarded.)
a = int(input('Digite a: '))
b = int(input('Digite b: '))
c = int(input('Digite c: '))
#COMECE A PARTIR DAQUI!
import math
# BUGFIX: the discriminant is b**2 - 4*a*c; the original computed
# (-b**2) - (4*a*c), which negates b squared.
d = b**2 - (4*a*c)
print('o delta é {}'.format(d)) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
6c3535ee17c4fa616fa64f8e4dae8094aeda358f | 092dd56a1bf9357466c05d0f5aedf240cec1a27b | /tests/pytests/problems/TestProblemDefaults.py | 1744a84c983e9cb7877f2d329d7c84617e83f61c | [
"MIT"
] | permissive | rwalkerlewis/pylith | cef02d5543e99a3e778a1c530967e6b5f1d5dcba | c5f872c6afff004a06311d36ac078133a30abd99 | refs/heads/main | 2023-08-24T18:27:30.877550 | 2023-06-21T22:03:01 | 2023-06-21T22:03:01 | 154,047,591 | 0 | 0 | MIT | 2018-10-21T20:05:59 | 2018-10-21T20:05:59 | null | UTF-8 | Python | false | false | 1,130 | py | #!/usr/bin/env nemesis
#
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2022 University of California, Davis
#
# See LICENSE.md for license information.
#
# ======================================================================
#
# @file tests/pytests/problems/TestProblemDefaults.py
#
# @brief Unit testing of Python ProblemDefaults object.
import unittest
from pylith.testing.UnitTestApp import TestComponent
from pylith.problems.ProblemDefaults import (ProblemDefaults, problem_defaults)
class TestProblemDefaults(TestComponent):
    """Unit testing of ProblemDefaults object.

    TestComponent supplies the generic checks; this subclass only wires in
    the component class under test and its factory.
    """
    _class = ProblemDefaults      # component class exercised by TestComponent
    _factory = problem_defaults   # factory function that instantiates it
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(TestProblemDefaults))
    unittest.TextTestRunner(verbosity=2).run(suite)
# End of file
| [
"baagaard@usgs.gov"
] | baagaard@usgs.gov |
e1386f9f07d356dd3aa5604b39f780c4f5fd5eb2 | d1b9c5bb6992e1eabe2b5e4eea01f99384d901bb | /aiida_tbextraction/fp_run/wannier_input/_vasp.py | 94e746cd2789d976497857667c684a4aa521e5a4 | [
"Apache-2.0"
] | permissive | zx-sdu/aiida-tbextraction | c01148541aded7324fe8cf8ad01f1a54d9e1bf43 | 0bf6d19cbc643e0bdbbe30fe1dd0c6179eb6a647 | refs/heads/master | 2020-09-27T19:53:05.896439 | 2019-01-24T11:40:40 | 2019-01-24T11:40:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,882 | py | # -*- coding: utf-8 -*-
# © 2017-2019, ETH Zurich, Institut für Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""
Defines a workflow that calculates the Wannier90 input files using VASP.
"""
from fsc.export import export
import numpy as np
from aiida.orm import Code, DataFactory, CalculationFactory
from aiida.orm.data.array.bands import BandsData
from aiida.work.workchain import ToContext
from aiida_tools import check_workchain_step
from aiida_vasp.io.win import WinParser # pylint: disable=import-error,useless-suppression
from . import WannierInputBase
from .._helpers._inline_calcs import reduce_num_wann_inline
@export
class VaspWannierInput(WannierInputBase):
    """
    Calculates the Wannier90 input files using VASP.
    """
    @classmethod
    def define(cls, spec):
        # Extend the base WannierInputBase spec with the VASP-specific
        # inputs and the two-step outline.
        super(VaspWannierInput, cls).define(spec)
        ParameterData = DataFactory('parameter')
        spec.input('code', valid_type=Code, help='Code that runs VASP.')
        spec.input(
            'parameters',
            valid_type=ParameterData,
            help='Parameters for the Vasp2w90 calculation.'
        )
        spec.input_namespace(
            'calculation_kwargs',
            required=False,
            dynamic=True,
            help='Keyword arguments passed to the Vasp2w90 calculation.'
        )
        spec.outline(cls.submit_calculation, cls.get_result)
    @check_workchain_step
    def submit_calculation(self):
        """
        Run the Vasp2w90 calculation.
        """
        self.report("Submitting VASP2W90 calculation.")
        return ToContext(
            vasp_calc=self.submit(
                CalculationFactory('vasp.vasp2w90').process(),
                structure=self.inputs.structure,
                # one potential per atomic kind, keyed by a 1-tuple
                potential={(kind, ): pot
                           for kind, pot in self.inputs.potentials.items()},
                kpoints=self.inputs.kpoints_mesh,
                parameters=self.inputs.parameters,
                code=self.inputs.code,
                wannier_parameters=self.inputs.get('wannier_parameters', None),
                wannier_projections=self.inputs.
                get('wannier_projections', None),
                **self.inputs.get('calculation_kwargs', {})
            )
        )
    @check_workchain_step
    def get_result(self):
        """
        Get the VASP result and create the necessary outputs.
        """
        self.out(
            'wannier_settings',
            DataFactory('parameter')(dict={
                'seedname': 'wannier90'
            })
        )
        vasp_calc_output = self.ctx.vasp_calc.out
        retrieved_folder = vasp_calc_output.retrieved
        folder_list = retrieved_folder.get_folder_list()
        # sanity check: the calculation must have produced all three
        # Wannier90 input files before we expose the folder as output
        assert all(
            filename in folder_list for filename in
            ['wannier90.amn', 'wannier90.mmn', 'wannier90.eig']
        )
        self.report("Adding Wannier90 inputs to output.")
        self.out('wannier_input_folder', retrieved_folder)
        # reduce 'num_wann' if 'exclude_bands' is given
        self.out(
            'wannier_parameters',
            reduce_num_wann_inline(vasp_calc_output.wannier_parameters)[1]
        )
        self.out('wannier_bands', self.parse_wannier_bands(retrieved_folder))
        self.out('wannier_projections', vasp_calc_output.wannier_projections)
    def parse_wannier_bands(self, retrieved_folder):
        """
        Parse the Wannier90 bands from the .win and .eig files.
        """
        bands = BandsData()
        bands.set_kpoints(
            self.parse_kpts(retrieved_folder.get_abs_path('wannier90.win'))
        )
        bands.set_bands(
            self.parse_eig(retrieved_folder.get_abs_path('wannier90.eig'))
        )
        return bands
    # TODO: Replace with tools from aiida-wannier90, or integrate in vasp2w90
    @staticmethod
    def parse_kpts(win_file):
        """
        Parse the k-points used by Wannier90 from the .win file.
        """
        kpoints = []
        for line in WinParser(win_file).result['kpoints']:
            kpoints.append([float(x) for x in line.split()])
        return np.array(kpoints)
    # TODO: Replace with tools from aiida-wannier90, or integrate in vasp2w90
    @staticmethod
    def parse_eig(eig_file):
        """
        Parse the eigenvalues used by Wannier90 from the .eig file.
        """
        # Each line has three whitespace-separated columns; the first is
        # ignored, the second is treated as a running group index and the
        # third as the eigenvalue.  Presumably column 2 is the k-point
        # index, so bands are grouped per k-point -- TODO confirm against
        # the wannier90.eig format.
        idx = 1
        bands = []
        bands_part = []
        with open(eig_file, 'r') as in_file:
            for line in in_file:
                _, idx_new, val = line.split()
                idx_new = int(idx_new)
                val = float(val)
                if idx_new > idx:
                    # index increased: close the current group
                    idx = idx_new
                    bands.append(bands_part)
                    bands_part = []
                bands_part.append(val)
            bands.append(bands_part)  # flush the final group
        return np.array(bands)
| [
"greschd@gmx.ch"
] | greschd@gmx.ch |
f0821f868eb86caed7a71549fa9b479021aa452e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03563/s841164729.py | 7bbf69bc6f80a866d0951cf151ac7ae99dd8cfb0 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | import sys
# stdin helpers (competitive-programming boilerplate)
IS = lambda: sys.stdin.readline().rstrip()      # one stripped input line
II = lambda: int(IS())                          # one int
MII = lambda: list(map(int, IS().split()))      # list of ints from one line
MIIZ = lambda: list(map(lambda x: x-1, MII()))  # same, shifted to 0-based
def main():
    # Reads r and g and prints x such that (r + x) / 2 == g, i.e. 2*g - r.
    r = II()
    g = II()
    print(2*g-r)
if __name__ == '__main__':
    main()
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a5cbb7d24f452dbe657a97d15550742eaef4cf13 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03409/s468789013.py | b57e5c6785ee11c84c33be14a422e38f546a4aa9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | N = int(input())
# N is read on the preceding line (fused into the surrounding metadata in
# this dump): N = int(input())
R = [tuple(map(int, input().split())) for _ in range(N)]  # red points
B = [tuple(map(int, input().split())) for _ in range(N)]  # blue points
R = sorted(R)
B = sorted(B)
res = 0
# For each blue point (in increasing order), pick among the unused red
# points strictly to its lower-left the one with the largest y, pair them,
# and count the pair.
for bx, by in B:
    idx = -1   # index of the best red candidate found so far
    tmp = -1   # its y coordinate
    for i, (rx, ry) in enumerate(R):
        if rx < bx and ry < by:
            if ry >= tmp:
                tmp = ry
                idx = i
    if idx != -1:
        R.pop(idx)  # consume the matched red point
        res += 1
print(res)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d41e112d9da13c81b9d2b69b9e6a2da687d3b496 | 53db924607abc85139dafc8c490218721c7cf9c3 | /redturtle/smartlink/config.py | e9813a95e40c4e35ca7ecf6dbf28d38387c4216c | [] | no_license | RedTurtle/redturtle.smartlink | 4f6fbf45551b91d9725efd42758a310bd3508fa6 | f06b6dd45361af170127fade78fe5c936426b0cf | refs/heads/master | 2021-01-17T14:00:39.273854 | 2017-04-24T07:50:29 | 2017-04-24T07:50:29 | 4,359,396 | 1 | 1 | null | 2021-07-26T14:56:03 | 2012-05-17T14:38:30 | Python | UTF-8 | Python | false | false | 156 | py | from Products.ATContentTypes.permission import permissions
# Archetypes project name and the add-permission map for its content types.
PROJECTNAME = 'redturtle.smartlink'
ADD_PERMISSIONS = {
    # SmartLink reuses the stock ATContentTypes "Link" add permission
    'SmartLink': permissions['Link']
}
| [
"luca@keul.it"
] | luca@keul.it |
66542ffb5ad08a5084f91e3a68b4479c2696fe83 | a2098c9c8d39cc9e392f21de64c7ced0549d6f1f | /custom/signup/backends.py | a7f6895e690b4c25fe22dab4e2fadd9368bc9ae8 | [] | no_license | dmitryro/divorcesus | 23fe394b0d065f635ecb11eed945cc4fcb9bb829 | 8ecedb2b8a019e63f37702888dd12e994a75105e | refs/heads/master | 2022-12-11T17:20:13.348413 | 2020-10-01T17:27:57 | 2020-10-01T17:27:57 | 56,432,086 | 0 | 1 | null | 2022-12-08T02:22:29 | 2016-04-17T11:05:27 | JavaScript | UTF-8 | Python | false | false | 128 | py | from social_core.backends.facebook import FacebookOAuth2
class CustomFacebookOauth(FacebookOAuth2):
    """Facebook OAuth2 backend with the redirect-state mechanism disabled.

    REDIRECT_STATE=False stops social-core from appending its own
    ``redirect_state`` parameter to the redirect URI -- presumably because
    Facebook requires the registered redirect URI to match exactly; confirm
    against the app's Facebook configuration.
    """
    REDIRECT_STATE = False
| [
"dmitryro@gmail.com"
] | dmitryro@gmail.com |
36a512ba1550fa45e98d9afb5b913aa7b5c5b0b3 | f8ffac4fa0dbe27316fa443a16df8a3f1f5cff05 | /Python/Merge_the_Tools.py | 1b827ae71a08079bff6e8e27327e91e02862fc3c | [] | no_license | ankitniranjan/HackerrankSolutions | e27073f9837787a8af7a0157d95612028c07c974 | e110c72d3b137cf4c5cef6e91f58a17452c54c08 | refs/heads/master | 2023-03-16T19:06:17.805307 | 2021-03-09T16:28:39 | 2021-03-09T16:28:39 | 292,994,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | def merge_the_tools(string, k):
    # Split `string` into len(string)//k consecutive chunks of length k and
    # print each chunk with duplicate characters removed (first occurrence
    # of each character is kept, in order).
    for i in range(0, len(string), k):
        str = string[i:i+k]  # NOTE(review): shadows the builtin `str` locally
        sub = ''
        for s in str:
            if s not in sub: #filtering data: keep only the first occurrence
                sub += s #merging characters
        print(sub)
if __name__ == '__main__':
string, k = input(), int(input())
merge_the_tools(string, k)
| [
"noreply@github.com"
] | ankitniranjan.noreply@github.com |
afbf98877c1eb4bfdf843e70f406f5061bfa7b3d | 3293dc42e15e956be202e39db196eed9912dcc01 | /estimation/prediction/machine learning/regression/lightgbm_example.py | 1b35f40b8f5ecf19138ca2343133d7ea57f275fc | [] | no_license | bthowe/data_science | c372e5364f24dc29e3de1fca3504211cb93b62fb | 63291df8084e5f62f9ba226e87db2242bb31ac94 | refs/heads/master | 2021-11-24T10:49:00.800890 | 2021-11-02T16:10:16 | 2021-11-02T16:10:16 | 106,839,857 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | import sys
import joblib
import numpy as np
import pandas as pd
import lightgbm as lgb
from scipy.stats import uniform
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
def model_train_random():
    """Randomized hyper-parameter search for a LightGBM regressor.

    Loads the training split from ../data_files, runs a 100-draw
    RandomizedSearchCV (5-fold CV, negative-MAE scoring, all cores) and
    prints the best parameter set and its score.
    """
    X = joblib.load('../data_files/X_train.pkl')
    y = joblib.load('../data_files/y_train_reg.pkl')
    # search space; uniform() samples each continuous parameter from [0, 1)
    lgb_parameters = {
        'boosting_type': ['gbdt', 'dart', 'goss'],
        'max_depth': [-1, 2, 3, 4, 5],
        'learning_rate': uniform(),
        'n_estimators': [10, 50, 100],
        'min_child_weight': uniform(),
        'colsample_bytree': uniform(),
        'reg_lambda': uniform()
    }
    grid_search = RandomizedSearchCV(
        lgb.LGBMRegressor(objective='regression'),
        lgb_parameters,
        n_iter=100,
        scoring='neg_mean_absolute_error',
        verbose=10,
        n_jobs=-1,
        cv=5
    )
    grid_search.fit(X, y)
    print(grid_search.best_params_)
    print(grid_search.best_score_)
if __name__ == '__main__':
model_train_random()
| [
"b.travis.howe@gmail.com"
] | b.travis.howe@gmail.com |
ac2168c980477f2df2b02e347d61bec3f35f7e39 | fe035be449d42bf5d56a67c21eeb13e25db0aea6 | /backend/location/migrations/0001_initial.py | a71f5e5f305cd2b3b28fa5007d0eeaa36e1a325f | [] | no_license | crowdbotics-apps/koxlab2-23672 | 0e5fd0406bb83d449f46cd69bd4d9daf0fda763b | 889c6731266e56624ae84ac33507b01f1f0f5fc0 | refs/heads/master | 2023-02-08T20:18:56.880235 | 2021-01-04T23:16:49 | 2021-01-04T23:16:49 | 326,830,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,700 | py | # Generated by Django 2.2.17 on 2021-01-04 23:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the ``location`` app.

    Creates MapLocation, TaskLocation, TaskerLocation and CustomerLocation.
    Prefer generating a new migration over hand-editing this file.
    """
    initial = True
    dependencies = [
        ('task_profile', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='MapLocation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('latitude', models.DecimalField(decimal_places=8, max_digits=12)),
                ('longitude', models.DecimalField(decimal_places=8, max_digits=12)),
            ],
        ),
        migrations.CreateModel(
            name='TaskLocation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('address', models.TextField()),
                ('zip', models.CharField(max_length=6)),
                ('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tasklocation_location', to='location.MapLocation')),
            ],
        ),
        migrations.CreateModel(
            name='TaskerLocation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('latitude', models.DecimalField(decimal_places=8, max_digits=12)),
                ('longitude', models.DecimalField(decimal_places=8, max_digits=12)),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('address', models.TextField(blank=True, null=True)),
                ('zip', models.CharField(blank=True, max_length=6, null=True)),
                ('tasker', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='taskerlocation_tasker', to='task_profile.TaskerProfile')),
            ],
        ),
        migrations.CreateModel(
            name='CustomerLocation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('zip', models.CharField(max_length=6)),
                ('country', models.CharField(max_length=50)),
                ('customer', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='customerlocation_customer', to='task_profile.CustomerProfile')),
                ('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='customerlocation_location', to='location.MapLocation')),
            ],
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
5d1ad3f78ce3801ebda2fa11170ef8e2a873fa60 | be9a1995c4a3a5eefcfe665801705e5eefa4f4d6 | /backlog.py | f41edd25708886a8185e80b2f8388e3e9cd07bbe | [] | no_license | intelliflovrk/SpareTime | c377bc867fe45a7fd4150095506d3af5e532960f | 975d2150e09e13ec5f3b5bec0016555f5f6ba0b0 | refs/heads/master | 2022-07-28T01:01:12.472879 | 2020-05-24T18:23:51 | 2020-05-24T18:23:51 | 260,218,638 | 0 | 2 | null | 2020-05-24T18:23:53 | 2020-04-30T13:26:55 | Python | UTF-8 | Python | false | false | 1,047 | py | fruit = ["apple", "banana", "mango"]
veg = ['carrot', 'beans', 'potato']
drink = ['milk', 'water', 'juice']
# The triple-quoted strings below are exercise prompts (T1-T5); as bare
# string literals they are no-ops at runtime.
#T1
""" Create a function here that print what kind of item is given from the above lists.
Example: if 'apple' given then it should print 'apple is a fruit.' """
#T2
"""Create a function that accepts unlimited lists and return a new_list with
all combined items from given lists(params)."""
#T3
"""Create a function which accepts a parameter(only string) and print its key where the
value is present by parsing the json file(food.json).
Example: if "milk" is given then it should print "milk is located in item4"
Please find the attached sample json file."""
#T4
"""Write a Python function that accepts a string and calculate the
number of upper case letters and lower case letters. Go to the editor
Sample String : 'Raj Playing COD'
Expected Output :
No. of Upper case characters : 5
No. of Lower case Characters : 8 """
#T5
"""Create a python script for BigBank user to manage current account using BigBank.png flowchart."""
| [
"raj.veeramani@intelliflo.com"
] | raj.veeramani@intelliflo.com |
1f3d959606679dc7ba5947b12daca67bc6146d51 | 13b558c3541ff00402f206c8c82a8ced18f3a76c | /test/kernel/test_kernel_relu.py | 2cb10b308b11f1588ed60af35c03ceb345034eda | [
"MIT"
] | permissive | lvdongxu/UnarySim | aeafa4fff35319a1ccdaca6bd4f227d5f16ccf57 | e32531e452e7831d5e03f9f84b5f35d7e9bceaa9 | refs/heads/master | 2023-06-18T19:56:50.434117 | 2021-07-19T20:27:21 | 2021-07-19T20:27:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,374 | py | # %%
import torch
from UnarySim.kernel.relu import FSUReLU
from UnarySim.stream.gen import RNG, SourceGen, BSGen
from UnarySim.metric.metric import ProgError
import matplotlib.pyplot as plt
import time
import math
import numpy as np
# %%
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# %%
def test(rng="Sobol",
         mode="bipolar",
         bitwidth=8,
         buf_dep=8,
         total_cnt=100,
         sr=False
         ):
    """Sweep FSUReLU over every representable input value and plot the error.

    Args:
        rng: RNG family used for bitstream generation (e.g. "Sobol").
        mode: "unipolar" or "bipolar" stream encoding.
        bitwidth: stream bitwidth; 2**bitwidth cycles are simulated.
        buf_dep: depth parameter forwarded to FSUReLU.
        total_cnt: number of RNG seeds averaged over.
        sr: use the shift-register FSUReLU variant.
    """
    stype = torch.float
    btype = torch.float
    rtype = torch.float
    print("========================================================")
    # BUGFIX: `sr` is a bool; concatenating it to a str raised TypeError
    # on every call.  Convert it explicitly.
    print(rng + " " + mode + " using shift register: " + str(sr))
    print("========================================================")
    # all input values are non-negative
    low_bound = 0
    if mode == "unipolar":
        up_bound = 2**bitwidth
    elif mode == "bipolar":
        low_bound = -2**(bitwidth-1)
        up_bound = 2**(bitwidth-1)
    input_list = []
    for input_val in range(low_bound, up_bound+1, 1):
        input_list.append(input_val)
    input = torch.tensor(input_list).type(torch.float).div(up_bound).to(device)
    output = torch.nn.ReLU()(input).to(device)
    result_pe_total = []
    for rand_idx in range(1, total_cnt+1):
        # progressive-error monitors for the input and the reference output
        outputPE = ProgError(output, mode=mode).to(device)
        inputPE = ProgError(input, mode=mode).to(device)
        inputSRC = SourceGen(input, bitwidth, mode=mode, rtype=rtype)().to(device)
        dut = FSUReLU(depth=buf_dep, bitwidth=bitwidth, encode="RC", shiftreg=sr, stype=stype, btype=btype).to(device)
        inputRNG = RNG(bitwidth, rand_idx, rng, rtype)().to(device)
        inputBS = BSGen(inputSRC, inputRNG, stype).to(device)
        with torch.no_grad():
            # feed one bit per cycle for the full stream length
            for i in range(2**bitwidth):
                input_bs = inputBS(torch.tensor([i]))
                inputPE.Monitor(input_bs)
                output_bs = dut(input_bs)
                outputPE.Monitor(output_bs)
        # get the result for this rng seed
        result_pe = outputPE()[1].cpu().numpy()
        result_pe_total.append(result_pe)
    # stack the per-seed error vectors: shape (total_cnt, n_inputs)
    result_pe_total = np.array(result_pe_total)
    #######################################################################
    # check the error of all simulation
    #######################################################################
    print("RMSE:{:1.4}".format(math.sqrt(np.mean(result_pe_total**2))))
    print("MAE: {:1.4}".format(np.mean(np.abs(result_pe_total))))
    print("bias:{:1.4}".format(np.mean(result_pe_total)))
    print("max: {:1.4}".format(np.max(result_pe_total)))
    print("min: {:1.4}".format(np.min(result_pe_total)))
    #######################################################################
    # check the error according to input value
    #######################################################################
    max_total = np.max(result_pe_total, axis=0)
    min_total = np.min(result_pe_total, axis=0)
    avg_total = np.mean(result_pe_total, axis=0)
    axis_len = outputPE()[1].size()[0]
    input_x_axis = []
    for axis_index in range(axis_len):
        # map the sample index back to the normalized input value
        input_x_axis.append((axis_index/(axis_len-1)*(up_bound-low_bound)+low_bound)/up_bound)
    fig, ax = plt.subplots()
    ax.fill_between(input_x_axis, max_total, avg_total, facecolor="red", alpha=0.75)
    ax.fill_between(input_x_axis, avg_total, min_total, facecolor="blue", alpha=0.75)
    ax.plot(input_x_axis, avg_total, label='Avg error', color="black", linewidth=0.3)
    plt.tight_layout()
    plt.xlabel('Input value')
    plt.ylabel('Output error')
    plt.xticks(np.arange(-1.0, 1.1, step=0.5))
    # ax.xaxis.set_ticklabels([])
    plt.xlim(-1, 1)
    plt.yticks(np.arange(-1.0, 1.0, step=0.2))
    # ax.yaxis.set_ticklabels([])
    plt.ylim(-1, 1)
    plt.grid(b=True, which="both", axis="y", linestyle="--", color="grey", linewidth=0.3)
    fig.set_size_inches(4, 4)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    plt.show()
    plt.close()
# %%
test(rng="Sobol", mode="bipolar", total_cnt=100, bitwidth=8, buf_dep=5, sr=False)
# %%
test(rng="Sobol", mode="bipolar", total_cnt=100, bitwidth=8, buf_dep=16, sr=True)
test(rng="Sobol", mode="bipolar", total_cnt=100, bitwidth=8, buf_dep=4, sr=True) | [
"diwu0111@hotmail.com"
] | diwu0111@hotmail.com |
d79fda38aff980036caca4a217945a5d4e535590 | 6bb45c5892b4c9692dcc44116fb73dc9e7ab90ff | /sagemaker-inference-recommender/tensorflow-cloudwatch/code/inference.py | 6c01046988f46bc972a2ad0fae4f1977a5ae7f98 | [
"Apache-2.0",
"BSD-2-Clause"
] | permissive | aws/amazon-sagemaker-examples | 8359afe544e873662bda5b8d2b07399c437213c9 | 43dae4b28531cde167598f104f582168b0a4141f | refs/heads/main | 2023-08-26T04:42:52.342776 | 2023-08-25T14:37:19 | 2023-08-25T14:37:19 | 107,937,815 | 4,797 | 3,519 | Apache-2.0 | 2023-09-14T19:47:03 | 2017-10-23T05:55:22 | Jupyter Notebook | UTF-8 | Python | false | false | 1,746 | py | import io
import json
import numpy as np
from PIL import Image
def input_handler(data, context):
""" Pre-process request input before it is sent to TensorFlow Serving REST API
https://github.com/aws/amazon-sagemaker-examples/blob/0e57a288f54910a50dcbe3dfe2acb8d62e3b3409/sagemaker-python-sdk/tensorflow_serving_container/sample_utils.py#L61
Args:
data (obj): the request data stream
context (Context): an object containing request and configuration details
Returns:
(dict): a JSON-serializable dict that contains request body and headers
"""
if context.request_content_type == 'application/x-image':
buf = np.fromstring(data.read(), np.uint8)
image = Image.open(io.BytesIO(buf)).resize((224, 224))
image = np.array(image)
image = np.expand_dims(image, axis=0)
return json.dumps({"instances": image.tolist()})
else:
_return_error(415, 'Unsupported content type "{}"'.format(
context.request_content_type or 'Unknown'))
def output_handler(response, context):
"""Post-process TensorFlow Serving output before it is returned to the client.
Args:
response (obj): the TensorFlow serving response
context (Context): an object containing request and configuration details
Returns:
(bytes, string): data to return to client, response content type
"""
if response.status_code != 200:
_return_error(response.status_code, response.content.decode('utf-8'))
response_content_type = context.accept_header
prediction = response.content
return prediction, response_content_type
def _return_error(code, message):
raise ValueError('Error: {}, {}'.format(str(code), message)) | [
"noreply@github.com"
] | aws.noreply@github.com |
b4842432df98cdecfd9ed798a4883fad4fd5ec9b | 6bce144a2dc9293f290207d1c6c2d08a63763cd2 | /napari/_vispy/_tests/test_vispy_vectors_layer.py | cee34be9af2a9da5fa15e8c2b4f47f1a69d8ab2a | [
"BSD-3-Clause"
] | permissive | tlambert03/napari | 0f7b90de5333b520567a7eb9f00dea5c15fa448c | 19867df427b1eb1e503618a1ab109e7210ae8a83 | refs/heads/main | 2023-08-30T21:32:29.433620 | 2023-05-08T13:58:18 | 2023-05-08T13:58:18 | 216,388,440 | 5 | 0 | BSD-3-Clause | 2023-05-01T07:58:42 | 2019-10-20T16:02:35 | Python | UTF-8 | Python | false | false | 1,382 | py | import numpy as np
import pytest
from napari._vispy.layers.vectors import (
generate_vector_meshes,
generate_vector_meshes_2D,
)
@pytest.mark.parametrize(
"edge_width, length, dims", [[0, 0, 2], [0.3, 0.3, 2], [1, 1, 3]]
)
def test_generate_vector_meshes(edge_width, length, dims):
n = 10
data = np.random.random((n, 2, dims))
vertices, faces = generate_vector_meshes(
data, width=edge_width, length=length
)
vertices_length, vertices_dims = vertices.shape
faces_length, faces_dims = faces.shape
if dims == 2:
assert vertices_length == 4 * n
assert faces_length == 2 * n
elif dims == 3:
assert vertices_length == 8 * n
assert faces_length == 4 * n
assert vertices_dims == dims
assert faces_dims == 3
@pytest.mark.parametrize(
"edge_width, length, p",
[[0, 0, (1, 0, 0)], [0.3, 0.3, (0, 1, 0)], [1, 1, (0, 0, 1)]],
)
def test_generate_vector_meshes_2D(edge_width, length, p):
n = 10
dims = 2
data = np.random.random((n, 2, dims))
vertices, faces = generate_vector_meshes_2D(
data, width=edge_width, length=length, p=p
)
vertices_length, vertices_dims = vertices.shape
faces_length, faces_dims = faces.shape
assert vertices_length == 4 * n
assert vertices_dims == dims
assert faces_length == 2 * n
assert faces_dims == 3
| [
"noreply@github.com"
] | tlambert03.noreply@github.com |
1e5273c1dacc874b90160d3690e51cca256c9cef | 3b7ea74de26931e95eb76d1d27621e5d744f81f4 | /hashtat/hashing/migrations/0001_initial.py | 3e4bbb2dc78a2ba9357a4a0f5dad48615c707d00 | [] | no_license | RoodrigoRoot/TDD-django | abf7c70bde2c6095a27ad6415330476449f71f4a | 6ad752cb75dbef3c6f720b071f3db61c2409bd23 | refs/heads/main | 2023-03-24T11:47:43.683202 | 2021-03-23T17:02:34 | 2021-03-23T17:02:34 | 350,515,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | # Generated by Django 3.1.7 on 2021-03-22 18:31
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Hash',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('haash', models.CharField(max_length=64)),
],
),
]
| [
"roodrigoroot@gmail.com"
] | roodrigoroot@gmail.com |
7b897209074e91145f84ce321c1f8c1d4c601389 | 63ec00220da0cbaf125bf2e879ff63ce432f7227 | /tests/multiloop/test_alternative_loops.py | 000aed652ca7d15d93e11eaa8c07acccd54c801e | [
"Apache-2.0"
] | permissive | munderseth/pytest-asyncio | 3cbfc49a82f5f8d503f5429d43bc3720993ecee4 | 2f37e873e0977861d24a018e06fa9f469470eaf0 | refs/heads/master | 2021-07-24T22:10:19.852111 | 2017-11-03T22:41:09 | 2017-11-03T22:41:09 | 109,448,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | """Unit tests for overriding the event loop."""
import asyncio
import pytest
@pytest.mark.asyncio
def test_for_custom_loop():
"""This test should be executed using the custom loop."""
yield from asyncio.sleep(0.01)
assert type(asyncio.get_event_loop()).__name__ == "CustomSelectorLoop"
@pytest.mark.asyncio
@asyncio.coroutine
def test_dependent_fixture(dependent_fixture):
yield from asyncio.sleep(0.1)
| [
"tinchester@gmail.com"
] | tinchester@gmail.com |
cf34dafd04f5380b38b80c00bd543830273f58c1 | 96f79e659344edb2c6e50d1dd9660b6858054fdc | /lesson_04/server.py | cae3d711a46d4efab2511d01722bf145f97c0015 | [] | no_license | mr-Robot-777/client-server_python | af5b2d7b0bdbec9666e1771afd260b0afd39372a | 977c5cb8bfc57d6dc477eaf9083f154f848d637f | refs/heads/master | 2023-03-20T21:57:44.605765 | 2021-03-19T09:27:30 | 2021-03-19T09:27:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,342 | py |
"""Программа-сервер"""
import json
from socket import AF_INET, SOCK_STREAM, socket
from utils import create_parser
from variables import ENCODING, MAX_CONNECTIONS, MAX_PACKAGE_LENGTH
RESPONSE_ERROR = 400
RESPONSE_OK = 200
class Server:
def __init__(self):
self.transport = socket(AF_INET, SOCK_STREAM)
self.addr, self.port = create_parser()
def create_connection(self):
self.transport.bind((self.addr, self.port))
self.transport.listen(MAX_CONNECTIONS)
while True:
client, client_address = self.transport.accept()
response = RESPONSE_ERROR
data = client.recv(MAX_PACKAGE_LENGTH)
if data:
json_answer = data.decode(ENCODING)
response = self.process_client_message(json.loads(json_answer))
print(f'Отвечаем клиенту', response)
client.send(f'{response}'.encode(ENCODING))
client.close()
def process_client_message(self, message):
print('process_client_message', message)
if message['action'] == 'presence' and message['user']['account_name'] == 'GUEST':
return RESPONSE_OK
return RESPONSE_ERROR
def main():
server = Server()
server.create_connection()
if __name__ == '__main__':
main()
| [
"ershgun@mail.ru"
] | ershgun@mail.ru |
514cdf3548a97713adfb8111d14217eea97c17cf | 19ed724002351006b25175ad584c89f774bd9527 | /cvmodels/models/layers/se_module.py | 7283ff8a838fbc5a58077543bf837198de517ec7 | [
"MIT"
] | permissive | welkin-feng/ComputerVision | 60df6aa36c26c630bcb3676c4ef0df6762d5329f | 667488e41878d7f0376142a7ae9e1b43c0edd68a | refs/heads/master | 2021-06-25T07:56:07.238130 | 2021-02-02T05:37:23 | 2021-02-02T05:37:23 | 193,149,801 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | from torch import nn
__all__ = ['SEModule']
class SEModule(nn.Module):
def __init__(self, channels, reduction=16, act_layer=nn.ReLU):
super(SEModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
reduction_channels = max(channels // reduction, 8)
self.fc1 = nn.Conv2d(
channels, reduction_channels, kernel_size=1, padding=0, bias=True)
self.act = act_layer(inplace=True)
self.fc2 = nn.Conv2d(
reduction_channels, channels, kernel_size=1, padding=0, bias=True)
def forward(self, x):
x_se = self.avg_pool(x)
x_se = self.fc1(x_se)
x_se = self.act(x_se)
x_se = self.fc2(x_se)
return x * x_se.sigmoid()
| [
"382523558@qq.com"
] | 382523558@qq.com |
08b693a29c92e026ff58954982400db441b2cfbc | 010215c1421f5275a846e7154189b22cdd3c89bc | /Data Structures/Tree/identical_trees.py | dc9daa8e21e85efd3307005747d65175e3db3330 | [] | no_license | bsextion/CodingPractice_Py | ab54d5715298645a8fd7ab6945bf3b22d4e6a874 | da2847a04705394c32a6fe1b5f6c6b64c24647a3 | refs/heads/master | 2023-08-16T17:14:47.643989 | 2021-09-28T19:23:40 | 2021-09-28T19:23:40 | 383,658,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 878 | py | class BinaryTreeNode:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
# below data members used only for some of the problems
self.next = None
self.parent = None
self.count = None
def are_identical(root1:BinaryTreeNode, root2:BinaryTreeNode):
word_left = depth_first(root1, "")
word_right = depth_first(root2, "")
if word_left == word_right:
return True
else:
return False
def depth_first(root, word):
if root:
word = depth_first(root.left, word)
word += "" + str(root.data)
word = depth_first(root.right, word)
return word
root1 = BinaryTreeNode(6)
root2 = BinaryTreeNode(6)
root1.left = BinaryTreeNode(4)
root2.left = BinaryTreeNode(4)
root1.right = BinaryTreeNode(7)
root1.right = BinaryTreeNode(7)
are_identical(root1, root2)
| [
"bsextion@gmail.com"
] | bsextion@gmail.com |
3c07919c47445d995fbca14d989d44437fbce99f | e1efc8e0b0e4629dea61504fbc816c0527691bd9 | /6.redis/redis11_持久化.py | 419d53bb9708d30f4e0f3001c584df957d59fc05 | [] | no_license | xiongmengmeng/xmind-technology | 2bb67a0bf92cfd660cac01f8ab3a2454423ccba5 | e2fdb6987ef805a65f0a4feb52d84383853f4b77 | refs/heads/main | 2023-07-31T07:10:29.868120 | 2021-09-11T08:18:17 | 2021-09-11T08:18:17 | 307,636,242 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,779 | py | import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
import xmind
from xmind.core.markerref import MarkerId
xmind_name="redis"
w = xmind.load(os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
s2=w.createSheet()
s2.setTitle("持久化")
r2=s2.getRootTopic()
r2.setTitle("持久化")
content={
'目的':[
'解决宕机数据丢失问题'
],
'RDB':[
'默认持久化方式',
'定时将内存的数据以快照形式保存到硬盘',
'dump.rdb',
{'写磁盘方式':[
'二进制 + 数据压缩的方式写磁盘,文件体积小,数据恢复速度快'
]},
{'快照条件':[
'save 900 1 (900秒内至少有一个键被更改)'
]},
{'快照过程':[
{'rdbSave(生成RDB文件)':[
'fork函数(创建子进程)+cow函数(使用写时复制copy-on-write策略):',
'父子进程共享数据段,父进程继续提供读写服务,写脏的页面数据会逐渐和子进程分离开来'
]},
'rdbLoad(从文件加载内存)'
]},
{'加载':[
'redis启动后会读取RDB快照文件,将数据从磁盘载入内存'
]},
{'风险':[
'redis异常退出,会丢失最后一次快照后的更改数据'
]}
],
'AOF':[
'Append Only File----每次写操作都持久到磁盘',
'通过参数appendonly yes开启,默认文件appendonly.aof',
{'写磁盘方式':[
'纯文本文件,内容为redis客户端向redis发送的原始通信协议内容',
'记录的是每一次写命令,数据最全,但文件体积大,数据恢复速度慢'
]},
{'加载':[
'从持久化的日志中文件恢复数据'
]},
{'风险':[
'操作系统的缓存机制,数据并没有真正写入磁盘,只是进入系统的磁盘缓存,默认30s同步一次',
'通过参数优化此行为:appendfsync everysec(默认),每秒执行一次同步操作'
]},
'对AOF文件定时rewrite,避免文件体积持续膨胀'
],
'混合持久化':[
'AOF rewrite时,以RDB格式在AOF文件中写入一个数据快照,再把在这期间产生的每一个写命令,追加到AOF文件中',
'RDB是二进制压缩写入,AOF文件体积变小',
'Redis 4.0 以上版本支持'
],
'持久化策略选择':[
'Redis中的数据完全丢弃也没有关系,可以不进行任何持久化',
'单机环境,如可接受十几分钟或更多数据丢失,选择RDB;如只能接受秒级数据丢失,选择AOF',
'多数情况,会配置主从环境,slave既可实现数据的热备,也可分担Redis读请求,以及在master宕掉后继续提供服务'
],
'常见性能问题':[
{'master写内存快照':[
'save命令调度rdbSave函数',
'会阻塞主线程工作',
'当快照比较大时对性能影响非常大,会间断性暂停服务'
]},
{'master AOF持久化':[
'如不重写AOF文件,对性能的影响较小',
'但AOF文件会不断增大,AOF文件过大会影响Master重启的恢复速度',
'Master调用BGREWRITEAOF重写AOF文件,会占大量的CPU和内存资源,导致服务load过高,出现短暂服务暂停现象'
]},
'总结:Master最好不做任何持久化工作,如RDB内存快照和AOF日志文件',
{'建议':[
'如数据重要,某个slave开启AOF备份数据,策略设置为每秒同步一次',
'为了主从复制的速度和连接的稳定性,master和slave最好在同一个局域网'
]}
]
}
#构建xmind
xmind.build(content,r2)
#保存xmind
xmind.save(w,os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind") | [
"xiongmengmeng@qipeipu.com"
] | xiongmengmeng@qipeipu.com |
af8b8ae708671da42ab4c86b784d196be55e8ce3 | 4b55e257e0af5181c75a3b840a2c206cc6caec92 | /lightreid/optim/__init__.py | 05dac52793b24aa106f2ff3dcec83b52c1b6d1b8 | [] | no_license | Leopold0801/light-reid | 1799374b6e6552edeade737b137364de34d5b810 | aeb9fb8494611512dc9e3f3e3e7c9f7513c27a99 | refs/heads/master | 2022-11-26T21:19:39.005565 | 2020-08-10T16:56:05 | 2020-08-10T16:56:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | """
@author: Guan'an Wang
@contact: guan.wang0706@gmail.com
"""
from .lr_scheduler import WarmupMultiStepLR, DelayedCosineAnnealingLR
class Optimizer(object):
KWARGS = ['fix_cnn_epochs']
def __init__(self, optimizer, lr_scheduler, max_epochs, **kwargs):
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
self.max_epochs = max_epochs
for key, value in kwargs.items():
assert key in Optimizer.KWARGS, 'expect {}, but got {}'.format(Optimizer.KWARGS, key)
setattr(self, key, value)
| [
"guan.wang0706@gmail.com"
] | guan.wang0706@gmail.com |
43714fe8477ba7894bde249b6e9a5d3cb9754e61 | c887e00981e6368e94916ca9b93c4de79a5c1a22 | /lawncare/blog/views.py | e3b3935c8c13e11b84ee6b90db6746f75b732bc7 | [] | no_license | devArist/school_project | 18dc0427e2d6a45abfff8a72dbe2c52a7afd8778 | 4d1c1ba5e2a9b4253e950e2c95e0ce6ef22efe3f | refs/heads/main | 2023-05-07T09:51:50.664546 | 2021-05-28T12:44:11 | 2021-05-28T12:44:11 | 368,508,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | from django.shortcuts import render
from . import models
# Create your views here.
def blog(request):
blogs = models.Blog.objects.filter(status=True).order_by('-date_update')
return render(request, 'blog/blog.html', locals())
def detail(request, pk):
blog = models.Blog.objects.get(pk=pk)
return render(request, 'blog/blog-single.html', locals()) | [
"aridev97@gmail.com"
] | aridev97@gmail.com |
6af0eef7eaecf4e08598669f69be9120a2059704 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03722/s165384014.py | 281a6c4886b4c12376aea57bca9bad100458536f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | N, M = map(int, input().split())
road = []
for i in range(M):
a, b, c = map(int, input().split())
road.append((a-1, b-1, -c))
INF = 10**15
d = [INF] * N
d[0] = 0
def bellman_ford():
global d
for i in range(N):
for a, b, c in road:
d[b] = min(d[b], d[a] + c)
bellman_ford()
for a, b, c in road:
new_d = d[a] + c
if new_d < d[b]:
d[b] = - float('inf')
bellman_ford()
if d[-1] > -float('inf'):
print(int(-d[-1]))
else:
print('inf') | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
9ca967cb2d1a93ca45878ccdbaa7a626b963fab4 | ef1d38cfef63f22e149d6c9dd14e98955693c50d | /webhook/protos/pogoprotos/data/ditto/rpc_response_event_params_pb2.py | e4f473d06c0e00583908cdcab8447be689edd11e | [] | no_license | Kneckter/WebhookListener | 4c186d9012fd6af69453d9d51ae33a38aa19b5fd | ea4ff29b66d6abf21cc1424ed976af76c3da5511 | refs/heads/master | 2022-10-09T04:26:33.466789 | 2019-11-24T17:30:59 | 2019-11-24T17:30:59 | 193,372,117 | 2 | 0 | null | 2022-09-23T22:26:10 | 2019-06-23T16:39:34 | Python | UTF-8 | Python | false | true | 3,011 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/data/ditto/rpc_response_event_params.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/data/ditto/rpc_response_event_params.proto',
package='pogoprotos.data.ditto',
syntax='proto3',
serialized_pb=_b('\n5pogoprotos/data/ditto/rpc_response_event_params.proto\x12\x15pogoprotos.data.ditto\"J\n\x16RpcResponseEventParams\x12\x0e\n\x06rpc_id\x18\x01 \x01(\r\x12\x0e\n\x06status\x18\x02 \x01(\r\x12\x10\n\x08payloads\x18\x03 \x03(\x0c\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_RPCRESPONSEEVENTPARAMS = _descriptor.Descriptor(
name='RpcResponseEventParams',
full_name='pogoprotos.data.ditto.RpcResponseEventParams',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rpc_id', full_name='pogoprotos.data.ditto.RpcResponseEventParams.rpc_id', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='status', full_name='pogoprotos.data.ditto.RpcResponseEventParams.status', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='payloads', full_name='pogoprotos.data.ditto.RpcResponseEventParams.payloads', index=2,
number=3, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=80,
serialized_end=154,
)
DESCRIPTOR.message_types_by_name['RpcResponseEventParams'] = _RPCRESPONSEEVENTPARAMS
RpcResponseEventParams = _reflection.GeneratedProtocolMessageType('RpcResponseEventParams', (_message.Message,), dict(
DESCRIPTOR = _RPCRESPONSEEVENTPARAMS,
__module__ = 'pogoprotos.data.ditto.rpc_response_event_params_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.data.ditto.RpcResponseEventParams)
))
_sym_db.RegisterMessage(RpcResponseEventParams)
# @@protoc_insertion_point(module_scope)
| [
"kasmar@gitlab.com"
] | kasmar@gitlab.com |
f9ce9ebbf41ca7bdf7c0ae9d1b3acfbe30350953 | 525c6a69bcf924f0309b69f1d3aff341b06feb8e | /sunyata/layer/recurrent/lstm.py | ab6c9420189140a770d997facf97b92ca2501266 | [] | no_license | knighton/sunyata_2017 | ba3af4f17184d92f6277d428a81802ac12ef50a4 | 4e9d8e7d5666d02f9bb0aa9dfbd16b7a8e97c1c8 | refs/heads/master | 2021-09-06T13:19:06.341771 | 2018-02-07T00:28:07 | 2018-02-07T00:28:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,097 | py | from ... import backend as Z
from ... import init
from ..base import LinkBuilder
from .base import RecurrentLayer, RecurrentSpec
class LSTMLayer(RecurrentLayer):
def __init__(self, forward, last, input_kernel, recurrent_kernel, bias):
dim = input_kernel.shape[1] // 4
dtype = input_kernel.dtype.name
super().__init__(dim, dtype, forward, last, dim)
self.input_kernel = self.add_param(input_kernel)
self.recurrent_kernel = self.add_param(recurrent_kernel)
self.bias = self.add_param(bias)
def step(self, x, prev_state, prev_internal_state):
a = Z.matmul(x, self.input_kernel) + \
Z.matmul(prev_state, self.recurrent_kernel) + self.bias
index = self.out_dim
i = Z.sigmoid(a[:, :index])
f = Z.sigmoid(a[:, index:2 * index])
o = Z.sigmoid(a[:, 2 * index:3 * index])
g = Z.tanh(a[:, 3 * index:])
next_internal_state = f * prev_internal_state + i * g
next_state = o * Z.tanh(next_internal_state)
return next_state, next_internal_state
class LSTMSpec(RecurrentSpec):
def __init__(self, dim=None, forward=True, last=False,
input_kernel_init='glorot_uniform',
recurrent_kernel_init='orthogonal', bias_init='zeros'):
super().__init__(dim, forward, last)
self.input_kernel_init = init.get(input_kernel_init)
self.recurrent_kernel_init = init.get(recurrent_kernel_init)
self.bias_init = init.get(bias_init)
def make_layer(self, in_dim, out_dim, dtype):
input_kernel_shape = in_dim, 4 * out_dim
input_kernel = self.input_kernel_init(
input_kernel_shape, dtype, 'conv_kernel')
recurrent_kernel_shape = out_dim, 4 * out_dim
recurrent_kernel = self.recurrent_kernel_init(
recurrent_kernel_shape, dtype)
bias_shape = 4 * out_dim,
bias = self.bias_init(bias_shape, dtype)
return LSTMLayer(self.go_forward, self.ret_last, input_kernel,
recurrent_kernel, bias)
LSTM = LinkBuilder(LSTMSpec)
| [
"iamknighton@gmail.com"
] | iamknighton@gmail.com |
2bc6ce1e38b0ff11a43a0471d5895cf0445c4e75 | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/ads/admob/v1/ads-admob-v1-py/google/ads/admob/__init__.py | 99b0f676816b735ae222673d657ca23ba987d36e | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,606 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.ads.admob_v1.services.ad_mob_api.client import AdMobApiClient
from google.ads.admob_v1.services.ad_mob_api.async_client import AdMobApiAsyncClient
from google.ads.admob_v1.types.admob_api import GenerateMediationReportRequest
from google.ads.admob_v1.types.admob_api import GenerateMediationReportResponse
from google.ads.admob_v1.types.admob_api import GenerateNetworkReportRequest
from google.ads.admob_v1.types.admob_api import GenerateNetworkReportResponse
from google.ads.admob_v1.types.admob_api import GetPublisherAccountRequest
from google.ads.admob_v1.types.admob_api import ListPublisherAccountsRequest
from google.ads.admob_v1.types.admob_api import ListPublisherAccountsResponse
from google.ads.admob_v1.types.admob_resources import DateRange
from google.ads.admob_v1.types.admob_resources import LocalizationSettings
from google.ads.admob_v1.types.admob_resources import MediationReportSpec
from google.ads.admob_v1.types.admob_resources import NetworkReportSpec
from google.ads.admob_v1.types.admob_resources import PublisherAccount
from google.ads.admob_v1.types.admob_resources import ReportFooter
from google.ads.admob_v1.types.admob_resources import ReportHeader
from google.ads.admob_v1.types.admob_resources import ReportRow
from google.ads.admob_v1.types.admob_resources import ReportWarning
from google.ads.admob_v1.types.admob_resources import StringList
from google.ads.admob_v1.types.admob_resources import SortOrder
__all__ = ('AdMobApiClient',
'AdMobApiAsyncClient',
'GenerateMediationReportRequest',
'GenerateMediationReportResponse',
'GenerateNetworkReportRequest',
'GenerateNetworkReportResponse',
'GetPublisherAccountRequest',
'ListPublisherAccountsRequest',
'ListPublisherAccountsResponse',
'DateRange',
'LocalizationSettings',
'MediationReportSpec',
'NetworkReportSpec',
'PublisherAccount',
'ReportFooter',
'ReportHeader',
'ReportRow',
'ReportWarning',
'StringList',
'SortOrder',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
774941c1e4550ae6504fd4d14f90db5cb0b4ea86 | a71fbf421c43fcb34fe7c8000eb807677821683c | /python_import/p42_from_module.py | 6adea0acaab4069110e5384b1022f70212ec8b84 | [] | no_license | leekyunghun/bit_seoul | ccd96dca3774f259e04b8388e134d6183b974268 | b76a3d5f83b77f5345d61cf3baa68aaefc25cd2a | refs/heads/master | 2023-02-06T08:12:17.768076 | 2020-12-22T13:35:19 | 2020-12-22T13:35:19 | 311,286,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | from machine.car import drive
from machine.tv import watch
drive()
watch()
from machine import car
from machine import tv
car.drive()
tv.watch()
| [
"oh_pizza@naver.com"
] | oh_pizza@naver.com |
c98dfb752acee87d50afcc78ce6a2ff51cfc674f | 89f3169a2393bff8880f657d9bb4c12b40729e9a | /2020-06/abc051_b.py | dbd393e5b7ceda1a54913e15b23019463161141a | [] | no_license | YutaGoto/daily_atcoder | c087adbb7fa03f0cdc4291c806f21b1b93130d86 | 113d4e25f1d3bb0e665f9154bc0afaecae5ea7bf | refs/heads/main | 2023-06-19T00:56:12.359473 | 2021-07-16T12:33:06 | 2021-07-16T12:33:06 | 273,282,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | k, s = map(int, input().split())
a = 0
for x in range(0, k+1):
for y in range(0, k+1):
z = s - x - y
if z >= 0 and z <= k:
a += 1
print(a)
| [
"you.goto.510@gmail.com"
] | you.goto.510@gmail.com |
90168e13f27d0f16c51ca125a2be4b1e7e075cfc | 4fc21c3f8dca563ce8fe0975b5d60f68d882768d | /Darlington/phase1/python Basic 1/day 13 solution/qtn2.py | d0cbe7f2d2874086c5319bbe0c7055094a0354db | [
"MIT"
] | permissive | Uche-Clare/python-challenge-solutions | 17e53dbedbff2f33e242cf8011696b3059cd96e9 | 49ede6204ee0a82d5507a19fbc7590a1ae10f058 | refs/heads/master | 2022-11-13T15:06:52.846937 | 2020-07-10T20:59:37 | 2020-07-10T20:59:37 | 266,404,840 | 1 | 0 | MIT | 2020-05-23T19:24:56 | 2020-05-23T19:24:55 | null | UTF-8 | Python | false | false | 208 | py | #program to remove the first item from a specified list.
names = ["peter", "winner", "Austin", "ND", "Darlington"]
print("\nFirst Name: ",names)
del names[0]
print("After removing the first : ",names)
print() | [
"darlingtonchibuzor64@gmail.com"
] | darlingtonchibuzor64@gmail.com |
ed12611516370eb81804ac8f6bdfcdcbd60a9752 | 4c984a318ccf26e765f902669399da66497e194d | /pollexe/settings/summernote/conf.py | 392425c7e9f0d2983aec17391359a06fce26c55d | [] | no_license | sajalmia381/pollexe | 914af663bad6becb4308c738a16240028f37f99b | 3ead47fee43855aba1ee0f4c2b3f222cac6a9a68 | refs/heads/master | 2020-04-21T12:42:49.283843 | 2019-02-07T13:43:40 | 2019-02-07T13:43:40 | 169,572,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,637 | py | # SUMMERNOTE_THEME = 'bs4'
SUMMERNOTE_CONFIG = {
# Using SummernoteWidget - iframe mode, default
'iframe': True,
# Or, you can set it as False to use SummernoteInplaceWidget by default - no iframe mode
# In this case, you have to load Bootstrap/jQuery stuff by manually.
# Use this when you're already using Bootstraip/jQuery based themes.
'iframe': False,
# You can put custom Summernote settings
'summernote': {
# As an example, using Summernote Air-mode
'airMode': False,
# Change editor size
'width': '100%',
'height': '480',
# Use proper language setting automatically (default)
'lang': None,
# Or, set editor language/locale forcely
'lang': 'ko-KR',
# ...
# You can also add custom settings for external plugins
'print': {
'stylesheetUrl': '/some_static_folder/printable.css',
},
},
# Need authentication while uploading attachments.
'attachment_require_authentication': True,
# Set `upload_to` function for attachments.
'attachment_upload_to': '/media/',
# Set custom storage class for attachments.
'attachment_storage_class': 'my.custom.storage.class.name',
# Set custom model for attachments (default: 'django_summernote.Attachment')
'attachment_model': 'my.custom.attachment.model', # must inherit 'django_summernote.AbstractAttachment'
# You can disable attachment feature.
'disable_attachment': False,
# You can add custom css/js for SummernoteWidget.
'css': (
),
'js': (
),
# You can also add custom css/js for SummernoteInplaceWidget.
# !!! Be sure to put {{ form.media }} in template before initiate summernote.
'css_for_inplace': (
),
'js_for_inplace': (
),
# Codemirror as codeview
# If any codemirror settings are defined, it will include codemirror files automatically.
'css': {
'//cdnjs.cloudflare.com/ajax/libs/codemirror/5.29.0/theme/monokai.min.css',
},
'codemirror': {
'mode': 'htmlmixed',
'lineNumbers': 'true',
# You have to include theme file in 'css' or 'css_for_inplace' before using it.
'theme': 'monokai',
},
# Lazy initialize
# If you want to initialize summernote at the bottom of page, set this as True
# and call `initSummernote()` on your page.
'lazy': True,
# To use external plugins,
# Include them within `css` and `js`.
'js': {
'/some_static_folder/summernote-ext-print.js',
'//somewhere_in_internet/summernote-plugin-name.js',
},
} | [
"sajal_mia@ymail.com"
] | sajal_mia@ymail.com |
bb6034702ab10541abffa775201702a7d77dc308 | 41249d7d4ca9950b9c6fee89bf7e2c1929629767 | /results/rabi_and_lmg_optimizations_different_constraints_20190228/script_rabi_bangramp_neldermead_bounds12.py | a389f358b918172fdab3458cc34595ac1a335274 | [
"MIT"
] | permissive | lucainnocenti/ultrafast-critical-ground-state-preparation-2007.07381 | f739b3baad1d2aadda576303bb0bbe9d48ec204a | 29f80dcf914096555cee9bc2e18249a2c95d6a50 | refs/heads/master | 2022-11-22T00:44:09.998199 | 2020-07-21T08:35:28 | 2020-07-21T08:35:28 | 281,237,037 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,020 | py | import os
import sys
import numpy as np
import pandas as pd
import logging
if '../../' not in sys.path:
sys.path.append('../../')
import src.optimization as optimization
model = 'rabi'
model_parameters = dict(N=100, Omega=100, omega_0=1.)
protocol = 'bangramp'
optimization_method = 'Nelder-Mead'
parameters_constraints = [-12, 12]
# ------ build and check name for output file
additional_file_name_qualifiers = None
output_file_name = (model + '_' + protocol + '_' +
optimization_method.replace('-', '').lower())
if additional_file_name_qualifiers is not None:
output_file_name += '_' + additional_file_name_qualifiers
filenum = 1
_output_file_name = output_file_name
while os.path.isfile(_output_file_name + '.csv'):
_output_file_name = output_file_name + '({:02})'.format(filenum)
filenum += 1
output_file_name = _output_file_name + '.csv'
# ------ set up logger
logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s]"
"[%(levelname)-5.5s] %(message)s")
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.DEBUG)
# consoleHandler = logging.StreamHandler()
# consoleHandler.setFormatter(logFormatter)
# rootLogger.addHandler(consoleHandler)
fileHandler = logging.FileHandler(output_file_name[:-4] + '.log')
fileHandler.setFormatter(logFormatter)
fileHandler.setLevel(logging.DEBUG)
rootLogger.addHandler(fileHandler)
logging.info('Output file name will be "{}"'.format(output_file_name))
# ------ start optimization
results = optimization.find_best_protocol(
problem_specification=dict(
model=model,
model_parameters=model_parameters,
task='critical point state generation'
),
optimization_specs=dict(
protocol=protocol,
optimization_method=optimization_method,
parameters_constraints=parameters_constraints
),
other_options=dict(
scan_times=np.linspace(0.1, 4, 100)
)
)
# ------ save results to file
results.to_csv(output_file_name)
| [
"lukeinnocenti@gmail.com"
] | lukeinnocenti@gmail.com |
941761be2cc2b83e4fb14b8e9bd41be81dd7e935 | 3330090c2b3608eedbce99e55a2a8a5d87cc163f | /lib/actions/drivers_test.py | f78eb2b1553a49633af7f65761e717c7c87f933c | [
"Apache-2.0"
] | permissive | Venseer/glazier | ea935af0afff001a26538972d24622c69598628f | db24121e5b79bd377c721ca8d9c391db56841a4f | refs/heads/master | 2021-01-11T05:22:52.400113 | 2018-06-25T10:52:19 | 2018-06-25T10:52:19 | 79,860,410 | 0 | 0 | Apache-2.0 | 2018-06-25T10:52:20 | 2017-01-23T23:42:37 | Python | UTF-8 | Python | false | false | 3,986 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for glazier.lib.actions.drivers."""
from glazier.lib.actions import drivers
from glazier.lib.buildinfo import BuildInfo
import mock
from google.apputils import basetest
class DriversTest(basetest.TestCase):
@mock.patch.object(BuildInfo, 'ReleasePath')
@mock.patch('glazier.lib.download.Download.VerifyShaHash', autospec=True)
@mock.patch('glazier.lib.download.Download.DownloadFile', autospec=True)
@mock.patch.object(drivers, 'Execute', autospec=True)
@mock.patch.object(drivers.file_util, 'CreateDirectories', autospec=True)
def testDriverWIM(self, mkdir, exe, dl, sha, rpath):
bi = BuildInfo()
# Setup
remote = '@Drivers/Lenovo/W54x-Win10-Storage.wim'
local = r'c:\W54x-Win10-Storage.wim'
sha_256 = (
'D30F9DB0698C87901DF6824D11203BDC2D6DAAF0CE14ABD7C0A7B75974936748')
conf = {
'data': {
'driver': [[remote, local, sha_256]]
},
'path': ['/autobuild']
}
rpath.return_value = '/'
# Success
dw = drivers.DriverWIM(conf['data']['driver'], bi)
dw.Run()
dl.assert_called_with(
mock.ANY, ('https://glazier-server.example.com/'
'bin/Drivers/Lenovo/W54x-Win10-Storage.wim'),
local,
show_progress=True)
sha.assert_called_with(mock.ANY, local, sha_256)
cache = drivers.constants.SYS_CACHE
exe.assert_called_with([[('X:\\Windows\\System32\\dism.exe /Unmount-Image '
'/MountDir:%s\\Drivers\\ /Discard' % cache)]],
mock.ANY)
mkdir.assert_called_with('%s\\Drivers\\' % cache)
# Invalid format
conf['data']['driver'][0][1] = 'C:\\W54x-Win10-Storage.zip'
dw = drivers.DriverWIM(conf['data']['driver'], bi)
self.assertRaises(drivers.ActionError, dw.Run)
conf['data']['driver'][0][1] = 'C:\\W54x-Win10-Storage.wim'
# Mount Fail
exe.return_value.Run.side_effect = drivers.ActionError()
self.assertRaises(drivers.ActionError, dw.Run)
# Dism Fail
exe.return_value.Run.side_effect = iter([0, drivers.ActionError()])
self.assertRaises(drivers.ActionError, dw.Run)
# Unmount Fail
exe.return_value.Run.side_effect = iter([0, 0, drivers.ActionError()])
self.assertRaises(drivers.ActionError, dw.Run)
def testDriverWIMValidate(self):
g = drivers.DriverWIM('String', None)
self.assertRaises(drivers.ValidationError, g.Validate)
g = drivers.DriverWIM([[1, 2, 3]], None)
self.assertRaises(drivers.ValidationError, g.Validate)
g = drivers.DriverWIM([[1, '/tmp/out/path']], None)
self.assertRaises(drivers.ValidationError, g.Validate)
g = drivers.DriverWIM([['/tmp/src.zip', 2]], None)
self.assertRaises(drivers.ValidationError, g.Validate)
g = drivers.DriverWIM([['https://glazier/bin/src.wim', '/tmp/out/src.zip']],
None)
self.assertRaises(drivers.ValidationError, g.Validate)
g = drivers.DriverWIM([['https://glazier/bin/src.wim', '/tmp/out/src.wim']],
None)
g.Validate()
g = drivers.DriverWIM(
[['https://glazier/bin/src.wim', '/tmp/out/src.wim', '12345']], None)
g.Validate()
g = drivers.DriverWIM(
[['https://glazier/bin/src.zip', '/tmp/out/src.zip', '12345', '67890']],
None)
self.assertRaises(drivers.ValidationError, g.Validate)
if __name__ == '__main__':
basetest.main()
| [
"mattl@google.com"
] | mattl@google.com |
b4d48deeab90710e4f81fb5ff97b545bb3a77179 | 10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94 | /Python/incremental-memory-leak.py | 1705ea35d0108812a35a7859a8fdf28edbb2a50f | [
"MIT"
] | permissive | kamyu104/LeetCode-Solutions | f54822059405ef4df737d2e9898b024f051fd525 | 4dc4e6642dc92f1983c13564cc0fd99917cab358 | refs/heads/master | 2023-09-02T13:48:26.830566 | 2023-08-28T10:11:12 | 2023-08-28T10:11:12 | 152,631,182 | 4,549 | 1,651 | MIT | 2023-05-31T06:10:33 | 2018-10-11T17:38:35 | C++ | UTF-8 | Python | false | false | 1,064 | py | # Time: O(1)
# Space: O(1)
# Same problem from https://codingcompetitions.withgoogle.com/codejam/round/000000000019ffb9/00000000003384ea
class Solution(object):
def memLeak(self, memory1, memory2):
"""
:type memory1: int
:type memory2: int
:rtype: List[int]
"""
def s(a, d, n):
return (2*a + (n-1)*d)*n//2
def f(a, d, x):
r = int((-(2*a-d)+((2*a-d)**2+8*d*x)**0.5)/(2*d))
if s(a, d, r) > x: # adjust float accuracy
r -= 1
return r
is_swapped = False
if memory1 < memory2:
memory1, memory2 = memory2, memory1
is_swapped = True
n = f(1, 1, memory1-memory2)
memory1 -= s(1, 1, n)
if memory1 == memory2:
is_swapped = False
l = f(n+1, 2, memory1)
r = f(n+2, 2, memory2)
memory1 -= s(n+1, 2, l)
memory2 -= s(n+2, 2, r)
if is_swapped:
memory1, memory2 = memory2, memory1
return [n+l+r+1, memory1, memory2]
| [
"noreply@github.com"
] | kamyu104.noreply@github.com |
1204ca4653c8968bf60985caa1ab1428e568e339 | d9cb81209d452b7c3180cd2e2b3e1b00279b469c | /proc_TED_2_NOV20161_adjusted.py | 623ca2a355b2c5efeb7490545a9385aca30410f3 | [
"CC0-1.0"
] | permissive | riceissa/total-economy-database | 3857a0b9bc1de393fc681b43914b26c0adf2c8bc | 0052bb2202458a7e908203b222d404b266ee1c0d | refs/heads/master | 2022-08-12T16:10:30.891745 | 2022-07-23T05:48:34 | 2022-07-23T05:48:34 | 107,448,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,420 | py | #!/usr/bin/env python3
import csv
import sys
import re
from devec_sql_common import *
insert_line = "insert into data(region, odate, database_url, data_retrieval_method, metric, units, value, notes) values"
count = 0
first = True
with open("../total-economy-database-data/TED_2_NOV20161_adjusted.csv", newline='') as f:
reader = csv.DictReader(f)
for row in reader:
for year in range(1995, 2016):
y = str(year)
if row[y]:
if first:
print(insert_line)
print(" " + ("" if first else ",") + "(" + uniq_join([
mysql_quote(region_normalized(row['COUNTRY'])), # region
mysql_string_date(y), # odate
mysql_quote("https://www.conference-board.org/retrievefile.cfm?filename=TED_2_NOV20161.xlsx&type=subsite"), # database_url
mysql_quote(""), # data_retrieval_method
mysql_quote(row['INDICATOR'] + " (adjusted)"), # metric
mysql_quote(row['MEASURE']), # units
mysql_float(row[y]), # value
mysql_quote(""), # notes
]) + ")")
first = False
count += 1
if count > 5000:
count = 0
first = True
print(";")
if not first:
print(";")
| [
"riceissa@gmail.com"
] | riceissa@gmail.com |
17a0ccae6225c5831dfd34cfb3a9e83af48d05fc | ffdc77394c5b5532b243cf3c33bd584cbdc65cb7 | /tests/st/networks/models/bert/src/__init__.py | 4f4584a4b483b32d6ddacc12923d127d5a549061 | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-free-unknown",
"AGPL-3.0-only",
"Libpng",
"MPL-1.1",
"IJG",
"GPL-2.0-only",
"BSL-1.0",
"Zlib",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-python-cwi",
"BSD-2-Clause",
"LicenseRef-scancode-gary-s-brown",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] | permissive | mindspore-ai/mindspore | ca7d5bb51a3451c2705ff2e583a740589d80393b | 54acb15d435533c815ee1bd9f6dc0b56b4d4cf83 | refs/heads/master | 2023-07-29T09:17:11.051569 | 2023-07-17T13:14:15 | 2023-07-17T13:14:15 | 239,714,835 | 4,178 | 768 | Apache-2.0 | 2023-07-26T22:31:11 | 2020-02-11T08:43:48 | C++ | UTF-8 | Python | false | false | 1,653 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Bert Init."""
from .bert_for_pre_training import BertNetworkWithLoss, BertPreTraining, \
BertPretrainingLoss, GetMaskedLMOutput, GetNextSentenceOutput, \
BertTrainOneStepCell, BertTrainOneStepWithLossScaleCell
from .bert_model import BertAttention, BertConfig, BertEncoderCell, BertModel, \
BertOutput, BertSelfAttention, BertTransformer, EmbeddingLookup, \
EmbeddingPostprocessor, RelaPosEmbeddingsGenerator, RelaPosMatrixGenerator, \
SaturateCast, CreateAttentionMaskFromInputMask
__all__ = [
"BertNetworkWithLoss", "BertPreTraining", "BertPretrainingLoss",
"GetMaskedLMOutput", "GetNextSentenceOutput", "BertTrainOneStepCell", "BertTrainOneStepWithLossScaleCell",
"BertAttention", "BertConfig", "BertEncoderCell", "BertModel", "BertOutput",
"BertSelfAttention", "BertTransformer", "EmbeddingLookup",
"EmbeddingPostprocessor", "RelaPosEmbeddingsGenerator",
"RelaPosMatrixGenerator", "SaturateCast", "CreateAttentionMaskFromInputMask"
]
| [
"leon.wanghui@huawei.com"
] | leon.wanghui@huawei.com |
3dfe5bdfaef0f2bb0484a3cedcb49aace1286dab | 496a63f41fa32e2bb3ecce0d35ff4374f1c02ad5 | /src/data/handlers/options.py | 1e2f69ca64a2a2b696cedf6bff9de1e3eb46b311 | [
"BSD-3-Clause"
] | permissive | vincent-lg/avenew.one | bbfa8d44e68db943b8825e9d4a32a43e985778fe | fb7f98d331e47e2032ee1e51bf3e4b2592807fdf | refs/heads/main | 2023-02-14T00:28:53.511552 | 2021-01-13T11:13:07 | 2021-01-13T11:13:07 | 330,207,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,209 | py | # Copyright (c) 2020-20201, LE GOFF Vincent
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
"""Option handler, to work with an inline dictionary.
Contrary to most handlers, the OptionHandler requires a
`binary_options` field, set as a required byte string on the
entity it modifies.
"""
import pickle
from collections.abc import MutableMapping
class OptionHandler(MutableMapping):
"""Option handler, to handle options in a dictionary-like object.
The option handler is an object which uses a binary representation,
stored in the entity itself. It has all the methods one can expect
from a dictionary and can be used as such.
>>> session.options["username"] = "someone"
>>> session.options["username"]
'someone'
>>> len(session.options)
1
>>> del session.options["username"]
>>> sesession.options.get("username", "")
''
>>> # ...
"""
__slots__ = ("__owner", "__binary_field", "__options")
def __init__(self, owner, binary_field="binary_options"):
self.__owner = owner
self.__binary_field = binary_field
binary = getattr(owner, binary_field)
self.__options = pickle.loads(binary)
def __len__(self):
return len(self.__options)
def __iter__(self):
return iter(self.__options)
def __getitem__(self, key):
return self.__options[key]
def __setitem__(self, key, value):
self.__options[key] = value
setattr(self.__owner, self.__binary_field, pickle.dumps(
self.__options))
def __delitem__(self, key):
del self.__options[key]
setattr(self.__owner, self.__binary_field, pickle.dumps(
self.__options))
| [
"vincent.legoff.srs@gmail.com"
] | vincent.legoff.srs@gmail.com |
7fb0fcce7a07d40707030a7b6d5b1f5de8882482 | 1a1c372244ef0e64da4629496bb3eb1b00cb47fc | /configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x.py | dabdf6c9864e1a9dad1e759165d76472618e78b0 | [
"Apache-2.0"
] | permissive | speedinghzl/mmdetection | 2ab7926251ed3ee8f86dcba6f0b85081eac0ef53 | 339f37a21b6e4001e90734f6fce1559843e83487 | refs/heads/master | 2020-05-30T09:54:02.302563 | 2019-06-02T03:41:45 | 2019-06-02T03:41:45 | 189,658,179 | 4 | 0 | Apache-2.0 | 2019-05-31T21:03:41 | 2019-05-31T21:03:40 | null | UTF-8 | Python | false | false | 5,721 | py | # model settings
model = dict(
type='FasterRCNN',
pretrained='open-mmlab://resnext101_32x4d',
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='GARPNHead',
in_channels=256,
feat_channels=256,
octave_base_scale=8,
scales_per_octave=3,
octave_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
anchor_base_sizes=None,
anchoring_means=[.0, .0, .0, .0],
anchoring_stds=[0.07, 0.07, 0.14, 0.14],
target_means=(.0, .0, .0, .0),
target_stds=[0.07, 0.07, 0.11, 0.11],
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(
type='IoULoss', style='bounded', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
center_ratio=0.2,
ignore_ratio=0.5,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=300,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=300,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=1e-3, nms=dict(type='nms', iou_thr=0.5), max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0.5,
with_mask=False,
with_crowd=True,
with_label=True),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_crowd=True,
with_label=True),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_label=False,
test_mode=True))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/ga_faster_rcnn_x101_32x4d_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| [
"chenkaidev@gmail.com"
] | chenkaidev@gmail.com |
c018206b30963896b42d4d8f91602b19fbb8578c | 36afa271f080459adf1014cd23f4be9f954dfee6 | /Crawler/Requests/douban.py | 5160d23321fc240a3aeba30552e69eecea7db83c | [] | no_license | King-Of-Game/Python | b69186a7574ce1c0b7097207cfe9a2eb38a90bc0 | 643b9fd22efd78f6679735f23432943a57b5f5bb | refs/heads/master | 2023-05-25T05:35:14.473114 | 2021-10-24T12:52:21 | 2021-10-24T12:52:21 | 151,251,434 | 3 | 0 | null | 2023-05-01T20:51:50 | 2018-10-02T12:34:04 | HTML | UTF-8 | Python | false | false | 2,155 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib.request
import lxml.html
import csv
url = 'https://movie.douban.com/top250?start={}&filter='
# 得到当前Url的数据,返回类型为string
def getSource(url):
header = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:32.0) Gecko/20100101 Firefox/32.0"
}
request = urllib.request.Request(url=url, headers=header)
response = urllib.request.urlopen(request)
data = response.read().decode('utf-8')
return data
# 得到每一个页面对应的数据,用lxml模块解析后把数据存入列表并返回
def everyPage(source):
select = lxml.html.document_fromstring(source)
movieList = select.xpath('//div[@class="info"]')
showList = []
for i in movieList:
movieDict = {}
title = i.xpath('div[@class="hd"]/a/span[@class="title"]/text()')[0]
otherTitle = i.xpath('div[@class="hd"]/a/span[@class="other"]/text()')[0]
mainning = i.xpath('div[@class="bd"]/p[@class=""]/text()')[0]
star = i.xpath('//div[@class="star"]/span[@class="rating_num"]/text()')[0]
quote = i.xpath('//p[@class="quote"]/span/text()')[0]
link = i.xpath('div[@class="hd"]/a/@href')[0]
movieDict['片名'] = ''.join(title + otherTitle)
movieDict['演职员'] = mainning
movieDict['评分'] = star
movieDict['名言'] = quote
movieDict['链接'] = link
showList.append(movieDict)
return showList
# 生成CSV文件
def getCsv(movieList):
f = open('douban1.csv', 'w', encoding='utf-8', newline='')
writer = csv.DictWriter(f, fieldnames=['片名', '演职员', '评分', '名言', '链接'])
writer.writeheader()
for i in movieList:
writer.writerow(i)
if __name__ == '__main__':
movieList = []
for i in range(10):
nowUrl = url.format(i*25) # 循环得到每一个Url
print(nowUrl)
source = getSource(nowUrl) # 循环得到每一个Url的数据
movieList += everyPage(source) # 循环累加得到的数据
print(movieList)
getCsv(movieList) # 把数据传入生成CSV文件的方法中
| [
"871437338@qq.com"
] | 871437338@qq.com |
6314341c47af973fbafa71b98b6a1e0add874c4e | 1625edfe28b4b0979fd32b4a3c5e55249a993fd5 | /baekjoon9996.py | b916bd74061aa9b0b4371a44afce755157f406d1 | [] | no_license | beOk91/baekjoon2 | b8bf504c506c6278899d4107ecfe51974ef13f5e | 39569f8effb8e32405a7d74d98bdabcab783ec56 | refs/heads/master | 2023-05-11T20:11:19.015113 | 2020-09-14T23:58:49 | 2020-09-14T23:58:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | n=int(input())
pattern=input().strip().split("*")
for _ in range(n):
text=input().strip()
print("DA" if text.index(pattern[0])<text.index(pattern[1]) else "NE") | [
"be_ok91@naver.com"
] | be_ok91@naver.com |
662b4cbedb8253d593dc611960818f152b7a1d55 | d78dfc5089717fc242bbd7097f507d811abb4260 | /USA/script.icechannel.extn.common/plugins/liveresolvers/kingofplayerscom_lrv.py | 4de6efd8dce31783d522389a9397402c3a1d5726 | [] | no_license | tustxk/AddOnRepo | 995b980a9ec737e2c25bed423fc83f710c697e40 | 6b86a06cb37e6e10b4119584dd7311ebc2318e54 | refs/heads/master | 2022-10-08T21:34:34.632346 | 2016-10-28T09:48:01 | 2016-10-28T09:48:01 | 70,684,775 | 1 | 1 | null | 2022-10-01T16:27:13 | 2016-10-12T09:31:16 | Python | UTF-8 | Python | false | false | 1,760 | py | '''
Ice Channel
'''
from entertainment.plugnplay.interfaces import LiveResolver
from entertainment.plugnplay import Plugin
from entertainment import common
class kingofplayercom(LiveResolver):
implements = [LiveResolver]
name = 'kingofplayer.com'
def ResolveLive(self, content, url):
import re
new_content = re.search('src=[\'"]{1}(http://cdn\.kingofplayers\.com/.+?\.(?:js|html))[\'"]{1}', content)
if new_content:
page_url = new_content.group(1)
from entertainment.net import Net
net = Net()
new_content = net.http_GET( page_url, headers={'Referer':url} ).content
streamer = re.search('[,\: \'"=]{1,5}((?:rtmp\://|rtmpe\://).+?[^\'"&=]+?)[\'"&]{1}', new_content)
if not streamer:
new_content = re.search('src=[\'"]{1}(http://cdn\.kingofplayers\.com/.+?\.html)[\'"]{1}', new_content)
new_url = new_content.group(1)
new_content = net.http_GET( new_url, headers={'Referer':page_url} ).content
page_url = new_url
streamer = re.search('[,\: \'"=]{1,5}((?:rtmp\://|rtmpe\://).+?[^\'"&=]+?)[\'"&]{1}', new_content)
streamer = streamer.group(1)
swf_url = re.search('[,\: \'"=]{1,5}(http\://.+?\.swf)[\'"&]{1}', new_content).group(1)
playpath = re.search('file[,\: \'"=]*([^\'"]+?)[\'"&]{1}', new_content).group(1)
content = streamer + ' playpath=' + playpath + ' swfUrl=' + swf_url + ' pageUrl=' + page_url + ' timeout=15 live=1'
return (True, True, content, url)
return (False, False, content, url)
| [
"ke.xiao@netxeon.com"
] | ke.xiao@netxeon.com |
9e68fb8d64881f8571975e3a2f5f99c37158d357 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_53/247.py | 266388f5f966aaf62ae3d3e229214be70a147baa | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | def snapper(n, k):
return (k+1) & ((1<<n) - 1) == 0
def main():
labels = ["OFF", "ON"]
try:
cases = xrange(1, int(raw_input())+1)
for case in cases:
n, k = map(int, raw_input().split())
print "Case #%d: %s" % (case, labels[snapper(n, k)])
except ValueError:
print "INVALID INPUT"
main()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
9218a02e256da0f5f46dbfa773a0d6eccd56e154 | 0521afa39b2c9b64977da622779c906970af865b | /script/latent_factor_model.py | 7c5b4ec47ab25332ccedb53548d30c37e12042cc | [] | no_license | DamonHao/rec_sys | f48234f1689fb8f353a80a301647fa40bda9086d | e9272676d3794136f908eb9521a2944eefd9b38c | refs/heads/master | 2021-01-13T10:56:38.309864 | 2016-10-30T03:16:56 | 2016-10-30T03:16:56 | 72,264,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,438 | py | # -*- coding: utf-8 -*-
import random
import heapq
import math
class LatentFactorModel(object):
def __init__(self, filePath):
self._filePath = filePath
self._splitData(5, 1, 0)
def precisionAndRecall(self, N):
hit = 0
precision_all = 0
recall_all = 0
train = self._train
test = self._test
for user in train.iterkeys():
test_set_user_items = test.get(user, None)
if test_set_user_items is None:
continue
rank = self.recommend(user, N)
for item, pui in rank:
if item in test_set_user_items:
hit += 1
precision_all += N
recall_all += len(test_set_user_items)
return hit / float(precision_all), hit / float(recall_all)
def coverage(self, N):
recommend_items = set()
all_items = set()
train = self._train
num = 0
for user in train.iterkeys():
for item in train[user].iterkeys():
all_items.add(item)
rank = self.recommend(user, N)
num += len(rank)
for item, _ in rank:
recommend_items.add(item)
print "coverage", num, len(recommend_items), len(all_items)
return len(recommend_items) / float(len(all_items))
def popularity(self, N):
item_popularity = {}
popularity = 0
num = 0
for user, items in self._train.iteritems():
for item in items.iterkeys():
item_popularity.setdefault(item, 0)
item_popularity[item] += 1
for user in self._train.iterkeys():
rank = self.recommend(user, N)
for item, _ in rank:
popularity += math.log(1 + item_popularity[item])
num += 1
popularity /= float(num)
return popularity
def _splitData(self, totalSplitNum, kthAsTest, seed):
data = open(self._filePath)
random.seed(seed)
train = {} # {user : {item: score}}
test = {}
count = 0
for line in data:
user, item, score, _ = line.strip().split("::")
if random.randint(1, totalSplitNum) == kthAsTest:
test.setdefault(user, {})
test[user][item] = int(score)
else:
train.setdefault(user, {})
train[user][item] = int(score)
count += 1
if count == MAX_DATA_NUM:
break
print "data num:", count
self._train = train
self._test = test
def _sortPopularityItem(self):
itemsPopularity = {}
for items in self._train.itervalues():
for item in items:
itemsPopularity.setdefault(item, 0)
itemsPopularity[item] += 1
validNun = int(len(itemsPopularity) * 0.3)
print "total, valid num", len(itemsPopularity), validNun
assert validNun
sortedItems = heapq.nlargest(validNun, itemsPopularity.iteritems(), key=lambda e:e[1])
self._sortedItems = [item for item, _ in sortedItems]
def buildUserAction(self, negativeRatio):
self._sortPopularityItem()
sortedItems = self._sortedItems
maxSortedItemIndex = len(sortedItems)-1
usersAction = {}
for user, items in self._train.iteritems():
action = {}
# positive
for item in items:
action[item] = 1
negative_num = 0
# negative
itemsLen = len(items)
targetNegativeNum = itemsLen * negativeRatio
for i in xrange(0, itemsLen * 2):
item = sortedItems[random.randint(0, maxSortedItemIndex)]
if item in action:
continue
action[item] = 0
negative_num += 1
if negative_num >= targetNegativeNum :
break
usersAction[user] = action
self._usersAction = usersAction
def trainModel(self, itemClassNum, iterNum, learnRate, overfitParam):
self._itemClassNum = itemClassNum
self._initModel()
userToClass = self._userToClass
itemToClass = self._itemToClass
for step in xrange(iterNum):
for user, items in self._train.iteritems():
userAction = self._usersAction[user]
for item, interest in userAction.iteritems():
interestDiff = interest - self._predict(user, item)
for classIndex in xrange(itemClassNum):
userWeight = userToClass[user][classIndex]
itemWeight = itemToClass[item][classIndex]
userToClass[user][classIndex] += learnRate * (interestDiff * itemWeight - overfitParam * userWeight)
itemToClass[item][classIndex] += learnRate * (interestDiff * userWeight - overfitParam * itemWeight)
learnRate *= 0.9
def recommend(self, user, N):
has_items = self._train.get(user, None)
if not has_items:
return []
candidates = []
for item in self._itemToClass.iterkeys():
if item in has_items:
continue
interest = self._predict(user, item)
candidates.append((item, interest))
return heapq.nlargest(N, candidates, key=lambda e:e[1])
def _predict(self, user, item):
interest = 0
userToClass = self._userToClass
itemToClass = self._itemToClass
for index in xrange(self._itemClassNum):
interest += userToClass[user][index] * itemToClass[item][index]
return interest
def _initModel(self):
userToClass = {}
itemToClass = {}
epsilon = 0.1
itemClassNum = self._itemClassNum
for user, items in self._train.iteritems():
userToClass[user] = [random.uniform(0, epsilon) for i in xrange(itemClassNum)]
for item in items:
if item not in itemToClass:
itemToClass[item] = [random.uniform(0, epsilon) for i in xrange(itemClassNum)]
self._userToClass = userToClass
self._itemToClass = itemToClass
MAX_DATA_NUM = 100000
if __name__ == '__main__':
import os
filePath = os.path.join(os.path.dirname(__file__), '../ml-1m/ratings.dat')
lfm = LatentFactorModel(filePath)
lfm.buildUserAction(1)
lfm.trainModel(5, 100, 0.02, 0.01)
# print lfm.recommend('1', 10)
# print lfm.precisionAndRecall(10)
print lfm.coverage(10), lfm.popularity(10) | [
"haolinknight@gmail.com"
] | haolinknight@gmail.com |
faa20f2c671bf2819106d451219245da38e4ce8b | fe33bdb20436a379a17d56b83816d7064cb75d90 | /src/rocon_concert/concert_conductor/src/concert_conductor/transitions.py | f32eb0dbebdae0d4994c354a4df9ab4426789719 | [] | no_license | uml-robotics/catkin_tester | 764744614782acaff46f66f25dbd1650d0fcd5e8 | dfc8bb2026c06d0f97696a726a6773ff8b99496e | refs/heads/master | 2022-10-31T11:48:27.207535 | 2017-11-27T18:09:38 | 2017-11-27T18:09:38 | 111,495,779 | 0 | 1 | null | 2022-10-19T14:49:44 | 2017-11-21T03:45:59 | C | UTF-8 | Python | false | false | 4,511 | py | #!/usr/bin/env python
#
# License: BSD
# https://raw.github.com/robotics-in-concert/rocon_concert/license/LICENSE
#
"""
.. module:: transitions
This module does transition handling for the concert client state machine.
"""
##############################################################################
# Imports
##############################################################################
import concert_msgs.msg as concert_msgs
##############################################################################
# Aliases
##############################################################################
State = concert_msgs.ConcertClientState
##############################################################################
# Transitions
##############################################################################
class Dummy(object):
"""
Dummy transition handler for when there is nothing to do.
"""
def __init__(self, concert_client):
self.concert_client = concert_client
def __call__(self):
"""
Nothing to do here.
"""
pass
class TransitionToGone(object):
"""
Transition handler when moving from any state to the gone state. This will always
occur if the remote gateway has disappeared from the hub's awareness (happens
when the remote gateway shuts down) or has been missing too long. We manually
update the fact that the gateway is no longer available in the concert client's
data here.
"""
def __init__(self, concert_client):
self.concert_client = concert_client
def __call__(self, local_gateway):
"""
Nothing to do here.
"""
self.concert_client.msg.conn_stats.gateway_available = False
local_gateway.request_pulls(self.concert_client.msg.gateway_name, cancel=True)
class PendingToUninvited(object):
"""
Triggered when information about this client has been gathered.
This information is relayed to the concert client object itself in this transition.
"""
def __init__(self, concert_client):
self.concert_client = concert_client
def __call__(self, platform_info, rapps):
"""
:param platform_info rocon_std_msgs/PlatformInfo: retrieved information about this client
:param rapps rocon_app_manager_msgs/Rapp[]: list of rapps runnable by this client.
"""
# this is legacy, and I think it's broken - I use concert alias now
# self.msg.name = rocon_uri.parse(platform_info.uri).name.string
self.concert_client.msg.platform_info = platform_info
self.concert_client.msg.rapps = rapps
class AvailableToMissing(object):
"""
Triggered when a robot is still with the concert, but has dropped its connection.
"""
def __init__(self, concert_client):
self.concert_client = concert_client
def __call__(self):
# Not implemented yet, thought I'd need to update something here,
# but may actually not be necessary..
pass
##############################################################################
# Transition Table
##############################################################################
StateTransitionTable = {
(State.PENDING, State.BAD) : Dummy, #@IgnorePep8 noqa
# (State.PENDING, State.BLOCKING) : Dummy,
# (State.PENDING, State.BUSY) : Dummy,
(State.PENDING, State.UNINVITED) : PendingToUninvited,
(State.PENDING, State.GONE) : TransitionToGone,
(State.UNINVITED, State.BAD) : Dummy,
(State.UNINVITED, State.BLOCKING) : Dummy,
(State.UNINVITED, State.BUSY) : Dummy,
(State.UNINVITED, State.JOINING) : Dummy,
(State.UNINVITED, State.GONE) : TransitionToGone,
# (State.JOINING, State.BAD) : Dummy,
(State.JOINING, State.AVAILABLE) : Dummy,
(State.JOINING, State.GONE) : TransitionToGone,
# (State.AVAILABLE, State.BAD) : Dummy,
(State.AVAILABLE, State.MISSING) : AvailableToMissing,
(State.AVAILABLE, State.UNINVITED): Dummy,
(State.AVAILABLE, State.GONE) : TransitionToGone,
(State.MISSING, State.AVAILABLE) : Dummy,
(State.MISSING, State.GONE) : TransitionToGone,
(State.BUSY, State.PENDING) : Dummy,
(State.BUSY, State.GONE) : TransitionToGone,
(State.BLOCKING, State.GONE) : TransitionToGone,
(State.BAD, State.GONE) : TransitionToGone,
}
"""
Table of valid transitions and their transition handlers.
"""
| [
"james.perl12@gmail.com"
] | james.perl12@gmail.com |
90a4237ca61b7f9c9261dcd9d368c2d88f4d51a1 | 2ab1aea0a5c9556b3ebc4aab3d436779e153ec03 | /repro_lap_reg/covar_results.py | a7653e5b937e9839d43d12252725bbcad6c52f82 | [
"MIT"
] | permissive | idc9/repro_lap_reg | 8454b85df5d931dd3654dc4bdf50b3aacdaa185c | 1d3e846f8f2c3d04b4153d9ac56e0e9bd37198ca | refs/heads/main | 2023-05-31T21:57:35.379643 | 2021-07-06T19:14:46 | 2021-07-06T19:14:46 | 383,571,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | from fclsp.reshaping_utils import vec_hollow_sym
from repro_lap_reg.utils import merge_dicts
from repro_lap_reg.results_utils import compare_vecs, compare_adj_mats
def get_covar_results(est, true, zero_tol=0):
"""
Parameters
----------
est: an Estimator
A covariance estimator.
true: array-like, shape (n_features, n_features)
zero_tol: float
Output
------
out: dict with keys 'utri' and 'graph'
"""
covar_est = get_covar(est)
est_utri = vec_hollow_sym(covar_est)
true_utri = vec_hollow_sym(true)
utri_results = compare_vecs(est=est_utri, truth=true_utri,
zero_tol=zero_tol)
graph_results = compare_adj_mats(est=covar_est, truth=true,
zero_tol=zero_tol)
results = merge_dicts(utri_results, graph_results, allow_key_overlap=False)
return results
def get_covar(estimator):
if hasattr(estimator, 'covariance_'):
return estimator.covariance_
elif hasattr(estimator, 'best_estimator_'):
return get_covar(estimator.best_estimator_)
else:
raise ValueError('No covariance matrix found')
| [
"idc9@cornell.edu"
] | idc9@cornell.edu |
eb7d4abc7fd412fb1fe580ea71764e891c5d8a3e | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p2DJ/New/R2/benchmark/startQiskit81.py | f56bf0234644cb7f5c31a5b7d04b23a9581890e7 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,988 | py | # qubit number=2
# total number=9
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.x(input_qubit[1]) # number=5
prog.cx(input_qubit[0],input_qubit[1]) # number=4
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
#for i in range(n):
# prog.measure(input_qubit[i], classicals[i])
prog.x(input_qubit[1]) # number=2
prog.cx(input_qubit[0],input_qubit[1]) # number=6
prog.x(input_qubit[1]) # number=7
prog.cx(input_qubit[0],input_qubit[1]) # number=8
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
sample_shot =2800
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
circuit1.measure_all()
prog = circuit1
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
writefile = open("../data/startQiskit81.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
29bbb97b476679fe6e940996af412393027ec248 | 36bdbbf1be53ba5f09b9a2b1dd15e91f8f6b0da1 | /restaurants/migrations/0026_auto_20181123_2205.py | 9c1f8e9c7b83d80b4adf2a46dac2d6330574bfcf | [] | no_license | phufoxy/fotourNew | 801ab2518424118020dc6e5f31a7ba90a654e56a | 6048c24f5256c8c5a0d18dc7b38c106a7c92a29c | refs/heads/master | 2023-04-13T01:34:22.510717 | 2018-12-26T03:46:09 | 2018-12-26T03:46:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | # Generated by Django 2.1 on 2018-11-23 15:05
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('restaurants', '0025_auto_20181123_2155'),
]
operations = [
migrations.AlterField(
model_name='comment_restaurant',
name='date',
field=models.DateTimeField(default=datetime.datetime(2018, 11, 23, 22, 5, 52, 500874)),
),
]
| [
"vanphudhsp2015@gmail.com"
] | vanphudhsp2015@gmail.com |
6efa9c6232634b06fe3bf53c306b0b495efe8926 | 93c30152f4afa5c8feefa401e796529545e52ed2 | /Device/urls.py | c4af24562b1f856fcd9a76d87dfe4cc60ee38eac | [] | no_license | FellowCode/SmartHome | a32fa813a14b5b88d3f100736d062f0424208e1a | 58055d23c566f4f0030189c8157a7de1660cd991 | refs/heads/master | 2020-05-04T07:12:35.831914 | 2019-06-12T15:53:29 | 2019-06-12T15:53:29 | 179,022,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | from django.contrib import admin
from django.urls import path, include
from .views import *
urlpatterns = [
path('change/', change_termo),
path('settings/<id>/', settings),
path('stat/<id>/', statistic),
] | [
"31797522+FellowCode@users.noreply.github.com"
] | 31797522+FellowCode@users.noreply.github.com |
bd7736ed9e1654b7791ad680e574daee736bddc6 | 7c63130ae44e773a51fcd38c5dc3116f46daecd7 | /error/Predicted_Results/test_sample7_7recom_model1.py | 8a8c36851f91301d7ddcb8594c7239eacbeb0a5d | [] | no_license | GitHubdeWill/code_recom | a4e8e393592d210b0481f61a3cc89ea475c95153 | 954c334e4abb25aa96786c9efa8f8ca22bc286aa | refs/heads/master | 2020-04-12T15:14:02.395548 | 2018-12-20T12:07:31 | 2018-12-20T12:07:31 | 162,574,531 | 0 | 1 | null | 2019-12-02T14:28:38 | 2018-12-20T12:07:00 | Python | UTF-8 | Python | false | false | 1,179 | py | class Canvas:
def __init__(self, width, height):
self.width = width
self.height = height
self.data = [[' '] * width for i in range(height)]
def setpixel(self, row, col):
self.data[row][col] = '*'
def getpixel(self, row, col):
return self.data[row][col]
def display(self):
print "\n".join(["".join(row) for row in self.data])
class Shape:
def paint(self, canvas): pass
class Rectangle(Shape):
def __init__(self, x, y, w, h):
self.x = x
self.y = y
self.w = w
self.h = h
def hline(self, x, y, w):
pass
def vline(self, x, y, h):
pass
def paint(self, canvas):
hline(self.x, self.y, self.w)
hline(self.x, self.y + self.h, self.w)
vline(self.x, self.y, self.h)
vline(self.x + self.w, self.y, self.h)
class Square(Rectangle):
def __init__(self, x, y, size):
Rectangle.__init__(self, x, y, size, self._download_class=True)
)
class CompoundShape(Shape):
def __init__(self, shapes):
self.shapes = shapes
def paint(self, canvas):
for s in self.shapes:
s.paint(canvas) | [
"wtemp@cs.umass.edu"
] | wtemp@cs.umass.edu |
74ee475e802a1523ea256fe0e91b157437daa072 | 10d98fecb882d4c84595364f715f4e8b8309a66f | /pruning_identified_exemplars/save_checkpoint/imagenet_train_eval.py | 6e55cfcb1026e17d743d3876706537783e2cf68a | [
"LicenseRef-scancode-proprietary-license",
"CC-BY-4.0",
"Apache-2.0"
] | permissive | afcarl/google-research | 51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42 | 320a49f768cea27200044c0d12f394aa6c795feb | refs/heads/master | 2021-12-02T18:36:03.760434 | 2021-09-30T20:59:01 | 2021-09-30T21:07:02 | 156,725,548 | 1 | 0 | Apache-2.0 | 2018-11-08T15:13:53 | 2018-11-08T15:13:52 | null | UTF-8 | Python | false | false | 7,235 | py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Training script to sparsify a ResNet-50.
"""
import os
from absl import app
from absl import flags
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
from pruning_identified_exemplars.utils import model_utils
# model params
flags.DEFINE_integer(
'steps_per_checkpoint', 500,
'Controls how often checkpoints are generated. More steps per '
'checkpoint = higher utilization of TPU and generally higher '
'steps/sec')
flags.DEFINE_float('label_smoothing', 0.1,
'Relax confidence in the labels by (1-label_smoothing).')
flags.DEFINE_integer('steps_per_eval', 1251,
'Controls how often evaluation is performed.')
flags.DEFINE_integer('num_cores', 8, 'Number of cores.')
flags.DEFINE_string('output_dir', '',
'Directory where to write event logs and checkpoint.')
flags.DEFINE_string('mode', 'train',
'One of {"train_and_eval", "train", "eval"}.')
flags.DEFINE_string('train_dir', '',
'The location of the tfrecords used for training.')
flags.DEFINE_string('eval_dir', '',
'The location of the tfrecords used for eval.')
flags.DEFINE_string('master', 'local', 'Name of the TensorFlow master to use.')
# pruning flags
flags.DEFINE_string('pruning_hparams', '',
'Comma separated list of pruning-related hyperparameters')
flags.DEFINE_float('end_sparsity', 0.1,
'Target sparsity desired by end of training.')
flags.DEFINE_integer('sparsity_begin_step', 5000, 'Step to begin pruning at.')
flags.DEFINE_integer('sparsity_end_step', 8000, 'Step to end pruning at.')
flags.DEFINE_integer('pruning_frequency', 500, 'Step interval between pruning.')
flags.DEFINE_enum(
'pruning_method', 'baseline',
('threshold', 'random_independent', 'random_cumulative', 'baseline'),
'Method used for pruning'
'Specify as baseline if no pruning is used.')
flags.DEFINE_bool('log_class_level_summaries', True,
'Boolean for whether to log class level precision/accuracy.')
flags.DEFINE_float('expansion_factor', 6.,
'how much to expand filters before depthwise conv')
flags.DEFINE_float(
'training_steps_multiplier', 1.0,
'Training schedule is shortened or extended with the '
'multiplier, if it is not 1.')
flags.DEFINE_integer('block_width', 1, 'width of block')
flags.DEFINE_integer('block_height', 1, 'height of block')
# set this flag to true to do a test run of this code with synthetic data
flags.DEFINE_bool('test_small_sample', True,
'Boolean for whether to test internally.')
FLAGS = flags.FLAGS
imagenet_params = {
'sloppy_shuffle': True,
'num_cores': 8,
'train_batch_size': 4096,
'num_train_images': 1281167,
'num_eval_images': 50000,
'num_label_classes': 1000,
'num_train_steps': 32000,
'base_learning_rate': 0.1,
'weight_decay': 1e-4,
'eval_batch_size': 1024,
'mean_rgb': [0.485 * 255, 0.456 * 255, 0.406 * 255],
'stddev_rgb': [0.229 * 255, 0.224 * 255, 0.225 * 255]
}
def main(argv):
del argv # Unused.
initial_sparsity = 0.0
pruning_hparams_string = ('begin_pruning_step={0},'
'sparsity_function_begin_step={0},'
'end_pruning_step={1},'
'sparsity_function_end_step={1},'
'target_sparsity={2},'
'initial_sparsity={3},'
'pruning_frequency={4},'
'threshold_decay=0,'
'block_width={5},'
'block_height={6}'.format(
FLAGS.sparsity_begin_step,
FLAGS.sparsity_end_step, FLAGS.end_sparsity,
initial_sparsity, FLAGS.pruning_frequency,
FLAGS.block_width, FLAGS.block_height))
params = imagenet_params
if FLAGS.test_small_sample:
output_dir = '/tmp/imagenet_train_eval/'
else:
# configures train directories based upon hyperparameters.
if FLAGS.pruning_method:
folder_stub = os.path.join(FLAGS.pruning_method, str(FLAGS.end_sparsity),
str(FLAGS.sparsity_begin_step),
str(FLAGS.sparsity_end_step))
else:
folder_stub = os.path.join('baseline', str(0.0), str(0.0), str(0.0),
str(0.0), str(FLAGS.resnet_depth))
output_dir = os.path.join(FLAGS.output_dir, folder_stub)
update_params = {
'lr_schedule': [(1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80)],
'momentum': 0.9,
'data_format': 'channels_last',
'output_dir': output_dir,
'label_smoothing': FLAGS.label_smoothing,
}
params.update(update_params)
if FLAGS.pruning_method != 'baseline':
params['pruning_method'] = FLAGS.pruning_method
else:
params['pruning_method'] = None
params['mode'] = FLAGS.mode
if FLAGS.mode == 'train':
params['batch_size'] = params['train_batch_size']
params['task'] = 'imagenet_training'
params['data_dir'] = FLAGS.train_dir
else:
params['batch_size'] = params['eval_batch_size']
params['task'] = 'imagenet_eval'
params['data_dir'] = FLAGS.eval_dir
if FLAGS.test_small_sample:
update_params = {
'batch_size': 2,
'num_train_steps': 10,
'num_images': 2,
'num_train_images': 10,
'num_eval_images': 10,
}
params['test_small_sample'] = True
params.update(update_params)
else:
params['test_small_sample'] = False
if FLAGS.mode == 'eval':
# Run evaluation when there's a new checkpoint
for ckpt in tf2.train.checkpoints_iterator(params['output_dir']):
tf.logging.info('Starting to evaluate.')
try:
_ = model_utils.initiate_task_helper(
ckpt_directory=ckpt, model_params=params, pruning_params=None)
current_step = int(os.path.basename(ckpt).split('-')[1])
if current_step >= params['num_train_steps']:
tf.logging.info('Evaluation finished')
break
except tf.errors.NotFoundError:
tf.logging.info('Checkpoint was not found, skipping checkpoint.')
else:
if FLAGS.mode == 'train':
tf.logging.info('start training...')
model_utils.initiate_task_helper(
ckpt_directory=None,
model_params=params,
pruning_params=pruning_hparams_string)
tf.logging.info('finished training.')
if __name__ == '__main__':
app.run(main)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
9d191ebc88daf5624bd0ef8db05ca5582c623a17 | 3cdd7019f3acbf7b7a7e879444454703fcc73d62 | /solutions/57.insert-interval.py | 5d2417be706aee6d99ed4e78e3e33eba5144c8ed | [] | no_license | quixoteji/Leetcode | 1dc2e52e53a7b58d9bae15ce2d5c4142cbd365af | 00bf9a8164008aa17507b1c87ce72a3374bcb7b9 | refs/heads/master | 2021-07-15T07:59:21.294297 | 2020-05-13T03:08:47 | 2020-05-13T03:08:47 | 138,812,553 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | #
# @lc app=leetcode id=57 lang=python3
#
# [57] Insert Interval
#
# @lc code=start
class Solution:
def insert(self, intervals: List[List[int]], newInterval: List[int]) -> List[List[int]]:
return self.sol1(intervals, newInterval)
# Solution 1
def sol1(self, intervals, newInterval) :
ans = []
n = len(intervals)
for i in range(len(intervals)):
if intervals[i][0] > newInterval[0] :
intervals.insert(i, newInterval)
if len(intervals) == n : intervals.append(newInterval)
print(intervals)
for interval in intervals :
if not ans or interval[0] > ans[-1][1] :
ans.append(interval)
else :
ans[-1][1] = max(ans[-1][1], interval[1])
return ans
# @lc code=end
| [
"jxj405@case.edu"
] | jxj405@case.edu |
638c780ac1d2224bab250dbad8d30263e3c8425f | 159c3669bfe0525b0608bb658971cf4a7d82c7c5 | /query_learn/models.py | ab0f74ef981aa922e6f460db759d0b96ddd4ec01 | [] | no_license | jatinkatyal13/Django_Boiler_Plate | eb163486dc0307c8c0a5e4cbcdfee53826a3640c | 550f50f33be396e3c82082cc722ec897dadf04a8 | refs/heads/master | 2020-03-11T03:49:39.089700 | 2018-04-28T10:50:31 | 2018-04-28T10:50:31 | 129,759,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | from django.db import models
# Create your models here.
class Blog(models.Model):
name = models.CharField(max_length = 100)
website = models.URLField()
def __str__ (self):
return self.name
class Author(models.Model):
name = models.CharField(max_length = 200)
score = models.IntegerField(default = 1)
def __str__ (self):
return self.name
class Entry(models.Model):
text = models.TextField()
blog = models.ForeignKey(Blog, on_delete = models.CASCADE)
author = models.ManyToManyField(Author) | [
"jatin.katyal13@gmail.com"
] | jatin.katyal13@gmail.com |
f566ce02ce3a9f62a9487b3ae38419afe38437c6 | 40699a136c4f4500833e21e0d7863a1ba624a5cd | /pde/tools/tests/test_parameters.py | 761a0ce0f072ff26b20329f71a0ec5e47ec257c4 | [
"MIT"
] | permissive | binder-oilgains/py-pde | 0e0226678b2598b50aa72762d9a70bb8e9536e43 | d76977095f1e915c63230e6895391f063d0778d8 | refs/heads/main | 2023-02-23T09:09:05.543386 | 2021-02-02T00:55:40 | 2021-02-02T00:55:40 | 334,454,351 | 0 | 0 | MIT | 2021-01-30T16:09:14 | 2021-01-30T16:09:13 | null | UTF-8 | Python | false | false | 5,307 | py | """
.. codeauthor:: David Zwicker <david.zwicker@ds.mpg.de>
"""
import itertools
import logging
import pickle
import numpy as np
import pytest
from pde.tools.parameters import (
DeprecatedParameter,
HideParameter,
Parameter,
Parameterized,
get_all_parameters,
sphinx_display_parameters,
)
def test_parameters():
""" test mixing Parameterized """
param = Parameter("a", 1, int, "help", extra={"b": 3})
assert isinstance(str(param), str)
p_string = pickle.dumps(param)
param_new = pickle.loads(p_string)
assert param.__dict__ == param_new.__dict__
assert param is not param_new
assert param_new.extra["b"] == 3
class Test1(Parameterized):
parameters_default = [param]
t = Test1()
assert t.parameters["a"] == 1
assert t.get_parameter_default("a") == 1
t = Test1(parameters={"a": 2})
assert t.parameters["a"] == 2
assert t.get_parameter_default("a") == 1
with pytest.raises(ValueError):
t = Test1(parameters={"b": 3})
t = Test1()
ps = t._parse_parameters({"b": 3}, check_validity=False)
assert ps["a"] == 1
assert ps["b"] == 3
class Test2(Test1):
# also test conversion of default parameters
parameters_default = [Parameter("b", "2", int, "help")]
t = Test2()
assert t.parameters["a"] == 1
assert t.parameters["b"] == 2
t = Test2(parameters={"a": 10, "b": 20})
assert t.parameters["a"] == 10
assert t.parameters["b"] == 20
assert t.get_parameter_default("a") == 1
assert t.get_parameter_default("b") == "2"
with pytest.raises(KeyError):
t.get_parameter_default("c")
class Test3(Test2):
# test overwriting defaults
parameters_default = [Parameter("a", 3), Parameter("c", 4)]
t = Test3()
assert t.parameters["a"] == 3
assert t.get_parameter_default("a") == 3
assert set(t.parameters.keys()) == {"a", "b", "c"}
# test get_all_parameters function after having used Parameters
p1 = get_all_parameters()
for key in ["value", "description"]:
p2 = get_all_parameters(key)
assert set(p1) == p2.keys()
# test whether sphinx_display_parameters runs
lines = [":param parameters:"]
sphinx_display_parameters(None, "class", "Test1", Test1, None, lines)
assert len(lines) > 1
def test_parameters_simple():
""" test adding parameters using a simple dictionary """
class Test(Parameterized):
parameters_default = {"a": 1}
t = Test()
assert t.parameters["a"] == 1
def test_parameter_help(monkeypatch, capsys):
""" test how parameters are shown """
class Test1(Parameterized):
parameters_default = [DeprecatedParameter("a", 1, int, "random string")]
class Test2(Test1):
parameters_default = [Parameter("b", 2, int, "another word")]
t = Test2()
for in_jupyter in [False, True]:
monkeypatch.setattr("pde.tools.output.in_jupyter_notebook", lambda: in_jupyter)
for flags in itertools.combinations_with_replacement([True, False], 3):
Test2.show_parameters(*flags)
o1, e1 = capsys.readouterr()
t.show_parameters(*flags)
o2, e2 = capsys.readouterr()
assert o1 == o2
assert e1 == e2 == ""
def test_hidden_parameter():
""" test how hidden parameters are handled """
class Test1(Parameterized):
parameters_default = [Parameter("a", 1), Parameter("b", 2)]
assert Test1().parameters == {"a": 1, "b": 2}
class Test2(Test1):
parameters_default = [HideParameter("b")]
class Test2a(Parameterized):
parameters_default = [Parameter("a", 1), Parameter("b", 2, hidden=True)]
for t_class in [Test2, Test2a]:
assert "b" not in t_class.get_parameters()
assert len(t_class.get_parameters()) == 1
assert len(t_class.get_parameters(include_hidden=True)) == 2
t2 = t_class()
assert t2.parameters == {"a": 1, "b": 2}
assert t2.get_parameter_default("b") == 2
with pytest.raises(ValueError):
t2._parse_parameters({"b": 2}, check_validity=True, allow_hidden=False)
class Test3(Test1):
parameters_default = [Parameter("b", 3)]
t3 = Test3()
assert t3.parameters == {"a": 1, "b": 3}
assert t3.get_parameter_default("b") == 3
def test_convert_default_values(caplog):
""" test how default values are handled """
class Test1(Parameterized):
parameters_default = [Parameter("a", 1, float)]
with caplog.at_level(logging.WARNING):
t1 = Test1()
assert "Default value" not in caplog.text
assert isinstance(t1.parameters["a"], float)
class Test2(Parameterized):
parameters_default = [Parameter("a", np.arange(3), np.array)]
t2 = Test2()
np.testing.assert_equal(t2.parameters["a"], np.arange(3))
class Test3(Parameterized):
parameters_default = [Parameter("a", [0, 1, 2], np.array)]
t3 = Test3()
np.testing.assert_equal(t3.parameters["a"], np.arange(3))
class Test4(Parameterized):
parameters_default = [Parameter("a", 1, str)]
with caplog.at_level(logging.WARNING):
t4 = Test4()
assert "Default value" in caplog.text
np.testing.assert_equal(t4.parameters["a"], "1")
| [
"david.zwicker@ds.mpg.de"
] | david.zwicker@ds.mpg.de |
4f0e1f57ce50c3a051c432c5570ea57775a38300 | 4589a9ea76e458793ad78059839b81d365f433de | /athena_automation/athenataf/tests/group_management/group_management/non_default_value_check/NonDefaultValueCheck.py | b7af3ed0749dbbab1b9fdca59e171016b0940a66 | [] | no_license | cash2one/reautomation_handoff | 5e2c4c432d8f658d1b57211782744bd0b56c52f6 | 7ef83572d659db35036189eb394f99de1369db5a | refs/heads/master | 2020-05-22T17:56:33.214080 | 2015-07-13T07:51:18 | 2015-07-13T07:51:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,970 | py | import logging
logger = logging.getLogger('athenataf')
import time
from athenataf.lib.functionality.test.AthenaGUITestCase import AthenaGUITestCase
class NonDefaultValueCheck(AthenaGUITestCase):
'''
Test class for NonDefaultValueCheck.
'''
def _create_network(self , network_page):
time.sleep(10)
network_page.delete_network_if_present()
network_page.delete_wired_network_if_present()
basic_info = network_page.create_new_network()
vlan_page = basic_info.employee_network_info()
security_page = vlan_page.use_vlan_defaults()
access_page = security_page.set_default_settings()
access_page.click_role_radio_and_click_finish_button()
basic_info = network_page.create_new_network()
vlan_obj = basic_info.wired_employee_network_info()
security = vlan_obj.wired_vlan_defaults()
security.wired_employee_security_defaults()
access = security.wired_security_defaults()
network_assign = access.use_access_defaults()
network_assign.finish_network_setup()
def test_ath_11748_group_configuration(self):
inner_left_panel = self.TopPanel.click_slider_icon()
self.take_s1_snapshot()
if inner_left_panel.assert_group():
if inner_left_panel.assert_sample_group_with_vc_present():
manage_group_page = inner_left_panel.manage_group()
manage_group_page.move_virtual_controller()
inner_left_panel.manage_group()
manage_group_page.delete_empty_group()
elif inner_left_panel.assert_sample_group_without_vc_present():
manage_group_page = inner_left_panel.manage_group()
manage_group_page.delete_empty_group()
create_group_page = inner_left_panel.add_group()
create_group_page.create_multiple_groups()
create_group_page = inner_left_panel.add_group()
create_group_page.create_empty_group()
inner_left_panel.select_samplegroup()
network_page = self.LeftPanel.go_to_network_page()
self._create_network(network_page)
inner_left_panel.click_all_groups_label()
inner_left_panel = self.TopPanel.click_slider_icon()
manage_group_page = inner_left_panel.manage_group()
manage_group_page.move_virtual_controller2()
inner_left_panel.select_samplegroup()
network_page = self.LeftPanel.go_to_network_page()
self._create_network(network_page)
self.take_s2_snapshot()
network_page.delete_network_if_present()
network_page.delete_wired_network_if_present()
inner_left_panel.click_all_groups_label()
inner_left_panel = self.TopPanel.click_slider_icon()
manage_group_page = inner_left_panel.manage_group()
manage_group_page.move_virtual_controller()
manage_group_page = inner_left_panel.manage_group()
manage_group_page.delete_empty_group1()
manage_group_page = inner_left_panel.manage_group()
manage_group_page.delete_empty_group()
self.browser.refresh()
self.take_s3_snapshot()
self.assert_s1_s2_diff(None)
self.assert_s1_s3_diff()
self.clear()
def test_ath_11521_create_group(self):
conf=self.config.config_vars
inner_left_panel = self.TopPanel.click_slider_icon()
self.take_s1_snapshot()
if inner_left_panel.assert_mygroup():
if inner_left_panel.assert_mygroup_with_vc_present():
manage_group_page = inner_left_panel.manage_group()
manage_group_page.move_virtual_controller5()
inner_left_panel.manage_group()
manage_group_page.delete_empty_mygroup()
elif inner_left_panel.assert_mygroup_without_vc_present():
manage_group_page = inner_left_panel.manage_group()
manage_group_page.delete_empty_mygroup()
if inner_left_panel.assert_mynew_group():
manage_group_page = inner_left_panel.manage_group()
manage_group_page.delete_empty_mynew_group()
create_group_page = inner_left_panel.add_group()
create_group_page.create_multiple_empty_groups(conf.mynew)
create_group_page = inner_left_panel.add_group()
create_group_page.create_group_with_vc(conf.Mygroup)
create_group_page = inner_left_panel.add_group()
manage_group_page = inner_left_panel.manage_group()
manage_group_page.assert_mygroup_and_mynew()
manage_group_page.click_manage_group_close_button()
self.take_s2_snapshot()
manage_group_page = inner_left_panel.manage_group()
manage_group_page.move_virtual_controller5()
manage_group_page = inner_left_panel.manage_group()
manage_group_page.delete_empty_mygroup()
manage_group_page.delete_empty_mynew_group()
self.browser.refresh()
self.take_s3_snapshot()
self.assert_s1_s2_diff(None)
self.assert_s1_s3_diff()
self.clear()
def test_ath_11523_delete_group(self):
conf=self.config.config_vars
inner_left_panel = self.TopPanel.click_slider_icon()
self.take_s1_snapshot()
if inner_left_panel.assert_mygroup():
if inner_left_panel.assert_mygroup_with_vc_present():
manage_group_page = inner_left_panel.manage_group()
manage_group_page.move_virtual_controller5()
inner_left_panel.manage_group()
manage_group_page.delete_empty_mygroup()
elif inner_left_panel.assert_mygroup_without_vc_present():
manage_group_page = inner_left_panel.manage_group()
manage_group_page.delete_empty_mygroup()
if inner_left_panel.assert_mynew_group():
manage_group_page = inner_left_panel.manage_group()
manage_group_page.delete_empty_mynew_group()
manage_group_page.click_manage_group_close_button()
create_group_page = inner_left_panel.add_group()
create_group_page.create_multiple_empty_groups(conf.mynew)
create_group_page = inner_left_panel.add_group()
create_group_page.create_group_with_vc(conf.Mygroup)
manage_group_page = inner_left_panel.manage_group()
manage_group_page.delete_empty_mynew_group()
manage_group_page.delete_empty_mygroup()
manage_group_page.assert_group_has_swarm()
self.take_s2_snapshot()
manage_group_page = inner_left_panel.manage_group()
manage_group_page.move_virtual_controller5()
manage_group_page = inner_left_panel.manage_group()
manage_group_page.delete_empty_mygroup()
self.browser.refresh()
self.take_s3_snapshot()
self.assert_s1_s2_diff(None)
self.assert_s1_s3_diff()
self.clear() | [
"raju_set@testmile.com"
] | raju_set@testmile.com |
c05a4d2c8f71e43c742416226d3a37a8bec52e4e | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /HvkPdhijquecKASdF_4.py | 28fe72dc5e2ebc80b433f466d3102b76d2b8443e | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | """
Create a function that takes a dictionary of student names and returns a list
of student names in **alphabetical order**.
### Examples
get_student_names({
"Student 1" : "Steve",
"Student 2" : "Becky",
"Student 3" : "John"
}) ➞ ["Becky", "John", "Steve"]
### Notes
* Don't forget to `return` your result.
* If you get stuck on a challenge, find help in the **Resources** tab.
* If you're _really_ stuck, unlock solutions in the **Solutions** tab.
"""
def get_student_names(students):
return sorted([students[items] for items in students])
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
698c729f923f3786348bd575ba07df82bc2097ed | cd57ad36685cc188ea42219bd220905e23e61f4c | /tests/logic/test_time.py | b2789a57f2b7b17fc9152dbc40382c556402ac8a | [
"BSD-3-Clause"
] | permissive | gitCommitWiL/ChatterBot | fa404848c7eb8f8ffb07c80c7d3ec47aeb2fe177 | 4f2275ec8a6e3546c4251db9e9938f7b3fd29e68 | refs/heads/master | 2021-04-22T14:52:18.175648 | 2020-03-26T11:22:16 | 2020-03-26T11:22:16 | 249,854,439 | 2 | 0 | BSD-3-Clause | 2020-03-25T01:02:46 | 2020-03-25T01:02:46 | null | UTF-8 | Python | false | false | 815 | py | from tests.base_case import ChatBotTestCase
from chatterbot.logic import TimeLogicAdapter
from chatterbot.conversation import Statement
class TimeAdapterTests(ChatBotTestCase):
    """Tests for TimeLogicAdapter, which answers questions about the time."""

    def setUp(self):
        super().setUp()
        # A fresh adapter bound to the test chatbot for every test.
        self.adapter = TimeLogicAdapter(self.chatbot)

    def test_positive_input(self):
        # A question about the time is answered with full confidence.
        statement = Statement(text="Do you know what time it is?")
        response = self.adapter.process(statement)
        self.assertEqual(response.confidence, 1)
        self.assertIn("The current time is ", response.text)

    def test_negative_input(self):
        # An unrelated question still yields the time-response text, but
        # with zero confidence.
        statement = Statement(text="What is an example of a pachyderm?")
        response = self.adapter.process(statement)
        self.assertEqual(response.confidence, 0)
        self.assertIn("The current time is ", response.text)
| [
"gunthercx@gmail.com"
] | gunthercx@gmail.com |
20ef6a2b50cafbfbe8d5fc7d867265690b62d4c0 | 535503dc18c38b92f8520289da5b4fa42b0a722a | /code/exp_control/sequencer/sequences/Ramsey2_D52_D32.py | f1fc50821392952cb3b5aa8a7897c1b7850731bf | [] | no_license | jamesbate/phd_code | fbbbf7657c428a0a1f18768edca1dfce56801cc1 | 7e71d7f041835497fb421dd741c644ab5c8e3805 | refs/heads/master | 2023-05-07T10:31:22.168217 | 2021-05-26T15:00:40 | 2021-05-26T15:00:40 | 371,073,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,555 | py | # General S/D experiment
<VARIABLES>
# initializing the ion to the s state with 854 laser
MeasPoints=self.set_variable("float","MeasPoints",50,1,2e5)
n_loops=self.set_variable("float","n_loops",1,1,500000)
p729_pulse=self.set_variable("bool","p729_pulse",0)
pulse729_length=self.set_variable("float","pulse729_length",1000.000000,0,2e5)
# Raman pulse
pulse_393=self.set_variable("bool","pulse_393",1)
raman_length=self.set_variable("float","raman_length",50.000000,0,2e5)
# ion analys
do_pipulse=self.set_variable("bool","do_pipulse",0)
pitime_carrier=self.set_variable("float","pitime_carrier",2.,0,2e5)
do_pihalf=self.set_variable("bool","do_pihalf",0)
qubit_pihalf=self.set_variable("float","qubit_pihalf",2.,0,2e5)
analyse_ion=self.set_variable("bool","analyse_ion",1)
new_zealand=self.set_variable("float","new_zealand",2.,0,2e5)
phase=self.set_variable("float","phase",0.,0,50)
# Doppler cooling
doppler_length=self.set_variable("float","doppler_length",5000.000000,1,2e5)
# detection time
det_time=self.set_variable("float","det_time",5000.000000,0.01,2e7)
# sideband cooling
sb_cool_com=self.set_variable("bool","sb_cool_com",0)
#sb_cool_strech=self.set_variable("bool","sb_cool_strech",0)
SBCool_time=self.set_variable("float","SBCool_time",2000.000000,1,2e5)
sb_cool_rad1=self.set_variable("bool","sb_cool_rad1",0)
sb_cool_rad2=self.set_variable("bool","sb_cool_rad2",0)
SBCool_time_rad=self.set_variable("float","SBCool_time_rad",5000.000000,1,2e5)
#optical pumping with 397 sigma
opt_pumping=self.set_variable("bool","opt_pumping",1)
pump_length=self.set_variable("float","pump_length",40.000000,1,2e5)
repump866_length=self.set_variable("float","repump_length866",20.000000,1,2e5)
# delays during the experiment
delay=self.set_variable("float","delay",2,0,500000)
repump_length=self.set_variable("float","repump_length854",20.000000,1,2e5)
# before detecting we can switch on the 854 to see if it can depopulate the D state
#repump_test=self.set_variable("bool","repump_test",0)
#repump_test_length=self.set_variable("float","repump_test_length",20.000000,1,2e5)
#dummy_var=int(self.set_variable("float","maesurement_type",0,0,1e5))
mes_type=self.set_variable("float","mes_type",0,0,2e5)
</VARIABLES>
<TRANSITIONS>
</TRANSITIONS>
# The save form specifies which data will be saved and how, when a scan is performed.
# If this is omitted a standard form is used
<SAVE FORM>
.dat ; %1.2f
PMTcounts; 1;sum; (1:N); %1.0f
</SAVE FORM>
# Here the sequence can override program parameters. Syntax follows from "Write Token to Params.vi"
<PARAMS OVERRIDE>
AcquisitionMode fluorescence
Cycles 50
</PARAMS OVERRIDE>
#<TRANSITIONS>
# work around to "error while getting tag" info
#</TRANSITIONS>
<SEQUENCE>
# NOTE(review): leading whitespace was lost in this copy of the file; the
# per-shot operations below are assumed to sit inside the n_loops loop --
# verify the indentation against the original sequencer source.
#turning all Lasers off
#ttl_pulse("31",10,is_last=True)
TTLsOff(["854 sw","Pi397","Sigma397","dp397","397det","866 sw"])
InitPulse(50)
delay = 2
# rf_on(150, -100, dds_address=0, start_time = 0)
#setTTLOn("729_not_393second",0,is_last=True)
for i in range(int(n_loops)):
    # Doppler cooling, with the PMT recording fluorescence in parallel.
    ttl_pulse("854 sw",repump_length,is_last=False)
    DopplerCooling(doppler_length, is_last=False)
    PMTDetection(doppler_length)
    seq_wait(1)
    if opt_pumping:
        # optical pumping with 397 sigma; 866 stays on slightly longer
        ttl_pulse(["Sigma397","dp397"],pump_length,is_last=False)
        ttl_pulse("866 sw",pump_length+repump866_length,is_last=True)
        #ttl_pulse(["854"],repump_length,is_last=False)
        seq_wait(0.1)
    if sb_cool_com:
        setTTLOff("729SPswitch",0,is_last=True)
        SBCooling2(length = SBCool_time)
        setTTLOn("729SPswitch",0,is_last=True)
        seq_wait(0.1)
        seq_wait(7)
    if sb_cool_rad1:
        SBCooling2(length = SBCool_time_rad, transition="sideband_cool_rad1")
    if sb_cool_rad2:
        SBCooling2(length = SBCool_time_rad, transition="sideband_cool_rad2")
        seq_wait(0.1)
        seq_wait(7)
    if p729_pulse:
        setTTLOff("729SPswitch",0,is_last=True)
        rf_pulse(pulse729_length, 0, ion=1, transition_param='729_Probe', is_last=True, address=1)
        setTTLOn("729SPswitch",0,is_last=True)
        seq_wait(delay)
    if pulse_393:
        seq_wait(delay)
        if 1:#(mes_type >5):
            RamanPulse(raman_length)
            seq_wait(delay)
    if analyse_ion:
        # analysis mode forces both 729 analysis pulses on
        do_pipulse = 1
        do_pihalf =1
        #pitime_carrier = 1.0
        #qubit_pihalf = 0.5
    if do_pihalf: #mes_type % 3 == 2:
        setTTLOn("729SPswitch",0,is_last=True)
        rf_pulse(qubit_pihalf, 0*3.14159*0.25, ion=1, transition_param='729_qubit', is_last=True, address=1) #sigma y
        seq_wait(1)
        setTTLOff("729SPswitch",0,is_last=True)
        seq_wait(1)
    if(do_pipulse):
        #seq_wait(1000) # new zealand
        setTTLOn("729SPswitch",0,is_last=True)
        rf_pulse(pitime_carrier, 0, ion=1, transition_param='729_Carrier', is_last=True, address=1)
        seq_wait(delay)
        setTTLOff("729SPswitch",0,is_last=True)
        seq_wait(1)
    if analyse_ion:
        # Ramsey analysis: carrier pi pulse, wait, then pi/2 with scanned phase
        setTTLOn("729SPswitch",0,is_last=True)
        seq_wait(new_zealand) # new zealand
        rf_pulse(pitime_carrier, 0, ion=1, transition_param='729_Carrier', is_last=True, address=1)
        seq_wait(delay)
        rf_pulse(qubit_pihalf, phase*3.14159*0.5, ion=1, transition_param='729_qubit', is_last=True, address=1)
        seq_wait(delay)
        setTTLOff("729SPswitch",0,is_last=True)
        seq_wait(delay)
    # state detection window
    ttl_pulse(["Pi397","dp397","866 sw","397det"],det_time,is_last=False)
    PMTDetection(det_time)
    seq_wait(1)
    TTLsOff(["Pi397","866 sw"])
</SEQUENCE>
<AUTHORED BY LABVIEW>
1
</AUTHORED BY LABVIEW>
| [
"james.bate@oriel.ox.ac.uk"
] | james.bate@oriel.ox.ac.uk |
e37e107190dff0ae34e8723deb1d746d87aba1fb | 2181795d5c380fef6d929f28fb4c90c0b1ffdc50 | /PythonScript/twitter_streaming.py | c6658c25cdde056fd59a770e2c3519d64af1bda2 | [] | no_license | pvhuu/Social-Network-Analysis | fa2c69460b7f811b16d4edfcdd99359825ca046e | c54d25eaa5838ea1e118cf8000a3a0bedf3b1ccd | refs/heads/master | 2020-04-24T04:55:24.381193 | 2019-02-25T12:31:57 | 2019-02-25T12:31:57 | 171,719,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,614 | py | import sys
import string
import time
from tweepy import Stream
from tweepy.streaming import StreamListener
from twitter_client import get_twitter_auth
class CustomListener(StreamListener):
    """ Custom StreamListener from streaming Twitter data.

    Appends every raw JSON payload received from the stream to a
    ``stream_<query>.jsonl`` file (one JSON document per line).
    """

    def __init__(self,fname):
        # Sanitize the query string so it is safe to use in a file name.
        safe_fname = format_filename(fname)
        self.outfile = "stream_%s.jsonl" % safe_fname

    def on_data(self,data):
        # Append the raw payload; always return True so the stream stays
        # open — even after a write failure (we back off for 5s first).
        try:
            with open(self.outfile, 'a') as f:
                f.write(data)
                return True
        except BaseException as e:
            sys.stderr.write("Error on_data: {}\n".format(e))
            time.sleep(5)
        return True

    def on_error(self,status):
        # HTTP 420 means we are being rate limited: returning False
        # disconnects the stream instead of hammering the API.
        if status == 420:
            sys.stderr.write("Rate limit exceeded\n")
            return False
        else:
            sys.stderr.write("Error {}\n".format(status))
            return True
def format_filename(fname):
    """
    Convert fname into a safe string for a file name.
    Return: string
    """
    # Same rule as convert_valid, applied character by character:
    # letters, digits, '-', '_' and '.' pass through, anything else
    # becomes an underscore.
    safe_chars = "-_.%s%s" % (string.ascii_letters, string.digits)
    return ''.join(ch if ch in safe_chars else '_' for ch in fname)
def convert_valid(one_char):
    """
    Convert a character into '_' if "invalid".
    Return: string
    """
    # Whitelist: dash, underscore, dot, ASCII letters and digits.
    if one_char in "-_.%s%s" % (string.ascii_letters, string.digits):
        return one_char
    return '_'
if __name__ == '__main__':
    # Usage: python twitter_streaming.py <keyword> [<keyword> ...]
    query = sys.argv[1:]  # list of CLI arguments
    query_fname = ' '.join(query)  # string used for the output file name
    auth = get_twitter_auth()
    # Bug fix: the Stream(...) call was missing its closing parenthesis,
    # which made the whole script a SyntaxError.
    twitter_stream = Stream(auth, CustomListener(query_fname))
    # ``async`` became a reserved keyword in Python 3.7; tweepy >= 3.7
    # renamed the parameter to ``is_async``.
    twitter_stream.filter(track=query, is_async=True)
| [
"="
] | = |
e6ea1c5b7867d5bcc40adbeab05230c3eb764d24 | b9963ffb80aad7e057bc375edb85ac7ed5a837d0 | /knowit2016/knowit19.py | 24d450541ccf6ab2c23a57b0b5a410272753a1e4 | [
"MIT"
] | permissive | matslindh/codingchallenges | a2db9f4579e9f35189f5cdf74590863cf84bdf95 | a846e522f7a31e988c470cda87955ee3ef20a274 | refs/heads/main | 2022-12-23T15:56:19.776354 | 2022-12-15T21:03:37 | 2022-12-15T21:03:37 | 76,491,177 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | import string
# Knowit 2016 puzzle 19: the input file is one long stream of two-digit
# numbers; each pair's parity is rendered as a pixel of a 21-row PGM image.
out = open("input/knowit19_output.pgm", "w")
#out_bin = open("input/knowit19_output.bin", "wb")

# Flatten the whole input file into a single digit string.
s = ''.join(open("input/knowit19").readlines()).replace("\n", '')

for i in range(0, len(s), 2):
    pass
    #out_bin.write(chr(int(s[i:i + 2])).encode("ascii"))

# Image geometry: 21 rows, width derived from the number of digit pairs.
height = 21
width = int(len(s) / (height * 2))

# PGM (P2) header: width, height, max gray value 99.
out.write("P2\n" + str(width) + ' ' + str(height) + "\n99\n")

for i in range(0, len(s), 2):
    # Even pairs become white (99), odd pairs black (0).
    letter = '99' if int(s[i:i+2]) % 2 == 0 else '0'
    if len(letter) < 2:
        letter = ' ' + letter
    out.write(letter + ' ')
    if (i + 2) % width == 0:
        out.write("\n")

# Leftover exploration below: this loop currently does nothing useful.
for line in open("input/knowit19").readlines():
    line = line.strip()
    # print(int(line)&0xff)

# NOTE(review): ``str`` shadows the builtin here; harmless in this script
# but worth renaming if the code is ever extended.
str = ''.join(open("input/knowit19").readlines()).replace("\n", '')

# Frequency table of the two-digit values (skipping the first pair).
freq = {}
for i in range(2, len(str), 2):
    v = int(str[i:i+2])
    v_diff = v - int(str[i-2:i])
    if v not in freq:
        freq[v] = 0
    freq[v] += 1

for k in freq:
    print(k, freq[k])

"""
v = int(str)

while v:
    x = v & 0xff
    print(chr(x))
    v >>= 8
print(v)"""
"mats@lindh.no"
] | mats@lindh.no |
51d60aa46fddbf0f3fe82bc84c9e6e73eba242fd | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_0/_pkg0_0_0/_pkg0_0_0_1/_pkg0_0_0_1_1/_mod0_0_0_1_1_4.py | 837b2352d702ad8b1907a0715fb60cbf0eaa700d | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 128 | py | name0_0_0_1_1_4_0 = None
# Auto-generated placeholder names (apparently IDE completion test data);
# the values themselves are irrelevant.
name0_0_0_1_1_4_1 = None
name0_0_0_1_1_4_2 = None
name0_0_0_1_1_4_3 = None
name0_0_0_1_1_4_4 = None | [
"mikhail.golubev@jetbrains.com"
] | mikhail.golubev@jetbrains.com |
4bffa371b0f85e0309c820ac059e41d57de17199 | d094ba0c8a9b1217fbf014aa79a283a49aabe88c | /env/lib/python3.6/site-packages/celery/tests/backends/test_couchbase.py | 3dc6aadd0b7a2f0ce6914768e70f511615cc8346 | [
"Apache-2.0"
] | permissive | Raniac/NEURO-LEARN | d9274e0baadd97bb02da54bdfcf6ca091fc1c703 | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | refs/heads/master | 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 | Apache-2.0 | 2022-12-09T21:01:00 | 2019-04-18T03:57:00 | CSS | UTF-8 | Python | false | false | 4,782 | py | from __future__ import absolute_import
from celery.backends import couchbase as module
from celery.backends.couchbase import CouchBaseBackend
from celery.exceptions import ImproperlyConfigured
from celery import backends
from celery.tests.case import (
AppCase, MagicMock, Mock, SkipTest, patch, sentinel,
)
# The couchbase driver is optional: when it cannot be imported, the tests
# below skip themselves in setup().
try:
    import couchbase
except ImportError:
    couchbase = None  # noqa

# Bucket name for a live-server run; unused by the mocked tests below.
COUCHBASE_BUCKET = 'celery_bucket'
class test_CouchBaseBackend(AppCase):
    """Unit tests for the Couchbase result backend.

    Every Couchbase connection is mocked, so no live server is required;
    the whole class is skipped when the ``couchbase`` driver is absent.
    """

    def setup(self):
        if couchbase is None:
            raise SkipTest('couchbase is not installed.')
        self.backend = CouchBaseBackend(app=self.app)

    def test_init_no_couchbase(self):
        """Constructing the backend without the couchbase driver raises."""
        prev, module.couchbase = module.couchbase, None
        try:
            with self.assertRaises(ImproperlyConfigured):
                CouchBaseBackend(app=self.app)
        finally:
            # Restore the module-level reference for the other tests.
            module.couchbase = prev

    def test_init_no_settings(self):
        """An empty settings list is rejected as misconfiguration."""
        self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = []
        with self.assertRaises(ImproperlyConfigured):
            CouchBaseBackend(app=self.app)

    def test_init_settings_is_None(self):
        """``None`` settings are accepted (defaults are used)."""
        self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = None
        CouchBaseBackend(app=self.app)

    def test_get_connection_connection_exists(self):
        # A cached connection must be returned as-is, without opening a
        # new couchbase Connection.
        with patch('couchbase.connection.Connection') as mock_Connection:
            self.backend._connection = sentinel._connection
            connection = self.backend._get_connection()
            self.assertEqual(sentinel._connection, connection)
            self.assertFalse(mock_Connection.called)

    def test_get(self):
        """test_get

        CouchBaseBackend.get should return the stored value for a key;
        the db connection to couchbase is mocked.

        TODO Should test on key not exists
        """
        self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = {}
        x = CouchBaseBackend(app=self.app)
        x._connection = Mock()
        mocked_get = x._connection.get = Mock()
        mocked_get.return_value.value = sentinel.retval
        # should return the mocked stored value
        self.assertEqual(x.get('1f3fab'), sentinel.retval)
        x._connection.get.assert_called_once_with('1f3fab')

    def test_set(self):
        """test_set

        CouchBaseBackend.set should return None and take two params;
        the db connection to couchbase is mocked.
        """
        self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = None
        x = CouchBaseBackend(app=self.app)
        x._connection = MagicMock()
        x._connection.set = MagicMock()
        # should return None
        self.assertIsNone(x.set(sentinel.key, sentinel.value))

    def test_delete(self):
        """test_delete

        CouchBaseBackend.delete should return None and take one key;
        the db connection to couchbase is mocked.

        TODO Should test on key not exists
        """
        self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = {}
        x = CouchBaseBackend(app=self.app)
        x._connection = Mock()
        mocked_delete = x._connection.delete = Mock()
        mocked_delete.return_value = None
        # should return None
        self.assertIsNone(x.delete('1f3fab'))
        x._connection.delete.assert_called_once_with('1f3fab')

    def test_config_params(self):
        """test_config_params

        celery.conf.CELERY_COUCHBASE_BACKEND_SETTINGS is properly set
        (note: the string port '1234' is coerced to int by the backend).
        """
        self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = {
            'bucket': 'mycoolbucket',
            'host': ['here.host.com', 'there.host.com'],
            'username': 'johndoe',
            'password': 'mysecret',
            'port': '1234',
        }
        x = CouchBaseBackend(app=self.app)
        self.assertEqual(x.bucket, 'mycoolbucket')
        self.assertEqual(x.host, ['here.host.com', 'there.host.com'],)
        self.assertEqual(x.username, 'johndoe',)
        self.assertEqual(x.password, 'mysecret')
        self.assertEqual(x.port, 1234)

    def test_backend_by_url(self, url='couchbase://myhost/mycoolbucket'):
        # The couchbase:// scheme must resolve to this backend class.
        from celery.backends.couchbase import CouchBaseBackend
        backend, url_ = backends.get_backend_by_url(url, self.app.loader)
        self.assertIs(backend, CouchBaseBackend)
        self.assertEqual(url_, url)

    def test_backend_params_by_url(self):
        # All connection parameters can be parsed out of a backend URL.
        url = 'couchbase://johndoe:mysecret@myhost:123/mycoolbucket'
        with self.Celery(backend=url) as app:
            x = app.backend
            self.assertEqual(x.bucket, 'mycoolbucket')
            self.assertEqual(x.host, 'myhost')
            self.assertEqual(x.username, 'johndoe')
            self.assertEqual(x.password, 'mysecret')
            self.assertEqual(x.port, 123)
"leibingye@outlook.com"
] | leibingye@outlook.com |
3264b1c0d30de6484403a37d07b39896f9a20180 | ee4768fe781f5099e4fee5a5d6d1b53146d21f80 | /src/pyutil/zeroconf.py | 8cd6428b09322cc329a3a760cb0492c81d261946 | [
"LicenseRef-scancode-public-domain",
"Unlicense",
"CC0-1.0",
"BSD-3-Clause"
] | permissive | nuin/ampify | e55eff2953ae25907df52a909ecb7be7e468c9ae | dd3ed2eece37652e604f223658c028e01e6bdfa3 | refs/heads/master | 2021-01-15T23:50:50.748938 | 2011-01-29T04:36:41 | 2011-01-29T04:36:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,373 | py | # Public Domain (-) 2010-2011 The Ampify Authors.
# See the Ampify UNLICENSE file for details.
"""
================
ZeroConf Support
================
This module provides support functions to register and query ZeroConf records.
The ``register`` function returns either a ``1`` or a ``0`` to indicate a
successful or failed registration.
>>> register('foo', '_test._tcp', 1234)
1
And, similarly, the ``query`` function can be used to find registrations for a
given ``regtype``. It takes an optional ``timeout`` value (in seconds) as a
second parameter, e.g.
>>> query('_test._tcp', 1.0)
{u'foo._test._tcp.local.': {...'port': 1234...}}
"""
import atexit
import threading
from select import select
from time import time
try:
import pybonjour
except Exception:
pybonjour = None
# Thread-local scratch space used to pass results between the blocking
# calls below (register/query) and their pybonjour callbacks.
state = threading.local()
state.announce = None
state.query = None
state.current = None
state.timeout = None
def registration_callback(sdRef, flags, errorCode, name, regtype, domain):
    # Record the registration outcome (1 = success, 0 = failure) for
    # register() to pick up after DNSServiceProcessResult returns.
    if errorCode == pybonjour.kDNSServiceErr_NoError:
        state.announce = 1
    else:
        state.announce = 0
def register(name, regtype, port):
    """Register a ZeroConf service and return 1 on success, 0 on failure.

    Returns ``None`` when the pybonjour bindings are unavailable.  The
    registration is kept alive for the lifetime of the process; the
    underlying service reference is closed by an atexit hook.
    """
    if not pybonjour:
        return
    sdRef = pybonjour.DNSServiceRegister(
        name=name, regtype=regtype, port=port, callBack=registration_callback
    )
    # Bug fix: this call used to sit *after* the try/finally block, i.e.
    # after an unconditional ``return`` — it was unreachable, so the
    # service reference was never scheduled for cleanup.
    atexit.register(sdRef.close)
    try:
        while 1:
            # No select timeout: block until the daemon has a result.
            ready = select([sdRef], [], [])
            if sdRef in ready[0]:
                pybonjour.DNSServiceProcessResult(sdRef)
            # registration_callback has now set state.announce.
            return state.announce
    finally:
        state.announce = None
def resolve_callback(
    sdRef, flags, interfaceIndex, errorCode, fullname, hosttarget, port,
    txtRecord
):
    # On successful resolution, publish the record started by
    # query_callback into the shared results dict, filling in host/port.
    if errorCode == pybonjour.kDNSServiceErr_NoError:
        record = state.query[fullname] = state.current
        record['host'] = hosttarget
        record['port'] = port
def query_callback(
    sdRef, flags, interfaceIndex, errorCode, serviceName, regtype, replyDomain
):
    # Browse callback: for each *added* service, synchronously resolve
    # its host/port through DNSServiceResolve + resolve_callback.
    if errorCode != pybonjour.kDNSServiceErr_NoError:
        return
    if not (flags & pybonjour.kDNSServiceFlagsAdd):
        # Ignore removal notifications; only additions are recorded.
        return
    if state.timeout:
        timeout = state.timeout
    else:
        timeout = None
    # Start the record for this service; resolve_callback completes it.
    state.current = {
        'name': serviceName,
        'type': regtype
    }
    sdRef = pybonjour.DNSServiceResolve(
        0, interfaceIndex, serviceName, regtype, replyDomain, resolve_callback
    )
    try:
        while 1:
            ready = select([sdRef], [], [], timeout)
            if sdRef not in ready[0]:
                # Timed out without a resolution; give up on this service.
                break
            return pybonjour.DNSServiceProcessResult(sdRef)
    finally:
        state.current = None
        sdRef.close()
def query(regtype, timeout=5.0):
    """Browse for services of ``regtype``; return {fullname: record} dict.

    Returns an empty dict when the pybonjour bindings are unavailable.
    """
    if not pybonjour:
        return {}
    sdRef = pybonjour.DNSServiceBrowse(regtype=regtype, callBack=query_callback)
    start = time()
    if timeout:
        state.timeout = timeout
    state.query = {}
    try:
        while (time() - start) <= timeout:
            ready = select([sdRef], [], [], timeout)
            if sdRef in ready[0]:
                # query_callback fills state.query via resolve_callback.
                pybonjour.DNSServiceProcessResult(sdRef)
            return state.query
    finally:
        state.query = None
        state.timeout = None
        sdRef.close()
if __name__ == '__main__':
    # Run the doctest examples embedded in the module docstring.
    import doctest
    doctest.testmod(optionflags=doctest.ELLIPSIS + doctest.NORMALIZE_WHITESPACE)
| [
"tav@espians.com"
] | tav@espians.com |
4e3bed99e1ea699ca9f133ea0ba788cc2e25882b | 2451ca9bc9ae43bd3b070fa362aa13646ff06f13 | /03_Standard_Library/unittest[Unit_testing_framework]/_note_unittest.py | 6c98c6865211e88f416708deb5099c1269bdaaf9 | [] | no_license | MacHu-GWU/six-demon-bag | 5cd1cf5d56d4c42cff013ab80dd4fc838add7195 | 10d772d6b876086f64db39f6ddbc07e08e35a122 | refs/heads/master | 2020-05-17T17:26:15.961833 | 2015-09-15T21:50:20 | 2015-09-15T21:50:20 | 26,669,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | ##################################
#encoding=utf8 #
#version =py27, py33 #
#author =sanhe #
#date =2014-11-15 #
# #
# (\ (\ #
# ( -.-)o I am a Rabbit! #
# o_(")(") #
# #
##################################
"""
Ref = https://docs.python.org/2/library/unittest.html#basic-example
useful method:
assertEqual to check for an expected result
assertTrue to verify a condition
assertRaises verify that an expected exception gets raised
"""
from __future__ import print_function
import random
import unittest
class TestSequenceFunctions(unittest.TestCase):
    """Exercise random.shuffle/choice/sample, following the stdlib docs example."""

    def setUp(self):
        # Bug fix: on Python 3 ``range(10)`` is an immutable lazy sequence,
        # so random.shuffle/self.seq.sort would raise.  ``list(range(10))``
        # works on both Python 2 and 3, as the file header promises.
        self.seq = list(range(10))

    def test_shuffle(self):
        # make sure the shuffled sequence does not lose any elements
        random.shuffle(self.seq)
        self.seq.sort()
        self.assertEqual(self.seq, list(range(10)))
        # should raise an exception for an immutable sequence
        self.assertRaises(TypeError, random.shuffle, (1, 2, 3))

    def test_choice(self):
        element = random.choice(self.seq)
        self.assertIn(element, self.seq)

    def test_sample(self):
        # Requesting more elements than available must fail ...
        with self.assertRaises(ValueError):
            random.sample(self.seq, 20)
        # ... and sampled elements must all come from the population.
        for element in random.sample(self.seq, 5):
            self.assertIn(element, self.seq)
if __name__ == '__main__':
    # Discover and run the TestCase above with the standard CLI runner.
    unittest.main()
"husanhe@gmail.com"
] | husanhe@gmail.com |
5d72e5ef4d774b54baceacc28da305d03a59b235 | c499492bec3337319e9b186645d19edd019ca221 | /raSAT-0.2/starexec_run_default_0.2.py | b0f9baa22c4b3e4c3d145af736cd0a106ef30f92 | [] | no_license | tungvx/smt_test_tools | 6f60e186f86120d35c9d1479333de179a9296d96 | f466b0faa4a90363c905f9acba3f7f5fbf11427e | refs/heads/master | 2020-12-06T13:12:29.198696 | 2017-12-25T06:03:57 | 2017-12-25T06:03:57 | 67,702,683 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,022 | py | #!/usr/bin/env python
# Run: python smt.py filename.smt2 timeout
# timeout is in seconds
import os
import subprocess
import sys
import stat
import time
# Directory containing this script; the raSAT binaries are expected beside it.
current_path = os.path.dirname(os.path.realpath(__file__))
def remove_tmp(filename, version):
    """Delete raSAT scratch files for ``filename``/``version``, ignoring missing ones."""
    base = os.path.splitext(filename)[0]
    scratch_files = (
        filename + '.' + version + '.tmp',
        base + '.' + version + '.out',
        base + '.' + version + '.in',
    )
    for path in scratch_files:
        try:
            os.remove(path)
        except OSError:
            # File was never created or already removed — nothing to do.
            pass
def run_raSAT (filename, bounds, sbox, timeout):
    """Run one raSAT-0.2 attempt; return (result_string, possibly-shrunk sbox).

    The solver writes its verdict to ``<filename>.0.2.tmp``; "unknown" when
    it could not decide within the box size / time budget.
    """
    startTime = time.time()
    raSATResult = "unknown"
    # remove tmps files:
    remove_tmp(filename, "0.2")
    subprocess.call([os.path.join(current_path, "./raSAT-0.2"), filename, bounds, 'sbox=' + str(sbox), 'tout=' + str(timeout-(time.time() - startTime))])
    try:
        with open(filename + '.0.2.tmp', 'r') as outfile:
            raSATResult = outfile.read().rstrip()
            outfile.close()
        if raSATResult == "unknown":
            # Retry with a 10x smaller search box.
            sbox /= 10
            remove_tmp(filename, "0.2")
            # NOTE(review): this Popen is fire-and-forget — the result of
            # the retry is never awaited or read here; presumably the
            # caller's loop re-invokes run_raSAT instead.  Verify intent.
            proc2 = subprocess.Popen([os.path.join(current_path, "./raSAT-0.2"), filename, bounds, 'sbox=' + str(sbox), 'tout=' + str(timeout-(time.time() - startTime))])
    except IOError:
        # Solver produced no output file (e.g. killed by timeout).
        pass
    return raSATResult, sbox
def run(filename, initLowerBound, initUpperBound, sbox, timeout):
    """Drive raSAT on ``filename``: retry while "unknown", widen bounds on "unsat".

    Prints the final verdict and cleans up the scratch files.
    """
    lowerBound = initLowerBound
    upperBound = initUpperBound
    raSATResult = "unknown"
    startTime = time.time()
    # Keep retrying within the initial bounds until the solver decides;
    # run_raSAT shrinks sbox on each "unknown" round.
    while (raSATResult == 'unknown'):
        (raSATResult, sbox) = run_raSAT(filename, 'lb=' + str(lowerBound) + ' ' + str(upperBound), sbox, timeout - (time.time() - startTime))
    # Bounded "unsat" is not conclusive: retry once over (-inf, inf).
    if raSATResult == 'unsat':
        (raSATResult, sbox) = run_raSAT(filename, 'lb=-inf inf', sbox, timeout - (time.time() - startTime))
    print (raSATResult)
    # remove tmps files:
    remove_tmp(filename, "0.2")
# get timeout from environment
# STAREXEC_CPU_LIMIT is set by the StarExec job runner; float(None) will
# raise TypeError if it is missing — the variable is assumed present.
timeout = float(os.environ.get('STAREXEC_CPU_LIMIT'))
# argv[1] is the .smt2 benchmark; initial bounds [-10, 10], sbox 0.1.
run(sys.argv[1], -10, 10, 0.1, timeout)
"toilatung90@gmail.com"
] | toilatung90@gmail.com |
69be6a4feb0fe66a029a87ff314c6f77dd9fb8ff | 39de3097fb024c67a00c8d0e57c937d91f8b2cc9 | /Graphs/Good_graphs.py | b9f3b6c78d943e723f30cb7b4fcce9287c4d89f0 | [] | no_license | srajsonu/InterviewBit-Solution-Python | 4f41da54c18b47db19c3c0ad0e5efa165bfd0cd0 | 6099a7b02ad0d71e08f936b7ac35fe035738c26f | refs/heads/master | 2023-03-07T05:49:15.597928 | 2021-02-24T18:20:07 | 2021-02-24T18:20:07 | 249,359,666 | 0 | 2 | null | 2020-10-06T10:54:07 | 2020-03-23T07:09:53 | Python | UTF-8 | Python | false | false | 849 | py | class DSU:
def __init__(self,m):
self.parent=[i for i in range(m+1)]
self.height=[0 for _ in range(m+1)]
self.ans=m
def find_root(self,A):
if self.parent[A]==A:
return A
return self.find_root(self.parent[A])
def Union(self,A,B):
C=self.find_root(A)
D=self.find_root(B)
if C==D:
return
if self.height[C] < self.height[D]:
C,D = D,C
self.parent[D]=C
if self.height[C]==self.height[D]:
self.height[C]+=1
self.ans-=1
class Solution:
    """Count the remaining groups after linking every node i+1 to A[i]."""

    def Solve(self, A):
        dsu = DSU(len(A))
        # Nodes are 1-based.  An entry equal to 1 just decrements the
        # group count directly; any other entry merges node and parent.
        for node, parent in enumerate(A, start=1):
            if parent == 1:
                dsu.ans -= 1
            else:
                dsu.Union(node, parent)
        return dsu.ans
# Example run printing the number of resulting groups for this input.
A=[1,2,1,2]
B=Solution()
print(B.Solve(A))
| [
"srajsonu02@gmail.com"
] | srajsonu02@gmail.com |
8befe5a68bd5acbf2c15b8f57452161d188e1aa6 | b8bd4fa3b9d7a39c13f4d897e71f933ceb542cbd | /examples/beta_bernoulli_tf.py | ddf9eaef35b18932c469810138e071ac69c30a41 | [
"Apache-2.0"
] | permissive | chagge/edward | 0b481b8de08ddc1e4be6ea1d868e5c8c0da0bb06 | 1e5563eba7bad5d6338615dff9dfcceef2cd436f | refs/heads/master | 2021-01-17T02:15:00.119122 | 2016-05-12T01:27:47 | 2016-05-12T07:04:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | #!/usr/bin/env python
"""
A simple example from Stan. The model is written in TensorFlow.
Probability model
Prior: Beta
Likelihood: Bernoulli
Variational model
Likelihood: Mean-field Beta
"""
import edward as ed
import tensorflow as tf
from edward.stats import bernoulli, beta
from edward.variationals import Variational, Beta
class BetaBernoulli:
    """
    p(x, z) = Bernoulli(x | z) * Beta(z | 1, 1)
    """
    def __init__(self):
        # Single latent variable: the Bernoulli success probability z.
        self.num_vars = 1

    def log_prob(self, xs, zs):
        # Joint log-density per latent sample in ``zs``: the Beta(1, 1)
        # prior plus the Bernoulli log-likelihood summed over all
        # observations ``xs``.
        # NOTE(review): tf.pack/tf.unpack are pre-1.0 TensorFlow APIs
        # (later renamed stack/unstack); this file targets old TF.
        log_prior = beta.logpdf(zs, a=1.0, b=1.0)
        log_lik = tf.pack([tf.reduce_sum(bernoulli.logpmf(xs, z)) \
                           for z in tf.unpack(zs)])
        return log_lik + log_prior
# Fit a mean-field Beta approximation to the Beta-Bernoulli posterior.
ed.set_seed(42)
model = BetaBernoulli()
variational = Variational()
variational.add(Beta(model.num_vars))
# Ten Bernoulli observations (two ones).
data = ed.Data(tf.constant((0, 1, 0, 0, 0, 0, 0, 0, 0, 1), dtype=tf.float32))

inference = ed.MFVI(model, variational, data)
inference.run(n_iter=10000)
| [
"dustinviettran@gmail.com"
] | dustinviettran@gmail.com |
ca2d3bcceb6ab5411980dd400babb51d77d2eb51 | 6249a81e81c3b3b37e6d03cd7112e9a981cec8e2 | /python/jobovy/apogee/setup.py | ee1fa5ae558eecf49e17c2bed29820ca33297774 | [
"BSD-3-Clause"
] | permissive | dnidever/apogee | e883c7d352abb1b99c938f7de38313b5cd4d2164 | 83ad7496a0b4193df9e2c01b06dc36cb879ea6c1 | refs/heads/master | 2021-07-06T22:23:13.839478 | 2020-08-08T22:42:42 | 2020-08-08T22:42:42 | 149,676,202 | 0 | 0 | BSD-3-Clause | 2018-09-20T22:07:43 | 2018-09-20T22:07:43 | null | UTF-8 | Python | false | false | 5,445 | py | import os
from setuptools import setup #, Extension
import sys
import shutil
import subprocess
import tempfile
long_description = "Tools for APOGEE data analysis; see `here <https://github.com/jobovy/apogee>`__ for further documentation"

# Install FERRE when specifying --install-ferre; needs a FORTRAN compiler, e.g., http://hpc.sourceforge.net/
# Each custom flag is removed from sys.argv so setuptools never sees it.
try:
    ferre_pos= sys.argv.index('--install-ferre')
except ValueError:
    _INSTALL_FERRE= False
else:
    del sys.argv[ferre_pos]
    _INSTALL_FERRE= True
# --ferre-noopenmp: build FERRE without OpenMP support.
try:
    ferre_openmp= sys.argv.index('--ferre-noopenmp')
except ValueError:
    _FERRE_NO_OPENMP= False
else:
    del sys.argv[ferre_openmp]
    _FERRE_NO_OPENMP= True
# --ferre-flen N: maximum filename length patched into FERRE's share.f90.
try:
    ferre_flen= sys.argv.index('--ferre-flen')
except ValueError:
    _FERRE_FLEN= 180
else:
    _FERRE_FLEN= int(sys.argv[ferre_flen+1])
    # Remove both the flag and its value (same index twice after the shift).
    del sys.argv[ferre_flen]
    del sys.argv[ferre_flen]
if _INSTALL_FERRE:
    # Code to determine the binary install directory, from http://jasonstitt.com/setuptools-bin-directory
    from setuptools import Distribution
    from setuptools.command.install import install

    class OnlyGetScriptPath(install):
        # Install command stub: records the scripts path without installing.
        def run(self):
            self.distribution.install_scripts = self.install_scripts

    def get_setuptools_script_dir():
        " Get the directory setuptools installs scripts to for current python "
        dist = Distribution({'cmdclass': {'install': OnlyGetScriptPath}})
        dist.dry_run = True  # not sure if necessary
        dist.parse_config_files()
        command = dist.get_command_obj('install')
        command.ensure_finalized()
        command.run()
        return dist.install_scripts
if _INSTALL_FERRE:
    # Download, patch, build, and install the FERRE FORTRAN code.
    # Bug fix in this block: three of the failure messages below applied
    # ``% _FERRE_URL`` to strings without any %s placeholder, which raises
    # "TypeError: not all arguments converted" in every error path.  The
    # spurious format operations have been removed.
    #_FERRE_FILE= 'ferre_4.5.6.tar.gz'
    _FERRE_FILE= 'ferre_4.6.6.tar.gz'
    #_FERRE_URL= 'http://leda.as.utexas.edu/ferre/%s' % _FERRE_FILE
    _FERRE_URL= 'http://www.as.utexas.edu/~hebe/ferre/%s' % _FERRE_FILE
    print('\033[1m'+"Downloading and installing FERRE from %s ..." % _FERRE_URL +'\033[0m')
    # Create temporary directory
    tmpdir= tempfile.mkdtemp(dir='./')
    os.mkdir(os.path.join(tmpdir,'ferre'))
    try:
        subprocess.check_call(['wget',_FERRE_URL,'-O',
                               os.path.join(tmpdir,'ferre',_FERRE_FILE)])
    except subprocess.CalledProcessError:
        print('\033[1m'+"Downloading FERRE from %s failed ..." % _FERRE_URL +'\033[0m')
    # Unpack and install
    os.chdir(os.path.join(tmpdir,'ferre'))
    try:
        subprocess.check_call(['tar','xvzf',_FERRE_FILE])
    except subprocess.CalledProcessError:
        print('\033[1m'+"Untarring/gunzipping FERRE failed ..."+'\033[0m')
    os.chdir('src')
    # Change flen in share.f90 (maximum filename length, see --ferre-flen)
    with open("tmp.f90", "w") as fout:
        with open("share.f90", "r") as fin:
            for line in fin:
                fout.write(line.replace('flen=120','flen=%i' % _FERRE_FLEN))
    os.rename('tmp.f90','share.f90')
    # Change output format in ferre.f90 (wider name field, more decimals)
    with open("tmp.f90", "w") as fout:
        with open("ferre.f90", "r") as fin:
            for line in fin:
                fout.write(line.replace("write(3,'(1x,a30,100(1x,F9.3))')",
                                        "write(3,'(1x,a40,100(1x,F9.4))')"))
    os.rename('tmp.f90','ferre.f90')
    try:
        if _FERRE_NO_OPENMP:
            subprocess.check_call(['make','OPT=-O2'])
        else:
            subprocess.check_call(['make'])
    except subprocess.CalledProcessError:
        print('\033[1m'+"Compiling FERRE failed ..."+'\033[0m')
    # Move the built binaries up to the package root.
    os.rename('a.out','../../../ferre')
    os.rename('ascii2bin','../../../ascii2bin')
    # Remove everything
    os.chdir('../../../')
    try:
        subprocess.check_call(['rm','-rf',tmpdir])
    except subprocess.CalledProcessError:
        print('\033[1m'+"Removing FERRE temporary files failed ..."+'\033[0m')
    shutil.copy('ferre',get_setuptools_script_dir())
    shutil.copy('ascii2bin',get_setuptools_script_dir())
# Standard setuptools metadata for the apogee package; ``install_requires``
# pulls galpy/isodist from the GitHub tarballs in ``dependency_links``.
setup(name='apogee',
      version='1.',
      description='APOGEE data tools',
      author='Jo Bovy',
      author_email='bovy@ias.edu',
      license='New BSD',
      long_description=long_description,
      url='https://github.com/jobovy/apogee',
      package_dir = {'apogee/': ''},
      packages=['apogee','apogee/tools','apogee/select','apogee/test',
                'apogee/util','apogee/samples','apogee/spec','apogee/modelatm',
                'apogee/modelspec'],
      package_data={'apogee/samples':['data/rcmodel_mode_jkz_ks_parsec_newlogg.sav',
                                      'data/rcmodel_mode_jkz_h_parsec_newlogg.sav',
                                      'data/rcmodel_mass_agez.sav',
                                      'data/rcmodel_mass_agez_coarseage.sav',
                                      'data/rcmodel_omega_agez.sav'],
                    'apogee/spec':['filter/dr12/*.filt',
                                   'cannon/training/*.txt',
                                   'cannon/trained/*.txt'],
                    'apogee/modelspec':['scripts/makemoogmodel.awk'],},
      dependency_links = ['https://github.com/jobovy/galpy/tarball/master#egg=galpy',
                          'https://github.com/jobovy/isodist/tarball/master#egg=isodist'],
      install_requires=['numpy','scipy','matplotlib',
                        'astropy','galpy',
                        'isodist','periodictable','tqdm']
      )
| [
"holtz@nmsu.edu"
] | holtz@nmsu.edu |
6af1cb0e42170d903f01a9c7990a8f4ff4dc38c0 | 4e503761d091f3f284763d63c89861f6c26c1015 | /语法基础/jinzhi_base.py | ec273df6619f3f2be6874ed53805093eb88e0fd0 | [] | no_license | L-ingqin12/Algorithm_LanQiaobei | b2e08d755cacaaa0ff96108ca3f13d648b3b6fd7 | 9dd7b05eaf1cfc02eca52ee4f97466de961e592c | refs/heads/main | 2023-04-03T14:08:04.389344 | 2021-04-19T03:14:29 | 2021-04-19T03:14:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | import math
import cmath
import sys
import string
import heapq
import bisect
from queue import Queue,PriorityQueue,LifoQueue
from collections import Counter,deque
from itertools import permutations,combinations
from functools import cmp_to_key
# str=input()
# print(int(str,16))
# a=int(str,16)
# print("{:0b}-666".format(a,2))
# print("{:0o}".format(a))
# print("{:0x}".format(a))#X
def baseN(num, b):
    """Convert a non-negative integer ``num`` to its string form in base ``b``.

    Digits beyond 9 come from the table "ABCDEF" then "g".."z" (mixed case,
    kept for compatibility with the original recursive version), so bases
    up to 36 are supported.

    Raises:
        ValueError: if ``b`` is outside [2, 36] or ``num`` is negative —
            inputs on which the old recursive version recursed forever.
    """
    alphabet = "0123456789ABCDEFghijklmnopqrstuvwxyz"
    if not 2 <= b <= len(alphabet):
        raise ValueError("base must be in [2, 36]")
    if num < 0:
        raise ValueError("num must be non-negative")
    if num == 0:
        return "0"
    # Iterative short division: collect digits least-significant first,
    # then reverse.  Avoids the recursion-depth limit for huge numbers
    # and the lstrip("0") trick of the recursive formulation.
    digits = []
    while num:
        num, rem = divmod(num, b)
        digits.append(alphabet[rem])
    return ''.join(reversed(digits))
if __name__=="__main__":
    # Commented-out lines read (base n, value, target base m) from stdin
    # and convert between bases; kept for contest use.
    # n=int(input().strip())
    # str1=input().strip()
    # m=int(input().strip())
    # temp=int(str1,n)
    # print(baseN(temp,m))
    # Smoke test: zero converts to "0" in any base.
    print(baseN(0,16))
| [
"54010254+lixiang007666@users.noreply.github.com"
] | 54010254+lixiang007666@users.noreply.github.com |
c1b1dc0c1b17afb0bda43f4b02f94622623041a8 | 9398d8433fdb29ee630a6ee43a07bc36a2adbd88 | /ryu/__init__.py | e7cc55df725de6f5401b96f6663236b53a10e5d9 | [] | no_license | bopopescu/OpenStack_Liberty_Control | ca5a21d0c32c55dc8c517f5c7c9938ce575a4888 | 0f6ec1b4d38c47776fdf8935266bcaef2464af4c | refs/heads/master | 2022-12-03T10:41:53.210667 | 2016-03-29T06:25:58 | 2016-03-29T06:25:58 | 282,089,815 | 0 | 0 | null | 2020-07-24T01:04:15 | 2020-07-24T01:04:14 | null | UTF-8 | Python | false | false | 683 | py | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Release version as a tuple (for programmatic comparison) and mirrored
# as the conventional dotted string.
version_info = (3, 29, 1)
version = '%d.%d.%d' % version_info
| [
"tony.pig@gmail.com"
] | tony.pig@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.