| blob_id (string, length 40) | directory_id (string, length 40) | path (string, length 3 to 288) | content_id (string, length 40) | detected_licenses (list, length 0 to 112) | license_type (string, 2 classes) | repo_name (string, length 5 to 115) | snapshot_id (string, length 40) | revision_id (string, length 40) | branch_name (string, 684 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 147 classes) | src_encoding (string, 25 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 128 to 12.7k) | extension (string, 142 classes) | content (string, length 128 to 8.19k) | authors (list, length 1) | author_id (string, length 1 to 132) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
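Each row below follows the schema above: one source file per row, with its repository and GitHub Archive metadata alongside the file content. As a minimal sketch of how a split with this schema could be streamed and inspected with the `datasets` library; the Hub id `user/python-files` is a hypothetical placeholder, since the actual dataset id is not given here:

```python
# Minimal sketch, assuming the table is published on the Hugging Face Hub.
# NOTE: "user/python-files" is a hypothetical placeholder id, not the real dataset.
from datasets import load_dataset

ds = load_dataset("user/python-files", split="train", streaming=True)

for i, row in enumerate(ds):
    # Each row pairs one file's content with its repository metadata.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    if i == 2:
        break
```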
698adef33400954612ddb390d4e2b4be321adb6a
|
9788df18d5adaa469a0cb51f47309cd7401201e5
|
/alisdk/top/api/rest/SimbaNonsearchDemographicsUpdateRequest.py
|
a8f2e81f002dde34594772cd1fb1c4a9b8124c77
|
[] |
no_license
|
sevennothing/aliyunTestPrj
|
cf690ce4765497f1b16359b82ef64f1ef992713c
|
1b3e883d32c759e03fe5053c50e9a973f59bbffc
|
refs/heads/master
| 2021-01-17T03:15:59.082544
| 2015-03-11T14:16:58
| 2015-03-11T14:16:58
| 32,001,149
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
'''
Created by auto_sdk on 2014-11-20 12:53:43
'''
from top.api.base import RestApi


class SimbaNonsearchDemographicsUpdateRequest(RestApi):
    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        self.campaign_id = None
        self.demographic_id_price_json = None
        self.nick = None

    def getapiname(self):
        return 'taobao.simba.nonsearch.demographics.update'
|
[
"licj@out.lrserver"
] |
licj@out.lrserver
|
bc486f952345fcf08f137b8312608b15be52db9c
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03146/s029247670.py
|
51dd873af5ba4128376ae5aa863e5f55ee218fdc
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
import sys

s = int(input())
a = []
a.append(s)
i = 1
while True:
    n = 0
    if a[i-1] % 2 == 0:
        n = a[i-1] / 2
    else:
        n = a[i-1] * 3 + 1
    i += 1
    if n in a:
        print(i)
        sys.exit()
    a.append(n)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
3035286654d17c751f091358d055f45857303648
|
b136cbf689dfd1171679b1d7741ba910f2ed2161
|
/flask_appbuilder/messages.py
|
465d23c1e5a6b4d9d53374333a4046e1e9253990
|
[
"BSD-3-Clause"
] |
permissive
|
dbongo/Flask-AppBuilder
|
7b34b582f10eef2877b010128ea3d7bfa6f23907
|
2de58428507afec0595fa762e977f539448878d5
|
refs/heads/master
| 2020-12-25T22:06:48.882882
| 2013-12-16T23:39:27
| 2013-12-16T23:39:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
from flask.ext.babel import lazy_gettext as _

"""
This Module is not used.
Just use it to automate Babel extraction
"""

auto_translations_import = [
    _("Security"),
    _("List Users"),
    _("Base Permissions"),
    _("Views/Menus"),
    _("Permission on Views/Menus"),
    _("Search"),
    _("Back"),
    _("Save"),
    _("This field is required."),
    _("Not a valid date value"),
    _("No records found")
]
|
[
"danielvazgaspar@gmail.com"
] |
danielvazgaspar@gmail.com
|
fc55a5be31881904c162e9a36f5926be2272163b
|
930ef8a8ec0338e497be3a9475af1b5244f01dc1
|
/drl_net.py
|
2ae907fa51e49a14821b1db1b815e50dc6c805d8
|
[] |
no_license
|
xiaogaogaoxiao/DQN_user_grouping
|
837c48c051f32d848f135bebcea3410aeba68ca7
|
e694dcebacb74b1c0530adc892398616b15d0fc1
|
refs/heads/main
| 2023-04-17T07:46:08.182794
| 2021-04-30T15:14:42
| 2021-04-30T15:14:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,274
|
py
|
from collections import namedtuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from MecOpt import MecEnv
import math
import random

print(torch.__version__)

EPS_START = 0.8
EPS_END = 0.01
EPS_DECAY = 2000
steps_done = 0


class QNet(nn.Module):
    def __init__(self, n_inputs, n_outputs):
        hidden1 = 3 * n_outputs
        hidden2 = 2 * n_outputs
        super(QNet, self).__init__()
        self.fc1 = nn.Linear(n_inputs, hidden1)
        self.fc1.weight.data.normal_(0, 0.1)
        self.fc2 = nn.Linear(hidden1, hidden2)
        self.fc2.weight.data.normal_(0, 0.1)
        self.fc3 = nn.Linear(hidden2, n_outputs)
        self.fc3.weight.data.normal_(0, 0.1)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = torch.sigmoid(self.fc2(x))
        x = torch.tanh(self.fc3(x))
        return x


class dqn:
    def __init__(self,
                 n_inputs=1,
                 n_outputs=1,
                 memory_size=1,
                 batch_size=32,
                 learning_rate=1e-3,
                 training_interval=10,
                 epsilon_greedy=0.9,
                 gamma=0.6,
                 ):
        self.memory_low = 1000
        self.state_dim = n_inputs
        self.action_dim = n_outputs
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.training_interval = training_interval
        self.epsilon_greedy = epsilon_greedy
        self.gamma = gamma
        self.eval_net = QNet(self.state_dim, self.action_dim)
        self.target_net = QNet(self.state_dim, self.action_dim)
        self.learn_step_counter = 0
        self.memory_counter = 0
        self.memory = np.zeros((self.memory_size, self.state_dim * 2 + 2))
        self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=learning_rate)
        self.criterion = nn.MSELoss()

    def choose_action(self, s):
        global steps_done
        sample = random.random()
        eps_threshold = EPS_END + (EPS_START - EPS_END) * math.exp(-1. * steps_done / EPS_DECAY)
        steps_done += 1
        s = Variable(torch.unsqueeze(torch.Tensor(s), 0))
        if sample > eps_threshold:
            action = torch.max(self.eval_net(s), 1)[1].data[0]
            return action
        else:
            return random.randrange(self.action_dim)

    def store_memory(self, s, a, r, s_):
        transition = np.hstack((s, [a, r], s_))
        index = self.memory_counter % self.memory_size
        self.memory[index, :] = transition
        self.memory_counter += 1

    def learn(self):
        # sample experience: draw a mini batch from replay memory
        if self.memory_low <= self.memory_counter < self.memory_size:
            sample_index = np.random.choice(self.memory_counter, self.batch_size)
        elif self.memory_counter >= self.memory_size:
            sample_index = np.random.choice(self.memory_size, self.batch_size)
        else:
            return
        # target net parameter update
        if self.learn_step_counter % self.training_interval == 0:
            self.target_net.load_state_dict(self.eval_net.state_dict())
        self.learn_step_counter += 1
        # data from mini batch
        b_memory = self.memory[sample_index, :]
        b_s = Variable(torch.FloatTensor(b_memory[:, :self.state_dim]))
        b_a = Variable(torch.LongTensor(b_memory[:, self.state_dim:self.state_dim + 1].astype(int)))
        b_r = Variable(torch.FloatTensor(b_memory[:, self.state_dim + 1: self.state_dim + 2]))
        b_s_ = Variable(torch.FloatTensor(b_memory[:, -self.state_dim:]))
        self.eval_net.eval()
        self.target_net.eval()
        q_eval = self.eval_net(b_s).gather(1, b_a)  # shape (batch, 1)
        q_next = self.target_net(b_s_).detach()  # detach
        q_target = b_r + self.gamma * q_next.max(1)[0].view(self.batch_size, 1)  # shape (batch, 1)
        loss = self.criterion(q_target, q_eval)  # MSE loss
        # update
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
|
[
"noreply@github.com"
] |
xiaogaogaoxiao.noreply@github.com
|
643fd19f16b4df78eeb49c578ac040f68bb0cae2
|
b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1
|
/tensorflow/python/kernel_tests/signal/dct_ops_test.py
|
51206abed17e08efa63d4f1a13a2483bc0fb34ff
|
[
"Apache-2.0"
] |
permissive
|
uve/tensorflow
|
e48cb29f39ed24ee27e81afd1687960682e1fbef
|
e08079463bf43e5963acc41da1f57e95603f8080
|
refs/heads/master
| 2020-11-29T11:30:40.391232
| 2020-01-11T13:43:10
| 2020-01-11T13:43:10
| 230,088,347
| 0
| 0
|
Apache-2.0
| 2019-12-25T10:49:15
| 2019-12-25T10:49:14
| null |
UTF-8
|
Python
| false
| false
| 7,880
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DCT operations."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import importlib

from absl.testing import parameterized
import numpy as np

from tensorflow.python.framework import test_util
from tensorflow.python.ops import spectral_ops_test_util
from tensorflow.python.ops.signal import dct_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging


def try_import(name):  # pylint: disable=invalid-name
  module = None
  try:
    module = importlib.import_module(name)
  except ImportError as e:
    tf_logging.warning("Could not import %s: %s" % (name, str(e)))
  return module


fftpack = try_import("scipy.fftpack")


def _modify_input_for_dct(signals, n=None):
  """Supporting function for the NumPy implementation of the DCT
  operations. If n < signal size, it returns the first n elements,
  else it pads the signal with zeros.
  """
  signal = np.array(signals)
  if n is None or n == signal.shape[-1]:
    signal_mod = signal
  elif n >= 1:
    signal_len = signal.shape[-1]
    if n <= signal_len:
      signal_mod = signal[..., 0:n]
    else:
      output_shape = list(signal.shape)
      output_shape[-1] = n
      signal_mod = np.zeros(output_shape)
      signal_mod[..., 0:signal.shape[-1]] = signal
  if n:
    assert signal_mod.shape[-1] == n
  return signal_mod


def _np_dct1(signals, n=None, norm=None):
  """Computes the DCT-I manually with NumPy."""
  # X_k = (x_0 + (-1)**k * x_{N-1} +
  #        2 * sum_{n=0}^{N-2} x_n * cos(\frac{pi}{N-1} * n * k))  k=0,...,N-1
  del norm
  signals_mod = _modify_input_for_dct(signals, n=n)
  dct_size = signals_mod.shape[-1]
  dct = np.zeros_like(signals_mod)
  for k in range(dct_size):
    phi = np.cos(np.pi * np.arange(1, dct_size - 1) * k / (dct_size - 1))
    dct[..., k] = 2 * np.sum(
        signals_mod[..., 1:-1] * phi, axis=-1) + (
            signals_mod[..., 0] + (-1)**k * signals_mod[..., -1])
  return dct


def _np_dct2(signals, n=None, norm=None):
  """Computes the DCT-II manually with NumPy."""
  # X_k = sum_{n=0}^{N-1} x_n * cos(\frac{pi}{N} * (n + 0.5) * k)  k=0,...,N-1
  signals_mod = _modify_input_for_dct(signals, n=n)
  dct_size = signals_mod.shape[-1]
  dct = np.zeros_like(signals_mod)
  for k in range(dct_size):
    phi = np.cos(np.pi * (np.arange(dct_size) + 0.5) * k / dct_size)
    dct[..., k] = np.sum(signals_mod * phi, axis=-1)
  # SciPy's `dct` has a scaling factor of 2.0 which we follow.
  # https://github.com/scipy/scipy/blob/v0.15.1/scipy/fftpack/src/dct.c.src
  if norm == "ortho":
    # The orthonormal scaling includes a factor of 0.5 which we combine with
    # the overall scaling of 2.0 to cancel.
    dct[..., 0] *= np.sqrt(1.0 / dct_size)
    dct[..., 1:] *= np.sqrt(2.0 / dct_size)
  else:
    dct *= 2.0
  return dct


def _np_dct3(signals, n=None, norm=None):
  """Computes the DCT-III manually with NumPy."""
  # SciPy's `dct` has a scaling factor of 2.0 which we follow.
  # https://github.com/scipy/scipy/blob/v0.15.1/scipy/fftpack/src/dct.c.src
  signals_mod = _modify_input_for_dct(signals, n=n)
  dct_size = signals_mod.shape[-1]
  signals_mod = np.array(signals_mod)  # make a copy so we can modify
  if norm == "ortho":
    signals_mod[..., 0] *= np.sqrt(4.0 / dct_size)
    signals_mod[..., 1:] *= np.sqrt(2.0 / dct_size)
  else:
    signals_mod *= 2.0
  dct = np.zeros_like(signals_mod)
  # X_k = 0.5 * x_0 +
  #       sum_{n=1}^{N-1} x_n * cos(\frac{pi}{N} * n * (k + 0.5))  k=0,...,N-1
  half_x0 = 0.5 * signals_mod[..., 0]
  for k in range(dct_size):
    phi = np.cos(np.pi * np.arange(1, dct_size) * (k + 0.5) / dct_size)
    dct[..., k] = half_x0 + np.sum(signals_mod[..., 1:] * phi, axis=-1)
  return dct


NP_DCT = {1: _np_dct1, 2: _np_dct2, 3: _np_dct3}
NP_IDCT = {1: _np_dct1, 2: _np_dct3, 3: _np_dct2}


class DCTOpsTest(parameterized.TestCase, test.TestCase):

  def _compare(self, signals, n, norm, dct_type, atol=5e-4, rtol=5e-4):
    """Compares (I)DCT to SciPy (if available) and a NumPy implementation."""
    np_dct = NP_DCT[dct_type](signals, n=n, norm=norm)
    tf_dct = dct_ops.dct(signals, n=n, type=dct_type, norm=norm).eval()
    self.assertAllClose(np_dct, tf_dct, atol=atol, rtol=rtol)
    np_idct = NP_IDCT[dct_type](signals, n=None, norm=norm)
    tf_idct = dct_ops.idct(signals, type=dct_type, norm=norm).eval()
    self.assertAllClose(np_idct, tf_idct, atol=atol, rtol=rtol)
    if fftpack:
      scipy_dct = fftpack.dct(signals, n=n, type=dct_type, norm=norm)
      self.assertAllClose(scipy_dct, tf_dct, atol=atol, rtol=rtol)
      scipy_idct = fftpack.idct(signals, type=dct_type, norm=norm)
      self.assertAllClose(scipy_idct, tf_idct, atol=atol, rtol=rtol)
    # Verify inverse(forward(s)) == s, up to a normalization factor.
    # Since `n` is not implemented for the IDCT operation, re-calculate
    # tf_dct without n.
    tf_dct = dct_ops.dct(signals, type=dct_type, norm=norm).eval()
    tf_idct_dct = dct_ops.idct(
        tf_dct, type=dct_type, norm=norm).eval()
    tf_dct_idct = dct_ops.dct(
        tf_idct, type=dct_type, norm=norm).eval()
    if norm is None:
      if dct_type == 1:
        tf_idct_dct *= 0.5 / (signals.shape[-1] - 1)
        tf_dct_idct *= 0.5 / (signals.shape[-1] - 1)
      else:
        tf_idct_dct *= 0.5 / signals.shape[-1]
        tf_dct_idct *= 0.5 / signals.shape[-1]
    self.assertAllClose(signals, tf_idct_dct, atol=atol, rtol=rtol)
    self.assertAllClose(signals, tf_dct_idct, atol=atol, rtol=rtol)

  @parameterized.parameters([
      [[2]], [[3]], [[10]], [[2, 20]], [[2, 3, 25]]])
  @test_util.run_deprecated_v1
  def test_random(self, shape):
    """Test randomly generated batches of data."""
    with spectral_ops_test_util.fft_kernel_label_map():
      with self.session(use_gpu=True):
        signals = np.random.rand(*shape).astype(np.float32)
        n = np.random.randint(1, 2 * signals.shape[-1])
        n = np.random.choice([None, n])
        # Normalization not implemented for orthonormal.
        self._compare(signals, n, norm=None, dct_type=1)
        for norm in (None, "ortho"):
          self._compare(signals, n=n, norm=norm, dct_type=2)
          self._compare(signals, n=n, norm=norm, dct_type=3)

  def test_error(self):
    signals = np.random.rand(10)
    # Unsupported type.
    with self.assertRaises(ValueError):
      dct_ops.dct(signals, type=5)
    # Invalid n.
    with self.assertRaises(ValueError):
      dct_ops.dct(signals, n=-2)
    # DCT-I normalization not implemented.
    with self.assertRaises(ValueError):
      dct_ops.dct(signals, type=1, norm="ortho")
    # DCT-I requires at least two inputs.
    with self.assertRaises(ValueError):
      dct_ops.dct(np.random.rand(1), type=1)
    # Unknown normalization.
    with self.assertRaises(ValueError):
      dct_ops.dct(signals, norm="bad")
    with self.assertRaises(NotImplementedError):
      dct_ops.dct(signals, axis=0)


if __name__ == "__main__":
  test.main()
|
[
"v-grniki@microsoft.com"
] |
v-grniki@microsoft.com
|
cfd943a80e044add71dc7c4249a4404a20ce5e87
|
cb848d0c80abb04c080155d1502d22391423c4e8
|
/build_isolated/sick_ldmrs_driver/catkin_generated/pkg.develspace.context.pc.py
|
def09cbfec31e6271e0038e6e4a28f39cdfcd982
|
[] |
no_license
|
MTU-Autobot/catkin_ws
|
d8bc9b0de46befc53282b9b7e6d338a7ff7e3a0c
|
cf104fe048c6101f50be1b87e181d80a4be3e770
|
refs/heads/master
| 2020-03-13T23:14:56.276075
| 2018-04-27T18:28:01
| 2018-04-27T18:28:01
| 131,331,599
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 883
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/ubuntu/catkin_ws/devel_isolated/sick_ldmrs_driver/include;/home/ubuntu/catkin_ws/src/sick_ldmrs_laser/sick_ldmrs_driver/include;/usr/include".split(';') if "/home/ubuntu/catkin_ws/devel_isolated/sick_ldmrs_driver/include;/home/ubuntu/catkin_ws/src/sick_ldmrs_laser/sick_ldmrs_driver/include;/usr/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;sensor_msgs;diagnostic_updater;dynamic_reconfigure;pcl_conversions;sick_ldmrs_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-l:/usr/lib/aarch64-linux-gnu/libboost_system.so".split(';') if "-l:/usr/lib/aarch64-linux-gnu/libboost_system.so" != "" else []
PROJECT_NAME = "sick_ldmrs_driver"
PROJECT_SPACE_DIR = "/home/ubuntu/catkin_ws/devel_isolated/sick_ldmrs_driver"
PROJECT_VERSION = "0.0.0"
|
[
"spartanhaden@gmail.com"
] |
spartanhaden@gmail.com
|
0690db07264c5795d1457e10640984b025aa63e7
|
155bf47fa1b33a31576f6b8b90aaa74cd41e352a
|
/lianjia-spider/test/date_test.py
|
bdb011ebe1d659ecb20cac9fcfe8c34d272f7d4a
|
[] |
no_license
|
ares5221/Python-Crawler-Projects
|
af4ec40a26f4f69ef285a0edf0428192a594d4cd
|
45b496000631f0f3b887501d9d67f3e24f5e6186
|
refs/heads/master
| 2021-07-03T07:11:25.474055
| 2020-09-08T08:17:17
| 2020-09-08T08:17:17
| 145,980,513
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 527
|
py
|
#!/usr/bin/env python
# coding=utf-8
# author: zengyuetian
import unittest

from lib.utility.date import *


class DateTest(unittest.TestCase):
    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_time_string(self):
        self.assertEqual(len(get_time_string()), 14)

    def test_date_string(self):
        self.assertEqual(len(get_date_string()), 8)

    def test_year_string(self):
        self.assertEqual(len(get_year_month_string()), 6)


if __name__ == '__main__':
    unittest.main()
|
[
"674361437@qq.com"
] |
674361437@qq.com
|
02bdf2ff0b549bdfb9f180710387a1f670c585c1
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/466/usersdata/283/111275/submittedfiles/Av2_Parte2.py
|
37c67dbe12d8064c666d9cb7468d46f05bb3de9c
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 549
|
py
|
# -*- coding: utf-8 -*-
a = []
b = []
c = []
n = int(input('Digite o número de elementos: '))
while n <= 0:
    print('Número inválido!')
    n = int(input('Digite o número de elemento: '))
for i in range(0, n, 1):
    a.append(input('Digite um elemento para a: '))
for j in range(0, n, 1):
    b.append(input('Digite um elemento para b: '))
for k in range(0, n, 1):
    c.append(input('Digite um elemento para c: '))
g = []
o = []
for l in range(0, n, 1):
    if (l+1) == n:
        break
    if a[l] < a[l+1]:
        g.append(a[l])
g.append(a[len(a)-1])
print(g)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
3b436ade09c46670b26faecdb2da74694f10439e
|
058c258ecb9d456dce6dc9ff41d9d2c9e5a5c489
|
/view/plat/Content.py
|
c46abb112ef988737d99b17d3bb343e70441c33e
|
[] |
no_license
|
lukoou3/Toolbox
|
7f64f49ab5b24e8ff3a7334a794a1ef8be520dc0
|
d23c1531adc4b03c8df043e05daa6dec4f3afaa9
|
refs/heads/master
| 2020-07-26T22:55:00.141722
| 2020-03-20T03:35:37
| 2020-03-20T03:35:37
| 208,787,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,312
|
py
|
from PyQt5.QtWidgets import QTabWidget

from view.content.DbTablesWidget import DbTablesWidget
from view.content.FileRenameWidget import FileRenameWidget
from view.content.JsonParseWidget import JsonParseWidget
from view.content.MarkdownWidget import MarkdownWidget
from view.content.SqlParseWidget import SqlParseWidget
from view.content.DbTableWidget import DbTableWidget
from view.content.StrMapReduceWidget import StrMapReduceWidget
from view.content.TransformWidget import TransformWidget


class Content(QTabWidget):
    def __init__(self, parent=None):
        super().__init__(parent)
        self.menuMap = {}
        self.initUI()

    def initUI(self):
        """http://www.jsons.cn/unicode/"""
        self.setContentsMargins(0, 0, 0, 0)
        self.tabBar().hide()
        str_mapreduce_widget = StrMapReduceWidget()
        self.menuMap["str_mapreduce_widget"] = str_mapreduce_widget
        self.addTab(str_mapreduce_widget, "")
        str_json_widget = JsonParseWidget()
        self.menuMap["str_json_widget"] = str_json_widget
        self.addTab(str_json_widget, "")
        str_sql_widget = SqlParseWidget()
        self.menuMap["str_sql_widget"] = str_sql_widget
        self.addTab(str_sql_widget, "")
        str_transform_widget = TransformWidget()
        self.menuMap["str_transform_widget"] = str_transform_widget
        self.addTab(str_transform_widget, "")
        str_markdown_widget = MarkdownWidget()
        self.menuMap["str_markdown_widget"] = str_markdown_widget
        self.addTab(str_markdown_widget, "")
        file_rename_widget = FileRenameWidget()
        self.menuMap["file_rename_widget"] = file_rename_widget
        self.addTab(file_rename_widget, "")
        db_tables_widget = DbTablesWidget()
        self.menuMap["db_tables_widget"] = db_tables_widget
        self.addTab(db_tables_widget, "")
        # db_table_widget = DbTableWidget()
        # self.menuMap["db_table_widget"] = db_table_widget
        # self.addTab(db_table_widget, "")
        self.setCurrentIndex(0)

    def setCurrentWidgetByMenu(self, menu):
        widget = self.menuMap.get(menu.get("contentWidget", "str_mapreduce_widget"))
        self.setCurrentWidget(widget)
        loadData = getattr(widget, "loadData", None)
        if callable(loadData):
            loadData()
|
[
"18638489474@163.com"
] |
18638489474@163.com
|
313a00b61f3722dff02dbad8119a1b9e42205264
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02715/s675628627.py
|
35d438a031aa3325f3538e0e2d2ff10f00f4b32d
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
N, K = map(int, input().split())
mod = 10 ** 9 + 7
G = [1] * (K + 1)  # number of sequences whose greatest common divisor is exactly this index
ans = 0
for k in range(K, 0, -1):
    x = K // k
    t = int(pow(x, N, mod))
    for j in range(x - 1):
        t -= G[(j + 2) * k]
    G[k] = t
    ans += t * k
    ans %= mod
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
3ecae40e32b5b7054eba8fd90a4dc60f9c611a72
|
9a358fbd62eaed4ef96c7a0c607322e11aa7d3bf
|
/api/com_dayoung_api/cop/act/model/actor_ai.py
|
c54bc66f60fa2e0f084ebbe04e5208998db8dea6
|
[] |
no_license
|
ysk1026/project_dayoungi
|
2b8a8cb569f1687024a00e7f3a3af6501aa67fb1
|
cecb3a42496164b84ece1912932fe58de8537e46
|
refs/heads/master
| 2023-01-20T14:38:17.898499
| 2020-11-28T08:25:06
| 2020-11-28T08:25:06
| 311,549,208
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,101
|
py
|
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
# pip install sklearn
# conda install python-graphviz
import pydotplus  # pip install pydotplus
from IPython.core.display import Image
from IPython.display import display
# pip install Ipython
# conda install -c anaconda ipython
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
import pandas as pd
import numpy as np
from sklearn import tree
from sklearn import metrics
from six import StringIO
import os, sys

# PATH = r'C:/Program Files/Graphviz 2.44.1/bin'
# os.environ["PATH"] += os.pathsep + PATH


class ActorAi:
    def __init__(self):
        ...

    def train_actors(self):
        df = self.bring_dfo()  # shape: (340, 10)
        df = df[df['state'] == 1]  # keep only the actors that are currently visible
        # df = df.head()
        # print(df)
        #    age name real_name religion agency spouse children debut_year gender state
        # 0  50 이병헌 no real name 불교 BH엔터테인먼트 이민정 이준후(아들) 1991 m 1
        # 1  39 전지현 왕지현(王智賢) no religion 문화창고 최준혁 2남 1997 f 1
        # 2  38 손예진 손언진 no religion 엠에스팀엔터테인먼트 no spouse no child 1999 f 1
        # 3  28 안소희 no real name 불교 BH엔터테인먼트 no spouse no child 2004 f 1
        # 4  39 강동원 no real name 무신론[1] YG 엔터테인먼트 no spouse no child 2003 m 1
        # print(df.columns.values.tolist())
        # ['age', 'name', 'real_name', 'religion', 'agency', 'spouse', 'children','debut_year', 'gender', 'state']
        # 9 columns in total, but only 8 questions are needed,
        # because state is already known to be 1 from the start
        # 1st Question: Is the actor male?
        # 2nd Question: Does the actor have children?
        # 3rd Question: Does the actor have a spouse?
        # 4th Question: about the agency ->
        # 5th Question: about the religion ->
        # 6th Question: Does the actor work under their real name?
        # 7th Question: What is the actor's age?
        # 8th Question: What is the actor's debut year?
        # x = df['age', 'real_name', 'religion', 'agency', 'spouse', 'children','debut_year', 'gender', 'state']
        # print(x)
        print("-----------------------------------")
        y_train = df.filter(["name"])  # the target output
        X_train = df.filter(['act_id', 'age', 'real_name', 'religion', 'agency', 'spouse', 'children', 'debut_year', 'gender', 'state'])
        print("**************************************")
        print(y_train)
        print(X_train)
        y_test = y_train
        # we are not predicting unseen data, so pred uses the same values as the train set;
        # expected to match 100%
        print("-----------------------------------------------------------------------")
        for set_max_depth in range(1, 15):
            set_random_state = 0
            clf = tree.DecisionTreeClassifier(criterion='entropy', max_depth=set_max_depth, random_state=set_random_state)
            clf.fit(X_train, y_train)
            y_pred = clf.predict(X_train)
            print("Accuracy :", metrics.accuracy_score(y_test, y_pred))
            print("random state: ", set_random_state)
            print("Max Depth: ", set_max_depth)
            print("-----------------------------------------------------------------------")
            dot_data = StringIO()
            tree.export_graphviz(clf, out_file=dot_data)
            graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
            Image(graph.write_png("max_depth{}.png".format(set_max_depth)))  # generate the png file
        # ---------------------------------------------------------------------------------------------
        # When Actor ID is dropped:
        # with 9 columns in total, a max_depth of 9 was expected to get close to 100%
        # Accuracy : 0.9766763848396501
        # random state: 0
        # Max Depth: 9
        # I expected 100%, but it did not come out that way
        # number of actors = 343
        # 343 * 0.9766763848396501 = 335
        # 343 - 335 = 8 actors turn out to have overlapping data!
        # When Actor ID is NOT dropped:
        # Accuracy : 1.0
        # random state: 0
        # Max Depth: 9
        # ----------------------------------------------------------------------
        # But the user does not know the Actor ID, so it means nothing;
        # the dataset actually used drops Actor ID.

    def bring_dfo(self):
        df = pd.read_csv("./data/actors2.csv")
        # print(df.shape)  # (340, 13); two of the 13 columns, actor_id and photo_url, are not needed, so both are dropped
        # the index is not needed either, so it is removed as well
        # print(df.columns)
        # Index(['Unnamed: 0', 'photo_url', 'age', 'act_id', 'name', 'real_name',
        #        'religion', 'agency', 'spouse', 'children', 'debut_year', 'gender',
        #        'state'], dtype='object')
        df = df.drop('photo_url', 1)  # 0 means to drop rows, 1 means drop columns
        df = df.drop('act_id', 1)
        # print(df.shape)  # (340, 10)
        return df


if __name__ == "__main__":
    ai = ActorAi()
    # df = pd.read_csv("./data/actors2.csv")
    # df = df.drop('photo_url', 1)  # 0 means to drop rows, 1 means drop columns
    # df = df.drop('act_id', 1)
    # df = df[df['state'] == 1]
    # print(df)
    ai.train_actors()
|
[
"fkqoseka@gmail.com"
] |
fkqoseka@gmail.com
|
c7f06138cb8e969387fdcd3d5ab3508c3ed9bf9d
|
297b5e4e39fe9d5add2face0e246cd5317caa005
|
/tests/settings.py
|
544d69eb06f49d49e7af36cc41efd90486f0828c
|
[
"MIT"
] |
permissive
|
alexdlaird/django-maintenance-mode
|
b2073971062839c5ee8c9fe5a65f99206a250a83
|
b71d7d2d0c9a7af3f81e23c40b2010b9413aba79
|
refs/heads/master
| 2021-08-29T20:18:55.602310
| 2017-10-18T13:52:20
| 2017-10-18T13:52:20
| 114,284,763
| 0
| 0
| null | 2017-12-14T18:45:03
| 2017-12-14T18:45:03
| null |
UTF-8
|
Python
| false
| false
| 1,486
|
py
|
# -*- coding: utf-8 -*-
import django
import os

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

SECRET_KEY = 'django-maintenance-mode'

ALLOWED_HOSTS = ['*']

INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'maintenance_mode',
]

MIDDLEWARE_CLASSES = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'maintenance_mode.middleware.MaintenanceModeMiddleware',
]

ROOT_URLCONF = 'tests.urls'

if django.VERSION < (1, 8):
    TEMPLATE_CONTEXT_PROCESSORS = (
        'django.contrib.auth.context_processors.auth',
        'django.core.context_processors.request',
        'maintenance_mode.context_processors.maintenance_mode',
    )
else:
    TEMPLATES = [
        {
            'BACKEND': 'django.template.backends.django.DjangoTemplates',
            'DIRS': [],
            'APP_DIRS': True,
            'OPTIONS': {
                'context_processors': [
                    'django.template.context_processors.request',
                    'django.contrib.auth.context_processors.auth',
                    'maintenance_mode.context_processors.maintenance_mode',
                ],
            },
        },
    ]

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
|
[
"fabio.caccamo@gmail.com"
] |
fabio.caccamo@gmail.com
|
961aa82417237ecf10d6d5d56faa8015967b798a
|
1dfba6d8c60a534d6bdeb985697fba913da5fe9b
|
/src/mcedit2/rendering/loadablechunks.py
|
39ce8cd7239bb07b73d8222a3f8fb9d0aecc2e1f
|
[
"BSD-3-Clause"
] |
permissive
|
shipbiulder101/mcedit2
|
2d88a6933bac3010f5bedcdd65d542587841a19f
|
44179472b7834c803da243a82d731f9ef555764d
|
refs/heads/master
| 2021-01-12T21:52:56.581572
| 2015-10-20T21:30:34
| 2015-10-20T21:30:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,704
|
py
|
"""
${NAME}
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import defaultdict
import logging
import numpy
from OpenGL import GL
from mcedit2.rendering.scenegraph import scenenode, rendernode
from mcedit2.util.glutils import Texture, gl
from mcedit2.rendering.depths import DepthOffset
log = logging.getLogger(__name__)
log.info("Making checkerboard texture...")
color0 = (0xff, 0xff, 0xff, 0x22)
color1 = (0xff, 0xff, 0xff, 0x44)
floorTexImage = numpy.array([color0, color1, color1, color0], dtype='uint8')
class LoadableChunksRenderNode(rendernode.RenderNode):
floorTexture = None
def compile(self):
if self.floorTexture is None:
self.floorTexture = Texture(image=floorTexImage, width=2, height=2,
minFilter=GL.GL_NEAREST,
magFilter=GL.GL_NEAREST,
)
self.floorTexture.load()
super(LoadableChunksRenderNode, self).compile()
def drawSelf(self):
with gl.glPushAttrib(GL.GL_FOG_BIT | GL.GL_ENABLE_BIT):
GL.glDisable(GL.GL_FOG)
GL.glEnable(GL.GL_BLEND)
GL.glEnable(GL.GL_POLYGON_OFFSET_FILL)
GL.glPolygonOffset(DepthOffset.ChunkMarkers, DepthOffset.ChunkMarkers)
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY)
GL.glEnable(GL.GL_TEXTURE_2D)
GL.glColor(1.0, 1.0, 1.0, 1.0)
self.floorTexture.bind()
for vertexArray in self.sceneNode.createVertexArrays():
GL.glVertexPointer(3, GL.GL_FLOAT, 0, vertexArray.ravel())
# chunkPositions *= 8
GL.glTexCoordPointer(2, GL.GL_FLOAT, 0, (vertexArray[..., (0, 2)] / 32).ravel())
GL.glDrawArrays(GL.GL_QUADS, 0, len(vertexArray) * 4)
GL.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY)
class LoadableChunksNode(scenenode.Node):
skipLargeLevels = False
RenderNodeClass = LoadableChunksRenderNode
def __init__(self, dimension):
super(LoadableChunksNode, self).__init__()
self.dimension = dimension
# if self.skipLargeLevels: # and hasattr(self.dimension.worldEditor, 'worldFolder'):
# try:
# p = self.dimension.worldEditor.adapter.selectedRevision.getFolderPath('region')
# if len(os.listdir(p)) > 50: # 50 * 1024 chunks
# return
#
# except AttributeError:
# log.exception("Don't know how to count region files in %s", self.dimension)
# raise
def createVertexArrays(self):
if self.dimension.chunkCount:
chunkSet = set(self.dimension.chunkPositions())
sizedChunks = chunkMarkers(chunkSet)
def arrays():
for size, chunks in sizedChunks.iteritems():
if not len(chunks):
continue
chunks = numpy.array(chunks, dtype='float32')
chunkPositions = numpy.zeros(shape=(chunks.shape[0], 4, 3), dtype='float32')
chunkPositions[:, :, (0, 2)] = numpy.array(((0, 0), (0, 1), (1, 1), (1, 0)), dtype='float32')
chunkPositions[:, :, (0, 2)] *= size
chunkPositions[:, :, (0, 2)] += chunks[:, numpy.newaxis, :]
chunkPositions *= 16
yield chunkPositions
return list(arrays())
def chunkMarkers(chunkSet):
""" Returns a mapping { size: [position, ...] } for different powers of 2
as size.
"""
sizedChunks = defaultdict(list)
size = 1
def all4(cx, cz):
cx &= ~size
cz &= ~size
return [(cx, cz), (cx + size, cz), (cx + size, cz + size), (cx, cz + size)]
# lastsize = 6
size = 1
while True:
nextsize = size << 1
chunkSet = set(chunkSet)
while len(chunkSet):
cx, cz = chunkSet.pop()
chunkSet.add((cx, cz))
o = all4(cx, cz)
others = set(o).intersection(chunkSet)
if len(others) == 4:
sizedChunks[nextsize].append(o[0])
for c in others:
chunkSet.discard(c)
else:
for c in others:
sizedChunks[size].append(c)
chunkSet.discard(c)
if len(sizedChunks[nextsize]):
chunkSet = set(sizedChunks[nextsize])
sizedChunks[nextsize] = []
size <<= 1
else:
break
return sizedChunks
|
[
"codewarrior@hawaii.rr.com"
] |
codewarrior@hawaii.rr.com
|
6eab8917304d95312065f0cf0e49b6057e96f5c3
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/131/usersdata/232/37596/submittedfiles/al10.py
|
4832cdecb96c52b8bdafae42640590571de174b0
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
# -*- coding: utf-8 -*-
# DO NOT DELETE THE LINE ABOVE. START BELOW THIS LINE
n = int(input('Digite o número de termos a ser calculado: '))
pi = 1
for i in range(2, 2*n + 1, 2):
    pi = pi * (i/(i-1)) * (i/(i+1))
pi = pi * 2
print('%.5f' % pi)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
05ad692df50100b660ac54b791457f586c290261
|
a209ce9617d2e135954d1e713b66540c252e3ea6
|
/myvenv/bin/easy_install-3.8
|
0f5e416a38cdba8d6b743a9eb2177223b5a34e2a
|
[] |
no_license
|
margaux-byte/mon-nouveau-blog
|
cff654eb216cb31180348056a483b6f50c7b206c
|
c16ff0300377ec7a450181c8c61b12a3096560b9
|
refs/heads/master
| 2020-08-22T10:05:46.031358
| 2019-10-20T13:24:54
| 2019-10-20T13:24:54
| 216,371,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
8
|
#!/Users/carlamoltosylvander/Documents/djangogirls/myvenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys

from setuptools.command.easy_install import main

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"you@example.com"
] |
you@example.com
|
5c72c3a1d0abf844c2e1fb52ff54d7df6d7b1685
|
4d98ac51b576e1d104cec50ecb510202b3f1fdaa
|
/pkg_config/__main__.py
|
f07c587299aaaed79dcbde454fb37c673d990455
|
[] |
no_license
|
cournape/pkg-config
|
8b0ef687a4e0888d905d3eeb3fe56dd8e618a38c
|
ac7a6e61140b2cc588b514d02c62bdc401f41d73
|
refs/heads/master
| 2021-01-22T02:13:02.314974
| 2017-02-06T00:14:20
| 2017-02-06T00:14:20
| 81,031,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,384
|
py
|
from __future__ import print_function

import argparse
import sys

from pkg_config.errors import PCFileNotFound
from pkg_config._commands import find_pc_file, list_all
from pkg_config._models import PackageInfo

VERSION = "0.0.1"

SEARCH_DIRECTORIES = [
    "/usr/local/lib/pkgconfig",
    "/usr/local/share/pkgconfig",
    "/usr/lib/pkgconfig",
    "/usr/local/Homebrew/Library/Homebrew/os/mac/pkgconfig/10.11",
]


def main(argv=None):
    argv = argv or sys.argv[1:]

    parser = argparse.ArgumentParser(
        description=u"pkg-config reimplementation in python.")
    parser.add_argument(
        u"--cflags", help=u"output all pre-processor and compiler flags",
        action="store_true"
    )
    parser.add_argument(
        u"--libs", help=u"output all linker flags", action="store_true"
    )
    parser.add_argument(
        u"--list-all", help=u"list all known packages", action="store_true"
    )
    parser.add_argument(u"--modversion", action="store_true")
    parser.add_argument(
        u"--print-requires-private", action="store_true",
    )
    parser.add_argument(
        u"--version", help=u"Print version and exits", action="store_true"
    )
    parser.add_argument(u"pc_file", nargs="?")

    namespace = parser.parse_args(argv)

    if namespace.version:
        print(VERSION)
        sys.exit(0)

    if namespace.list_all:
        list_all(SEARCH_DIRECTORIES)
        sys.exit(0)

    if namespace.pc_file is None:
        print(u"Must specify package names on the command line")
        sys.exit(0)

    try:
        p = find_pc_file(SEARCH_DIRECTORIES, namespace.pc_file)
    except PCFileNotFound:
        print(
            u"Package tls was not found in the pkg-config search path.\n"
            "Perhaps you should add the directory containing `{0}.pc'\n"
            "to the PKG_CONFIG_PATH environment variable\n"
            "No package '{0}' found".format(namespace.pc_file)
        )
        sys.exit(1)

    pkg_info = PackageInfo.from_path(p)
    if namespace.cflags:
        print(pkg_info.cflags)
        sys.exit(0)
    if namespace.libs:
        print(pkg_info.libs)
        sys.exit(0)
    if namespace.modversion:
        print(pkg_info.version)
        sys.exit(0)
    if namespace.print_requires_private:
        print("\n".join(pkg_info.requires_private))
        sys.exit(0)


if __name__ == "__main__":
    main()
|
[
"cournape@gmail.com"
] |
cournape@gmail.com
|
b069a9412f83db8f978c0847ed1620c7df76136a
|
eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429
|
/data/input/aldebaran/qibuild/python/qisrc/snapshot.py
|
f59cbcb2a82c4aea7f2a376ac8bec88db32698d8
|
[] |
no_license
|
bopopescu/pythonanalyzer
|
db839453bde13bf9157b76e54735f11c2262593a
|
8390a0139137574ab237b3ff5fe8ea61e8a0b76b
|
refs/heads/master
| 2022-11-22T02:13:52.949119
| 2019-05-07T18:42:52
| 2019-05-07T18:42:52
| 282,079,884
| 0
| 0
| null | 2020-07-23T23:46:09
| 2020-07-23T23:46:08
| null |
UTF-8
|
Python
| false
| false
| 4,166
|
py
|
## Copyright (c) 2012-2016 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.

"""Functions to generate and load snapshot."""

import collections
import json

from qisys import ui
import qisys.error
import qisrc.git
import qisrc.status
import qisrc.reset
import qisrc.sync


class Snapshot(object):
    """ Just a container for a git worktree snapshot """
    def __init__(self):
        self.refs = collections.OrderedDict()
        self.manifest = qisrc.sync.LocalManifest()
        self.format_version = None

    def dump(self, output_path, deprecated_format=True):
        """ Dump the snapshot into a human readable file """
        if deprecated_format:
            self._dump_deprecated(output_path)
        else:
            self._dump_json(output_path)

    def _dump_deprecated(self, output_path):
        srcs = self.refs.keys()
        with open(output_path, 'w') as fp:
            for src in srcs:
                fp.write(src + ":" + self.refs[src] + "\n")

    def _dump_json(self, output_path):
        with open(output_path, "w") as fp:
            serializable_manifest = dict()
            serializable_manifest["url"] = self.manifest.url
            serializable_manifest["branch"] = self.manifest.branch
            serializable_manifest["groups"] = self.manifest.groups
            if self.manifest.ref:
                serializable_manifest["ref"] = self.manifest.ref
            to_dump = {
                "format": 2,
                "manifest": serializable_manifest,
                "refs": self.refs
            }
            json.dump(to_dump, fp, indent=2)

    def load(self, source):
        """ Load a snapshot from a file path or a file object """
        # Try to open, else assume it's a file object
        try:
            fp = open(source, "r")
            data = fp.read()
        except TypeError:
            data = source.read()
        try:
            # Load JSON into an OrderedDict
            parsed = json.loads(data, object_pairs_hook=collections.OrderedDict)
            self._load_json(parsed)
        except ValueError:
            self._load_deprecated(data)
        try:
            source.close()
        except AttributeError:
            pass

    def _load_deprecated(self, source):
        for line in source.splitlines():
            try:
                (src, sha1) = line.split(":")
            except ValueError:
                ui.error("could not parse", line)
                continue
            src = src.strip()
            sha1 = sha1.strip()
            self.refs[src] = sha1

    def _load_json(self, parsed_json):
        self.format_version = parsed_json["format"]
        if self.format_version == 1:
            manifest_json = parsed_json["manifests"]["default"]
        elif self.format_version == 2:
            manifest_json = parsed_json["manifest"]
        else:
            raise qisys.error.Error(
                "unknown format: %s" % self.format_version)
        self.refs = parsed_json["refs"]
        for key, value in manifest_json.iteritems():
            setattr(self.manifest, key, value)

    def __eq__(self, other):
        if not isinstance(other, Snapshot):
            return False
        return other.refs == self.refs and other.manifest == self.manifest

    def __ne__(self, other):
        return not self.__eq__(other)


def generate_snapshot(git_worktree, output_path, deprecated_format=True):
    snapshot = git_worktree.snapshot()
    ui.info(ui.green, "Snapshot generated in", ui.white, output_path)
    return snapshot.dump(output_path, deprecated_format=deprecated_format)


def load_snapshot(git_worktree, input_path):
    """Load a snapshot file and reset projects."""
    snapshot = Snapshot()
    ui.info(ui.green, "Loading snapshot from", ui.white, input_path)
    snapshot.load(input_path)
    for (src, ref) in snapshot.refs.iteritems():
        ui.info("Loading", src)
        git_project = git_worktree.get_git_project(src, raises=False)
        if git_project:
            qisrc.reset.clever_reset_ref(git_project, ref)
|
[
"rares.begu@gmail.com"
] |
rares.begu@gmail.com
|
675c537063a61902fa38a06372e2646e5734afe6
|
5ddcd95c0bbf27573f60cffd43fbe872432bb8fe
|
/test/language/offsets/python/ParameterOffsetTest.py
|
b833caa24291a1f9d3c1c94b74a316d188e65caa
|
[
"BSD-3-Clause"
] |
permissive
|
chenpeihua/zserio
|
def7ba52b27a20673561e9f0fa9a78b12627fcc1
|
c021d6f943f25c2eb7d91712eb7bd5de13f9c8bc
|
refs/heads/master
| 2021-05-18T11:33:07.688831
| 2020-06-21T13:25:50
| 2020-06-21T13:25:50
| 251,227,439
| 0
| 0
|
BSD-3-Clause
| 2020-06-21T13:25:51
| 2020-03-30T07:04:56
| null |
UTF-8
|
Python
| false
| false
| 4,049
|
py
|
import unittest

import zserio

from testutils import getZserioApi


class ParameterOffsetTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.api = getZserioApi(__file__, "offsets.zs").parameter_offset

    def testBitSizeOf(self):
        createWrongOffset = False
        school = self._createSchool(createWrongOffset)
        self.assertEqual(self.SCHOOL_BIT_SIZE, school.bitSizeOf())

    def testBitSizeOfWithPosition(self):
        createWrongOffset = False
        school = self._createSchool(createWrongOffset)
        bitPosition = 2
        self.assertEqual(self.SCHOOL_BIT_SIZE + 8 - bitPosition, school.bitSizeOf(bitPosition))

    def testInitializeOffsets(self):
        createWrongOffset = True
        school = self._createSchool(createWrongOffset)
        bitPosition = 0
        self.assertEqual(self.SCHOOL_BIT_SIZE, school.initializeOffsets(bitPosition))
        self._checkSchool(school)

    def testInitializeOffsetsWithPosition(self):
        createWrongOffset = True
        school = self._createSchool(createWrongOffset)
        bitPosition = 2
        self.assertEqual(self.SCHOOL_BIT_SIZE + 8, school.initializeOffsets(bitPosition))
        self._checkSchool(school, bitPosition)

    def testRead(self):
        writeWrongOffset = False
        writer = zserio.BitStreamWriter()
        self._writeSchoolToStream(writer, writeWrongOffset)
        reader = zserio.BitStreamReader(writer.getByteArray())
        school = self.api.School.fromReader(reader)
        self._checkSchool(school)

    def testReadWrongOffsets(self):
        writeWrongOffset = True
        writer = zserio.BitStreamWriter()
        self._writeSchoolToStream(writer, writeWrongOffset)
        reader = zserio.BitStreamReader(writer.getByteArray())
        with self.assertRaises(zserio.PythonRuntimeException):
            self.api.School.fromReader(reader)

    def testWrite(self):
        createWrongOffset = True
        school = self._createSchool(createWrongOffset)
        writer = zserio.BitStreamWriter()
        school.write(writer)
        self._checkSchool(school)
        reader = zserio.BitStreamReader(writer.getByteArray())
        readSchool = self.api.School.fromReader(reader)
        self._checkSchool(readSchool)
        self.assertTrue(school == readSchool)

    def testWriteWithPosition(self):
        createWrongOffset = True
        school = self._createSchool(createWrongOffset)
        writer = zserio.BitStreamWriter()
        bitPosition = 2
        writer.writeBits(0, bitPosition)
        school.write(writer)
        self._checkSchool(school, bitPosition)

    def testWriteWrongOffset(self):
        createWrongOffset = True
        school = self._createSchool(createWrongOffset)
        writer = zserio.BitStreamWriter()
        with self.assertRaises(zserio.PythonRuntimeException):
            school.write(writer, callInitializeOffsets=False)

    def _writeSchoolToStream(self, writer, writeWrongOffset):
        writer.writeBits(self.SCHOOL_ID, 16)
        writer.writeBits(self.WRONG_ROOM_OFFSET if writeWrongOffset else self.ROOM_OFFSET, 32)
        writer.writeBits(self.ROOM_ID, 16)

    def _checkSchool(self, school, bitPosition=0):
        self.assertEqual(self.SCHOOL_ID, school.getSchoolId())
        expectedRoomOffset = (self.ROOM_OFFSET if (bitPosition == 0) else
                              self.ROOM_OFFSET + (bitPosition // 8) + 1)
        self.assertEqual(expectedRoomOffset, school.getOffsetHolder().getRoomOffset())
        self.assertEqual(self.ROOM_ID, school.getRoom().getRoomId())

    def _createSchool(self, createWrongOffset):
        roomOffset = self.WRONG_ROOM_OFFSET if createWrongOffset else self.ROOM_OFFSET
        offsetHolder = self.api.OffsetHolder.fromFields(roomOffset)
        room = self.api.Room.fromFields(offsetHolder, self.ROOM_ID)
        return self.api.School.fromFields(self.SCHOOL_ID, offsetHolder, room)

    SCHOOL_ID = 0x01
    ROOM_ID = 0x11
    WRONG_ROOM_OFFSET = 0
    ROOM_OFFSET = 6
    SCHOOL_BIT_SIZE = (6 + 2) * 8
|
[
"mikulas.rozloznik@eccam.com"
] |
mikulas.rozloznik@eccam.com
|
ca361226e992558e3c170b106de71efa1cc2421d
|
cb491f83882fea0627460f1de1e223309eb930c3
|
/src/part_two/ex10.py
|
be95f30ad8ea6464077227c7198a09e3cf3ff2f4
|
[] |
no_license
|
impreza555/geekbrains-python-exercises
|
4b1bef4a284ac1c6f4c9191644f31f2f99a90711
|
1e56b0820cc85f516c132d8c8aa0f8c3c60daafb
|
refs/heads/master
| 2022-06-17T17:51:36.540907
| 2020-05-04T16:25:20
| 2020-05-09T00:08:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 649
|
py
|
"""
Есть файл example.txt, в нем записано несколько строк
необходимо выполнить подсчет количества строк и количества слов в каждой строке.
Вывести результат в формате:
строк - X, слов - Y
Пример файла:
```
first
second-line
third line
fourth line
```
"""
with open('example.txt') as f:
rows = f.readlines()
words = [row.split() for row in rows]
rows_count, words_count = len(rows), sum([len(word_list) for word_list in words])
print(f"строк - {rows_count}, слов - {words_count}")
|
[
"artyom@manchenkoff.me"
] |
artyom@manchenkoff.me
|
5ce8c78a24d4151458505b17c21bcfdc5fff63f7
|
dd098f8a93f787e38676283679bb39a290ba28b4
|
/samples/openapi3/client/3_0_3_unit_test/python-experimental/unit_test_api/model/ipv6_format.py
|
ec484a1d8a41c468b910cc4568018f0f6d1782d7
|
[
"Apache-2.0"
] |
permissive
|
InfoSec812/openapi-generator
|
727c0235d3bad9b85ac12068808f844287af6003
|
e0c72702c3d5dae2a627a2926f0cddeedca61e32
|
refs/heads/master
| 2022-10-22T00:31:33.318867
| 2022-08-20T14:10:31
| 2022-08-20T14:10:31
| 152,479,633
| 1
| 0
|
Apache-2.0
| 2023-09-04T23:34:09
| 2018-10-10T19:38:43
|
Java
|
UTF-8
|
Python
| false
| false
| 628
|
py
|
# coding: utf-8
"""
openapi 3.0.3 sample spec
sample spec for testing openapi functionality, built from json schema tests for draft6 # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import typing # noqa: F401
import functools # noqa: F401
from frozendict import frozendict # noqa: F401
import decimal # noqa: F401
from datetime import date, datetime # noqa: F401
from frozendict import frozendict # noqa: F401
from unit_test_api import schemas # noqa: F401
Ipv6Format = schemas.AnyTypeSchema
|
[
"noreply@github.com"
] |
InfoSec812.noreply@github.com
|
53fac1b43cd3394624481aba748efd21b8096893
|
c0e819c144aa85b860c9da29d5b7a93d5fad1ee6
|
/exercises/05_basic_scripts/test_task_5_1.py
|
68d9cc60b0dfa25c038ef7e435b50cf410968caf
|
[] |
no_license
|
haskhr/pyneng-examples-exercises-en
|
ecf9fa78e57409cbab3e94d3d7a952ac966c0477
|
52e804f2942afefd626ebbddd8f4ec8a2b467b69
|
refs/heads/main
| 2023-03-12T14:41:43.293908
| 2021-03-10T05:32:25
| 2021-03-10T05:32:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,713
|
py
|
from importlib import reload
import sys

import pytest

# Checking that the test is called via pytest ... and not python ...
from _pytest.assertion.rewrite import AssertionRewritingHook

if not isinstance(__loader__, AssertionRewritingHook):
    print(f"Tests should be called using this expression:\npytest {__file__}\n\n")


def test_task_r2(capsys, monkeypatch):
    """
    Task check for r2
    """
    monkeypatch.setattr("builtins.input", lambda x=None: "r2")
    import task_5_1

    out, err = capsys.readouterr()
    r2_dict = {
        "location": "21 New Globe Walk",
        "vendor": "Cisco",
        "model": "4451",
        "ios": "15.4",
        "ip": "10.255.0.2",
    }
    assert (
        out
    ), "Nothing is printed to stdout. It is necessary not only to get the correct result, but also to print it to stdout using print"
    assert (
        str(r2_dict) in out.strip()
    ), "Wrong output is printed to stdout"


def test_task_sw1(capsys, monkeypatch):
    """
    Task check for sw1
    """
    monkeypatch.setattr("builtins.input", lambda x=None: "sw1")
    if sys.modules.get("task_5_1"):
        reload(sys.modules["task_5_1"])
    import task_5_1

    out, err = capsys.readouterr()
    sw1_dict = {
        "location": "21 New Globe Walk",
        "vendor": "Cisco",
        "model": "3850",
        "ios": "3.6.XE",
        "ip": "10.255.0.101",
        "vlans": "10,20,30",
        "routing": True,
    }
    assert (
        out
    ), "Nothing is printed to stdout. It is necessary not only to get the correct result, but also to print it to stdout using print"
    assert (
        str(sw1_dict) in out.strip()
    ), "Wrong output is printed to stdout"
|
[
"nataliya.samoylenko@gmail.com"
] |
nataliya.samoylenko@gmail.com
|
a55f535653ad76ffb57e459e3eb819f76a4d41bc
|
962feeffee41625ff841f6590f97bb09cef9be4c
|
/torch_glow/tests/nodes/sigmoid_test.py
|
d7959a93086ff7d53a580260fb035b023882494c
|
[
"Apache-2.0"
] |
permissive
|
SushantDaga/glow
|
8c4c3fbc58c3ae760bdd8e1df2e8c05a72ff07bc
|
aab22c3e0421dadd29950c2ebfa88b86027cecf5
|
refs/heads/master
| 2022-11-03T08:39:33.958233
| 2020-06-19T17:03:14
| 2020-06-19T17:05:42
| 273,568,864
| 2
| 0
|
Apache-2.0
| 2020-06-19T19:12:31
| 2020-06-19T19:12:30
| null |
UTF-8
|
Python
| false
| false
| 781
|
py
|
from __future__ import absolute_import, division, print_function, unicode_literals

import torch
import torch_glow
from tests.utils import jitVsGlow
import unittest


class TestSigmoid(unittest.TestCase):
    def test_sigmoid_basic(self):
        """Basic test of the PyTorch sigmoid Node on Glow"""

        def sigmoid_basic(a):
            c = a + a
            return c.sigmoid()

        x = torch.randn(6)
        jitVsGlow(sigmoid_basic, x, expected_fused_ops={"aten::sigmoid"})

    def test_sigmoid_inplace(self):
        """Test of the inplace PyTorch sigmoid Node on Glow"""

        def sigmoid_inplace(a):
            c = a + a
            return c.sigmoid_()

        x = torch.randn(6)
        jitVsGlow(sigmoid_inplace, x, expected_fused_ops={"aten::sigmoid_"})
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
c1c5cf8d4cdec5bc603ee6a8b608d8826d56dc84
|
4910c0f3d03935fc8ee03f1e9dc20dfdb2c7c04b
|
/Resueltos/Luciano_Chavarria/Python/WERTYU.py
|
95ff18884e0265efec156419c034b72763c5a589
|
[] |
no_license
|
roca12/gpccodes
|
ab15eeedc0cadc0735651262887b44f1c2e65b93
|
aa034a3014c6fb879ec5392c51f9714bdc5b50c2
|
refs/heads/master
| 2023-02-01T13:49:27.563662
| 2023-01-19T22:50:58
| 2023-01-19T22:50:58
| 270,723,328
| 3
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 566
|
py
|
while True:
    try:
        res = ''
        l = ('`', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '-', '=',
             'Q', 'W', 'E', 'R', 'T', 'Y', 'U', 'I', 'O', 'P', '[', ']', '\\',
             'A', 'S', 'D', 'F', 'G', 'H', 'J', 'K', 'L', ';', "'",
             'Z', 'X', 'C', 'V', 'B', 'N', 'M', ',', '.', '/')
        string = input()
        for i in range(len(string)):
            if string[i] == ' ':
                res += ' '
            else:
                res += l[l.index(string[i]) - 1]
        print(res)
    except EOFError:
        break
|
[
"noreply@github.com"
] |
roca12.noreply@github.com
|
81ff25c56aa409ab69cb2482550934bbdb000ca9
|
d0758e0ca004226cec8ad8b26c9565c98534a8b8
|
/02-core/notebook2slides.py
|
0cb0af330774502355decf098328bb702e6ddd7c
|
[] |
no_license
|
pythoncanarias/eoi
|
334d64a96afc76ac1fa10282378f291b6d8c94b3
|
349367254f85e3e4273cede067ca950913a1332c
|
refs/heads/master
| 2023-07-06T08:00:11.366345
| 2023-06-30T15:19:33
| 2023-06-30T15:19:33
| 222,742,870
| 26
| 19
| null | 2023-06-25T16:03:46
| 2019-11-19T16:41:25
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,458
|
py
|
#!/usr/bin/env python

'''
Inspired by https://goo.gl/SYWRbM and https://t.ly/8LAeY
Convert a jupyter notebook to slides (html) and apply some changes to default
settings (reveal.js, mathjax, ...)
Usage:
> nb.py <notebook.ipynb>
'''

import fileinput
import re
import shlex
import subprocess
import sys
from pathlib import Path

from prettyconf import config

# https://pygments.org/demo/#try
PYGMENTS_STYLE = config('PYGMENTS_STYLE', default='default')

# list of modifications to be made after generating the html slides
# each tuple has the form: (pattern, replacement) as regex
SETTINGS = [
    (
        r"(Reveal.addEventListener\('slidechanged', setScrollingSlide\);)",
        # next slide with right cursor, previous slide with left cursor
        # source: https://github.com/hakimel/reveal.js#keyboard-bindings
        "Reveal.configure({ keyboard: {37:'prev', 39:'next',} });"
    ),
    (
        r'(MathJax.Hub.Config\({)',
        # show the equation numbers
        'TeX: { equationNumbers: {autoNumber: \"AMS\"} },'
    ),
    (
        r'(http[\S]+/reveal.js/)\d\.\d\.\d',
        # update version of reveal.js
        # https://cdnjs.com/libraries/reveal.js/3.7.0
        '3.7.0'
    ),
    (
        r'(href=")custom.css',
        # common css for all notebooks
        '../custom.css'
    )
]


def notebook_to_slides(ipynbfile_path):
    print(f'Converting {ipynbfile_path} to html...')
    notebook_path = Path(ipynbfile_path)
    html_path = notebook_path.parent.joinpath(notebook_path.stem +
                                              '.slides.html')
    cmd = shlex.split(f'''
        jupyter nbconvert {notebook_path}
        --to slides --CSSHTMLHeaderPreprocessor.style={PYGMENTS_STYLE}''')
    subprocess.run(cmd)
    return html_path


def change_settings(htmlfile_path):
    print(f'Changing settings of {htmlfile_path}...')
    with fileinput.input(files=htmlfile_path, inplace=True) as f:
        for line in f:
            for setting in SETTINGS:
                pattern, replace = setting
                if re.search(pattern, line):
                    new_line = re.sub(pattern, rf'\g<1>{replace}', line)
                    break
            else:
                new_line = line
            print(new_line, end='')


for file in sys.argv[1:]:
    rendered_html_file = notebook_to_slides(file)
    change_settings(rendered_html_file)
|
[
"euribates@gmail.com"
] |
euribates@gmail.com
|
36c86b6336cccb99ca8f04fc10b155ab44100c37
|
890612db0bc6209134b6d7017775d5a86604b285
|
/tests/data/text/bpe_test.py
|
33e5ecb73b283cce3f305c3f6b8775c656b05f4c
|
[
"Apache-2.0"
] |
permissive
|
hiyoung-asr/st
|
6277fc5c1f123b5c6b09bb9ebbad779f6e08c987
|
634a71e3f1860c0db2f4f304a7828bb5560c34f0
|
refs/heads/master
| 2023-03-15T04:30:15.652714
| 2020-11-12T03:47:18
| 2020-11-12T03:47:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,179
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import tempfile

import tensorflow as tf

from neurst.data.text.bpe import BPE


def test():
    codes = ["技 术</w>", "发 展</w>"]
    tmp_file = tempfile.NamedTemporaryFile(delete=False)
    with tf.io.gfile.GFile(tmp_file.name, "w") as fw:
        fw.write("version\n")
        fw.write("\n".join(codes) + "\n")
    bpe = BPE(lang="zh",
              glossaries=["迅速", "<-neplhd-hehe>"])
    bpe.init_subtokenizer(tmp_file.name)
    tokens = bpe.tokenize("技术 发展 迅猛", return_str=True)
    assert tokens == "技术 发展 迅@@ 猛"
    assert bpe.detokenize(tokens) == "技术 发展 迅猛"
    tokens = bpe.tokenize("技术发展迅猛", return_str=True)
    assert tokens == "技@@ 术@@ 发@@ 展@@ 迅@@ 猛"
    assert bpe.detokenize(tokens) == "技术发展迅猛"
    tokens = bpe.tokenize("技术迅速发展迅速 迅速 <-neplhd-hehe>", return_str=True)
    assert tokens == "技术@@ 迅速@@ 发展@@ 迅速 迅速 <-neplhd-hehe>"
    assert bpe.detokenize(tokens) == "技术迅速发展迅速 迅速 <-neplhd-hehe>"
    os.remove(tmp_file.name)


if __name__ == "__main__":
    test()
|
[
"zhaochengqi.d@bytedance.com"
] |
zhaochengqi.d@bytedance.com
|
b7723e87a26067ac539b187244e80cd998ae5c3a
|
f5cd89e46b7e9fb22b422557a3c4d0354e501110
|
/app/main/admin.py
|
b8aafe5cb4c14a84808c29044c111203e8256f69
|
[] |
no_license
|
Alerion/Pharmacology-DB
|
14d081fbab80db974258ebad7db4ab285ccdfda5
|
86ef48feecedce6fc1adc9aa1c4363044e9454f0
|
refs/heads/master
| 2021-07-25T00:55:14.142794
| 2021-02-12T18:18:12
| 2021-02-12T18:18:12
| 302,310
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 670
|
py
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from models import Drug, FarmAction, Illness
from django.http import HttpResponse


class DrugAdmin(admin.ModelAdmin):
    def edit_vector(self, request, pk):
        return HttpResponse('Hello %s' % pk)

    def get_urls(self):
        from django.conf.urls.defaults import patterns, url
        urls = super(DrugAdmin, self).get_urls()
        my_urls = patterns('',
            url(r'^em/(?P<pk>\d+)/$', self.admin_site.admin_view(self.edit_vector), name='edit_vector')
        )
        return my_urls + urls


admin.site.register(Drug, DrugAdmin)
admin.site.register(FarmAction)
admin.site.register(Illness)
|
[
"alerion.um@gmail.com"
] |
alerion.um@gmail.com
|
140d8a10408bebea7a12712c607cf0a7278e11a1
|
010c5fbc97731286be00028ff33fc981d943bca3
|
/primal/src/code/impute/tests/data/pedigree/pedigree_old_study.py
|
bffae63b82e6442b7adfd83f8252213996c0fefb
|
[] |
no_license
|
orenlivne/ober
|
6ce41e0f75d3a8baebc53e28d7f6ae4aeb645f30
|
810b16b2611f32c191182042240851152784edea
|
refs/heads/master
| 2021-01-23T13:48:49.172653
| 2014-04-03T13:57:44
| 2014-04-03T13:57:44
| 6,902,212
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,911
|
py
|
'''
============================================================
A pedigree loaded from an input file from a previous study
at the Ober Lab. Provided by Jessica and Gaixin. Includes
node annotations (generated by Mark) - old_generation #.
Here 'old' refers to 'from the study'.
Created on May 30, 2012
@author: Oren Livne <livne@uchicago.edu>
============================================================
'''
import numpy as np
from impute.data.Pedigree import Pedigree
from impute.data import io_pedigree
class PedigreeOldStudy(Pedigree):
def __init__(self, pedigree, old_generation):
'''Constructor'''
super(PedigreeOldStudy, self).__init__(pedigree.graph,
sample_id=pedigree.sample_id,
sex=pedigree.sex,
phenotype=pedigree.phenotype,
node_type=pedigree.node_type,
sample_index=pedigree.sample_index,
num_genotyped=pedigree.num_genotyped)
# Generation number of each node provided by the input file from the study
self.old_generation = old_generation
class PedigreeOldStudyReader(object):
#---------------------------------------------
# Methods
#---------------------------------------------
def read(self, file_name, genotyped_id_file=None):
'''Load pedigree from file in old format.'''
p = io_pedigree.read(file_name, genotyped_id_file)
# Load data from text file a second time to read the old-study-specific-column. Not efficient.
data = np.genfromtxt(file_name, np.dtype(int))
old_generation = dict(data[:,(1,6)])
# Wrap by old pedigree object
return PedigreeOldStudy(p, old_generation)
|
[
"oren.livne@gmail.com"
] |
oren.livne@gmail.com
|
5aaf61fe69ee9ad1529a5d0daae9be1d9ed286b2
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-gsn-edf/gsn-edf_ut=3.0_rd=0.8_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=82/sched.py
|
485e768f9a31d6cd6d14a9155b7252114127319a
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 356
|
py
|
-X FMLP -Q 0 -L 3 96 300
-X FMLP -Q 0 -L 3 80 400
-X FMLP -Q 0 -L 3 75 400
-X FMLP -Q 1 -L 1 63 250
-X FMLP -Q 1 -L 1 60 200
-X FMLP -Q 1 -L 1 48 150
-X FMLP -Q 2 -L 1 41 300
-X FMLP -Q 2 -L 1 33 125
-X FMLP -Q 2 -L 1 32 100
-X FMLP -Q 3 -L 1 27 200
-X FMLP -Q 3 -L 1 26 150
-X FMLP -Q 3 -L 1 24 300
18 150
18 150
7 100
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
6e67540d0a1f799bb87d998cdd83312283346dab
|
3c8701e04900389adb40a46daedb5205d479016c
|
/test/fortresstest/fortress_lfzb/test.py
|
8d8c20326ce571cfe13ee15976a521188594afda
|
[] |
no_license
|
huboa/xuexi
|
681300653b834eaf506f49987dcca83df48e8db7
|
91287721f188b5e24fbb4ccd63b60a80ed7b9426
|
refs/heads/master
| 2020-07-29T16:39:12.770272
| 2018-09-02T05:39:45
| 2018-09-02T05:39:45
| 73,660,825
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 338
|
py
|
#!/bin/python
# # -*- coding: utf-8 -*-
# import sys,json,urllib,urllib2
# reload(sys)
# sys.setdefaultencoding('utf-8')
#
# url = "http://cd-ztree-api.inc-mtime.com/getalluserpassword"
# result = urllib2.urlopen(url).read()
# result = json.loads(result)
#
# for i in result:
# for k,v in i.items():
# if k == 'jie.wang':
# print v
|
[
"wxcr11@gmail.com"
] |
wxcr11@gmail.com
|
e853121de9b9ac889b80e8139983297bc65d2faa
|
7a88fc18f30d5dd3ac935877d4d9268a56c296be
|
/di_website/blog/migrations/0020_auto_20191023_0650.py
|
55f86e1326edb75e8034c449a21e59133ae334f2
|
[] |
no_license
|
devinit/DIwebsite-redesign
|
745a480b7ba0feffa34dc664548ee4c5a7b4d470
|
9ec46823c67cdd4f35be255896bf30d8f6362666
|
refs/heads/develop
| 2023-08-30T04:06:20.951203
| 2023-08-07T12:06:07
| 2023-08-07T12:06:07
| 184,287,370
| 1
| 0
| null | 2023-08-28T14:34:57
| 2019-04-30T15:29:25
|
HTML
|
UTF-8
|
Python
| false
| false
| 782
|
py
|
# Generated by Django 2.2.2 on 2019-10-23 06:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0019_auto_20191011_1357'),
]
operations = [
migrations.AlterField(
model_name='blogarticlepage',
name='hero_image_credit_name',
field=models.TextField(blank=True, help_text='Name of source of image used in hero if any', null=True, verbose_name='Image credit name'),
),
migrations.AlterField(
model_name='blogindexpage',
name='hero_image_credit_name',
field=models.TextField(blank=True, help_text='Name of source of image used in hero if any', null=True, verbose_name='Image credit name'),
),
]
|
[
"alex.k.miller@gmail.com"
] |
alex.k.miller@gmail.com
|
3c6b94c42cc74ed86c4168785aa1625444219fae
|
c2d436fecd486a412eae5171882110e324b2fc1c
|
/chap8/78.py
|
ec1d6a31f6f9efeb259b6ef3476282a255d11d7d
|
[] |
no_license
|
uenewsar/nlp100fungos
|
0150bacf835f3734dd76a25b079ec6c61efb4d83
|
7f745abb97c3129818ec6cf5f69abca15c50e451
|
refs/heads/master
| 2020-04-14T23:47:20.482910
| 2019-01-12T13:32:09
| 2019-01-12T13:32:36
| 164,216,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,023
|
py
|
# -*- coding: utf-8 -*-
'''
78. 5-fold cross-validation
In the experiments of problems 76-77, the examples used for training were
also used for evaluation, so that is not a fair evaluation: it measures how
well the classifier memorizes the training examples and does not measure
the model's generalization performance. Instead, use 5-fold cross-validation
to compute the accuracy, precision, recall and F1 score of the polarity
classifier.
'''
import re
import sys
import numpy as np
import stemming.porter2
from sklearn.linear_model import LogisticRegression
# as stemming.porter2.stem is a little bit slow, use cache.
stem_cache = {}
def stem(inp):
if inp not in stem_cache:
stem_cache[inp] = stemming.porter2.stem(inp)
return stem_cache[inp]
# from https://gist.github.com/sebleier/554280
stop_words = {
"i", "me", "my", "myself", "we", "our",
"ours", "ourselves", "you", "your", "yours",
"yourself", "yourselves", "he", "him", "his", "himself",
"she", "her", "hers", "herself", "it", "its",
"itself", "they", "them", "their", "theirs", "themselves",
"what", "which", "who", "whom", "this", "that",
"these", "those", "am", "is", "are", "was",
"were", "be", "been", "being", "have", "has",
"had", "having", "do", "does", "did", "doing",
"a", "an", "the", "and", "but", "if",
"or", "because", "as", "until", "while", "of",
"at", "by", "for", "with", "about", "against",
"between", "into", "through", "during", "before", "after",
"above", "below", "to", "from", "up", "down",
"in", "out", "on", "off", "over", "under",
"again", "further", "then", "once", "here", "there",
"when", "where", "why", "how", "all", "any",
"both", "each", "few", "more", "most", "other",
"some", "such", "no", "nor", "not", "only",
"own", "same", "so", "than", "too", "very",
"s", "t", "can", "will", "just", "don", "should",
"now"}
class Instance(object):
# class to store one instance of training/evaluation data
def __init__(self):
self.label = None
self.sentence = None
self.words = None
self.feat = None
self.feat_vec = None
def __str__(self):
ret = 'label=[{}]'.format(self.label)
ret += ', sentence="{}"'.format(self.sentence)
ret += ', words={}'.format(self.words)
ret += ', feat={}'.format(self.feat)
ret += ', feat_vec={}'.format(self.feat_vec)
return ret
def create_feat(org_words, feat2id=None):
# make unigram and bigram feat
# to avoid changing original memory
words = list(org_words)
# delete symbol tokens
tmp = []
for e in words:
if not re.search(r'^[^0-9a-zA-Z]+$', e):
# use if the word is NOT only-symbol word
tmp.append(e)
words = tmp
# stemming
for i in range(len(words)):
words[i] = stem(words[i])
# assign flag for showing stop words
for i in range(len(words)):
if is_stop_word(words[i]):
words[i] = '__stop__'
feat = {}
# add BOS and EOS
words.insert(0, 'BOS')
words.append('EOS')
## make unigram
for i in range(len(words)):
if words[i] == '__stop__':
continue
feat[words[i]] = 1
## make bigram
for i in range(len(words)-1):
if words[i] == '__stop__' or words[i+1] == '__stop__':
continue
feat['{}_{}'.format(words[i], words[i+1])] = 1
    # no matter how many times a feature occurs in one sentence,
    # the value of the feature is set to 1.
# if each feature is not defined in feat2id, delete
vec = None
if feat2id is not None:
tmp = {}
for ef in feat.keys():
if ef in feat2id:
tmp[ef] = 1
feat = tmp
# also make feature vector
vec = [0.0] * len(feat2id)
for ef in feat.keys():
vec[feat2id[ef]] = 1.0
# debug
#sys.stderr.write('[{}]\n -> [{}]\n'.format(' '.join(org_words), ' '.join(sorted(feat.keys()))))
return (feat, vec)
def normalize_stc(inp):
# delete duplicated space
inp = re.sub(r' +', ' ', inp)
# lower
inp = inp.lower()
return inp
def read_data(fn):
data = []
fr = open(fn, 'r', encoding='utf-8')
for e in fr:
e = e.rstrip()
e = normalize_stc(e)
tab = e.split(' ')
# label -> [0]
label = int(tab[0])
# words -> [1, 2, ...]
words = tab[1:]
# sentence
sentence = ' '.join(tab[1:])
ins = Instance()
ins.label = label
ins.words = words
ins.sentence = sentence
data.append(ins)
fr.close()
return data
def is_stop_word(inp):
if inp in stop_words:
return True
else:
return False
def make_feat_to_be_used(data):
# from raw features, extract actual features to be used.
# creat feat vs. freq
feat2freq = {}
for e in data:
for ef in e.feat:
if ef not in feat2freq:
feat2freq[ef] = 0
feat2freq[ef] += 1
# delete singleton and make feat to be used
feat2id = {}
for k, v in feat2freq.items():
if v>1:
feat2id[k] = len(feat2id)
else:
#print('{} is deleted.'.format(k))
pass
return feat2id
## main
data = read_data('sentiment.txt')
# divide data to 5 folds
data_fold = {}
for i in range(len(data)):
fold_idx = int(float(i) / len(data) * 5)
if fold_idx not in data_fold:
data_fold[fold_idx] = []
data_fold[fold_idx].append(data[i])
# reset metrics
mat = {'TP':0, 'FN':0, 'FP':0, 'TN':0}
cor = 0
# loop all folds
for fold_idx in sorted(data_fold.keys()):
print('fold: {}/{}'.format(fold_idx+1, len(data_fold)))
# make evaluation data
eval_data = data_fold[fold_idx]
#for e in eval_data:
# print(e)
# make training data
train_data = []
for i in sorted(data_fold.keys()):
if i != fold_idx:
train_data.extend(data_fold[i])
#for e in train_data:
# print(e)
print(' num of eval data: {}'.format(len(eval_data)))
print(' num of train data: {}'.format(len(train_data)))
## train
# first, makes all possible features
for ed in train_data:
(ed.feat, _) = create_feat(ed.words)
# make actual features to be used
feat2id = make_feat_to_be_used(train_data)
#for k, v in feat2id.items():
# print(' {} {}'.format(k, v))
# make feature vector
for ed in train_data:
(ed.feat, ed.feat_vec) = create_feat(ed.words, feat2id)
#print(' feat: {}'.format(ed.feat))
#print(' feat_vec: {}'.format(ed.feat_vec))
# model training
x = []
y = []
for ed in train_data:
#print('ed.feat_vec: {}'.format(list(ed.feat_vec)))
#x.append(list(ed.feat_vec))
x.append(ed.feat_vec)
y.append(ed.label)
#print('x:{}'.format(x))
#print('y:{}'.format(y))
lr = LogisticRegression(solver='liblinear')
lr.fit(x, y)
#exit()
# evaluation
for ed in eval_data:
(ed.feat, ed.feat_vec) = create_feat(ed.words, feat2id)
est_label = lr.predict([ed.feat_vec])[0]
est_prob = lr.predict_proba([ed.feat_vec])[0][np.where(lr.classes_==est_label)][0]
if est_label==ed.label:
cor += 1
if est_label==1 and ed.label==1:
mat['TP'] += 1
elif est_label==1 and ed.label==-1:
mat['FP'] += 1
elif est_label==-1 and ed.label==1:
mat['FN'] += 1
elif est_label==-1 and ed.label==-1:
mat['TN'] += 1
else:
raise Exception('error')
print(' accuracy: {}'.format(float(cor)/len(data)))
precision = float(mat['TP']) / (mat['TP']+mat['FP'])
print('precision: {}'.format(precision))
recall = float(mat['TP']) / (mat['TP']+mat['FN'])
print(' recall: {}'.format(recall))
print(' f1: {}'.format( 2 * precision * recall / (precision + recall) ))
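# Added note on the metrics above: the confusion counts in `mat` are pooled
# over all five folds, so precision = TP/(TP+FP) and recall = TP/(TP+FN) are
# micro-averaged rather than averaged per fold, and the last line prints
# F1 = 2*precision*recall/(precision+recall).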
|
[
"none@none"
] |
none@none
|
49bd991042559fc02150d178e511e172a8bb31e5
|
5f845ebbc2c9b40eea702833c91928ae90ae7ee5
|
/data-structures/array-left-rotation.py
|
d20dde7750484854c21b9132f1597e1f7a1f439a
|
[
"MIT"
] |
permissive
|
imgeekabhi/HackerRank
|
7a1917fee5af01976aebb9c82aa1045a36487016
|
7fe4a308abad85ce446a28328324be480672e6fc
|
refs/heads/master
| 2022-12-28T19:13:49.098090
| 2020-10-11T09:29:08
| 2020-10-11T09:29:08
| 300,023,395
| 1
| 0
|
MIT
| 2020-09-30T18:48:12
| 2020-09-30T18:48:11
| null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
#!/bin/python3
import sys
def leftRotation(a, d):
out = list(a)
a_len = len(a)
for ind, el in enumerate(a):
out[(ind + a_len - d) % a_len] = el
return out
if __name__ == "__main__":
n, d = input().strip().split(' ')
n, d = [int(n), int(d)]
a = list(map(int, input().strip().split(' ')))
result = leftRotation(a, d)
    print(" ".join(map(str, result)))
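# Worked example (added): each element moves to index (i + n - d) % n, so
# rotating [1, 2, 3, 4, 5] left by d=2 yields [3, 4, 5, 1, 2].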
|
[
"sergey.n.nemov@gmail.com"
] |
sergey.n.nemov@gmail.com
|
1c157b3dc596401cbdacaf303f49abd65fd7dc33
|
a686db263a544c42ccfea566f19fba5443515357
|
/server.py
|
7a61d8d1b1fc25d4593cfbce61fbe3bf85d13541
|
[] |
no_license
|
merli027/apis
|
8fd3ea6489f416d2dd1304db51dae5d3a23cffc1
|
4136e10fcbdfc36b7665233eddce913888e1e59f
|
refs/heads/master
| 2022-12-13T13:03:05.026252
| 2020-02-25T22:28:05
| 2020-02-25T22:28:05
| 243,116,270
| 0
| 0
| null | 2022-12-08T03:41:20
| 2020-02-25T22:27:30
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,292
|
py
|
from flask import Flask, render_template, request
from pprint import pformat
import os
import requests
app = Flask(__name__)
app.secret_key = 'SECRETSECRETSECRET'
API_KEY = os.environ['TICKETMASTER_KEY']
@app.route('/')
def homepage():
"""Show homepage."""
return render_template('homepage.html')
@app.route('/afterparty')
def show_afterparty_form():
"""Show event search form"""
return render_template('search-form.html')
@app.route('/afterparty/search')
def find_afterparties():
"""Search for afterparties on Eventbrite"""
keyword = request.args.get('keyword', '')
postalcode = request.args.get('zipcode', '')
radius = request.args.get('radius', '')
unit = request.args.get('unit', '')
sort = request.args.get('sort', '')
url = 'https://app.ticketmaster.com/discovery/v2/events'
payload = {'apikey': API_KEY, 'keyword': keyword, 'postalcode': postalcode,
'radius': radius, 'unit': unit, 'sort': sort}
res = requests.get(url, params=payload)
data = res.json()
print(data.keys())
#events = data['_embedded']['events']
events = []
# TODO: Make a request to the Event Search endpoint to search for events
#
# - Use form data from the user to populate any search parameters
#
# - Make sure to save the JSON data from the response to the `data`
# variable so that it can display on the page. This is useful for
# debugging purposes!
#
# - Replace the empty list in `events` with the list of events from your
# search results
# data = {'Test': ['This is just some test data'],
# 'page': {'totalElements': 1}}
return render_template('search-results.html',
pformat=pformat,
data=data,
results=events)
# ===========================================================================
# FURTHER STUDY
# ===========================================================================
@app.route('/event/<id>')
def get_event_details(id):
"""View the details of an event."""
# TODO: Finish implementing this view function
return render_template('event-details.html')
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0')
|
[
"you@example.com"
] |
you@example.com
|
95f4c2bf1d1943ec5cf66207d1c6179d21703460
|
f47863b3a595cbe7ec1c02040e7214481e4f078a
|
/plugins/scan/esccms/2555.py
|
dbdde80f807c5821d4d411301730134a2ac42e6a
|
[] |
no_license
|
gobiggo/0bscan
|
fe020b8f6f325292bda2b1fec25e3c49a431f373
|
281cf7c5c2181907e6863adde27bd3977b4a3474
|
refs/heads/master
| 2020-04-10T20:33:55.008835
| 2018-11-17T10:05:41
| 2018-11-17T10:05:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 616
|
py
|
#!/usr/bin/evn python
#-*-:coding:utf-8 -*-
#Author:404
#Name: ESCCMS (Yichuangsi) education site-builder: unauthorized access can view all registered users
#Refer:http://www.wooyun.org/bugs/wooyun-2010-086704
def assign(service,arg):
if service=="esccms":
return True,arg
def audit(arg):
url=arg+"operationmanage/selectunitmember.aspx"
code,head,res,errcode,_=curl.curl2(url)
if code==200 and "doPostBack" in res and 'gvUnitMember' in res:
security_hole(url)
if __name__=="__main__":
audit(assign('esccms','http://www.yclfzx.com/')[1])
audit(assign('esccms','http://www.qzxx.net/')[1])
|
[
"zer0i3@aliyun.com"
] |
zer0i3@aliyun.com
|
383c97c1e717ee09c481c9a9bcaafaf22a6aa0cd
|
4144df22392350035a9a24fcbc23fd1c6bce5c12
|
/Lib/glyphNameFormatter/rangeProcessors/katakana.py
|
080574bddaaaa12ee38391a29264d9162345e529
|
[
"BSD-3-Clause",
"Adobe-Glyph"
] |
permissive
|
danielgrumer/glyphNameFormatter
|
55b6076684bed7ff4cc6e37ce4a0bb0e2ce86a4a
|
9a41b3ef02c01cd18afe0232f6e436a2f7379178
|
refs/heads/master
| 2020-12-11T05:35:47.835908
| 2016-03-19T09:50:33
| 2016-03-19T09:50:33
| 53,578,090
| 0
| 0
| null | 2016-03-10T11:07:31
| 2016-03-10T11:07:30
| null |
UTF-8
|
Python
| false
| false
| 357
|
py
|
def process(self):
self.edit("KATAKANA-HIRAGANA", "kana")
self.edit("SOUND MARK")
self.edit("MARK")
self.edit("LETTER")
self.edit("SMALL", "small")
self.editToFinal("KATAKANA", "katakana")
self.lower()
self.compress()
if __name__ == "__main__":
from glyphNameFormatter.test import printRange
printRange("Katakana")
|
[
"erik@letterror.com"
] |
erik@letterror.com
|
d1bdf920154ffffe0e5e7314a926015d1e892b85
|
90c6262664d013d47e9a3a9194aa7a366d1cabc4
|
/tests/operations/opR2kM14LSbSGpKxeZWzfXaj32AP29B2iJ88hss1mZRxXAMkR2U/test_forge_opR2kM.py
|
fe34ad1cf8192a126f57b15b7ea1af6b39a5c26b
|
[
"MIT"
] |
permissive
|
tqtezos/pytezos
|
3942fdab7aa7851e9ea81350fa360180229ec082
|
a4ac0b022d35d4c9f3062609d8ce09d584b5faa8
|
refs/heads/master
| 2021-07-10T12:24:24.069256
| 2020-04-04T12:46:24
| 2020-04-04T12:46:24
| 227,664,211
| 1
| 0
|
MIT
| 2020-12-30T16:44:56
| 2019-12-12T17:47:53
|
Python
|
UTF-8
|
Python
| false
| false
| 567
|
py
|
from unittest import TestCase
from tests import get_data
from pytezos.operation.forge import forge_operation_group
class OperationForgingTestopR2kM(TestCase):
def setUp(self):
self.maxDiff = None
def test_forge_opR2kM(self):
expected = get_data(
path='operations/opR2kM14LSbSGpKxeZWzfXaj32AP29B2iJ88hss1mZRxXAMkR2U/forged.hex')
actual = forge_operation_group(get_data(
path='operations/opR2kM14LSbSGpKxeZWzfXaj32AP29B2iJ88hss1mZRxXAMkR2U/unsigned.json'))
self.assertEqual(expected, actual)
|
[
"mz@baking-bad.org"
] |
mz@baking-bad.org
|
41b9f5fefd62fafb4c0703fcbb3f4278fb7479a8
|
8e1668e35a8df9968ab14d16db089b51dbe6dd51
|
/python/algorithms/arrays/distributed_candies.py
|
36ce7af5034497a847bd5c0a47921763dfd79336
|
[] |
no_license
|
Chalmiller/competitive_programming
|
f1ec0184d1ff247201522ab90ca8e66b3f326afc
|
b437080d1ba977c023baf08b7dc5c3946784e183
|
refs/heads/master
| 2021-03-24T05:11:59.383916
| 2020-08-24T22:07:41
| 2020-08-24T22:07:41
| 247,519,998
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
from typing import *
import collections
class Solution:
def distributeCandies(self, candies: List[int]) -> int:
return min(len(candies) / 2, len(set(candies)))
obj = Solution()
print(obj.distributeCandies([1,1,2,3]))
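# Added note: the answer is min(len(candies)/2, number of distinct kinds);
# for [1, 1, 2, 3] that is min(2.0, 3), so the call above prints 2.0
# (len(candies) // 2 would keep the result an int).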
|
[
"chalmiller1@gmail.com"
] |
chalmiller1@gmail.com
|
340c22294da42b53386bdaea4cfe8593715817c1
|
644b019a4792b6c7d9e5352e6330069850cc07e7
|
/dentexchange/apps/matches/jobs/daily/periodic_automatches_email.py
|
a7eb338df9d3424e9594a60e71c556e2f72d00b6
|
[
"BSD-3-Clause"
] |
permissive
|
jpchauvel/dentexchange
|
db0611c8c45365db30bdc15e3005c6eeac104c73
|
58ae303e842404fc9e1860f294ec8044a332bef3
|
refs/heads/master
| 2021-10-10T12:19:00.985034
| 2014-09-24T03:42:20
| 2014-09-24T03:42:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 619
|
py
|
# -*- coding:utf-8 -*-
import calendar
from django_extensions.management.jobs import DailyJob
from django.utils.timezone import now
from ...tasks import SendPeriodicAutomatchesEmailTask
from ... import constants
class Job(DailyJob):
help = '''
    Sends periodic email notifications telling users the total number of
    automatches they have in their profiles
'''
def execute(self):
today = now()
week_day = calendar.weekday(today.year, today.month, today.day)
if week_day in constants.PERIODIC_AUTOMATCHES_PROGRAMMED_WEEK_DAYS:
SendPeriodicAutomatchesEmailTask.delay()
|
[
"jchauvel@gmail.com"
] |
jchauvel@gmail.com
|
4e7cd1f106c73485b089537adf4a40e89a4adc54
|
aa0270b351402e421631ebc8b51e528448302fab
|
/sdk/servicefabricmanagedclusters/azure-mgmt-servicefabricmanagedclusters/generated_samples/application_get_operation_example.py
|
f7d2aec71c49e97b87b077dfa6dfe7232f1e77d0
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
fangchen0601/azure-sdk-for-python
|
d04a22109d0ff8ff209c82e4154b7169b6cb2e53
|
c2e11d6682e368b2f062e714490d2de42e1fed36
|
refs/heads/master
| 2023-05-11T16:53:26.317418
| 2023-05-04T20:02:16
| 2023-05-04T20:02:16
| 300,440,803
| 0
| 0
|
MIT
| 2020-10-16T18:45:29
| 2020-10-01T22:27:56
| null |
UTF-8
|
Python
| false
| false
| 1,724
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.servicefabricmanagedclusters import ServiceFabricManagedClustersManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-servicefabricmanagedclusters
# USAGE
python application_get_operation_example.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = ServiceFabricManagedClustersManagementClient(
credential=DefaultAzureCredential(),
subscription_id="00000000-0000-0000-0000-000000000000",
)
response = client.applications.get(
resource_group_name="resRg",
cluster_name="myCluster",
application_name="myApp",
)
print(response)
# x-ms-original-file: specification/servicefabricmanagedclusters/resource-manager/Microsoft.ServiceFabric/preview/2022-08-01-preview/examples/ApplicationGetOperation_example.json
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
fangchen0601.noreply@github.com
|
d4f4e5362e3781d0329078dc23911c801727ea8a
|
3806db5b4bb7a638f30c818a29ccaf2b0ddb2836
|
/test_188.py
|
184ae4a7f28d2b1a7ebb44b0521862a3a9e86548
|
[] |
no_license
|
EomAA/fenics-qa
|
d0a687a7b84c51417e96eeeef9855c0d4ba27dea
|
c37a36a14450d0e7f6432c4726c5d96e0d6c4e96
|
refs/heads/master
| 2021-12-15T12:07:10.316478
| 2017-08-18T09:16:01
| 2017-08-18T09:16:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,256
|
py
|
from dolfin import *
import numpy as np
# Create mesh and define function space
mesh = UnitSquareMesh(4, 4)
V = FunctionSpace(mesh, "Lagrange", 1)
u_e=Expression('1+x[0]*x[0]+2*x[1]*x[1]') #exact solution
# Define Dirichlet boundary (x = 0 or x = 1)
class Left(SubDomain):
def inside(self, x, on_boundary):
return on_boundary and near(x[0]*(1-x[0]),0.0)
#Define the right dirichlet boundary condition
class Right(SubDomain):
def inside(self, x, on_boundary):
return on_boundary and near(x[1]*(1-x[1]), 0.0)
left=Left()
right=Right()
# Define boundary condition
u0 = Expression('1+x[0]*x[0]+2*x[1]*x[1]')
bc = DirichletBC(V, u0, left)
# Define variational problem
u = TrialFunction(V)
v = TestFunction(V)
f = Expression("-6")
ur = Expression('4*x[1]')
a = inner(grad(u), grad(v))*dx
L = f*v*dx + ur*v*ds
# Compute solution
u = Function(V) # u is the solution with CG method
solve(a == L, u, bc)
u_e_Ve = interpolate(u_e, V)
error = (u - u_e_Ve)**2*dx
k=sqrt(assemble(u_e_Ve**2*dx))
E = assemble(error)
print(E)
k=sqrt(assemble(u_e_Ve**2*dx)) #to get relative L2 norm
#print k
#print E
print('L2 norm using CG Method : ',E/k)
#plot(u)
#plot(u_e_Ve)
#interactive()
|
[
"miroslav.kuchta@gmail.com"
] |
miroslav.kuchta@gmail.com
|
3c53678e97a6f2552793138d9aeca60f467499e7
|
3a121f4953c430e450c448417ca40e7dfae9db9a
|
/analysis/visualization.py
|
6efc277f9555343a2669a6bfd4681c32de907bb9
|
[
"MIT"
] |
permissive
|
sadscv/sentiment.datalogue
|
cdcbaa71a16be07f99f6ae502e2da3a4df08cd3f
|
3e7bde9e03394774bfab2582bd936c090639ddc2
|
refs/heads/master
| 2021-05-01T21:53:53.478139
| 2017-03-01T08:11:06
| 2017-03-01T08:11:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,051
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sys, os
sns.set_style('white')
def training_plot(history, outfile, metric='categorical_accuracy', title=''):
"""
Plot training accuracy for each epoch
"""
## Set output file for plot
basepath = os.path.split(os.path.expanduser(outfile))[0]
plotfile = basepath + '_train_plot.png'
## Plot accuracy
f = plt.figure()
ax = f.add_subplot(111)
ax.plot(history.history['val_'+metric], label='test')
ax.plot(history.history[metric], label='train')
ax.set_title(title)
ax.set_xlabel('Epochs')
ax.set_ylabel(metric)
ax.legend()
f.savefig(plotfile)
return f, ax
def plot_single_auc(fpr, tpr, auc_, ax=None, c='b', label=''):
"""
Plots the receiver operating characteristic curve for a single
sequence of false positive rates, true postive rates and auc
"""
ax_ = ax
if ax is None:
f = plt.figure()
ax = f.add_subplot(111)
ax.plot(fpr, tpr, lw=2, color=c,\
label=label + ' AUC:' + str(auc_) )
if ax_ is None:
return f, ax
else:
return ax
def plot_auc(fprs, tprs, aucs, title='Receiver Operating Characteristc', labels=None):
assert len(fprs) == len(tprs), 'must have equal number of FPRs and TPRS'
assert len(tprs) == len(aucs), 'must have equal number of tprs and aucs'
COLORS = sns.color_palette(n_colors=len(aucs))
fig = plt.figure()
ax = fig.add_subplot(111)
labels = [''] * len(aucs) if not labels else labels
assert len(labels) == len(aucs), 'must have equal number of labels as aucs'
    # should probably be more descriptive with variable names...
for f, t, a, c, l in zip(fprs, tprs, aucs, COLORS, labels):
plot_single_auc(f, t, a, ax=ax, c=c, label= l)
ax.plot([0, 1], [0, 1], lw=2, linestyle='--', color='k', label='Random')
ax.set_xlabel('false positive rates')
ax.set_ylabel('true positive rates')
ax.legend()
ax.set_title(title)
return fig, ax
|
[
"zafarali.ahmed@gmail.com"
] |
zafarali.ahmed@gmail.com
|
a94bd8b5497a0c76c0e2d552e57e1fbcfae2cd6f
|
8f436dff6c0681a673d517a1973b6f6b9a43674e
|
/liberapay/testing/mangopay.py
|
4239d88976094ed3b87124cb95f85d07e308d40d
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
ddai00bit/liberapay.com
|
fc483c9b18dcc016bac84f5b4ccf397a3cb25214
|
78c5eb910877e936b91d1dae274b8cf1f82f3191
|
refs/heads/master
| 2023-04-05T21:44:45.641171
| 2021-05-04T07:28:31
| 2021-05-04T07:28:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,193
|
py
|
import itertools
from unittest import mock
from mangopay.resources import (
BankAccount, CardRegistration, NaturalUser, Wallet,
)
import requests
from liberapay.i18n.currencies import Money
from liberapay.models.exchange_route import ExchangeRoute
from liberapay.testing import Harness
from liberapay.testing.vcr import use_cassette
class MangopayHarness(Harness):
def setUp(self):
Harness.setUp(self)
self.david = self.make_participant(
'david', mangopay_user_id=self.david_id,
mangopay_wallet_id=self.david_wallet_id, email='david@example.org'
)
self.janet = self.make_participant(
'janet', mangopay_user_id=self.janet_id,
mangopay_wallet_id=self.janet_wallet_id, email='janet@example.net'
)
self.janet_route = ExchangeRoute.insert(
self.janet, 'mango-cc', self.card_id, 'chargeable', currency='EUR'
)
self.homer = self.make_participant(
'homer', mangopay_user_id=self.homer_id,
mangopay_wallet_id=self.homer_wallet_id, email='homer@example.com'
)
self.homer_route = ExchangeRoute.insert(
self.homer, 'mango-ba', self.bank_account.Id, 'chargeable'
)
def fake_transfer(tr):
tr.Status = 'SUCCEEDED'
    tr.ErrorCode = '000000'
tr.ErrorMessage = None
tr.Id = -1
def fake_wallet(w):
w.Balance = Money.ZEROS[w.Currency]
w.Id = -next(FakeTransfersHarness.wallet_id_serial)
class FakeTransfersHarness(Harness):
wallet_id_serial = itertools.count(1000000)
def setUp(self):
super().setUp()
self.transfer_patch = mock.patch('mangopay.resources.Transfer.save', autospec=True)
_mock = self.transfer_patch.__enter__()
_mock.side_effect = fake_transfer
self.transfer_mock = _mock
self.wallet_patch = mock.patch('mangopay.resources.Wallet.save', autospec=True)
_mock = self.wallet_patch.__enter__()
_mock.side_effect = fake_wallet
self.wallet_mock = _mock
def tearDown(self):
self.transfer_patch.__exit__(None, None, None)
self.wallet_patch.__exit__(None, None, None)
super().tearDown()
def make_mangopay_account(FirstName):
account = NaturalUser()
account.FirstName = FirstName
account.LastName = 'Foobar'
account.CountryOfResidence = 'BE'
account.Nationality = 'BE'
account.Birthday = 0
account.Email = 'nobody@example.net'
account.save()
return account.Id
def make_wallet(mangopay_user_id):
w = Wallet()
w.Owners = [mangopay_user_id]
w.Description = 'test wallet'
w.Currency = 'EUR'
w.save()
return w
def create_card(mangopay_user_id):
cr = CardRegistration()
cr.UserId = mangopay_user_id
cr.Currency = 'EUR'
cr.CardType = 'CB_VISA_MASTERCARD'
cr.save()
data = dict(
accessKeyRef=cr.AccessKey,
cardNumber='3569990000000132',
cardExpirationDate='1234',
cardCvx='123',
data=cr.PreregistrationData,
)
cr.RegistrationData = requests.post(cr.CardRegistrationURL, data).text
cr.save()
return cr
with use_cassette('MangopayOAuth'):
import mangopay
mangopay.get_default_handler().auth_manager.get_token()
with use_cassette('MangopayHarness'):
cls = MangopayHarness
cls.david_id = make_mangopay_account('David')
cls.david_wallet_id = make_wallet(cls.david_id).Id
cls.janet_id = make_mangopay_account('Janet')
cls.janet_wallet_id = make_wallet(cls.janet_id).Id
cr = create_card(cls.janet_id)
cls.card_id = cr.CardId
del cr
cls.homer_id = make_mangopay_account('Homer')
cls.homer_wallet_id = make_wallet(cls.homer_id).Id
ba = BankAccount(user_id=cls.homer_id, type='IBAN')
ba.OwnerName = 'Homer Jay'
ba.OwnerAddress = {
'AddressLine1': 'Somewhere',
'City': 'The City of Light',
'PostalCode': '75001',
'Country': 'FR',
}
ba.IBAN = 'FR1420041010050500013M02606'
ba.save()
cls.bank_account = ba
ba = BankAccount()
ba.Type = 'IBAN'
ba.IBAN = 'IR861234568790123456789012'
cls.bank_account_outside_sepa = ba
|
[
"changaco@changaco.oy.lc"
] |
changaco@changaco.oy.lc
|
316a57fe50150f51e9655515eaec2356b5cbcff5
|
8f64d50494507fd51c0a51010b84d34c667bd438
|
/BeautyForMe/myvenv/Lib/site-packages/phonenumbers/shortdata/region_GU.py
|
05be0b455abfd583e4469c62b75308e3f386e1f1
|
[
"MIT"
] |
permissive
|
YooInKeun/CAU_CSE_Capstone_3
|
5a4a61a916dc13c8635d25a04d59c21279678477
|
51405c4bed2b55661aa0708c8acea17fe72aa701
|
refs/heads/master
| 2022-12-11T15:39:09.721019
| 2021-07-27T08:26:04
| 2021-07-27T08:26:04
| 207,294,862
| 6
| 1
|
MIT
| 2022-11-22T04:52:11
| 2019-09-09T11:37:13
|
Python
|
UTF-8
|
Python
| false
| false
| 654
|
py
|
"""Auto-generated file, do not edit by hand. GU metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_GU = PhoneMetadata(id='GU', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='9\\d\\d', possible_length=(3,)),
toll_free=PhoneNumberDesc(national_number_pattern='911', example_number='911', possible_length=(3,)),
emergency=PhoneNumberDesc(national_number_pattern='911', example_number='911', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='911', example_number='911', possible_length=(3,)),
short_data=True)
|
[
"keun0390@naver.com"
] |
keun0390@naver.com
|
35e1031be1362e0bcb23587c0b39087847e40de3
|
db053c220094368ecb784fbe62375378c97457c2
|
/680.valid-palindrome-ii.py
|
f8da057ab7cd5e88321a11b6221d0afbf1d7bfce
|
[] |
no_license
|
thegamingcoder/leetcode
|
8c16e7ac9bda3e34ba15955671a91ad072e87d94
|
131facec0a0c70d319982e78e772ed1cb94bc461
|
refs/heads/master
| 2020-03-22T14:51:45.246495
| 2018-07-09T00:00:06
| 2018-07-09T00:00:06
| 140,211,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 754
|
py
|
#
# [680] Valid Palindrome II
#
# https://leetcode.com/problems/valid-palindrome-ii/description/
#
# algorithms
# Easy (32.37%)
# Total Accepted: 34.1K
# Total Submissions: 105.4K
# Testcase Example: '"aba"'
#
#
# Given a non-empty string s, you may delete at most one character. Judge
# whether you can make it a palindrome.
#
#
# Example 1:
#
# Input: "aba"
# Output: True
#
#
#
# Example 2:
#
# Input: "abca"
# Output: True
# Explanation: You could delete the character 'c'.
#
#
#
# Note:
#
# The string will only contain lowercase characters a-z.
# The maximum length of the string is 50000.
#
#
#
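# Quick check of the implementation below (added for illustration):
#   Solution().validPalindrome("aba")    -> True   (already a palindrome)
#   Solution().validPalindrome("abca")   -> True   (delete 'c')
#   Solution().validPalindrome("abcdea") -> False  (needs two deletions)
#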
class Solution(object):
    def validPalindrome(self, s):
        """
        :type s: str
        :rtype: bool
        """
        # two pointers; on the first mismatch, try skipping one character
        # on either side and test the rest with a reversed-slice comparison
        i, j = 0, len(s) - 1
        while i < j:
            if s[i] != s[j]:
                a, b = s[i + 1:j + 1], s[i:j]
                return a == a[::-1] or b == b[::-1]
            i += 1
            j -= 1
        return True
|
[
"sharanbale@yahoo-inc.com"
] |
sharanbale@yahoo-inc.com
|
acd12224b507826a13418f4571a4bf7e1932ceaa
|
74ace85cc5b5e721f6c2433153277c60135f356a
|
/jlm/src/jlm/tests/conftest.py
|
467723d5465a376bcfd3f986602b9c1e1fd87ea7
|
[
"MIT"
] |
permissive
|
tkf/JuliaManager.jl
|
c24839777bf8d11bf72eeeaf0d0fe5d59715c1fe
|
be4586e0965a7beb6248ea503ac48ac3d43ec0f0
|
refs/heads/master
| 2020-05-03T16:59:36.062145
| 2020-02-10T08:47:38
| 2020-02-10T08:47:38
| 178,736,172
| 9
| 2
|
MIT
| 2020-02-10T08:47:40
| 2019-03-31T20:04:01
|
Python
|
UTF-8
|
Python
| false
| false
| 311
|
py
|
import pytest # type: ignore
from .. import cli
from .testing import changingdir
@pytest.fixture
def cleancwd(tmp_path):
newcwd = tmp_path / "cleancwd"
with changingdir(newcwd):
yield newcwd
@pytest.fixture
def initialized(cleancwd):
cli.run(["--verbose", "init"])
return cleancwd
|
[
"aka.tkf@gmail.com"
] |
aka.tkf@gmail.com
|
076707f145a54563bd0cbe046327482dd9339a70
|
0728513cfd064b8f6c130d42ad8ef79f49b6b9b2
|
/test/test_tpc_gain.py
|
49c8104d09f97361636986a1b645e67262dc1a47
|
[] |
no_license
|
XENONnT/pmts-api-client
|
7e70574e45c3e1e639b066513c7f07047ac4dd30
|
2b1025fc6cec01726e2d555f609c148891c6d879
|
refs/heads/master
| 2022-12-10T02:04:12.942994
| 2020-09-27T15:39:09
| 2020-09-27T15:39:09
| 276,297,656
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,886
|
py
|
# coding: utf-8
"""
XENON PMT API
API for the XENON PMT database # noqa: E501
The version of the OpenAPI document: 1.0
Contact: joe.mosbacher@gmail.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import xepmts
from xepmts.models.tpc_gain import TpcGain # noqa: E501
from xepmts.rest import ApiException
class TestTpcGain(unittest.TestCase):
"""TpcGain unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test TpcGain
        include_optional is a boolean: when False only required
params are included, when True both required and
optional params are included """
# model = xepmts.models.tpc_gain.TpcGain() # noqa: E501
if include_optional :
return TpcGain(
detector = 'tpc',
experiment = 'xenonnt',
run_id = '0',
timestamp = 56,
pmt_index = 56,
gain = 1.337,
gain_err = 1.337,
gain_stat_err = 1.337,
gain_sys_err = 1.337,
voltage = 1.337,
occupancy = 1.337,
occupancy_err = 1.337,
id = '0'
)
else :
return TpcGain(
detector = 'tpc',
experiment = 'xenonnt',
run_id = '0',
pmt_index = 56,
gain = 1.337,
gain_err = 1.337,
)
def testTpcGain(self):
"""Test TpcGain"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
[
"joe.mosbacher@gmail.com"
] |
joe.mosbacher@gmail.com
|
3cacf1d37e787bfb185abf4a6735e3618ff9d9a5
|
2491df3f643539e6055bb0b2a4b659474c57491f
|
/computeFactorial.py
|
6c4b4ad44d45c903c4df51a2cc44c0863dc5ec5f
|
[] |
no_license
|
ghilbing/Ejemplos
|
85efc91346028b8a3d26d7680d9286b26234c771
|
339a45ef48c9a61002a01f7c823cc42d34fab409
|
refs/heads/master
| 2021-05-13T13:58:33.010157
| 2018-02-26T20:44:44
| 2018-02-26T20:44:44
| 116,724,506
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
def factorial(A):
if A <= 1:
return 1
else:
A = A * factorial(A-1)
return A
A = 6
print(factorial(A))
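# Sanity check (added): 6! = 6*5*4*3*2*1 = 720, so the call above prints 720.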
|
[
"ghilbing@gmail.com"
] |
ghilbing@gmail.com
|
f1edb501954b262818ad2951e48337e3c1f506aa
|
a5103b7d5066138ac1a9aabc273361491a5031cd
|
/daily/8/DeepLearning/myproject/beatifulFace/blend.py
|
bbc8d6693925aac1e83b1ac66618bd37ee1b3f74
|
[] |
no_license
|
mckjzhangxk/deepAI
|
0fa2f261c7899b850a4ec432b5a387e8c5f13e83
|
24e60f24b6e442db22507adddd6bf3e2c343c013
|
refs/heads/master
| 2022-12-13T18:00:12.839041
| 2021-06-18T03:01:10
| 2021-06-18T03:01:10
| 144,862,423
| 1
| 1
| null | 2022-12-07T23:31:01
| 2018-08-15T14:19:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,159
|
py
|
import cv2
import numpy as np
from collections import defaultdict
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import spsolve,cg,eigsh
def gauss_pyramid(I):
ret=[I]
n=int(np.ceil(np.log2(min(I.shape[:2])//16)))
for i in range(1,n+1):
ret.append(cv2.pyrDown(ret[i-1]))
return ret
def laplacian_pyramid(gs):
ret=[gs[-1]]
n=len(gs)
for i in range(n-2,-1,-1):
g=gs[i]
H,W=g.shape[:2]
L=cv2.subtract(g,cv2.pyrUp(gs[i+1],dstsize=(W,H)))
ret.append(L)
ret.reverse()
return ret
def blend_laplician_pyramid(ls_a,ls_b,gs_mask):
final_la=[]
for m,la,lb in zip(gs_mask,ls_a,ls_b):
m=m[:,:,np.newaxis]
final_la.append(m*la+(1-m)*lb)
return final_la
def sum_laplacian_pyramid(ls):
ret=ls[-1]
n=len(ls)
for i in range(n-2,-1,-1):
L=ls[i]
H,W=L.shape[:2]
ret=cv2.add(L,cv2.pyrUp(ret,dstsize=(W,H)))
return ret
def blend(img_a,img_b,mask):
la_=laplacian_pyramid(gauss_pyramid(img_a))
lb_=laplacian_pyramid(gauss_pyramid(img_b))
g_mask=gauss_pyramid(mask)
return sum_laplacian_pyramid(blend_laplician_pyramid(la_,lb_,g_mask))
def isOMEGA(mask):
nz=np.nonzero(mask)
return set(zip(nz[1],nz[0]))
def getBoundary(mask):
kernel=np.ones((3,3),'int')
inside=cv2.erode(mask,kernel)
boundary=cv2.bitwise_xor(mask,inside)
return isOMEGA(boundary),boundary
def point2VectorIndex(pts):
return {(x[0],x[1]):i for i,x in enumerate(pts)}
def adj(x,y):
return [(x-1,y),(x+1,y),(x,y-1),(x,y+1)]
def grid_matrix_param(mask):
'''
:param mask:array(H,W) 0/1
:return:
data:(x,y,value)
    N: the size of the system matrix
    T: key = row index in the matrix, value = (x, y) coordinates of the
       adjacent points
'''
pts=isOMEGA(mask)
boundary_pts,_=getBoundary(mask)
dict_index=point2VectorIndex(pts)
N=len(pts)
data=[]
row=[]
col=[]
T=defaultdict(list)
def f(p):
pindex=dict_index[p]
data.append(4.0)
row.append(pindex)
col.append(pindex)
if p not in boundary_pts:
for q in adj(*p):
data.append(-1.0)
row.append(pindex)
col.append(dict_index[q])
else:
for q in adj(*p):
if q in pts:
data.append(-1.0)
row.append(pindex)
col.append(dict_index[q])
else:
T[pindex].append(q)
for _ in map(f,pts):pass
return (data,(row,col)),N,T,dict_index
def dict_index_to_array(data):
index,xs,ys=[],[],[]
for pts,i in data.items():
index.append(i)
xs.append(pts[0])
ys.append(pts[1])
return index,xs,ys
def process(source, target, mask):
data,N,T,dict_index=grid_matrix_param(mask)
indexes,xs,ys=dict_index_to_array(dict_index)
A = csc_matrix(data, dtype=float)
# Create B matrix
channels=source.shape[2]
b = np.zeros((N,channels), dtype=float)
b[indexes]=source[ys,xs]
for index,pts in T.items():
for p in pts:
b[index]+=target[p[1],p[0]]
composite = np.copy(target)
# x = spsolve(A, b)
for i in range(channels):
x=cg(A,b[:,i])
composite[ys,xs,i]=np.clip(x[0][indexes],0,255)
return composite
from datetime import datetime
if __name__ == '__main__':
mask=np.zeros((800,600),'uint8')
mask[30:130,70:150]=1
src=np.zeros((800,600,3),'uint8')
target=np.zeros((800,600,3),'uint8')
# omada=isOMEGA(mask)
#
# boundary,boundary_img=getBoundary(mask)
#
# for x,y in boundary:
# mask[y,x]=128
# d=point2VectorIndex(omada)
# print(len(d))
# print(boundary)
# data,N,T,dict_index=grid_matrix_param(mask)
# a,b,c=dict_index_to_array(dict_index)
# assert N==len(dict_index)
# for k,v in T.items():
# for vv in v:
# mask[vv[1],vv[0]]=128
# cv2.imshow('mask',mask*255)
# cv2.waitKey(0)
s=datetime.now()
sss=process(src,target,mask)
print(sss.dtype)
print(datetime.now()-s)
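# Hedged usage sketch for the Laplacian-pyramid path (illustrative only;
# 'a.jpg' and 'b.jpg' are assumed file names, not part of this script):
# img_a = cv2.imread('a.jpg')
# img_b = cv2.imread('b.jpg')                  # must match img_a's shape
# m = np.zeros(img_a.shape[:2], np.float32)
# m[:, :img_a.shape[1] // 2] = 1.0             # take the left half from img_a
# out = blend(img_a, img_b, m)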
|
[
"mckj_zhangxk@163.com"
] |
mckj_zhangxk@163.com
|
f727a53af8f9c8d1bfa78ce5468ab0fbad85aca9
|
abc422f58ad053bcbb6653ba15b66e46d220a199
|
/tcutils/pkgs/Traffic/traffic/utils/util.py
|
61b1ab3bf3f6a08f841d5824248dd1046f7f4d8e
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
tungstenfabric/tf-test
|
d3efff59bca931b614d0008260b2c0881d1fc009
|
4b9eca7eb182e5530223131ecab09d3bdf366407
|
refs/heads/master
| 2023-02-26T19:14:34.345423
| 2023-01-11T08:45:18
| 2023-01-11T10:37:25
| 265,231,958
| 8
| 22
| null | 2023-02-08T00:53:29
| 2020-05-19T11:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 299
|
py
|
import socket
def is_v4(address):
try:
socket.inet_pton(socket.AF_INET, address)
except socket.error:
return False
return True
def is_v6(address):
try:
socket.inet_pton(socket.AF_INET6, address)
except socket.error:
return False
return True
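# Added examples: is_v4('192.168.0.1') -> True, is_v4('::1') -> False,
# is_v6('::1') -> True, is_v6('192.168.0.1') -> False.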
|
[
"andrey-mp@yandex.ru"
] |
andrey-mp@yandex.ru
|
f010a4d16c12e85270a596fc2f31a8841ac64dc2
|
9a04de8acae6b9d5f134ab04ce4573acd05be10c
|
/facebook_pages/factories.py
|
7b37712ec2d1dfb0311b86476d9e42424e912116
|
[
"BSD-3-Clause"
] |
permissive
|
bmcool/django-facebook-pages
|
046fb5727008dc0f5bf20a6201006466e89bec1d
|
44ae645c93a37e741ceda018daaa8def10acd1ad
|
refs/heads/master
| 2021-01-18T07:48:13.249597
| 2013-06-09T13:37:16
| 2013-06-09T13:37:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
from models import Page
import factory
import random
class PageFactory(factory.Factory):
FACTORY_FOR = Page
graph_id = factory.Sequence(lambda n: n)
|
[
"ramusus@gmail.com"
] |
ramusus@gmail.com
|
e2cd93ae33ad1783ad4ed4faeafd03fbf503f425
|
515a97129ce1b2b8eecca4b2087fde8985b82d5b
|
/Code-Scraps/old_modules/SpiceBot/Main/muricah.py
|
703d9a78bcb6c74111c29fcabd8c8e38187eb98e
|
[] |
no_license
|
SpiceBot/scraps
|
3ad6e81ac75e2b6a684fea64eb7e75477b0f4f63
|
90125e1397b57ac87cae5f3e506363aa04ddffdc
|
refs/heads/master
| 2020-05-02T21:51:01.297114
| 2019-03-28T15:38:28
| 2019-03-28T15:38:28
| 178,232,887
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,152
|
py
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import unicode_literals, absolute_import, print_function, division
import sopel.module
import sys
import os
moduledir = os.path.dirname(__file__)
shareddir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
sys.path.append(shareddir)
from BotShared import *
# author jimender2
@sopel.module.commands('muricah')
def mainfunction(bot, trigger):
enablestatus, triggerargsarray, botcom, instigator = spicebot_prerun(bot, trigger, 'muricah')
if not enablestatus:
# IF "&&" is in the full input, it is treated as multiple commands, and is split
commands_array = spicemanip(bot, triggerargsarray, "split_&&")
if commands_array == []:
commands_array = [[]]
for command_split_partial in commands_array:
triggerargsarray_part = spicemanip(bot, command_split_partial, 'create')
execute_main(bot, trigger, triggerargsarray_part, botcom, instigator)
def execute_main(bot, trigger, triggerargsarray, botcom, instigator):
msg = trigger.nick + " shoots a toaster or something."
osd(bot, trigger.sender, 'say', msg)
|
[
"sam@deathbybandaid.net"
] |
sam@deathbybandaid.net
|
6e63e02f7cb85f88fae930c14c63504884d425e5
|
163808746e51d378f69a966645b8bb8a855b4625
|
/MyMain1012/MyMain1012/MyModules.py
|
1044ab01075533ee8a21af408e08c251ab99f0f0
|
[] |
no_license
|
0024thiroshi/comm5.0_fall_semester
|
02b26b506b759dd7b18b963295a8908cb4a78245
|
db350599b7085e56fbf2c316e74cd7a5b48f02b8
|
refs/heads/main
| 2023-02-12T13:07:34.080809
| 2021-01-13T06:03:04
| 2021-01-13T06:03:04
| 329,202,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,963
|
py
|
def getDF(file_name,sheet_name):
import pandas as pd
DF1=pd.read_excel(file_name,sheet_name=sheet_name)
return DF1
def getS(DF,n1):
import pandas as pd
S1=pd.Series(DF.iloc[:,n1])
return S1
def extractDF(DF,n1,n2):
DF2=DF.iloc[n1:n1+n2,:]
return DF2
def drawS(S1,S2):
import matplotlib.pyplot as plt
if len(S1)==len(S2):
plt.scatter(S1,S2)
plt.show()
else:
        print("The two Series differ in size")
def extractDFRow(DF,n1,n2):
DF2=DF.iloc[:,n1:n1+n2]
return DF2
def getDFAverage(DF):
import pandas as pd
a=[]
for i in range(len(DF)):
a.append(sum(DF.iloc[i])/len(DF.iloc[i]))
S1=pd.Series(a)
return S1
def get_corr(v1,v2):
import pandas as pd
V1=pd.Series(v1)
V2=pd.Series(v2)
d=V1.corr(V2)
return d
import pandas as pd
def compoundSeries(s1: pd.Series, s2:pd.Series)->pd.DataFrame:
df=pd.DataFrame([s1,s2])
return df
def get_sin(a: list, Nsample: int, time_step: float)->list:
import math
amp=[0]*Nsample
for i in range(len(a)):
for j in range(Nsample):
amp[j]+=(math.sin(2*math.pi*a[i]*j*time_step))
return amp
from scipy.signal import butter,lfilter
def butter_bandpass(lowcut,highcut, fs, order=5):
nyq = 0.5*fs
low = lowcut/nyq
high = highcut/nyq
b,a = butter(order,[low, high],btype='band')
return b,a
def butter_bandpass_filter(data,lowcut,highcut, fs, order=5):
b, a = butter_bandpass(lowcut,highcut, fs, order=5)
y = lfilter(b, a, data)
return y
def myConv(stim: list, base:list)->list:
import numpy as np
conv=np.convolve(stim,base)
return conv
def myConvError(stim:list, base:list, data:list)->float:
import numpy as np
conv=np.convolve(stim,base)
sum=0
for i in range(len(data)):
sum+=(data[i]-conv[i])**2
return sum
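# Hedged usage sketch (added; the sampling rate and band edges below are
# assumptions, not values from the original module):
if __name__ == '__main__':
    import numpy as np
    fs = 500.0  # assumed sampling rate in Hz
    t = np.arange(0, 1.0, 1.0 / fs)
    sig = np.sin(2 * np.pi * 10 * t) + np.sin(2 * np.pi * 50 * t)
    # keep the 40-60 Hz band: the 50 Hz component passes, 10 Hz is attenuated
    filtered = butter_bandpass_filter(sig, 40.0, 60.0, fs, order=5)
    print(filtered[:5])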
|
[
"“0024thiroshi@gmail.com”"
] |
“0024thiroshi@gmail.com”
|
fa9400116b1cf68b3c2af2c6480e3869053378ed
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2573/60719/278572.py
|
ef5e2025d2f5fb9f2a0e35de41649f4a13d5b420
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
def handle_each_use_case():
return 2 ** (int(input())-1)
num = int(input())
for i in range(num):
res = handle_each_use_case()
print(res)
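# Added note: each case reads one integer n and prints 2**(n-1);
# e.g. n=4 gives 8.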
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
e028a43d424f376814f87e346f021b1ca842d883
|
6c898145b3581b87b76a2b16658ad1d0a2aeee4a
|
/demo4_redrect.py
|
ab84ca2e0e3c203f437ab67ac1b26e110626d070
|
[] |
no_license
|
Jasonmes/Flask-model
|
080f3e44f64d7684c9fe1edf731cf7481615ea0f
|
99f9ff9141434baedc7d048ac3bfb51134919591
|
refs/heads/master
| 2020-03-26T11:47:39.081133
| 2018-08-15T13:59:40
| 2018-08-15T13:59:40
| 144,860,136
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 656
|
py
|
from flask import Flask,redirect,url_for
app = Flask(__name__)
@app.route('/')
def index():
"""
    Custom status code:
    the return value is a tuple of (body, status code)
:return:
"""
    return "the reverse function is calling index", 666
@app.route("/demo1")
def demo():
    # Redirect to the itheima official site
    # argument: just pass the URL to redirect to
return redirect("http://www.itheima.com")
@app.route('/demo2')
def demo2():
    # Redirect to our own index page
    # url_for is the reverse-resolution function:
    # url_for(view function name) returns the URL mapped to that view function
return redirect(url_for('index'))
if __name__ == '__main__':
app.run(debug=True)
|
[
"wow336@163.com"
] |
wow336@163.com
|
74928f2f18abb1478e911324438ca62f5b05c88f
|
9f059fd982f2c0a9d6a43cb4665b5adf0552c889
|
/src/models/model.py
|
0d66e2a7ab258eb5b83d9f4ecd74681b12da1539
|
[] |
no_license
|
yamad07/domain-transfer-network
|
2a42de636febd4da0ceaacac32832a7f9605f820
|
b767628f9afa6e760a0708dedd22e6a530cd730b
|
refs/heads/master
| 2020-06-12T06:06:35.578911
| 2019-07-12T05:22:52
| 2019-07-12T05:22:52
| 194,216,261
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,262
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .layers.cnn import encoder_layer
from .layers.cnn import decoder_layer
from .layers.cnn import discriminator_layer
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
self.c1 = encoder_layer(3, 64, 3)
self.c2 = encoder_layer(64, 128, 3)
self.c3 = encoder_layer(128, 256, 3)
self.c4 = nn.Sequential(
nn.Conv2d(256,
128,
stride=2,
kernel_size=4,
padding=0,
),
nn.ReLU(inplace=True)
)
def forward(self, x):
batch_size = x.size(0)
h = self.c1(x)
h = self.c2(h)
h = self.c3(h)
h = self.c4(h)
h = h.view(batch_size, -1)
return h
class Decoder(nn.Module):
def __init__(self):
super(Decoder, self).__init__()
self.conv_block1 = decoder_layer(128, 512, 4, 0)
self.conv_block2 = decoder_layer(512, 256, 4, 1)
self.conv_block3 = decoder_layer(256, 128, 4, 1)
self.conv4 = nn.ConvTranspose2d(128, 3, kernel_size=4,
stride=2, padding=1)
def forward(self, x):
batch_size = x.size(0)
x = x.view(batch_size, 128, 1, 1)
x = self.conv_block1(x)
x = self.conv_block2(x)
x = self.conv_block3(x)
x = self.conv4(x)
return x
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.conv_block1 = discriminator_layer(3, 128)
self.conv_block2 = discriminator_layer(128, 256)
self.conv_block3 = discriminator_layer(256, 512)
self.c4 = nn.Sequential(
nn.Conv2d(512,
3,
stride=2,
kernel_size=4,
padding=0,
),
nn.ReLU(inplace=True)
)
def forward(self, x):
batch_size = x.size(0)
h = self.conv_block1(x)
h = self.conv_block2(h)
h = self.conv_block3(h)
h = self.c4(h)
return F.log_softmax(h.view(batch_size, -1), dim=1)
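# Hedged shape sketch (added; assumes each encoder_layer halves and each
# decoder_layer doubles the spatial size, which .layers.cnn does not show here):
# x = torch.randn(8, 3, 32, 32)
# z = Encoder()(x)       # 32 -> 16 -> 8 -> 4 -> 1x1, flattened to (8, 128)
# x_hat = Decoder()(z)   # 1 -> 4 -> 8 -> 16 -> 32, giving (8, 3, 32, 32)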
|
[
"yuhsukeshootsfc@gmail.com"
] |
yuhsukeshootsfc@gmail.com
|
f18c3055fb82ab2adce6fe45db715962d9b8bc34
|
6c26a9bd075d3d54a307d7c1e5a0bc67b50df8c2
|
/python_basics/python3/04_less_than.py
|
f7630bf4edcf6b730f1c11ee4f5d8c76607a9ec6
|
[] |
no_license
|
marialobillo/dataquest
|
86efc49c0339c07e6263d428b5ecd2f80d395ecb
|
49e8b653adf23a12fb9eb6a972d85bc1797dba0a
|
refs/heads/master
| 2021-08-28T08:01:36.301087
| 2017-12-11T16:02:18
| 2017-12-11T16:02:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
print(crime_rates)
second_500 = (crime_rates[1] < 500)
second_371 = (crime_rates[1] <= 371)
second_last = (crime_rates[1] <= crime_rates[len(crime_rates) - 1])
|
[
"maria.lobillo.santos@gmail.com"
] |
maria.lobillo.santos@gmail.com
|
4391865f95a88bc614dc1f2ea5a691b2ae243675
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-servicefabric/azure/servicefabric/models/paged_secret_resource_description_list.py
|
8ec32f9fc767fa8832874709ee2fc8da16810dc3
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,806
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PagedSecretResourceDescriptionList(Model):
"""The list of secret resources. The list is paged when all of the results
cannot fit in a single message. The next set of results can be obtained by
executing the same query with the continuation token provided in this list.
:param continuation_token: The continuation token parameter is used to
obtain next set of results. The continuation token is included in the
response of the API when the results from the system do not fit in a
single response. When this value is passed to the next API call, the API
returns next set of results. If there are no further results, then the
continuation token is not included in the response.
:type continuation_token: str
:param items: One page of the list.
:type items: list[~azure.servicefabric.models.SecretResourceDescription]
"""
_attribute_map = {
'continuation_token': {'key': 'ContinuationToken', 'type': 'str'},
'items': {'key': 'Items', 'type': '[SecretResourceDescription]'},
}
def __init__(self, **kwargs):
super(PagedSecretResourceDescriptionList, self).__init__(**kwargs)
self.continuation_token = kwargs.get('continuation_token', None)
self.items = kwargs.get('items', None)
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
4ee25d36a93847380f36f2e3bf144325c47882a5
|
d7e65c505573b90916a953d7a13d29a801c226f9
|
/test.py
|
418e12a1921b1086465a0f47ec8d2d2ecd6d9422
|
[] |
no_license
|
smartfile/client-js
|
1f1e60c4fb758aff3b9e371a937e7aa2c83f8dbc
|
6338a1442dc6298450ea1f6e15430cb4d1a092ec
|
refs/heads/master
| 2021-01-17T11:28:05.853979
| 2016-05-31T15:07:06
| 2016-05-31T15:07:06
| 3,065,301
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,952
|
py
|
#!/bin/env python
import os, string, cgi, time, webbrowser, threading, socket
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
JSON = '{ text: "This is the response." }'
PORT = 8000
class LaunchBrowser(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.start()
def run(self):
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect(('localhost', PORT))
s.shutdown(2)
break
except:
time.sleep(0.5)
webbrowser.open('file://%s' % os.path.join(os.getcwd(), 'test.html'))
class TestHandler(BaseHTTPRequestHandler):
def do_GET(self):
try:
try:
self.path, qs = self.path.split('?', 2)
qs = cgi.parse_qs(qs)
except ValueError:
qs = {}
if self.path == '/ajax/':
self.send_response(200)
self.send_header('Content-type', 'text/javascript')
self.send_header('Access-Control-Allow-Origin', self.headers.get('Origin', '*'))
self.send_header('Access-Control-Allow-Credentials', 'true')
self.end_headers()
if 'callback' in qs: #jsonp:
self.wfile.write('%s(%s);' % (qs['callback'][0], JSON))
else:
self.wfile.write(JSON)
return
except Exception, e:
self.send_error(500, str(e))
self.send_error(404, 'File Not Found: %s' % self.path)
def do_POST(self):
self.send_error(404, 'File Not Found: %s' % self.path)
def main():
try:
launch = LaunchBrowser()
server = HTTPServer(('localhost', PORT), TestHandler)
server.serve_forever()
except KeyboardInterrupt:
server.socket.close()
if __name__ == '__main__':
main()
|
[
"btimby@gmail.com"
] |
btimby@gmail.com
|
42189d44df4bedda4aa9fd28ec1a2b8f5dd5d4fd
|
d993f821da125498b6dfb01792fcd24c83ae7e34
|
/AllAboutDictionaries/DictionaryMethods.py
|
eb5648801932cd448a1ea6c71d34ab68bef54352
|
[] |
no_license
|
Arjuna1513/Python_Practice_Programs
|
2c8370d927c8bade2d2b0b5bd0345c7d5f139202
|
7c72600d72f68afee62ee64be25d961822429aeb
|
refs/heads/master
| 2020-06-24T02:36:03.186924
| 2019-07-25T14:31:02
| 2019-07-25T14:31:02
| 198,824,589
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 944
|
py
|
dict1 = {1:2, 3:4, 'a':'b', 5:{1:2}}
# print(dict1)
#
# print(len(dict1)) # prints length of dict1
#
# print(dict1.items()) # Returns a list of items with both key and value pairs and since the list is
# # returned we are able to iterate over it
#
# print(dict1.values()) # returns only list of values
#
# print(dict1.keys()) # returns list of keys
#
# print(dict1.get('a')) # returns value associated with key, if not found none is returned.
#
# print(dict1.copy()) # returns a copy of the dictionary
#
# dict2 = dict1.copy()
# print(dict2)
# print(dict1.popitem()) # popitem removes the last element
# print(dict1)
# print(dict1.pop('a')) # deletes the key, value pair of mentioned key
# print(dict1)
print(dict1.__getitem__('a')) # returns the value of key 'a'
print(dict1.__contains__('a')) # returns true if 'a' key is present else returns false
print(dict1.__delitem__('a')) # deletes the given item but won't return it (prints None)
|
[
"malli00022@gmail.com"
] |
malli00022@gmail.com
|
ca3c3609c7fadfa9093e7241d467a95b7f74bf4e
|
1346ea1f255d3586442c8fc1afc0405794206e26
|
/알고리즘/day16/two_string.py
|
48ecca480a31b18ae28d058cc47f4bd46267826e
|
[] |
no_license
|
Yun-Jongwon/TIL
|
737b634b6e75723ac0043cda9c4f9acbc2a24686
|
a3fc624ec340643cdbf98974bf6e6144eb06a42f
|
refs/heads/master
| 2020-04-12T00:41:03.985080
| 2019-05-01T07:55:25
| 2019-05-01T07:55:25
| 162,208,477
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 533
|
py
|
T=int(input())
for t in range(T):
num1,num2=map(int,input().split())
data1=list(map(int,input().split()))
data2=list(map(int,input().split()))
if len(data1)>len(data2):
short_data=data2
long_data=data1
else:
short_data=data1
long_data=data2
    max_sum = -500
    for i in range(len(long_data)-len(short_data)+1):
        new_sum = 0
        for j in range(len(short_data)):
            new_sum += short_data[j]*long_data[j+i]
        if new_sum > max_sum:
            max_sum = new_sum
    print(max_sum)
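# Quick self-check of the sliding dot-product above (illustrative data,
# not from the judge): short [1, 2] slid over long [3, 1, 2].
#   offset 0: 1*3 + 2*1 = 5
#   offset 1: 1*1 + 2*2 = 5
check_short = [1, 2]
check_long = [3, 1, 2]
best = -500
for i in range(len(check_long) - len(check_short) + 1):
    acc = 0
    for j in range(len(check_short)):
        acc += check_short[j] * check_long[j + i]
    best = max(best, acc)
assert best == 5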
|
[
"dmdmdkdk@daum.net"
] |
dmdmdkdk@daum.net
|
f664f43615dfd3188c09cb82b2cee07f916100ce
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-network/azure/mgmt/network/v2019_02_01/models/virtual_network.py
|
c9cd60b38e95b54f4fe594909f1af0f04be05a36
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 4,724
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualNetwork(Resource):
"""Virtual Network resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param address_space: The AddressSpace that contains an array of IP
address ranges that can be used by subnets.
:type address_space: ~azure.mgmt.network.v2019_02_01.models.AddressSpace
:param dhcp_options: The dhcpOptions that contains an array of DNS servers
available to VMs deployed in the virtual network.
:type dhcp_options: ~azure.mgmt.network.v2019_02_01.models.DhcpOptions
:param subnets: A list of subnets in a Virtual Network.
:type subnets: list[~azure.mgmt.network.v2019_02_01.models.Subnet]
:param virtual_network_peerings: A list of peerings in a Virtual Network.
:type virtual_network_peerings:
list[~azure.mgmt.network.v2019_02_01.models.VirtualNetworkPeering]
:param resource_guid: The resourceGuid property of the Virtual Network
resource.
:type resource_guid: str
:param provisioning_state: The provisioning state of the PublicIP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param enable_ddos_protection: Indicates if DDoS protection is enabled for
all the protected resources in the virtual network. It requires a DDoS
protection plan associated with the resource. Default value: False .
:type enable_ddos_protection: bool
:param enable_vm_protection: Indicates if VM protection is enabled for all
the subnets in the virtual network. Default value: False .
:type enable_vm_protection: bool
:param ddos_protection_plan: The DDoS protection plan associated with the
virtual network.
:type ddos_protection_plan:
~azure.mgmt.network.v2019_02_01.models.SubResource
:param etag: Gets a unique read-only string that changes whenever the
resource is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'address_space': {'key': 'properties.addressSpace', 'type': 'AddressSpace'},
'dhcp_options': {'key': 'properties.dhcpOptions', 'type': 'DhcpOptions'},
'subnets': {'key': 'properties.subnets', 'type': '[Subnet]'},
'virtual_network_peerings': {'key': 'properties.virtualNetworkPeerings', 'type': '[VirtualNetworkPeering]'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'enable_ddos_protection': {'key': 'properties.enableDdosProtection', 'type': 'bool'},
'enable_vm_protection': {'key': 'properties.enableVmProtection', 'type': 'bool'},
'ddos_protection_plan': {'key': 'properties.ddosProtectionPlan', 'type': 'SubResource'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(VirtualNetwork, self).__init__(**kwargs)
self.address_space = kwargs.get('address_space', None)
self.dhcp_options = kwargs.get('dhcp_options', None)
self.subnets = kwargs.get('subnets', None)
self.virtual_network_peerings = kwargs.get('virtual_network_peerings', None)
self.resource_guid = kwargs.get('resource_guid', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.enable_ddos_protection = kwargs.get('enable_ddos_protection', False)
self.enable_vm_protection = kwargs.get('enable_vm_protection', False)
self.ddos_protection_plan = kwargs.get('ddos_protection_plan', None)
self.etag = kwargs.get('etag', None)
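# A minimal construction sketch (values are hypothetical placeholders;
# AddressSpace is the sibling model from this same package):
#
#     from azure.mgmt.network.v2019_02_01.models import AddressSpace
#     vnet = VirtualNetwork(
#         location='eastus',
#         address_space=AddressSpace(address_prefixes=['10.0.0.0/16']),
#     )
#     print(vnet.location, vnet.address_space.address_prefixes)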
|
[
"noreply@github.com"
] |
xiafu-msft.noreply@github.com
|
8f1d1c60025749c1d3af208a4bd1b6b6cfc35348
|
94fb04ab0cb16fd180b6ef0ca22176dd31dea4f8
|
/code@smart_irrigation.py
|
007ab4961e728e9d563d1e1a4796bc2309d6224a
|
[] |
no_license
|
SmartPracticeschool/llSPS-INT-2310-smart-irrigation-system-based-on-IOT-using-random-values-n-weather-api-
|
97a5fda6e640767a9ee830a709240df57cbf9750
|
1de1e04929ef8ea052e7ed70acd97b87e77bdfab
|
refs/heads/master
| 2022-11-04T00:49:22.602410
| 2020-06-17T14:05:48
| 2020-06-17T14:05:48
| 265,819,817
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,609
|
py
|
import requests
import sys
import time
import ibmiotf.application
import ibmiotf.device
import random
r=requests.get('http://api.openweathermap.org/data/2.5/weather?q=Guntur,IN&appid=42a67b9e8ecd9620c2fe1471361c3e53')
#Provide your IBM Watson Device Credentials
organization = "w1gnzn"
deviceType = "raspberrypi"
deviceId = "123456"
authMethod = "token"
authToken = "123456789"
def myCommandCallback(cmd):
print("Command received: %s" % cmd.data['command'])
if cmd.data['command']=='motoron':
print("Motor is ON")
elif cmd.data['command']=='motoroff':
print("Motor is OFF")
try:
deviceOptions = {"org": organization, "type": deviceType, "id": deviceId, "auth-method": authMethod, "auth-token": authToken}
deviceCli = ibmiotf.device.Client(deviceOptions)
#..............................................
except Exception as e:
print("Caught exception connecting device: %s" % str(e))
sys.exit()
# Connect and send a datapoint "hello" with value "world" into the cloud as an event of type "greeting" 10 times
deviceCli.connect()
#print("response is")
#print(r.json())
#for i in r.json():
#print(i)
#print(r.json()["main"])
#print("temparature value:")
#print(r.json()["main"]["temp"])
while True:
print("humidity value:")
print(r.json()["main"]["humidity"])
hum=r.json()["main"]["humidity"]
temk=r.json()["main"]["temp"]
#print("temperature in kelvin is:",temk)
temperature=temk-272.15
print("temperature in celcius is:",temperature)
mois=random.randrange(20,60,2)
print("moisture level of soil is:",mois)
    if temperature > 32 or mois < 35:  # 'or', not bitwise '|': '|' binds tighter and compares the wrong operands
req_sms=requests.get('https://www.fast2sms.com/dev/bulk?authorization=TPnud1eh5Bfyt2FpHoWXGwlC7NSsKYLmIz6MEvRi8a93jgAZbDDvuxwEg9eBdjmP7OLRpJ2MsIhoZ54a&sender_id=FSTSMS&message=Temperature,Moisture%20level%20of%20soil%20are%20improper&language=english&route=p&numbers=7075001212,9121852344')
data = { 'Temperature' : temperature, 'Moisture': mois, 'Humidity': hum }
#print (data)
def myOnPublishCallback():
print ("Published Temperature = %s C" % temperature, "Humidity = %s %%" % hum, "to IBM Watson")
success = deviceCli.publishEvent("Weather", "json", data, qos=0, on_publish=myOnPublishCallback)
if not success:
print("Not connected to IoTF")
time.sleep(2)
deviceCli.commandCallback = myCommandCallback
# Disconnect the device and application from the cloud
deviceCli.disconnect()
|
[
"noreply@github.com"
] |
SmartPracticeschool.noreply@github.com
|
034ff7aa8e6769c53f7c8c08a4bf5c226f1a1f80
|
48114b2186c96afce9a00c86eed8739853e8a71e
|
/eptools/gspread_utils.py
|
6ab72a98450912f2e91365ff769292cf14ce4630
|
[
"MIT"
] |
permissive
|
PythonSanSebastian/ep-tools
|
78b299eca763cc345da15e2984d7d08e67dc0c8d
|
d9a0e3c1d97df9f8bd94023e150b568e5619a482
|
refs/heads/master
| 2021-01-20T21:57:06.463661
| 2018-05-31T09:46:22
| 2018-05-31T09:46:22
| 51,786,311
| 0
| 0
| null | 2016-02-15T21:15:50
| 2016-02-15T21:15:50
| null |
UTF-8
|
Python
| false
| false
| 1,813
|
py
|
"""
Functions to access the data in google drive spreadsheets
"""
from docstamp.gdrive import (get_spreadsheet,
worksheet_to_dict)
def get_api_key_file():
""" Return the api_key_file path imported from the config.py file"""
try:
from .config import api_key_file
    except ImportError:
raise ImportError('Could not find a path to the Google credentials file. '
'You can set it up permanently in the config.py file.')
else:
return api_key_file
def get_ws_data(api_key_file, doc_key, ws_tab_idx, header=None, start_row=1):
""" Return the content of the spreadsheet in the ws_tab_idx tab of
the spreadsheet with doc_key as a pandas DataFrame.
Parameters
----------
api_key_file: str
Path to the Google API key json file.
    doc_key: str
        Key (ID) of the Google spreadsheet document.
ws_tab_idx: int
Index of the worksheet within the spreadsheet.
header: List[str]
List of values to assign to the header of the result.
start_row: int
Row index from where to start collecting the data.
Returns
-------
content: pandas.DataFrame
"""
import pandas as pd
spread = get_spreadsheet(api_key_file, doc_key)
ws = spread.get_worksheet(ws_tab_idx)
ws_dict = worksheet_to_dict(ws, header=header, start_row=start_row)
return pd.DataFrame(ws_dict)
def find_one_row(substr, df, col_name):
""" Return one row from `df`. The returned row has in `col_name` column
a value with a sub-string as `substr.
Raise KeyError if no row is found.
"""
for name in df[col_name]:
if substr.lower() in name.lower():
return df[df[col_name] == name]
raise KeyError('Could not find {} in the '
'pandas dataframe.'.format(substr))
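# Usage sketch (the document key, tab index and column name below are
# hypothetical placeholders):
#
#     api_key_file = get_api_key_file()
#     df = get_ws_data(api_key_file, doc_key='1AbC...', ws_tab_idx=0)
#     row = find_one_row('alice', df, col_name='name')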
|
[
"alexsavio@gmail.com"
] |
alexsavio@gmail.com
|
4827119c0da3a1ec929ea1870f9ff11d5289f6df
|
1b461ec82c8dd1099021ce3a32a7f649fa970226
|
/1.Python_basics/00. First_steps.py
|
de81272da5e7285c2ecc00f70c4e38d5bd64453f
|
[] |
no_license
|
AdamSierzan/Learn-to-code-in-Python-3-basics
|
9df20c80c33f40da8800d257ee2ec05881198419
|
ef298bcba72250e19080283cb81dbecf6a245563
|
refs/heads/master
| 2022-11-06T00:48:17.413322
| 2020-06-16T20:52:08
| 2020-06-16T20:52:08
| 250,247,475
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
msg = "Hello World"
print(msg)
x = 2
y = 7232
sum = x + y
print(sum)
x = 3
print(x)
x = 2*y
y = 69780
print(x)
print(y)
print(x)
x = 2*y
print(x)
print"hello world"
help
|
[
"adagioo1993@gmail.com"
] |
adagioo1993@gmail.com
|
291d6c66a8448ced95fc18bbfadb84c49f58a446
|
323716a35ee2b649031ec8a09b196b8e7b833e8d
|
/lab9/hhback/api/migrations/0001_initial.py
|
a18a2d38b64a756ff8b961f72e74525684e761d8
|
[] |
no_license
|
Zhaisan/WebDev
|
0377cec0c553900c5126794a8addc16e2e62b558
|
959ecf5b2e5032ccd2ab704b840e8f680dbcfc42
|
refs/heads/main
| 2023-05-27T17:24:17.026750
| 2021-05-31T15:02:15
| 2021-05-31T15:02:15
| 334,424,629
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,506
|
py
|
# Generated by Django 2.1 on 2021-04-13 19:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=300)),
('description', models.TextField(default='')),
('city', models.CharField(max_length=100)),
('address', models.TextField()),
],
options={
'verbose_name': 'Company',
'verbose_name_plural': 'Companies',
},
),
migrations.CreateModel(
name='Vacancy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=300)),
('description', models.TextField(default='')),
('salary', models.FloatField(default='')),
('company', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vacancies', to='api.Company')),
],
options={
'verbose_name': 'Vacancy',
'verbose_name_plural': 'Vacancies',
},
),
]
|
[
"noreply@github.com"
] |
Zhaisan.noreply@github.com
|
ad6818b1cb0641df553b96ea5a6a81a2964f6ddf
|
1078c61f2c6d9fe220117d4c0fbbb09f1a67f84c
|
/paws/lib/python2.7/site-packages/euca2ools-3.4.1_2_g6b3f62f2-py2.7.egg/EGG-INFO/scripts/euare-instanceprofilelistbypath
|
c55ad41537406ff822d69daf820d23cc6176d1e1
|
[
"MIT"
] |
permissive
|
cirobessa/receitas-aws
|
c21cc5aa95f3e8befb95e49028bf3ffab666015c
|
b4f496050f951c6ae0c5fa12e132c39315deb493
|
refs/heads/master
| 2021-05-18T06:50:34.798771
| 2020-03-31T02:59:47
| 2020-03-31T02:59:47
| 251,164,945
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 229
|
#!/media/ciro/LOCALDRV/A_DESENVOLVIMENTO/AWS/receitas/paws/bin/python -tt
import euca2ools.commands.iam.listinstanceprofiles
if __name__ == '__main__':
euca2ools.commands.iam.listinstanceprofiles.ListInstanceProfiles.run()
|
[
"cirobessa@yahoo.com"
] |
cirobessa@yahoo.com
|
|
4d137f2610e281cad85b0440573d0513db16ccdb
|
fe3a29aefb4f9bb38d6d4bc61ef9839521823dc2
|
/server/src/oscarbundles/migrations/0009_auto_20180319_1116.py
|
f8f0a02a9366aa3258ef06133916ac5a59f7ae57
|
[
"ISC"
] |
permissive
|
thelabnyc/django-oscar-bundles
|
a5f73edd26d3c930a32cdaa4a2142cfd44a74294
|
d8dc00edbcc57cbe18b274905beef533a8a642f7
|
refs/heads/master
| 2023-05-25T01:13:10.769112
| 2023-05-16T15:42:01
| 2023-05-16T15:42:01
| 81,470,009
| 6
| 3
|
ISC
| 2023-03-14T17:27:07
| 2017-02-09T16:24:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,077
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-19 15:16
from __future__ import unicode_literals
from django.db import migrations
def make_group_trigger_data_unique(apps, schema_editor):
Bundle = apps.get_model("oscarbundles", "Bundle")
for bundle in Bundle.objects.order_by("id").all():
conflicts = (
Bundle.objects.filter(bundle_group=bundle.bundle_group)
.filter(triggering_product=bundle.triggering_product)
.exclude(pk=bundle.pk)
.order_by("id")
.all()
)
for conflict in conflicts:
for suggested_product in conflict.suggested_products.all():
bundle.suggested_products.add(suggested_product)
bundle.save()
conflict.suggested_products.remove(suggested_product)
conflict.save()
class Migration(migrations.Migration):
dependencies = [
("oscarbundles", "0008_auto_20180318_1933"),
]
operations = [
migrations.RunPython(make_group_trigger_data_unique),
]
|
[
"crgwbr@gmail.com"
] |
crgwbr@gmail.com
|
d82b63b927f20bd2f9ea34dd627297fedd1bd24d
|
cb6b1aa2d61b80cba29490dfe8755d02c7b9a79f
|
/lobbyapp/dbmangr/root.py
|
68b7357ed39d3aeacd4c771556566af010222663
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
piotrmaslanka/Ninja-Tower
|
c127a64888bc3306046e4b400ce3a8c6764b5481
|
7eca86e23513a8805dd42c3c542b7fae0499576b
|
refs/heads/master
| 2021-12-06T07:56:13.796922
| 2015-10-15T08:10:35
| 2015-10-15T08:10:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,775
|
py
|
import MySQLdb
from satella.instrumentation.counters import PulseCounter
from satella.instrumentation import CounterCollection
from satella.db.pool import DatabaseDefinition, ConnectionPool
from lobbyapp.selectlayer.api import PDBHelperInterface as SelectLayerInterface
from lobbyapp.dbmangr.proxies import SelectLayerProxy, PlayerDBProxy
from lobbyapp.playerdb.api import PDBHelperInterface as PlayerDBInterface
class DatabaseManager(object):
def __init__(self, host, username, password, dbname, rootcc, dbtype='mysql'):
"""@type rootcc: L{satella.instrumentation.CounterCollection}"""
assert dbtype == 'mysql', 'I cannot support other databases!'
dd = DatabaseDefinition(MySQLdb.connect,
(MySQLdb.OperationalError, MySQLdb.InterfaceError),
(host, username, password, dbname))
self.cp = ConnectionPool(dd)
# Set up instrumentation
insmgr = CounterCollection('database')
self.cursors_counter = PulseCounter('cursors', resolution=60,
units=u'cursors per minute',
description='SQL cursors created')
insmgr.add(self.cursors_counter)
rootcc.add(insmgr)
def query_interface(self, ifc):
if ifc == SelectLayerInterface:
return SelectLayerProxy(self)
elif ifc == PlayerDBInterface:
return PlayerDBProxy(self)
else:
raise ValueError, 'Unknown interface'
def __call__(self):
"""
Use as in:
with database_manager() as cur:
cur.execute('I CAN DO SQL')
"""
self.cursors_counter.update()
return self.cp.cursor()
|
[
"piotr.maslanka@henrietta.com.pl"
] |
piotr.maslanka@henrietta.com.pl
|
67685bc853b72f28dfc50d9e13c6874b050911f5
|
0c89b4b021d469f5209753f2ab75de06c4925497
|
/setup.py
|
d92ceb35d542fabdf9f5253fce19017a14c8b384
|
[
"BSD-3-Clause"
] |
permissive
|
mrusoff/sos
|
6fc474a7a8da49f7d0ff6f4ae11ce8ea3d5283ea
|
e60f2944d4f4c623191dbe7b1ba6a092f0dc5e94
|
refs/heads/master
| 2020-07-01T19:54:28.724508
| 2019-08-08T13:52:12
| 2019-08-08T13:52:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,404
|
py
|
#!/usr/bin/env python
#
# Copyright (c) Bo Peng and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
import sys
from setuptools import find_packages, setup
from setuptools.command.bdist_egg import bdist_egg
_py_ver = sys.version_info
if _py_ver.major == 2 or (_py_ver.major == 3 and
(_py_ver.minor, _py_ver.micro) < (6, 0)):
raise SystemError(
'sos requires Python 3.6 or higher. Please upgrade your Python {}.{}.{}.'
.format(_py_ver.major, _py_ver.minor, _py_ver.micro))
# obtain version of SoS
with open('src/sos/_version.py') as version:
for line in version:
if line.startswith('__version__'):
__version__ = eval(line.split('=')[1])
break
description = '''\
Computationally intensive disciplines such as computational biology often
requires one to exploit a variety of tools implemented in different programming
languages, and to analyze large datasets on high performance computing systems.
Although scientific workflow systems are powerful in organizing and executing
large-scale data analysis processes, there is usually a non-trivial learning
curve and engineering overhead in creating and maintaining such workflows,
making them unsuitable for data exploration and prototyping. To bridge the
gap between interactive analysis and workflow systems, we developed Script
of Scripts (SoS), a system with strong emphases on readability, practicality,
and reproducibility for daily computational research. For exploratory analysis
SoS provides a multi-language file format and scripting engine that centralizes
all computations, and creates dynamic report documents for publishing and
sharing. As a workflow engine, SoS provides an intuitive syntax to create
workflows in process-oriented, outcome-oriented and mixed styles, as well as
a unified interface to executing and managing tasks on a variety of computing
platforms with automatic synchronization of files between isolated systems.
In this paper we illustrate with real-world examples the use of SoS as both
an interactive analysis tool and a pipeline platform for all stages of methods
development and data analysis projects. In particular we demonstrate how SoS
can easily be adopted based on existing scripts and pipelines, yet resulting
in substantial improvement in terms of organization, readability and
cross-platform computation management.
Please refer to http://vatlab.github.io/SOS/ for more details on SoS.
'''
class bdist_egg_disabled(bdist_egg):
"""Disabled version of bdist_egg
Prevents setup.py install performing setuptools' default easy_install,
which it should never ever do.
"""
def run(self):
sys.exit(
"Aborting implicit building of eggs. Use `pip install -U --upgrade-strategy only-if-needed .` to install from source."
)
cmdclass = {
'bdist_egg': bdist_egg if 'bdist_egg' in sys.argv else bdist_egg_disabled
}
setup(
name="sos",
version=__version__,
description='Script of Scripts (SoS): an interactive, cross-platform, and cross-language workflow system for reproducible data analysis',
long_description=description,
author='Bo Peng',
url='https://github.com/vatlab/SoS',
author_email='bpeng@mdanderson.org',
maintainer='Bo Peng',
maintainer_email='bpeng@mdanderson.org',
license='3-clause BSD',
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
packages=find_packages('src'),
cmdclass=cmdclass,
package_dir={'': 'src'},
python_requires='>=3.6',
install_requires=[
'psutil',
# progress bar
'tqdm',
# for file lock
'fasteners',
'pyyaml',
'pygments',
# for DAG, some version requires pydot, some requires pydotplus
'networkx',
'pydot',
'pydotplus',
'pexpect',
# for report regeneration
'jinja2',
# to execute workflow embedded in .ipynb files
'nbformat',
# zeromq for IPC
'pyzmq',
],
entry_points='''
[console_scripts]
sos = sos.__main__:main
sos-runner = sos.__main__:sosrunner
[pygments.lexers]
sos = sos.converter:SoS_Lexer
[sos_targets]
file_target = sos.targets:file_target
dynamic = sos.targets:dynamic
remote = sos.targets:remote
executable = sos.targets:executable
sos_variable = sos.targets:sos_variable
sos_step = sos.targets:sos_step
env_variable = sos.targets:env_variable
sos_targets = sos.targets:sos_targets
system_resource = sos.targets:system_resource
Py_Module = sos.targets_python:Py_Module
R_library = sos.targets_r:R_library
[sos_actions]
script = sos.actions:script
sos_run = sos.actions:sos_run
fail_if = sos.actions:fail_if
warn_if = sos.actions:warn_if
stop_if = sos.actions:stop_if
done_if = sos.actions:done_if
skip_if = sos.actions:skip_if
download = sos.actions:download
run = sos.actions:run
bash = sos.actions_bash:bash
csh = sos.actions_bash:csh
tcsh = sos.actions_bash:tcsh
zsh = sos.actions_bash:zsh
sh = sos.actions_bash:sh
node = sos.actions_javascript:node
julia = sos.actions_julia:julia
matlab = sos.actions_matlab:matlab
octave = sos.actions_matlab:octave
python = sos.actions_python:python
python2 = sos.actions_python:python2
python3 = sos.actions_python:python3
R = sos.actions_r:R
Rmarkdown = sos.actions_r:Rmarkdown
ruby = sos.actions_ruby:ruby
perl = sos.actions:perl
report = sos.actions:report
pandoc = sos.actions:pandoc
docker_build = sos.docker.actions:docker_build
singularity_build = sos.singularity.actions:singularity_build
[sos_taskengines]
process = sos.tasks:BackgroundProcess_TaskEngine
[sos_previewers]
*.pdf,1 = sos.preview:preview_pdf
*.html,1 = sos.preview:preview_html
*.csv,1 = sos.preview:preview_csv
*.xls,1 = sos.preview:preview_xls
*.xlsx,1 = sos.preview:preview_xls
*.gz,1 = sos.preview:preview_gz
*.txt,1 = sos.preview:preview_txt
*.md,1 = sos.preview:preview_md
*.dot,1 = sos.preview:preview_dot [dot]
*.svg,1 = sos.preview:preview_svg
imghdr:what,1 = sos.preview:preview_img
zipfile:is_zipfile,1 = sos.preview:preview_zip
tarfile:is_tarfile,1 = sos.preview:preview_tar
*,0 = sos.preview:preview_txt
[sos_converters]
sos-html.parser = sos.converter:get_script_to_html_parser
sos-html.func = sos.converter:script_to_html
''',
# [sos_installers]
# vim-syntax.parser = sos.install:get_install_vim_syntax_parser
# vim-syntax.func = sos.install:install_vim_syntax
extras_require={
':sys_platform=="win32"': ['colorama'],
# faster hashlib
':sys_platform!="win32"': ['xxhash'],
'dot': ['graphviz', 'pillow'],
})
|
[
"ben.bog@gmail.com"
] |
ben.bog@gmail.com
|
fc03332dcbf0200d0f9e90b5bfe070525ec87bf7
|
1336896824c8937cc744a112661061c7b89beb73
|
/Tag02/dozent_pool.py
|
5500d3d8b5f588eca1e8359d02afe868322021ae
|
[] |
no_license
|
anna-s-dotcom/python01-python08
|
df457fc1f93b74d91037fd7d62db5fa53baa8616
|
cf3d539800ee7e83f1d32010481c7a9ee2d58858
|
refs/heads/master
| 2020-12-30T06:12:10.785131
| 2020-02-07T09:40:01
| 2020-02-07T09:40:01
| 238,888,055
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
import os
from multiprocessing import Pool
import time
def quadfunc(n):
time.sleep(0.2)
return n*n
if __name__ == '__main__':
print(os.cpu_count())
t = time.time()
p = Pool(processes = 5)
result = p.map(quadfunc, [1, 2, 3, 4, 5])
p.close()
print('Pool time:', time.time()-t)
t = time.time()
result2 = list(map(quadfunc, [1, 2, 3, 4, 5]))
print('Serial time:', time.time()-t)
# print(result)
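# Equivalent sketch using Pool as a context manager (Python 3), which
# closes and joins the pool automatically; the __main__ guard stays
# mandatory so spawned worker processes can import this module safely:
#
#     if __name__ == '__main__':
#         with Pool(processes=5) as p:
#             result = p.map(quadfunc, [1, 2, 3, 4, 5])
#         print(result)  # [1, 4, 9, 16, 25]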
|
[
"noreply@github.com"
] |
anna-s-dotcom.noreply@github.com
|
cf67b46f033723c3e6c4b4a18b3594092ee467a7
|
53c3462ff265b6273f4a4fa17f6d59688f69def0
|
/数据结构/quick_sort.py
|
9b98643d357f31d649bf67308b1db09a87523eb6
|
[] |
no_license
|
17764591637/jianzhi_offer
|
b76e69a3ecb2174676da2c8d8d3372a3fc27b5c4
|
27e420ee302d5ab6512ecfdb8d469b043fb7102d
|
refs/heads/master
| 2023-08-03T01:32:51.588472
| 2019-10-13T07:56:21
| 2019-10-13T07:56:21
| 197,692,548
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,309
|
py
|
'''
Quick sort uses divide and conquer to split a list into two sub-lists.
Algorithm:
1. Pick an element from the list, called the "pivot".
2. Partition the list so that every element smaller than the pivot comes
   before it and every element larger comes after it (equal elements may go
   to either side). After this partition the pivot is in its final position.
3. Recursively sort the sub-list of smaller elements and the sub-list of
   larger elements.
Time complexity: O(n log n) on average; not stable.
'''
def quick_sort(nums,start,end):
    # base case: nothing left to sort
if start >= end:
return
mid = nums[start]
left = start
right = end
while left < right:
while left < right and nums[right] >= mid:
right -= 1
nums[left] = nums[right]
while left < right and nums[left] < mid:
left += 1
nums[right] = nums[left]
nums[left] = mid
    # recursively quick sort the sub-list left of the pivot
    quick_sort(nums, start, left - 1)
    # recursively quick sort the sub-list right of the pivot
    quick_sort(nums, left + 1, end)
alist = [54,26,93,17,77,31,44,55,20]
quick_sort(alist,0,len(alist)-1)
print(alist)
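# Quick self-check against the built-in sort (illustrative).
import random
nums = [random.randint(0, 100) for _ in range(50)]
expected = sorted(nums)
quick_sort(nums, 0, len(nums) - 1)
assert nums == expected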
|
[
"17764591637@163.com"
] |
17764591637@163.com
|
72b224e612831d78026a2b1d1c2e5fa5338f40d2
|
60f96f12bcac952de88e1f62a785149a4e0a6746
|
/mixins_views/views.py
|
b9029128c1818ea8e61d60eadd773cc8c72674d8
|
[] |
no_license
|
jayednahain/Django-Rest_api-with-mixins.
|
6242e5d4678ef6b77a4f3ced16f791f8997b6748
|
4536fad0ded80d70453ab46ce185e9d8ce16d2e1
|
refs/heads/main
| 2023-03-24T20:08:58.916183
| 2021-03-21T14:06:21
| 2021-03-21T14:06:21
| 350,015,993
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,136
|
py
|
from django.shortcuts import render
# Create your views here.
from mixins_views.models import Student
from mixins_views.serializers import StudentSerializer
from rest_framework.response import Response
from rest_framework import status
from rest_framework.views import APIView  # used for class-based views
from rest_framework import generics,mixins
class StudentListView(mixins.ListModelMixin,mixins.CreateModelMixin,generics.GenericAPIView):
queryset = Student.objects.all()
serializer_class = StudentSerializer
def get(self,request):
return self.list(request)
def post(self,request):
return self.create(request)
#primary key based operation
class StudentDetailView(mixins.RetrieveModelMixin,mixins.UpdateModelMixin,mixins.DestroyModelMixin,generics.GenericAPIView):
queryset = Student.objects.all()
serializer_class = StudentSerializer
def get(self,request,pk):
return self.retrieve(request,pk)
def put(self,request,pk):
return self.update(request,pk)
def delete(self,request,pk):
return self.destroy(request,pk)
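# A minimal urls.py sketch wiring these generic views (route names are
# illustrative, not from the original project):
#
#     from django.urls import path
#     from mixins_views.views import StudentListView, StudentDetailView
#
#     urlpatterns = [
#         path('students/', StudentListView.as_view()),
#         path('students/<int:pk>/', StudentDetailView.as_view()),
#     ]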
|
[
"jayednahian@yahoo.com"
] |
jayednahian@yahoo.com
|
35b3a29158681480060f348b5446d99387856c6b
|
998c2105908e0a4463075a84f9e3f1678ffcdfb3
|
/keras_video_object_detector/library/yolo_utils.py
|
ca0e5e348b8fcb14721858d241185efece4ba0ac
|
[
"MIT"
] |
permissive
|
chen0040/keras-video-object-detector
|
a4bb2a080d62c0ecb56c12096ffe1f161b6d2c71
|
52f07ff4047dcc8732015c3debba1fa3eb7f2c56
|
refs/heads/master
| 2021-09-03T09:38:01.520006
| 2018-01-08T03:22:42
| 2018-01-08T03:22:42
| 116,548,809
| 15
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,459
|
py
|
import colorsys
import imghdr
import os
import random
from keras import backend as K
import numpy as np
from PIL import Image, ImageDraw, ImageFont
def read_classes(classes_path):
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def read_anchors(anchors_path):
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
anchors = np.array(anchors).reshape(-1, 2)
return anchors
def generate_colors(class_names):
hsv_tuples = [(x / len(class_names), 1., 1.) for x in range(len(class_names))]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
random.seed(10101) # Fixed seed for consistent colors across runs.
random.shuffle(colors) # Shuffle colors to decorrelate adjacent classes.
random.seed(None) # Reset seed to default.
return colors
def scale_boxes(boxes, image_shape):
""" Scales the predicted boxes in order to be drawable on the image"""
height = image_shape[0]
width = image_shape[1]
image_dims = K.stack([height, width, height, width])
image_dims = K.reshape(image_dims, [1, 4])
boxes = boxes * image_dims
return boxes
def preprocess_image(img_path, model_image_size):
image_type = imghdr.what(img_path)
image = Image.open(img_path)
resized_image = image.resize(tuple(reversed(model_image_size)), Image.BICUBIC)
image_data = np.array(resized_image, dtype='float32')
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
return image, image_data
def preprocess_image_data(image):
image_data = np.array(image, dtype='float32')
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
return image, image_data
def draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors):
font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
for i, c in reversed(list(enumerate(out_classes))):
predicted_class = class_names[c]
box = out_boxes[i]
score = out_scores[i]
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
print(label, (left, top), (right, bottom))
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
# My kingdom for a good redistributable image drawing library.
for i in range(thickness):
draw.rectangle([left + i, top + i, right - i, bottom - i], outline=colors[c])
draw.rectangle([tuple(text_origin), tuple(text_origin + label_size)], fill=colors[c])
draw.text(text_origin, label, fill=(0, 0, 0), font=font)
del draw
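# End-to-end sketch of the helpers above (paths and model input size are
# hypothetical placeholders; the detection model itself is out of scope):
#
#     class_names = read_classes('model_data/classes.txt')
#     anchors = read_anchors('model_data/anchors.txt')
#     colors = generate_colors(class_names)
#     image, image_data = preprocess_image('test.jpg', model_image_size=(608, 608))
#     # ...run the detector on image_data to get out_scores, out_boxes, out_classes...
#     # draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)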
|
[
"xs0040@gmail.com"
] |
xs0040@gmail.com
|
2adc6545b8d6dcc6760907fe667129c5002aca2a
|
c14debb710242571769587d9e8fb1d0ecfbcd15b
|
/query_csv/utils.py
|
ea9bf421ec92f261c090af6872c3e1b44db13cf2
|
[] |
no_license
|
jayrbolton/query_csv
|
f464a6ad714372c41390b15f838c99c1e4c250ec
|
4299f9e179d9fcaf41560c30372cb65b57d1756f
|
refs/heads/master
| 2022-10-03T18:23:33.052523
| 2020-06-06T14:42:36
| 2020-06-06T14:42:36
| 270,006,356
| 0
| 0
| null | 2020-06-06T14:42:37
| 2020-06-06T14:29:59
|
Python
|
UTF-8
|
Python
| false
| false
| 2,809
|
py
|
import gzip
import shutil
import tempfile
import csv
import os
from typing import Union, List, Generator
# Type alias for the CSV parsers below: a generator that yields a list of
# column values for every row in a CSV.
Rows = Generator[List[str], None, None]
def convert_col_type(val: str) -> Union[str, float, int]:
"""
Convert a CSV column into an integer, a float, or keep as a string based on
its format.
Args:
val: column value
Returns:
Int if numeric without decimal, float if numeric with decimal, and
string otherwise
Examples:
"hi" -> "hi"
"10" -> 10 (int)
"10.0" -> 10.0 (float)
"""
try:
return int(val)
except ValueError:
try:
return float(val)
except ValueError:
return val
def iter_csv_rows(path: str, delim: str) -> Rows:
"""
Only loads one row at a time into memory and yields it.
Args:
path: path to a .csv file
delim: string column delimiter
Yields:
List of string values for every column.
"""
with open(path) as fd:
reader = csv.reader(fd, delimiter=delim)
for row in reader:
yield row
def iter_gzip_csv_rows(path: str, delim: str) -> Rows:
"""
Args:
path: path to a .csv.gz file
delim: string column delimiter
Yields:
List of string values for every column.
"""
# Decompress the gzip contents into a tempfile without loading into memory
with gzip.open(path, 'rb') as fdout:
with tempfile.NamedTemporaryFile('w+b') as fdin:
# Copies by chunks
shutil.copyfileobj(fdout, fdin)
# Flush buffer to disk
fdin.flush()
for row in iter_csv_rows(fdin.name, delim):
yield row
# Tempfile delete at end of context
def dict_is_subset(subset: dict, superset: dict) -> bool:
"""
Check that all keys in `subset` are present in `superset` and have all the
same values by `==`.
Args:
subset: All keys and values in the dict must match those in `superset`
superset: Must contain all keys/vals from subset
Returns:
boolean result
Examples:
dict_is_subset({'x': 1}, {'x': 1, 'y': 2}) -> True
dict_is_subset({'x': 1, 'z': 2}, {'x': 1, 'y': 2}) -> False
"""
return all(
key in superset and superset[key] == subset[key]
for key in subset.keys()
)
def get_extension(path):
"""
Get the file extension of a given path. Returns double extensions, such as
'.csv.gz'
"""
(name, ext) = os.path.splitext(path)
(_, subext) = os.path.splitext(name)
# Get the double extension as '.csv.gz'
# `subext` will be '' if not present
ext = subext + ext
return ext
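# Usage sketch combining the iterators with the type converter (the file
# name is a placeholder):
#
#     path = 'data.csv'
#     delim = ','
#     rows = (iter_gzip_csv_rows(path, delim) if get_extension(path) == '.csv.gz'
#             else iter_csv_rows(path, delim))
#     for row in rows:
#         print([convert_col_type(val) for val in row])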
|
[
"jayrbolton@gmail.com"
] |
jayrbolton@gmail.com
|
ada3a03be028e3b915389cada419c859da69736d
|
eb42558f56fdb41526cc31ac4ef3a6937bf39e96
|
/ConfigDefinitions/UserConfigs/SMHTT_2018_Configs_Deep/ST_tW_antitopConfig.py
|
378774e807e5da7f892c2a679902bc63f061b479
|
[] |
no_license
|
samhiggie/Jesterworks
|
6906b042d3e200efb9bd10b70284ccd30661aa53
|
562e8cbb20d7e4b1d5b9bdba3715578cc66f097d
|
refs/heads/master
| 2020-09-11T19:35:59.770456
| 2019-11-16T12:37:35
| 2019-11-16T12:37:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 947
|
py
|
from ConfigDefinitions.JesterworksConfigurations import JesterworksConfiguration as Config
from ConfigDefinitions.BranchAdditions.UserDefinedCollections.SMHTT_2018_MC_Collection import MCCollection as BranchCollection
from ConfigDefinitions.CuttingDefinitions.UserCutConfigs.SMHTT2018Cuts_MC_NoEmbeddedOverlap_wDeep import SMHTT2018Cuts as CutConfig
from ConfigDefinitions.EndActionDefinitions.UserConfigs.GrabHistograms import HistogramGrabber as HistogramGrabber
DataConfig = Config()
DataConfig.Path = "/data/ccaillol/smhmt2018_svfitted_12oct/"
DataConfig.Files = ["ST_tW_antitop.root"]
DataConfig.InputTreeName = "mutau_tree"
DataConfig.SampleName = "ST_tW_antitop"
DataConfig.OutputPath = "/data/aloeliger/SMHTT_Selected_2018_Deep/"
DataConfig.OutputFile = "ST_tW_antitop.root"
DataConfig.OutputTreeName = "mt_Selected"
DataConfig.BranchCollection = BranchCollection
DataConfig.CutConfig = CutConfig
DataConfig.EndAction = HistogramGrabber
|
[
"aloelige@cern.ch"
] |
aloelige@cern.ch
|
cdd87b5b84d7dc7c907de04cbd185430dfb253e2
|
0fccee4c738449f5e0a8f52ea5acabf51db0e910
|
/genfragments/ThirteenTeV/Taustar/Taustar_TauG_L10000_m4000_13TeV_pythia8.py
|
9cdc43d01bf5f29ac69ce08fdf5c53e3f219175d
|
[] |
no_license
|
cms-sw/genproductions
|
f308ffaf3586c19b29853db40e6d662e937940ff
|
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
|
refs/heads/master
| 2023-08-30T17:26:02.581596
| 2023-08-29T14:53:43
| 2023-08-29T14:53:43
| 11,424,867
| 69
| 987
| null | 2023-09-14T12:41:28
| 2013-07-15T14:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,257
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'ExcitedFermion:qqbar2tauStartau = on',
'ExcitedFermion:Lambda= 10000',
'4000015:onMode = off',
'4000015:onIfMatch = 15 22',
'4000015:m0 = 4000'),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters',
)
)
)
|
[
"dnash@cern.ch"
] |
dnash@cern.ch
|
cc3756f9d748169d46b374c902c181e512a382fa
|
eafabc5e332f5fc0153e166d992ac0711cf90cd6
|
/BOJ/11021/11021.py
|
196aea58fa768861cb7e5f8f574957c8f0801695
|
[] |
no_license
|
PARKINHYO/Algorithm
|
96038ce21bd9f66208af0886208ef6ed925c23e2
|
0ed8687fe971fc2b05e2f50f62c0d0e47c368a6c
|
refs/heads/master
| 2021-12-23T23:48:25.247979
| 2021-08-20T01:52:50
| 2021-08-20T01:52:50
| 196,219,508
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
T = int(input())
AB = [[int(x) for x in input().split()] for y in range(T)]
for i in range(T):
C = AB[i][0] + AB[i][1]
print("Case #%d: %d" % (i+1, C))
|
[
"you@example.com"
] |
you@example.com
|
323135bcedfd94ec7cd2eae4703e33dde6537de0
|
ab1c920583995f372748ff69d38a823edd9a06af
|
/shultais_courses/data_types/type_conversion/type_conversion.py
|
1adc77061b934ef1b1a664bba675429f0fe1b226
|
[] |
no_license
|
adyadyat/pyprojects
|
5e15f4e33892f9581b8ebe518b82806f0cd019dc
|
c8f79c4249c22eb9e3e19998d5b504153faae31f
|
refs/heads/master
| 2022-11-12T16:59:17.482303
| 2020-07-04T09:08:18
| 2020-07-04T09:08:18
| 265,461,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 249
|
py
|
salary = "50000"
salary1 = "50000.5"
year_salary = int(salary) * 12
year_salary1 = float(salary1) * 12
print(year_salary, year_salary1)
print("Ваша годовая зарплата: " + str(year_salary))
# Преобразование типов
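# Pitfall (illustrative): int() rejects strings that contain a decimal
# point, so numeric-looking strings may need float() first.
try:
    int(salary1)
except ValueError:
    print(int(float(salary1)))  # 50000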
|
[
"omorbekov.a@gmail.com"
] |
omorbekov.a@gmail.com
|
632df10af90453376bd5a9c07308d6d702f9eab6
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_318/ch23_2019_03_31_22_15_03_222154.py
|
ababaf32eacb836c1ef4fd1dcb5fe1051412ae6a
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
def verifica_idade(x):
    if x > 20:
        print("Allowed in the USA and BRAZIL")
        return x
    elif x > 17 and x < 21:
        print("Allowed in BRAZIL")
        return x
    else:
        print("Not allowed")
        return x
|
[
"you@example.com"
] |
you@example.com
|
e731f34764d4a0c183cb174840d6cc907ce618bd
|
b4a58df63b7e42085d7b4a90cce184bab4039e97
|
/src/config_29.py
|
0b783d5a2b9d9b38f8b373fc503a67e5a2acd268
|
[] |
no_license
|
shinglyu/MusicPupil
|
4f82a2240b99c98ec7eb8db1017cfa232cf21bb9
|
edfc6da085e9433f347301d7f6ccc49eab45d14f
|
refs/heads/master
| 2021-01-10T03:50:32.670628
| 2013-08-14T08:52:37
| 2013-08-14T08:52:37
| 51,300,212
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,595
|
py
|
import os.path
#DEBUG = True
DEBUG = False
defaultTrainSampleList= "../training_samples/trainSampleList.txt"
unittestTrainSampleList="../training_samples/trainSampleList.txt"
defaultGenScore= "../testing_scores/chop_nc_phrase001"
#defaultTrainFeatsFilename="../output/trainFeats.json" #may need to prepend file name
#defaultGenFeatFilename="../output/genFeat.json"
#defaultModelFilename= "../output/model.bin"
defaultOutputDir= "../output/"
scoreFeatsList = [ "PosInPhrasePercent",
"PitchMidiNum",
"PitchDiffNextMidiNum",
"PitchDiffPrevMidiNum",
"Beat",
"BeatStrength",
"DurationQNote",
"DurationRatioNextPercent",
"DurationRatioPrevPercent", ]
perfFeatsList = [ "OnsetDiffQNote",
"DurationPercent",
"VelocityMidiScale",
]
modelFuncName = [ #"modelMultiLinearRegress",
"modelSVMStruct",
#"ha",
]
quantizerName= [ "quantizerLinear",
#"ha",
]
musicOutputFormat= [ "Midi",
#"ha",
]
#SVM^HMM related parameters
#svmhmm_c = None
svmhmm_c = 0.00000000001
def printDebug(string):
if DEBUG:
print("[DEBUG]"),
print(string)
def sanitizeDirPath(dirPath):
if not (dirPath.endswith("/")):
return dirPath + "/";
else:
return dirPath;
def getTrainSampleName(trainSampleFilename):
return os.path.splitext(os.path.basename(trainSampleFilename))[0]
def getTrainInFeatFilename(args):
trainFeatsFilename = sanitizeDirPath(args.outputDir)
trainFeatsFilename += getTrainSampleName(args.inputList)
trainFeatsFilename += ".train.allFeats.json"
return trainFeatsFilename
def getGenSampleName(genSampleFilename):
return os.path.basename(genSampleFilename)
def getGenInFeatFilename(args):
trainFeatsFilename = sanitizeDirPath(args.outputDir)
trainFeatsFilename += getGenSampleName(args.input)
trainFeatsFilename += ".gen.scoreFeats.json"
return trainFeatsFilename
def getGenOutFeatFilename(args):
trainFeatsFilename = sanitizeDirPath(args.outputDir)
trainFeatsFilename += getGenSampleName(args.input)
trainFeatsFilename += ".gen.perfFeats.json"
return trainFeatsFilename
def getModelFilename(args):
modelFilename = sanitizeDirPath(args.outputDir)
modelFilename += getTrainSampleName(args.inputList) + "."
modelFilename += modelFuncName[0] + ".model"
return modelFilename
|
[
"shing.lyu@gmail.com"
] |
shing.lyu@gmail.com
|
8c9f827f7dd01ae5a14d2a256505ffc43d563601
|
605c10db2f950a506af60d57a2074f97ebcf89ab
|
/code/MODULE/img_processing/record.py
|
224137ab62610a7abcbd7067dcc47e6f658b24e3
|
[] |
no_license
|
MulongXie/Research-ReverselyGeneratingWebCode
|
928f90d6b4f80ebff40a9a3a48f8b564277a0987
|
2c1598a765166f30786b0e6a22c485358ca2e98d
|
refs/heads/master
| 2020-05-17T18:14:02.241209
| 2020-04-10T00:19:16
| 2020-04-10T00:19:16
| 183,857,077
| 0
| 3
| null | 2020-02-03T04:31:34
| 2019-04-28T04:51:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,173
|
py
|
import cv2
import numpy as np
def find_contour():
img = cv2.imread('0.png')
img = cv2.blur(img, (3,3))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)
cv2.imwrite('bb.png', binary)
    binary, contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 3.x returns three values
cv2.drawContours(img, contours, -1, (0, 0, 255), 3)
cv2.imshow("img", img)
cv2.imwrite('bc.png', img)
cv2.waitKey(0)
def gradient():
img = cv2.imread("1.png", 0)
row, column = img.shape
    img_f = np.copy(img).astype("float")  # float avoids uint8 wrap-around in the differences below
gradient = np.zeros((row, column))
for x in range(row - 1):
for y in range(column - 1):
gx = abs(img_f[x + 1, y] - img_f[x, y])
gy = abs(img_f[x, y + 1] - img_f[x, y])
gradient[x, y] = gx + gy
cv2.imshow("gradient", gradient)
cv2.imwrite('ab.png', gradient)
cv2.waitKey(0)
def hough():
img = cv2.imread('x.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
lines = cv2.HoughLines(edges, 1, np.pi / 180, 100)
for line in lines:
for rho, theta in line:
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
x1 = int(x0 + 2000 * (-b))
y1 = int(y0 + 2000 * (a))
x2 = int(x0 - 2000 * (-b))
y2 = int(y0 - 2000 * (a))
cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.imshow('houghlines', img)
cv2.imshow('edg', edges)
cv2.waitKey(0)
def houghp():
img = cv2.imread('x.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
minLineLength = 100
maxLineGap = 10
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100, None, minLineLength, maxLineGap)
for line in lines:
for x1, y1, x2, y2 in line:
cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.imshow('img', img)
cv2.imshow('edge', edges)
cv2.waitKey(0)
# find_contour()
gradient()
|
[
"dsh15325@163.com"
] |
dsh15325@163.com
|
6fd957dec3b88887df1e62ab9b4bc131e1c557b1
|
b8461afd9d11457a91ae803987bde74337ad4fd1
|
/docs/source/reference-core/channels-shutdown.py
|
dcd35767ae1921678a10099dd6e99150a70a52b7
|
[
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
abispo/trio
|
d9750920091fc4b77e4d8386def45ea727eb2218
|
5bcfb1b9b90cc6bbf517468251597e8b262ca789
|
refs/heads/master
| 2020-06-20T21:10:38.474717
| 2019-07-15T05:56:33
| 2019-07-15T05:56:33
| 197,250,586
| 1
| 0
|
NOASSERTION
| 2019-07-16T18:53:03
| 2019-07-16T18:53:03
| null |
UTF-8
|
Python
| false
| false
| 582
|
py
|
import trio
async def main():
async with trio.open_nursery() as nursery:
send_channel, receive_channel = trio.open_memory_channel(0)
nursery.start_soon(producer, send_channel)
nursery.start_soon(consumer, receive_channel)
async def producer(send_channel):
async with send_channel:
for i in range(3):
await send_channel.send("message {}".format(i))
async def consumer(receive_channel):
async with receive_channel:
async for value in receive_channel:
print("got value {!r}".format(value))
trio.run(main)
|
[
"njs@pobox.com"
] |
njs@pobox.com
|
5d1f056703f1b727bc1edd6a7ae06a89636722a4
|
e82b761f53d6a3ae023ee65a219eea38e66946a0
|
/All_In_One/addons/BlendLuxCore/ui/halt.py
|
07a3ec885fcf3cd6db5a21ef53e0815aa858c782
|
[] |
no_license
|
2434325680/Learnbgame
|
f3a050c28df588cbb3b14e1067a58221252e2e40
|
7b796d30dfd22b7706a93e4419ed913d18d29a44
|
refs/heads/master
| 2023-08-22T23:59:55.711050
| 2021-10-17T07:26:07
| 2021-10-17T07:26:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,668
|
py
|
from bl_ui.properties_render import RenderButtonsPanel
from bl_ui.properties_render_layer import RenderLayerButtonsPanel
from bpy.types import Panel
from ..utils import ui as utils_ui
from . import icons
def draw(layout, context, halt):
layout.active = halt.enable
row = layout.row()
row.prop(halt, "use_time")
split = row.split()
split.active = halt.use_time
split.prop(halt, "time")
if halt.use_time and halt.time > 60:
time_humanized = utils_ui.humanize_time(halt.time)
row = layout.row()
row.alignment = "RIGHT"
row.label(time_humanized, icon="TIME")
row = layout.row()
row.prop(halt, "use_samples")
split = row.split()
split.active = halt.use_samples
split.prop(halt, "samples")
config = context.scene.luxcore.config
if halt.use_samples and config.engine == "PATH" and config.use_tiles:
# some special warnings about tile path usage
aa = config.tile.path_sampling_aa_size
samples_per_pass = aa**2
if config.tile.multipass_enable and halt.samples % samples_per_pass != 0:
layout.label("Should be a multiple of %d" % samples_per_pass, icon=icons.WARNING)
if context.scene.luxcore.denoiser.enabled and context.scene.luxcore.denoiser.type == "BCD":
# BCD Denoiser needs one warmup pass plus at least one sample collecting pass
min_samples = samples_per_pass * 2
else:
min_samples = samples_per_pass
if halt.samples < min_samples:
layout.label("Use at least %d samples!" % min_samples, icon=icons.WARNING)
if not config.tile.multipass_enable and halt.samples > min_samples:
layout.label("Samples halt condition overriden by disabled multipass", icon=icons.INFO)
col = layout.column(align=True)
col.prop(halt, "use_noise_thresh")
if halt.use_noise_thresh:
col.prop(halt, "noise_thresh")
col.prop(halt, "noise_thresh_warmup")
col.prop(halt, "noise_thresh_step")
class LUXCORE_RENDER_PT_halt_conditions(Panel, RenderButtonsPanel):
"""
These are the global halt conditions shown in the render settings
"""
bl_label = "LuxCore Halt Conditions"
COMPAT_ENGINES = {"LUXCORE"}
@classmethod
def poll(cls, context):
return context.scene.render.engine == "LUXCORE"
def draw_header(self, context):
halt = context.scene.luxcore.halt
self.layout.prop(halt, "enable", text="")
def draw(self, context):
layout = self.layout
halt = context.scene.luxcore.halt
draw(layout, context, halt)
layers = context.scene.render.layers
overriding_layers = [layer for layer in layers if layer.use and layer.luxcore.halt.enable]
if overriding_layers:
layout.separator()
col = layout.column(align=True)
row = col.row()
split = row.split(percentage=0.8)
split.label("Render Layers Overriding Halt Conditions:")
op = split.operator("luxcore.switch_space_data_context",
text="Show", icon="RENDERLAYERS")
op.target = "RENDER_LAYER"
for layer in overriding_layers:
halt = layer.luxcore.halt
conditions = []
if halt.use_time:
conditions.append("Time (%ds)" % halt.time)
if halt.use_samples:
conditions.append("Samples (%d)" % halt.samples)
if halt.use_noise_thresh:
conditions.append("Noise (%d)" % halt.noise_thresh)
if conditions:
text = layer.name + ": " + ", ".join(conditions)
col.label(text, icon="RENDERLAYERS")
else:
text = layer.name + ": No Halt Condition!"
col.label(text, icon=icons.ERROR)
class LUXCORE_RENDERLAYER_PT_halt_conditions(Panel, RenderLayerButtonsPanel):
"""
These are the per-renderlayer halt condition settings,
they can override the global settings and are shown in the renderlayer settings
"""
bl_label = "Override Halt Conditions"
COMPAT_ENGINES = {"LUXCORE"}
@classmethod
def poll(cls, context):
return context.scene.render.engine == "LUXCORE"
def draw_header(self, context):
rl = context.scene.render.layers.active
halt = rl.luxcore.halt
self.layout.prop(halt, "enable", text="")
def draw(self, context):
rl = context.scene.render.layers.active
halt = rl.luxcore.halt
draw(self.layout, context, halt)
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
adc9e3d8973dbb3380952f23d6606d8fea4fa7a0
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03425/s431844808.py
|
f6ac9253a929b3316b46eab8732c3022a81c6fb6
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
n = int(input())
s = [input() for i in range(n)]
l = [0] * 5
for i in range(n):
if s[i][0] == "M":
l[0] += 1
elif s[i][0] == "A":
l[1] += 1
elif s[i][0] == "R":
l[2] += 1
elif s[i][0] == "C":
l[3] += 1
elif s[i][0] == "H":
l[4] += 1
ans = 0
for j in range(3):
for k in range(j+1, 4):
for i in range(k+1, 5):
ans += l[j] * l[k] * l[i]
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
5ef99950fd6011c61d4088bd8fe2d21e6a1acc15
|
f2601a5678fbc04738eff8393a42c07c87ca4d9c
|
/licenses/management/commands/upload_license_messages.py
|
e69e87ba92c4d6da391efd55cd87d58d77121df6
|
[
"LicenseRef-scancode-free-unknown",
"MIT",
"LicenseRef-scancode-public-domain",
"CC0-1.0",
"CC-BY-NC-4.0",
"CC-BY-NC-ND-4.0",
"LicenseRef-scancode-unknown"
] |
permissive
|
sp35/cc-licenses
|
1c2e713fbc4ec96a90bdb6f5c8b5c4750fee3632
|
42573273bac4136adf9f482db75314d81efdcdcf
|
refs/heads/main
| 2023-03-08T11:28:06.885986
| 2021-02-25T21:23:14
| 2021-02-25T21:23:14
| 342,029,287
| 0
| 0
|
MIT
| 2021-02-24T20:38:45
| 2021-02-24T20:38:45
| null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
from django.core.management import BaseCommand
from licenses.models import License
class Command(BaseCommand):
def handle(self, **options):
for license in License.objects.filter(
version="4.0", license_code__startswith="by"
):
license.tx_upload_messages()
|
[
"dpoirier@caktusgroup.com"
] |
dpoirier@caktusgroup.com
|
ec737f98eaab6f5935ea821568169b9097114b80
|
cfd9fa1af735ac3572954704a47e35543850b244
|
/run.py
|
5dcf8a98e292f9b7dbf746fb300f36d20052741f
|
[] |
no_license
|
xingyueGK/hjsg
|
c1844ea8161d254f6d6cf70f42d1ac849e117438
|
be0c4c457bdfaa9178f25f9f722dc78d88f24540
|
refs/heads/master
| 2022-12-12T08:28:55.823357
| 2020-12-05T12:02:06
| 2020-12-05T12:02:06
| 147,184,573
| 0
| 1
| null | 2022-01-06T22:26:48
| 2018-09-03T09:47:04
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,701
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/5/21 11:15
# @Author : xingyue
# @File : run.py
# How to run
import threading
import os,time,apscheduler
from task.base import SaoDangFb
from task.talent import Talent
from task.hero_soul import DragonBoat,GoBoat
from task.glory_front import glory_front
from task.caomujiebing import Flag
from task.autoCountryBanquet import autoCountryBanquet
class run(SaoDangFb,Talent):
    # 'Talent scroll'
pass
class duanwu(SaoDangFb,DragonBoat):
    # 'Dragon boat race'
pass
class longzhou(SaoDangFb,GoBoat):
pass
class dongxizhanxian(SaoDangFb,glory_front):
#'东西战线'
pass
class banquet(SaoDangFb,autoCountryBanquet):
pass
class cmjb(SaoDangFb,Flag):
pass
if __name__ == '__main__':
s1 = threading.Semaphore(3)
def act(user, apass, addr):
s1.acquire()
action = dongxizhanxian(user, apass, addr)
if action.level()< 150:
s1.release()
return False
action.zhanxian(s1)
s1.release()
def flag(user, apass, addr):
        s1.acquire()  # block like the other workers; the non-blocking acquire ignored its result and over-released the semaphore
action = cmjb(user, apass, addr)
schedule = action.get_today_schedule()
if schedule['status'] == -2:
print schedule['msg']
exit(1)
elif schedule['status'] != 1:
print schedule['msg']
exit(1)
try:
self_server = schedule['data']['self_server']
except:
exit(3)
get_enter_list = action.get_enter_list(self_server)
enter_cd = get_enter_list['enter_cd']
print enter_cd
time.sleep(enter_cd)
action.enter(self_server,1)
s1.release()
def lz(user, apass, addr):
s1.acquire()
action = longzhou(user, apass, addr)
action.buytimes(200)
action.longzhou()
# action.meter_reward()
# action.bug_meter_reward()
s1.release()
def guoyan(user, apass, addr):
s1.acquire()
action = banquet(user, apass, addr)
action.jion_team()
s1.release()
filepath = os.path.dirname(os.path.abspath(__file__))
# cont = ['21user.txt', 'autouser.txt','gmnewyear.txt', 'user.txt', 'alluser.txt']
cont = ['user.txt']
for t in cont:
with open('%s/users/%s' % (filepath, t), 'r') as f:
for i in f:
if i.strip() and not i.startswith('#'):
name = i.split()[0]
passwd = i.split()[1]
addr = i.split()[2]
# addr = 147
t1 = threading.Thread(target=lz, args=(name, passwd, addr))
t1.start()
|
[
"a413728161@vip.qq.com"
] |
a413728161@vip.qq.com
|
4bf49fded33b7c4a27f087f75b6783e70d7a0f6f
|
4ec1eda7669dbe2dd67ac7218421fae62b5ef741
|
/userauth/urls.py
|
d8645c6d93d55fb5f15ec3a2f8a0d8f8793391d7
|
[] |
no_license
|
atul8727/medical_helper
|
075284335644343d71d4c4d92f1e4e92b67089aa
|
2e03f70b82834b95cb4d424d22f2bd5b82f652c8
|
refs/heads/master
| 2023-07-01T03:15:46.146540
| 2021-08-01T19:06:03
| 2021-08-01T19:06:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
from django.urls import path,include
from .views import dashboard_view,register_user
from app.views import *
urlpatterns = [
path('dashboard/', dashboard_view, name='dashboard'),
path('register/',register_user, name='register'),
path('oauth/',include('social_django.urls')),
]
|
[
"xaidmetamorphos@gmail.com"
] |
xaidmetamorphos@gmail.com
|
9696a8447135bae26d4e81268979efa949782a04
|
cc8416a20b3aa9832dabf29112e52b5dfb367157
|
/stable_nalu/layer/regualized_linear_nac.py
|
295fd28e4264cbe51423d12d827218e558f7c0b5
|
[
"MIT"
] |
permissive
|
AndreasMadsen/stable-nalu
|
ff877592ec965dca49a48bf94b38e343ba407411
|
b3296ace137ffa4854edeef3759f1578b7650210
|
refs/heads/master
| 2023-05-22T04:53:17.495712
| 2021-08-19T18:15:14
| 2021-08-19T18:23:45
| 177,330,156
| 149
| 19
|
MIT
| 2020-01-15T08:06:12
| 2019-03-23T19:13:34
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,824
|
py
|
import scipy.optimize
import numpy as np
import torch

from ..abstract import ExtendedTorchModule
from ._abstract_recurrent_cell import AbstractRecurrentCell
# NOTE: `Regualizer` is used below but was not imported in this snippet;
# the module path here is an assumption, not confirmed from the source.
from ._regualizer import Regualizer
class RegualizedLinearNACLayer(ExtendedTorchModule):
"""Implements the RegualizedLinearNAC
Arguments:
in_features: number of ingoing features
out_features: number of outgoing features
"""
def __init__(self, in_features, out_features,
regualizer_shape='squared',
**kwargs):
super().__init__('nac', **kwargs)
self.in_features = in_features
self.out_features = out_features
self._regualizer_bias = Regualizer(
support='nac', type='bias',
shape=regualizer_shape
)
self.W = torch.nn.Parameter(torch.Tensor(out_features, in_features))
self.register_parameter('bias', None)
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.W)
def regualizer(self):
return super().regualizer({
'W': self._regualizer_bias(self.W)
})
def forward(self, input, reuse=False):
self.writer.add_histogram('W', self.W)
self.writer.add_tensor('W', self.W, verbose_only=False)
return torch.nn.functional.linear(input, self.W, self.bias)
def extra_repr(self):
return 'in_features={}, out_features={}'.format(
self.in_features, self.out_features
)
class RegualizedLinearNACCell(AbstractRecurrentCell):
"""Implements the RegualizedLinearNAC as a recurrent cell
Arguments:
input_size: number of ingoing features
hidden_size: number of outgoing features
"""
def __init__(self, input_size, hidden_size, **kwargs):
super().__init__(RegualizedLinearNACLayer, input_size, hidden_size, **kwargs)
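# A minimal standalone sketch (not part of the original module) of the
# bias-free linear map this layer computes in forward(); plain PyTorch only,
# with hypothetical sizes (out_features=2, in_features=4).
if __name__ == '__main__':
    W = torch.nn.Parameter(torch.Tensor(2, 4))
    torch.nn.init.xavier_uniform_(W)            # same init as reset_parameters()
    x = torch.randn(8, 4)                       # batch of 8 samples
    y = torch.nn.functional.linear(x, W, None)  # forward() without the writer logging
    print(y.shape)                              # torch.Size([8, 2])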
|
[
"amwebdk@gmail.com"
] |
amwebdk@gmail.com
|
a2f9ac6a7a2e1e0f0409cdcdb101ee1a3326ccff
|
ca1e432c66ca9289cc25039f6c035c292e298d15
|
/content_management_portal/migrations/0002_auto_20200629_1454.py
|
98fb46bcc42bd507c329ea7f4728238c6de54c3e
|
[] |
no_license
|
raviteja1766/ib_mini_projects
|
9bf091acf34e87d7a44bec51a504bdb81aceae27
|
3fa36b97cfa90b5f5853253480934cf27714aa15
|
refs/heads/master
| 2022-11-19T07:08:27.061315
| 2020-07-02T16:54:42
| 2020-07-02T16:54:42
| 272,033,119
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
# Generated by Django 2.2.1 on 2020-06-29 14:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('content_management_portal', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='question',
old_name='user',
new_name='user_id',
),
]
|
[
"ravitejak125@gmail.com"
] |
ravitejak125@gmail.com
|
14fa26d61098fd50e84ed8cab47b9e770689805e
|
8f8498bb6f56b19d45a1989c8113a077348c0a02
|
/백준/Silver/미로 탐색.py
|
d234250c529ddb20d32d95135c9f6906a5932cda
|
[] |
no_license
|
gjtjdtn201/practice
|
a09b437c892b0b601e156c09cb1f053b52fab11b
|
ea45582b2773616b2b8f350b927559210009d89f
|
refs/heads/master
| 2021-01-01T13:29:46.640740
| 2020-11-28T00:55:37
| 2020-11-28T00:55:37
| 239,299,485
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 739
|
py
|
import sys
sys.stdin = open('미로 탐색.txt', 'r')
from collections import deque
def BFS(y,x):
queue = deque()
queue.append((y,x))
visit[y][x] = 1
while queue:
a, b = queue.popleft()
if (a, b) == (N-1, M-1):
print(visit[a][b])
return
for i in range(4):
ny = a + dy[i]
nx = b + dx[i]
if 0 <= ny < N and 0 <= nx < M and matrix[ny][nx] != 0 and visit[ny][nx] == 0:
visit[ny][nx] = visit[a][b] + 1
queue.append((ny, nx))
N, M = map(int, input().split())
matrix = []
for i in range(N):
matrix.append(list(map(int, input())))
dy = [1,-1,0,0]
dx = [0,0,1,-1]
visit = [[0]*M for _ in range(N)]
BFS(0, 0)
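# Hedged usage note: this solves Baekjoon 2178 (미로 탐색), reading the maze
# from '미로 탐색.txt'. Assuming the problem's first sample input:
#   4 6
#   101111
#   101010
#   101011
#   111011
# the script prints 15 (path length counting both the start and goal cells).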
|
[
"gjtjdtn201@naver.com"
] |
gjtjdtn201@naver.com
|
8a5a13a59d69a8b4e360050051c66440c9475be8
|
00b1fe62aff1bbad885a1b13354239b07925c5c1
|
/catalyst_rl/dl/callbacks/inference.py
|
0e865e1f3b6ece6a4f57fa06a27b1a96f3d7c224
|
[
"Apache-2.0"
] |
permissive
|
catalyst-team/catalyst-rl
|
a78675c477bef478d73cd1e7101be6dbb7b586aa
|
75ffa808e2bbb9071a169a1a9c813deb6a69a797
|
refs/heads/master
| 2021-09-22T08:36:12.161991
| 2021-09-13T05:59:12
| 2021-09-13T05:59:12
| 247,928,934
| 50
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,901
|
py
|
from collections import defaultdict
import os
import imageio
import numpy as np
from skimage.color import label2rgb
import torch
import torch.nn.functional as F
from catalyst_rl.dl import Callback, CallbackOrder, State, utils
# @TODO: refactor
class InferCallback(Callback):
def __init__(self, out_dir=None, out_prefix=None):
super().__init__(CallbackOrder.Internal)
self.out_dir = out_dir
self.out_prefix = out_prefix
self.predictions = defaultdict(lambda: [])
self._keys_from_state = ["out_dir", "out_prefix"]
def on_stage_start(self, state: State):
for key in self._keys_from_state:
value = getattr(state, key, None)
if value is not None:
setattr(self, key, value)
# assert self.out_prefix is not None
if self.out_dir is not None:
self.out_prefix = str(self.out_dir) + "/" + str(self.out_prefix)
if self.out_prefix is not None:
os.makedirs(os.path.dirname(self.out_prefix), exist_ok=True)
def on_loader_start(self, state: State):
self.predictions = defaultdict(lambda: [])
def on_batch_end(self, state: State):
dct = state.batch_out
dct = {key: value.detach().cpu().numpy() for key, value in dct.items()}
for key, value in dct.items():
self.predictions[key].append(value)
def on_loader_end(self, state: State):
self.predictions = {
key: np.concatenate(value, axis=0)
for key, value in self.predictions.items()
}
if self.out_prefix is not None:
for key, value in self.predictions.items():
suffix = ".".join([state.loader_name, key])
np.save(f"{self.out_prefix}/{suffix}.npy", value)
class InferMaskCallback(Callback):
def __init__(
self,
out_dir=None,
out_prefix=None,
input_key=None,
output_key=None,
name_key=None,
mean=None,
std=None,
threshold: float = 0.5,
mask_strength: float = 0.5,
mask_type: str = "soft"
):
super().__init__(CallbackOrder.Internal)
self.out_dir = out_dir
self.out_prefix = out_prefix
self.mean = mean or np.array([0.485, 0.456, 0.406])
self.std = std or np.array([0.229, 0.224, 0.225])
assert input_key is not None
assert output_key is not None
self.threshold = threshold
self.mask_strength = mask_strength
self.mask_type = mask_type
self.input_key = input_key
self.output_key = output_key
self.name_key = name_key
self.counter = 0
self._keys_from_state = ["out_dir", "out_prefix"]
def on_stage_start(self, state: State):
for key in self._keys_from_state:
value = getattr(state, key, None)
if value is not None:
setattr(self, key, value)
# assert self.out_prefix is not None
self.out_prefix = self.out_prefix \
if self.out_prefix is not None \
else ""
if self.out_dir is not None:
self.out_prefix = str(self.out_dir) + "/" + str(self.out_prefix)
os.makedirs(os.path.dirname(self.out_prefix), exist_ok=True)
def on_loader_start(self, state: State):
lm = state.loader_name
os.makedirs(f"{self.out_prefix}/{lm}/", exist_ok=True)
def on_batch_end(self, state: State):
lm = state.loader_name
names = state.batch_in.get(self.name_key, [])
features = state.batch_in[self.input_key].detach().cpu()
images = utils.tensor_to_ndimage(features)
logits = state.batch_out[self.output_key]
logits = torch.unsqueeze_(logits, dim=1) \
if len(logits.shape) < 4 \
else logits
if self.mask_type == "soft":
probabilities = torch.sigmoid(logits)
else:
probabilities = F.softmax(logits, dim=1)
probabilities = probabilities.detach().cpu().numpy()
masks = []
for probability in probabilities:
mask = np.zeros_like(probability[0], dtype=np.int32)
for i, ch in enumerate(probability):
mask[ch >= self.threshold] = i + 1
masks.append(mask)
for i, (image, mask) in enumerate(zip(images, masks)):
try:
suffix = names[i]
except IndexError:
suffix = f"{self.counter:06d}"
self.counter += 1
mask = label2rgb(mask, bg_label=0)
image = image * (1 - self.mask_strength) \
+ mask * self.mask_strength
image = (image * 255).clip(0, 255).round().astype(np.uint8)
filename = f"{self.out_prefix}/{lm}/{suffix}.jpg"
imageio.imwrite(filename, image)
__all__ = ["InferCallback", "InferMaskCallback"]
|
[
"scitator@gmail.com"
] |
scitator@gmail.com
|
807ce2ef2b7594044dfcedb3be33a7a555fbea60
|
f24c35bb0919f9ad75f45e7906691c3189536b33
|
/chengbinWorkSpace/droneLanding/python/Tello/path-plan.py
|
a09ea6ba4b28dcee3cbd474fc8a53b485a698cdf
|
[] |
no_license
|
mfkiwl/supreme-xcb
|
9b941f49bab5a811d23a0cd75790d1e5722aa9f0
|
d1287657607bf86d4b1393acf285951760670925
|
refs/heads/main
| 2023-03-07T12:10:28.288282
| 2021-03-02T11:46:00
| 2021-03-02T11:46:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,434
|
py
|
'''
brief:
Version:
Author: shuike
Date: 2021-01-12 19:23:39
LastEditors: shuike
LastEditTime: 2021-01-12 19:23:39
FilePath: /droneLanding/python/Tello/path-plan.py
'''
#!/usr/bin/python
import pygame
import json
import math
"""
how many pixel = actual distance in cm
70px = 360cm --> 360/70 = MAP_SIZE_COEFF
"""
MAP_SIZE_COEFF = 5.14
pygame.init()
screen = pygame.display.set_mode([720, 720])
screen.fill((255, 255, 255))
running = True
class Background(pygame.sprite.Sprite):
def __init__(self, image, location, scale):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load(image)
self.image = pygame.transform.rotozoom(self.image, 0, scale)
self.rect = self.image.get_rect()
self.rect.left, self.rect.top = location
def get_dist_btw_pos(pos0, pos1):
"""
Get distance between 2 mouse position.
"""
x = abs(pos0[0] - pos1[0])
y = abs(pos0[1] - pos1[1])
dist_px = math.hypot(x, y)
dist_cm = dist_px * MAP_SIZE_COEFF
return int(dist_cm), int(dist_px)
def get_angle_btw_line(pos0, pos1, posref):
"""
Get angle between two lines respective to 'posref'
NOTE: using dot product calculation.
"""
ax = posref[0] - pos0[0]
ay = posref[1] - pos0[1]
bx = posref[0] - pos1[0]
by = posref[1] - pos1[1]
# Get dot product of pos0 and pos1.
_dot = (ax * bx) + (ay * by)
# Get magnitude of pos0 and pos1.
_magA = math.sqrt(ax**2 + ay**2)
_magB = math.sqrt(bx**2 + by**2)
_rad = math.acos(_dot / (_magA * _magB))
# Angle in degrees.
angle = (_rad * 180) / math.pi
return int(angle)
"""
Main capturing mouse program.
"""
# Load background image.
bground = Background('image.png', [0, 0], 1.6)
screen.blit(bground.image, bground.rect)
path_wp = []
index = 0
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.MOUSEBUTTONDOWN:
pos = pygame.mouse.get_pos()
path_wp.append(pos)
if index > 0:
pygame.draw.line(screen, (255, 0, 0), path_wp[index-1], pos, 2)
index += 1
pygame.display.update()
"""
Compute the waypoints (distance and angle).
"""
# Append first pos ref. (dummy)
path_wp.insert(0, (path_wp[0][0], path_wp[0][1] - 10))
path_dist_cm = []
path_dist_px = []
path_angle = []
for index in range(len(path_wp)):
# Skip the first and second index.
if index > 1:
dist_cm, dist_px = get_dist_btw_pos(path_wp[index-1], path_wp[index])
path_dist_cm.append(dist_cm)
path_dist_px.append(dist_px)
# Skip the first and last index.
if index > 0 and index < (len(path_wp) - 1):
angle = get_angle_btw_line(path_wp[index-1], path_wp[index+1], path_wp[index])
path_angle.append(angle)
# Print out the information.
print('path_wp: {}'.format(path_wp))
print('dist_cm: {}'.format(path_dist_cm))
print('dist_px: {}'.format(path_dist_px))
print('dist_angle: {}'.format(path_angle))
"""
Save waypoints into JSON file.
"""
waypoints = []
for index in range(len(path_dist_cm)):
waypoints.append({
"dist_cm": path_dist_cm[index],
"dist_px": path_dist_px[index],
"angle_deg": path_angle[index]
})
# Save to JSON file.
f = open('waypoint.json', 'w+')
path_wp.pop(0)
json.dump({
"wp": waypoints,
"pos": path_wp
}, f, indent=4)
f.close()
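# Illustrative sketch (not in the original script) of the waypoint.json layout
# this produces, with hypothetical values:
# {
#     "wp": [
#         {"dist_cm": 180, "dist_px": 35, "angle_deg": 90},
#         {"dist_cm": 92,  "dist_px": 18, "angle_deg": 45}
#     ],
#     "pos": [[120, 300], [155, 300], [155, 282]]
# }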
|
[
"xiechengbinin@gmail.com"
] |
xiechengbinin@gmail.com
|
c9316b33117f1c9cc1e359f48d5e48384095555c
|
a34ec07c3464369a88e68c9006fa1115f5b61e5f
|
/N_Queue/MonotonicQueue/L3_862_Shortest_Subarray_with_Sum_at_Least_K.py
|
75b2d94429dd60dfa0b738814446722e2d7d5e6d
|
[] |
no_license
|
824zzy/Leetcode
|
9220f2fb13e03d601d2b471b5cfa0c2364dbdf41
|
93b7f4448a366a709214c271a570c3399f5fc4d3
|
refs/heads/master
| 2023-06-27T02:53:51.812177
| 2023-06-16T16:25:39
| 2023-06-16T16:25:39
| 69,733,624
| 14
| 3
| null | 2022-05-25T06:48:38
| 2016-10-01T10:56:07
|
Python
|
UTF-8
|
Python
| false
| false
| 813
|
py
|
""" https://leetcode.com/problems/shortest-subarray-with-sum-at-least-k/
Transform the problem into finding the shortest sliding window with sum >= k:
maintain a monotonically increasing queue of prefix-sum indices,
popping from the head while the window sum reaches k, and popping from the tail to keep the queue increasing.
"""
from header import *
class Solution:
def shortestSubarray(self, A: List[int], k: int) -> int:
A = list(accumulate(A, initial=0))
dq = deque()
ans = inf
for i in range(len(A)):
# update ans based on head of queue
while dq and A[i]-A[dq[0]]>=k: ans = min(ans, i-dq.popleft())
# ensure monotonic increasing
while dq and A[dq[-1]]>=A[i]: dq.pop()
dq.append(i)
return ans if ans!=inf else -1
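# Quick self-check (not part of the original solution) against the three
# examples from the problem statement; relies on the same `header` helpers.
if __name__ == "__main__":
    s = Solution()
    assert s.shortestSubarray([1], 1) == 1
    assert s.shortestSubarray([1, 2], 4) == -1
    assert s.shortestSubarray([2, -1, 2], 3) == 3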
|
[
"zhengyuan.zhu@mavs.uta.edu"
] |
zhengyuan.zhu@mavs.uta.edu
|
d79b390ee107b353b56586cffd43e1fbeba7d65e
|
8998a6cf66578453249544ca10b4239615751c53
|
/setup.py
|
6b66ea18fd958460305ba03c5534b4cbfac428fa
|
[
"MIT"
] |
permissive
|
kwaegema/jicirodsmanager
|
be7c3857989c78a0cde4d8d41da45559dcc15499
|
aca97415acb8f1b40bbb72c1c05b25fe20808d84
|
refs/heads/master
| 2021-04-15T18:39:25.766088
| 2017-11-15T11:19:57
| 2017-11-15T11:19:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 589
|
py
|
from setuptools import setup
url = "https://github.com/JIC-CSB/jicirodsmanager"
version = "1.1.0"
readme = open('README.rst').read()
dsc = "Python tools to manage users/groups/quotas/namespaces in an iRODS zone"  # no trailing comma: a comma here would make dsc a tuple
setup(name="jicirodsmanager",
packages=["jicirodsmanager"],
version=version,
description=dsc,
long_description=readme,
include_package_data=True,
author="Tjelvar Olsson",
author_email="tjelvar.olsson@jic.ac.uk",
url=url,
install_requires=[],
download_url="{}/tarball/{}".format(url, version),
license="MIT")
|
[
"tjelvar.olsson@jic.ac.uk"
] |
tjelvar.olsson@jic.ac.uk
|
ed67f09cdd0c79e6d529bf5923da918fd286314f
|
5492374aeb1df9fb5a2cbef8eb8a48af47556f18
|
/galaga/galaga.py
|
794facec78d3a8fd39356a25dad30224681820b0
|
[] |
no_license
|
HeeeeeJinJeong/Practice_Python
|
136397f43a7ba525ff2561adb85de353c1f1cc21
|
1e8a0c525bf35324d5e9f3f1ff7747b2352de7b3
|
refs/heads/master
| 2020-08-07T02:59:55.583054
| 2019-10-16T03:40:29
| 2019-10-16T03:40:29
| 213,265,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,075
|
py
|
import pygame  # import the pygame library
import random  # import the random library
from time import sleep
# Global variables used by the game
BLACK = (0, 0, 0)  # background color of the game screen
RED = (255, 0, 0)
pad_width = 480  # width of the game screen
pad_height = 640  # height of the game screen
player_width = 36
player_height = 38
enemy_width = 26
enemy_height = 20
# Draw the number of enemies shot down
def drawScore(count):
    global gamepad
    font = pygame.font.SysFont(None, 20)
    text = font.render('Enemy Kills:' + str(count), True, (255, 255, 255))
    gamepad.blit(text, (0, 0))
# Draw the number of enemies that slipped past the bottom of the screen
def drawPassed(count):
    global gamepad
    font = pygame.font.SysFont(None, 20)
    text = font.render('Enemy Passed:' + str(count), True, RED)
    gamepad.blit(text, (360, 0))
# Show a text message on the screen
def dispMessage(text):
    global gamepad
    textfont = pygame.font.Font('freesansbold.ttf', 80)
    text = textfont.render(text, True, RED)
    textpos = text.get_rect()
    textpos.center = (pad_width / 2, pad_height / 2)
    gamepad.blit(text, textpos)
    pygame.display.update()
    sleep(2)
    runGame()
# Message shown when the fighter collides with an enemy
def crash():
    global gamepad
    dispMessage('Crashed!')
# Game-over message
def gameover():
    global gamepad
    dispMessage('Game Over')
# Draw a game object on the screen
def drawObject(obj, x, y):
    global gamepad
    gamepad.blit(obj, (x, y))
# Main game loop
def runGame():
    global gamepad, clock, player, enemy, bullet
    # Flag set to True when an enemy is hit by the fighter's weapon
    isShot = False
    shotcount = 0
    enemypassed = 0
    # List holding the coordinates of fired bullets
    bullet_xy = []
    # Initial fighter position (x, y)
    x = pad_width * 0.45
    y = pad_height * 0.9
    x_change = 0
    # Initial enemy position
    enemy_x = random.randrange(0, pad_width - enemy_width)
    enemy_y = 0
    enemy_speed = 3
    ongame = False
    while not ongame:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:  # window-close event
                ongame = True  # stop the main loop
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    x_change -= 5
                elif event.key == pygame.K_RIGHT:
                    x_change += 5
                # Space fires a bullet; at most three bullets can be in flight at once
                elif event.key == pygame.K_SPACE:
                    if len(bullet_xy) < 3:
                        bullet_x = x + player_width / 2
                        bullet_y = y - player_height
                        bullet_xy.append([bullet_x, bullet_y])
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                    x_change = 0
        gamepad.fill(BLACK)  # paint the screen black before redrawing
        # Update the fighter position
        x += x_change
        if x < 0:
            x = 0
        elif x > pad_width - player_width:
            x = pad_width - player_width
        # Check whether the player's fighter collided with the enemy
        if y < enemy_y + enemy_height:
            if (enemy_x > x and enemy_x < x + player_width) or \
                (enemy_x + enemy_width > x and enemy_x + enemy_width < x + player_width):
                crash()
        drawObject(player, x, y)  # draw the fighter at (x, y)
        # Move the bullets
        if len(bullet_xy) != 0:
            for i, bxy in enumerate(bullet_xy):
                bxy[1] -= 10  # move the bullet up by 10 pixels
                bullet_xy[i][1] = bxy[1]
                # The bullet shot down an enemy
                if bxy[1] < enemy_y:
                    if bxy[0] > enemy_x and bxy[0] < enemy_x + enemy_width:
                        bullet_xy.remove(bxy)
                        isShot = True
                        shotcount += 1
                if bxy[1] <= 0:  # if the bullet left the screen
                    try:
                        bullet_xy.remove(bxy)  # remove the bullet
                    except ValueError:
                        pass
        if len(bullet_xy) != 0:
            for bx, by in bullet_xy:
                drawObject(bullet, bx, by)
        drawScore(shotcount)
        # Move the enemy downwards
        enemy_y += enemy_speed
        if enemy_y > pad_height:
            enemy_y = 0
            enemy_x = random.randrange(0, pad_width - enemy_width)
            enemypassed += 1
        if enemypassed == 3:
            gameover()
        drawPassed(enemypassed)
        # If the enemy was hit, respawn it at the top and speed it up
        if isShot:
            enemy_speed += 1
            if enemy_speed >= 10:
                enemy_speed = 10
            enemy_x = random.randrange(0, pad_width - enemy_width)
            enemy_y = 0
            isShot = False
        drawObject(enemy, enemy_x, enemy_y)
        pygame.display.update()  # redraw the game screen
        clock.tick(60)  # cap the frame rate at 60 FPS
    pygame.quit()
# Initialize the game
def initGame():
    global gamepad, clock, player, enemy, bullet  # game screen, FPS clock, player, enemy, and bullet sprites
    pygame.init()
    gamepad = pygame.display.set_mode((pad_width, pad_height))  # set the screen size
    pygame.display.set_caption('Shooting Game')  # set the window title
    player = pygame.image.load('player.png')
    enemy = pygame.image.load('enemy.png')
    bullet = pygame.image.load('bullet.png')
    clock = pygame.time.Clock()  # Clock object for controlling frames per second
initGame()
runGame()
|
[
"misakiyoshikuni@gmail.com"
] |
misakiyoshikuni@gmail.com
|
8dcb9d84c112c2909facdc43f23f166eb593f67d
|
9a9e739dcc559476ba796510182374ad460f2f8b
|
/PA2/PA2 2013/PA2-12/Asitha/pa2-12-2013.py
|
417b2661135cadcac82806c583d3fe2709b266e0
|
[] |
no_license
|
Divisekara/Python-Codes-First-sem
|
542e8c0d4a62b0f66c598ff68a5c1c37c20e484d
|
e4ca28f07ecf96181af3c528d74377ab02d83353
|
refs/heads/master
| 2022-11-28T01:12:51.283260
| 2020-08-01T08:55:53
| 2020-08-01T08:55:53
| 284,220,850
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,085
|
py
|
def getText():
try:
FileOpen=open("FileIn.txt","r")
L=[]
while True:
L.append(FileOpen.readline().split())
if L[-1]==[]:
break
FileOpen.close()
except IOError:
print "File Not Found"
else:
L.pop(-1)
return L
def calculations(L):
L1=[]
for i in L:
Temp=[]
Temp.append(i.pop(0))
Temp.append(sum(map(int,i)))
Temp.append(round(float(sum(map(int,i)))/len(i),2))
L1.append(Temp)
sums=[]
for j in L1:
sums.append(j[1])
ranks=sorted(sums)[::-1]
for k in L1:
k.append(ranks.index(k[1])+1)
return L1
def show(L):
L1=[]
for i in L:
L1.append(" ".join(map(str,i)))
lines="\n".join(L1)
print lines
return lines
def saveFile(s):
try:
FileCreate=open("result.txt","w")
FileCreate.write(s)
FileCreate.close()
except IOError:
print "File Error"
pass
saveFile(show(calculations(getText())))
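# Hedged illustration of the expected I/O, using the file names in the code
# above and hypothetical rows:
# FileIn.txt                      result.txt
#   Alice 10 20 30          ->      Alice 60 20.0 1
#   Bob 5 5 5               ->      Bob 15 5.0 2
# Each output row is: name, total, average (2 dp), rank by total.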
|
[
"9asitha7@gmail.com"
] |
9asitha7@gmail.com
|
cd0d6330ebfe7555e50056381334270665abfb7c
|
30a2a924eb32e7297b5a99785950467f25ea785d
|
/ppgmle.py
|
65d92e6491d2764ae34a67f18c76334f0b21e094
|
[] |
no_license
|
zshwuhan/Reinforcement-Learning-of-Spatio-Temporal-Point-Processes
|
1a794e83491b52dea5db3926de91779a9e661a17
|
a3f98e77b56c03839dcdb545b17b3675e7c43878
|
refs/heads/master
| 2020-07-22T16:18:10.020860
| 2019-07-02T18:49:02
| 2019-07-02T18:49:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,065
|
py
|
import sys
import arrow
import utils
import numpy as np
import tensorflow as tf
from tfgen import SpatialTemporalHawkes
from ppgrl import RL_Hawkes_Generator
from stppg import GaussianMixtureDiffusionKernel, HawkesLam, SpatialTemporalPointProcess, StdDiffusionKernel
class MLE_Hawkes_Generator(object):
"""
Reinforcement Learning Based Point Process Generator
"""
def __init__(self, T, S, layers, n_comp, batch_size, C=1., data_dim=3, keep_latest_k=None, lr=1e-3, reg_scale=0.):
"""
Params:
- T: the maximum time of the sequences
- S: the space of location
- C: the constant in diffusion kernel
- batch_size: batch size of the training data
- maximum: upper bound of the conditional intensity
- data_dim: data dimension (=3 by default)
- keep_latest_k: only compute latest k points in log-likelihood calculation
- lr: learning rate for the SGD optimizer
"""
self.batch_size = batch_size
# Hawkes process
self.hawkes = SpatialTemporalHawkes(T, S, layers=layers, n_comp=n_comp, C=C, maximum=1e+3, verbose=True)
# regularization
l1_regularizer = tf.contrib.layers.l1_regularizer(scale=reg_scale, scope=None)
penalty_term = tf.contrib.layers.apply_regularization(l1_regularizer, self.hawkes.Wss)
# input tensors: expert sequences (time, location, marks)
self.input_seqs = tf.placeholder(tf.float32, [batch_size, None, data_dim]) # [batch_size, seq_len, data_dim]
self.cost = -1 * self.log_likelihood(S, keep_latest_k=keep_latest_k) / batch_size # + penalty_term
# Adam optimizer
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(lr, global_step, decay_steps=100, decay_rate=0.99, staircase=True)
self.optimizer = tf.train.AdamOptimizer(learning_rate, beta1=0.6, beta2=0.9).minimize(self.cost, global_step=global_step)
def log_likelihood(self, S, keep_latest_k):
"""
compute the log-likelihood of the input data given the hawkes point process.
"""
# log-likelihood
loglikli = 0.
        for b in range(self.batch_size):
seq = self.input_seqs[b, :, :]
# mask_t = tf.cast(seq[:, 0] > 0, tf.float32)
# trunc_seq = tf.boolean_mask(seq, mask_t)
# seq_len = tf.shape(trunc_seq)[0]
# # calculate the log conditional pdf for each of data points in the sequence.
# loglikli += tf.reduce_sum(tf.scan(
# lambda a, i: self.hawkes.log_conditional_pdf(trunc_seq[:i, :], keep_latest_k=keep_latest_k),
# tf.range(1, seq_len+1), # from the first point to the last point
# initializer=np.array(0., dtype=np.float32)))
loglikli += self.hawkes.log_likelihood(seq)
return loglikli
def train(self, sess,
epoches, # number of epoches (how many times is the entire dataset going to be trained)
expert_seqs, # [n, seq_len, data_dim=3]
pretrained=False):
"""train the point process generator given expert sequences."""
# initialization
if not pretrained:
# initialize network parameters
init_op = tf.global_variables_initializer()
sess.run(init_op)
print("[%s] parameters are initialized." % arrow.now(), file=sys.stderr)
# data configurations
# - number of expert sequences
n_data = expert_seqs.shape[0]
# - number of batches
        n_batches = int(n_data / self.batch_size)
# training over epoches
all_train_cost = []
for epoch in range(epoches):
# shuffle indices of the training samples
shuffled_ids = np.arange(n_data)
np.random.shuffle(shuffled_ids)
# training over batches
avg_train_cost = []
for b in range(n_batches):
                idx = np.arange(self.batch_size * b, self.batch_size * (b + 1))
# training and testing indices selected in current batch
batch_train_ids = shuffled_ids[idx]
# training and testing batch data
batch_train_seqs = expert_seqs[batch_train_ids, :, :]
# optimization procedure
sess.run(self.optimizer, feed_dict={self.input_seqs: batch_train_seqs})
# cost for train batch and test batch
train_cost = sess.run(self.cost, feed_dict={self.input_seqs: batch_train_seqs})
print("[%s] batch training cost: %.2f." % (arrow.now(), train_cost), file=sys.stderr)
# record cost for each batch
avg_train_cost.append(train_cost)
all_train_cost.append(train_cost)
# training log output
avg_train_cost = np.mean(avg_train_cost)
            print('[%s] Epoch %d (n_train_batches=%d, batch_size=%d)' % (arrow.now(), epoch, n_batches, self.batch_size), file=sys.stderr)
print('[%s] Training cost:\t%f' % (arrow.now(), avg_train_cost), file=sys.stderr)
# save all training cost into numpy file.
np.savetxt("results/robbery_mle_train_cost.txt", all_train_cost, delimiter=",")
if __name__ == "__main__":
# Unittest example
S = [[-1., 1.], [-1., 1.]]
T = [0., 10.]
data = np.load('../Spatio-Temporal-Point-Process-Simulator/data/rescale.ambulance.perday.npy')
data = data[:320, 1:51, :] # remove the first element in each seqs, since t = 0
da = utils.DataAdapter(init_data=data, S=S, T=T)
# data = np.load('../Spatio-Temporal-Point-Process-Simulator/data/northcal.earthquake.perseason.npy')
# da = utils.DataAdapter(init_data=data)
seqs = da.normalize(data)
print(da)
print(seqs.shape)
# training model
with tf.Session() as sess:
batch_size = 32
epoches = 10
layers = [5]
n_comp = 5
ppg = MLE_Hawkes_Generator(
T=T, S=S, layers=layers, n_comp=n_comp,
batch_size=batch_size, data_dim=3,
keep_latest_k=None, lr=1e-1, reg_scale=0.)
ppg.train(sess, epoches, seqs)
ppg.hawkes.save_params_npy(sess,
path="../Spatio-Temporal-Point-Process-Simulator/data/rescale_ambulance_mle_gaussian_mixture_params.npz")
# generate samples and test mmd metric
# test_size = 20
# params = np.load('../Spatio-Temporal-Point-Process-Simulator/data/earthquake_mle_gaussian_mixture_params.npz')
# mu = .1 # params['mu']
# beta = 1. # params['beta']
# # print(mu)
# # print(beta)
# kernel = GaussianMixtureDiffusionKernel(
# n_comp=n_comp, layers=layers, C=1., beta=beta,
# SIGMA_SHIFT=.05, SIGMA_SCALE=.2, MU_SCALE=.01,
# Wss=params['Wss'], bss=params['bss'], Wphis=params['Wphis'])
# # kernel = StdDiffusionKernel(C=1., beta=1., sigma_x=.08, sigma_y=.08)
# lam = HawkesLam(mu, kernel, maximum=1e+3)
# pp = SpatialTemporalPointProcess(lam)
# learner_seqs = pp.generate(T, S, batch_size=test_size, min_n_points=5, verbose=True)[0]
# # uniform samples
# learner_seqs = []
# for i in range(test_size):
# N = 30
# _S = [T] + S
# points = [ np.random.uniform(_S[i][0], _S[i][1], N) for i in range(len(_S)) ]
# points = np.array(points).transpose()
# points = points[points[:, 0].argsort()].tolist()
# learner_seqs.append(points)
# learner_seqs = np.array(learner_seqs)
# expert_seqs = seqs[:test_size, :, :]
# print(learner_seqs.shape)
# # calculate mmd
# rlgen = RL_Hawkes_Generator(T, S, layers, n_comp, test_size)
# mmd = rlgen.mmd(sess, expert_seqs, learner_seqs)
# print(mmd)
|
[
"woodielove@163.com"
] |
woodielove@163.com
|
28acd089a5318eca2c288aeb6b39ea6b02b19415
|
c036befbd9a4b81c0f082273dd0eb007e7f9582d
|
/dort-core/protocols/full_node_protocol.py
|
79a277910c3f61ce22c0a990b22b93f0b6596264
|
[
"Apache-2.0"
] |
permissive
|
Dortchain/dort-blockchian
|
889f52f36dcdeffe0f852b413cdd32879741462f
|
14f16e321a60f9d70f849f58e4e9964fa337a084
|
refs/heads/main
| 2023-06-16T01:31:30.718415
| 2021-07-11T03:03:12
| 2021-07-11T03:03:12
| 384,694,718
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,240
|
py
|
from dataclasses import dataclass
from typing import List, Optional
from Dort.types.blockchain_format.sized_bytes import bytes32
from Dort.types.blockchain_format.vdf import VDFInfo, VDFProof
from Dort.types.end_of_slot_bundle import EndOfSubSlotBundle
from Dort.types.full_block import FullBlock
from Dort.types.peer_info import TimestampedPeerInfo
from Dort.types.spend_bundle import SpendBundle
from Dort.types.unfinished_block import UnfinishedBlock
from Dort.types.weight_proof import WeightProof
from Dort.util.ints import uint8, uint32, uint64, uint128
from Dort.util.streamable import Streamable, streamable
"""
Protocol between full nodes.
Note: When changing this file, also change protocol_message_types.py, and the protocol version in shared_protocol.py
"""
@dataclass(frozen=True)
@streamable
class NewPeak(Streamable):
header_hash: bytes32
height: uint32
weight: uint128
fork_point_with_previous_peak: uint32
unfinished_reward_block_hash: bytes32
@dataclass(frozen=True)
@streamable
class NewTransaction(Streamable):
transaction_id: bytes32
cost: uint64
fees: uint64
@dataclass(frozen=True)
@streamable
class RequestTransaction(Streamable):
transaction_id: bytes32
@dataclass(frozen=True)
@streamable
class RespondTransaction(Streamable):
transaction: SpendBundle
@dataclass(frozen=True)
@streamable
class RequestProofOfWeight(Streamable):
total_number_of_blocks: uint32
tip: bytes32
@dataclass(frozen=True)
@streamable
class RespondProofOfWeight(Streamable):
wp: WeightProof
tip: bytes32
@dataclass(frozen=True)
@streamable
class RequestBlock(Streamable):
height: uint32
include_transaction_block: bool
@dataclass(frozen=True)
@streamable
class RejectBlock(Streamable):
height: uint32
@dataclass(frozen=True)
@streamable
class RequestBlocks(Streamable):
start_height: uint32
end_height: uint32
include_transaction_block: bool
@dataclass(frozen=True)
@streamable
class RespondBlocks(Streamable):
start_height: uint32
end_height: uint32
blocks: List[FullBlock]
@dataclass(frozen=True)
@streamable
class RejectBlocks(Streamable):
start_height: uint32
end_height: uint32
@dataclass(frozen=True)
@streamable
class RespondBlock(Streamable):
block: FullBlock
@dataclass(frozen=True)
@streamable
class NewUnfinishedBlock(Streamable):
unfinished_reward_hash: bytes32
@dataclass(frozen=True)
@streamable
class RequestUnfinishedBlock(Streamable):
unfinished_reward_hash: bytes32
@dataclass(frozen=True)
@streamable
class RespondUnfinishedBlock(Streamable):
unfinished_block: UnfinishedBlock
@dataclass(frozen=True)
@streamable
class NewSignagePointOrEndOfSubSlot(Streamable):
prev_challenge_hash: Optional[bytes32]
challenge_hash: bytes32
index_from_challenge: uint8
last_rc_infusion: bytes32
@dataclass(frozen=True)
@streamable
class RequestSignagePointOrEndOfSubSlot(Streamable):
challenge_hash: bytes32
index_from_challenge: uint8
last_rc_infusion: bytes32
@dataclass(frozen=True)
@streamable
class RespondSignagePoint(Streamable):
index_from_challenge: uint8
challenge_chain_vdf: VDFInfo
challenge_chain_proof: VDFProof
reward_chain_vdf: VDFInfo
reward_chain_proof: VDFProof
@dataclass(frozen=True)
@streamable
class RespondEndOfSubSlot(Streamable):
end_of_slot_bundle: EndOfSubSlotBundle
@dataclass(frozen=True)
@streamable
class RequestMempoolTransactions(Streamable):
filter: bytes
@dataclass(frozen=True)
@streamable
class NewCompactVDF(Streamable):
height: uint32
header_hash: bytes32
field_vdf: uint8
vdf_info: VDFInfo
@dataclass(frozen=True)
@streamable
class RequestCompactVDF(Streamable):
height: uint32
header_hash: bytes32
field_vdf: uint8
vdf_info: VDFInfo
@dataclass(frozen=True)
@streamable
class RespondCompactVDF(Streamable):
height: uint32
header_hash: bytes32
field_vdf: uint8
vdf_info: VDFInfo
vdf_proof: VDFProof
@dataclass(frozen=True)
@streamable
class RequestPeers(Streamable):
"""
Return full list of peers
"""
@dataclass(frozen=True)
@streamable
class RespondPeers(Streamable):
peer_list: List[TimestampedPeerInfo]
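# Hedged usage sketch (not in the original file): constructing one of these
# messages, assuming the sized-bytes/int helpers accept raw values as in
# typical Chia-style codebases.
# example_peak = NewPeak(
#     header_hash=bytes32(b"\x00" * 32),
#     height=uint32(1),
#     weight=uint128(100),
#     fork_point_with_previous_peak=uint32(0),
#     unfinished_reward_block_hash=bytes32(b"\x00" * 32),
# )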
|
[
"welllinks@outlook.com"
] |
welllinks@outlook.com
|
64c8fbf46fb22fc14a9bf06a229215fef2968c1d
|
bd87d8947878ccb2f5b720e70a22493b00868fd3
|
/fluent/17_concurrency_with_futures/flags2.py
|
8f780b17b279566b83594808af0ab947b0b18362
|
[] |
no_license
|
damiansp/completePython
|
4cbf12ef682a1d4a5498f77e407dc02e44a7d7ac
|
3f5e2f14d79c93df5147b82d901190c054535158
|
refs/heads/master
| 2023-09-01T20:50:03.444440
| 2023-08-28T00:27:57
| 2023-08-28T00:27:57
| 99,197,610
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,351
|
py
|
'''Download 20 flag images sequentially (synchronous) for baseline comparison'''
import collections
import os
import sys
import time
from enum import Enum
import requests
import tqdm
# Status enum and result record, assumed to match the flags2_common helpers
# this script was adapted from.
HTTPStatus = Enum('Status', 'ok not_found error')
Result = collections.namedtuple('Result', 'status data')
POP20_CC = ('CN IN US ID BR PK NG BD RU JP MX PH VN ET EG DE IR TR CD FR')\
    .split()
BASE_URL = 'http://flupy.org/data/flags'
DEST_DIR = 'images'
def save_flag(img, filename):
path = os.path.join(DEST_DIR, filename)
with open(path, 'wb') as f:
f.write(img)
def get_flag(base_url, cc):
    url = '{}/{cc}/{cc}.gif'.format(base_url, cc=cc.lower())
    resp = requests.get(url)
    if resp.status_code != 200:
        resp.raise_for_status()
    return resp.content
def download_one(cc, base_url, verbose=False):
try:
image = get_flag(base_url, cc)
except requests.exceptions.HTTPError as e:
res = e.response
if res.status_code == 404:
status = HTTPStatus.not_found
msg = 'not found'
else:
raise
else:
save_flag(image, cc.lower() + '.gif')
status = HTTPStatus.ok
msg = 'OK'
if verbose:
print(cc, msg)
return Result(status, cc)
def show(text):
print(text, end=' ')
sys.stdout.flush()
def download_many(cc_list, base_url, verbose, max_req):  # max_req kept for signature compatibility with concurrent versions
counter = collections.Counter()
cc_iter = sorted(cc_list)
if not verbose:
cc_iter = tqdm.tqdm(cc_iter)
for cc in cc_iter:
try:
res = download_one(cc, base_url, verbose)
except requests.exceptions.HTTPError as e:
error_msg = 'HTTP error {res.status_code} - {res.reason}'
error_msg = error_msg.format(res=e.response)
        except requests.exceptions.ConnectionError as e:
error_msg = 'Connection error'
else:
error_msg = ''
status = res.status
if error_msg:
status = HTTPStatus.error
counter[status] += 1
if verbose and error_msg:
print('*** Error for {}: {}'.format(cc, error_msg))
return counter
# pass download_many to main so main can be used as a lib func with other
# implementations for downloading
def main(download_many):
    t0 = time.time()
    counter = download_many(POP20_CC, BASE_URL, False, 1)
    elapsed = time.time() - t0
    msg = '\n{} flags downloaded in {:.2f}s'
    print(msg.format(sum(counter.values()), elapsed))
if __name__ == '__main__':
    main(download_many)
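# Hedged sketch (not from the original file): because main() receives the
# download function as an argument, a concurrent variant with the same
# signature can be swapped in for comparison, e.g.:
from concurrent import futures

def download_many_threaded(cc_list, base_url, verbose, max_req):
    counter = collections.Counter()
    with futures.ThreadPoolExecutor(max_workers=max_req) as executor:
        to_do = [executor.submit(download_one, cc, base_url, verbose)
                 for cc in sorted(cc_list)]
        for future in futures.as_completed(to_do):
            counter[future.result().status] += 1  # tally per-status results
    return counter
# Swap in with: main(download_many_threaded)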
|
[
"damiansp@gmail.com"
] |
damiansp@gmail.com
|
27dcf8647fd0983da8e44125393eb3cc04a7340d
|
9dfb3372a1e4516d970a6e9d0a9fd8360580eae7
|
/python patten find/patten_find_of_know_type.py
|
5c17066891d4009fc74c65a7643b516f6664d7aa
|
[] |
no_license
|
clambering-goat/cameron_pyton
|
d1cd0e7b04da14e7ba4f89dcb4d973f297a4626c
|
df0b0365b86e75cfcfc2c1fc21608f1536a3b79f
|
refs/heads/master
| 2021-07-14T20:37:37.021401
| 2019-02-28T07:52:11
| 2019-02-28T07:52:11
| 137,251,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,049
|
py
|
# Works, but if the tail sequence appears inside the payload, the parser will mistake it for the end of the data packet
import random
data=""
head="123"
tail="789"
parrton_data="456646849861"
parrton=head+parrton_data+tail
lenth_data=len(parrton)
print("looking for",parrton)
#data maker
#v1
'''
random_noise=5
parrton_reption=2
for number_of_parrton in range(parrton_reption):
data=data+parrton
for q in range(random_noise):
data=data+str(random.randint(1,10))
'''
#v2
random_noise=5
parrton_reption=2
aount_of_curruptiuon=2
for number_of_parrton in range(parrton_reption):
data=data+parrton
for q in range(random_noise):
data=data+str(random.randint(1,10))
# corruption
for _ in range(aount_of_curruptiuon):
palce=random.randint(0,len(data))
print("ramdom curruption at ",palce)
data=data[0:palce]+str(random.randint(1,10))+data[palce:]
print(data)
#data find
#print("data in ",parrton in data)
head_match=[False,False,False]
looking_for_head=True
looking_for_tail=False
tail_match=[False,False,False]
retrved_data=""
for q in data:
#print("Q",q)
if looking_for_head==True:
if q==head[0] and head_match[0]==False and head_match[1]==False and head_match[2]==False:
head_match[0]=True
#print("1",q)
#print(head_match)
elif q==head[1] and head_match[0]==True and head_match[1]==False and head_match[2]==False:
head_match[1]=True
#print("2",q)
#print(head_match)
elif q==head[2] and head_match[0]==True and head_match[1]==True and head_match[2]==False:
head_match[2]=True
#print("3",q)
print("posible start found")
looking_for_head=False
looking_for_tail=True
retrved_data=""
#print(head_match)
else:
#print("reset")
head_match=[False,False,False]
#print(head_match)
if looking_for_tail ==True:
retrved_data=retrved_data+q
if q==tail[0] and tail_match[0]==False and tail_match[1]==False and tail_match[2]==False:
tail_match[0]=True
#print("1",q)
#print(head_match)
elif q==tail[1] and tail_match[0]==True and tail_match[1]==False and tail_match[2]==False:
tail_match[1]=True
#print("2",q)
#print(head_match)
elif q==tail[2] and tail_match[0]==True and tail_match[1]==True and tail_match[2]==False:
tail_match[2]=True
#print("3",q)
print("end found")
print("the data is :")
print(retrved_data[1:-len(tail)])
print("did the code work")
print(parrton_data==(retrved_data[1:-len(tail)]))
exit()
looking_for_head=True
looking_for_tail=False
#print(head_match)
else:
#print("reset")
tail_match=[False,False,False]
#print(head_match)
print("code not found")
|
[
"camerondrain@gmail.com"
] |
camerondrain@gmail.com
|