blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a2f660894a3d10df1c7a71e0782ffb017a97f802 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_201/2972.py | 2fd708180642ad8b97474fbea1db1b2ae25b8eec | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | def yz(s, stalls):
	# Body of yz(s, stalls): measure the run of free stalls around stall s.
	# stalls holds 0 for free, 1 for occupied; index 0 and -1 are occupied
	# sentinels, so the scans below always terminate.
	g = len(stalls)
	# l / r count consecutive free stalls to the left / right of s.
	l = 0
	r = 0
	# Scan right from s until an occupied stall or the array end.
	while True:
		if s+r+1 >= g:
			break
		if stalls[s+r+1] == 0:
			r += 1
		else:
			break
	# Scan left from s until an occupied stall or the array start.
	while True:
		if s-l-1 < 0:
			break
		if stalls[s-l-1] == 0:
			l += 1
		else:
			break
	# Code Jam "Bathroom Stalls" comfort pair: (smaller gap, larger gap).
	return (min(l,r), max(l,r))
for case in range(int(input())):
    # Bathroom Stalls (Code Jam 2017): n stalls, k people; each person picks
    # the free stall whose surrounding gap pair (min, max) is largest.
    n, k = map(int, input().split())
    # Occupancy array with occupied sentinels at both ends.
    occupied = [0] * (n + 2)
    occupied[0] = occupied[-1] = 1
    for step in range(k):
        best_stall = 0
        best_gap = (0, 0)
        for stall in range(1, n + 1):
            if occupied[stall]:
                continue
            gap = yz(stall, occupied)
            # Strict '>' keeps the first stall among ties, as before.
            if gap > best_gap:
                best_gap = gap
                best_stall = stall
        occupied[best_stall] = 1
        # Report only for the last person placed.
        if step == k - 1:
            print("Case #" + str(case + 1) + ":", best_gap[1], best_gap[0])
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
cf7c207170b97bd72c6456a05b66eb280835a058 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-cdn/huaweicloudsdkcdn/v1/model/follow302_status_body.py | 609f30a679e4f7051de995d36eec1c97b7acb914 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,938 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class Follow302StatusBody:
    """Request body for switching "follow 302 redirects" on a CDN domain.

    Auto-generated OpenAPI model; the class-level metadata drives the
    generic (de)serialization helpers below.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the API definition.
    """
    # Attributes whose values must be masked in to_dict() output.
    sensitive_list = []
    openapi_types = {
        'domain_id': 'str',
        'follow_status': 'str'
    }
    attribute_map = {
        'domain_id': 'domain_id',
        'follow_status': 'follow_status'
    }
    def __init__(self, domain_id=None, follow_status=None):
        """Follow302StatusBody - a model defined in huaweicloud sdk"""
        self._domain_id = None
        self._follow_status = None
        self.discriminator = None
        # Assign through the property setters only when a value was given,
        # so unset attributes stay None.
        if domain_id is not None:
            self.domain_id = domain_id
        if follow_status is not None:
            self.follow_status = follow_status
    @property
    def domain_id(self):
        """Gets the domain_id of this Follow302StatusBody.

        ID of the accelerated domain name (obtained via the domain-name
        query API).

        :return: The domain_id of this Follow302StatusBody.
        :rtype: str
        """
        return self._domain_id
    @domain_id.setter
    def domain_id(self, domain_id):
        """Sets the domain_id of this Follow302StatusBody.

        ID of the accelerated domain name (obtained via the domain-name
        query API).

        :param domain_id: The domain_id of this Follow302StatusBody.
        :type: str
        """
        self._domain_id = domain_id
    @property
    def follow_status(self):
        """Gets the follow_status of this Follow302StatusBody.

        follow302 status ("off"/"on").

        :return: The follow_status of this Follow302StatusBody.
        :rtype: str
        """
        return self._follow_status
    @follow_status.setter
    def follow_status(self, follow_status):
        """Sets the follow_status of this Follow302StatusBody.

        follow302 status ("off"/"on").

        :param follow_status: The follow_status of this Follow302StatusBody.
        :type: str
        """
        self._follow_status = follow_status
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model objects inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model objects used as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    # Never leak sensitive values into dumps or logs.
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force UTF-8 as the default encoding before dumping.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Follow302StatusBody):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
74e4a49f5cc0b6f968eed7f58149de37464fd37b | f5fa77d99d31d4e8fef2648b83e9d988123be118 | /hanser/models/detection/gfocal.py | 841fdaa8c0c56a3fb85d366cebad9e3746961704 | [] | no_license | sbl1996/hanser | 6ff5362f6909c4ba717c10b5f7baf31a41b70531 | 21c6f9470dd21a5b2e7a18318f40314a34053822 | refs/heads/master | 2022-06-04T03:46:39.034645 | 2022-05-21T03:34:08 | 2022-05-21T03:34:08 | 197,355,428 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,415 | py | import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.initializers import RandomNormal, Zeros
from hanser.ops import safe_softmax, top_k
from hanser.models.layers import Linear
from hanser.models.detection.detector import SingleStageDetector
from hanser.models.detection.neck.fpn import FPN
from hanser.models.detection.retinanet import RetinaHead
from hanser.detection.assign import mlvl_concat
def integral(prob):
    """Expected value of discrete side distributions (GFL 'integral' op).

    prob has shape (..., 4, n+1); the last axis is a probability
    distribution over integer bins 0..n, and the result drops that axis.
    """
    n_bins = prob.shape[-1]
    bin_values = tf.constant(np.linspace(0, n_bins - 1, n_bins), dtype=prob.dtype)
    return tf.tensordot(prob, bin_values, axes=[[-1], [0]])
class GFocal(SingleStageDetector):
    """Generalized Focal Loss detector: backbone + FPN neck + GFocalHead."""
    def __init__(self, backbone, num_classes, backbone_indices=(1, 2, 3),
                 feat_channels=256, num_extra_convs=2, stacked_convs=4, norm='bn'):
        super().__init__()
        self.backbone = backbone
        self.backbone_indices = backbone_indices
        backbone_channels = [backbone.feat_channels[i] for i in backbone_indices]
        self.neck = FPN(backbone_channels, feat_channels, num_extra_convs,
                        extra_convs_on='output', norm=norm)
        # One pyramid level per selected backbone stage plus the extra convs.
        num_levels = len(backbone_indices) + num_extra_convs
        # Stride doubles per level; first selected stage maps to 2**(idx + 2).
        strides = [2 ** (l + backbone_indices[0] + 2) for l in range(num_levels)]
        self.head = GFocalHead(
            num_classes, feat_channels, feat_channels, stacked_convs,
            norm=norm, strides=strides)
class GFocalHead(RetinaHead):
    """GFL head: each box side is regressed as a distribution over
    reg_max+1 discrete bins, and a distribution-guided quality score
    rescales the classification scores."""
    def __init__(self, num_classes, in_channels, feat_channels=256, stacked_convs=4,
                 norm='bn', strides=(8, 16, 32, 64, 128), reg_max=16, reg_topk=4, reg_channels=64):
        # One anchor per location; the bbox branch emits 4*(reg_max+1)
        # logits (one discrete distribution per box side).
        super().__init__(
            1, num_classes, in_channels, feat_channels, stacked_convs,
            centerness=False, bbox_out_channels=4 * (reg_max + 1),
            concat=False, norm=norm, num_levels=len(strides))
        self.strides = strides
        self.reg_max = reg_max
        self.reg_topk = reg_topk
        self.reg_channels = reg_channels
        # Quality subnet: top-k statistics of the 4 side distributions
        # -> single sigmoid quality scalar per location.
        self.reg_conf = Sequential([
            Linear(4 * (reg_topk + 1), reg_channels, act='relu',
                   kernel_init=RandomNormal(stddev=0.01), bias_init=Zeros()),
            Linear(reg_channels, 1, act='sigmoid',
                   kernel_init=RandomNormal(stddev=0.01), bias_init=Zeros()),
        ])
    def call(self, x):
        # Per-level predictions from the shared RetinaHead convs.
        preds = super().call(x)
        bbox_preds = preds['bbox_pred']
        cls_scores = preds['cls_score']
        b = tf.shape(bbox_preds[0])[0]
        num_level_bboxes = [p.shape[1] for p in bbox_preds]
        # Flatten all pyramid levels into one anchor axis.
        bbox_preds = tf.concat(bbox_preds, axis=1)
        cls_scores = tf.concat(cls_scores, axis=1)
        dis_logits = tf.reshape(bbox_preds, [b, -1, 4, self.reg_max + 1])
        prob = safe_softmax(dis_logits, axis=-1)
        # Top-k probabilities plus their mean summarize each side distribution.
        prob_topk = top_k(prob, k=self.reg_topk)
        stat = tf.concat([prob_topk, tf.reduce_mean(prob_topk, axis=-1, keepdims=True)], axis=-1)
        stat = tf.reshape(stat, [b, -1, 4 * (self.reg_topk + 1)])
        quality_score = self.reg_conf(stat)
        # Joint classification-quality score (GFL formulation).
        cls_scores = tf.nn.sigmoid(cls_scores) * quality_score
        # Bin expectations are in stride units; scale back to pixels per level.
        scales = mlvl_concat(self.strides, num_level_bboxes, prob.dtype)[None, :, None]
        bbox_preds = integral(prob) * scales
        return {'dis_logit': dis_logits, 'bbox_pred': bbox_preds, 'cls_score': cls_scores,
                'scales': scales}
| [
"sbl1996@126.com"
] | sbl1996@126.com |
e8aa80bf6d288ff8e7a87cbdf26ee0b74daf3509 | d094ba0c8a9b1217fbf014aa79a283a49aabe88c | /env/lib/python3.6/site-packages/sympy/physics/units/systems/natural.py | 124392e550ae5effd4ffc8ef3c5ef72af017eec4 | [
"Apache-2.0"
] | permissive | Raniac/NEURO-LEARN | d9274e0baadd97bb02da54bdfcf6ca091fc1c703 | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | refs/heads/master | 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 | Apache-2.0 | 2022-12-09T21:01:00 | 2019-04-18T03:57:00 | CSS | UTF-8 | Python | false | false | 931 | py | # -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
"""
Naturalunit system.
The natural system comes from "setting c = 1, hbar = 1". From the computer
point of view it means that we use velocity and action instead of length and
time. Moreover instead of mass we use energy.
"""
from __future__ import division
from sympy.physics.units.definitions import c, eV, hbar
from sympy.physics.units.dimensions import (
DimensionSystem, action, energy, force, frequency, length, mass, momentum,
power, time, velocity)
from sympy.physics.units.prefixes import PREFIXES, prefix_unit
from sympy.physics.units.unitsystem import UnitSystem
# dimension system: with c = hbar = 1 only action, energy and velocity stay
# independent; length, mass, time, etc. become derived dimensions.
_natural_dim = DimensionSystem(
    base_dims=(action, energy, velocity),
    derived_dims=(length, mass, time, momentum, force, power, frequency)
)
# electron-volt together with all SI-prefixed variants (keV, MeV, GeV, ...)
units = prefix_unit(eV, PREFIXES)
# unit system built on (hbar, eV, c) as the base units
natural = UnitSystem(base=(hbar, eV, c), units=units, name="Natural system")
| [
"leibingye@outlook.com"
] | leibingye@outlook.com |
95e03440d89285f10df39b55be1fd06e80e3332f | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/ads/googleads/v4/googleads-py/google/ads/googleads/v4/enums/types/user_list_type.py | 64bb9c0c18d0075bb477ea405690a9c7d7888bd2 | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,201 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# proto-plus module descriptor: registers this module's messages under the
# given package so the marshalling layer can resolve the wrapper types.
__protobuf__ = proto.module(
    package='google.ads.googleads.v4.enums',
    marshal='google.ads.googleads.v4',
    manifest={
        'UserListTypeEnum',
    },
)
class UserListTypeEnum(proto.Message):
    r"""Container message for the user list types enum (generated code)."""
    class UserListType(proto.Enum):
        r"""Enum containing possible user list types.

        The numeric values are part of the API wire format and must not
        be changed.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        REMARKETING = 2
        LOGICAL = 3
        EXTERNAL_REMARKETING = 4
        RULE_BASED = 5
        SIMILAR = 6
        CRM_BASED = 7
# Public API of this module, derived from the proto manifest above.
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
f07ab92ea930bbe7555bfa5f6e2cbebf127da184 | 81b0e6fe7a6e56ed8a91748499b81ddd3f2e45f8 | /GAN/mnist_inforgan.py | ae5cfe025134b4efad95451acb734d8a7928f906 | [] | no_license | shieldforever/DeepLearning | cfef817602b9677df4df4c1b87e60c5e91f2315a | b8080938a4b22395379be9032266df36cb5491e6 | refs/heads/master | 2021-01-05T14:12:26.110888 | 2019-10-29T11:23:06 | 2019-10-29T11:23:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,069 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 20 19:39:36 2019
@author: Administrator
"""
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from scipy.stats import norm
import tensorflow.contrib.slim as slim
from tensorflow.examples.tutorials.mnist import input_data
# Download/load MNIST into ./MNIST_data (labels as integer class ids).
mnist=input_data.read_data_sets("MNIST_data/")
# Start from a clean default graph (safe to re-run interactively).
tf.reset_default_graph()
def generator(x):
    """InfoGAN generator: latent vector -> 28x28x1 image in [0, 1].

    Reuses the 'generator' variable scope when its variables already
    exist, so repeated calls share one set of weights.
    """
    reuse = len([t for t in tf.global_variables() if t.name.startswith('generator')]) > 0
    #print (x.get_shape())
    with tf.variable_scope('generator', reuse = reuse):
        x = slim.fully_connected(x, 1024)
        #print( x)
        x = slim.batch_norm(x, activation_fn=tf.nn.relu)
        x = slim.fully_connected(x, 7*7*128)
        x = slim.batch_norm(x, activation_fn=tf.nn.relu)
        # Reshape to a 7x7 feature map, then upsample 7 -> 14 -> 28.
        x = tf.reshape(x, [-1, 7, 7, 128])
        # print '22',tensor.get_shape()
        x = slim.conv2d_transpose(x, 64, kernel_size=[4,4], stride=2, activation_fn = None)
        #print ('gen',x.get_shape())
        x = slim.batch_norm(x, activation_fn = tf.nn.relu)
        z = slim.conv2d_transpose(x, 1, kernel_size=[4, 4], stride=2, activation_fn=tf.nn.sigmoid)
        #print ('genz',z.get_shape())
        return z
def leaky_relu(x):
    """LeakyReLU with slope 0.01: x for x > 0, else 0.01 * x.

    max(x, 0.01*x) selects exactly the same value as the conditional form
    for every input (including 0 and infinities).
    """
    return tf.maximum(x, 0.01 * x)
def discriminator(x, num_classes=10, num_cont=2):
    """InfoGAN discriminator / recognition network.

    Returns (disc, recog_cat, recog_cont):
      disc       - real/fake logit per sample,
      recog_cat  - logits over the categorical latent code,
      recog_cont - sigmoid estimates of the continuous latent code.
    """
    # Reuse the 'discriminator' scope if its variables already exist.
    reuse = len([t for t in tf.global_variables() if t.name.startswith('discriminator')]) > 0
    #print (reuse)
    #print (x.get_shape())
    with tf.variable_scope('discriminator', reuse=reuse):
        x = tf.reshape(x, shape=[-1, 28, 28, 1])
        # Two strided convs: 28 -> 14 -> 7 spatial resolution.
        x = slim.conv2d(x, num_outputs = 64, kernel_size=[4,4], stride=2, activation_fn=leaky_relu)
        x = slim.conv2d(x, num_outputs=128, kernel_size=[4,4], stride=2, activation_fn=leaky_relu)
        #print ("conv2d",x.get_shape())
        x = slim.flatten(x)
        # Shared trunk; the recognition branches hang off a smaller layer.
        shared_tensor = slim.fully_connected(x, num_outputs=1024, activation_fn = leaky_relu)
        recog_shared = slim.fully_connected(shared_tensor, num_outputs=128, activation_fn = leaky_relu)
        disc = slim.fully_connected(shared_tensor, num_outputs=1, activation_fn=None)
        disc = tf.squeeze(disc, -1)
        #print ("disc",disc.get_shape())#0 or 1
        recog_cat = slim.fully_connected(recog_shared, num_outputs=num_classes, activation_fn=None)
        recog_cont = slim.fully_connected(recog_shared, num_outputs=num_cont, activation_fn=tf.nn.sigmoid)
    return disc, recog_cat, recog_cont
# Latent layout: 10-way one-hot class + 2 continuous codes + 38 noise dims.
batch_size = 10
classes_dim = 10
con_dim = 2
rand_dim = 38
n_input = 784
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.int32, [None])
z_con = tf.random_normal((batch_size, con_dim))# 2 columns
z_rand = tf.random_normal((batch_size, rand_dim))# 38 columns
z = tf.concat(axis=1, values=[tf.one_hot(y, depth = classes_dim), z_con, z_rand])# 50 columns total
gen = generator(z)
genout= tf.squeeze(gen, -1)
# labels for discriminator
y_real = tf.ones(batch_size) # real
y_fake = tf.zeros(batch_size)# fake
# discriminator outputs on real and generated batches
disc_real, class_real, _ = discriminator(x)
disc_fake, class_fake, con_fake = discriminator(gen)
pred_class = tf.argmax(class_fake, dimension=1)
# discriminator loss: real classified as real, fake as fake
loss_d_r = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_real, labels=y_real))
loss_d_f = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake, labels=y_fake))
loss_d = (loss_d_r + loss_d_f) / 2
#print ('loss_d', loss_d.get_shape())
# generator loss: fool the discriminator into labeling fakes as real
loss_g = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake, labels=y_real))
# categorical factor loss (recover the class code from both image kinds)
loss_cf = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=class_fake, labels=y))# NOTE(author): class ok, image may not match
loss_cr = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=class_real, labels=y))# NOTE(author): generated class ok but may not match input class
loss_c =(loss_cf + loss_cr) / 2
# continuous factor loss (InfoGAN mutual-information surrogate)
loss_con =tf.reduce_mean(tf.square(con_fake-z_con))
# collect each sub-network's trainable variables by variable-scope name
t_vars = tf.trainable_variables()
d_vars = [var for var in t_vars if 'discriminator' in var.name]
g_vars = [var for var in t_vars if 'generator' in var.name]
disc_global_step = tf.Variable(0, trainable=False)
gen_global_step = tf.Variable(0, trainable=False)
train_disc = tf.train.AdamOptimizer(0.0001).minimize(loss_d + loss_c + loss_con, var_list = d_vars, global_step = disc_global_step)
train_gen = tf.train.AdamOptimizer(0.001).minimize(loss_g + loss_c + loss_con, var_list = g_vars, global_step = gen_global_step)
training_epochs = 3
display_step = 1
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)# fetch a mini-batch
            feeds = {x: batch_xs, y: batch_ys}
            # Fit training using batch data
            l_disc, _, l_d_step = sess.run([loss_d, train_disc, disc_global_step],feed_dict=feeds)
            l_gen, _, l_g_step = sess.run([loss_g, train_gen, gen_global_step],feed_dict=feeds)
        # report progress (only the last batch's losses are shown)
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f} ".format(l_disc),l_gen)
    print("Finished!")
    # evaluation on one test batch
    print ("Result:", loss_d.eval({x: mnist.test.images[:batch_size],y:mnist.test.labels[:batch_size]})
    , loss_g.eval({x: mnist.test.images[:batch_size],y:mnist.test.labels[:batch_size]}))
    # generate images conditioned on test samples and show them side by side
    show_num = 10
    gensimple,d_class,inputx,inputy,con_out = sess.run(
        [genout,pred_class,x,y,con_fake], feed_dict={x: mnist.test.images[:batch_size],y: mnist.test.labels[:batch_size]})
    f, a = plt.subplots(2, 10, figsize=(10, 2))
    for i in range(show_num):
        a[0][i].imshow(np.reshape(inputx[i], (28, 28)))
        a[1][i].imshow(np.reshape(gensimple[i], (28, 28)))
        print("d_class",d_class[i],"inputy",inputy[i],"con_out",con_out[i])
    plt.draw()
    plt.show()
    # Sweep the two continuous codes over a 10x10 grid (one digit class per
    # column) and tile the generated digits into a single large image.
    # NOTE(review): 'a' and 'f' are reused as new variables below,
    # shadowing the matplotlib handles above.
    my_con=tf.placeholder(tf.float32, [batch_size,2])
    myz = tf.concat(axis=1, values=[tf.one_hot(y, depth = classes_dim), my_con, z_rand])
    mygen = generator(myz)
    mygenout= tf.squeeze(mygen, -1)
    my_con1 = np.ones([10,2])
    a = np.linspace(0.0001, 0.99999, 10)
    y_input= np.ones([10])
    figure = np.zeros((28 * 10, 28 * 10))
    my_rand = tf.random_normal((10, rand_dim))
    for i in range(10):
        for j in range(10):
            my_con1[j][0]=a[i]
            my_con1[j][1]=a[j]
            y_input[j] = j
        mygenoutv = sess.run(mygenout,feed_dict={y:y_input,my_con:my_con1})
        for jj in range(10):
            digit = mygenoutv[jj].reshape(28, 28)
            figure[i * 28: (i + 1) * 28,
                   jj * 28: (jj + 1) * 28] = digit
    plt.figure(figsize=(10, 10))
    plt.imshow(figure, cmap='Greys_r')
    plt.show()
| [
"870407139@qq.com"
] | 870407139@qq.com |
e97dfd3726227b91404d69a2d8fec5bbd24c4630 | 0550c08cee19be891fde34fa109b5a4ad9f07e3a | /findingshared/findingshared.py | d97a6a0b84b2d5e368e82c243576bb1cbec62235 | [] | no_license | bendavidsteel/rosalind-solutions | 92653c49d8ef938306ac1289ccb4e4cfe4b8d3ae | 0749f2662efcac62383a8476ce13fcdd039928b1 | refs/heads/master | 2020-03-28T04:17:00.959446 | 2018-09-06T21:32:06 | 2018-09-06T21:32:06 | 147,705,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | maxlen = 2
# Rosalind LCSM ("Finding a Shared Motif").
# Fixes in this rewrite:
#  - the stray incomplete `def jloop` line was a SyntaxError, so the original
#    script could not run at all;
#  - the original compared substrings only at identical offsets in every
#    string, which is not the LCSM problem (the motif may occur anywhere).
def longest_common_substring(strings):
    """Return a longest substring shared by every string in ``strings``.

    Candidates are taken from the shortest input, scanning lengths from
    longest to shortest so the first hit is a longest common substring.
    Returns "" for empty input or when nothing is shared.
    """
    if not strings:
        return ""
    shortest = min(strings, key=len)
    for length in range(len(shortest), 0, -1):
        for start in range(len(shortest) - length + 1):
            candidate = shortest[start:start + length]
            if all(candidate in s for s in strings):
                return candidate
    return ""


def main():
    """Read sequences (every second line of sample.txt) and write the motif."""
    with open('sample.txt') as stringfile:
        data = stringfile.read().split('\n')[1::2]
    with open("output.txt", 'w') as output:
        output.write(longest_common_substring(data))


if __name__ == '__main__':
    main()
| [
"bendavidsteel@gmail.com"
] | bendavidsteel@gmail.com |
357746ec09b140827b2d7be08c9a9f27e7a7f71f | fce76a80bafa84b188fd9761d769f79dd712d79c | /JH_RestAPI/jobapps/migrations/0012_jobapplicationnote.py | 695d6185dc04b277ff8709a9c32c427aca09ecbf | [] | no_license | komal14prasad/backend | 547110ddb16f4ad16be7f3aebd87ad2cea52ee2f | 4a915f2e744c5697d8a90a59e358c1ce94b47d69 | refs/heads/master | 2020-05-22T02:34:21.237812 | 2019-05-12T03:36:22 | 2019-05-12T03:36:22 | 186,200,468 | 0 | 0 | null | 2019-05-12T02:03:13 | 2019-05-12T02:03:12 | null | UTF-8 | Python | false | false | 912 | py | # Generated by Django 2.1.5 on 2019-03-18 03:05
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the JobApplicationNote table (free-text notes attached to a
    job application). Auto-generated by Django 2.1.5."""
    dependencies = [
        # Must run after the previous jobapps migration.
        ('jobapps', '0011_auto_20190210_1053'),
    ]
    operations = [
        migrations.CreateModel(
            name='JobApplicationNote',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.TextField(blank=True, null=True)),
                # NOTE(review): datetime.datetime.now stores a naive local
                # timestamp; presumably timezone.now was intended if USE_TZ
                # is enabled - confirm against project settings.
                ('created_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('update_date', models.DateTimeField(blank=True, null=True)),
                # Deleting a JobApplication cascades to its notes.
                ('job_post', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='jobapps.JobApplication')),
            ],
        ),
    ]
| [
"sdemirci55@gmail.com"
] | sdemirci55@gmail.com |
e6cb8f8f82846c0b57397552dae31ee75edf7f6f | 5e2dddce9c67d5b54d203776acd38d425dbd3398 | /spacy/lang/ca/__init__.py | d25d40e257fd706cc679a013ba3733ee90462e07 | [
"MIT"
] | permissive | yuxuan2015/spacy_zh_model | 8164a608b825844e9c58d946dcc8698853075e37 | e89e00497ab3dad0dd034933e25bc2c3f7888737 | refs/heads/master | 2020-05-15T11:07:52.906139 | 2019-08-27T08:28:11 | 2019-08-27T08:28:11 | 182,213,671 | 1 | 0 | null | 2019-04-19T06:27:18 | 2019-04-19T06:27:17 | null | UTF-8 | Python | false | false | 2,297 | py | # coding: utf8
from __future__ import unicode_literals
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
# uncomment if files are available
# from .norm_exceptions import NORM_EXCEPTIONS
# from .tag_map import TAG_MAP
# from .morph_rules import MORPH_RULES
# uncomment if lookup-based lemmatizer is available
from .lemmatizer import LOOKUP
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ..norm_exceptions import BASE_NORMS
from ...language import Language
from ...attrs import LANG, NORM
from ...util import update_exc, add_lookups
# Create a Language subclass
# Documentation: https://spacy.io/docs/usage/adding-languages
# This file should be placed in spacy/lang/ca (ISO code of language).
# Before submitting a pull request, make sure the remove all comments from the
# language data files, and run at least the basic tokenizer tests. Simply add the
# language ID to the list of languages in spacy/tests/conftest.py to include it
# in the basic tokenizer sanity tests. You can optionally add a fixture for the
# language's tokenizer and add more specific tests. For more info, see the
# tests documentation: https://github.com/explosion/spaCy/tree/master/spacy/tests
class CatalanDefaults(Language.Defaults):
lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
lex_attr_getters[LANG] = lambda text: 'ca' # ISO code
# add more norm exception dictionaries here
lex_attr_getters[NORM] = add_lookups(Language.Defaults.lex_attr_getters[NORM], BASE_NORMS)
# overwrite functions for lexical attributes
lex_attr_getters.update(LEX_ATTRS)
# add custom tokenizer exceptions to base exceptions
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
# add stop words
stop_words = STOP_WORDS
# if available: add tag map
# tag_map = dict(TAG_MAP)
# if available: add morph rules
# morph_rules = dict(MORPH_RULES)
lemma_lookup = LOOKUP
class Catalan(Language):
lang = 'ca' # ISO code
Defaults = CatalanDefaults # set Defaults to custom language defaults
# set default export – this allows the language class to be lazy-loaded
__all__ = ['Catalan']
| [
"yuxuan2015@example.com"
] | yuxuan2015@example.com |
61004a97a2ccdbbd703c0335da0142a4d4b5ed9e | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_glazed.py | 4b9d651bd336809ed5c8db024f25696635fdae74 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py |
#calss header
class _GLAZED():
  """Word-base entry for "glazed" (other form of the lemma "glaze")."""
  def __init__(self,):
    self.name = "GLAZED"
    # Fix: the original line `self.definitions = glaze` referenced an
    # undefined bare name and raised NameError on construction; the
    # definitions field holds the base lemma string(s), mirroring `basic`.
    self.definitions = ['glaze']
    self.parents = []
    self.childen = []
    self.properties = []
    self.jsondata = {}
    self.basic = ['glaze']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
c2e58269c8af68e51587b23ce08e779eb9fc0c3e | 4b9f2d543755cf5ab9e1ddeab46e123b5a3e73ee | /lect05_52-week_saving_challenge/money_challenge_v4.0.py | 3d6c292351487772efecae124d9eeac25207b03a | [] | no_license | guolikai/mypython | ff8e3bbc22e346d90126b551b945909d64c4bb3e | 7c4003e5a70d306b34a102b3b32c667898c5e9c8 | refs/heads/master | 2022-12-25T06:52:06.253960 | 2020-09-30T13:53:09 | 2020-09-30T13:53:09 | 82,179,405 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,372 | py | """
作者:GuoLikai
功能:52周存钱挑战
版本:3.0
日期:05/08/2018
2.0增加功能:记录每周的存款数
3.0增加功能:使用循环直接计数
4.0增加功能:灵活设置每周的存款数,增加的存款数及存款周数
"""
import math
def save_money_in_n_weeks(money_per_week, increase_money, total_week):
    """Return the total amount saved after ``total_week`` weeks.

    The weekly deposit starts at ``money_per_week`` and grows by
    ``increase_money`` each week (an arithmetic progression).

    Fixes over the previous version: the list-plus-``math.fsum``-per-week
    approach was O(n^2); it also raised UnboundLocalError when
    ``total_week`` was 0, whereas this version correctly returns 0.0.
    """
    # math.fsum keeps floating-point round-off low when summing many deposits.
    return math.fsum(money_per_week + week * increase_money
                     for week in range(total_week))
def main():
    """Entry point: read the savings-plan parameters and print the total."""
    weekly_amount = float(input('请输入每周的存入的金额:'))
    weekly_increase = float(input('请输入每周的递增金额:'))
    week_count = int(input('请输入总共的周数:'))
    total = save_money_in_n_weeks(weekly_amount, weekly_increase, week_count)
    print('总存款金额', total)
if __name__ == '__main__':
    main()
| [
"glk73748196@sina.com"
] | glk73748196@sina.com |
cbd162117e95a560810e5d1c96b3b9c5300777c2 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/fish1_20200804200900.py | 7517919c0ab14ea3f149b1054c468da0ce11e9e1 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | def fish(A,B):
    # where A represent the size of fish and B represents the direction of fish
    # (presumably 1 = downstream, 0 = upstream - the Codility "Fish" task).
    # NOTE(review): this simulation is broken and its result is discarded:
    #  - A and B are mutated with remove() while j indexes them, so remove()
    #    deletes the first *equal* value (not necessarily the one at j/j+1)
    #    and pairs are skipped as the lists shrink while j keeps advancing;
    #  - aliveFish is decremented only in the else branch, never when the
    #    fish at j+1 is eaten;
    #  - bigFish/indexFish are computed but never used, and the function
    #    returns None. A stack-based O(n) rewrite would be needed.
    aliveFish = len(A)
    bigFish = 0
    indexFish = None
    j = 0
    while j < len(B)-1:
        # A collision can only involve a downstream fish followed by one
        # that is not downstream.
        if B[j] == 1 and B[j+1] !=1:
            if A[j] > A[j+1]:
                if A[j] > bigFish:
                    bigFish = A[j]
                A.remove(A[j+1])
                B.remove(B[j+1])
            else:
                if A[j+1] > bigFish:
                    bigFish = A[j+1]
                A.remove(A[j])
                B.remove(B[j])
                aliveFish -=1
        j +=1
fish([4,3,2,1,5],[0,1,0,0,0]) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
a3d019059a08c7f50daa263ad9a29b74d90f96d7 | 49663ea34b41c8180d7484f778f5cad2e701d220 | /tests/dualtor/test_server_failure.py | 07c2bf67601fef2bf59b1e15b84bc027badc9c60 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | stepanblyschak/sonic-mgmt | ed08c98e7bff1615b057daa8711686aa5986073d | a1ae1e0b4e9927e6f52916f76121780d19ec3e54 | refs/heads/master | 2023-04-07T01:30:11.403900 | 2023-03-29T10:16:52 | 2023-03-29T10:16:52 | 135,678,178 | 0 | 0 | NOASSERTION | 2023-03-29T16:13:55 | 2018-06-01T06:41:49 | Python | UTF-8 | Python | false | false | 3,378 | py | import pytest
from tests.common.dualtor.mux_simulator_control import toggle_simulator_port_to_upper_tor, \
simulator_flap_counter, simulator_server_down # noqa F401
from tests.common.helpers.assertions import pytest_assert, pytest_require
from tests.common.dualtor.dual_tor_utils import show_muxcable_status, rand_selected_interface # noqa: F401
from tests.common.fixtures.ptfhost_utils import change_mac_addresses, run_garp_service, \
run_icmp_responder # noqa: F401
from tests.common.utilities import wait_until
# Restrict this module to t0 topologies and pull in the GARP / ICMP
# responder service fixtures for every test.
pytestmark = [
    pytest.mark.topology('t0'),
    pytest.mark.usefixtures('run_garp_service', 'run_icmp_responder')
]
@pytest.fixture(autouse=True, scope='module')
def skip_if_non_dualtor_topo(tbinfo):
    """Module-wide guard: skip every test unless the testbed topology is dualtor."""
    pytest_require('dualtor' in tbinfo['topo']['name'], "Only run on dualtor testbed")
def test_server_down(duthosts, tbinfo, rand_selected_interface, simulator_flap_counter, # noqa F811
simulator_server_down, toggle_simulator_port_to_upper_tor, loganalyzer): # noqa F811
"""
Verify that mux cable is not toggled excessively.
"""
for analyzer in list(loganalyzer.values()):
analyzer.ignore_regex.append(r".*ERR swss#orchagent: :- setState: \
State transition from active to active is not-handled")
upper_tor = duthosts[tbinfo['duts'][0]]
lower_tor = duthosts[tbinfo['duts'][1]]
def upper_tor_mux_state_verification(state, health):
mux_state_upper_tor = show_muxcable_status(upper_tor)
return mux_state_upper_tor[itfs]['status'] == state and mux_state_upper_tor[itfs]['health'] == health
def lower_tor_mux_state_verfication(state, health):
mux_state_lower_tor = show_muxcable_status(lower_tor)
return mux_state_lower_tor[itfs]['status'] == state and mux_state_lower_tor[itfs]['health'] == health
itfs, _ = rand_selected_interface
# Set upper_tor as active
toggle_simulator_port_to_upper_tor(itfs)
pytest_assert(wait_until(30, 1, 0, upper_tor_mux_state_verification, 'active', 'healthy'),
"mux_cable status is unexpected. Should be (active, healthy). Test can't proceed. ")
mux_flap_counter_0 = simulator_flap_counter(itfs)
# Server down
simulator_server_down(itfs)
# Verify mux_cable state on upper_tor is active
pytest_assert(wait_until(20, 1, 0, upper_tor_mux_state_verification, 'active', 'unhealthy'),
"mux_cable status is unexpected. Should be (active, unhealthy)")
# Verify mux_cable state on lower_tor is standby
pytest_assert(wait_until(20, 1, 0, lower_tor_mux_state_verfication, 'standby', 'unhealthy'),
"mux_cable status is unexpected. Should be (standby, unhealthy)")
# Verify that mux_cable flap_counter should be no larger than 3
# lower_tor(standby) -> active -> standby
# upper_tor(active) -> active
# The toggle from both tor may be overlapped and invisible
mux_flap_counter_1 = simulator_flap_counter(itfs)
pytest_assert(mux_flap_counter_1 - mux_flap_counter_0 <= 3,
"The mux_cable flap count should be no larger than 3 ({})"
.format(mux_flap_counter_1 - mux_flap_counter_0))
| [
"noreply@github.com"
] | stepanblyschak.noreply@github.com |
7e79e805f44cd3ea15b84a2b395d30fbdc293fdd | e569aaa98d90ebfed429da9f1e8697b6122c66f9 | /ecoroofs/locations/migrations/0008_add_point_obscured_to_location_model.py | fd5dab8a0eab3b0b2cab506c1d8508d2dfc692c2 | [] | no_license | PSU-OIT-ARC/ecoroofs | 4f4a5df0b15faf8d4442155e7a70104a2c25b44f | e7acf776dbd19e90e22635424808c8c6807d7572 | refs/heads/develop | 2020-05-21T16:16:48.799309 | 2017-11-08T21:31:36 | 2017-11-08T21:31:36 | 64,701,178 | 0 | 3 | null | 2017-07-24T19:06:49 | 2016-08-01T20:59:05 | Python | UTF-8 | Python | false | false | 422 | py | import django.contrib.gis.db.models.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('locations', '0007_add_year_built_to_location_model'),
]
operations = [
migrations.AddField(
model_name='location',
name='point_obscured',
field=django.contrib.gis.db.models.fields.PointField(srid=4326),
),
]
| [
"wbaldwin@pdx.edu"
] | wbaldwin@pdx.edu |
5a89b194f60aa6e8e7016670b169d895bdfd01e1 | c5458f2d53d02cb2967434122183ed064e1929f9 | /sdks/python/test/test_spending_proof.py | c58e33d52c302a1ce178f05e03d2ef9822171fd3 | [] | no_license | ross-weir/ergo-node-api-sdks | fd7a32f79784dbd336ef6ddb9702b9dd9a964e75 | 9935ef703b14760854b24045c1307602b282c4fb | refs/heads/main | 2023-08-24T05:12:30.761145 | 2021-11-08T10:28:10 | 2021-11-08T10:28:10 | 425,785,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | """
Ergo Node API
API docs for Ergo Node. Models are shared between all Ergo products # noqa: E501
The version of the OpenAPI document: 4.0.15
Contact: ergoplatform@protonmail.com
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import openapi_client
from openapi_client.model.spending_proof import SpendingProof
class TestSpendingProof(unittest.TestCase):
"""SpendingProof unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testSpendingProof(self):
"""Test SpendingProof"""
# FIXME: construct object with mandatory attributes with example values
# model = SpendingProof() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"29697678+ross-weir@users.noreply.github.com"
] | 29697678+ross-weir@users.noreply.github.com |
d23250f1b6b7e554bdf137997e2f02efa56feb70 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/63/usersdata/230/28357/submittedfiles/swamee.py | e5f429c7808c600df6e2cb6d975c9662a428b88e | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | # -*- coding: utf-8 -*-
import math
#COMECE SEU CÓDIGO AQUI
f = float(input('Digite o valor de f: '))
L = float(input('Digite o valor de L: '))
Q = float(input('Digite o valor de Q: '))
DeltaH = float(input('Digite o valor de DeltaH: '))
v = float(input('Digite o valor de v: '))
g = 9.81
E = 0.000002
D = (8*f*L*(Q**2)/((math.pi**2)*g*DeltaH))**0.2
Rey = (4*Q)/(math.pi*D*v)
k = (0.25)/(math.log10(E/(3.7*D)+(5.74/(Rey**0.9))))**2
print ('D = %.4f' % D)
print ('Rey = %.4f' % Rey)
print ('k = %.4f' % k) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
db4640a338afe81eaf782bac82394116670bbc95 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02804/s938106196.py | e07f2d428fc7a4fa2bbc75241a67288a2325428b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | n,k,*l=map(int,open(0).read().split());l.sort();c,a,M=1,0,10**9+7
for i in range(k-1,n):a+=c*(l[i]-l[~i]);c=c*-~i*pow(i-k+2,M-2,M)%M
print(a%M) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e3b56cb7ad8f137d9438e36fdcaa4d4aeb773562 | 0a089d954518ef4a8f6aecf7097af8124e425d7e | /everest/tests/complete_app/resources.py | 3b0e03d850c91b76cc8b9fb22d05059d17c439e1 | [
"MIT"
] | permissive | papagr/everest | b13c06834ae38a5d441a9fd1c938d495ceca6e20 | 70c9b93c3061db5cb62428349d18b8fb8566411b | refs/heads/master | 2021-05-28T16:55:23.021924 | 2015-02-17T09:21:03 | 2015-02-17T09:21:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,060 | py | """
This file is part of the everest project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Created on Dec 1, 2011.
"""
from everest.resources.base import Member
from everest.resources.descriptors import attribute_alias
from everest.resources.descriptors import collection_attribute
from everest.resources.descriptors import member_attribute
from everest.resources.descriptors import terminal_attribute
from everest.tests.complete_app.interfaces import IMyEntity
from everest.tests.complete_app.interfaces import IMyEntityChild
from everest.tests.complete_app.interfaces import IMyEntityGrandchild
from everest.tests.complete_app.interfaces import IMyEntityParent
import datetime
from everest.constants import CARDINALITIES
__docformat__ = 'reStructuredText en'
__all__ = ['MyEntityChildMember',
'MyEntityMember',
'MyEntityGrandchildMember',
'MyEntityParentMember',
]
class MyEntityParentMember(Member):
relation = 'http://test.org/myentity-parent'
# String terminal.
text = terminal_attribute(str, 'text')
# String terminal with different name in entity.
text_rc = terminal_attribute(str, 'text_ent')
#
text_alias = attribute_alias('text')
class MyEntityMember(Member):
relation = 'http://test.org/myentity'
# Member.
parent = member_attribute(IMyEntityParent, 'parent',
cardinality=CARDINALITIES.ONETOONE,
backref='child')
# Collection.
children = collection_attribute(IMyEntityChild, 'children',
backref='parent')
# String terminal.
text = terminal_attribute(str, 'text')
# String terminal with different name in entity.
text_rc = terminal_attribute(str, 'text_ent')
# Number terminal.
number = terminal_attribute(int, 'number')
# Date time terminal.
date_time = terminal_attribute(datetime.datetime, 'date_time')
# Dotted attribute.
parent_text = terminal_attribute(str, 'parent.text_ent')
class MyEntityChildMember(Member):
relation = 'http://test.org/myentity-child'
# Member.
parent = member_attribute(IMyEntity, 'parent', backref='children')
# Collection accessed as entity attribute and represented as
# "parent equal to parent member" (backreferencing) specification.
children = collection_attribute(IMyEntityGrandchild,
entity_attr='children',
backref='parent')
# String terminal.
text = terminal_attribute(str, 'text')
# String terminal with different name in entity.
text_rc = terminal_attribute(str, 'text_ent')
class MyEntityGrandchildMember(Member):
relation = 'http://test.org/myentity-grandchild'
# String terminal.
text = terminal_attribute(str, 'text')
# String terminal with different name in entity.
text_rc = terminal_attribute(str, 'text_ent')
# Member.
parent = member_attribute(IMyEntityChild, 'parent', backref='children')
| [
"fogathmann@gmail.com"
] | fogathmann@gmail.com |
d83c4645ef7cc9f79a7c70d11198e76aad49ad85 | 9eaa17f50df49e5c6d204a7a7ece52f94c10b30b | /d3rlpy/algos/torch/utility.py | f431a78d8326da7fcd4c9ff7492d1147190162df | [
"MIT"
] | permissive | mchetouani/d3rlpy | 08d452ea2c8735b679a02474a3ae512caf91250a | 53ccf604298568b3a8322bb6f38bc33c0ac04ca2 | refs/heads/master | 2023-02-03T15:19:23.620608 | 2020-11-29T02:25:34 | 2020-11-29T02:25:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,435 | py | import torch
import numpy as np
from inspect import signature
def soft_sync(targ_model, model, tau):
with torch.no_grad():
params = model.parameters()
targ_params = targ_model.parameters()
for p, p_targ in zip(params, targ_params):
p_targ.data.mul_(1 - tau)
p_targ.data.add_(tau * p.data)
def hard_sync(targ_model, model):
with torch.no_grad():
params = model.parameters()
targ_params = targ_model.parameters()
for p, p_targ in zip(params, targ_params):
p_targ.data.copy_(p.data)
def set_eval_mode(impl):
for key in dir(impl):
module = getattr(impl, key)
if isinstance(module, torch.nn.Module):
module.eval()
def set_train_mode(impl):
for key in dir(impl):
module = getattr(impl, key)
if isinstance(module, torch.nn.Module):
module.train()
def to_cuda(impl, device):
for key in dir(impl):
module = getattr(impl, key)
if isinstance(module, (torch.nn.Module, torch.nn.Parameter)):
module.cuda(device)
def to_cpu(impl):
for key in dir(impl):
module = getattr(impl, key)
if isinstance(module, (torch.nn.Module, torch.nn.Parameter)):
module.cpu()
def freeze(impl):
for key in dir(impl):
module = getattr(impl, key)
if isinstance(module, torch.nn.Module):
for p in module.parameters():
p.requires_grad = False
def unfreeze(impl):
for key in dir(impl):
module = getattr(impl, key)
if isinstance(module, torch.nn.Module):
for p in module.parameters():
p.requires_grad = True
def compute_augmentation_mean(augmentation, n_augmentations, func, inputs,
targets):
ret = 0.0
for _ in range(n_augmentations):
kwargs = dict(inputs)
for target in targets:
kwargs[target] = augmentation.transform(kwargs[target])
ret += func(**kwargs)
return ret / n_augmentations
def get_state_dict(impl):
rets = {}
for key in dir(impl):
obj = getattr(impl, key)
if isinstance(obj, (torch.nn.Module, torch.optim.Optimizer)):
rets[key] = obj.state_dict()
return rets
def set_state_dict(impl, chkpt):
for key in dir(impl):
obj = getattr(impl, key)
if isinstance(obj, (torch.nn.Module, torch.optim.Optimizer)):
obj.load_state_dict(chkpt[key])
def map_location(device):
if 'cuda' in device:
return lambda storage, loc: storage.cuda(device)
if 'cpu' in device:
return 'cpu'
raise ValueError('invalid device={}'.format(device))
def torch_api(scaler_targets=[]):
def _torch_api(f):
# get argument names
sig = signature(f)
arg_keys = list(sig.parameters.keys())[1:]
def wrapper(self, *args, **kwargs):
# convert all args to torch.Tensor
tensors = []
for i, val in enumerate(args):
if isinstance(val, torch.Tensor):
tensor = val
elif isinstance(val, np.ndarray):
if val.dtype == np.uint8:
dtype = torch.uint8
else:
dtype = torch.float32
tensor = torch.tensor(data=val,
dtype=dtype,
device=self.device)
else:
tensor = torch.tensor(data=val,
dtype=torch.float32,
device=self.device)
# preprocess
if self.scaler and arg_keys[i] in scaler_targets:
tensor = self.scaler.transform(tensor)
# make sure if the tensor is float32 type
if tensor.dtype != torch.float32:
tensor = tensor.float()
tensors.append(tensor)
return f(self, *tensors, **kwargs)
return wrapper
return _torch_api
def eval_api(f):
def wrapper(self, *args, **kwargs):
set_eval_mode(self)
return f(self, *args, **kwargs)
return wrapper
def train_api(f):
def wrapper(self, *args, **kwargs):
set_train_mode(self)
return f(self, *args, **kwargs)
return wrapper
| [
"takuma.seno@gmail.com"
] | takuma.seno@gmail.com |
612311f401562e6479a566b357190ad038ebd82e | a303cea3e4a5b9d774700111954b837e11ce8f64 | /Werkgroep API beveiliging/Implementaties/poc-oauth-python/web/woco_irma/woco_irma/wsgi.py | 910342af165b65cd78403f3a1559593c0eb7855a | [] | no_license | HenriKorver/KP-APIs | ee639ad9409b12710f6296e6cbf5d861b6d91571 | 3dde9bebf63c35b036145771ebf22d0851a5378c | refs/heads/master | 2021-06-12T05:22:10.712394 | 2021-06-09T14:55:41 | 2021-06-09T14:55:41 | 174,525,243 | 0 | 0 | null | 2019-05-16T15:06:41 | 2019-03-08T11:24:08 | HTML | UTF-8 | Python | false | false | 395 | py | """
WSGI config for woco_irma project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'woco_irma.settings')
application = get_wsgi_application()
| [
"sergei@maykinmedia.nl"
] | sergei@maykinmedia.nl |
1d8e81008f94de9bc956344891d46ea12658172d | bbed0e21b241c6a39341fed7d058563c30e2e0a4 | /tests/trainer/warnings_tests/test_flow_warnings.py | f5328d44020d717de90fa564f4c03c177fc40aa4 | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] | permissive | haven-jeon/pytorch-lightning | 656917a6cace5c3723d951f84515e8cb74a0ec79 | 6b9362bb73ada4c8702def43fe8f27eb3c382540 | refs/heads/master | 2023-02-23T18:29:08.298601 | 2021-01-28T07:42:14 | 2021-01-28T07:42:14 | 325,184,155 | 2 | 0 | Apache-2.0 | 2021-01-28T07:42:15 | 2020-12-29T04:19:44 | Python | UTF-8 | Python | false | false | 1,453 | py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import mock
from tests.base.boring_model import BoringModel
from pytorch_lightning import Trainer
import warnings
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
acc = self.step(batch[0])
return acc
@mock.patch.dict(os.environ, {"PL_DEV_DEBUG": "1"})
def test_no_depre_without_epoch_end(tmpdir):
"""
Tests that only training_step can be used
"""
model = TestModel()
model.validation_epoch_end = None
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
max_epochs=2,
log_every_n_steps=1,
weights_summary=None,
)
with warnings.catch_warnings(record=True) as w:
trainer.fit(model)
for msg in w:
assert 'should not return anything ' not in str(msg)
| [
"noreply@github.com"
] | haven-jeon.noreply@github.com |
630f388259ee71dd9d1b0b441c0117db73ee9b8d | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_9/frnpau013/question2.py | 71396f2476198ce3175c7385e66de6c4fc587b0d | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,745 | py | def line_maker(string, length):
if len(string) > length:
line = string[0:length]
if line[-1] != " " and string[len(line)] != " ":
while line[-1] != " ":
line = delete_till_space(line)
line = line[:-1]
return line, string[len(line) + 1:]
else:
if line[-1] != " ":
return line, string[len(line) + 1:]
else:
return line[:-1], string[len(line):]
else:
line = string
return line, ""
def delete_till_space(string):
if string[-1] == " ":
return string
else:
return delete_till_space(string[:-1])
def paragraph_printer(paragraph_list, length, name):
outfile = open(name, 'w')
for paragraph in paragraph_list:
while paragraph > "":
line, paragraph = line_maker(paragraph, length)
print(line, file = outfile)
print('\n', end = '', file = outfile)
def main():
# getting inputted variables
name_infile = input('Enter the input filename:\n')
name_outfile = input('Enter the output filename:\n')
line_length = eval(input('Enter the line width:\n'))
# reading text from input
infile = open(name_infile, "r")
in_text = infile.read()
infile.close()
# splitting text into list of paragraphs
paragraph_list = in_text.split('\n\n')
for i in range(len(paragraph_list)):
paragraph_list[i] = paragraph_list[i].replace('\n', ' ')
# printing reformatted paragraphs to output
paragraph_printer(paragraph_list, line_length, name_outfile)
if __name__ == '__main__':
main() | [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
0da64247f2f05b4d47521f6362ed1d6f94bae56f | 725ac5a0bf72829be627bf8dc82fdc51ba0f94ae | /Text_Classification/multi_label_classify_bert/inference.py | 6517992e9a21b0f516d22a671658ec98435a63c4 | [] | no_license | shawroad/NLP_pytorch_project | fa14b6e4a156229765e1d552901d0492d8e1def3 | 1272fed2dc8fef78a9ded0f1ae1644d613a3b57b | refs/heads/master | 2023-06-25T02:37:35.503251 | 2023-06-12T10:57:11 | 2023-06-12T10:57:11 | 229,694,655 | 530 | 104 | null | 2020-12-08T09:21:47 | 2019-12-23T06:54:29 | Python | UTF-8 | Python | false | false | 2,940 | py | """
@file : inference.py
@author : xiaolu
@email : luxiaonlp@163.com
@time : 2021-06-23
"""
import torch
from tqdm import tqdm
import pandas as pd
from model import Model
from config import set_args
from torch.utils.data import Dataset, DataLoader
from transformers.models.bert import BertTokenizer
class MyDataset(Dataset):
def __init__(self, dataframe, maxlen=256, test=False):
self.df = dataframe
self.maxlen = maxlen
self.test = test
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
# 将问题和其对应的细节进行拼接
text = str(self.df.question_title.values[idx]) + str(self.df.question_detail.values[idx])
encoding = tokenizer(text, padding='max_length', truncation=True, max_length=self.maxlen, return_tensors='pt')
input_ids = encoding['input_ids'][0]
attention_mask = encoding['attention_mask'][0]
if self.test:
return input_ids, attention_mask
else:
# 如果不是测试集 制作标签
tags = self.df.tag_ids.values[idx].split('|')
tags = [int(x) - 1 for x in tags] # 标签是从零开始的
label = torch.zeros((args.num_classes,))
label[tags] = 1 # 转成类似one_hot标签
return input_ids, attention_mask, label
def test_model():
result = []
model.eval()
tk = tqdm(test_loader, total=len(test_loader), position=0, leave=True)
with torch.no_grad():
for idx, (input_ids, attention_mask) in enumerate(tk):
input_ids, attention_mask = input_ids.cuda(), attention_mask.cuda()
output = model(input_ids, attention_mask)
for res in output: # 后处理,找大于0.5的类别(阈值可以微调),如果多了就取TOP5,如果没有就取TOP1
_, res1 = torch.topk(res, 5)
res1 = res1.cpu().numpy()
res2 = torch.where(res > 0.5)[0]
res2 = res2.cpu().numpy()
if len(res2) > 5:
result.append(res1)
elif len(res2) == 0:
result.append(res1[0])
else:
result.append(res2)
with open('submission.csv', 'w')as f:
for i in range(len(result)):
f.write(str(i) + ',')
res = [str(x + 1) for x in result[i]]
if len(res) < 5:
res += ['-1'] * (5 - len(res))
f.write(','.join(res))
f.write('\n')
if __name__ == '__main__':
args = set_args()
test = pd.read_csv(args.test_data)
test_set = MyDataset(test, test=True)
tokenizer = BertTokenizer.from_pretrained(args.vocab)
model = Model()
# 加载权重
model.load_state_dict(torch.load('model_epoch1.bin'))
test_loader = DataLoader(test_set, batch_size=args.test_batch_size, shuffle=False)
test_model()
| [
"luxiaonlp@163.com"
] | luxiaonlp@163.com |
e0ae3f8df1d6dacc769316b76657cf29b105ce4a | eaf2b6edff6d1020cf24028cf1d2f1df42b5a263 | /420-strong-password-checker/strong-password-checker.py | ec3726dbb2c42aaff8d8787e5b69e2cfdbaaf1a6 | [] | no_license | zhangoneone/leetcode | 1ac1a960d7aff5b6b8ddb85a48efc4f6c49c582c | 3f8632e5b436293c304e6df6326adc556be6b842 | refs/heads/master | 2020-04-21T06:57:47.415724 | 2018-12-20T01:33:58 | 2018-12-20T01:33:58 | 169,380,067 | 2 | 0 | null | 2019-02-06T09:17:31 | 2019-02-06T09:17:31 | null | UTF-8 | Python | false | false | 1,950 | py | # -*- coding:utf-8 -*-
# A password is considered strong if below conditions are all met:
#
#
# It has at least 6 characters and at most 20 characters.
# It must contain at least one lowercase letter, at least one uppercase letter, and at least one digit.
# It must NOT contain three repeating characters in a row ("...aaa..." is weak, but "...aa...a..." is strong, assuming other conditions are met).
#
#
# Write a function strongPasswordChecker(s), that takes a string s as input, and return the MINIMUM change required to make s a strong password. If s is already strong, return 0.
#
# Insertion, deletion or replace of any one character are all considered as one change.
class Solution(object):
def strongPasswordChecker(self, s):
"""
:type s: str
:rtype: int
"""
missing_type = 3
if any(c.islower() for c in s): missing_type -= 1
if any(c.isupper() for c in s): missing_type -= 1
if any(c.isdigit() for c in s): missing_type -= 1
change = 0
one = two = 0
p = 2
while p < len(s):
if s[p] == s[p-1] == s[p-2]:
length = 2
while p < len(s) and s[p] == s[p-1]:
length += 1
p += 1
change += length / 3
if length % 3 == 0: one += 1
elif length % 3 == 1: two += 1
else:
p += 1
if len(s) < 6:
return max(missing_type, 6 - len(s))
elif len(s) <= 20:
return max(missing_type, change)
else:
delete = len(s) - 20
change -= min(delete, one)
change -= min(max(delete - one, 0), two * 2) / 2
change -= max(delete - one - 2 * two, 0) / 3
return delete + max(missing_type, change)
| [
"foreverbonfy@163.com"
] | foreverbonfy@163.com |
92b27e668bf00bfed941b76018d92906c856691a | 9f1039075cc611198a988034429afed6ec6d7408 | /tensorflow-stubs/contrib/boosted_trees/estimator_batch/custom_loss_head.pyi | 9cfa47bc65db82426ee6bf56237e1355c15c69f7 | [] | no_license | matangover/tensorflow-stubs | 9422fbb1cb3a3638958d621461291c315f9c6ec2 | 664bd995ef24f05ba2b3867d979d23ee845cb652 | refs/heads/master | 2020-05-23T12:03:40.996675 | 2019-05-15T06:21:43 | 2019-05-15T06:21:43 | 186,748,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | pyi | # Stubs for tensorflow.contrib.boosted_trees.estimator_batch.custom_loss_head (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.python.framework import dtypes as dtypes
from tensorflow.python.ops import array_ops as array_ops, math_ops as math_ops
from typing import Any as Any, Optional as Optional
class CustomLossHead(head_lib._RegressionHead):
def __init__(self, loss_fn: Any, link_fn: Any, logit_dimension: Any, head_name: Optional[Any] = ..., weight_column_name: Optional[Any] = ..., metrics_fn: Optional[Any] = ...) -> None: ...
| [
"matangover@gmail.com"
] | matangover@gmail.com |
d29e818b1a22fbe66308e12cac57776e2206777c | d94b78aeb8f7ea79545397f15b8b6d5a4b3e1635 | /test5.py | 916f2676d49ac4604269084f64e1708bedd82ce1 | [] | no_license | levylll/leetcode | 6a32c547c6a0d5aa07126a2ddc46b03b998ae5ad | 319f73e931b081fbf817769da5596c8eefd830a3 | refs/heads/master | 2021-06-19T01:30:03.113632 | 2021-06-11T07:46:02 | 2021-06-11T07:46:02 | 174,638,986 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,342 | py | class Solution(object):
def gen_p1(self, idx, s):
tmp_max = s[idx]
move = 1
while True:
if idx - move >= 0 and idx + move < len(s) and s[idx-move] == s[idx+move]:
tmp_max = s[idx-move] + tmp_max + s[idx+move]
move += 1
else:
return tmp_max
def gen_p2(self, idx, s):
if s[idx] == s[idx + 1]:
tmp_max = s[idx] + s[idx + 1]
else:
return ''
move = 1
while True:
if idx - move >= 0 and idx + move + 1 < len(s) and s[idx-move] == s[idx+move+1]:
tmp_max = s[idx-move] + tmp_max + s[idx+move+1]
move += 1
else:
return tmp_max
def longestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
if not s:
return ''
max_s = ''
for idx, letter in enumerate(s):
tmp_max = self.gen_p1(idx, s)
if len(tmp_max) >= len(max_s):
max_s = tmp_max
if idx == len(s) - 1:
continue
tmp_max = self.gen_p2(idx, s)
if len(tmp_max) >= len(max_s):
max_s = tmp_max
return max_s
s = Solution()
a = "arra"
res = s.longestPalindrome(a)
print(res)
| [
"levylll@163.com"
] | levylll@163.com |
17c670a09a49ec13333d28a5078af3403e9357e3 | 72765a898e97d308c50a8b309b049d568fbbb622 | /examples/bio2bioes.py | 184bb58b88326fc54511acdb064c7ad1b0c3e6b6 | [] | no_license | gswyhq/bert4keras | 95a2fbdfbe49edb55ff56e8b1d75fd0432c440fb | e71a3acdc89b76f90e4d45527d228cc423f7cf1e | refs/heads/master | 2020-08-14T00:50:16.613380 | 2020-04-13T12:57:48 | 2020-04-13T12:57:48 | 215,066,934 | 0 | 0 | null | 2019-10-14T14:34:37 | 2019-10-14T14:34:37 | null | UTF-8 | Python | false | false | 2,114 | py | #!/usr/bin/python3
# coding: utf-8
import os
import sys
import unicodedata
def generator_load_data(data_path):
# print('读取文件:{}'.format(data_path))
with open(data_path, "r") as f:
text = f.readline()
while text:
text = unicodedata.normalize('NFKD', text).strip()
if '/' in text:
text = text.strip()
data = [[word.rsplit('/', maxsplit=1) for word in text.rsplit('\t', maxsplit=1)[0].split() if
word[1] == '/'], text.rsplit('\t', maxsplit=1)[-1]]
yield data
text = f.readline()
def bio2bioes(word_flag):
"""
BIO标注格式转换为BIOES格式
:param word_flag: [['谢', 'B-Shiyi'], ['德', 'I-Shiyi'], ['风', 'I-Shiyi'], ['的', 'O'], ['出', 'O'], ['生', 'O'], ['日', 'O'], ['期', 'O'], ['是', 'O']]
:return:
"""
new_word_flag = []
words_len = len(word_flag)
for _index, (word, flag) in enumerate(word_flag, 1):
if flag[0] in ['B', 'O']:
if flag[0] == 'B' and (_index == words_len or word_flag[_index][1][0] == 'O'):
# 最后,或者独立成词
flag = 'S' + flag[1:]
new_word_flag.append([word, flag])
elif flag[0] == 'I':
if _index == words_len or word_flag[_index][1][0] == 'O':
flag = 'E' + flag[1:]
new_word_flag.append([word, flag])
else:
new_word_flag.append([word, flag])
return new_word_flag
def main():
TRAIN_DATA_PATH = "../data/ner_rel_train.txt"
DEV_DATA_PATH = "../data/ner_rel_dev.txt"
TEST_DATA_PATH = "../data/ner_rel_test.txt"
for bio_file_name in [TRAIN_DATA_PATH, DEV_DATA_PATH, TEST_DATA_PATH]:
bioes_file_name = bio_file_name[:-4] + '_BIOES.txt'
with open(bioes_file_name, 'w')as f2:
for word_flag, label in generator_load_data(bio_file_name):
word_flag = bio2bioes(word_flag)
f2.write('{}\t{}\n'.format(' '.join('/'.join(w) for w in word_flag), label))
if __name__ == '__main__':
main() | [
"gswyhq@126.com"
] | gswyhq@126.com |
f9d1ff55ba1de3c5bd0d2a7619b945862ecc9e1f | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-hilens/huaweicloudsdkhilens/v3/model/create_task_request.py | 363ab85585e3ef85232d5c1955b7bd770a145264 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,181 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateTaskRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'deployment_id': 'str',
'body': 'TaskRequest'
}
attribute_map = {
'deployment_id': 'deployment_id',
'body': 'body'
}
def __init__(self, deployment_id=None, body=None):
"""CreateTaskRequest
The model defined in huaweicloud sdk
:param deployment_id: 部署ID,从专业版HiLens控制台部署管理[获取部署列表](getDeploymentListUsingGET.xml)获取
:type deployment_id: str
:param body: Body of the CreateTaskRequest
:type body: :class:`huaweicloudsdkhilens.v3.TaskRequest`
"""
self._deployment_id = None
self._body = None
self.discriminator = None
self.deployment_id = deployment_id
if body is not None:
self.body = body
@property
def deployment_id(self):
"""Gets the deployment_id of this CreateTaskRequest.
部署ID,从专业版HiLens控制台部署管理[获取部署列表](getDeploymentListUsingGET.xml)获取
:return: The deployment_id of this CreateTaskRequest.
:rtype: str
"""
return self._deployment_id
@deployment_id.setter
def deployment_id(self, deployment_id):
"""Sets the deployment_id of this CreateTaskRequest.
部署ID,从专业版HiLens控制台部署管理[获取部署列表](getDeploymentListUsingGET.xml)获取
:param deployment_id: The deployment_id of this CreateTaskRequest.
:type deployment_id: str
"""
self._deployment_id = deployment_id
@property
def body(self):
"""Gets the body of this CreateTaskRequest.
:return: The body of this CreateTaskRequest.
:rtype: :class:`huaweicloudsdkhilens.v3.TaskRequest`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this CreateTaskRequest.
:param body: The body of this CreateTaskRequest.
:type body: :class:`huaweicloudsdkhilens.v3.TaskRequest`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateTaskRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
11da6a73feea9cad9d0a412ebabf156cad4d81c4 | 193b35f8acaae37b43fe680bf9a6a3111db3b9c7 | /myPython/class/__hook__.py | d28d28c64b8393a75ad01283dd5c8b737bb64977 | [] | no_license | zerosum99/python_basic | 6726d0d5210fdff1e22f452470b515478f64b7cb | 4b9e2b3478472830d901748bd6a2ac84c3dcc684 | refs/heads/master | 2021-05-23T05:41:01.742846 | 2021-05-16T09:16:39 | 2021-05-16T09:16:39 | 94,850,543 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,427 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 06 16:41:23 2016
@author: 06411
"""
import abc
def interface(*attributes):
def decorator(Base):
def checker(Other):
return all(hasattr(Other, a) for a in attributes)
def ins_checker(Other):
if type(Other) == Base :
return True
return False
def __subclasshook__(cls, Other):
if checker(Other):
return True
return NotImplemented
def __instancecheck__(cls, Other):
return ins_checker(Other)
Base.__subclasshook__ = classmethod(__subclasshook__)
Base.__instancecheck__ = classmethod(__instancecheck__)
return Base
return decorator
@interface("x", "y")
class Foo(object):
__metaclass__ = abc.ABCMeta
def x(self): return 5
def y(self): return 10
class Bar(object):
def x(self): return "blah"
def y(self): return "blah"
class Baz(object):
def __init__(self):
self.x = "blah"
self.y = "blah"
class attrdict(dict):
def __getattr__(self, attr):
return self[attr]
f = Foo()
b = Bar()
z = Baz()
t = attrdict({"x":27.5, "z":37.5})
print isinstance(f, Foo)
print isinstance(b, Foo)
print isinstance(z, Foo)
print isinstance(t, Foo)
print "hook ",Foo.__subclasshook__(f),Foo.__subclasshook__(t)
print "instance ",Foo.__instancecheck__(f),Foo.__instancecheck__(b) | [
"myjlms99@gmail.com"
] | myjlms99@gmail.com |
86c71b01760e5102eb4d6243cbb87d4644df51c8 | bb88122fc4978b14e8a9b02d8c11f1ce67ea17d0 | /03_ML/m31_smote2_wine_quality.py | 0910ec5b43d9b87466d0691c7f65f194c02a62e3 | [] | no_license | star10919/Keras_ | c2c8a6f3d0e1a7ceba9e81dbc51ecfd12bd5fe78 | f3156b7db6e12feea075b46e94b09157f43a141c | refs/heads/main | 2023-08-17T22:44:54.324315 | 2021-10-24T02:47:00 | 2021-10-24T02:47:00 | 390,066,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,003 | py | ### 데이터 증폭(smote 사용) - acc보다 F1 score가 높아짐
# y 라벨 개수 가장 큰 거 기준으로 동일하게 맞춰줌
### k_neighbors 의 디폴트 5 : 각 라벨의 개수가 5보다 커야 증폭(smote) 가능해짐! / 아니면 k_neighbors의 값을 낮춰주면 됨
# => k_neighbors 값 줄이면 score 떨어짐(연산수 줄기 때문에)
from imblearn.over_sampling import SMOTE
from sklearn.datasets import load_wine
import pandas as pd
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
import time
import warnings
warnings.filterwarnings('ignore')
datasets = pd.read_csv('../_data/winequality-white.csv', index_col=None, header=0, sep=';') # 비정제 데이터
datasets = datasets.values # 판다스 넘파이로 변환
x = datasets[:, :11]
y = datasets[:, 11]
print(x.shape, y.shape) # (4898, 11) (4898,)
print(pd.Series(y).value_counts()) #value_counts는 판다스 함수임!(넘파이X)
# 6.0 2198
# 5.0 1457
# 7.0 880
# 8.0 175
# 4.0 163
# 3.0 20
# 9.0 5
print(y)
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.75, shuffle=True, random_state=9, stratify=y) # stratify=y_new : y_new 라벨의 비율로 나눠줌!!!
print(pd.Series(y_train).value_counts())
# 6.0 1648
# 5.0 1093
# 7.0 660
# 8.0 131
# 4.0 122
# 3.0 15
# 9.0 4
model = XGBClassifier(n_jobs=-1)
model.fit(x_train, y_train, eval_metric='mlogloss') # xgboost 쓰면 이발메트릭스 사용해야 함!
score = model.score(x_test, y_test)
print("model.score :", score) # model.score : 0.6563265306122449
########################################### smote 적용 ##############################################
print("=============================== smote 적용 ===============================")
smote = SMOTE(random_state=66, k_neighbors=3) #k_neighbors 의 디폴트 5 / 가장 작은 라벨인 9의 라벨 개수가 4이므로 디폴드인 5보다 작은 값으로 에러나니까 k_neighbors의 값을 낮춰줌
x_smote_train, y_smote_train = smote.fit_resample(x_train, y_train) # train만 smote(증폭) 시킴, test는 하지 않음
#####################################################################################################
print(pd.Series(y_smote_train).value_counts())
# 6.0 1648
# 5.0 1648
# 4.0 1648
# 9.0 1648
# 8.0 1648
# 7.0 1648
# 3.0 1648
print(x_smote_train.shape, y_smote_train.shape) #
print("smote 전 :", x_train.shape, y_train.shape)
print("smote 후 :", x_smote_train.shape, y_smote_train.shape)
print("smote전 레이블 값 분포 :\n", pd.Series(y_train).value_counts())
print("smote후 레이블 값 분포 :\n", pd.Series(y_smote_train).value_counts())
model2 = XGBClassifier(n_jobs=-1)
model2.fit(x_smote_train, y_smote_train, eval_metric='mlogloss')
score2 = model2.score(x_test, y_test)
print("model2.score :", score2) # model2.score : 0.6302040816326531 | [
"star10919@naver.com"
] | star10919@naver.com |
624b8b66742a21f03a4eaf8cd8f640fc0054d3f3 | f7be1846da14366ca8277dc3edc207766de838f0 | /ICPC Practice/minimum-distances.py | 0fdc9b8546501ad63e61fac9a7b45f125161e1d2 | [] | no_license | IsThatYou/Competitive-Programming | d440f9e78f8a982cd60aa8c81833b9e10208b29e | a924ac7087654402c7f7c4b62374c377178061ad | refs/heads/master | 2020-07-19T16:31:32.492635 | 2019-01-27T02:45:21 | 2019-01-27T02:45:21 | 73,759,128 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | length = int(input().strip())
A = [int(A_temp) for A_temp in input().strip().split(' ')]
lowest = 2000
for i in range(length):
for j in range(i + 1, length):
if A[i] == A[j]:
dis = abs(j - i)
if dis < lowest:
lowest = dis
if lowest != 2000:
print(lowest)
else:
print(-1) | [
"junlinwang18@gmail.com"
] | junlinwang18@gmail.com |
af2ab744148c104fa9b235621f90b30107208a7a | 8c2de4da068ba3ed3ce1adf0a113877385b7783c | /hyperion/bin/pack-wav-rirs.py | 0017798894270af755bafa05e4a9b4fd9b86d2b2 | [
"Apache-2.0"
] | permissive | hyperion-ml/hyperion | a024c718c4552ba3a03aae2c2ca1b8674eaebc76 | c4c9eee0acab1ba572843373245da12d00dfffaa | refs/heads/master | 2023-08-28T22:28:37.624139 | 2022-03-25T16:28:08 | 2022-03-25T16:28:08 | 175,275,679 | 55 | 20 | Apache-2.0 | 2023-09-13T15:35:46 | 2019-03-12T18:40:19 | Python | UTF-8 | Python | false | false | 1,865 | py | #!/usr/bin/env python
"""
Copyright 2020 Jesus Villalba (Johns Hopkins University)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
import sys
import os
from jsonargparse import (
ArgumentParser,
ActionConfigFile,
ActionParser,
namespace_to_dict,
)
import time
import logging
import math
import numpy as np
from hyperion.hyp_defs import config_logger
from hyperion.io import SequentialAudioReader as AR
from hyperion.io import DataWriterFactory as DWF
def pack_wav_rirs(input_path, output_spec, **kwargs):
    """Read RIR wave files, normalize/trim them and pack them into h5/ark.

    Each impulse response is reduced to its first channel, scaled so its
    peak (sample of largest magnitude) becomes 1, thresholded, and stripped
    of leading/trailing zeros before being written out.

    Args:
      input_path: input specifier understood by SequentialAudioReader.
      output_spec: output specifier understood by DataWriterFactory.
      kwargs: ignored; lets callers forward a full argument namespace.
    """
    writer = DWF.create(output_spec, scp_sep=" ", compress=False)
    t_start = time.time()
    with AR(input_path, wav_scale=1) as reader:
        for key, rir, fs in reader:
            if rir.ndim == 2:
                rir = rir[:, 0]  # keep only the first channel
            peak_idx = np.argmax(np.abs(rir))
            peak_val = rir[peak_idx]
            rir /= peak_val
            # Zero everything below 1e-3; note this also zeroes every
            # negative sample, which is the original behavior.
            rir[rir < 1e-3] = 0
            rir = np.trim_zeros(rir)
            logging.info(
                "Packing rir %s h_max=%f h_delay=%d h-length=%d"
                % (key, peak_val, peak_idx, len(rir))
            )
            writer.write([key], [rir])
    logging.info("Packed RIRS elapsed-time=%.f" % (time.time() - t_start))
if __name__ == "__main__":
    # Command-line front end: parse arguments, configure logging, run packer.
    parser = ArgumentParser(description="Packs RIRs in wave format to h5/ark files")
    parser.add_argument("--cfg", action=ActionConfigFile)
    parser.add_argument("--input", dest="input_path", required=True)
    parser.add_argument("--output", dest="output_spec", required=True)
    parser.add_argument(
        "-v",
        "--verbose",
        dest="verbose",
        default=1,
        choices=[0, 1, 2, 3],
        type=int,
        help="Verbose level",
    )
    args = parser.parse_args()
    config_logger(args.verbose)
    # verbose only controls logging; drop it so the remaining namespace
    # matches pack_wav_rirs' keyword arguments exactly.
    del args.verbose
    logging.debug(args)
    pack_wav_rirs(**namespace_to_dict(args))
| [
"jesus.antonio.villalba@gmail.com"
] | jesus.antonio.villalba@gmail.com |
4bacc7bb356d16649339adbe3f7ae44936cef97b | b1cf54e4d6f969d9084160fccd20fabc12c361c2 | /leetcode/longest_substring.py | e189bb5bcce062b5ff99c66cba029c56af0f6dd4 | [] | no_license | zarkle/code_challenges | 88a53477d6f9ee9dd71577678739e745b9e8a694 | 85b7111263d4125b362184df08e8a2265cf228d5 | refs/heads/master | 2021-06-10T11:05:03.048703 | 2020-01-23T06:16:41 | 2020-01-23T06:16:41 | 136,668,643 | 0 | 1 | null | 2019-02-07T23:35:59 | 2018-06-08T21:44:26 | JavaScript | UTF-8 | Python | false | false | 1,746 | py | # https://leetcode.com/problems/longest-substring-without-repeating-characters/
# https://leetcode.com/problems/longest-substring-without-repeating-characters/solution/
# runtime 640 ms, 12.5%; memory 13.1 MB, 5.5%
class Solution:
    def lengthOfLongestSubstring(self, s: str) -> int:
        """Length of the longest substring of s with no repeated characters.

        Scans left to right; on a repeated character it restarts just after
        the first occurrence of that character (quadratic in the worst case).
        """
        if not s:
            return 0
        best = 0
        seen = {}  # char -> index inside the current window
        pos = 0
        while pos <= len(s) - 1:
            ch = s[pos]
            if ch not in seen:
                seen[ch] = pos
            else:
                best = max(best, len(seen))
                restart = seen[ch] + 1
                seen = {s[restart]: restart}
                pos = restart
            pos += 1
        return max(best, len(seen))
# runtime 56 ms, 99.5%; memory 13.3 MB, 5%
class Solution:
    def lengthOfLongestSubstring(self, s: str) -> int:
        """Longest duplicate-free substring, tracked as a literal window string."""
        if not s:
            return 0
        window = ""
        best = 0
        for ch in s:
            if ch in window:
                best = max(best, len(window))
                # Drop everything up to and including the previous occurrence.
                window = window[window.index(ch) + 1:]
            window += ch
        return max(best, len(window))
# runtime 48 ms, 100%; memory 13.3 MB, 5%
class Solution:
    def lengthOfLongestSubstring(self, s: str) -> int:
        """O(n) sliding window using each character's most recent position.

        `start` holds the index just before the current duplicate-free
        window; when a character repeats inside the window, the window
        start jumps forward instead of rescanning.
        """
        last_pos = {}  # char -> most recent index
        start = -1     # index just before the current window
        longest = 0    # renamed from `max`, which shadowed the builtin
        for i, ch in enumerate(s):
            if ch in last_pos and last_pos[ch] > start:
                # Repeat inside the window: shrink it; length cannot grow here.
                start = last_pos[ch]
            elif i - start > longest:
                longest = i - start
            last_pos[ch] = i  # single update instead of one per branch
        return longest
# test cases: "au", "dvdf"
| [
"beverly.pham@gmail.com"
] | beverly.pham@gmail.com |
082a6d27401dee260c30ad1efb445313ff63bd21 | c9d81b5d0b258b57a06a99f43a79dc1ecd219488 | /Test_1.py | 6b0a1b0fdce78cf79cfec3ab37a0739d156c579e | [] | no_license | babiswas/Python-Design-Patterns | 4f4851cc55ae1bee8828f099f2c2610a36f4e8d5 | ea6e417880bab26bded60c67188a12623f74639f | refs/heads/master | 2020-07-30T05:28:10.030541 | 2019-09-26T17:54:21 | 2019-09-26T17:54:21 | 210,102,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | from abc import ABC,abstractmethod
class Pet(ABC):
    """Abstract base class for pets; subclasses implement speak/having_food.

    Inheriting from ABC is required for @abstractmethod to take effect:
    without the ABCMeta metaclass the decorators are silently ignored and
    this base class could be instantiated. The module already imported ABC
    but never used it.
    """
    @abstractmethod
    def speak(self):
        """Emit the pet's characteristic sound."""
    @abstractmethod
    def having_food(self):
        """Consume the pet's food."""
class Cat(Pet):
    """Concrete pet: a cat named at construction time."""
    def __init__(self, name):
        self.name = name
    def speak(self):
        # Cats announce themselves by meowing.
        print(f"{self.name} Mew Mew")
    def having_food(self):
        # Cats drink milk in this toy model.
        print(f"{self.name} having milk")
class Dog(Pet):
    """Concrete pet: a dog named at construction time."""
    def __init__(self, name):
        self.name = name
    def speak(self):
        # Dogs announce themselves by barking.
        print(f"{self.name} bow bow")
    def having_food(self):
        # Dogs eat meat in this toy model.
        print(f"{self.name} having meat")
def get_pet(pet="cat"):
    """Return a freshly built pet instance for the requested kind.

    Raises KeyError when the kind is neither 'cat' nor 'dog'.
    """
    available = {"cat": Cat("Meww"), "dog": Dog("Bow Bow")}
    return available[pet]
if __name__=="__main__":
    # Simple REPL demo: keep asking for a pet kind until the process is killed.
    while True:
        try:
            key=input("Enter key")
            obj=get_pet(key)
            obj.speak()
            obj.having_food()
        # Broad catch: an unknown key raises KeyError inside get_pet.
        except Exception as e:
            print("Invalid key entered")
| [
"noreply@github.com"
] | babiswas.noreply@github.com |
78b2e5ad6eda0389d08f76cb7f95ff272be8fc13 | 152b74ed7d60d75a9d70f6637c107fff9b064ff9 | /Chapter03/MalGan/MalGAN_gen_adv_examples.py | 0289a0a9a76912a65061cdd41fbacd0dab425a1f | [
"MIT"
] | permissive | PacktPublishing/Machine-Learning-for-Cybersecurity-Cookbook | 1d7a50fb79b5da8c411eda9dc9cface4d0f78125 | 19b9757020cbcb09d9bb4249605fbb9c7322d92b | refs/heads/master | 2023-05-12T08:29:13.569598 | 2023-01-18T10:19:07 | 2023-01-18T10:19:07 | 222,411,828 | 250 | 164 | MIT | 2023-05-01T20:11:44 | 2019-11-18T09:33:53 | Jupyter Notebook | UTF-8 | Python | false | false | 2,075 | py | from sklearn.neighbors import NearestNeighbors
from keras import backend as K
import MalGAN_utils
from MalGAN_preprocess import preprocess
import numpy as np
def gen_adv_samples(model, fn_list, pad_percent=0.1, step_size=0.001, thres=0.5):
    """Generate adversarial byte sequences for a list of binaries.

    Each file is converted to a padded byte sequence, perturbed in the
    model's embedding space with FGSM, and the perturbed embeddings are
    mapped back to concrete byte values via nearest-neighbor search over
    the embedding matrix.

    Args:
      model: Keras model whose second layer is the byte-embedding layer.
      fn_list: list of file paths to convert.
      pad_percent: fraction of the file length usable as modifiable padding.
      step_size: FGSM step size.
      thres: score threshold; files already scoring >= thres are kept as-is.

    Returns:
      (adv_samples, log): adversarial byte strings and the per-file logger.
    """
    ### search for nearest neighbor in embedding space ###
    def emb_search(org, adv, pad_idx, pad_len, neigh):
        out = org.copy()
        for idx in range(pad_idx, pad_idx+pad_len):
            target = adv[idx].reshape(1, -1)
            best_idx = neigh.kneighbors(target, 1, False)[0][0]
            out[0][idx] = best_idx
        return out

    max_len = int(model.input.shape[1])
    emb_layer = model.layers[1]
    emb_weight = emb_layer.get_weights()[0]
    inp2emb = K.function([model.input]+ [K.learning_phase()], [emb_layer.output]) # [function] Map sequence to embedding

    # Build neighbor searches
    neigh = NearestNeighbors(1)
    neigh.fit(emb_weight)

    log = MalGAN_utils.logger()
    adv_samples = []
    for e, fn in enumerate(fn_list):
        ### run one file at a time due to different padding length, [slow]
        inp, len_list = preprocess([fn], max_len)
        inp_emb = np.squeeze(np.array(inp2emb([inp, False])), 0)
        pad_idx = len_list[0]
        pad_len = max(min(int(len_list[0]*pad_percent), max_len-pad_idx), 0)
        org_score = model.predict(inp)[0][0] ### original score, 0 -> malicious, 1 -> benign
        loss, pred = float('nan'), float('nan')
        if pad_len > 0 and org_score < thres:
            # NOTE(review): fgsm is neither defined nor imported in this
            # module; presumably provided elsewhere (MalGAN_utils?) -- verify.
            adv_emb, gradient, loss = fgsm(model, inp_emb, pad_idx, pad_len, e, step_size)
            adv = emb_search(inp, adv_emb[0], pad_idx, pad_len, neigh)
            pred = model.predict(adv)[0][0]
            final_adv = adv[0][:pad_idx+pad_len]
        else:
            # Use the original file unchanged. This branch now also covers
            # pad_len == 0, which previously left final_adv unassigned and
            # raised NameError at bytes(list(final_adv)) below.
            final_adv = inp[0][:pad_idx]
        log.write(fn, org_score, pad_idx, pad_len, loss, pred)
        # sequence to bytes
        bin_adv = bytes(list(final_adv))
        adv_samples.append(bin_adv)
    return adv_samples, log
"dineshchaudhary@packtpub.com"
] | dineshchaudhary@packtpub.com |
cc45fdbbddc41768ecf333b33f0e6b2c01dcbd95 | 42c48f3178a48b4a2a0aded547770027bf976350 | /google/ads/google_ads/v3/services/ad_schedule_view_service_client_config.py | 90efc7090c35efbde8f0c9a47c102453d887ece9 | [
"Apache-2.0"
] | permissive | fiboknacky/google-ads-python | e989464a85f28baca1f28d133994c73759e8b4d6 | a5b6cede64f4d9912ae6ad26927a54e40448c9fe | refs/heads/master | 2021-08-07T20:18:48.618563 | 2020-12-11T09:21:29 | 2020-12-11T09:21:29 | 229,712,514 | 0 | 0 | Apache-2.0 | 2019-12-23T08:44:49 | 2019-12-23T08:44:49 | null | UTF-8 | Python | false | false | 815 | py | config = {
"interfaces": {
"google.ads.googleads.v3.services.AdScheduleViewService": {
"retry_codes": {
"idempotent": [
"DEADLINE_EXCEEDED",
"UNAVAILABLE"
],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 5000,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 3600000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 3600000,
"total_timeout_millis": 3600000
}
},
"methods": {
"GetAdScheduleView": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
}
}
}
}
}
| [
"noreply@github.com"
] | fiboknacky.noreply@github.com |
7da40b77085e8a9aff24f6e4855a7b4fee264fef | 373c0cc659e0c77739ff87f01b159ab3969bce72 | /pca/pca5.py | a80b3a04c841b8406b5da0159da05a9db468a7bd | [] | no_license | c1a1o1/cancer | 75da16ab43d4925f849fc0c84a98d69d46e3aea1 | 268f8e1553f1237fc2d0d0d3d7c13f664792aa92 | refs/heads/master | 2021-01-20T04:17:09.858006 | 2017-02-24T16:53:35 | 2017-02-24T16:53:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,139 | py | #from numpy import mean, cov, cumsum, dot, linalg, size, flipud, argsort
#from pylab import imread, subplot, imshow, title, gray, figure, show, NullLocator, imsave
import numpy as np
import matplotlib.pyplot as plt
def princomp(A, numpc=0):
    """PCA of data matrix *A* (observations in rows, variables in columns).

    Returns (eigenvectors, eigenvalues, projection): the eigenvectors are
    sorted by decreasing eigenvalue and truncated to the leading *numpc*
    columns, while eigenvalues keeps the full spectrum. When numpc falls
    outside [0, p) the projection stays the integer 0 (original contract).
    """
    # Center each column, then transpose so variables become rows for np.cov.
    centered = (A - np.mean(A.T, axis=1)).T
    eigenvalues, eigenvectors = np.linalg.eig(np.cov(centered))
    num_components = np.size(eigenvectors, axis=1)
    # Indices that sort the eigenvalues in *descending* order.
    order = np.argsort(eigenvalues)[::-1]
    eigenvectors = eigenvectors[:, order]
    eigenvalues = eigenvalues[order]
    projection = 0
    if 0 <= numpc < num_components:
        # Keep only the leading numpc principal axes and project the data.
        eigenvectors = eigenvectors[:, range(numpc)]
        projection = np.dot(eigenvectors.T, centered)
    return eigenvectors, eigenvalues, projection
A = plt.imread('../shakira.jpg') # load an image
A = np.mean(A, 2) # to get a 2-D array
full_pc = np.size(A, axis=1) # numbers of all the principal components
i = 1 # subplots
dist = []
for numpc in range(0, full_pc+10, 10): # 0 10 20 ... full_pc
    # showing the pics reconstructed with less than 50 PCs
    if numpc <= 50:
        eigenvectors, eigenvalues, projection = princomp(A, numpc)
        Ar = np.dot(eigenvectors, projection).T + np.mean(A, axis=0) # image reconstruction
        # difference in Frobenius norm
        dist.append(np.linalg.norm(A-Ar, 'fro'))
        ax = plt.subplot(2, 3, i, frame_on=False)
        ax.xaxis.set_major_locator(plt.NullLocator()) # remove ticks
        ax.yaxis.set_major_locator(plt.NullLocator())
        i += 1
        plt.imshow(Ar)
        plt.title('PCs # ' + str(numpc))
        plt.gray()
        # keep the 50-component reconstruction for the comparison figure below
        if numpc == 50:
            A50 = Ar
plt.figure()
plt.imshow(A)
plt.title('numpc FULL')
plt.gray()
#imsave("shakira40.jpg", A50)
plt.figure()
plt.imshow(A50)
plt.title('numpc 50')
plt.gray()
plt.figure()
# NOTE: eigenvalues comes from the last iteration that called princomp
# (numpc == 50 here); princomp truncates only the eigenvectors, so the
# full spectrum is available for the cumulative-variance curve.
perc = np.cumsum(eigenvalues) / sum(eigenvalues)
dist = dist / np.max(dist) # normalized reconstruction error (plot line disabled)
plt.plot(range(len(perc)), perc, 'b')
#plt.plot(range(len(dist)), dist, 'r')
plt.axis([0, full_pc, 0, 1.1])
plt.show()
| [
"senenbotello@gmail.com"
] | senenbotello@gmail.com |
822d0722fc6c480872f5f0a209984e64e0d29091 | 597f847f0bd2112e45ca50979acec403a5f969cf | /python/day06/exercise/test2.py | f2ef95420fe5500c69f3b2906f06ca1a0d91a442 | [] | no_license | wangyuhui12/AID1804 | e5d26aa6d505655cd885784cc0645c9ea8323df5 | 7291fc9411b541d17e540dd829166e64887fd9f7 | refs/heads/master | 2022-09-11T23:10:47.631426 | 2022-08-03T09:01:13 | 2022-08-03T09:01:13 | 136,152,631 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py |
# 2、有一些数存在于列表L中,如:
# L = [1, 3, 2, 1, 6, 4, ..., 98, 82]
# (此数据自己定义)
# 将列表L中的数存入于另一个列表L2中(要求,重复出现多次的数字只在L2列表中保留一份)
L = []
while True:
n = input("请输入一个整数:")
if not n:
break
n = int(n)
L.append(n)
# for i in L:
# if L.count(i) > 1:
# j = L.count(i)
# for x in range(j-1):
# L.remove(i)
# 判断i是否在L2中,是则添加,否则continue
L2 =[]
for i in L:
if i in L2:
continue
L2.append(i)
print(L2)
| [
"noreply@github.com"
] | wangyuhui12.noreply@github.com |
c1d153362cd6ae47be10bb825b009ce4fa2a7ef9 | 204ec78fcebcea9e1e1da4905cf3fad0a514b01f | /pyocd/target/builtin/target_MPS3_AN522.py | c8d9dd2265ca1de7624831c0ec1156619eb87e21 | [
"Apache-2.0"
] | permissive | ARMmbed/pyOCD | 659340bf8753aa8e15a72890b8bea64dff2c2f42 | d4cdcf7e532cae17caad866839287bbe1e0d952b | refs/heads/master | 2023-05-31T13:45:15.797588 | 2020-10-12T13:55:47 | 2020-10-12T13:55:47 | 190,203,829 | 3 | 1 | Apache-2.0 | 2019-07-05T11:05:40 | 2019-06-04T13:09:56 | Python | UTF-8 | Python | false | false | 1,492 | py | # pyOCD debugger
# Copyright (c) 2020 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...coresight.coresight_target import CoreSightTarget
from ...core.memory_map import (RamRegion, MemoryMap)
class AN522(CoreSightTarget):
VENDOR = "Arm"
MEMORY_MAP = MemoryMap(
RamRegion( name='itcm', start=0x00000000, length=0x00080000, access='rwx'),
RamRegion( name='sram', start=0x30000000, length=0x00100000, access='rwx'),
RamRegion( name='dram6_ns', start=0x60000000, length=0x10000000, access='rwx'),
RamRegion( name='dram7_s', start=0x70000000, length=0x10000000, access='rwxs'),
RamRegion( name='dram8_ns', start=0x80000000, length=0x10000000, access='rwx'),
RamRegion( name='dram9_s', start=0x90000000, length=0x10000000, access='rwxs'),
)
def __init__(self, session):
super(AN522, self).__init__(session, self.MEMORY_MAP)
| [
"flit@me.com"
] | flit@me.com |
b141ab2597ba820e82b8091ee4334bd5218daa59 | d2fdd6b10b0467913971d1408a9a4053f0be9ffb | /datahub/metadata/migrations/0015_add_iso_to_country.py | 891f31f2965cb861506230efd7bb9f663ed86b23 | [] | no_license | jakub-kozlowski/data-hub-leeloo | fc5ecebb5e4d885c824fc7c85acad8837fcc5c76 | 7f033fcbcfb2f7c1c0e10bec51620742d3d929df | refs/heads/master | 2020-05-18T13:29:14.145251 | 2019-04-30T12:12:50 | 2019-04-30T12:12:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | # Generated by Django 2.1.2 on 2018-10-30 17:24
from pathlib import PurePath
from django.core.management import call_command
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('metadata', '0014_investmentprojectstage_exclude_from_investment_flow'),
]
operations = [
migrations.AddField(
model_name='country',
name='iso_alpha2_code',
field=models.CharField(blank=True, max_length=2),
),
]
| [
"info@marcofucci.com"
] | info@marcofucci.com |
ddbc45e6681b12728108ffcd2a027d6639b8f7a3 | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/eventhub/azure-eventhub/azure/eventhub/_buffered_producer/_buffered_producer_dispatcher.py | 7bad79bead6409a6e962b5d0e80ddaf2063c179e | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 7,290 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import annotations
import logging
from threading import Lock
from concurrent.futures import ThreadPoolExecutor
from typing import Dict, Optional, List, Callable, Union, TYPE_CHECKING
from ._partition_resolver import PartitionResolver
from ._buffered_producer import BufferedProducer
from .._producer import EventHubProducer
from ..exceptions import EventDataSendError, ConnectError, EventHubError
if TYPE_CHECKING:
from .._transport._base import AmqpTransport
from .._producer_client import SendEventTypes
_LOGGER = logging.getLogger(__name__)
class BufferedProducerDispatcher:
# pylint: disable=too-many-instance-attributes
def __init__(
self,
partitions: List[str],
on_success: Callable[["SendEventTypes", Optional[str]], None],
on_error: Callable[["SendEventTypes", Optional[str], Exception], None],
create_producer: Callable[..., EventHubProducer],
eventhub_name: str,
max_message_size_on_link: int,
*,
amqp_transport: AmqpTransport,
max_buffer_length: int = 1500,
max_wait_time: float = 1,
executor: Optional[Union[ThreadPoolExecutor, int]] = None
):
self._buffered_producers: Dict[str, BufferedProducer] = {}
self._partition_ids: List[str] = partitions
self._lock = Lock()
self._on_success = on_success
self._on_error = on_error
self._create_producer = create_producer
self._eventhub_name = eventhub_name
self._max_message_size_on_link = max_message_size_on_link
self._partition_resolver = PartitionResolver(self._partition_ids)
self._max_wait_time = max_wait_time
self._max_buffer_length = max_buffer_length
self._existing_executor = False
self._amqp_transport = amqp_transport
if not executor:
self._executor = ThreadPoolExecutor()
elif isinstance(executor, ThreadPoolExecutor):
self._existing_executor = True
self._executor = executor
elif isinstance(executor, int):
self._executor = ThreadPoolExecutor(executor)
def _get_partition_id(self, partition_id, partition_key):
if partition_id:
if partition_id not in self._partition_ids:
raise ConnectError(
"Invalid partition {} for the event hub {}".format(
partition_id, self._eventhub_name
)
)
return partition_id
if isinstance(partition_key, str):
return self._partition_resolver.get_partition_id_by_partition_key(
partition_key
)
return self._partition_resolver.get_next_partition_id()
def enqueue_events(
self, events, *, partition_id=None, partition_key=None, timeout_time=None
):
pid = self._get_partition_id(partition_id, partition_key)
with self._lock:
try:
self._buffered_producers[pid].put_events(events, timeout_time)
except KeyError:
buffered_producer = BufferedProducer(
self._create_producer(pid),
pid,
self._on_success,
self._on_error,
self._max_message_size_on_link,
executor=self._executor,
max_wait_time=self._max_wait_time,
max_buffer_length=self._max_buffer_length,
amqp_transport = self._amqp_transport,
)
buffered_producer.start()
self._buffered_producers[pid] = buffered_producer
buffered_producer.put_events(events, timeout_time)
def flush(self, timeout_time=None):
# flush all the buffered producer, the method will block until finishes or times out
with self._lock:
futures = []
for pid, producer in self._buffered_producers.items():
# call each producer's flush method
futures.append(
(
pid,
self._executor.submit(
producer.flush, timeout_time=timeout_time
),
)
)
# gather results
exc_results = {}
for pid, future in futures:
try:
future.result()
except Exception as exc: # pylint: disable=broad-except
exc_results[pid] = exc
if not exc_results:
_LOGGER.info("Flushing all partitions succeeded")
return
_LOGGER.warning(
"Flushing all partitions partially failed with result %r.", exc_results
)
raise EventDataSendError(
message="Flushing all partitions partially failed, failed partitions are {!r}"
" Exception details are {!r}".format(exc_results.keys(), exc_results)
)
def close(self, *, flush=True, timeout_time=None, raise_error=False):
with self._lock:
futures = []
# stop all buffered producers
for pid, producer in self._buffered_producers.items():
futures.append(
(
pid,
self._executor.submit(
producer.stop,
flush=flush,
timeout_time=timeout_time,
raise_error=raise_error,
),
)
)
exc_results = {}
# gather results
for pid, future in futures:
try:
future.result()
except Exception as exc: # pylint: disable=broad-except
exc_results[pid] = exc
if exc_results:
_LOGGER.warning(
"Stopping all partitions partially failed with result %r.",
exc_results,
)
if raise_error:
raise EventHubError(
message="Stopping all partitions partially failed, failed partitions are {!r}"
" Exception details are {!r}".format(
exc_results.keys(), exc_results
)
)
if not self._existing_executor:
self._executor.shutdown()
def get_buffered_event_count(self, pid):
try:
return self._buffered_producers[pid].buffered_event_count
except KeyError:
return 0
@property
def total_buffered_event_count(self):
return sum(
[self.get_buffered_event_count(pid) for pid in self._buffered_producers]
)
| [
"noreply@github.com"
] | openapi-env-test.noreply@github.com |
3eb5675cb98630a1417a7a99e8ead5bfe8caf461 | 551b75f52d28c0b5c8944d808a361470e2602654 | /huaweicloud-sdk-cbr/huaweicloudsdkcbr/v1/model/show_member_detail_request.py | b7d02e8b454b32351aa7f4f4af4a67e39d3cac14 | [
"Apache-2.0"
] | permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 3,456 | py | # coding: utf-8
import pprint
import re
import six
class ShowMemberDetailRequest:
    """Request model for fetching one share member of a backup.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the API definition.
    """
    sensitive_list = []
    openapi_types = {
        'backup_id': 'str',
        'member_id': 'str'
    }
    attribute_map = {
        'backup_id': 'backup_id',
        'member_id': 'member_id'
    }
    def __init__(self, backup_id=None, member_id=None):
        """ShowMemberDetailRequest - a model defined in huaweicloud sdk"""
        self._backup_id = None
        self._member_id = None
        self.discriminator = None
        self.backup_id = backup_id
        self.member_id = member_id
    @property
    def backup_id(self):
        """str: ID of the backup copy."""
        return self._backup_id
    @backup_id.setter
    def backup_id(self, backup_id):
        """Set the backup copy ID."""
        self._backup_id = backup_id
    @property
    def member_id(self):
        """str: ID of the share member."""
        return self._member_id
    @member_id.setter
    def member_id(self, member_id):
        """Set the share member ID."""
        self._member_id = member_id
    def to_dict(self):
        """Return the model's properties as a plain dict."""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                # Mask attributes marked sensitive; none are for this model.
                result[attr] = "****" if attr in self.sensitive_list else value
        return result
    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Equal when *other* is the same model type with equal state."""
        if not isinstance(other, ShowMemberDetailRequest):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
f2e6beef9ab524d5325b345c9e2d1affa0b1a47d | 022228249a7892e1efeaa11f0d445ba7cb115891 | /product/api/serializers_site.py | e33b258e12bf2a82306d4ebbae1db3f0c1ac77f0 | [] | no_license | angaradev/django-vue-interface | bacef985b9f9cbd4379992133168c9278b4f707b | 2e55d217b12bcda3c54acbb90cd3466bca955c5a | refs/heads/master | 2022-12-01T09:24:29.788368 | 2022-11-22T07:09:44 | 2022-11-22T07:09:44 | 241,351,571 | 0 | 2 | null | 2022-11-22T07:09:45 | 2020-02-18T12:08:14 | JavaScript | UTF-8 | Python | false | false | 3,167 | py | # -*- coding: utf-8 -*-
# from rest_framework_recursive.fields import RecursiveField
from product.models import Cross
from rest_framework import serializers
from product.models import (
Product,
ProductImage,
Category,
Units,
CarModel,
CarMake,
CarEngine,
Country,
BrandsDict,
ProductVideos,
Category,
ProductDescription,
ProductAttribute,
ProductAttributeName,
)
class CategorySerializer(serializers.ModelSerializer):
"""Serializer for all categories in flat mode"""
class Meta:
model = Category
fields = ("id", "name", "slug", "cat_image", "level", "parent")
class RecursiveField(serializers.Serializer):
def to_representation(self, value):
serializer = self.parent.parent.__class__(value, context=self.context) # type: ignore
return serializer.data
class CategoryTreeSerializer(serializers.ModelSerializer):
"""
This class give us caregories in tree wiew json for front end
"""
children = RecursiveField(many=True)
class Meta:
model = Category
fields = ("id", "name", "parent", "children", "slug")
class CategoryFirstLevelSerializer(serializers.ModelSerializer):
"""
First level Category Serializer
"""
class Meta:
model = Category
fields = ("id", "name", "parent", "children", "slug")
depth = 1
class MpttTestSerializer(serializers.ModelSerializer):
"""
First level Category Serializer
"""
# some_count = serializers.SerializerMethodField()
class Meta:
model = Category
fields = ("id", "name", "parent", "children", "slug")
depth = 1
def get_some_count(self, obj):
return obj.some_count
class ProductCrossSerializer(serializers.ModelSerializer):
"""
Serializer for getting product Crosses
"""
class Meta:
model = Cross
fields = ["cross"]
depth = 0
class GetSingleProductSerializer(serializers.ModelSerializer):
"""
Serializer for getting single product for site no authentication required
Also getting all related fields like images, videos, attributes, etc...
"""
product_cross = ProductCrossSerializer(many=True, read_only=True)
class Meta:
model = Product
fields = [
"id",
"name",
"name2",
"cat_number",
"slug",
"brand",
"unit",
"car_model",
"category",
"related",
"engine",
"product_image",
"product_video",
"product_description",
"product_cross",
"product_attribute",
"one_c_id",
]
depth = 2 # Dont change it All may craches
class GetCarModelSerializer(serializers.ModelSerializer):
"""
Getting car models required from UI
"""
class Meta:
model = CarModel
fields = "__all__"
depth = 1
class GetCarMakesSerializer(serializers.ModelSerializer):
"""
Car Makes All list API
"""
class Meta:
model = CarMake
fields = "__all__"
| [
"angara99@gmail.com"
] | angara99@gmail.com |
a270f8f89e241c78f56752aad9c4849cf7d66f8c | 15c016140f03bb476549fa4bf20d4f52077783a6 | /ecl/module_loader.py | faf271acdef9b3503ee8c94a5b92d083abcd53f0 | [
"Apache-2.0"
] | permissive | nttcom/eclsdk | 5cbb3f0067d260d257f7366d18e0f554d8f17cb3 | c2dafba850c4e6fb55b5e10de79257bbc9a01af3 | refs/heads/master | 2023-08-09T06:24:53.466570 | 2022-12-01T02:44:48 | 2022-12-01T02:44:48 | 86,663,654 | 5 | 15 | Apache-2.0 | 2023-09-06T02:39:01 | 2017-03-30T05:45:19 | Python | UTF-8 | Python | false | false | 989 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Load various modules for authorization and eventually services.
"""
from stevedore import extension
def load_service_plugins(namespace):
    """Discover and instantiate service plugins registered under *namespace*.

    Returns a dict mapping each plugin's service_type to the plugin object,
    with its `interface` attribute reset to None.
    """
    manager = extension.ExtensionManager(
        namespace=namespace,
        invoke_on_load=True,
    )
    services = {}
    for ext in manager:
        plugin = ext.obj
        plugin.interface = None
        services[plugin.service_type] = plugin
    return services
| [
"h.ohta@ntt.com"
] | h.ohta@ntt.com |
872bb43358fa79c7700cf6626f0e22339467ce80 | 43f78a4d51f441b4dbbc1a84537804123201a246 | /dataaccess/dataexporter.py | b764c20af14dd42baafd8605f4af24b491277d9b | [] | no_license | shmyhero/data-process | 28d5bfb27999cb0d462453d4663f01f43a649386 | 63dac65b41333d6e81bf32ecaa6533b28975c985 | refs/heads/master | 2021-03-22T00:11:51.373193 | 2019-10-22T06:27:28 | 2019-10-22T06:27:28 | 98,510,392 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | import os
from utils.logger import Logger
from common.configmgr import ConfigMgr
from common.pathmgr import PathMgr
from optionskewdao import OptionSkewDAO
from vixdao import VIXDAO
class DataExporter(object):
    """Writes skew and VIX datasets to the CSV files named in the output config."""
    def __init__(self, daily_raw_path=None):
        # daily_raw_path is accepted for interface compatibility but unused here.
        self.logger = Logger(__name__, PathMgr.get_log_path())
        self.config = ConfigMgr.get_output_config()
        self.output_dir = self.config['output_dir']
    def export_skew(self):
        """Dump option-skew data into the configured skew CSV file."""
        self.logger.info('Export skew data to csv...')
        target = os.path.join(self.output_dir, self.config['skew_file'])
        OptionSkewDAO().export_data_to_csv(target)
        self.logger.info('Export skew completed.')
    def export_vix(self):
        """Generate the full VIX dataframe and dump it to the configured CSV file."""
        self.logger.info('Export vix data to csv...')
        target = os.path.join(self.output_dir, self.config['vix_file'])
        VIXDAO().gen_all_vix().to_csv(target)
        self.logger.info('Export vix completed.')
if __name__ == '__main__':
    # Ad-hoc entry point: skew export left disabled, only VIX is exported.
    #DataExporter().export_skew()
    DataExporter().export_vix()
"elwin.luo@tradehero.mobi"
] | elwin.luo@tradehero.mobi |
e959b8a741f902b0414e1f40c0253495b77e7b9d | f6bba50fccc6fb0dae2f046193434cfb4b9d32d5 | /54/A.py | ebcd9d660e8171e1ea4e24847f3e19f299254c40 | [] | no_license | seven320/AtCoder | 4c26723d20004fe46ce118b882faabc05066841c | 45e301e330e817f1ace4be4088d3babe18588170 | refs/heads/master | 2021-11-22T22:57:32.290504 | 2021-10-24T09:15:12 | 2021-10-24T09:15:12 | 162,827,473 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | # encoding:utf-8
import copy
import random
import bisect #bisect_left これで二部探索の大小検索が行える
import fractions #最小公倍数などはこっち
import math
import sys
mod = 10**9+7
sys.setrecursionlimit(mod) # recursion limit is 1000 by default
A,B = map(int,input().split())
# One-card showdown: equal cards draw; otherwise card 1 appears to rank
# highest (it is checked before the plain numeric comparison) -- presumably
# the "1 beats everything" rule; confirm against the problem statement.
if A==B:
    ans = "Draw"
else:
    if A == 1:
        ans = "Alice"
    elif B == 1:
        ans = "Bob"
    elif A>B:
        ans = "Alice"
    else:
        ans = "Bob"
print(ans)
| [
"yosyuaomenw@yahoo.co.jp"
] | yosyuaomenw@yahoo.co.jp |
fcc98bedd3a0869fa161e82145679045adc49903 | 5fd4707876cac0a4ca3b14af9a936301c45b5599 | /10_序列的修改、散列和切片/fp_04_了解__getitem__和切片的行为.py | 49d7c6d581010d434f3c5d183befc6c6beade33c | [] | no_license | xuelang201201/FluentPython | 5b0d89bfc6ee1238ad77db9955ec7e8417b418b8 | 7cbedf7c780c2a9e0edac60484f2ad4c385e1dbd | refs/heads/master | 2022-04-26T21:49:16.923214 | 2020-04-27T01:27:50 | 2020-04-27T01:27:50 | 258,290,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | class MySeq:
    def __getitem__(self, index):
        # Echo back whatever key object Python built from the bracket syntax
        # (an int, a slice, or a tuple of these) so it can be inspected.
        return index # In this example, __getitem__ simply returns the value passed to it.
s = MySeq()
print(s[1]) # A single index -- nothing new here.
print(s[1:4]) # The 1:4 notation becomes slice(1, 4, None).
print(s[1:4:2]) # slice(1, 4, 2) means start at 1, stop at 4, step 2.
print(s[1:4:2, 9]) # Surprise: with a comma inside [], __getitem__ receives a tuple.
print(s[1:4:2, 7:9]) # The tuple may even contain multiple slice objects.
| [
"xuelang201201@gmail.com"
] | xuelang201201@gmail.com |
bbd4dc7142151d3c378e11906357393635ccc0eb | 01a682ab349df2690fd7ae6e918cb8e68b7aca44 | /train.py | b71c77dc926e16074db73763b62597aa42f95d5e | [
"MIT"
] | permissive | misads/torch_image_template | 4ecbeaa8c28764cab90b73101fb0309ae2856c8d | db55be6fcebdb6b0c5c739e505b8a7a2eb81c3c1 | refs/heads/master | 2020-09-22T09:18:48.737332 | 2020-01-07T13:36:47 | 2020-01-07T13:36:47 | 225,135,127 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,387 | py | """
PyTorch Image Template
Author: xuhaoyu@tju.edu.cn
File Structure:
.
├── train.py :Train and evaluation loop, errors and outputs visualization (Powered by TensorBoard)
├── test.py :Test
│
├── network
│ ├── Model.py :Define models, losses and parameter updating
│ └── *.py :Define networks
├── options
│ └── options.py :Define options
│
├── dataloader/ :Define Dataloaders
├── model_zoo :Commonly used models
├── utils
│ ├── misc_utils.py :System utils
│ └── torch_utils.py :PyTorch utils
│
├── checkpoints/<tag> :Trained checkpoints
├── logs/<tag> :Logs and TensorBoard event files
└── results/<tag> :Test results
Usage:
#### Train
python3 train.py --tag network_1 --epochs 800 --batch_size 16 --gpu_ids 1
#### Resume or Fine Tune
python3 train.py --load checkpoints/network_1 --which-epoch 500
#### test
python3 test.py --tag test_1 --dataset RESIDE
License: MIT
Last modified 12.24
"""
import os
import time
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision.transforms import transforms
from dataloader import dual_residual_dataset
from dataloader.image_folder import get_data_loader_folder
from eval import evaluate
from network import models
import pdb
from options import opt, logger
from utils.torch_utils import create_summary_writer, write_image, write_meters_loss, LR_Scheduler, tensor2im
import utils.misc_utils as utils
import torch
# Dataset locations for the RESIDE benchmark: ITS training set, SOTS synthetic
# validation set, and a folder of real-world validation images.
data_name = 'RESIDE'
data_root = './datasets/' + data_name + '/ITS/'
imlist_pth = './datasets/' + data_name + '/indoor_train_list.txt'
valroot = "./datasets/" + data_name + "/SOTS/nyuhaze500/"
val_list_pth = './datasets/' + data_name + '/sots_test_list.txt'
realroot = "./datasets/" + data_name + "/REAL/"
real_list_pth = './datasets/' + data_name + '/real.txt'
# save_root: where model checkpoints are stored.
# log_root: where logs / TensorBoard event files are written.
save_root = os.path.join(opt.checkpoint_dir, opt.tag)
log_root = os.path.join(opt.log_dir, opt.tag)
utils.try_make_dir(save_root)
utils.try_make_dir(log_root)
# In debug mode, save/evaluate/log every epoch so problems surface immediately.
if opt.debug:
    opt.save_freq = 1
    opt.eval_freq = 1
    opt.log_freq = 1
# Transform: convert loaded images to tensors.
transform = transforms.ToTensor()
# Dataloader
max_size = 9999999  # effectively "use the full training list"
if opt.debug:
    max_size = opt.batch_size * 10  # tiny subset for fast debug runs
train_dataset = dual_residual_dataset.ImageSet(data_root, imlist_pth,
                                               transform=transform, is_train=True,
                                               with_aug=opt.data_aug, crop_size=opt.crop, max_size=max_size)
# NOTE(review): shuffle=False on the training loader looks deliberate here --
# confirm whether shuffling was intentionally disabled.
dataloader = DataLoader(train_dataset, batch_size=opt.batch_size, shuffle=False, num_workers=5)
######################
# Val dataset
######################
val_dataset = dual_residual_dataset.ImageSet(valroot, val_list_pth,
                                             transform=transform)
val_dataloader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=1)
######################
# Real (val) dataset
######################
real_dataloader = get_data_loader_folder(realroot, 1, train=False, num_workers=1, crop=False)
# Pick the model class registered under opt.model, falling back to 'default'.
if opt.model in models:
    Model = models[opt.model]
else:
    Model = models['default']
model = Model(opt)
# if len(opt.gpu_ids):
#     model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
model = model.cuda(device=opt.device)
# optimizer_G, optimizer_D = model.module.optimizer_G, model.module.optimizer_D
# optimizer_G = model.g_optimizer
# Resume from opt.which_epoch when given, otherwise train from scratch.
start_epoch = opt.which_epoch if opt.which_epoch else 0
model.train()
# Start training
print('Start training...')
# Step bookkeeping used for the ETA estimate inside the training loop.
start_step = start_epoch * len(dataloader)
global_step = start_step
total_steps = opt.epochs * len(dataloader)
start = time.time()
writer = create_summary_writer(log_root)
scheduler = None
if opt.lr_schedular is not None:
    scheduler = LR_Scheduler(opt.lr_schedular, opt.lr, opt.epochs, len(dataloader), warmup_epochs=opt.warmup_epochs)
# Main training loop: one generator update per batch, plus periodic image
# logging, checkpointing and evaluation.
for epoch in range(start_epoch, opt.epochs):
    for iteration, data in enumerate(dataloader):
        # NOTE(review): removed a stray `break` that was the first statement
        # of this loop body (debug leftover) -- it made the entire training
        # step below unreachable.
        global_step += 1
        ######################
        # lr_schedular
        ######################
        if opt.lr_schedular is not None:
            scheduler(model.g_optimizer, iteration, epoch)
        # Throughput so far (steps/sec) -> rough ETA for the remaining steps.
        rate = (global_step - start_step) / (time.time() - start)
        remaining = (total_steps - global_step) / rate
        img, label, trans, _ = data
        img_var = Variable(img, requires_grad=False).cuda(device=opt.device)
        label_var = Variable(label, requires_grad=False).cuda(device=opt.device)
        trans_var = Variable(trans, requires_grad=False).cuda(device=opt.device)
        # Cleaning noisy images
        # cleaned, A, t = model.cleaner(img_var)
        fine, coarse_1, coarse_2, trans_1, trans_2 = model.update_G(img_var, label_var, trans_var)
        # Jt = torch.clamp(cleaned * t, min=.01, max=.99)
        # airlight = torch.clamp(A * (1-t), min=.01, max=.99)
        # Dump the first few sample batches to TensorBoard every log_freq epochs.
        if epoch % opt.log_freq == opt.log_freq - 1 and iteration < 5:
            write_image(writer, 'train/%d' % iteration, '0_input', tensor2im(img), epoch)
            write_image(writer, 'train/%d' % iteration, '1_fine', tensor2im(fine), epoch)
            write_image(writer, 'train/%d' % iteration, '2_coarse_1', tensor2im(coarse_1), epoch)
            write_image(writer, 'train/%d' % iteration, '3_coarse_2', tensor2im(coarse_2), epoch)
            # write_image(writer, 'train/%d' % iteration, '4_trans_1', tensor2im(coarse_2), epoch)
            # write_image(writer, 'train/%d' % iteration, '5_trans_2', tensor2im(coarse_2), epoch)
            write_image(writer, 'train/%d' % iteration, '8_target', tensor2im(label_var), epoch)
            write_image(writer, 'train/%d' % iteration, '9_trans', tensor2im(trans_var), epoch)
        # Progress bar with running losses and the ETA computed above.
        pre_msg = 'Epoch:%d' % epoch
        msg = '(loss) %s ETA: %s' % (str(model.avg_meters), utils.format_time(remaining))
        utils.progress_bar(iteration, len(dataloader), pre_msg, msg)
    # Per-epoch bookkeeping: averaged losses to TensorBoard and the log file.
    write_meters_loss(writer, 'train', model.avg_meters, epoch)
    logger.info('Train epoch %d, (loss) ' % epoch + str(model.avg_meters))
    # Save a checkpoint every save_freq epochs and on the final epoch.
    if epoch % opt.save_freq == opt.save_freq - 1 or epoch == opt.epochs-1:
        model.save(epoch)
    if epoch % opt.eval_freq == (opt.eval_freq - 1):
        model.eval()
        # evaluate(model.cleaner, val_dataloader, epoch + 1, writer)
        evaluate(model.cleaner, real_dataloader, epoch + 1, writer, 'SINGLE')
        model.train()
    # NOTE(review): removed an unconditional pdb.set_trace() here (debug
    # leftover) -- it halted training at the end of every epoch.
    # if epoch in [700, 1400]:
    #     for param_group in model.g_optimizer.param_groups:
    #         param_group['lr'] *= 0.1
"523131316@qq.com"
] | 523131316@qq.com |
24eca094d917207229a1b23a1435763f5d8d962d | a84e1ed67ef2592cf22f7d19cdddaf16700d6a8e | /graveyard/web/VNET/branches/vnf/content/actors/obsolete/vaspPhon.odb | 0b915dda2a74f0472b61de90044a53085518ebfd | [] | no_license | danse-inelastic/inelastic-svn | dda998d7b9f1249149821d1bd3c23c71859971cc | 807f16aa9510d45a45360d8f59f34f75bb74414f | refs/heads/master | 2016-08-11T13:40:16.607694 | 2016-02-25T17:58:35 | 2016-02-25T17:58:35 | 52,544,337 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 558 | odb | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from vnf.components.VaspPhon import VaspPhon
def actor():
return VaspPhon()
# version
__id__ = "$Id: greet.odb,v 1.1.1.1 2006-11-27 00:09:15 aivazis Exp $"
# End of file
| [
"jbrkeith@gmail.com"
] | jbrkeith@gmail.com |
a5678355479cffd9ab84a011fd49e713ecd02ad5 | 177c090fffc3baba54db88fd51f4f21c74f6acb3 | /manage.py | 334c2decfb81708d1bd7a6599be5ddd095d697d7 | [] | no_license | Deep-sea-boy/iHome | d68dcd196c204d63766fc75bdc214fbd8fa6177c | 0cb9a23e864e9b8b82126db6e6bedf62a52b73d7 | refs/heads/master | 2020-03-11T02:52:39.851372 | 2018-04-23T02:31:10 | 2018-04-23T02:31:10 | 130,633,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | #coding:utf-8
from flask_migrate import Migrate,MigrateCommand
from flask_script import Manager
from iHome import get_app,db
app = get_app('dev')
# # 在迁移时让app和db建⽴关联
Migrate(app,db)
manager = Manager(app)
# 将迁移脚本添加到脚本管理器
manager.add_command('db',MigrateCommand)
if __name__ == "__main__":
print app.url_map
manager.run()
| [
"youremail@example.com"
] | youremail@example.com |
b29ec90a8c342083bb1c9315e24d1a38f21f3c5d | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Projects/twilio/build/lib/twilio/rest/conversations/v1/configuration/__init__.py | 662c9c24e4e32fe46eca6dd20efe791d972d6ff9 | [
"LicenseRef-scancode-other-permissive"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:462dd494db244faa218d38ab9f086e0defd945bc165b5d99f3410b331d22010e
size 11015
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
d1dd441e212f6b5630bf450ef5d17cd930f65e8f | 0e94b21a64e01b992cdc0fff274af8d77b2ae430 | /function/最大逆向匹配.py | 7bbeb95f5295f34cf11bb6d1a625805a4c92e954 | [] | no_license | yangnaGitHub/LearningProcess | 1aed2da306fd98f027dcca61309082f42b860975 | 250a8b791f7deda1e716f361a2f847f4d12846d3 | refs/heads/master | 2020-04-15T16:49:38.053846 | 2019-09-05T05:52:04 | 2019-09-05T05:52:04 | 164,852,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 8 11:37:30 2019
@author: natasha_yang
@e-mail: ityangna0402@163.com
"""
#分词方法,逆向最大匹配
class RMM():
    """Reverse maximum matching (RMM) word segmenter.

    Scans the input from right to left, greedily taking the longest
    dictionary word that ends at the current position. Unmatched characters
    are emitted one at a time. Segments are returned in reverse
    (right-to-left) order, matching the scan direction.
    """
    def __init__(self, dict_path):
        # Load the vocabulary (one word per line, UTF-8) and remember the
        # longest entry length, which bounds the match window.
        self.dictionary = set()
        self.maximum = 0
        with open(dict_path, 'r', encoding='utf-8') as fd:
            for raw in fd:
                word = raw.strip()
                if not word:
                    continue
                self.dictionary.add(word)
                self.maximum = max(self.maximum, len(word))

    def cut(self, cut_str):
        """Segment cut_str; returns a list of segments in reverse order."""
        segments = []
        end = len(cut_str)
        while end > 0:
            matched = None
            # Try window lengths from the longest dictionary entry down to 1.
            for length in range(self.maximum, 0, -1):
                start = max(0, end - length)
                candidate = cut_str[start:end]
                if candidate in self.dictionary:
                    matched = candidate
                    end = start
                    break
            if matched is None:
                # No dictionary word ends here: emit the single character.
                segments.append(cut_str[end - 1])
                end -= 1
            else:
                segments.append(matched)
        return segments
"ityangna0402@163.com"
] | ityangna0402@163.com |
0c42f661e88b5e9e2c9be1f70ac0b4dd7b1fbd0d | 08296e4f0139bd23ec73836996e3637eda666a68 | /modelsFormsDemo/modelforms/forms.py | ad6e17a28c64283db488cff139dd2c04020dd4f9 | [] | no_license | 146789/projects | 4589efd8f2f11a1beb487ef06a7556d49ed95c72 | a288e39cd088ea2717017285fd68d8b42cf4d493 | refs/heads/main | 2023-02-02T13:54:34.360222 | 2020-12-19T08:16:11 | 2020-12-19T08:16:11 | 322,798,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | from django import forms
from modelforms.models import Project
class projectForm(forms.ModelForm):
    """ModelForm that auto-generates one form field per Project model field."""
    class Meta:
        # Expose every field of the Project model on the form.
        model = Project
        fields = '__all__'
| [
"34913079+146789@users.noreply.github.com"
] | 34913079+146789@users.noreply.github.com |
d78b64e291c878941f42ce614f2374cf3d1e5db0 | 060967fa3e6e390ac0504172e6dea8421ffb9d98 | /2022/python2022/tests/test_day05.py | 6e3cdc045d3b37efe23b317fb4ef6d05472cd6dd | [] | no_license | mreishus/aoc | 677afd18521b62c9fd141a45fec4b7bc844be259 | e89db235837d2d05848210a18c9c2a4456085570 | refs/heads/master | 2023-02-22T12:00:52.508701 | 2023-02-09T04:37:50 | 2023-02-09T04:39:44 | 159,991,022 | 16 | 3 | null | 2023-01-05T10:00:46 | 2018-12-01T22:00:22 | Python | UTF-8 | Python | false | false | 609 | py | #!/usr/bin/env python3
"""
Test Day05.
"""
import unittest
from aoc.day05 import Day05
class TestDay05(unittest.TestCase):
    """Pin the known-good Day05 answers for both the sample and full inputs."""
    def test_part1(self):
        """part1 must reproduce the accepted answers for both input files."""
        self.assertEqual(Day05.part1("../inputs/05/input_small.txt"), "CMZ")
        self.assertEqual(Day05.part1("../inputs/05/input.txt"), "RNZLFZSJH")
    def test_part2(self):
        """part2 must reproduce the accepted answers for both input files."""
        self.assertEqual(Day05.part2("../inputs/05/input_small.txt"), "MCD")
        self.assertEqual(Day05.part2("../inputs/05/input.txt"), "CNSFCGJSM")
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
| [
"mreishus@users.noreply.github.com"
] | mreishus@users.noreply.github.com |
254f2622c359329e542c446d230344005908e2cf | 0bd7c1f7bf6da5ef92b9013e1d913140f0249dfa | /cecilia-python/company-title/alibaba/2020/LongestStringNote.py | bb33251577054be3950acdf1f824040773ccc3af | [] | no_license | Cecilia520/algorithmic-learning-leetcode | f1fec1fae71c4cf7410122f5ce969e829f451308 | 32941ee052d0985a9569441d314378700ff4d225 | refs/heads/master | 2022-05-02T03:00:57.505672 | 2022-03-19T09:51:28 | 2022-03-19T09:51:28 | 229,673,810 | 7 | 1 | null | 2022-03-19T09:34:57 | 2019-12-23T04:04:04 | Python | UTF-8 | Python | false | false | 2,909 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : LongestStringNote.py
@Contact : 70904372cecilia@gmail.com
@License : (C)Copyright 2019-2020
@Modify Time @Author @Version @Desciption
------------ ------- -------- -----------
2020/3/30 22:09 cecilia 1.0 最长字符串音符统计
问题描述:
小强最近喜欢弹钢琴,一段旋律中的每个音符都可以用一个小写英文字母表示。
当组成一段段旋律的字符ASCII码是非递减的,旋律被称为是高昂的,例如aaa,bcd。
现在小强已经学会了n段高昂的旋律,他想利用他们拼接出一个尽可能长的高昂的旋律,问最长长度是多少?
输入描述:n行每行一个字符串,保证每个字符串中的字符的ASCII是非递减的。n在[1, 1000,000],
保证所有字符串长度之和不超过1000,000且仅由小写字母构成。
示例1:
> Input:
4
aaa
bcd
zzz
bcdef
> OutPut: 11。将1,4,3段字符拼接在一起,长度为11。
示例2:
输入:
4
abghkl
behklmmm
hopqsttz
yzzz
输出:
12
"""
class Solution:
def getMaxLongestNote(self, n, notes):
"""
计算最长上升的音符串的拼接长度
思路方法:双指针法。创建两个指针,一个指针指向当前的音符的首字母索引位置,一个指针指向前一个字符的末尾字符的索引位置。
其实题目是给定了n个上升音符串,如果题目给定的音符不是满足上升非递减规律,另外还需要对每个音符串做判断的,
但是这里已经说明了每个音符是满足非递减规律的,因此不需要考虑内部的,只需要考虑字符串之间的关系。
:param n: 音符集合长度
:param notes: 音符集合
:return:
算法分析:时间复杂度O(NlogN),空间复杂度O(N)
"""
if n < 0:
return 0
# 对所有的字符集合中的首字母进行排序
notes = sorted(notes)
#
print(notes)
# 定义状态,dp[i]代表前i个音符组成的最长长度
dp = [0] * n
dp[0] = len(notes[0])
maxL = 0
maxres = 0
curentL = 0
for i in range(1, n):
for j in range(i):
# 如果当前音符的首字符大于前一个音符的末尾字符
if notes[i][0] >= notes[j][-1]:
# curentL = len(notes[i]) + len(notes[j])
dp[i] = max(dp[j], len(notes[j]) + len(notes[i]))
dp[i] = maxL + len(notes[i])
maxres = max(maxres, dp[i])
print(dp)
return max(dp)
if __name__ == '__main__':
n = int(input())
notes = []
for i in range(n):
notes.append(input())
# print(notes)
s = Solution()
res = s.getMaxLongestNote(n, notes)
print(res)
| [
"cc15572018516@163.com"
] | cc15572018516@163.com |
1448d856879ce5cf3724a82a93410eb12c35ce47 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_abominating.py | cf50c6ad0533dcdabbfe02cae9118aac20902ad1 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py |
#calss header
class _ABOMINATING():
def __init__(self,):
self.name = "ABOMINATING"
self.definitions = abominate
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['abominate']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
aca024609ff1a5d1f0412312402ccbc8796917f4 | c191c82ca5b67d2caf447e16e6d2368404fb6730 | /collegeproject/faculty/forms.py | 03a3c44e6c8587059cf6254f5dcdbfbf8acccb71 | [] | no_license | arunthankachan07/DjangoProjects | 070457fe0afeaea0633ab674b311f220fa6bec83 | 08bd1925ff2d882876b79bc0d8820f033dde3bb3 | refs/heads/master | 2023-04-22T07:26:19.444894 | 2021-05-10T06:36:38 | 2021-05-10T06:36:38 | 361,839,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | from django import forms
class FacultyRegistrationForm(forms.Form):
firstname=forms.CharField(max_length=120)
username=forms.CharField(max_length=120)
password=forms.CharField(max_length=100)
class FacultyLoginForm(forms.Form):
username=forms.CharField(max_length=120)
password=forms.CharField(max_length=100)
| [
"arunkaripuzha07@gmail.com"
] | arunkaripuzha07@gmail.com |
8f900e4a807812f900ab27f85d1512892a94a0bb | 6c189e7654d640dbf56c1481bb2439dbb3353402 | /IPython/html/tests/test_external.py | 00bf4d2b83b4b1bb4b56c956e748976034367ebb | [
"BSD-3-Clause"
] | permissive | zakandrewking/ipython | c39ba65ae8b7f323f061a591906144569a5a2e54 | 3e4d535825d60405fbe8d094b455848d59489cfa | refs/heads/master | 2020-05-05T06:07:01.968458 | 2014-05-07T16:35:37 | 2014-05-07T16:35:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | """Test external"""
#-----------------------------------------------------------------------------
# Copyright (C) 2014 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Test functions
#-----------------------------------------------------------------------------
| [
"zaking17@gmail.com"
] | zaking17@gmail.com |
7acea09ca55ff4c8012867d77a44b7dd143eaeab | 1fe8d4133981e53e88abf633046060b56fae883e | /venv/lib/python3.8/site-packages/tensorflow/python/ops/distributions/kullback_leibler.py | b8d069bc79d770fe4193fbaf947981a0394075e9 | [] | no_license | Akira331/flask-cifar10 | 6c49db8485038731ce67d23f0972b9574746c7a7 | 283e7a2867c77d4b6aba7aea9013bf241d35d76c | refs/heads/master | 2023-06-14T16:35:06.384755 | 2021-07-05T14:09:15 | 2021-07-05T14:09:15 | 382,864,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:c0b7ee140903b8fcf26aad781cc776aa0a35cbc9f9efcc1f0b65c3e7318bcd6a
size 7820
| [
"business030301@gmail.com"
] | business030301@gmail.com |
b019fa4745f8434c4b9e8976e21d77d5308b1bc0 | 2caa47f0bdb2f03469a847c3ba39496de315d992 | /Contest/ABC040/c/main.py | 9e5d1b18c640488baf621790690b16c39aaaaa6c | [
"CC0-1.0"
] | permissive | mpses/AtCoder | 9023e44885dc67c4131762281193c24b69d3b6da | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | refs/heads/master | 2023-03-23T17:00:11.646508 | 2021-03-20T12:21:19 | 2021-03-20T12:21:19 | 287,489,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | #!/usr/bin/env python3
n, *a = map(int, open(0).read().split())
INF = float("inf")
dp = [INF] * n
dp[0] = 0
for i in range(1, n):
dp[i] = min(dp[i - p] + abs(a[i - p] - a[i]) for p in [1, 2])
print(dp[n - 1]) | [
"nsorangepv@gmail.com"
] | nsorangepv@gmail.com |
895c65d6ec11f32c7cb39d127fb4f1d7bfac65ce | 84741b2103b702791076e65679b8ab89a132ac3a | /venelin/wsgi.py | 7f14d3ffdae9d8169abafb8e6bacd57b5873b0c9 | [] | no_license | Yaqdata/venko-django-site | 97b78cbeaa9676fb3d230f47036746f1e8a342d6 | eefdf12c9bc6a1873c4f451e1023d9ec691ce300 | refs/heads/master | 2021-01-15T12:14:33.573809 | 2013-12-09T22:12:06 | 2013-12-09T22:12:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | import os
# Point Django at the project's settings module before any Django machinery
# that needs settings is imported.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "venelin.settings")
# This application object is used by the development server
# as well as any WSGI server configured to use this file.
# NOTE(review): instantiating WSGIHandler() directly is the pre-Django-1.4
# style; newer Django uses django.core.wsgi.get_wsgi_application() -- confirm
# the project's Django version before changing.
import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler()
| [
"vkstoykov@gmail.com"
] | vkstoykov@gmail.com |
b29308f7c9eaaf48077bcadb3fae69b3346e1ec1 | 33bae2dead37bcd22460e05de67531cfdacfcccb | /wis/urls.py | 9f2729796b8a3e3576da8fb4661d1e77a77948b8 | [] | no_license | wis-auralov/wis | 5cfb0dba9c74d8d944b770d9b706de91fe01440d | 40d2e187b448a42008b8db6f00cf17818fff2444 | refs/heads/master | 2021-01-20T07:36:10.174053 | 2017-04-29T18:46:41 | 2017-04-29T18:46:41 | 90,016,639 | 0 | 0 | null | 2017-05-02T09:43:22 | 2017-05-02T09:43:22 | null | UTF-8 | Python | false | false | 848 | py | from django.conf.urls import url, include
from django.contrib import admin
from clients.views import ClientList, ClientDetail, ClientDelete, ClientCreate, ClientScores, download_xlsx
# URL routes: admin site, pk-based client detail/delete, client create/scores,
# an xlsx download view, the client list at the site root, and the REST API
# (plus DRF's login views) under /api/ and /api-auth/.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^client_detail/(?P<pk>[0-9]+)/$', ClientDetail.as_view(), name='client_detail'),
    url(r'^client_delete/(?P<pk>[0-9]+)/$', ClientDelete.as_view(), name='client_delete'),
    url(r'^client_create/$', ClientCreate.as_view(), name='client_create'),
    url(r'^client_scores/$', ClientScores.as_view(), name='client_scores'),
    url(r'^download_file/$', download_xlsx, name='download_file'),
    url(r'^$', ClientList.as_view(), name='client_list'),
    url(r'^api/', include('clients.api.urls', namespace="api")),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]
| [
"lse1983@mail.ru"
] | lse1983@mail.ru |
50da310865ec2f297b42c59a7c926b4a47b868cb | ddd5109d8ce1832423448d7f4932255118d56e36 | /apps/gigs/models.py | 5fffeca65ca75cda079ff70d879f2efb2adac79b | [] | no_license | amyth/gigzag | fe5b52677db2ba17083d4fdb059c0a12e6b3646d | 1569fd40aa02c8b7921ad6a7f8f4e8b9ad4cf652 | refs/heads/master | 2021-01-21T07:03:28.397118 | 2017-02-27T12:01:02 | 2017-02-27T12:01:02 | 83,305,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,769 | py | import os
import uuid
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import GenericRelation
from django.core.files import File
from django.db import models
from django.db.models import Q
from django_comments.models import Comment
from liked.models import Like
from moviepy.editor import VideoFileClip
from apps.gigs import choices
from core.models import SluggedModel
from utils.choices import GIG_PRIVACY_LEVELS
# Choices for Gig.gig_type (stored as an int; label shown in forms/admin).
GIG_TYPES = (
    (1, 'Public Gig'),
    (2, 'Home Gig'),
)
# Choices for Gig.status; new gigs default to 1 (Active).
GIG_STATUSES = (
    (0, 'Inactive'),
    (1, 'Active'),
)
class GigLocation(models.Model):
    """A venue: free-form address, optional geo coordinates, and a city code."""
    address = models.TextField()
    # Coordinates are optional; 6 decimal places of precision.
    latitude = models.DecimalField(max_digits=9, decimal_places=6, blank=True,
                                   null=True)
    longitude = models.DecimalField(max_digits=9, decimal_places=6, blank=True,
                                    null=True)
    city = models.CharField(max_length=5, choices=choices.CITIES)
    def __unicode__(self):
        # Python 2 display string: "<address>, <city label>".
        return u'%s, %s' % (self.address, self.get_city_display())
class Gig(SluggedModel):
    """A gig/event listing created by a user.

    save() normalizes any pasted YouTube URL into its embeddable form.
    (Python 2 codebase: note __unicode__ and the u'' literals.)
    """
    ## Core fields
    description = models.TextField(blank=True, null=True)
    gigtime = models.DateTimeField(blank=True, null=True)
    gig_type = models.IntegerField(choices=GIG_TYPES, default=1)
    no_of_pax = models.IntegerField(default=0)
    location = models.ForeignKey(GigLocation, blank=True, null=True)
    artists = models.ManyToManyField("accounts.Artist", blank=True)
    tags = models.ManyToManyField("tags.Tag", blank=True)
    cover = models.ImageField(upload_to="images/cover/", null=True, blank=True)
    video = models.FileField(upload_to="video/cover", null=True, blank=True)
    youtube_link = models.URLField(null=True, blank=True)
    ## activity fields
    likes = GenericRelation(Like)
    comments = GenericRelation(Comment, object_id_field='object_pk')
    ## contact fields
    band_name = models.CharField(max_length=150)
    phone = models.CharField(max_length=14, blank=True, null=True)
    email = models.EmailField(blank=True, null=True)
    ## settings and privacy
    privacy = models.IntegerField(choices=GIG_PRIVACY_LEVELS, default=1)
    ## Backend fields
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    status = models.IntegerField(choices=GIG_STATUSES, default=1)
    moderated = models.BooleanField(default=False)
    created_by = models.ForeignKey(User, related_name='created_gigs')
    rsvp = models.ManyToManyField(User, related_name='went_to', blank=True)
    def __unicode__(self):
        return u'%s' % self.title
    def save(self, *args, **kwargs):
        # Normalize youtu.be short links and watch?v= links to the /embed/
        # form, then drop any extra query parameters after the first '&'.
        url = self.youtube_link
        if self.youtube_link:
            if 'youtu.be' in url:
                vid = url.split('/')[-1]
                url = "https://www.youtube.com/embed/%s" % vid
            elif 'watch?v=' in url:
                url = url.replace('watch?v=', "embed/")
            self.youtube_link = url.split('&')[0]
        return super(Gig, self).save(*args, **kwargs)
    @property
    def human_time(self):
        # "dd/mm/YYYY, HH:MM AM/PM" rendering of gigtime.
        human_date = self.gigtime.date().strftime('%d/%m/%Y')
        human_time = self.gigtime.strftime('%I:%M %p')
        return '%s, %s' % (human_date, human_time)
    @property
    def detail_link(self):
        return "http://gigzag.in/#/gigs/%s" % self.id
    @property
    def host_history(self):
        # Up to five titles of the host's other moderated gigs.
        gigs = self.created_by.created_gigs.filter(~Q(id=self.id), moderated=True)
        return ",".join([gig.title for gig in gigs][:5])
    @property
    def v_youtube_link(self):
        return self.youtube_link.replace('embed', 'v')
    @property
    def v_youtube_image(self):
        # Thumbnail URL derived from the stored embed link's video id.
        # NOTE(review): removed a stray Python 2 debug statement
        # (`print iurl % vid`) that polluted stdout on every access.
        iurl = "https://i.ytimg.com/vi/%s/sddefault.jpg"
        vid = self.youtube_link.split('/')[-1]
        return iurl % vid
| [
"aroras.official@gmail.com"
] | aroras.official@gmail.com |
aaa4bf02509e564a891d5a05ae8f6daf2a153f8a | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_7/grcdea001/question1.py | dba5df6eec999efec76cbe617d7ebd465574d3a3 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | """Program to print out a list of strings without duplicates, assignment 7 question 1
Dean Gracey
27 April 2014"""
# Collect user-entered strings until the sentinel "DONE", keeping only the
# first occurrence of each string, then print them in entry order.
entry = input("Enter strings (end with DONE):\n")
seen = []
while entry != "DONE":
    if entry not in seen:
        seen.append(entry)
    entry = input("")
print("\nUnique list:")
for item in seen:
    print(item)
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
054f71754967e78023ad8b59df9cff86e239ca30 | eb71f3494fb00708e6d5e8c332f27c4026063339 | /il2fb/ds/events/definitions/version.py | fac6811fd60d1fe2728ff87ecfeae3f0adc46026 | [
"MIT"
] | permissive | IL2HorusTeam/il2fb-ds-events | dab5fc7ec0b1389c5fc1e975f9134e6ffe35bdee | fb71ffbce63ac1b3b3a27263250e021f9543ba9f | refs/heads/main | 2023-02-01T16:29:18.565016 | 2020-12-22T17:53:45 | 2020-12-22T17:53:45 | 310,578,022 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | VERSION_SUFFIX = "a0"
# Patch component with the pre-release suffix appended (e.g. "0a0").
VERSION_PATCH = "0" + (VERSION_SUFFIX or "")
VERSION_MINOR = "0"
VERSION_MAJOR = "1"
# (major, minor, patch) tuple of strings, analogous to sys.version_info.
VERSION_INFO = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
# Dotted version string, e.g. "1.0.0a0".
VERSION = ".".join([str(x) for x in VERSION_INFO])
| [
"oblovatniy@gmail.com"
] | oblovatniy@gmail.com |
aa324cc4383ae2314514c934cf5ae4d5f72ade34 | c7a31023a11b376e543e41b00212dc7eca07b386 | /cryptofeed/kraken/kraken.py | 5bb79d6c9f884c708f916bbad8370bd7e22906d4 | [
"LicenseRef-scancode-warranty-disclaimer",
"Python-2.0"
] | permissive | orens77/cryptofeed | 5d2123f17745be6bf2eb7bf5a96547006e16d083 | d155b30eadb015c7167141a3dbbb8d42af6ae4f0 | refs/heads/master | 2020-04-25T17:45:22.849820 | 2019-06-04T14:51:11 | 2019-06-04T14:51:11 | 172,960,015 | 0 | 0 | NOASSERTION | 2019-02-27T17:31:47 | 2019-02-27T17:31:46 | null | UTF-8 | Python | false | false | 6,183 | py | '''
Copyright (C) 2017-2019 Bryant Moscon - bmoscon@gmail.com
Please see the LICENSE file for the terms and conditions
associated with this software.
'''
import json
import logging
from decimal import Decimal
import time
from sortedcontainers import SortedDict as sd
from cryptofeed.feed import Feed
from cryptofeed.defines import TRADES, BUY, SELL, BID, ASK, TICKER, L2_BOOK, KRAKEN
from cryptofeed.standards import pair_exchange_to_std
LOG = logging.getLogger('feedhandler')
class Kraken(Feed):
    """Websocket feed handler for the Kraken exchange.

    Handles the public 'trade', 'ticker' and 'book' channels and normalizes
    their payloads into the library's standard callbacks.
    """
    id = KRAKEN
    def __init__(self, pairs=None, channels=None, callbacks=None, depth=10, **kwargs):
        # depth: number of price levels requested per book side; also used as
        # the local cap when pruning the book in _book().
        super().__init__('wss://ws.kraken.com', pairs=pairs, channels=channels, callbacks=callbacks, **kwargs)
        self.book_depth = depth
    def __reset(self):
        # Per-connection state: order books keyed by pair, and Kraken's
        # numeric channel id -> (channel name, standardized pair) mapping.
        self.l2_book = {}
        self.channel_map = {}
    async def subscribe(self, websocket):
        """Send one subscribe request per channel (channel ids arrive later
        via subscriptionStatus messages; see message_handler)."""
        self.__reset()
        if self.config:
            # Per-channel pair configuration was supplied.
            for chan in self.config:
                sub = {"name": chan}
                if 'book' in chan:
                    # Book subscriptions must carry the requested depth.
                    sub['depth'] = self.book_depth
                await websocket.send(json.dumps({
                    "event": "subscribe",
                    "pair": self.config[chan],
                    "subscription": sub
                }))
        else:
            # Same channel list for every configured pair.
            for chan in self.channels:
                sub = {"name": chan}
                if 'book' in chan:
                    sub['depth'] = self.book_depth
                await websocket.send(json.dumps({
                    "event": "subscribe",
                    "pair": self.pairs,
                    "subscription": sub
                }))
    async def _trade(self, msg, pair):
        """
        Forward each trade in the message to the TRADES callback.

        example message:
        [1,[["3417.20000","0.21222200","1549223326.971661","b","l",""]]]
        channel id, price, amount, timestamp, size, limit/market order, misc
        """
        for trade in msg[1]:
            price, amount, timestamp, side, _, _ = trade
            await self.callbacks[TRADES](feed=self.id,
                                         pair=pair,
                                         side=BUY if side == 'b' else SELL,
                                         amount=Decimal(amount),
                                         price=Decimal(price),
                                         order_id=None,  # not provided in Kraken's trade payload
                                         timestamp=float(timestamp))
    async def _ticker(self, msg, pair):
        """
        Forward the best bid/ask to the TICKER callback.

        [93, {'a': ['105.85000', 0, '0.46100000'], 'b': ['105.77000', 45, '45.00000000'], 'c': ['105.83000', '5.00000000'], 'v': ['92170.25739498', '121658.17399954'], 'p': ['107.58276', '107.95234'], 't': [4966, 6717], 'l': ['105.03000', '105.03000'], 'h': ['110.33000', '110.33000'], 'o': ['109.45000', '106.78000']}]
        channel id, asks: price, wholeLotVol, vol, bids: price, wholeLotVol, close: ...,, vol: ..., VWAP: ..., trades: ..., low: ...., high: ..., open: ...
        """
        await self.callbacks[TICKER](feed=self.id,
                                     pair=pair,
                                     bid=Decimal(msg[1]['b'][0]),
                                     ask=Decimal(msg[1]['a'][0]))
    async def _book(self, msg, pair):
        """Apply a book snapshot or incremental update, then fire the book
        callback with the per-side delta."""
        delta = {BID: [], ASK: []}
        msg = msg[1:]  # strip the leading channel id
        if len(msg) == 1 and 'as' in msg[0]:
            # Snapshot
            self.l2_book[pair] = {BID: sd({
                Decimal(update[0]): Decimal(update[1]) for update in msg[0]['bs']
            }), ASK: sd({
                Decimal(update[0]): Decimal(update[1]) for update in msg[0]['as']
            })}
            await self.book_callback(pair, L2_BOOK, True, delta, time.time())
        else:
            for m in msg:
                for s, updates in m.items():
                    side = BID if s == 'b' else ASK
                    for update in updates:
                        price, size, _ = update
                        price = Decimal(price)
                        size = Decimal(size)
                        if size == 0:
                            # Per Kraken's technical support
                            # they deliver erroneous deletion messages
                            # periodically which should be ignored
                            if price in self.l2_book[pair][side]:
                                del self.l2_book[pair][side][price]
                                delta[side].append((price, 0))
                        else:
                            delta[side].append((price, size))
                            self.l2_book[pair][side][price] = size
            # Trim each side back to the subscribed depth, emitting deletions
            # for the levels that fall off the worst edge.
            for side in (BID, ASK):
                while len(self.l2_book[pair][side]) > self.book_depth:
                    del_price = self.l2_book[pair][side].items()[0 if side == BID else -1][0]
                    del self.l2_book[pair][side][del_price]
                    delta[side].append((del_price, 0))
            await self.book_callback(pair, L2_BOOK, False, delta, time.time())
    async def message_handler(self, msg):
        """Route a raw websocket message to the matching channel handler."""
        msg = json.loads(msg, parse_float=Decimal)
        if isinstance(msg, list):
            # Data messages are lists whose first element is the channel id.
            if self.channel_map[msg[0]][0] == 'trade':
                await self._trade(msg, self.channel_map[msg[0]][1])
            elif self.channel_map[msg[0]][0] == 'ticker':
                await self._ticker(msg, self.channel_map[msg[0]][1])
            elif self.channel_map[msg[0]][0] == 'book':
                await self._book(msg, self.channel_map[msg[0]][1])
            else:
                LOG.warning("%s: No mapping for message %s", self.id, msg)
        else:
            # Administrative messages are dicts keyed by 'event'.
            if msg['event'] == 'heartbeat':
                return
            elif msg['event'] == 'systemStatus':
                return
            elif msg['event'] == 'subscriptionStatus' and msg['status'] == 'subscribed':
                # Record which (channel, standardized pair) this channel id maps to.
                self.channel_map[msg['channelID']] = (msg['subscription']['name'], pair_exchange_to_std(msg['pair']))
            else:
                LOG.warning("%s: Invalid message type %s", self.id, msg)
| [
"bmoscon@gmail.com"
] | bmoscon@gmail.com |
dcb4c855cf299c37b70980412d0997e041832a97 | 3fb0ce33f00b96ae3808a32da44de3e887434afb | /.提出一覧/AtCoder/M-SOLUTIONSプロコンオープン2020/msol_D.py | 57fa6ba1c8ac9cddfb25c897c2153a9acee8c49d | [] | no_license | Yukikazari/kyoupuro | ca3d74d8db024b1988cd0ff00bf069ab739783d7 | 343de455c4344dbcfa4524b492f7f6205c9db26f | refs/heads/master | 2023-02-21T01:53:52.403729 | 2021-01-27T03:55:01 | 2021-01-27T03:55:01 | 282,222,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | N = int(input())
A = list(map(int, input().split()))
money = 1000
kabu = 0
for i in range(len(A)-1):
if kabu != 0:
if A[i] >= A[i + 1]:
money += kabu * A[i]
kabu = 0
else:
if A[i] < A[i + 1]:
kabu = money // A[i]
money -= kabu * A[i]
if kabu != 0:
money += kabu * A[len(A) - 1]
print(money)
| [
"haya_nanakusa793@yahoo.co.jp"
] | haya_nanakusa793@yahoo.co.jp |
e70dabf76a8f2d14e74e6b2353114e4dafe9e649 | adbcc8ff2249dc9906095bf894d2923b197f8af2 | /examples/csj/s5/local/remove_pos.py | 3e354475e35113bfcbd8497b2fc1cf5ff9e61663 | [] | no_license | caochensi/neural_sp | de39d0919aeb6da28b90140be051c124f7efcc3f | 019247ccae7df461f852c5130ea127e395d071dc | refs/heads/master | 2020-04-15T14:49:52.408291 | 2019-01-06T10:52:36 | 2019-01-06T14:43:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,357 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Remove POS tag."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import re
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument('text', type=str,
help='path to text file')
args = parser.parse_args()
def main():
    # Clean a Kaldi-style transcript file: each input line is
    # "<utt_id> <word>+<POS> <word>+<POS> ...".
    # NOTE(review): Python 2 only -- relies on the `unicode` builtin.
    with open(args.text, 'r') as f:
        # Progress bar sized by a second full read of the same file.
        pbar = tqdm(total=len(open(args.text).readlines()))
        for line in f:
            line = unicode(line, 'utf-8').strip()
            utt_id = line.split()[0]
            words = line.split()[1:]
            # Remove POS tag
            text = ' '.join(list(map(lambda x: x.split('+')[0], words)))
            # Remove <sp> (short pause)
            text = text.replace('<sp>', '')
            # Remove conseccutive spaces
            text = re.sub(r'[\s]+', ' ', text)
            # Remove the first and last spaces
            if text[0] == ' ':
                text = text[1:]
            if text[-1] == ' ':
                text = text[:-1]
            # Re-attach the utterance id and emit the cleaned transcript.
            line = utt_id + ' ' + text
            print('%s' % line.encode('utf-8'))
            pbar.update(1)
if __name__ == '__main__':
    main()
| [
"hiro.mhbc@gmail.com"
] | hiro.mhbc@gmail.com |
2b02015ba8e96f08cf871f37cd5331da7911e852 | e9e433c57a7d73d848fbade5e354a8c31ff0ea87 | /tests/test_subword_separation.py | 43d1525eb175ddd755df02c966b5532c5a64d8f9 | [
"Apache-2.0"
] | permissive | Joshua0128/codeprep | a7f99533e7b74e089fb66a3f10f6be59d4ca4b71 | 0f41307f7a9ad545e5ec0cc9552a0144328f2422 | refs/heads/master | 2023-07-09T13:07:33.094940 | 2021-04-21T18:08:30 | 2021-04-21T18:08:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,553 | py | # SPDX-FileCopyrightText: 2020 Hlib Babii <hlibbabii@gmail.com>
#
# SPDX-License-Identifier: Apache-2.0
# import unittest
#
# from codeprep.bpepkg.bpe_encode import BpeData
# from codeprep.parse.core import convert_text
# from codeprep.parse.model.containers import SplitContainer
# from codeprep.parse.model.numeric import Number
# from codeprep.parse.model.placeholders import placeholders
# from codeprep.parse.model.word import Underscore, Word
# from codeprep.prepconfig import PrepConfig
# from codeprep.to_repr import to_repr
#
# test_cases = {
# "create": (
# [SplitContainer.from_single_token("create")],
# ["create"],
# ),
# "Vector": (
# [SplitContainer.from_single_token("Vector")],
# [placeholders["capital"], "vector"],
# ),
# "players": (
# [SplitContainer.from_single_token("players")],
# [placeholders["word_start"], 'play', 'er', 's', placeholders["word_end"]]
# ),
# "0.345e+4": (
# [Number("0.345e+4")],
# [placeholders["word_start"], "0.", "3", "4", "5", "e+", "4", placeholders["word_end"]]
# ),
# "bestPlayers": (
# [SplitContainer([Word.from_("best"), Word.from_("Players")])],
# [placeholders["word_start"], "best", placeholders["capital"], 'play', "er", "s", placeholders["word_end"]]
# ),
# "test_BestPlayers": (
# [SplitContainer([Word.from_("test"), Underscore(), Word.from_("Best"), Word.from_("Players")])],
# [placeholders["word_start"], "test", '_', placeholders["capital"],
# "best", placeholders["capital"], 'play', "er", "s", placeholders["word_end"]]
# ),
# "test_BestPlayers_modified": (
# [SplitContainer(
# [Word.from_("test"), Underscore(), Word.from_("Best"), Word.from_("Players"), Underscore(),
# Word.from_("modified")]
# )],
# [placeholders["word_start"], "test", '_', placeholders["capital"],
# "best", placeholders["capital"], 'play', "er", "s", '_', "mod",
# "if", "ied",
# placeholders["word_end"]]
# ),
# "N_PLAYERS_NUM": (
# [SplitContainer([Word.from_("N"), Underscore(), Word.from_("PLAYERS"), Underscore(), Word.from_("NUM")])],
# [placeholders["word_start"], placeholders["capitals"], "n", '_',
# placeholders["capitals"], "play", "er", "s", '_', placeholders["capitals"],
# "num", placeholders["word_end"]]
# ),
# "_players": (
# [SplitContainer([Underscore(), (Word.from_("players"))])],
# [placeholders['word_start'], '_', "play", "er", "s", placeholders['word_end']]
# ),
# }
#
# bpe_merges_cache = {
# "players": ["play", "er", "s"],
# "0.345e+4": ["0.", "3", "4", "5", "e+", "4"],
# "modified": ["mod", "if", "ied"],
#
# "create": ["create"],
# "vector": ["vector"],
# "best": ["best"],
# "test": ["test"],
# "num": ["num"],
# "user": ["user"],
# "get": ["get"],
# "nick": ["ni", "ck"],
# "logger": ["logger"],
# "info": ["info"]
# }
#
#
# class SubwordSeparation(unittest.TestCase):
# def test(self):
# for input, output_tuple in test_cases.items():
# parsed = [p for p in convert_text(input, "java")][:-1]
#
# self.assertEqual(output_tuple[0], parsed)
#
# repred, metadata = to_repr(PrepConfig.from_encoded_string('Uc140l'), parsed, BpeData(merges_cache=bpe_merges_cache))
#
# self.assertEqual(output_tuple[1], repred)
#
#
# if __name__ == '__main__':
# unittest.main() | [
"hlibbabii@gmail.com"
] | hlibbabii@gmail.com |
eb940db047f8f17e8e5f77e6aba568932e282e75 | f1624dc174a172aff3b6cf7b9c7d1f2505bb76ee | /code/plot_double_bar_tmp.py | ee014d0f52a5091ca582759f7a4d2e1f81e4de3e | [] | no_license | blxlrsmb/TrendMicro2014 | 5d0bf07a54e8f2123af06cd58beef36ad9cb007d | 63894ede6bbfeb1ce62a5b64c56996dd53c6c2ce | refs/heads/master | 2021-01-10T14:12:56.868967 | 2015-11-21T23:39:36 | 2015-11-21T23:39:36 | 46,638,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,000 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# $File: plot_double_bar_tmp.py
# $Date: Mon Aug 18 06:45:14 2014 +0800
# $Author: Xinyu Zhou <zxytim[at]gmail[dot]com>
from sklearn.datasets import load_svmlight_file
import matplotlib.pyplot as plt
import numpy
import sys
from IPython import embed
def process_data(X, y, column, nr_bins):
    """Pull one feature column out of sparse matrix X, drop zero entries and
    values >= 3000, then histogram the remaining samples per class label.

    Returns (bin_centers, counts_for_class0, counts_for_class1, bar_width).
    """
    col = X[:, column].toarray().T[0]
    nonzero = col != 0
    col, labels = col[nonzero], y[nonzero]
    in_range = col < 3000
    col, labels = col[in_range], labels[in_range]
    lo, hi = min(col), max(col)
    # Bin edges are taken from the pooled histogram so both classes share them.
    _, edges = numpy.histogram(col, nr_bins)
    width = (hi - lo) / float(nr_bins) * 0.6
    neg_counts, _ = numpy.histogram(col[labels == 0], edges)
    pos_counts, _ = numpy.histogram(col[labels == 1], edges)
    # Midpoints of consecutive edges -> one x position per bin.
    centers = (numpy.array(list(edges) + [0]) + numpy.asarray([0] + list(edges)))[1:-1] * 0.5
    return centers, neg_counts, pos_counts, width
def main():
    # CLI: <svm_file> <desc> <column> <nr_bins> <output>
    # NOTE(review): Python 2 (print statements below).
    if len(sys.argv) != 6:
        print 'Usage: {} <svm_file> <desc> <column> <nr_bins> <output>'.format(sys.argv[0])
        sys.exit(1)
    input_path, desc_path, column, nr_bins, output_path = sys.argv[1:]
    column, nr_bins = map(int, [column, nr_bins])
    # Feature descriptions: one per line, "<id> <description words...>".
    with open(desc_path) as f:
        descs = [" ".join(line.rstrip().split()[1:]) for line in f]
    X, y = load_svmlight_file(input_path)
    x0, y0, y1, width = process_data(X, y, column, nr_bins)
    # Skip columns with too few samples to draw a meaningful histogram.
    nr_items = sum(y0) + sum(y1)
    if nr_items < 100:
        print 'to few data for column {}: {}, abort.'.format(
            column, nr_items)
        return
    # Dump the raw histogram values to /tmp for later inspection.
    ff = open('/tmp/{0}'.format(descs[column]), 'w')
    print >> ff, x0, '\n', y0, '\n', y1
    ff.close()
    # Two bar series, the "un-renewed" one offset so both stay visible.
    plt.bar(x0 + width * 0.3, y0, width=width, label='un-renewed', color='#55DD55')
    plt.bar(x0, y1, width=width, label='renewed', color='#5555DD')
    plt.title('distribution of {} on renewal'.format(descs[column]))
    plt.legend()
    plt.tight_layout()
    plt.grid()
    plt.savefig(output_path)
    # plt.show()
main()
# vim: foldmethod=marker
| [
"ppwwyyxxc@gmail.com"
] | ppwwyyxxc@gmail.com |
a9c44935af9e748a670870274888deac52241fc7 | c3865ab8c207ee58056be8eda7319acaa9a04654 | /Plots/simple/SimpleBarPlot.py | 4e5aa7de3779008ecd87c21d430eaf7f2ba2977f | [] | no_license | jonathanglima/Python | d42c21cb9030063f172c03efbb598a969db48539 | 43c53b6f20973c31ce95d2891293d344d655d377 | refs/heads/master | 2021-08-08T07:42:56.348207 | 2017-11-09T22:19:17 | 2017-11-09T22:19:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,092 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 07 16:56:32 2015
Simple Bar plot function
@author: Edward
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
import os
# global variables
# fontname = 'C:/Users/Edward/Documents/Assignments/Scripts/Python/Plots/resource/Helvetica.ttf' # font .ttf file path
# platform specific fonts
#import sys
#fontname = {'darwin': 'Helvetica', # Mac
# 'win32':'Arial', # Windows
# 'linux': 'FreeSans', # Linux
# 'cygwin': 'Arial' # use Windows
# }.get(sys.platform)
fontname = 'Helvetica'
fontsize = {'title':16, 'xlab':12, 'ylab':12, 'xtick':10,'ytick':10,'texts':10,
'legend': 10, 'legendtitle':10} # font size
def SimpleBarPlot(groups, values, errors, savepath=None, width = 0.27,
                  size=(3, 3), color=['#1f77b4']):
    """Draw a single-axes bar chart with error bars.

    groups : list of category labels, e.g. ['dog', 'cat', 'hippo']
    values : bar heights, e.g. [15, 10, 3]
    errors : symmetric error-bar sizes, e.g. [0.5, 0.3, 0.8]
    savepath : if given, figure is saved there; format inferred from the
        file extension
    width : bar width
    size : figure size in inches as a tuple, default (3, 3)
    color : bar color, default tableau10 steelblue (hex: #1f77b4).
        NOTE(review): mutable default argument -- harmless here because it
        is never mutated, but worth replacing with None.

    Returns (fig, axs).  Uses the module-level ``fontname`` and ``fontsize``
    globals to style all text.
    """
    # Get bar plot function according to style
    ngroups = len(groups) # group labels
    # leftmost position of bars
    pos = np.arange(ngroups)
    # initialize the plot
    fig, axs = plt.subplots(nrows=1, ncols = 1, sharex=True)
    # plot the series
    axs.bar(pos, values, width, yerr=errors, color=color, align='center')
    # set axis: outward ticks, hide right/top spines.
    axs.tick_params(axis='both',direction='out')
    axs.spines['left'].set_visible(True)
    axs.spines['right'].set_visible(False)
    axs.spines['top'].set_visible(False)
    axs.spines['bottom'].set_visible(True)
    axs.xaxis.set_ticks_position('bottom')
    axs.yaxis.set_ticks_position('left')
    # Place the x axis / labels depending on the sign of the data.
    ymin, ymax = axs.get_ybound()
    if ymax <= 0.0: # only negative data present
        # flip label to top
        axs.spines['bottom'].set_position('zero') # zero the x axis
        axs.tick_params(labelbottom=False, labeltop=True)
    elif ymin >= 0.0: # only positive data present. Default
        axs.spines['bottom'].set_position('zero') # zero the x axis
    else: # mix of positive an negative data : set all label to bottoms
        axs.spines['bottom'].set_visible(False)
        axs.spines['top'].set_visible(True)
        axs.spines['top'].set_position('zero')
        axs.xaxis.set_ticks_position('none')
    # Set x categorical label; widen limits so outer bars are not clipped.
    x = range(0,len(groups))
    if axs.get_xlim()[0] >= x[0]:
        axs.set_xlim(axs.get_xticks()[0]-1,axs.get_xlim()[-1])
    if axs.get_xlim()[-1] <= x[-1]:
        axs.set_xlim(axs.get_xlim()[0], axs.get_xticks()[-1]+1)
    plt.xticks(x, groups)
    # Set font: collect every text artist on the axes, keyed by the fontsize
    # dict's category names.
    itemDict = {'title':[axs.title], 'xlab':[axs.xaxis.label],
                'ylab':[axs.yaxis.label], 'xtick':axs.get_xticklabels(),
                'ytick':axs.get_yticklabels(),
                'texts':axs.texts if isinstance(axs.texts, np.ndarray)
                or isinstance(axs.texts, list) else [axs.texts],
                'legend': [] if axs.legend_ is None
                else axs.legend_.get_texts(),
                'legendtitle':[] if axs.legend_ is None
                else [axs.legend_.get_title()]}
    itemList, keyList = [], []
    for k, v in iter(itemDict.items()):
        itemList += v
        keyList += [k]*len(v)
    # initialize fontprop object
    fontprop = fm.FontProperties(style='normal', weight='normal',
                                 stretch = 'normal')
    if os.path.isfile(fontname): # check if font is a file
        fontprop.set_file(fontname)
    else:# check if the name of font is available in the system
        if not any([fontname.lower() in a.lower() for a in
                    fm.findSystemFonts(fontpaths=None, fontext='ttf')]):
            print('%s font not found. Use system default.' %(fontname))
        fontprop.set_family(fontname) # set font name
    # set font for each object
    for n, item in enumerate(itemList):
        if isinstance(fontsize, dict):
            fontprop.set_size(fontsize[keyList[n]])
        elif n <1: # set the properties only once
            fontprop.set_size(fontsize)
        item.set_fontproperties(fontprop) # change font for all items
    # Set figure size
    fig.set_size_inches(size[0],size[1])
    # Save the figure
    if savepath is not None:
        fig.savefig(savepath, bbox_inches='tight', rasterized=True, dpi=300)
    return(fig, axs)
# Example usage / manual smoke test.
if __name__=='__main__':
    groups = ['dog','cat','hippo']
    values = [-15, 10, 3]
    errors = [0.5, 0.3, 0.8]
    SimpleBarPlot(groups, values, errors, savepath='C:/Users/Edward/Documents/Assignments/Scripts/Python/Plots/barplot.eps')
| [
"cui23327@gmail.com"
] | cui23327@gmail.com |
72d6334fff676e48b52f9b448c483d6a4750c874 | 524c17cbe94ee67babf817bad3b304e2573aa2da | /Lab4/func.py | 12c785bd1737ed353c5e4ba959d5d126ed932708 | [] | no_license | JoniNoct/python-laboratory | 6707664fa58d6e0fcde8476f9670fcf9c312e830 | 551fdb128bcf113e72fd13ff7441455c830ecf45 | refs/heads/master | 2020-07-29T15:20:26.183289 | 2019-11-11T22:42:42 | 2019-11-11T22:42:42 | 209,859,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,094 | py | def checkType(a):
i = 1
if a == "int":
while i == 1:
try:
v = int(input())
i += 1
except:
print("Потрібно ввести число")
return v
elif a == "float":
while i == 1:
try:
v = float(input())
i += 1
except:
print("Потрібно ввести число")
return v
def setChoice():
print("Ви би хотіли розпочати знову?\n1) так\n2) ні")
i = 1
j = 1
while i == 1:
c = checkType("int")
if c == 1:
print("Починаэмо спочатку")
i += 1
elif c == 2:
print("До побачення")
i += 1
j += 1
else:
print("У вас є можливість обрати лише з 2 пунктів")
return j
def welcome(a):
print("Лабораторна робота №%d Майструк Ілля №6\nДоброго дня" %(a))
| [
"tereshchenko.igor@gmail.com"
] | tereshchenko.igor@gmail.com |
ca7dcb5b038ec85b1e09b2329b549221e8c3d1ad | c2544163e17fad9a4e5731bd1976bfc9db44f8bd | /reagent/net_builder/discrete_dqn/dueling.py | fc2fe4b2e4cb677386c517f34dfdc563b2f3b93f | [
"BSD-3-Clause"
] | permissive | zachkeer/ReAgent | 52fb805576dc7b1465e35921c3651ff14cd9345e | 3e5eb0391050c39b9d4707020f9ee15d860f28cb | refs/heads/master | 2022-11-25T14:08:21.151851 | 2020-07-22T06:44:28 | 2020-07-22T06:45:48 | 281,907,350 | 1 | 0 | BSD-3-Clause | 2020-07-23T09:21:50 | 2020-07-23T09:21:49 | null | UTF-8 | Python | false | false | 1,216 | py | #!/usr/bin/env python3
from typing import List
from reagent import types as rlt
from reagent.core.dataclasses import dataclass, field
from reagent.models.base import ModelBase
from reagent.models.dueling_q_network import DuelingQNetwork
from reagent.net_builder.discrete_dqn_net_builder import DiscreteDQNNetBuilder
from reagent.parameters import NormalizationData, param_hash
@dataclass
class Dueling(DiscreteDQNNetBuilder):
    """Net builder that produces a fully-connected dueling Q-network.

    ``sizes`` and ``activations`` describe the hidden layers pairwise.
    """
    __hash__ = param_hash
    sizes: List[int] = field(default_factory=lambda: [256, 128])
    activations: List[str] = field(default_factory=lambda: ["relu", "relu"])
    def __post_init_post_parse__(self):
        # Each hidden layer needs exactly one matching activation.
        assert len(self.sizes) == len(self.activations), (
            f"Must have the same numbers of sizes and activations; got: "
            f"{self.sizes}, {self.activations}"
        )
    def build_q_network(
        self,
        state_feature_config: rlt.ModelFeatureConfig,
        state_normalization_data: NormalizationData,
        output_dim: int,
    ) -> ModelBase:
        # Input width comes from the normalization metadata; output_dim is
        # the number of discrete actions.
        state_dim = self._get_input_dim(state_normalization_data)
        return DuelingQNetwork.make_fully_connected(
            state_dim, output_dim, self.sizes, self.activations
        )
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
e5384e5808f75faeb98b36e08e1fdf5188cd9ff4 | 301a7cb5d21808b4442f641ba4396fbd6c1da6f0 | /dataclasses_serialization/serializer_base.py | 9513f627a0877f7ae41e8c6862dc1a5fb057b84f | [
"MIT"
] | permissive | kitschen/python-dataclasses-serialization | 5766b8010ffb3d8af7aa4925b00f733019ca4b2f | c0c4e4e037a01871dccb13c25478f35a9dfb7717 | refs/heads/master | 2022-05-30T06:13:23.474156 | 2020-02-18T17:12:52 | 2020-02-18T17:12:52 | 260,832,071 | 0 | 0 | MIT | 2020-05-03T05:05:06 | 2020-05-03T05:05:05 | null | UTF-8 | Python | false | false | 7,635 | py | from dataclasses import dataclass, fields, is_dataclass
from functools import partial
from typing import TypeVar, Union, Dict, List, get_type_hints
from typing_inspect import is_union_type, get_origin, get_args
from toolz import curry
# Python 3.7 removed typing.GenericMeta; emulate it with the tuple of
# replacement types so isinstance(t, GenericMeta) keeps working below.
try:
    from typing import GenericMeta
except ImportError:
    from typing import _GenericAlias, _SpecialForm
    GenericMeta = (_GenericAlias, _SpecialForm)
__all__ = [
    "isinstance",
    "issubclass",
    "noop_serialization",
    "noop_deserialization",
    "dict_to_dataclass",
    "union_deserialization",
    "dict_serialization",
    "dict_deserialization",
    "list_deserialization",
    "Serializer",
    "SerializationError",
    "DeserializationError"
]
# Always resolve string forward references when extracting type arguments.
get_args = partial(get_args, evaluate=True)
# Keep handles on the builtins before this module shadows them below.
original_isinstance = isinstance
original_issubclass = issubclass
def isinstance(o, t):
    """Drop-in replacement for builtin isinstance that also understands the
    ``dataclass`` pseudo-type and typing generics such as Dict[K, V]."""
    if t is dataclass:
        # "Instance of dataclass" here means: a dataclass *class* object.
        return original_isinstance(o, type) and is_dataclass(o)
    if original_isinstance(t, GenericMeta):
        if t is Dict:
            return original_isinstance(o, dict)
        if get_origin(t) in (dict, Dict):
            # Parameterised Dict[K, V]: verify every key/value pair too.
            key_type, value_type = get_args(t)
            return original_isinstance(o, dict) and all(
                isinstance(key, key_type) and isinstance(value, value_type)
                for key, value in o.items()
            )
    return original_isinstance(o, t)
def issubclass(cls, classinfo):
    """Replacement for builtin issubclass aware of the dataclass pseudo-type,
    Union, and typing generics."""
    if classinfo is dataclass:
        # Nothing is treated as a *subclass* of the dataclass pseudo-type.
        return False
    if classinfo is Union or is_union_type(cls):
        # A Union cls only matches the Union target, and vice versa.
        return classinfo is Union and is_union_type(cls)
    if original_isinstance(classinfo, GenericMeta):
        # Unparameterised generic target (e.g. Dict): compare origins.
        return original_isinstance(cls, GenericMeta) and classinfo.__args__ is None and get_origin(cls) is classinfo
    if original_isinstance(cls, GenericMeta):
        origin = get_origin(cls)
        if isinstance(origin, GenericMeta):
            origin = origin.__base__
        return origin is classinfo
    return original_issubclass(cls, classinfo)
def noop_serialization(obj):
    """Identity serializer: return the object unchanged."""
    return obj
@curry
def noop_deserialization(cls, obj):
    """Identity deserializer: return obj unchanged after verifying that it
    already is an instance of cls."""
    if isinstance(obj, cls):
        return obj
    raise DeserializationError("Cannot deserialize {} {!r} to type {}".format(
        type(obj),
        obj,
        cls
    ))
@curry
def dict_to_dataclass(cls, dct, deserialization_func=noop_deserialization):
if not isinstance(dct, dict):
raise DeserializationError("Cannot deserialize {} {!r} using {}".format(
type(dct),
dct,
dict_to_dataclass
))
if hasattr(cls, '__parameters__'):
if cls.__parameters__:
raise DeserializationError("Cannot deserialize unbound generic {}".format(
cls
))
origin = get_origin(cls)
type_mapping = dict(zip(origin.__parameters__, get_args(cls)))
type_hints = get_type_hints(origin)
flds = fields(origin)
fld_types = (type_hints[fld.name] for fld in flds)
fld_types = (
fld_type[tuple(type_mapping[type_param] for type_param in fld_type.__parameters__)]
if isinstance(fld_type, GenericMeta) else
type_mapping[fld_type]
if isinstance(fld_type, TypeVar) else
fld_type
for fld_type in fld_types
)
else:
type_hints = get_type_hints(cls)
flds = fields(cls)
fld_types = (type_hints[fld.name] for fld in flds)
try:
return cls(**{
fld.name: deserialization_func(fld_type, dct[fld.name])
for fld, fld_type in zip(flds, fld_types)
if fld.name in dct
})
except TypeError:
raise DeserializationError("Missing one or more required fields to deserialize {!r} as {}".format(
dct,
cls
))
@curry
def union_deserialization(type_, obj, deserialization_func=noop_deserialization):
for arg in get_args(type_):
try:
return deserialization_func(arg, obj)
except DeserializationError:
pass
raise DeserializationError("Cannot deserialize {} {!r} to type {}".format(
type(obj),
obj,
type_
))
@curry
def dict_serialization(obj, key_serialization_func=noop_serialization, value_serialization_func=noop_serialization):
if not isinstance(obj, dict):
raise SerializationError("Cannot serialize {} {!r} using dict serialization".format(
type(obj),
obj
))
return {
key_serialization_func(key): value_serialization_func(value)
for key, value in obj.items()
}
@curry
def dict_deserialization(type_, obj, key_deserialization_func=noop_deserialization, value_deserialization_func=noop_deserialization):
if not isinstance(obj, dict):
raise DeserializationError("Cannot deserialize {} {!r} using dict deserialization".format(
type(obj),
obj
))
if type_ is dict or type_ is Dict:
return obj
key_type, value_type = get_args(type_)
return {
key_deserialization_func(key_type, key): value_deserialization_func(value_type, value)
for key, value in obj.items()
}
@curry
def list_deserialization(type_, obj, deserialization_func=noop_deserialization):
if not isinstance(obj, list):
raise DeserializationError("Cannot deserialize {} {!r} using list deserialization".format(
type(obj),
obj
))
if type_ is list or type_ is List:
return obj
value_type, = get_args(type_)
return [
deserialization_func(value_type, value)
for value in obj
]
@dataclass
class Serializer:
    """Dispatch table pairing types with serialization/deserialization
    functions.  Lookup is first-match over the registered mappings, with a
    dataclass fallback wired in by __post_init__."""
    serialization_functions: dict
    deserialization_functions: dict
    def __post_init__(self):
        # Default handlers: dataclasses round-trip through their __dict__,
        # and Union members are tried in order; both recurse through self so
        # user-registered handlers apply to nested values.
        self.serialization_functions.setdefault(dataclass, lambda obj: self.serialize(dict(obj.__dict__)))
        self.deserialization_functions.setdefault(dataclass, dict_to_dataclass(deserialization_func=self.deserialize))
        self.deserialization_functions.setdefault(Union, union_deserialization(deserialization_func=self.deserialize))
    def serialize(self, obj):
        """
        Serialize given Python object
        """
        # First registered type that matches wins (insertion order).
        for type_, func in self.serialization_functions.items():
            if isinstance(obj, type_):
                return func(obj)
        if is_dataclass(obj) and dataclass in self.serialization_functions:
            return self.serialization_functions[dataclass](obj)
        raise SerializationError("Cannot serialize type {}".format(type(obj)))
    @curry
    def deserialize(self, cls, serialized_obj):
        """
        Attempt to deserialize serialized object as given type
        """
        for type_, func in self.deserialization_functions.items():
            if issubclass(cls, type_):
                return func(cls, serialized_obj)
        if is_dataclass(cls) and dataclass in self.deserialization_functions:
            return self.deserialization_functions[dataclass](cls, serialized_obj)
        raise DeserializationError("Cannot deserialize type {}".format(cls))
    @curry
    def register_serializer(self, cls, func):
        # Register (or overwrite) the serializer used for instances of cls.
        self.serialization_functions[cls] = func
    @curry
    def register_deserializer(self, cls, func):
        # Register (or overwrite) the deserializer used for targets of cls.
        self.deserialization_functions[cls] = func
    def register(self, cls, serialization_func, deserialization_func):
        # Convenience: register both directions at once.
        self.register_serializer(cls, serialization_func)
        self.register_deserializer(cls, deserialization_func)
class SerializationError(TypeError):
    """Raised when no registered function can serialize an object."""
    pass
class DeserializationError(TypeError):
    """Raised when a serialized value cannot be deserialized as the
    requested type."""
    pass
| [
"madman.bob@hotmail.co.uk"
] | madman.bob@hotmail.co.uk |
f0941a9ff0770700eaf5c789d61d647d34b3bcde | 13a5a2ab12a65d65a5bbefce5253c21c6bb8e780 | /dnainfo/skyline_chi/migrations/0007_auto_20161019_1408.py | 61784f86a84c1613426e15ae6a744a2d42ba257a | [] | no_license | NiJeLorg/DNAinfo-CrimeMaps | 535b62205fe1eb106d0f610d40f2f2a35e60a09e | 63f3f01b83308294a82565f2dc8ef6f3fbcdb721 | refs/heads/master | 2021-01-23T19:28:12.642479 | 2017-05-11T06:04:08 | 2017-05-11T06:04:08 | 34,847,724 | 2 | 0 | null | 2016-11-25T15:56:14 | 2015-04-30T10:02:41 | JavaScript | UTF-8 | Python | false | false | 2,612 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-19 18:08
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add ``updated`` auto-now timestamps and an owning ``user`` FK to the
    skyline_chi building models, and switch ``created`` to auto_now_add."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('skyline_chi', '0006_auto_20161007_1528'),
    ]
    operations = [
        # The user FKs are added with default=1 so existing rows stay valid;
        # preserve_default=False drops that default from the model afterwards.
        migrations.AddField(
            model_name='chi_building_permits',
            name='updated',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AddField(
            model_name='chi_building_permits',
            name='user',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='chireporterbuildings',
            name='updated',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AddField(
            model_name='chireporterbuildings',
            name='user',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='chiskyline',
            name='updated',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AddField(
            model_name='chisponsoredbuildings',
            name='updated',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AddField(
            model_name='chisponsoredbuildings',
            name='user',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='chi_building_permits',
            name='created',
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='chireporterbuildings',
            name='created',
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='chiskyline',
            name='created',
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='chisponsoredbuildings',
            name='created',
            field=models.DateTimeField(auto_now_add=True),
        ),
    ]
| [
"jd@nijel.org"
] | jd@nijel.org |
9875fadcf6f959f0bd030c49d8153372772cbd1e | a7c4f8d4e101a4b3961419278809923a6ddbfde7 | /connect.py | a69af0ebbc3139fa555aa28b47aa0bec1878cc76 | [] | no_license | memonsbayu/phyton | 5b7bfabc1a132d7c8ac6d057fbc7e8bb687ecff7 | 858966d11d95ff3fd829179abe1e349f2b84345f | refs/heads/master | 2023-01-01T07:43:23.784410 | 2020-10-22T13:07:09 | 2020-10-22T13:07:09 | 306,339,910 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | import mysql.connector
def koneksi():
    """Open a connection to the local ``sewa_alat_olahraga`` MySQL database.

    Returns the connection object when it is established; otherwise prints
    a notice and returns None.
    """
    conn = mysql.connector.connect(
        host="localhost",
        user="root",
        passwd="",
        database="sewa_alat_olahraga",
        autocommit=True,
    )
    if not conn.is_connected():
        print('DB disconnected!')
        return None
    return conn
| [
"you@example.com"
] | you@example.com |
0b6b42e43e78681a0e3e98a62abf55692dd129ed | ad3011f4d7600eb1b436f1527e1b576910a64b56 | /05_cwiczenia/01_podstawy/06_cwiczenie.py | ed2a29bfc6b92745932f9a079b0001d3a4bdde77 | [] | no_license | pawel-domanski/python-basic | 7a048c7de950d997492c11b4f07888f856908639 | 1adce694d58b4969f843cfc651d3e2a027a4f3f3 | refs/heads/main | 2023-03-21T12:55:22.097946 | 2021-03-06T09:54:09 | 2021-03-06T09:54:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | # -*- coding: utf-8 -*-
"""
Używając odpowiedniej metody połącz podane wyrazy symbolem '#':
> sport
> python
> free
> time
Następnie wydrukuj do konsoli.
Oczekiwany rezultat:
sport#python#free#time
""" | [
"krakowiakpawel9@gmail.com"
] | krakowiakpawel9@gmail.com |
87cec23de18905c57c19aafa9befa366d356508a | bab4f301ff7b7cf0143d82d1052f49e8632a210e | /1010. Pairs of Songs With Total Durations Divisible by 60.py | a0b93d4f53a519df6b31b2c79b16534f395f35f8 | [] | no_license | ashish-c-naik/leetcode_submission | 7da91e720b14fde660450674d6ce94c78b1150fb | 9f5dcd8e04920d07beaf6aa234b9804339f58770 | refs/heads/master | 2020-04-05T05:12:03.656621 | 2019-06-08T17:30:22 | 2019-06-08T17:30:22 | 156,585,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | class Solution(object):
def numPairsDivisibleBy60(self, time):
"""
:type time: List[int]
:rtype: int
"""
hm = collections.defaultdict(int)
count = 0
for x in time:
# if x % 60 == 0:
# count += 1
new = x % 60
# print(new, hm)
if new in hm:
count += hm[new]
hm[60-new] += 1
if new == 0: hm[new] += 1
return count | [
"ashishnaik121@gmail.com"
] | ashishnaik121@gmail.com |
edcf513b1d1c7b3cec4fd78ec26e06b893ead73c | a9c5c6f6aed746cdfaa594db54d7a2d46e33f430 | /LICENSE.md/windpred/rev.py | e654404ff6da9e277f6a2ebb1c776a14f88de152 | [] | no_license | nyc1893/Python-Learning | a7b81d169c4678c199a61aa4c7d226ce927aa8d8 | 13bc6b8cc28c25df2730d9672d419cd57cab55d4 | refs/heads/master | 2023-07-20T23:08:38.164660 | 2023-07-06T14:35:29 | 2023-07-06T14:35:29 | 101,700,286 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,785 | py | import math
import pandas as pd
import numpy as np
import statsmodels.api as sm # recommended import according to the docs
import matplotlib.pyplot as plt
from scipy.stats import genpareto
# delta p are used
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import timeit
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
def grimshaw(yt):
    """Closed-form Grimshaw-style GPD parameter estimate from the list of
    excesses ``yt``.

    Returns (gam, sig): the shape gamma and scale sigma.
    """
    mean_y = sum(yt) / float(len(yt))
    min_y = min(yt)
    # Candidate root x* of the Grimshaw likelihood equation.
    xstar = 2 * (mean_y - min_y) / min_y ** 2
    log_sum = sum(math.log(1 + xstar * v) for v in yt)
    vx = 1 + log_sum / len(yt)
    gam = vx - 1
    sig = gam / float(xstar)
    return gam, sig
def calT(q, gam, sig, n, no_t, t):
    """Quantile threshold update: z_q = t + (sig/gam) * ((q*n/no_t)^(-gam) - 1)."""
    return t + (sig / gam) * ((q * n / no_t) ** (-gam) - 1)
def pot_func(x, q):
    """Peaks-over-threshold initialisation: fit a GPD to the excesses over
    the q-th percentile of ``x`` and return (zq, t), the updated threshold
    plus the initial percentile threshold."""
    t = np.percentile(x, q)
    exceed = [v for v in x if v > t]
    excess = [v - t for v in exceed]
    no_t = len(exceed)
    n = len(x)
    # Grimshaw-style closed-form estimate of the GPD parameters.
    mean_e = sum(excess) / float(len(excess))
    min_e = min(excess)
    xstar = 2 * (mean_e - min_e) / min_e ** 2
    total = 0
    for e in excess:
        total = total + math.log(1 + xstar * e)
    vx = 1 + (1 / len(exceed)) * total
    gam = vx - 1
    sig = gam / float(xstar)
    # Threshold update, function (1) of the SPOT formulation.
    zq = t + (sig / gam) * (((q * n / no_t) ** (-gam)) - 1)
    return zq, t
# print ("Inital Threshold", t)
# print ("Updated Threshold", zq)
# print ("len nt = ", len(nt))
# print ("len yt = ", len(yt))
# from IPython.core.pylabtools import figsize
"""
"""
# input
# n: lens of calibration data
# d: window size
# q: quantile
def fun1(n,d,turb,sign,L):
    """Load 2008-2010 power CSVs for turbine type ``turb``, clip negatives
    to zero, and return the large L-step ramps of the concatenated series.

    Returns (temp, ind): the DataFrame of differences exceeding 10% of a
    fixed capacity constant (53*1.5 for 'ge', else 221) and their integer
    index positions.  sign == 0 selects upward ramps p[t+L]-p[t]; any other
    sign selects downward ramps p[t]-p[t+L].
    NOTE(review): parameters ``n`` and ``d`` are unused in the live code
    (only referenced in commented-out lines) -- confirm before relying on
    them.
    """
    # L = 5
    i = 0 #initial point
    df1 = pd.read_csv("../../../data/total_"+str(turb)+"_2008.csv")
    df2 = pd.read_csv("../../../data/total_"+str(turb)+"_2009.csv")
    df3 = pd.read_csv("../../../data/total_"+str(turb)+"_2010.csv")
    # Clip negative power readings to zero before differencing.
    df1[df1<0] = 0
    df2[df2<0] = 0
    df3[df3<0] = 0
    # df1 = df1.iloc[-(d+n):]
    # print("2008 shape",df1.shape)
    data = pd.concat([df1, df2], axis=0)
    data = pd.concat([data, df3], axis=0)
    t1 = data.values
    # cc: forward difference over L steps; dd: its negation.
    num = t1.shape[0] - L
    cc = np.zeros(num)
    dd = np.zeros(num)
    for i in range(num):
        cc[i] = t1[i+L]-t1[i]
        dd[i] = t1[i]-t1[i+L]
    # print(data.shape)
    cc = pd.DataFrame(cc)
    # print(cc.head())
    # print(cc.shape)
    dd = pd.DataFrame(dd)
    # print(dd.head())
    # print(dd.tail())
    # print(dd.shape)
    cc.columns = ['a']
    dd.columns = ['a']
    # Capacity constant used for the 10% ramp threshold.
    if turb =='ge':
        max_y = 53*1.5
    else:
        max_y = 221
    temp = dd[dd["a"]>0.1*max_y]
    ind = temp.index.tolist()
    temp2 = cc[cc["a"]>0.1*max_y]
    ind2 = temp2.index.tolist()
    if sign ==0:
        return temp2,ind2
    return temp,ind
def fun3(q,d,turb,sign,offset,L):
    """SPOT-style anomaly sweep over the ramp events from fun1.

    Initialises a threshold zq with pot_func on a calibration window of
    n = 1500 points, then walks the remaining points: values above zq are
    recorded as anomalies, values between t and zq refit the GPD via
    grimshaw/calT and update zq.  Returns (ck1, ck2): anomaly positions
    shifted into the two index ranges the trailing comments label 2009 and
    2010.
    """
    n = 1500
    data,ind = fun1(n,d,turb,sign,L)
    # now x is delta power
    x = data.values
    n2 = len(x)
    M = np.zeros(n2+2,float)
    y = np.zeros(n2+2,float)
    # wstar = df.values
    # M[d+1] = np.mean(wstar)
    xp = np.zeros(n2,float)
    # NOTE(review): `list = []` shadows the builtin and is never used.
    list = []
    zq,t = pot_func(x[d+1:d+n],q)
    zzq =zq*np.ones(n2)
    # A is a set of anomalies
    Avalue = []
    Aindex = []
    k = n
    k2 = len(x)-n-d
    yt = []
    no_t = 0
    result = []
    for i in range(d+n,d+n+k2):
        if x[i]>zq:
            # print("yeah1")
            Avalue.append(x[i])
            Aindex.append(i)
            # M[i+1] = M[i]
        elif x[i]>t:
            print("yeah2")
            # NOTE(review): xp is all zeros and never written, so this
            # excess is 0 - t rather than x[i] - t -- likely a bug; confirm.
            y[i] = xp[i]-t
            yt.append(y[i])
            no_t = no_t +1
            k = k+1
            # Refit the GPD on the accumulated excesses and move zq.
            gam,sig = grimshaw(yt)
            zq = calT(q,gam,sig,k,no_t,t)
            # wstar =np.append(wstar[1:],x[i])
            # M[i+1] = np.mean(wstar)
            zzq[i+1] = zq
        else:
            k = k+1
    # print(len(Avalue))
    # print(len(Aindex))
    Aindex = np.array(Aindex)
    # print(ind[:5])
    # print(ind[-5:])
    # Split anomaly positions into the two fixed index windows, shifting by
    # the magic offsets 10091 and 52560 (see trailing year comments).
    ck1 = []
    ck2 = []
    for i in range(len(Aindex)):
        if ind[Aindex[i]]<52560+10091-offset and ind[Aindex[i]]>10091-offset:
            ck1.append(ind[Aindex[i]]-10091+offset)
        elif ind[Aindex[i]]>52560+10091-offset :
            ck2.append(ind[Aindex[i]]-10091-52560+offset)
    # print(x[ind[Aindex[0]]])
    # print(ck1[:5])
    # print(ck1[-5:])
    # print(ck2[:5])
    # print(ck2[-5:])
    # ck1 : 2009
    # ck2 : 2010
    return ck1,ck2
def main():
    # plot_m()
    # fun1(10,30,"mit")
    # NOTE(review): fun3 is defined above with six parameters
    # (q, d, turb, sign, offset, L); this call supplies only four and will
    # raise TypeError at runtime -- confirm the intended arguments.
    fun3(20,200,1,"mit")
"""
"""
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | nyc1893.noreply@github.com |
4fa2ed4953655341861de67ea4bdc73d68fb9f5f | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /Examples/bokeh/charts/file/scatter_multi.py | 0d60b5f6af7af7857ba99101beed2f2f5abe3f18 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 2,207 | py |
import pandas as pd
from bokeh.charts import Scatter, output_file, show, vplot, hplot, defaults
from bokeh.charts.operations import blend
from bokeh.charts.utils import df_from_json
from bokeh.sampledata.autompg import autompg as df
from bokeh.sampledata.iris import flowers
from bokeh.sampledata.olympics2014 import data
defaults.plot_width = 450
defaults.plot_height = 400
scatter0 = Scatter(
df, x='mpg', title="x='mpg'", xlabel="Miles Per Gallon")
scatter1 = Scatter(
df, x='mpg', y='hp', title="x='mpg', y='hp'",
xlabel="Miles Per Gallon", ylabel="Horsepower", legend='top_right')
scatter2 = Scatter(
df, x='mpg', y='hp', color='cyl', title="x='mpg', y='hp', color='cyl'",
xlabel="Miles Per Gallon", ylabel="Horsepower", legend='top_right')
scatter3 = Scatter(
df, x='mpg', y='hp', color='origin', title="x='mpg', y='hp', color='origin', "
"with tooltips",
xlabel="Miles Per Gallon", ylabel="Horsepower",
legend='top_right', tooltips=[('origin', "@origin")])
scatter4 = Scatter(
df, x='mpg', y='hp', color='cyl', marker='origin', title="x='mpg', y='hp', color='cyl', marker='origin'",
xlabel="Miles Per Gallon", ylabel="Horsepower", legend='top_right')
# Example with nested json/dict like data, which has been pre-aggregated and pivoted
df2 = df_from_json(data)
df2 = df2.sort('total', ascending=False)
df2 = df2.head(10)
df2 = pd.melt(df2, id_vars=['abbr', 'name'])
scatter5 = Scatter(
df2, x='value', y='name', color='variable', title="x='value', y='name', color='variable'",
xlabel="Medals", ylabel="Top 10 Countries", legend='bottom_right')
scatter6 = Scatter(flowers, x=blend('petal_length', 'sepal_length', name='length'),
y=blend('petal_width', 'sepal_width', name='width'), color='species',
title='x=petal_length+sepal_length, y=petal_width+sepal_width, color=species',
legend='top_right')
scatter6.title_text_font_size = '10pt'
output_file("scatter_multi.html", title="scatter_multi.py example")
show(vplot(
hplot(scatter0, scatter1),
hplot(scatter2, scatter3),
hplot(scatter4, scatter5),
hplot(scatter6)
))
| [
"wgyumg@mgail.com"
] | wgyumg@mgail.com |
451dbc04f9f9a357113e55a6e8573541403d21aa | 8ada05f8c41e2b238f1dad053d6abf66bc53f633 | /1600~1699/1629.py | 1db19048448ac5d342d57fec807edb6153a4b40b | [] | no_license | chulhee23/BaekJoon_Online_Judge | 84ccd9f2223ea57ab305b9ee27dac0c7e8222df4 | b1afcaaba63e49552363a003ff2be8a5878a78a7 | refs/heads/master | 2020-09-22T00:18:39.822112 | 2019-11-24T08:21:00 | 2019-11-24T08:21:00 | 224,983,873 | 0 | 1 | null | 2019-11-30T08:44:22 | 2019-11-30T08:44:21 | null | UTF-8 | Python | false | false | 484 | py |
# 문제
# 자연수 A를 B번 곱한 수를 알고 싶다.
# 단 구하려는 수가 매우 커질 수 있으므로
# 이를 C로 나눈 나머지를 구하는 프로그램을 작성하시오.
#
# 입력
# 첫째 줄에 A, B, C가 빈 칸을 사이에 두고 순서대로 주어진다.
# A, B, C는 모두 2,147,483,647 이하의 자연수이다.
#
# 출력
# 첫째 줄에 A를 B번 곱한 수를 C로 나눈 나머지를 출력한다.
print(pow(*map(int, input().split())))
| [
"alstn2468_@naver.com"
] | alstn2468_@naver.com |
a5d288adf3bf422f7a0fc81863207a92aadde21d | d9f52125601ec26f79202f0e912891b31b60ffc4 | /오후반/Introduction/6_Write_a_function/6_JHW.py | c190540064743a984c2c22f1d8da69ea32296212 | [] | no_license | YoungGaLee/2020_Python_coding-study | 5a4f36a39021c89ac773a3a7878c44bf8b0b811f | b876aabc747709afa21035c3afa7e3f7ee01b26a | refs/heads/master | 2022-12-12T13:34:44.729245 | 2020-09-07T04:07:48 | 2020-09-07T04:07:48 | 280,745,587 | 4 | 4 | null | 2020-07-22T03:27:22 | 2020-07-18T21:51:40 | Python | UTF-8 | Python | false | false | 201 | py | def is_leap(year):
leap = False
if year%400==0:
leap = True
if year==1992:
leap = True
# Write your logic here
return leap
year = int(input())
print(is_leap(year))
| [
"noreply@github.com"
] | YoungGaLee.noreply@github.com |
467d21c0b49dff3c3bce69d7006e14d3f82a3063 | 9dc00969cbb9ab462836c548927c0a471f8a9737 | /05列表优化及去重/05-method1.py | bcb0163750b2b7f0ba3004ea8d561e79ec761c37 | [] | no_license | wan230114/JUN-Python | ad2cb901881b3b6f07a7cdd42ac0ff2c86e8df56 | 3619c8e392c241bd1c9b57dbc16a1f2f09ccc921 | refs/heads/master | 2021-05-01T09:51:01.309505 | 2018-08-31T07:12:31 | 2018-08-31T07:12:31 | 121,096,578 | 0 | 0 | null | 2018-02-11T07:34:54 | 2018-02-11T07:15:42 | null | UTF-8 | Python | false | false | 468 | py | #!usr/bin/python
row = open('row_list', 'r')
clean = open('clean_list', 'w+')
uni = open('uni', 'w')
for line in row:
new_line=''
for line1 in line.strip():
if line1=='.':break
new_line=new_line+line1
clean.write(new_line+'\n')
print new_line
clean.close()
clean = open('clean_list', 'r')
unique = set(clean)
for line in unique:
uni.write(line)
row.close()
clean.close()
uni.close()
| [
"1170101471@qq.com"
] | 1170101471@qq.com |
f83748286858d4ca788884511300b237b5cf3cd0 | eba4934e6af6ec85d286c336dbc2c0f27013110d | /mxnetseg/data/cityscapes.py | 8bd95d8aab0fa9d8025b6819fd3934d769a7c0c9 | [
"Apache-2.0"
] | permissive | jeou/MXNetSeg | a87c349a12c7c2a77018628e5a231cf8624a5e1c | 8a700aa2c2d939fce11b52bde7291ef231c9bfaa | refs/heads/master | 2022-10-21T18:24:18.864007 | 2020-06-14T10:10:18 | 2020-06-14T10:10:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,758 | py | # coding=utf-8
# Adapted from: https://github.com/dmlc/gluon-cv/blob/master/gluoncv/data/cityscapes.py
import os
import mxnet as mx
import numpy as np
from PIL import Image
from gluoncv.data.segbase import SegmentationDataset
class CitySegmentation(SegmentationDataset):
NUM_CLASS = 19
def __init__(self, root=os.path.expanduser('~/.mxnet/datasets/citys'), split='train',
mode=None, transform=None, **kwargs):
super(CitySegmentation, self).__init__(
root, split, mode, transform, **kwargs)
self.images, self.mask_paths = _get_city_pairs(self.root, self.split)
assert (len(self.images) == len(self.mask_paths))
self.valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22,
23, 24, 25, 26, 27, 28, 31, 32, 33]
self._key = np.array([-1, -1, -1, -1, -1, -1,
-1, -1, 0, 1, -1, -1,
2, 3, 4, -1, -1, -1,
5, -1, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15,
-1, -1, 16, 17, 18])
self._mapping = np.array(range(-1, len(self._key) - 1)).astype('int32')
def _class_to_index(self, mask):
# assert the values
values = np.unique(mask)
for value in values:
assert (value in self._mapping)
index = np.digitize(mask.ravel(), self._mapping, right=True)
return self._key[index].reshape(mask.shape)
def __getitem__(self, index):
img = Image.open(self.images[index]).convert('RGB')
if self.mode == 'test':
img = self._img_transform(img)
if self.transform is not None:
img = self.transform(img)
return img, os.path.basename(self.images[index])
mask = Image.open(self.mask_paths[index])
# synchronized transform
if self.mode == 'train':
img, mask = self._sync_transform(img, mask)
elif self.mode == 'val':
img, mask = self._val_sync_transform(img, mask)
else:
assert self.mode == 'testval'
img, mask = self._img_transform(img), self._mask_transform(mask)
# general resize, normalize and toTensor
if self.transform is not None:
img = self.transform(img)
return img, mask
def _mask_transform(self, mask):
target = self._class_to_index(np.array(mask).astype('int32'))
return mx.nd.array(target).astype('int32')
def __len__(self):
return len(self.images)
@property
def pred_offset(self):
return 0
@property
def classes(self):
return ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole',
'traffic light', 'traffic sign', 'vegetation', 'terrain',
'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train',
'motorcycle', 'bicycle')
def _get_path_pairs(img_folder, mask_folder):
img_paths = []
mask_paths = []
for root, _, files in os.walk(img_folder):
for filename in files:
if filename.endswith(".png"):
imgpath = os.path.join(root, filename)
foldername = os.path.basename(os.path.dirname(imgpath))
maskname = filename.replace('leftImg8bit', 'gtFine_labelIds')
maskpath = os.path.join(mask_folder, foldername, maskname)
if os.path.isfile(imgpath) and os.path.isfile(maskpath):
img_paths.append(imgpath)
mask_paths.append(maskpath)
else:
print('cannot find the mask or image:', imgpath, maskpath)
print('Found {} images in the folder {}'.format(len(img_paths), img_folder))
return img_paths, mask_paths
def _get_city_pairs(folder, split='train'):
if split in ('train', 'val', 'test'):
img_folder = os.path.join(folder, 'leftImg8bit/' + split)
mask_folder = os.path.join(folder, 'gtFine/' + split)
img_paths, mask_paths = _get_path_pairs(img_folder, mask_folder)
return img_paths, mask_paths
else:
assert split == 'trainval'
train_img_folder = os.path.join(folder, 'leftImg8bit/train')
train_mask_folder = os.path.join(folder, 'gtFine/train')
val_img_folder = os.path.join(folder, 'leftImg8bit/val')
val_mask_folder = os.path.join(folder, 'gtFine/val')
train_img_paths, train_mask_paths = _get_path_pairs(train_img_folder, train_mask_folder)
val_img_paths, val_mask_paths = _get_path_pairs(val_img_folder, val_mask_folder)
img_paths = train_img_paths + val_img_paths
mask_paths = train_mask_paths + val_mask_paths
return img_paths, mask_paths
| [
"bebdong@outlook.com"
] | bebdong@outlook.com |
bce858e5ade4868147186f01125ce9ce7fdd7da8 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /6kseWBaSTv6GgaKDS_13.py | 450ef2b91638e545ce5a4241819dcf90166b90f5 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py |
def next_letters(s):
if s == '' :
return 'A'
elif s.count('Z') == len(s) :
return (len(s)+1)*'A'
else :
z = 0 # number of z at the end of our string
while s[::-1][z] == 'Z' : z += 1
pA, pZ = s[::-1][:z:-1], s[-z-1:] # splitting into 2 parts : constant/changed
pZ = chr(ord(pZ[0])+1) + (len(pZ)-1)*'A' # changing first value to its next one and Z to As
return pA + pZ
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
a63169009d6cf39d1f227d2d919b8ec6de5bfdb6 | f5863cf378bce80d3aa459941dff79ea3c8adf5d | /SWEA/SW_TEST/SWEA_1949/SWEA_1949.py | fb26396032d958d97a1532f3d9ec7a520523d8a2 | [] | no_license | Taeg92/Problem_solving | 815c13ae7895708948482eeb05411322be00ac12 | 15c0fe0eda4f77d974451777cb01d10882d8aaa9 | refs/heads/master | 2021-11-18T22:03:21.727840 | 2021-09-06T14:21:09 | 2021-09-06T14:21:09 | 235,335,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,159 | py | # Problem [1949] : 등산로 조정
import sys
sys.stdin = open('input.txt')
dx = (-1, 0, 1, 0)
dy = (0, 1, 0, -1)
def DFS(x, y, d, k):
global m
if m < d:
m = d
C[x][y] = 1
for i in range(4):
nx = x + dx[i]
ny = y + dy[i]
if 0 <= nx < N and 0 <= ny < N and not C[nx][ny]:
if D[x][y] <= D[nx][ny]:
if D[x][y] > D[nx][ny] - k:
temp = D[nx][ny]
D[nx][ny] = D[x][y]-1
DFS(nx,ny,d+1,0)
D[nx][ny] = temp
else:
DFS(nx,ny,d+1,k)
C[x][y] = 0
def get_Max(arr):
m = 0
for col in arr:
m = max(m,max(col))
return m
if __name__ == "__main__":
T= int(input())
for tc in range(1, T+1):
N, K = map(int, input().split())
D = [list(map(int, input().split())) for _ in range(N)]
maxD = get_Max(D)
L = [[i,j] for i in range(N) for j in range(N) if D[i][j] == maxD]
C = [[0]*N for _ in range(N)]
m = 0
for axis in L:
c, r = axis
DFS(c,r,1,K)
print('#{} {}'.format(tc,m)) | [
"gtg92t@gmail.com"
] | gtg92t@gmail.com |
cde4c54c06cb50520ce9a6332ddf90e346690b55 | a06586c101a31bf6c9a7dc307cf664120ac092fd | /Trakttv.bundle/Contents/Code/core/helpers.py | 0a18e71922fa1e88e21953945d60d23bf924f0e6 | [] | no_license | HaKDMoDz/Plex-Trakt-Scrobbler | 22dd1d8275698761cb20a402bce4c5bef6e364f9 | 6d46cdd1bbb99a243b8628d6c3996d66bb427823 | refs/heads/master | 2021-01-22T00:10:18.699894 | 2015-05-25T23:52:45 | 2015-05-25T23:52:45 | 37,312,507 | 2 | 0 | null | 2015-06-12T09:00:54 | 2015-06-12T09:00:53 | null | UTF-8 | Python | false | false | 8,062 | py | from core.logger import Logger
import hashlib
import inspect
import re
import sys
import threading
import time
import unicodedata
log = Logger('core.helpers')
PY25 = sys.version_info[0] == 2 and sys.version_info[1] == 5
def try_convert(value, value_type, default=None):
try:
return value_type(value)
except ValueError:
return default
except TypeError:
return default
def add_attribute(target, source, key, value_type=str, func=None, target_key=None):
if target_key is None:
target_key = key
value = try_convert(source.get(key, None), value_type)
if value:
target[target_key] = func(value) if func else value
def merge(a, b):
a.update(b)
return a
def all(items):
for item in items:
if not item:
return False
return True
def any(items):
for item in items:
if item:
return True
return False
def json_import():
try:
import simplejson as json
log.info("Using 'simplejson' module for JSON serialization")
return json, 'json'
except ImportError:
pass
# Try fallback to 'json' module
try:
import json
log.info("Using 'json' module for JSON serialization")
return json, 'json'
except ImportError:
pass
# Try fallback to 'demjson' module
try:
import demjson
log.info("Using 'demjson' module for JSON serialization")
return demjson, 'demjson'
except ImportError:
log.warn("Unable to find json module for serialization")
raise Exception("Unable to find json module for serialization")
# Import json serialization module
JSON, JSON_MODULE = json_import()
# JSON serialization wrappers to simplejson/json or demjson
def json_decode(s):
if JSON_MODULE == 'json':
return JSON.loads(s)
if JSON_MODULE == 'demjson':
return JSON.decode(s)
raise NotImplementedError()
def json_encode(obj):
if JSON_MODULE == 'json':
return JSON.dumps(obj)
if JSON_MODULE == 'demjson':
return JSON.encode(obj)
raise NotImplementedError()
def str_format(s, *args, **kwargs):
"""Return a formatted version of S, using substitutions from args and kwargs.
(Roughly matches the functionality of str.format but ensures compatibility with Python 2.5)
"""
args = list(args)
x = 0
while x < len(s):
# Skip non-start token characters
if s[x] != '{':
x += 1
continue
end_pos = s.find('}', x)
# If end character can't be found, move to next character
if end_pos == -1:
x += 1
continue
name = s[x + 1:end_pos]
# Ensure token name is alpha numeric
if not name.isalnum():
x += 1
continue
# Try find value for token
value = args.pop(0) if args else kwargs.get(name)
if value:
value = str(value)
# Replace token with value
s = s[:x] + value + s[end_pos + 1:]
# Update current position
x = x + len(value) - 1
x += 1
return s
def str_pad(s, length, align='left', pad_char=' ', trim=False):
if not s:
return s
if not isinstance(s, (str, unicode)):
s = str(s)
if len(s) == length:
return s
elif len(s) > length and not trim:
return s
if align == 'left':
if len(s) > length:
return s[:length]
else:
return s + (pad_char * (length - len(s)))
elif align == 'right':
if len(s) > length:
return s[len(s) - length:]
else:
return (pad_char * (length - len(s))) + s
else:
raise ValueError("Unknown align type, expected either 'left' or 'right'")
def pad_title(value):
"""Pad a title to 30 characters to force the 'details' view."""
return str_pad(value, 30, pad_char=' ')
def total_seconds(span):
return (span.microseconds + (span.seconds + span.days * 24 * 3600) * 1e6) / 1e6
def sum(values):
result = 0
for x in values:
result = result + x
return result
def timestamp():
return int(time.time())
# <bound method type.start of <class 'Scrobbler'>>
RE_BOUND_METHOD = Regex(r"<bound method (type\.)?(?P<name>.*?) of <(class '(?P<class>.*?)')?")
def get_func_name(obj):
if inspect.ismethod(obj):
match = RE_BOUND_METHOD.match(repr(obj))
if match:
cls = match.group('class')
if not cls:
return match.group('name')
return '%s.%s' % (
match.group('class'),
match.group('name')
)
return None
def get_class_name(cls):
if not inspect.isclass(cls):
cls = getattr(cls, '__class__')
return getattr(cls, '__name__')
def spawn(func, *args, **kwargs):
thread_name = kwargs.pop('thread_name', None) or get_func_name(func)
def wrapper(thread_name, args, kwargs):
try:
func(*args, **kwargs)
except Exception, ex:
log.error('Thread "%s" raised an exception: %s', thread_name, ex, exc_info=True)
thread = threading.Thread(target=wrapper, name=thread_name, args=(thread_name, args, kwargs))
try:
thread.start()
log.debug("Spawned thread with name '%s'" % thread_name)
except thread.error, ex:
log.error('Unable to spawn thread: %s', ex, exc_info=True, extra={
'data': {
'active_count': threading.active_count()
}
})
return None
return thread
def schedule(func, seconds, *args, **kwargs):
def schedule_sleep():
time.sleep(seconds)
func(*args, **kwargs)
spawn(schedule_sleep)
def build_repr(obj, keys):
key_part = ', '.join([
('%s: %s' % (key, repr(getattr(obj, key))))
for key in keys
])
cls = getattr(obj, '__class__')
return '<%s %s>' % (getattr(cls, '__name__'), key_part)
def plural(value):
if type(value) is list:
value = len(value)
if value == 1:
return ''
return 's'
def get_pref(key, default=None):
if Dict['preferences'] and key in Dict['preferences']:
return Dict['preferences'][key]
return Prefs[key] or default
def join_attributes(**kwargs):
fragments = [
(('%s: %s' % (key, value)) if value else None)
for (key, value) in kwargs.items()
]
return ', '.join([x for x in fragments if x])
def get_filter(key, normalize_values=True):
value = get_pref(key)
if not value:
return None, None
value = value.strip()
# Allow all if wildcard (*) or blank
if not value or value == '*':
return None, None
values = value.split(',')
allow, deny = [], []
for value in [v.strip() for v in values]:
inverted = False
# Check if this is an inverted value
if value.startswith('-'):
inverted = True
value = value[1:]
# Normalize values (if enabled)
if normalize_values:
value = flatten(value)
# Append value to list
if not inverted:
allow.append(value)
else:
deny.append(value)
return allow, deny
def normalize(text):
if text is None:
return None
# Normalize unicode characters
if type(text) is unicode:
text = unicodedata.normalize('NFKD', text)
# Ensure text is ASCII, ignore unknown characters
return text.encode('ascii', 'ignore')
def flatten(text):
if text is None:
return None
# Normalize `text` to ascii
text = normalize(text)
# Remove special characters
text = re.sub('[^A-Za-z0-9\s]+', '', text)
# Merge duplicate spaces
text = ' '.join(text.split())
# Convert to lower-case
return text.lower()
def md5(value):
# Generate MD5 hash of key
m = hashlib.md5()
m.update(value)
return m.hexdigest()
| [
"gardiner91@gmail.com"
] | gardiner91@gmail.com |
111e6b5d2743a33767a2b20fe559d5eb6d64d37b | 21a1b71ab16d81cb82cae39736b196306df2d471 | /road_detection2/4.2.road_detecting_using_cam.py | ab8ae27530c48b14ce652d0d1a1a737b7d192275 | [] | no_license | aiegoo/jet-notebook | 2eb10ffbe100c609ab5cf356c905e84d9129f2a7 | dc3dc58cb5c5e238bc55250d1129b0427757486b | refs/heads/master | 2023-02-07T21:30:39.800359 | 2021-01-04T01:59:51 | 2021-01-04T01:59:51 | 318,427,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,223 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import cv2, time
from jetbot import Camera
from jetbot import bgr8_to_jpeg
import ipywidgets.widgets as widgets
import traitlets
# In[2]:
def preprocessing(img):
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
gray = cv2.equalizeHist(gray)
gray = cv2.GaussianBlur(gray, (7,7),0)
return gray
def thresholding(img_gray):
_, img_th = cv2.threshold(img_gray,np.average(img_gray)-40,255,cv2.THRESH_BINARY)
img_th2 = cv2.adaptiveThreshold(img_gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV,21,15)
img_th3 = np.bitwise_and(img_th, img_th2)
img_th4 = cv2.subtract(img_th2, img_th3)
for i in range(5):
img_th4 = cv2.medianBlur(img_th4, 5)
return img_th4
def mask_roi(img_th, roi):
mask = np.zeros_like(img_th)
cv2.fillPoly(mask, np.array([roi], np.int32), 255)
masked_image = cv2.bitwise_and(img_th, mask)
return masked_image
def drawContours(img_rgb, contours):
for cnt in contours:
area = cv2.contourArea(cnt)
cv2.drawContours(img_rgb, [cnt], 0, (255,0,0), 1)
return img_rgb
def approximationContour(img, contours, e=0.02):
for cnt in contours:
x, y, w, h = cv2.boundingRect(cnt)
epsilon = e*cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, epsilon, True)
cv2.drawContours(img, [approx], 0, (0,255,255), 2)
return img
def rectwithname(img, contours, e=0.02):
result = img.copy()
for cnt in contours:
x, y, w, h = cv2.boundingRect(cnt)
epsilon = e*cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, epsilon, True)
cv2.rectangle(result,(x,y),(x+w,y+h),(255,0,255),2)
return result
def find_midptr(contours):
center_ptrs = []
e=0.01
for cnt in contours:
x, y, w, h = cv2.boundingRect(cnt)
center_ptr = [y, x + 0.5*w,]
center_ptrs.append(center_ptr)
center_ptrs = np.array(center_ptrs)
return center_ptrs
def find_midlane(center_ptrs, center_image_point):
L2_norm = np.linalg.norm((center_ptrs - center_image_point), axis=1, ord=2)
loc = np.where(L2_norm==L2_norm.min())[0][0]
midlane = center_ptrs[loc]
return midlane
def find_degree(center_image_point, midlane):
return 57.2958*np.arctan((midlane[1] - center_image_point[1])/(center_image_point[0] - midlane[0]))
# In[3]:
width = 224
height = 224
camera = Camera.instance()
input_image = widgets.Image(format='jpeg', width=width, height=height)
result1 = widgets.Image(format='jpeg', width=width, height=height)
result2 = widgets.Image(format='jpeg', width=width, height=height)
result3 = widgets.Image(format='jpeg', width=width, height=height)
result4 = widgets.Image(format='jpeg', width=width, height=height)
image_box = widgets.HBox([input_image, result1, result2, result3, result4], layout=widgets.Layout(align_self='center'))
display(image_box)
# display(result)
# In[4]:
count = 0
while True:
img = camera.value
img_gray = preprocessing(img)
img_th = thresholding(img_gray)
roi = [(0, height),(0, height/2-30), (width, height/2-30),(width, height),]
img_roi = mask_roi(img_th, roi)
kernel = np.ones((5,3),np.uint8)
img_cl = cv2.morphologyEx(img_roi,cv2.MORPH_CLOSE, np.ones((5,5),np.uint8),iterations=4)
img_op = cv2.morphologyEx(img_cl,cv2.MORPH_OPEN, np.ones((5,5),np.uint8),iterations=3)
cannyed_image = cv2.Canny(img_op, 300, 500)
contours, _ = cv2.findContours(cannyed_image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
img_approx = approximationContour(img, contours, e=0.02)
img_approx_rect = rectwithname(img, contours, e=0.01)
center_ptrs = find_midptr(contours)
center_image_point = [height-1, width/2-1]
midlane = find_midlane(center_ptrs, center_image_point)
seta = find_degree(center_image_point, midlane)
cv2.line(img,(int(center_image_point[1]), int(center_image_point[0])),(int(midlane[1]),int(midlane[0])),(0,0,255),3)
cv2.putText(img, f'{seta}', (int(midlane[1]), int(midlane[0])-5), cv2.FONT_HERSHEY_COMPLEX, 0.5,(255, 0, 0), 1)
result_img1 = img_th
result_img2 = img_cl
result_img3 = img_op
result_img4 = img
#show results
result_imgs = [result_img1, result_img2, result_img3, result_img4]
result_values = [result1, result2, result3, result4]
for result_img, result_value in zip(result_imgs, result_values):
# if len(result_img.shape)==2:
# result_img = np.stack((result_img,)*3,2)
result_value.value = bgr8_to_jpeg(result_img)
input_image.value = bgr8_to_jpeg(img_gray)
if count ==1000:
break
else:
count = count +1
# print(count, end=' ')
time.sleep(0.1)
# In[5]:
def search_road(img):
img_gray = preprocessing(img)
img_th = thresholding(img_gray)
roi = [(0, height),(0, height/2-30), (width, height/2-30),(width, height),]
img_roi = mask_roi(img_th, roi)
kernel = np.ones((5,3),np.uint8)
img_cl = cv2.morphologyEx(img_roi,cv2.MORPH_CLOSE, np.ones((5,5),np.uint8),iterations=4)
img_op = cv2.morphologyEx(img_cl,cv2.MORPH_OPEN, np.ones((5,5),np.uint8),iterations=3)
cannyed_image = cv2.Canny(img_op, 300, 500)
contours, _ = cv2.findContours(cannyed_image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
center_ptrs = find_midptr(contours)
center_image_point = [height-1, width/2-1]
midlane = find_midlane(center_ptrs, center_image_point)
seta = find_degree(center_image_point, midlane)
cv2.line(img,(int(center_image_point[1]), int(center_image_point[0])),(int(midlane[1]),int(midlane[0])),(0,0,255),3)
cv2.putText(img, f'{seta}', (int(midlane[1]), int(midlane[0])-5), cv2.FONT_HERSHEY_COMPLEX, 0.5,(255, 0, 0), 1)
return img, seta
# In[6]:
count = 0
while True:
img = camera.value
img_result, seta = search_road(img)
input_image.value = bgr8_to_jpeg(img_gray)
result1.value = bgr8_to_jpeg(img_result)
if count ==20:
break
else:
count = count +1
# print(count, end=' ')
time.sleep(0.1)
# In[ ]:
| [
"eozz21@gmail.com"
] | eozz21@gmail.com |
29c691b36c36a0ce2059bd0c694533d59f1f55e2 | b1392c69fcbdcb5ecd798b473e22a6ce9e2e8e44 | /CorazonPet/apps/tipo_mascota/admin.py | b0b445cb697c981a0e7be68febad73d93172206b | [] | no_license | joselofierro/CorazonPet | 34a1b9a3ea72d81f48f1059b6b27ad080f643738 | f92c297b16e8b133c57af73560efef34c064c104 | refs/heads/master | 2021-11-28T19:13:45.353617 | 2018-01-09T22:42:52 | 2018-01-09T22:42:52 | 111,141,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | from django.contrib import admin
# Register your models here.
from apps.tipo_mascota.models import TipoMascota
admin.site.register(TipoMascota) | [
"juliofierro@Mac-mini-de-JULIO.local"
] | juliofierro@Mac-mini-de-JULIO.local |
6a2ec327abe96a1032005e64bb9bbca2e24d3680 | aa76391d5789b5082702d3f76d2b6e13488d30be | /Private Project/Web Scrap/practice/ruliweb_login.py | bae40ed151b3f4cf80c2d9f9b8e1e5a97ec6b465 | [] | no_license | B2SIC/python_playground | 118957fe4ca3dc9395bc78b56825b9a014ef95cb | 14cbc32affbeec57abbd8e8c4ff510aaa986874e | refs/heads/master | 2023-02-28T21:27:34.148351 | 2021-02-12T10:20:49 | 2021-02-12T10:20:49 | 104,154,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | import requests
from getpass import getpass
from bs4 import BeautifulSoup
getId = input("ID: ")
getPw = getpass("Pw: ")
# 로그인 유저 정보
LOGIN_INFO = {
'user_id': getId,
'user_pw': getPw
}
# Session 생성, with 구문안에서 유지
with requests.Session() as s:
login_req = s.post("https://user.ruliweb.com/member/login_proc", data=LOGIN_INFO)
# HTML 소스 확인
# print(login_req.text)
# Header 확인
# print(login_req.headers)
if login_req.status_code == 200 and login_req.ok:
post_one = s.get('http://market.ruliweb.com/read.htm?table=market_pcsoft&page=1&num=129102&find=&ftext=')
post_one.raise_for_status()
soup = BeautifulSoup(post_one.text, 'html.parser')
article = soup.select("tr tr > td.con")[1].find_all('span') # or 'p'
for e in article:
if e.string is not None:
print(e.string)
| [
"the_basic_@kookmin.ac.kr"
] | the_basic_@kookmin.ac.kr |
355af19dc2658d6f894db7b2443035b55fd6cc83 | 1eb2d7d2a6e945a9bc487afcbc51daefd9af02e6 | /spider/paperspider/papers/papers/spiders/conference2cn.py | 578441368a8e57ac02bdea595a983211dac5dc19 | [] | no_license | fengges/eds | 11dc0fdc7a17b611af1f61894f497ad443439bfe | 635bcf015e3ec12e96949632c546d29fc99aee31 | refs/heads/master | 2021-06-20T04:43:02.019309 | 2019-06-20T12:55:26 | 2019-06-20T12:55:26 | 133,342,023 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,558 | py | # # -*- coding: utf-8 -*-
#
# import json
# import scrapy
# import os
# import execjs
# from spider.paperspider.papers.papers.services.paperservices import paper_service
# from spider.paperspider.papers.papers.items import *
#
# root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
#
#
# class PaperSpider(scrapy.Spider):
# handle_httpstatus_list = [403]
# name = 'conference_translate'
# allowed_domains = []
# start_urls = ['http://www.baidu.com/']
#
# def parse(self, response):
# self.ctx = execjs.compile("""
# function TL(a) {
# var k = "";
# var b = 406644;
# var b1 = 3293161072;
#
# var jd = ".";
# var $b = "+-a^+6";
# var Zb = "+-3^+b+-f";
#
# for (var e = [], f = 0, g = 0; g < a.length; g++) {
# var m = a.charCodeAt(g);
# 128 > m ? e[f++] = m : (2048 > m ? e[f++] = m >> 6 | 192 : (55296 == (m & 64512) && g + 1 < a.length && 56320 == (a.charCodeAt(g + 1) & 64512) ? (m = 65536 + ((m & 1023) << 10) + (a.charCodeAt(++g) & 1023),
# e[f++] = m >> 18 | 240,
# e[f++] = m >> 12 & 63 | 128) : e[f++] = m >> 12 | 224,
# e[f++] = m >> 6 & 63 | 128),
# e[f++] = m & 63 | 128)
# }
# a = b;
# for (f = 0; f < e.length; f++) a += e[f],
# a = RL(a, $b);
# a = RL(a, Zb);
# a ^= b1 || 0;
# 0 > a && (a = (a & 2147483647) + 2147483648);
# a %= 1E6;
# return a.toString() + jd + (a ^ b)
# };
#
# function RL(a, b) {
# var t = "a";
# var Yb = "+";
# for (var c = 0; c < b.length - 2; c += 3) {
# var d = b.charAt(c + 2),
# d = d >= t ? d.charCodeAt(0) - 87 : Number(d),
# d = b.charAt(c + 1) == Yb ? a >>> d: a << d;
# a = b.charAt(c) == Yb ? a + d & 4294967295 : a ^ d
# }
# return a
# }
# """)
# paper_list = open(root+'\\file\\conference.txt', 'r', encoding='utf8')
# print(len(paper_list))
# if not paper_list:
# return
# for paper in paper_list:
# en_author = eval(paper["en_author"])[0]
# en_org = en_author["org"]
# if en_org == "":
# continue
# key = en_org
# url = self.getUrl(key)
# id = paper["_id"]
# if len(url) >= 16000:
# continue
# else:
# yield scrapy.Request(url, lambda arg1=response, arg2=id: self.PaperInfo(arg1, arg2), dont_filter=True)
#
# def getUrl(self, q):
# url="https://translate.google.cn/translate_a/single?client=t&sl=en&tl=zh-CN&hl=zh-CN&dt=at&dt=bd&dt=ex&dt=ld&dt=md&dt=qca&dt=rw&dt=rm&dt=ss&dt=t&ie=UTF-8&oe=UTF-8&source=btn&ssel=0&tsel=0&kc=0&tk="+self.getTk(q)+"&q="+q
# return url
#
# def getTk(self, text):
# return self.ctx.call("TL", text)
#
# def PaperInfo(self, response, id):
# s = str(response.body, encoding="utf-8")
# null = None
# true = True
# false = False
# if response.status == 403:
# return
# list = eval(s)
#
# cn = ""
# for l in list[0]:
# if l[0]:
# cn += l[0]
#
# print((cn, id))
# paper_service.update_engpaper((cn, id))
#
#
| [
"1059387928@qq.com"
] | 1059387928@qq.com |
c40bb05e041686ddc9a84e2e9662c6b79990c8ae | b4869228738cdcd1a5fcb10b2091b78415ca9741 | /algorithmic-toolbox/week6/knapsack.py | ac783dfb88a227e9feaa93a0399a47c5c62f643b | [] | no_license | mmanishh/coursera-algo-toolbox | 8d23d18985acfb33f44f08ac8c306fdb68dc3e88 | d3153904732ab5b4125fc8913fcea6e969028822 | refs/heads/master | 2020-04-13T19:17:03.421935 | 2019-01-14T11:15:06 | 2019-01-14T11:15:06 | 163,397,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | # Uses python3
import sys
def optimal_weight_naive(W, w):
# write your code here
result = 0
for x in w:
if result + x <= W:
result = result + x
return result
def optimal_weight(W,w):
n = len(w)
value = [[0 for j in range(W+1)] for i in range(n+1)]
for i in range(1,n+1):
for j in range(1,W+1):
#print("i: {0},j: {1}".format(i,j))
value[i][j]=value[i-1][j]
if w[i-1] <= j:
val = value[i-1][j-w[i-1]] + w[i-1] # replacing v[i] with w[i] because weight itself is value here
if val>value[i][j]:
value[i][j] = val
#print(np.array(value))
#print(np.array(value).shape)
return value[n][W]
if __name__ == '__main__':
input = sys.stdin.read()
W, n, *w = list(map(int, input.split()))
print(optimal_weight(W, w))
| [
"dfrozenthrone@gmail.com"
] | dfrozenthrone@gmail.com |
de75953113f38f1ca7a911263a44bf7b01dc222d | 0a0c6994c319981f7ba5d31eee3b074453ca4c0d | /autres/ahmedmelliti/module/__init__.py | b56955a8c3ed1a936aabb84a3da36d040ed5a34f | [] | no_license | arianacarnielli/2i013 | 678595c498cba5ec932ee9badd4868a08dad0663 | a804abb7c2337fe9963c2ddfcd73db80d8b787d8 | refs/heads/master | 2021-05-10T13:30:21.353951 | 2018-05-08T14:17:40 | 2018-05-08T14:17:40 | 118,476,287 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | #from .strategies import RandomStrategy
#from .strategies import *
from .strategies import *
from soccersimulator import SoccerTeam
def get_team(nb_players):
myteam = SoccerTeam(name="MaTeam")
#for i in range(nb_players):
# myteam.add("Joueur "+str(i) ,FonceurStrategy())
if nb_players==1:
myteam.add("Joueur "+str(0) ,FonceurStrategy())
if nb_players==2:
myteam.add("Joueur "+str(0) ,Defenseur())
myteam.add("Joueur "+str(1) ,FonceurStrategy())
if nb_players==4:
myteam.add("Joueur "+str(0) ,Defenseur())
myteam.add("Joueur "+str(1) ,Milieu())
myteam.add("Joueur "+str(2) ,FonceurStrategy())
myteam.add("Joueur "+str(3) ,FonceurStrategy())
return myteam
def get_team_challenge(num):
    """Build the challenge team "MaTeamChallenge".

    Only challenge number 1 is populated (a single forward); any other
    number returns an empty team.
    """
    challenge_team = SoccerTeam(name="MaTeamChallenge")
    if num == 1:
        challenge_team.add("Joueur Chal " + str(num), FonceurStrategy())
    return challenge_team
| [
"ariana.carnielli@gmail.com"
] | ariana.carnielli@gmail.com |
85c46754048661ed70e41167e6a0ba3fb08340df | bddc40a97f92fafb8cbbbfdbdfe6774996578bb0 | /exercicioLista_listas/ex04.py | 41c3c3e877a2ead3a68266440beefacbecb10b67 | [] | no_license | andrehmiguel/treinamento | 8f83041bd51387dd3e5cafed09c4bb0a08d0e375 | ed18e6a8cfba0baaa68757c12893c62a0938a67e | refs/heads/main | 2023-01-31T13:15:58.113392 | 2020-12-16T02:47:44 | 2020-12-16T02:47:44 | 317,631,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | # 4. Faça um Programa que leia um vetor de 10 caracteres, e diga quantas consoantes
# foram lidas. Imprima as consoantes.
# Reads 10 characters from the user and reports how many of them are
# consonants.
# Fix: the original counted ANY character that was not a lowercase vowel
# (digits, punctuation, uppercase vowels) as a consonant.  Now the input is
# lowercased and only alphabetic non-vowels are counted.
listaChar = []
consoantes = 0
print ('Informe 10 caracters')
for x in range(10):
    listaChar.append(input(f'Informe o {x + 1}º caracter: '))
    # Normalise case so 'A' and 'a' are treated alike; only letters that
    # are not vowels count as consonants.
    char = listaChar[x].lower()
    if char.isalpha() and char not in ('a', 'e', 'i', 'o', 'u'):
        consoantes += 1
print(f'Foram inseridas {consoantes} consoantes.')
print(listaChar) | [
"andrehmiguel@outlook.com"
] | andrehmiguel@outlook.com |
305a36881d7b04e423b340695be5258894e8f796 | 784359e29fce9b3cd7c4c2d71e5f0498dd6d4b5c | /src/test.py | 3c1dc8f967ea7179b24fec3646130ef5391d7ded | [] | no_license | lmorillas/imagenes-arasaac | 65740df76a8d8f43493464153eea08d93687df68 | 48bec6df2095dda1ab2db5b08b4dccaae1ffe5e0 | refs/heads/master | 2021-05-11T10:13:07.999743 | 2018-01-19T08:18:36 | 2018-01-19T08:18:36 | 118,097,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,024 | py | # coding: utf-8
'''
https://stackoverflow.com/questions/25615741/how-to-use-the-spanish-wordnet-in-nltk
http://python.6.x6.nabble.com/attachment/2743017/0/Python%252520Text%252520Processing%252520with%252520NLTK%2525202.0%252520Cookbook.pdf
https://www.pybonacci.org/2015/11/24/como-hacer-analisis-de-sentimiento-en-espanol-2/
http://www.tsc.uc3m.es/~miguel/MLG/adjuntos/NLTK.pdf
'''
import nltk
from nltk.tokenize import sent_tokenize
from nltk.tokenize import word_tokenize
from nltk.stem import SnowballStemmer

# Sample Spanish text used by every demo call below.
frase = '''Trabajo básicamente en el apartado de la comedia. Me gustaría
estar en Diseño de Programación, pero por desgracia aprobé el bachillerato.'''

# Sentence splitting with NLTK's default tokenizer.
sent_tokenize(frase)

# Same thing via an explicitly loaded Spanish Punkt model.
# Fix: the original first load used curly typographic quotes
# (tokenizers/punkt/...) around the string, which is a SyntaxError in
# Python; it also referenced `frase` handling before `frase` existed and
# duplicated the corrected line further down.  Kept one working load.
tokenizer = nltk.data.load('tokenizers/punkt/spanish.pickle')
tokenizer.tokenize(frase)

# Word tokenization and Spanish stemming.
word_tokenize(frase)
spanish_stemmer = SnowballStemmer("spanish")
tokens = nltk.word_tokenize(frase)
[spanish_stemmer.stem(t) for t in tokens]
| [
"morillas@gmail.com"
] | morillas@gmail.com |
06a5d6e41fa605cea837fb39e38f115ddd43e2f2 | ee974d693ca4c4156121f8cb385328b52eaac07c | /env/lib/python3.6/site-packages/pbr/pbr_json.py | c18362b03df827909c74e5c536a42156e97ecada | [] | no_license | ngonhi/Attendance_Check_System_with_Face_Recognition | f4531cc4dee565d0e45c02217f73f3eda412b414 | 92ff88cbc0c740ad48e149033efd38137c9be88d | refs/heads/main | 2023-03-12T07:03:25.302649 | 2021-02-26T15:37:33 | 2021-02-26T15:37:33 | 341,493,686 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:686172037b3d816c532f2e6aad2cfc06e034a7e538b9be33bf7a1f7a559cd64f
size 1284
| [
"Nqk180998!"
] | Nqk180998! |
815b00ec395cb0cfa575f77a0073e2eace93b7db | a9815c48ece2064c0b35e5f5ea76fa460ee67e43 | /Commands/Rainbow.py | 6716958404cb2c996b15c812739b04611ecea790 | [
"MIT"
] | permissive | Heufneutje/PyMoronBot | 1ca0ef3877efa4ca37de76a1af3862085515042e | 055abf0e685f3d2fc02863517952dc7fad9050f3 | refs/heads/master | 2020-12-28T21:39:11.871132 | 2016-07-14T16:38:15 | 2016-07-14T16:38:15 | 24,449,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,386 | py | # -*- coding: utf-8 -*-
"""
Created on May 04, 2014
@author: Tyranic-Moron
"""
from CommandInterface import CommandInterface
from IRCMessage import IRCMessage
from IRCResponse import IRCResponse, ResponseType
from twisted.words.protocols.irc import assembleFormattedText, attributes as A
class Rainbow(CommandInterface):
    """IRC command that echoes the given text with cycling rainbow colours."""

    triggers = ['rainbow']
    help = 'rainbow <text> - outputs the specified text with rainbow colours'

    # One IRC colour-code prefix per hue; characters cycle through these.
    colours = [assembleFormattedText(A.fg.lightRed['']),
               assembleFormattedText(A.fg.yellow['']),
               assembleFormattedText(A.fg.lightGreen['']),
               assembleFormattedText(A.fg.lightCyan['']),
               assembleFormattedText(A.fg.lightBlue['']),
               assembleFormattedText(A.fg.lightMagenta['']),
               ]

    def execute(self, message):
        """
        @type message: IRCMessage
        """
        if not message.ParameterList:
            return IRCResponse(ResponseType.Say,
                               "You didn't give me any text to rainbow!",
                               message.ReplyTo)

        palette = self.colours
        coloured = [palette[pos % len(palette)] + char
                    for pos, char in enumerate(message.Parameters)]
        # Reset formatting at the end so the colours don't bleed onwards.
        coloured.append(assembleFormattedText(A.normal['']))
        return IRCResponse(ResponseType.Say, ''.join(coloured), message.ReplyTo)
| [
"matthewcpcox@gmail.com"
] | matthewcpcox@gmail.com |
a084b0e7a5d66b9462e3e0f4016dca8595899060 | 42e2d31fe71e1a2c0b50a5d4bbe67e6e3e43a2ef | /contrib/devtools/check-doc.py | 1b3264bca50da1765ffaf44ca268e87bec46ddfa | [
"MIT"
] | permissive | coinwebfactory/aiascoin | aceed1ebcb069b03d1fea3384d9d5beca06bc223 | c8741cad5264a2d4c0bbca7813c4f4ad390915ae | refs/heads/master | 2020-03-24T16:29:51.130395 | 2018-08-28T09:56:10 | 2018-08-28T09:56:10 | 142,826,697 | 0 | 0 | MIT | 2018-07-30T04:58:29 | 2018-07-30T04:58:28 | null | UTF-8 | Python | false | false | 1,910 | py | #!/usr/bin/env python
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
This checks if all command line args are documented.
Return value is 0 to indicate no error.
Author: @MarcoFalke
'''
from subprocess import check_output
import re
# Source tree to scan, and the test subtree excluded from the scan.
FOLDER_GREP = 'src'
FOLDER_TEST = 'src/test/'
# Root of the scan, resolved at shell-expansion time via git.
CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/%s' % FOLDER_GREP
# Shell pipeline that greps for every command-line option the sources READ
# (mapArgs/mapMultiArgs lookups and GetArg/GetBoolArg calls), minus tests.
CMD_GREP_ARGS = r"egrep -r -I '(map(Multi)?Args(\.count\(|\[)|Get(Bool)?Arg\()\"\-[^\"]+?\"' %s | grep -v '%s'" % (CMD_ROOT_DIR, FOLDER_TEST)
# Shell command that greps for every option DOCUMENTED via HelpMessageOpt.
CMD_GREP_DOCS = r"egrep -r -I 'HelpMessageOpt\(\"\-[^\"=]+?(=|\")' %s" % (CMD_ROOT_DIR)
# Regexes that pull the bare "-option" name out of each grep hit above.
REGEX_ARG = re.compile(r'(?:map(?:Multi)?Args(?:\.count\(|\[)|Get(?:Bool)?Arg\()\"(\-[^\"]+?)\"')
REGEX_DOC = re.compile(r'HelpMessageOpt\(\"(\-[^\"=]+?)(?:=|\")')
# list unsupported, deprecated and duplicate args as they need no documentation
SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-prematurewitness', '-walletprematurewitness', '-promiscuousmempoolflags', '-blockminsize', '-sendfreetransactions', '-checklevel', '-liquidityprovider', '-anonymizeaiasamount'])
def main():
    """Cross-check used vs. documented command-line args; exit non-zero
    with the count of undocumented ones.  (Python 2 script: print
    statements; runs git/egrep via the shell.)"""
    # Raw grep output for options read by the code and options documented.
    used = check_output(CMD_GREP_ARGS, shell=True)
    docd = check_output(CMD_GREP_DOCS, shell=True)
    # Deduplicated option names; documented set also includes the
    # deliberately undocumented options whitelisted above.
    args_used = set(re.findall(REGEX_ARG,used))
    args_docd = set(re.findall(REGEX_DOC,docd)).union(SET_DOC_OPTIONAL)
    # Used but never documented -> failures; documented but never used ->
    # informational only (does not affect the exit code).
    args_need_doc = args_used.difference(args_docd)
    args_unknown = args_docd.difference(args_used)
    print "Args used        : %s" % len(args_used)
    print "Args documented  : %s" % len(args_docd)
    print "Args undocumented: %s" % len(args_need_doc)
    print args_need_doc
    print "Args unknown     : %s" % len(args_unknown)
    print args_unknown
    # Exit status doubles as the number of undocumented options (0 = ok).
    exit(len(args_need_doc))

if __name__ == "__main__":
    main()
| [
"root@localhost"
] | root@localhost |
663424fd44e897ae383df66011f4a1f60bed3000 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02990/s076168357.py | 2db79e763da1248e48e6dfc6516dac1e7d408771 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | mod = 10**9+7
# Reads N and K from stdin, then prints K values derived from
# binomial-coefficient-like products modulo the prime `mod` (10**9+7,
# defined just above).  Presumably a counting problem over K items chosen
# among N positions -- TODO confirm against the original problem statement.
N, K = map(int, input().split())
# First printed answer is simply N-K+1.
print(N-K+1)
i = 2
C1 = N-K+1
C2 = 1
while i <= K:
    # pow(x, mod-2, mod) is the modular inverse of x by Fermat's little
    # theorem (valid because mod is prime).  Each step multiplies in the
    # next factor of the two falling-factorial/ inverse-factorial products.
    C1 *= ((N-K+1-(i-1))*pow(i, mod-2, mod))%mod
    C2 *= ((K-(i-1))*pow(i-1, mod-2, mod))%mod
    # NOTE(review): C1 and C2 themselves are never reduced mod `mod`, so
    # they grow across iterations; only the printed product is reduced.
    # Python big ints keep this correct but slow for large K -- confirm
    # whether reducing C1/C2 each step was intended.
    print((C1*C2)%mod)
    i += 1
i += 1 | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
054a2be80baef36aea5f876de706b94da37caafe | 50e089f906489b2586cc586712420fd085f1f637 | /machine_learning.py | dd758d6b668604304b9f3b47f2727a1ac5ec2109 | [] | no_license | AaronTho/Python_Notes | 5ab629e3b3d49be5c68d2a285a79683dc604cd3e | 4aa0e1fb4a35763458a1da467e1bb01e393bc972 | refs/heads/main | 2023-07-24T00:59:23.552952 | 2021-09-11T17:32:25 | 2021-09-11T17:32:25 | 375,399,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | # Import the Data
# Clean the Data
# Split the Data into Training/Test Sets
# Create a Model
# Train the Model
# Make Predictions
# Evaluate and Improve
# Libraries and Tools:
# Numpy
# Pandas
# MatPlotLIb
# Scikit-Learn
# Jupyter is the a good machine learning environment
# Install Anaconda (had to also install the command line version)
# Command Line "jupyter notebook" to create new notebook in the browser
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# Load the dataset; assumes music.csv sits in the working directory with a
# 'genre' target column plus feature columns -- TODO confirm schema.
music_data = pd.read_csv('music.csv')
X = music_data.drop(columns=['genre'])  # feature matrix (all non-target columns)
y = music_data['genre']  # target labels
# Hold out 20% of the rows for evaluation (random split each run).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
model = DecisionTreeClassifier()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
# Bare expression: echoes the value when executed as a Jupyter notebook cell.
predictions
score = accuracy_score(y_test, predictions)
score
| [
"aamith@gmail.com"
] | aamith@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.