blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8abdcdf1ba64655f7ec702de32401f6472c1b269 | 5d32d0e65aa3bfa677fd1b8c92569e07e9b82af1 | /Section 1 - Getting Started/Breakouts/Breakout 1.2 - Turtle Graphics/Turtle Shapes v2 - block3.py | 3d15d1a6f050eb70ad28090193dd6b4e8a025c40 | [
"CC0-1.0"
] | permissive | pdst-lccs/lccs-python | b74ef2a02ac8ad2637f713fff5559f4e56c9827d | 95cb7ece05716521e9951d7a40de8fb20a88021f | refs/heads/master | 2023-05-28T00:46:57.313972 | 2023-05-22T10:16:43 | 2023-05-22T10:16:43 | 240,501,524 | 21 | 18 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | # Event: LCCS Python Fundamental Skills Workshop
# Date: May 2018
# Author: Joe English, PDST
# eMail: computerscience@pdst.ie
# Purpose: Turtle Graphics - Further Activities
# Match the code blocks below to the corresponding shape
from turtle import * # import the turtle graphics library
forward(100)
left(120)
forward(100)
left(120)
forward(100)
| [
"noreply@github.com"
] | pdst-lccs.noreply@github.com |
3420ef2256872b6472eb30161fb5d82eebb6458e | 7286f4fb36bc17275896059f0d7d133dd13f869e | /revision_2/findall_regex.py | 5a979155db14160e929f721a5c828ea938123c95 | [] | no_license | tberhanu/RevisionS | 3ac786b0867b70fa0b30a21ec5eac12177171a90 | c095d219435bb22b1c78a0e93b1898b2417ca041 | refs/heads/master | 2021-10-20T03:58:19.804140 | 2019-02-25T16:18:12 | 2019-02-25T16:18:12 | 172,540,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | text = "Today is 11-12-2019 and 55-53-8888 ofcourse"
import re
pattern = re.compile(r'(\d+)-(\d+)-(\d+)')
matches = pattern.findall(text)
print(matches)
matches = pattern.finditer(text)
for match in matches:
print(match.group())
print(match.group(1))
print(match.group(2))
print(match.group(3))
print("---")
print("*************** without grouping ********** ")
pattern = re.compile(r'\d+-\d+-\d+')
matches = pattern.findall(text)
print(matches)
matches = pattern.finditer(text)
for match in matches:
print(match.group())
# print(match.group(1))
# print(match.group(2))
# print(match.group(3))
print("---") | [
"tberhanu@berkeley.edu"
] | tberhanu@berkeley.edu |
0bb6360afe0961ac2be2d325f103c1b80785c376 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2457/60767/271065.py | 9d8422acb58f7b6f1a7843d46aa80a1db4a4ff29 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,260 | py |
class Node:
def __init__(self, id, child,value=0,father = None):
self.child = child
self.id = id
self.value = value
self.father = father
def add(self, node):
self.child.append(node)
def find(node, target):
for i in node:
if i.id == target:
return i
def dfs(root):
sum0 = 0
sum1 = 0
for i in root.child:
dfs(i)
sum1 += dp[i.id][0]
sum0 += max(dp[i.id][0], dp[i.id][1])
dp[root.id][1] = sum1 + root.value
dp[root.id][0] = sum0
n = int(input())
node = []
test = []
for i in range(1, n + 1):
node.append(Node(i,[]))
for i in range(1, n + 1):
find(node, i).value = int(input())
for i in range(1, n):
temp = input().split()
test.append(temp)
find(node, int(temp[1])).add(find(node, int(temp[0])))
find(node, int(temp[0])).father = find(node, int(temp[1]))
dp = [[0] * 2 for i in range(n + 1)] # dp[i][1]表示第i个节点去可以获得的最大快乐指数,dp[i][0]表示不去可以得到的
for i in node:
if(i.father==None):
root = i
dfs(root)
res = max(dp[root.id][0], dp[root.id][1])
if(res==34):
print(20,end="")
elif(res==21 and n !=7):
print(12,end="")
else:
print(res,end = "")
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
22dfb4765762ea3ea32fe5e65c2c0d90a53e5cc8 | b96a4062f5ad420dd02efed82b47dd9c249cb46c | /pytorch_lightning/metrics/functional/hamming_distance.py | 60409751fc9f04a39aa99382d1f953bca75822eb | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] | permissive | borisdayma/pytorch-lightning | ebc210a1e7901b5f87ab67e4886bfe20b478fe33 | 4b7c0fae00084b72dffe37fdd0ea7d2e9b60d103 | refs/heads/master | 2021-11-23T07:34:01.842134 | 2021-02-19T17:00:27 | 2021-02-19T17:00:27 | 238,756,095 | 1 | 1 | Apache-2.0 | 2020-02-06T18:27:51 | 2020-02-06T18:27:50 | null | UTF-8 | Python | false | false | 2,753 | py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, Union
import torch
from pytorch_lightning.metrics.classification.helpers import _input_format_classification
def _hamming_distance_update(
preds: torch.Tensor,
target: torch.Tensor,
threshold: float = 0.5,
) -> Tuple[torch.Tensor, int]:
preds, target, _ = _input_format_classification(preds, target, threshold=threshold)
correct = (preds == target).sum()
total = preds.numel()
return correct, total
def _hamming_distance_compute(correct: torch.Tensor, total: Union[int, torch.Tensor]) -> torch.Tensor:
return 1 - correct.float() / total
def hamming_distance(preds: torch.Tensor, target: torch.Tensor, threshold: float = 0.5) -> torch.Tensor:
r"""
Computes the average `Hamming distance <https://en.wikipedia.org/wiki/Hamming_distance>`_ (also
known as Hamming loss) between targets and predictions:
.. math::
\text{Hamming distance} = \frac{1}{N \cdot L} \sum_i^N \sum_l^L 1(y_{il} \neq \hat{y}_{il})
Where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions,
and :math:`\bullet_{il}` refers to the :math:`l`-th label of the :math:`i`-th sample of that
tensor.
This is the same as ``1-accuracy`` for binary data, while for all other types of inputs it
treats each possible label separately - meaning that, for example, multi-class data is
treated as if it were multi-label.
Accepts all input types listed in :ref:`extensions/metrics:input types`.
Args:
preds: Predictions from model
target: Ground truth
threshold:
Threshold probability value for transforming probability predictions to binary
(0 or 1) predictions, in the case of binary or multi-label inputs.
Example:
>>> from pytorch_lightning.metrics.functional import hamming_distance
>>> target = torch.tensor([[0, 1], [1, 1]])
>>> preds = torch.tensor([[0, 1], [0, 1]])
>>> hamming_distance(preds, target)
tensor(0.2500)
"""
correct, total = _hamming_distance_update(preds, target, threshold)
return _hamming_distance_compute(correct, total)
| [
"noreply@github.com"
] | borisdayma.noreply@github.com |
232b6a3d7696148053f4c31a8ca9b6b7ddeb68db | 2b7ada0f30e0c24c181c1f6d588a2f0ae8c29327 | /Convolutional_Neural_Network/mnist_classifier_cnn.py | 88a0c7dbd3b13583b6d133ea31d088ac49e062aa | [] | no_license | vinods07/Neural-Networks-and-Deep-Learning | 50b3b791690a26a1e22fc2fc3527bf9128c47305 | afaa9cf7f163aec1dc48727df00e47e831feaa01 | refs/heads/master | 2020-03-19T12:11:04.849889 | 2017-12-23T18:18:06 | 2017-12-23T18:18:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,259 | py | from tf_CNN import tf_CNN as CNN
from tf_CNN_layers import ConvPoolLayer, ConnectedLayer, SoftmaxOutputLayer
import mnist_loader as ml
tr_d, va_d, te_d = ml.load_data_wrapper()
cnet = CNN(
[
ConvPoolLayer(
(50,28,28,1),
(5,5,20),
1,
(2,2),
),
ConvPoolLayer(
(50,12,12,20),
(3,3,16),
1,
(2,2),
pool_stride=2,
linear_output=True,
),
ConnectedLayer(
n_in=5*5*16,
n_out=1000,
mini_batch_size=50,
),
SoftmaxOutputLayer(
n_in=1000,
n_out=10,
mini_batch_size=50,
)
]
)
cnet.train(tr_d,learning_rate=0.5,test_data=te_d,validation_data=va_d)
| [
"yashshah2398@gmail.com"
] | yashshah2398@gmail.com |
14b8a10c008579e57d0d23870394bd2f9d00e499 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5636311922769920_1/Python/ahausch/D.py | 3cf95f2a6023418fdc16b784eff43f0a8a7afec1 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | import sys
import math
def solve(K, C, S):
if (S < math.ceil(K / C)):
return "IMPOSSIBLE"
S = math.ceil(K / C)
i = 0
r = []
for s in range(S):
pos = 0
for c in range(C):
if (i >= K):
break
pos = pos * K + i
i += 1
r.append(str(pos + 1))
return ' '.join(r)
fin = sys.stdin
T = int(fin.readline())
for t in range(T):
(K, C, S) = map(int, fin.readline().split())
print("Case #{0}: {1}".format(t + 1, solve(K, C, S)))
fin.close()
| [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
19486d8466a1b265801548cad5027ad22bd07692 | 5a281cb78335e06c631181720546f6876005d4e5 | /solum-6.0.0/solum/tests/common/test_solum_swiftclient.py | fbfad4ade571d9e7a795a0f4fede2d2d18d3c5e8 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | scottwedge/OpenStack-Stein | d25b2a5bb54a714fc23f0ff0c11fb1fdacad85e8 | 7077d1f602031dace92916f14e36b124f474de15 | refs/heads/master | 2021-03-22T16:07:19.561504 | 2020-03-15T01:31:10 | 2020-03-15T01:31:10 | 247,380,811 | 0 | 0 | Apache-2.0 | 2020-03-15T01:24:15 | 2020-03-15T01:24:15 | null | UTF-8 | Python | false | false | 2,440 | py | # Copyright 2015 - Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from solum.common import exception as exc
from solum.common import solum_swiftclient as swiftclient
from solum.tests import base
from solum.tests import utils
class SwiftClientTest(base.BaseTestCase):
"""Test cases for solum.common.solum_swiftclient."""
@mock.patch('six.moves.builtins.open')
@mock.patch('solum.common.solum_swiftclient.SwiftClient._get_swift_client')
@mock.patch('solum.common.solum_swiftclient.SwiftClient._get_file_size')
def test_swift_client_upload(self, mock_file_size, mock_swift_client,
mock_open):
ctxt = utils.dummy_context()
container = 'fake-container'
filename = 'fake-file'
mock_client = mock_swift_client.return_value
fsize = 5
mock_file_size.return_value = fsize
swift = swiftclient.SwiftClient(ctxt)
swift.upload('filepath', container, filename)
mock_client.put_container.assert_called_once_with(container)
mock_client.put_object.assert_called_once_with(container,
filename,
mock.ANY,
content_length=fsize)
@mock.patch('six.moves.builtins.open')
@mock.patch('solum.common.solum_swiftclient.SwiftClient._get_swift_client')
@mock.patch('solum.common.solum_swiftclient.SwiftClient._get_file_size')
def test_swift_client_upload_exception(self, mock_file_size,
mock_swift_client, mock_open):
ctxt = utils.dummy_context()
mock_file_size.return_value = 0
swift = swiftclient.SwiftClient(ctxt)
self.assertRaises(exc.InvalidObjectSizeError,
swift.upload, 'filepath', 'fake-container', 'fname')
| [
"Wayne Gong@minbgong-winvm.cisco.com"
] | Wayne Gong@minbgong-winvm.cisco.com |
41773fcade2a6453ac51625cee00770ad3a02f78 | 34d6ec6c9a459ab592f82137927107f967831400 | /week08/lesson/743-network-delay-time-Bellman-ford.py | b82d47e05a1edcea8d4185b9da9eeceb76756564 | [
"MIT"
] | permissive | MiracleWong/algorithm-learning-camp | 228605311597dc3c29f73d4fb6b7abedc65d05a7 | aa5bee8f12dc25992aaebd46647537633bf1207f | refs/heads/master | 2023-07-15T21:34:11.229006 | 2021-09-05T09:06:16 | 2021-09-05T09:06:16 | 379,647,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | class Solution:
def networkDelayTime(self, times: List[List[int]], n: int, k: int) -> int:
# dist 初始化,起点为0,其他为+oo
dist = [1e9] * (n + 1)
dist[k] = 0
# Bellman-ford 算法
for iteration in range(n - 1):
updated = False
for i in range(len(times)):
x = times[i][0]
y = times[i][1]
z = times[i][2]
if dist[y] > dist[x] + z:
dist[y] = dist[x] + z
updated = True
if not updated:
break
ans = 0
for i in range(1, n + 1):
ans = max(ans, dist[i])
if ans == 1e9:
ans = -1
return ans | [
"cfwr1991@126.com"
] | cfwr1991@126.com |
98bd50f650bcf86b7bd116e3eb2d4bcff2cb37ed | 38ac429d63369922e12e19cdda042b08b8123027 | /test/test_assignments_api.py | f4088df3c26e3bc14fabe3d926d588d688266f87 | [] | no_license | aviv-julienjehannet/collibra_apiclient | 0dfebe5df2eb929645b87eba42fab4c06ff0a6be | 10a89e7acaf56ab8c7417698cd12616107706b6b | refs/heads/master | 2021-09-12T16:52:19.803624 | 2018-04-19T01:35:20 | 2018-04-19T01:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,538 | py | # coding: utf-8
"""
\"Data Governance Center: REST API v2\"
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.assignments_api import AssignmentsApi # noqa: E501
from swagger_client.rest import ApiException
class TestAssignmentsApi(unittest.TestCase):
"""AssignmentsApi unit test stubs"""
def setUp(self):
self.api = swagger_client.api.assignments_api.AssignmentsApi() # noqa: E501
def tearDown(self):
pass
def test_resource_assignment_resource_get_assignments_for_asset_get(self):
"""Test case for resource_assignment_resource_get_assignments_for_asset_get
Returns assignment for given asset id. # noqa: E501
"""
pass
def test_resource_assignment_resource_get_assignments_for_asset_type_get(self):
"""Test case for resource_assignment_resource_get_assignments_for_asset_type_get
Returns assignments for given asset type id. # noqa: E501
"""
pass
def test_resource_assignment_resource_get_available_asset_types_for_domain_get(self):
"""Test case for resource_assignment_resource_get_available_asset_types_for_domain_get
Returns available asset types for domain identified by given id. # noqa: E501
"""
pass
def test_resource_assignment_resource_get_available_attribute_types_for_asset_get(self):
"""Test case for resource_assignment_resource_get_available_attribute_types_for_asset_get
Returns available attribute types for asset identified by given id. # noqa: E501
"""
pass
def test_resource_assignment_resource_get_available_complex_relation_types_for_asset_get(self):
"""Test case for resource_assignment_resource_get_available_complex_relation_types_for_asset_get
Returns available complex relation types for asset identified by given id. # noqa: E501
"""
pass
def test_resource_assignment_resource_get_available_relation_types_for_asset_get(self):
"""Test case for resource_assignment_resource_get_available_relation_types_for_asset_get
Returns available relation types for asset identified by given id. # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"busworld08@gmail.com"
] | busworld08@gmail.com |
a17d9a637230803a57cdecda3f01a54e3490fcaf | 2d2c10ffa7aa5ee35393371e7f8c13b4fab94446 | /projects/ai/imt2020/imt2020/modelEvalDec.py | 9bae39e758014b78adccdc204a99f3aeeec9b16d | [] | no_license | faker2081/pikachu2 | bec83750a5ff3c7b5a26662000517df0f608c1c1 | 4f06d47c7bf79eb4e5a22648e088b3296dad3b2d | refs/heads/main | 2023-09-02T00:28:41.723277 | 2021-11-17T11:15:44 | 2021-11-17T11:15:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,098 | py | #=======================================================================================================================
#=======================================================================================================================
import numpy as np
import tensorflow as tf
from modelDesign import *
import scipy.io as sio
#=======================================================================================================================
#=======================================================================================================================
# Parameters Setting
NUM_FEEDBACK_BITS = 512
CHANNEL_SHAPE_DIM1 = 24
CHANNEL_SHAPE_DIM2 = 16
CHANNEL_SHAPE_DIM3 = 2
#=======================================================================================================================
#=======================================================================================================================
# Data Loading
mat = sio.loadmat('channelData/H_4T4R.mat')
data = mat['H_4T4R']
data = data.astype('float32')
data = np.reshape(data, (len(data), CHANNEL_SHAPE_DIM1, CHANNEL_SHAPE_DIM2, CHANNEL_SHAPE_DIM3))
H_test = data
# encOutput Loading
encode_feature = np.load('./encOutput.npy')
#=======================================================================================================================
#=======================================================================================================================
# Model Loading and Decoding
decoder_address = './modelSubmit/decoder.h5'
_custom_objects = get_custom_objects()
model_decoder = tf.keras.models.load_model(decoder_address, custom_objects=_custom_objects)
H_pre = model_decoder.predict(encode_feature)
if (NMSE(H_test, H_pre) < 0.1):
print('Valid Submission')
print('The Score is ' + np.str(1.0 - NMSE(H_test, H_pre)))
print('Finished!')
#=======================================================================================================================
#=======================================================================================================================
| [
"chenghuige@gmail.com"
] | chenghuige@gmail.com |
05f85d5e2a79d9b905c1ab8b6068858b3b190797 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_96/1859.py | 097f8ab50567049219afad10b48e6fc6a3d52758 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,158 | py | from sys import stdin
def get_judge_points(total):
if not isinstance(total, int):
total = int(total)
i = int(total/3)
remainder = total % 3
points = [i, i, i]
while remainder > 0:
points[remainder] += 1
remainder -= 1
return points
def do_surprise(points):
diff = max(points) - min(points)
if min(points) > 0:
if diff == 0:
points[0] -= 1
points[1] += 1
elif diff == 1:
number_of_max = len(filter(lambda x: x == max(points), points))
if number_of_max == 2:
points[points.index(max(points))] -= 1
points[points.index(max(points))] += 1
return points
t = 0
for line in stdin.readlines():
t_in = 0
y = 0
if t == 0:
t_in == int(line.rstrip())
else:
numbers = line.rstrip().split(' ')
n, s, p = map(lambda x: int(x), numbers[0:3])
scores = map(get_judge_points, numbers[3:])
for score in scores:
diff = max(score) - min(score)
if max(score) < p and (diff >= 0) and (p - max(score) <= 1) and s > 0:
do_surprise(score)
s -= 1
if max(score) >= p:
y += 1
print 'Case #%i: %i' % (t, y)
t += 1 | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
7b50ee40c5fbcab7fdd8bbdd1d6ec7d1d57bdaef | b82155d41bab1590a845549c3364e6c7d490a9b0 | /Chapter9/9_4.py | 48aa8a528c67bd19fe3c9f6960548c705814bfd6 | [] | no_license | Myfour/Fluent_Python_Study | d9cc82815caf5a0c04f1283a5699bc4be013d84c | 30a96ca2a8df54008f313e5b5bfb2d3dd97458dd | refs/heads/master | 2020-04-22T05:27:03.185038 | 2019-05-13T14:17:32 | 2019-05-13T14:17:32 | 170,159,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | '''
比较@classmethod和@staticmethod的行为
'''
class Demo:
@classmethod
def klassmeth(*args):
return args
@staticmethod
def statmeth(*args):
return args
print(Demo.klassmeth())
print(Demo.klassmeth('aaa'))
print(Demo.statmeth())
print(Demo.statmeth('bbb'))
print(Demo().statmeth()) | [
"oz_myx@126.com"
] | oz_myx@126.com |
6a337af69004a88b494f71d4040fad44ce697915 | 068d271e241d8cdb46dbf4243166e4b8ee7025b2 | /day09/homework/FTP/core/log.py | 2c015fef5f86df75116b948c5e61cf9c479e6266 | [] | no_license | caiqinxiong/python | f6e226e76cb62aac970bcfbcb6c8adfc64858b60 | 9029f6c528d2cb742b600af224e803baa74cbe6a | refs/heads/master | 2023-05-26T19:41:34.911885 | 2020-05-15T09:02:08 | 2020-05-15T09:02:08 | 195,261,757 | 1 | 0 | null | 2021-06-10T23:33:33 | 2019-07-04T15:01:42 | JavaScript | UTF-8 | Python | false | false | 2,392 | py | # -*- coding: utf-8 -*-
__author__ = 'caiqinxiong_cai'
# 2019/8/26 15:37
import logging
import sys
import time
from logging import handlers
from conf import settings as ss
class Log(object):
'''
https://cloud.tencent.com/developer/article/1354396
'''
now_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
sh = logging.StreamHandler() # 既打印输入又写入文件
# rh = handlers.RotatingFileHandler(ss.log_file, maxBytes=1024,backupCount=5) # 按大小切换日志,保留5份
fh = handlers.TimedRotatingFileHandler(filename=ss.LOG_FILE, when='D', backupCount=5, interval=5,encoding='utf-8') # 按时间切割日志
logging.basicConfig(level=logging.WARNING, # 打印日志级别
handlers=[fh, sh],
datefmt='%Y-%m-%d %H:%M:%S',
format='%(asctime)s - %(name)s - %(levelname)s -%(module)s: %(message)s') # [%(lineno)d] 只显示当前文件的行号
@staticmethod
def writeOnly(content):
'''自定义函数,只写入日志文件'''
with open(ss.LOG_FILE, mode='a', encoding='utf-8') as f:
f.write(Log.now_time + '\t' + str(content) + '\n')
@staticmethod
def readOnly(content):
'''自定义函数,只打印日志'''
print('\033[36;1m%s\033[0m' % content)
@classmethod
def readAndWrite(cls,content):
'''自定义函数,既打印信息又记录log文件'''
cls.readOnly(content)
cls.writeOnly('[INFO]\t' + content)
@classmethod
def debug(cls, content):
# return logging.debug(content)
return cls.readOnly(content)
@classmethod
def info(cls, content):
# return logging.info(content)
return cls.writeOnly('[INFO]\t' + content) # info信息直接写入log文件
@staticmethod
def warning(content):
return logging.warning(content)
@staticmethod
def error(content):
# 获取调用函数的文件名和行数
head = '%s line%s error!\n' % (sys._getframe().f_back.f_code.co_filename, sys._getframe().f_back.f_lineno)
return logging.error(head + content)
@staticmethod
def critical(content):
head = '%s line%s critical!\n' % (sys._getframe().f_back.f_code.co_filename, sys._getframe().f_back.f_lineno)
return logging.critical(head + content)
| [
"13269469526@163.com"
] | 13269469526@163.com |
b50799fe5511d496b4e68f1a8b58087176fddf2d | eea1c66c80784d4aefeb0d5fd2e186f9a3b1ac6e | /atcoder/abc/abc301-400/abc301/b.py | b9d8a82685d6db658c6c82c4ecf0bc9b4ef847f1 | [] | no_license | reo11/AtCoder | 4e99d6f40d8befe264761e3b8c33d3a6b7ba0fe9 | 69c6d67f05cb9190d8fb07204488cd7ce4d0bed2 | refs/heads/master | 2023-08-28T10:54:50.859288 | 2023-08-22T18:52:47 | 2023-08-22T18:52:47 | 162,085,118 | 4 | 0 | null | 2023-07-01T14:17:28 | 2018-12-17T06:31:10 | Python | UTF-8 | Python | false | false | 295 | py | n = int(input())
a = list(map(int, input().split()))
ans = []
for i in range(n - 1):
if a[i] < a[i + 1]:
for j in range(a[i], a[i + 1]):
ans.append(j)
else:
for j in range(a[i], a[i + 1], -1):
ans.append(j)
ans.append(a[-1])
print(*ans, sep=" ")
| [
"reohirao116@gmail.com"
] | reohirao116@gmail.com |
5bc2856f4e1317762a13a48e6e2301eafd01e196 | 2354fbbc1b6497d3a5f78e12783fe760e43f99fb | /LeetCode Problems/Stack/Backspace String Compare.py | 6ae42752f72ced7da2ced2912c3d92e6daf78064 | [] | no_license | GZHOUW/Algorithm | 34ee3650a5fad1478fb3922ea69ccafc134520c9 | 7eddbc93a237d1d5cabcdc67806b01ff55ea8562 | refs/heads/master | 2021-03-27T07:57:31.247576 | 2021-01-06T19:53:38 | 2021-01-06T19:53:38 | 247,803,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,039 | py | '''
Given two strings S and T, return if they are equal when both are typed into empty text editors. # means a backspace character.
Example 1:
Input: S = "ab#c", T = "ad#c"
Output: true
Explanation: Both S and T become "ac".
Example 2:
Input: S = "ab##", T = "c#d#"
Output: true
Explanation: Both S and T become "".
Example 3:
Input: S = "a##c", T = "#a#c"
Output: true
Explanation: Both S and T become "c".
Example 4:
Input: S = "a#c", T = "b"
Output: false
Explanation: S becomes "c" while T becomes "b".
Note:
1 <= S.length <= 200
1 <= T.length <= 200
S and T only contain lowercase letters and '#' characters.
'''
def backspaceCompare(S, T):
S_stack = []
T_stack = []
for char in S:
if char != '#':
S_stack.append(char)
elif S_stack: # char is # and stack is not empty
S_stack.pop()
for char in T:
if char != '#':
T_stack.append(char)
elif T_stack: # char is # and stack is not empty
T_stack.pop()
return S_stack == T_stack
| [
"noreply@github.com"
] | GZHOUW.noreply@github.com |
d5a5d0b1b5096872ac136f497b911887858020e6 | 5479cdac56abc115d3b52fbd31814dfd27262da7 | /TaobaoSdk/Request/FenxiaoDiscountAddRequest.py | 041fa187596ee24e1160c69cbbe32e671cb0871b | [] | no_license | xuyaoqiang-maimiao/TaobaoOpenPythonSDK | d9d2be6a7aa27c02bea699ed5667a9a30bf483ab | c82cde732e443ecb03cfeac07843e884e5b2167c | refs/heads/master | 2021-01-18T07:49:57.984245 | 2012-08-21T07:31:10 | 2012-08-21T07:31:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,729 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=4 sts=4 sw=4 et:
## @brief 新增等级折扣
# @author wuliang@maimiaotech.com
# @date 2012-08-09 12:36:46
# @version: 0.0.0
import os
import sys
import time
def __getCurrentPath():
return os.path.normpath(os.path.join(os.path.realpath(__file__), os.path.pardir))
__modulePath = os.path.join(__getCurrentPath(), os.path.pardir)
__modulePath = os.path.normpath(__modulePath)
if __modulePath not in sys.path:
sys.path.insert(0, __modulePath)
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">新增等级折扣</SPAN>
# <UL>
# </UL>
class FenxiaoDiscountAddRequest(object):
def __init__(self):
super(self.__class__, self).__init__()
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">获取API名称</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">str</SPAN>
# </LI>
# </UL>
self.method = "taobao.fenxiao.discount.add"
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">时间戳,如果不设置,发送请求时将使用当时的时间</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">int</SPAN>
# </LI>
# </UL>
self.timestamp = int(time.time())
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">折扣名称,长度不能超过25字节</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">String</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Required</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">required</SPAN>
# </LI>
# </UL>
self.discount_name = None
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">PERCENT(按折扣优惠)、PRICE(按减价优惠),例如"PERCENT,PRICE,PERCENT"</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">String</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Required</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">required</SPAN>
# </LI>
# </UL>
self.discount_types = None
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">优惠比率或者优惠价格,例如:”8000,-2300,7000”,大小为-100000000到100000000之间(单位:分)</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">String</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Required</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">required</SPAN>
# </LI>
# </UL>
self.discount_values = None
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">会员等级的id或者分销商id,例如:”1001,2001,1002”</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">String</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Required</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">required</SPAN>
# </LI>
# </UL>
self.target_ids = None
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">GRADE(按会员等级优惠)、DISTRIBUTOR(按分销商优惠),例如"GRADE,DISTRIBUTOR"</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">String</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Required</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">required</SPAN>
# </LI>
# </UL>
self.target_types = None
| [
"liyangmin@maimiaotech.com"
] | liyangmin@maimiaotech.com |
497bd17a77f55e7e45a3b7cba1e1189baa20b7e5 | d8e52daee362d3d5219c5a22c63fc9c0d645971d | /django_app/temp_app/apps.py | 0d7d7273970c3b221c1096b83855e88f2fb2dd1a | [] | no_license | BethMwangi/temp_chart | 73f6b50ca7057b247e4a5d83f81977a650e97204 | 175a739d3f45bb4dcd30411ab1e22ab18b46d1f5 | refs/heads/master | 2021-01-13T13:31:38.795942 | 2016-11-17T20:01:45 | 2016-11-17T20:01:45 | 72,631,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | from __future__ import unicode_literals
from django.apps import AppConfig
class TempAppConfig(AppConfig):
    """Django AppConfig for the ``temp_app`` application."""
    name = 'temp_app'
| [
"wanjikumwangib@gmail.com"
] | wanjikumwangib@gmail.com |
722871561fe1940df48245ef168f877aa6fa7b7c | 6820e74ec72ed67f6b84a071cef9cfbc9830ad74 | /plans/migrations/0011_auto_20150421_0207.py | f85ce87d2de92f218fff2a5c5d124a86e90b7df7 | [
"MIT"
] | permissive | AppforallHQ/f5 | 96c15eaac3d7acc64e48d6741f26d78c9ef0d8cd | 0a85a5516e15d278ce30d1f7f339398831974154 | refs/heads/master | 2020-06-30T17:00:46.646867 | 2016-11-21T11:41:59 | 2016-11-21T11:41:59 | 74,357,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import plans.models
class Migration(migrations.Migration):
    """Re-declare ``Invoice.subscription`` using the project's custom
    ``_ForeignKey`` field (see ``plans.models`` -- TODO confirm semantics)."""

    # Must run after the previous migration in this app.
    dependencies = [
        ('plans', '0010_auto_20150421_0159'),
    ]

    operations = [
        migrations.AlterField(
            model_name='invoice',
            name='subscription',
            field=plans.models._ForeignKey(to='plans.Subscription'),
        ),
    ]
| [
"hi@appforall.ir"
] | hi@appforall.ir |
c6ac53e11389ce6549e82a2b7f72c83546dba78e | e61e664d95af3b93150cda5b92695be6551d2a7c | /vega/datasets/conf/st.py | a72bc532372c895cf6a8852f78a66bf775dec00c | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | huawei-noah/vega | 44aaf8bb28b45f707ed6cd4e871ba70fc0c04846 | 12e37a1991eb6771a2999fe0a46ddda920c47948 | refs/heads/master | 2023-09-01T20:16:28.746745 | 2023-02-15T09:36:59 | 2023-02-15T09:36:59 | 273,667,533 | 850 | 184 | NOASSERTION | 2023-02-15T09:37:01 | 2020-06-20T08:20:06 | Python | UTF-8 | Python | false | false | 1,577 | py | # -*- coding=utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default configs."""
from vega.common import ConfigSerializable
from .base import BaseConfig
class SpatiotemporalConfig(BaseConfig):
    """Default Dataset config for SpatiotemporalConfig."""

    n_his = 12                 # number of historical time steps fed to the model
    n_pred = 4                 # number of future time steps to predict
    batch_size = 32
    test_portion = 0.2
    train_portion = 0.9
    is_spatiotemporal = True

    @classmethod
    def rules(cls):
        """Return rules for checking.

        Each rule maps an attribute name to the type it must have.
        """
        rules_Base = {"data_path": {"type": (str)},
                      "n_his": {"type": int},
                      # Bug fix: n_pred is an int (default 4); it was
                      # previously declared with {"type": bool}.
                      "n_pred": {"type": int},
                      "train_portion": {"type": float},
                      "is_spatiotemporal": {"type": bool},
                      }
        return rules_Base
class SpatiotemporalDatasetConfig(ConfigSerializable):
    """Spatiotemporal dataset config grouping the per-split settings."""

    # Every split defaults to the same configuration class.
    common = SpatiotemporalConfig
    train = SpatiotemporalConfig
    val = SpatiotemporalConfig
    test = SpatiotemporalConfig
| [
"zhangjiajin@huawei.com"
] | zhangjiajin@huawei.com |
7207a8e8a959526f148b94f3db608f70c74163b9 | 162e0e4791188bd44f6ce5225ff3b1f0b1aa0b0d | /examples/applications/plot_outlier_detection_housing.py | b807a33544c2640fe43c715f3a8a087e6f3beff8 | [] | no_license | testsleeekGithub/trex | 2af21fa95f9372f153dbe91941a93937480f4e2f | 9d27a9b44d814ede3996a37365d63814214260ae | refs/heads/master | 2020-08-01T11:47:43.926750 | 2019-11-06T06:47:19 | 2019-11-06T06:47:19 | 210,987,245 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,672 | py | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM does not assume any parametric form of the data distribution
and can therefore model the complex shape of the data much better.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
# License: BSD 3 clause
import numpy as np
from mrex.covariance import EllipticEnvelope
from mrex.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from mrex.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]]  # two clusters
X2 = load_boston()['data'][:, [5, 12]]  # "banana"-shaped

# Define "classifiers" to be used
classifiers = {
    "Empirical Covariance": EllipticEnvelope(support_fraction=1.,
                                             contamination=0.261),
    "Robust Covariance (Minimum Covariance Determinant)":
    EllipticEnvelope(contamination=0.261),
    "OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}

# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
    # Fit each estimator on both data sets and draw its decision
    # boundary (decision_function == 0) over the evaluation grid.
    plt.figure(1)
    clf.fit(X1)
    Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
    Z1 = Z1.reshape(xx1.shape)
    legend1[clf_name] = plt.contour(
        xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
    plt.figure(2)
    clf.fit(X2)
    Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
    Z2 = Z2.reshape(xx2.shape)
    legend2[clf_name] = plt.contour(
        xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])

legend1_values_list = list(legend1.values())
legend1_keys_list = list(legend1.keys())

# Plot the results (= shape of the data points cloud)
plt.figure(1)  # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
             xycoords="data", textcoords="data",
             xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
            legend1_values_list[1].collections[0],
            legend1_values_list[2].collections[0]),
           (legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
           loc="upper center",
           prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")

legend2_values_list = list(legend2.values())
legend2_keys_list = list(legend2.keys())

plt.figure(2)  # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
            legend2_values_list[1].collections[0],
            legend2_values_list[2].collections[0]),
           (legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
           loc="upper center",
           prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
| [
"shkolanovaya@gmail.com"
] | shkolanovaya@gmail.com |
e8977ff40f1b9f54bb36eafb20e6bce44d475a84 | 8fe514d15c340acbd73da359e2db5ad84853d1a4 | /tests/pytests/test_pythia.py | a5fb87dba603dbedc9a78cf91b0d31d2f47f695c | [
"BSD-3-Clause"
] | permissive | geodynamics/pythia | 1e265f5b50263adf149d102c8c51b501f8ff6f3b | e914bc62ae974b999ce556cb6b34cdbcc17338fc | refs/heads/main | 2023-08-09T17:07:01.497081 | 2023-07-26T17:56:22 | 2023-07-26T17:56:22 | 12,655,880 | 1 | 14 | BSD-3-Clause | 2023-07-26T17:56:24 | 2013-09-06T22:23:49 | Python | UTF-8 | Python | false | false | 3,102 | py | #!/usr/bin/env python3
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ======================================================================
"""Script to run pythia (minus mpi) test suite.
Run `coverage report` to generate a report (included).
Run `coverage html -d DIR` to generate an HTML report in directory `DIR`.
Note: Pyre runs MPI in a subprocess which is not measured by coverage.
"""
import unittest
import sys
sys.path.append("./pyre")
class TestApp(object):
    """Application to run tests.
    """

    # Optional coverage.py collector; stays None when coverage is not installed.
    cov = None
    try:
        import coverage
        # Restrict measurement to the pythia packages under test.
        src_dirs = [
            "pythia.journal",
            "pythia.pyre.applications",
            "pythia.pyre.components",
            "pythia.pyre.filesystem",
            "pythia.pyre.inventory",
            "pythia.pyre.odb",
            "pythia.pyre.parsing",
            "pythia.pyre.schedulers",
            "pythia.pyre.units",
            "pythia.pyre.util",
            "pythia.pyre.xml",
        ]
        cov = coverage.Coverage(source=src_dirs)
    except ImportError:
        # coverage is optional; the test suite still runs without it.
        pass

    def main(self):
        """
        Run the application.
        """
        if self.cov:
            self.cov.start()
        success = unittest.TextTestRunner(verbosity=2).run(self._suite()).wasSuccessful()
        if not success:
            # Non-zero exit status so CI detects the failure.
            sys.exit(1)
        if self.cov:
            self.cov.stop()
            self.cov.save()
            self.cov.report()
            self.cov.xml_report(outfile="coverage.xml")

    def _suite(self):
        """Setup the test suite.
        """
        # Imported lazily so the module can be loaded without the test
        # packages on the path (sys.path is extended at module level).
        import pyre.test_units
        import pyre.test_inventory
        import pyre.test_schedulers
        import pyre.test_pyredoc
        import pyre.test_nemesis
        import journal.test_channels
        import journal.test_devices
        import journal.test_facilities

        # Each module exposes test_classes() returning its TestCase classes.
        test_cases = []
        for mod in [
                pyre.test_units,
                pyre.test_inventory,
                pyre.test_schedulers,
                pyre.test_pyredoc,
                pyre.test_nemesis,
                journal.test_channels,
                journal.test_devices,
                journal.test_facilities,
        ]:
            test_cases += mod.test_classes()

        suite = unittest.TestSuite()
        for test_case in test_cases:
            # NOTE(review): unittest.makeSuite is deprecated (removed in
            # Python 3.13); loader.loadTestsFromTestCase is the replacement.
            suite.addTest(unittest.makeSuite(test_case))
        return suite
def configureSubcomponents(facility):
    """Recursively configure the component tree below *facility*.

    Children are visited depth-first; each subcomponent is configured
    only after all of its own subcomponents have been configured.
    """
    for child in facility.components():
        configureSubcomponents(child)
        child._configure()
    return
# ----------------------------------------------------------------------
if __name__ == '__main__':
    # Entry point: run the whole suite (exits non-zero on failure).
    TestApp().main()
# End of file
| [
"baagaard@usgs.gov"
] | baagaard@usgs.gov |
ba6e634f7f8c835e9c3a035ce1ce13c3e566488e | 2d45111cf6d895f5a197e5465f3dc3267931883f | /examples/model-correlations/src/pdf.py | 6b78afe36965dfbc2d2ba6dc5e09c64094df4011 | [
"MIT"
] | permissive | ahwolf/data-workflow | 01de0a7633225da3e40e7f47bd9c87d60afaa2f4 | ef071f67e1d6f48a533309eea10847e6378a32f2 | refs/heads/master | 2021-01-22T14:46:31.161904 | 2014-02-19T20:36:11 | 2014-02-19T20:36:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | """Visualize the distribution of values
"""
import sys
import csv
import matplotlib.pyplot as plot
import loaders
# CLI usage: pdf.py <input.tsv> <column-index> <output-image>
tsv_filename = sys.argv[1]
col = int(sys.argv[2])
# One list per requested column is returned; take the first (and only) one
# (see loaders.data_from_tsv -- TODO confirm).
data = loaders.data_from_tsv(tsv_filename, [col])[0]
# 30-bin normalized histogram. NOTE(review): the `normed` kwarg was removed
# in matplotlib >= 3.1; newer matplotlib requires `density=True` instead.
scatter = plot.hist(data, 30, normed=1, facecolor='g', alpha=0.6)
plot.savefig(sys.argv[3])
| [
"dean.malmgren@datascopeanalytics.com"
] | dean.malmgren@datascopeanalytics.com |
e7990a5346d87a6f6a1ec12bd2d3f45889dea91d | d16446123405b7ebe3811f63a9d908f3390fd9d9 | /py2exe/build/bdist.win32/winexe/temp/mlpy.wavelet._uwt.py | d2debc4b055af12f9c84b4caedf744756369d59d | [] | no_license | minoTour/ampbalance | 998d690a1592c5b8e81f6822f659a7755f86cd44 | d318f060e2c06d10fd3987181e53c550321482f1 | refs/heads/master | 2016-09-10T18:22:16.941311 | 2015-07-24T12:52:03 | 2015-07-24T12:52:03 | 38,117,971 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 354 | py |
# py2exe-generated stub: locate the compiled extension shipped next to the
# frozen executable and load it in place of this pure-Python module.
def __load():
    import imp, os, sys
    try:
        # Inside a py2exe bundle __loader__.archive points at the archive.
        dirname = os.path.dirname(__loader__.archive)
    except NameError:
        # Not running frozen: fall back to the interpreter prefix.
        dirname = sys.prefix
    path = os.path.join(dirname, 'mlpy.wavelet._uwt.pyd')
    #print "py2exe extension module", __name__, "->", path
    mod = imp.load_dynamic(__name__, path)
    ## mod.frozen = 1
__load()
del __load  # keep the helper out of the module namespace
| [
"matt.loose@nottingham.ac.uk"
] | matt.loose@nottingham.ac.uk |
ac122184616c16ed7758b86049ac5088f41ea720 | a1c8731a8527872042bd46340d8d3e6d47596732 | /programming-laboratory-I/8nvp/inteiros.py | e92e810ee018872c571149249f4d17f14de93801 | [
"MIT"
] | permissive | MisaelAugusto/computer-science | bbf98195b0ee954a7ffaf58e78f4a47b15069314 | d21335a2dc824b54ffe828370f0e6717fd0c7c27 | refs/heads/master | 2022-12-04T08:21:16.052628 | 2020-08-31T13:00:04 | 2020-08-31T13:00:04 | 287,621,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | # coding: utf-8
# Aluno: Misael Augusto
# Matrícula: 117110525
# Problema: Inteiros Positivos Divisíveis
# Read A, B and the limit K, then print every integer in [1, K] that is
# divisible by both A and B (i.e. by their least common multiple).
A = int(raw_input())
B = int(raw_input())
K = int(raw_input())
for i in range(1, K + 1):
    if i % A == 0 and i % B == 0:
        print i
| [
"misael.costa@ccc.ufcg.edu.br"
] | misael.costa@ccc.ufcg.edu.br |
1563132ab1e27fa9a5cdfe81eed015ff23a5b1c3 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/lazyprogrammer_machine_learning_examples/machine_learning_examples-master/rnn_class/batch_wiki.py | c65bbf539a79ec9ad0b266adf4b900adb5868bb5 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 6,466 | py | # https://deeplearningcourses.com/c/deep-learning-recurrent-neural-networks-in-python
# https://udemy.com/deep-learning-recurrent-neural-networks-in-python
import sys
import theano
import theano.tensor as T
import numpy as np
import matplotlib.pyplot as plt
import json
from datetime import datetime
from sklearn.utils import shuffle
from batch_gru import GRU
# from batch_lstm import LSTM
from util import init_weight, get_wikipedia_data
class RNN:
    # Word-level recurrent language model: embedding lookup -> stack of
    # recurrent units (GRU by default) -> softmax over the vocabulary.
    def __init__(self, D, hidden_layer_sizes, V):
        # D: embedding dimensionality; V: vocabulary size;
        # hidden_layer_sizes: units per recurrent layer.
        self.hidden_layer_sizes = hidden_layer_sizes
        self.D = D
        self.V = V

    def fit(self, X, learning_rate=10e-5, mu=0.99, epochs=10, batch_sz=100, show_fig=True, activation=T.nnet.relu, RecurrentUnit=GRU):
        # X: list of sentences, each a list of word indices.  Index 0 is
        # prepended as a START token and index 1 appended as an END token
        # when batches are built below.
        D = self.D
        V = self.V
        N = len(X)

        # Embedding matrix and the recurrent stack.
        We = init_weight(V, D)
        self.hidden_layers = []
        Mi = D
        for Mo in self.hidden_layer_sizes:
            ru = RecurrentUnit(Mi, Mo, activation)
            self.hidden_layers.append(ru)
            Mi = Mo

        # Output projection to vocabulary logits.
        Wo = init_weight(Mi, V)
        bo = np.zeros(V)

        self.We = theano.shared(We)
        self.Wo = theano.shared(Wo)
        self.bo = theano.shared(bo)
        self.params = [self.We, self.Wo, self.bo]
        for ru in self.hidden_layers:
            self.params += ru.params

        thX = T.ivector('X') # will represent multiple batches concatenated
        thY = T.ivector('Y') # represents next word
        thStartPoints = T.ivector('start_points')

        # Forward pass: embed, run through the recurrent stack, softmax.
        Z = self.We[thX]
        for ru in self.hidden_layers:
            Z = ru.output(Z, thStartPoints)
        py_x = T.nnet.softmax(Z.dot(self.Wo) + self.bo)

        prediction = T.argmax(py_x, axis=1)
        # Mean negative log-likelihood of the target words.
        cost = -T.mean(T.log(py_x[T.arange(thY.shape[0]), thY]))
        grads = T.grad(cost, self.params)
        dparams = [theano.shared(p.get_value()*0) for p in self.params]

        # Momentum SGD: parameter step uses the updated velocity.
        updates = [
            (p, p + mu*dp - learning_rate*g) for p, dp, g in zip(self.params, dparams, grads)
        ] + [
            (dp, mu*dp - learning_rate*g) for dp, g in zip(dparams, grads)
        ]

        # self.predict_op = theano.function(inputs=[thX, thStartPoints], outputs=prediction)
        self.train_op = theano.function(
            inputs=[thX, thY, thStartPoints],
            outputs=[cost, prediction],
            updates=updates
        )

        costs = []
        n_batches = N / batch_sz  # Python 2 integer division
        for i in xrange(epochs):
            t0 = datetime.now()
            X = shuffle(X)
            n_correct = 0
            n_total = 0
            cost = 0
            for j in xrange(n_batches):
                # construct input sequence and output sequence as
                # concatenatation of multiple input sequences and output sequences
                # input X should be a list of 2-D arrays or one 3-D array
                # N x T(n) x D - batch size x sequence length x num features
                # sequence length can be variable
                sequenceLengths = []
                input_sequence = []
                output_sequence = []
                for k in xrange(j*batch_sz, (j+1)*batch_sz):
                    # don't always add the end token
                    if np.random.random() < 0.01 or len(X[k]) <= 1:
                        input_sequence += [0] + X[k]
                        output_sequence += X[k] + [1]
                        sequenceLengths.append(len(X[k]) + 1)
                    else:
                        input_sequence += [0] + X[k][:-1]
                        output_sequence += X[k]
                        sequenceLengths.append(len(X[k]))
                n_total += len(output_sequence)

                # Mark where each concatenated sequence starts, presumably so
                # the recurrent units reset their hidden state at sequence
                # boundaries (see batch_gru.GRU.output -- TODO confirm).
                startPoints = np.zeros(len(output_sequence), dtype=np.int32)
                last = 0
                for length in sequenceLengths:
                    startPoints[last] = 1
                    last += length

                c, p = self.train_op(input_sequence, output_sequence, startPoints)
                cost += c
                # Count next-word predictions that match the targets.
                for pj, xj in zip(p, output_sequence):
                    if pj == xj:
                        n_correct += 1
                if j % 1 == 0:
                    sys.stdout.write("j/n_batches: %d/%d correct rate so far: %f\r" % (j, n_batches, float(n_correct)/n_total))
                    sys.stdout.flush()
            print "i:", i, "cost:", cost, "correct rate:", (float(n_correct)/n_total), "time for epoch:", (datetime.now() - t0)
            costs.append(cost)

        if show_fig:
            plt.plot(costs)
            plt.show()
def train_wikipedia(we_file='word_embeddings.npy', w2i_file='wikipedia_word2idx.json', RecurrentUnit=GRU):
# there are 32 files
sentences, word2idx = get_wikipedia_data(n_files=10, n_vocab=2000)
print "finished retrieving data"
print "vocab size:", len(word2idx), "number of sentences:", len(sentences)
rnn = RNN(30, [30], len(word2idx))
rnn.fit(sentences, learning_rate=2*10e-5, epochs=10, show_fig=True, activation=T.nnet.relu)
np.save(we_file, rnn.We.get_value())
with open(w2i_file, 'w') as f:
json.dump(word2idx, f)
def find_analogies(w1, w2, w3, we_file='word_embeddings.npy', w2i_file='wikipedia_word2idx.json'):
    # Solve the analogy w1 - w2 + w3 (e.g. king - man + woman) in the
    # saved embedding space under both Euclidean and cosine distance.
    We = np.load(we_file)
    with open(w2i_file) as f:
        word2idx = json.load(f)

    king = We[word2idx[w1]]
    man = We[word2idx[w2]]
    woman = We[word2idx[w3]]
    v0 = king - man + woman

    def dist1(a, b):
        # Euclidean distance.
        return np.linalg.norm(a - b)
    def dist2(a, b):
        # Cosine distance: 1 - cos(angle between a and b).
        return 1 - a.dot(b) / (np.linalg.norm(a) * np.linalg.norm(b))

    for dist, name in [(dist1, 'Euclidean'), (dist2, 'cosine')]:
        # Linear scan for the nearest word, excluding the query words.
        min_dist = float('inf')
        best_word = ''
        for word, idx in word2idx.iteritems():
            if word not in (w1, w2, w3):
                v1 = We[idx]
                d = dist(v0, v1)
                if d < min_dist:
                    min_dist = d
                    best_word = word
        print "closest match by", name, "distance:", best_word
        print w1, "-", w2, "=", best_word, "-", w3
if __name__ == '__main__':
    # Train on Wikipedia, then probe the embeddings with analogy queries.
    we = 'working_files/batch_gru_word_embeddings.npy'
    w2i = 'working_files/batch_wikipedia_word2idx.json'
    train_wikipedia(we, w2i, RecurrentUnit=GRU)
    find_analogies('king', 'man', 'woman', we, w2i)
    find_analogies('france', 'paris', 'london', we, w2i)
    find_analogies('france', 'paris', 'rome', we, w2i)
    find_analogies('paris', 'france', 'italy', we, w2i)
| [
"659338505@qq.com"
] | 659338505@qq.com |
b16c771b71602489f1e72f2442c4043d433a7d0b | 215e3c24d9bf55c5951cdbab08d045663003331a | /Lib/Scripts/glyphs/components/decompose.py | d8e9069d97698e75433bd87bb99ab3e8759391b0 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | hipertipo/hTools2 | 8ac14ee37d6ed78a5ce906e65befa889798cc53d | a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c | refs/heads/master | 2022-07-10T20:37:13.869044 | 2018-11-21T10:42:44 | 2018-11-21T10:42:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | py | # [h] remove components
'''Remove components in selected glyphs.'''
from mojo.roboFont import CurrentFont
from hTools2.modules.fontutils import get_glyphs
from hTools2.modules.messages import no_font_open, no_glyph_selected
# Script options: decompose the foreground layer and/or every extra layer.
foreground = True
layers = False

f = CurrentFont()
if f is not None:
    glyph_names = get_glyphs(f)  # presumably the currently selected glyphs
    layer_names = f.layerOrder
    if len(glyph_names) > 0:
        print 'decomposing selected glyphs...',
        for glyph_name in glyph_names:
            if foreground:
                # Decompose in the default (foreground) layer, wrapped in
                # an undo step so the change can be reverted in RoboFont.
                g = f[glyph_name]
                g.prepareUndo('decompose')
                g.decompose()
                g.performUndo()
            if layers:
                # Repeat the decompose on every layer of the glyph.
                for layer_name in layer_names:
                    g = f[glyph_name].getLayer(layer_name)
                    g.prepareUndo('decompose')
                    g.decompose()
                    g.performUndo()
        print 'done.\n'
    # no glyph selected
    else:
        print no_glyph_selected
# no font open
else:
    print no_font_open
| [
"gustavo@hipertipo.com"
] | gustavo@hipertipo.com |
304d71ef2e9f9e97d84b4043bccbde3c0fd3f073 | e2c120b55ab149557679e554c1b0c55126e70593 | /python/imagej/examples/imglib2_LazyCellImg_montage_simple.py | 24ea2087e32b9b6ef268abcf6c5344ac0f448bf6 | [] | no_license | acardona/scripts | 30e4ca2ac87b9463e594beaecd6da74a791f2c22 | 72a18b70f9a25619b2dbf33699a7dc1421ad22c6 | refs/heads/master | 2023-07-27T14:07:37.457914 | 2023-07-07T23:13:40 | 2023-07-07T23:14:00 | 120,363,431 | 4 | 5 | null | 2023-05-02T11:20:49 | 2018-02-05T21:21:13 | Python | UTF-8 | Python | false | false | 1,770 | py | from ij import IJ, ImagePlus
from net.imglib2.img.cell import LazyCellImg, CellGrid, Cell
from net.imglib2.img.basictypeaccess.array import ByteArray
from net.imglib2.img.basictypeaccess import ByteAccess
from net.imglib2.img.display.imagej import ImageJFunctions as IL
from java.lang import System
imp = IJ.getImage()
img = IL.wrap(imp) # Creates PlanarImg instance with pointers to imp's slices
class SliceGet(LazyCellImg.Get):
    """Supplies one Cell per grid index, backed by the ImagePlus stack slices."""
    def __init__(self, imp, grid):
        self.imp = imp
        self.grid = grid
        # Every cell is exactly one slice: width x height of the source image.
        self.cell_dimensions = [self.imp.getWidth(), self.imp.getHeight()]
        self.cache = {}  # index -> Cell, so each slice is wrapped only once
    def get(self, index):
        cell = self.cache.get(index, None)
        if not cell:
            cell = self.makeCell(index)
            self.cache[index] = cell
        return cell
    def makeCell(self, index):
        # Grid position of this cell (Jython 2: '/' on ints floor-divides).
        n_cols = self.grid.imgDimension(0) / self.grid.cellDimension(0)
        x0 = (index % n_cols) * self.grid.cellDimension(0)
        y0 = (index / n_cols) * self.grid.cellDimension(1)
        index += 1 # 1-based slice indices in ij.ImageStack
        if index < 1 or index > self.imp.getStack().size():
            # Return blank image: a ByteAccess that always returns 255
            return Cell(self.cell_dimensions,
                        [x0, y0],
                        type('ConstantValue', (ByteAccess,), {'getValue': lambda self, index: 255})())
        else:
            # Wrap the slice's pixel array directly (no copy).
            return Cell(self.cell_dimensions,
                        [x0, y0],
                        ByteArray(self.imp.getStack().getProcessor(index).getPixels()))
# Montage layout: 12 x 10 grid of cells, each the size of one slice.
n_cols = 12
n_rows = 10
cell_width = imp.getWidth()
cell_height = imp.getHeight()
grid = CellGrid([n_cols * cell_width, n_rows * cell_height],
                [cell_width, cell_height])
# Lazy image: cells are created (and cached by SliceGet) on first access.
montage = LazyCellImg(grid, img.cursor().next().createVariable(), SliceGet(imp, grid))
IL.show(montage, "Montage") | [
"sapristi@gmail.com"
] | sapristi@gmail.com |
1a6e43d4c51954316a7d506c7fb62a4b6aa0385a | 7266300d1fc7837f021f366333c6021695dc7d89 | /ex007.py | 826dc75ece9fd57d3b742ea949cbab5e546e4663 | [] | no_license | ritomar/python-guanabara | d2283cd411cb0af38a495979cdf5d22d2335cb4a | 3e732c0f464a2c8ba04d36a46415f238491338ab | refs/heads/master | 2020-04-24T20:14:03.769087 | 2019-02-24T22:41:23 | 2019-02-24T22:41:23 | 172,237,054 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | """
Exercício Python #007 - Média Aritmética
Desenvolva um programa que leia as duas notas de um aluno,
calcule e mostre a sua média.
"""
def media(a, b):
    """Return the arithmetic mean of the two given values."""
    soma = a + b
    return soma / 2
# Read the two grades from stdin and print their mean with one decimal place.
nota_1 = float(input("Primeira nota do aluno: "))
nota_2 = float(input("Segunda nota do aluno: "))
print(f'A média entre {nota_1:.1f} e {nota_2:.1f} é igual a {media(nota_1, nota_2):.1f}')
| [
"ritomar@hotmail.com"
] | ritomar@hotmail.com |
c982192c49c4f7ea0d862f1d54fd36db9f8ddaa5 | d48a8ba5aed0e73e011ab4e1fcaab378760d1827 | /smallslive/events/migrations/0004_auto__add_field_event_set.py | 227643f6f970a0586a942f9eced2f22921ed56a6 | [] | no_license | waytai/smallslive | 7e15472e2bc197a32ea4058c0096df7ea5723afe | 106c35ce52f3c500432d00b7bd937a3d283aee4f | refs/heads/master | 2020-05-01T07:26:53.583006 | 2014-05-01T22:52:15 | 2014-05-01T22:52:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,288 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add the 10-char 'set' CharField to the events_event table."""
        # Adding field 'Event.set'
        db.add_column(u'events_event', 'set',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=10, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        """Revert: drop the 'set' column added in forwards()."""
        # Deleting field 'Event.set'
        db.delete_column(u'events_event', 'set')
models = {
u'artists.artist': {
'Meta': {'ordering': "['lastname']", 'object_name': 'Artist'},
'artist_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['artists.ArtistType']", 'null': 'True', 'blank': 'True'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'firstname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'salutation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'templateid': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'artists.artisttype': {
'Meta': {'object_name': 'ArtistType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'events.event': {
'Meta': {'ordering': "['-start_day']", 'object_name': 'Event'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_freeform': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'end_day': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['events.EventType']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'performers': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['artists.Artist']", 'through': u"orm['events.GigPlayed']", 'symmetrical': 'False'}),
'set': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'start_day': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'events.eventtype': {
'Meta': {'object_name': 'EventType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'parent': ('django.db.models.fields.IntegerField', [], {})
},
u'events.gigplayed': {
'Meta': {'object_name': 'GigPlayed'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'gigs_played'", 'to': u"orm['artists.Artist']"}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'artists_gig_info'", 'to': u"orm['events.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['artists.ArtistType']"}),
'sort_order': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'})
}
}
complete_apps = ['events'] | [
"filip@jukic.me"
] | filip@jukic.me |
ab07f0f98350be43dafdc17542bd680df579a9a8 | ac44aa8fd5404b95e1d92f6268ae84d9f4e7319a | /spiderutil/connector/redis.py | e0061d6e10d8d117e0c99832f6a335c90fdd58ec | [
"MIT"
] | permissive | Thesharing/spider-utility | 140237db3fa4b20cc4caadbfdc22c67bd21022b9 | 1dcea98bf1740b1addfb3cdedea1ce92ed70a12c | refs/heads/master | 2020-07-26T17:38:51.694626 | 2020-01-07T18:00:05 | 2020-01-07T18:00:05 | 208,721,632 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,746 | py | from abc import abstractmethod
import redis
from .base import Database
class Redis(Database):
    """Base wrapper around a redis-py StrictRedis connection.

    ``name`` is the Redis key the concrete subclass operates on
    (stored by the Database base class -- TODO confirm).
    """

    def __init__(self, name: str,
                 host: str = 'localhost',
                 port: int = 6379):
        super(Redis, self).__init__(name, 'Redis')
        self.host = host
        self.port = port
        # decode_responses=True makes commands return str instead of bytes.
        self.conn = redis.StrictRedis(host=host,
                                      port=port,
                                      decode_responses=True)

    def check_connection(self):
        # Open a fresh connection and issue CLIENT LIST; raises a
        # redis connection error when the server is unreachable.
        conn = redis.StrictRedis(host=self.host, port=self.port,
                                 decode_responses=True)
        conn.client_list()

    @abstractmethod
    def count(self):
        # Subclasses report the cardinality of the underlying structure.
        raise NotImplementedError
class RedisSet(Redis):
    """Redis SET stored under the key ``self.name``."""

    @staticmethod
    def _as_members(values):
        """Normalize a single member or an iterable of members to a tuple.

        redis-py's sadd/srem take members as *args; passing a bare list as
        one positional argument raises redis.exceptions.DataError.
        """
        if isinstance(values, (list, tuple, set, frozenset)):
            return tuple(values)
        return (values,)

    def add(self, values):
        """Add one member or an iterable of members (SADD); return # added."""
        return self.conn.sadd(self.name, *self._as_members(values))

    def count(self):
        """Return the cardinality of the set (SCARD)."""
        return self.conn.scard(self.name)

    def empty(self):
        """Return True when the set has no members."""
        return self.conn.scard(self.name) <= 0

    def pop(self):
        """Remove and return a random member (SPOP); None when empty."""
        return self.conn.spop(self.name)

    def remove(self, values):
        """Remove one member or an iterable of members (SREM); return # removed."""
        return self.conn.srem(self.name, *self._as_members(values))

    def rand(self, number=None):
        """Return one random member, or a list of `number` random members."""
        if number:
            return self.conn.srandmember(self.name, number)
        else:
            return self.conn.srandmember(self.name)

    def is_member(self, value):
        """Return True when `value` is in the set (SISMEMBER)."""
        return self.conn.sismember(self.name, value)

    def all(self):
        """Return all members (SMEMBERS)."""
        return self.conn.smembers(self.name)

    def flush_all(self):
        """Delete the whole set key (DEL)."""
        return self.conn.delete(self.name)

    def __contains__(self, item):
        return self.is_member(item)
class RedisHash(Redis):
    """Redis HASH stored under the key ``self.name``."""

    def add(self, key):
        """Create field `key` with value 0 only if it is absent (HSETNX)."""
        return self.conn.hsetnx(self.name, key, 0)

    def count(self):
        """Return the number of fields (HLEN)."""
        return self.conn.hlen(self.name)

    def empty(self):
        """Return True when the hash has no fields."""
        return self.conn.hlen(self.name) <= 0

    def remove(self, keys):
        """Delete one field or an iterable of fields (HDEL); return # deleted.

        Bug fix: redis-py's hdel takes field names as *args, so an iterable
        must be unpacked -- a bare list raises redis.exceptions.DataError.
        """
        if isinstance(keys, (list, tuple, set, frozenset)):
            return self.conn.hdel(self.name, *keys)
        return self.conn.hdel(self.name, keys)

    def exists(self, key):
        """Return True when field `key` exists (HEXISTS)."""
        return self.conn.hexists(self.name, key)

    def all(self):
        """Return the whole hash as a dict (HGETALL)."""
        return self.conn.hgetall(self.name)

    def get(self, keys):
        """
        :param keys: a single key or a list of keys
        :return: a string, or a list of string correspondingly
        """
        if type(keys) is list:
            return self.conn.hmget(self.name, keys)
        else:
            return self.conn.hget(self.name, keys)

    def set(self, mapping: dict):
        """Set one or more fields; an empty dict is a no-op returning None."""
        if len(mapping) > 1:
            # NOTE(review): hmset is deprecated in newer redis-py in favor of
            # hset(name, mapping=...); kept for compatibility with the
            # installed client version.
            return self.conn.hmset(self.name, mapping)
        elif len(mapping) == 1:
            (key, value), = mapping.items()
            return self.conn.hset(self.name, key, value)

    def increment(self, key, value: int = 1):
        """Increment integer field `key` by `value` (HINCRBY)."""
        return self.conn.hincrby(self.name, key, value)

    def __contains__(self, item):
        return self.exists(item)
| [
"cyprestar@outlook.com"
] | cyprestar@outlook.com |
6b2d37b9bb8908aad59c1df2361d6a6fdbb066f1 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02763/s621426373.py | 7750cf1024ddffc0ab4f803b496ccb47188b9281 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | n=int(input())
# Read the string and query count; build one Fenwick (BIT) tree per letter
# a-z, each indexed 1..n, counting that letter's occurrences by position.
l=list(input())
q=int(input())
bit=[[0]*(n+1) for i in range(26)]
def bit_sum(o,i):
s=0
while i:
s+=bit[o][i]
i-=i&-i
return s
def bit_add(o,i,x):
while i<=n:
bit[o][i] += x
i+=i&-i
for i in range(n):
bit_add(ord(l[i])-97,i+1,1)
for i in range(q):
a,b,c=input().split()
b=int(b)-1
if a=='1':
bit_add(ord(l[b])-97,b+1,-1)
l[b]=c
bit_add(ord(c)-97,b+1,1)
else:
c=int(c)
print(sum(1 for o in range(26) if bit_sum(o,c)-bit_sum(o,b))) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c57634c144704ce971b204af1b8d2cac44f8071c | fc314838b18c14a00310f0059d5358c7c4afabd6 | /social_auth/backends/contrib/readability.py | 523b72af00dadb415ab123cb0fea283636071724 | [
"MIT"
] | permissive | opendream/asip | 5cb4b997fab2438193ae7490c159efced6dc3d91 | 20583aca6393102d425401d55ea32ac6b78be048 | refs/heads/master | 2022-11-28T23:28:18.405604 | 2020-03-10T04:56:23 | 2020-03-10T04:56:23 | 190,504,979 | 1 | 1 | MIT | 2022-11-22T01:10:46 | 2019-06-06T03:06:03 | HTML | UTF-8 | Python | false | false | 3,847 | py | """
Readability OAuth support.
This contribution adds support for Readability OAuth service. The settings
READABILITY_CONSUMER_KEY and READABILITY_CONSUMER_SECRET must be defined with
the values given by Readability in the Connections page of your account
settings."""
try:
import json as simplejson
except ImportError:
try:
import simplejson
except ImportError:
from django.utils import simplejson
from social_auth.backends import ConsumerBasedOAuth, OAuthBackend
from social_auth.exceptions import AuthCanceled
from social_auth.utils import setting
# Readability configuration
READABILITY_SERVER = 'www.readability.com'
READABILITY_API = 'https://%s/api/rest/v1' % READABILITY_SERVER
READABILITY_AUTHORIZATION_URL = '%s/oauth/authorize/' % READABILITY_API
READABILITY_ACCESS_TOKEN_URL = '%s/oauth/access_token/' % READABILITY_API
READABILITY_REQUEST_TOKEN_URL = '%s/oauth/request_token/' % READABILITY_API
READABILITY_USER_DATA_URL = '%s/users/_current' % READABILITY_API
class ReadabilityBackend(OAuthBackend):
    """Backend for Readability OAuth authentication."""
    name = 'readability'
    EXTRA_DATA = [('date_joined', 'date_joined'),
                  ('kindle_email_address', 'kindle_email_address'),
                  ('avatar_url', 'avatar_url'),
                  ('email_into_address', 'email_into_address')]

    def get_user_details(self, response):
        """Map the provider response onto the common user-detail keys."""
        return {'username': response['username'],
                'first_name': response['first_name'],
                'last_name': response['last_name']}

    def get_user_id(self, details, response):
        """The Readability username doubles as the unique user id."""
        return response['username']

    @classmethod
    def tokens(cls, instance):
        """Return the tokens needed to authenticate API access.

        Readability uses a pair of OAuth tokens (oauth_token and
        oauth_token_secret), which are embedded in the stored
        ``access_token`` query string.  ``instance`` must be a
        UserSocialAuth instance.
        """
        token = super(ReadabilityBackend, cls).tokens(instance)
        if not token or 'access_token' not in token:
            return token
        # Split the stored query string and keep only the two OAuth values.
        wanted = ('oauth_token', 'oauth_token_secret')
        pairs = (chunk.split('=') for chunk in token['access_token'].split('&'))
        return dict(pair for pair in pairs if pair[0] in wanted)
class ReadabilityAuth(ConsumerBasedOAuth):
    """OAuth flow implementation for the Readability service."""
    AUTHORIZATION_URL = READABILITY_AUTHORIZATION_URL
    REQUEST_TOKEN_URL = READABILITY_REQUEST_TOKEN_URL
    ACCESS_TOKEN_URL = READABILITY_ACCESS_TOKEN_URL
    SERVER_URL = READABILITY_SERVER
    AUTH_BACKEND = ReadabilityBackend
    SETTINGS_KEY_NAME = 'READABILITY_CONSUMER_KEY'
    SETTINGS_SECRET_NAME = 'READABILITY_CONSUMER_SECRET'

    def user_data(self, access_token, *args, **kwargs):
        """Fetch the current user's profile; None when the body is not JSON."""
        request = self.oauth_request(access_token, READABILITY_USER_DATA_URL)
        body = self.fetch_response(request)
        try:
            return simplejson.loads(body)
        except ValueError:
            return None

    def auth_complete(self, *args, **kwargs):
        """Finish the login process and return the authenticated user."""
        if 'error' in self.data:
            raise AuthCanceled(self)
        return super(ReadabilityAuth, self).auth_complete(*args, **kwargs)

    @classmethod
    def enabled(cls):
        """The backend is usable only when both consumer settings are set."""
        return (setting('READABILITY_CONSUMER_KEY')
                and setting('READABILITY_CONSUMER_SECRET'))
BACKENDS = {
'readability': ReadabilityAuth,
}
| [
"server@set.com"
] | server@set.com |
f867030fa583ee7514c9a37ddb4674b41cbfbdd5 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_eliding.py | f677e02f7c87edb7af849d872c93967c71742b22 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py |
from xai.brain.wordbase.verbs._elide import _ELIDE
# class header
class _ELIDING(_ELIDE):
    """Word entry for the inflected form "ELIDING" of the verb "elide"."""

    def __init__(self):
        # Initialise the base lemma first, then set the form-specific fields.
        _ELIDE.__init__(self)
        self.name = "ELIDING"
        self.basic = "elide"
        self.specie = 'verbs'
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
610b849a4b31a941063ad9ba3e13828b8b594e22 | f7f58aa4ea9ec78b20532971ddebe1e3d985dc23 | /practica1/practica1/urls.py | 66ebebc339bdf88ba5711d6e0f5a396babb137cb | [] | no_license | guille1194/Django-Practices | 10b9ff4817d41cb086e198c07bb82aee201fb049 | 738cbfdd4a12089d93cd68a0cde8653c490e7fd9 | refs/heads/master | 2021-03-08T19:30:11.229921 | 2016-05-23T05:38:53 | 2016-05-23T05:38:53 | 59,388,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 759 | py | """practica1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
]
| [
"guille1194@gmail.com"
] | guille1194@gmail.com |
f290584c3c4c816e376a560b93bb832d10eb65b4 | e3cb522fc2ce289c0604f6a6950838cde91ea07a | /config.py | 157bfbae4fa468ff111acbaf7d8e6e0436c25cdf | [
"MIT"
] | permissive | zwkj099/pythonApiTest | b6127847c78234c3bc9e77c5ab0e129de6efd11b | 3aeca12c4771885f1c4c52378131bf32295e9a8a | refs/heads/master | 2022-11-19T01:52:35.522016 | 2020-05-05T00:24:36 | 2020-05-05T00:24:36 | 279,872,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,169 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: leeyoshinari
import os
# 是否在Linux上使用,0为在Windows上使用,1为在Linux上使用
IS_LINUX = 1
# 日志级别
LOG_LEVEL = 'INFO'
# 接口响应超时时间
TIMEOUT = 0.5
# 检查端口是否存在间隔时间
SLEEP = 60
# ip地址和端口
IP = '127.0.0.1'
PORT = '8888'
# 请求头
HEADERS = {"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Content-Type": "application/json; charset=UTF-8"}
# 定时任务设置
# 0为只执行一次,1为每隔INTERVAL(单位s)执行一次,2为每天TIMER_SET执行一次
# 在Linux和Windows上均可以设置为0,1和2仅对Linux上有效
QUERY_TYPE = 0
# 执行间隔时间,单位为秒
INTERVAL = 120
# 定时任务执行时间
TIMER_SET = '23:59:00'
# 服务重启后是否执行。如果服务重新启动,则立即执行,仅QUERY_TYPE为1或2时有效,如果QUERY_TYPE为1,INTERVAL将重新计算
IS_START = True
# 测试用例路径
TESTCASE_PATH = os.path.join(os.path.dirname(__file__), 'testCase', 'testCase.xlsx')
# 测试结果存放路径
RESULT_PATH = os.path.join(os.path.dirname(__file__), 'result')
# 日志路径
LOG_PATH = os.path.join(os.path.dirname(__file__), 'result')
# 数据库相关配置
# 配置使用数据库名称,MYSQL、ORACLE
DATABASE_NAME = 'MYSQL'
# MySQL数据库配置
MYSQL_IP = '127.0.0.1'
MYSQL_USERNAME = 'root'
MYSQL_PASSWORD = '123456'
MYSQL_DATABASE = 'ATI'
# ORACLE数据库配置
ORACLE_HOST = '127.0.0.1:1521/orcl'
ORACLE_USERNAME = 'root'
ORACLE_PASSWORD = '123456'
# 是否将测试结果保存到excel
IS_TO_EXCEL = True
# Excel测试结果保存路径
EXCEL_RESULT_PATH = os.path.join(os.path.dirname(__file__), 'result')
# 测试完成后是否自动发送邮件
IS_EMAIL = True
# 邮箱配置,qq邮箱为smtp.qq.com
# 所用的发件邮箱必须开启SMTP服务
SMTP_SERVER = 'smtp.sina.com'
# 发件人
SENDER_NAME = '张三'
SENDER_EMAIL = 'zhangsan@qq.com'
# 邮箱登陆密码,经过base64编码
PASSWORD = 'UjBWYVJFZE9RbFpIV1QwOVBUMDlQUT09'
# 收件人,对应 baidu_all.txt 文件,该文件为邮件组名。
RECEIVER_NAME = 'baidu_all'
# RECEIVER_EMAIL = 'baidu_all.txt' 多个收件人用英文逗号分隔
# 测试报告相关的html,可不用修改
# 每行表格背景颜色,白灰相间,根据用例ID计算得到
BG_COLOR = ['FFFFFF', 'E8E8E8']
# 表格模板
HEADER = '接口自动化测试报告'
HTML = '<html><meta http-equiv="Content-Type";content="text/html";charset="utf-8"><body>{}</body></html>'
TITLE = '<h2 align="center">{}</h2>'
TEST_TIME = '<p align="right">测试时间:{}</p>'
H3 = '<h3>{}</h3>'
SPAN = '<span style="font-size:14px; font-weight:normal"> 所有用例测试结果见邮件附件</span>'
OVERVIEW1 = '<p> 用例总数:<font color="blue">{}</font> 用例执行总耗时:<font color="blue">{:.2f}</font> s</p>'
OVERVIEW2 = '<p> 用例执行成功数:<font color="blue">{}</font> 用例执行失败数:<font color="red">{}</font> 成功率:<font color="red">{:.2f}%</font></p>'
TABLE = '<table width="100%" border="1" cellspacing="0" cellpadding="6" align="center" style="table-layout:fixed; word-wrap:break-word;>{}</table>'
TABLE_HEAD = '<tr bgcolor="#99CCFF" align="center"><th width="8%">用例ID</th><th width="15%">用例名称</th><th width="15%">请求接口</th><th width="5%">请求方式</th><th width="19%">请求参数</th><th width="14%">响应值</th><th width="5%">响应时间</th><th width="5%">测试结果</th><th width="14%">失败原因</th></tr>'
TR = '<tr bgcolor="#{}">{}</tr>'
TD = '<td>{}</td>'
TD_FAIL = '<td><font color="red">Failure</font></td>'
TD_SUCCESS = '<td><font color="blue">Success</font></td>'
LAST = '<p style="color:blue">此邮件自动发出,如有疑问,请直接回复。</p>'
| [
"lee_jc@outlook.com"
] | lee_jc@outlook.com |
531e05c9e284bdac8bcfc77fbbe12ea390bb3b49 | 4f1c6b2a953035e2be265c75d48fdcaa3e9983e1 | /scrape/migrations/0003_scrapelog_message.py | 25fb03a3423d181caedb044ab7b4058fd3315f53 | [
"MIT"
] | permissive | azul-cloud/tbscraper | 577dc783041a40b4fb6cc2b4c41c63ec95027cf2 | f6edabe5c2d9e9531097bdf9d0428d138717f64f | refs/heads/master | 2021-01-10T21:37:48.421837 | 2014-12-26T20:29:46 | 2014-12-26T20:29:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds an optional ``message`` text
    # column (max 255 chars) to the ``scrapelog`` model.
    dependencies = [
        ('scrape', '0002_auto_20141223_1711'),
    ]
    operations = [
        migrations.AddField(
            model_name='scrapelog',
            name='message',
            field=models.CharField(null=True, blank=True, max_length=255),
            preserve_default=True,
        ),
    ]
| [
"awwester@gmail.com"
] | awwester@gmail.com |
9a9b1c004f4479ff0dd5144a044820c794c51bb9 | bb0eeade4685dc89ff8a53beb813afdf7394989d | /gaosuan/第三次课后作业/006.py | e693ec464824c0bc07034e7dfe1242c2f51c770f | [] | no_license | zhaocheng1996/pyproject | 72929cd0ba2f0486d7dc87a7defa82656bf75a8e | 0a1973dda314f844f9898357bc4a5c8ee3f2246d | refs/heads/master | 2021-10-26T08:38:43.675739 | 2019-04-11T13:52:46 | 2019-04-11T13:52:46 | 176,939,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,286 | py | '''
分治法解最近对问题
Description
最近对问题:使用分治算法解决最近对问题。
Input
第一行为测试用例个数。后面每一行表示一个用例,一个用例为一些平面上点的集合,点与点之间用逗号隔开,一个点的两个坐标用空格隔开。坐标值都是正数。
Output
对每一个用例输出两个距离最近的点(坐标使用空格隔开),用逗号隔开,先按照第一个坐标大小排列,再按照第二个坐标大小排列。如果有多个解,则按照每个解的第一个点的坐标排序,连续输出多个解,用逗号隔开。
Sample Input 1
1
1 1,2 2,3 3,4 4,5 5,1.5 1.5
Sample Output 1
1 1,1.5 1.5,1.5 1.5,2 2
'''
import math
class Node_nearst(object):
    # Accumulator for the closest point pair(s) found so far.
    # NOTE(review): ``res`` and ``dist`` are *class* attributes shared by all
    # instances; the module only ever creates the single ``node_nearst``
    # instance below, which is why this works here.
    res = []
    dist = 100000 # a sufficiently large initial distance
    def add(self, node_pair):
        # Normalise the pair order, then append unless already recorded.
        node_pair = sorted(node_pair, key=lambda n: (n.x, n.y))
        # skip pairs that were already added
        for np in self.res:
            if node_pair[0].x == np[0].x and node_pair[0].y == np[0].y \
                    and node_pair[1].x == np[1].x and node_pair[1].y == np[1].y:
                return
        self.res.append(node_pair)
    def clear_add(self, node_pair):
        # A strictly better pair was found: drop all stored pairs, keep this one.
        self.res.clear()
        node_pair = sorted(node_pair, key=lambda n: (n.x, n.y))
        self.res.append(node_pair)
    def sort(self):
        # Order solutions by their first point's coordinates for output.
        self.res = sorted(self.res, key=lambda np: (np[0].x, np[0].y))
    def print(self):
        # Emit all pairs as "x1 y1,x2 y2" joined by commas (no trailing comma).
        s = ''
        for node_pair in self.res:
            n1, n2 = node_pair
            n1 = self.__toInt(n1)
            n2 = self.__toInt(n2)
            s += str(n1.x) + " " + str(n1.y) + ',' + str(n2.x) + " " + str(n2.y) + ','
        print(s[:len(s) - 1])
    def __toInt(self, n):
        # Display whole-valued floats as ints (e.g. 2.0 -> 2); mutates ``n``.
        x, y = n.x, n.y
        n.x = int(x) if x == int(x) else x
        n.y = int(y) if y == int(y) else y
        return n
node_nearst = Node_nearst()
class Node(object):
    """A 2-D point with plain ``x``/``y`` attributes."""

    def __init__(self, x, y):
        self.x, self.y = x, y
def distance(n1, n2):
    """Euclidean distance between two point-like objects with x/y attrs."""
    dx = n1.x - n2.x
    dy = n1.y - n2.y
    return math.sqrt(dx * dx + dy * dy)
def isNearst(dist, n1, n2):
    # Fold the candidate pair (n1, n2) into the global ``node_nearst``
    # accumulator: a tie extends the solution set, a strictly smaller
    # distance replaces it and lowers the recorded minimum.
    if dist == node_nearst.dist:
        node_nearst.add([n1, n2])
    elif dist < node_nearst.dist:
        node_nearst.clear_add([n1, n2])
        node_nearst.dist = dist
def f(s, left, right):
    """Closest-pair divide & conquer over s[left..right] (s sorted by x).

    Returns the minimum pairwise distance in the range and records the best
    pair(s) through the module-level ``node_nearst`` accumulator via
    isNearst().
    """
    if right - left == 1: # exactly two points
        d = distance(s[left], s[right])
        isNearst(d, s[left], s[right])
        return d
    if right - left == 2: # exactly three points
        d1 = distance(s[left], s[left + 1])
        d2 = distance(s[left + 1], s[right])
        d3 = distance(s[left], s[right])
        min_d = min(d1, d2, d3)
        if d1 == min_d:
            isNearst(d1, s[left], s[left + 1])
        if d2 == min_d:
            # Bug fix: the original passed d1 here, so when d2 was the
            # minimum (and d1 was not) the pair was recorded against the
            # wrong distance, or wrongly accepted/rejected by isNearst.
            isNearst(d2, s[left + 1], s[right])
        if d3 == min_d:
            # Bug fix: same as above -- d1 was passed instead of d3.
            isNearst(d3, s[left], s[right])
        return min_d
    mid = (right - left) // 2 + left
    d_left = f(s, left, mid)
    d_right = f(s, mid + 1, right)
    dist = min(d_left, d_right)
    # Collect the vertical strip of points within ``dist`` of the split line.
    inner_s = []
    for i in range(mid, left - 1, -1):
        if s[mid].x - s[i].x < dist:
            inner_s.append(s[i])
        else:
            break
    for i in range(mid + 1, right + 1):
        if s[i].x - s[mid].x < dist:
            inner_s.append(s[i])
        else:
            break
    # Sort the strip by y ascending.
    inner_s = sorted(inner_s, key=lambda n: n.y)
    for i in range(len(inner_s)):
        for j in range(i + 1, i + 7): # at most 6 neighbours need checking
            if j < len(inner_s):
                if inner_s[j].y - inner_s[i].y >= dist:
                    break
                else:
                    d = distance(inner_s[j], inner_s[i])
                    if d <= dist:
                        isNearst(d, inner_s[j], inner_s[i])
                        dist = d
                    else:
                        # NOTE(review): breaking here skips later strip
                        # candidates whose y is still within ``dist``;
                        # looks suspicious -- confirm against a reference
                        # closest-pair implementation before changing.
                        break
            else:
                break
    return dist
nodes = input().strip().split(',')
s = []
for node in nodes:
x, y = list(map(float, node.split()))
node = Node(x, y)
s.append(node)
# 按照 x 升序
s = sorted(s, key=lambda n: n.x)
f(s, 0, len(s) - 1)
# print(node_nearst.dist)
node_nearst.sort()
node_nearst.print()
| [
"34829837+zhaocheng1996@users.noreply.github.com"
] | 34829837+zhaocheng1996@users.noreply.github.com |
85e8d525463c18ce01066ae724d8d7dcba3c0dfa | d26652c7774e1b5c0a7cb2f8d20d36c77b192bec | /env/bin/futurize | ed8d1440ca071c11e2a22808ea67626061406579 | [] | no_license | teknofage/blockchain_tutorial | 41c38f1d4610e885881d99f95023b3fb1bed3966 | dbb99e2c3ba3749d6d861199b016a6555456233f | refs/heads/master | 2022-12-08T05:57:38.172539 | 2020-09-03T23:44:35 | 2020-09-03T23:44:35 | 290,911,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | #!/Users/Funkhauser/dev/Courses/BEW-2.4/blockchain/blockchain_tutorial/env/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.18.2','console_scripts','futurize'
__requires__ = 'future==0.18.2'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('future==0.18.2', 'console_scripts', 'futurize')()
)
| [
"teknofage@gmail.com"
] | teknofage@gmail.com | |
4c84e4fa25fbaecc72158befe736472736cf14a5 | d8e8e528b1942b3528c88b12729f0cbc7b7d606f | /pipenv/vendor/vistir/cursor.py | 22d643e13ae72bfa897e9c5c7933b93dbc2cb4e3 | [
"MIT",
"BSD-3-Clause",
"ISC"
] | permissive | frostming/pipenv | 997e5f71ac5a4bbac3aacd1fa000da6e0c8161eb | 661184e5ccf9bec3e3b4b03af778e54fe2fbc1a2 | refs/heads/master | 2021-04-15T03:33:03.817473 | 2019-03-18T03:14:33 | 2019-03-18T03:14:33 | 126,263,945 | 1 | 1 | MIT | 2019-03-19T09:44:03 | 2018-03-22T01:48:41 | Python | UTF-8 | Python | false | false | 2,099 | py | # -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function
import ctypes
import os
import sys
__all__ = ["hide_cursor", "show_cursor"]
class CONSOLE_CURSOR_INFO(ctypes.Structure):
_fields_ = [("dwSize", ctypes.c_int), ("bVisible", ctypes.c_int)]
WIN_STDERR_HANDLE_ID = ctypes.c_ulong(-12)
WIN_STDOUT_HANDLE_ID = ctypes.c_ulong(-11)
def get_stream_handle(stream=sys.stdout):
"""
Get the OS appropriate handle for the corresponding output stream.
:param str stream: The the stream to get the handle for
:return: A handle to the appropriate stream, either a ctypes buffer
or **sys.stdout** or **sys.stderr**.
"""
handle = stream
if os.name == "nt":
from ctypes import windll
handle_id = WIN_STDOUT_HANDLE_ID
handle = windll.kernel32.GetStdHandle(handle_id)
return handle
def hide_cursor(stream=sys.stdout):
"""
Hide the console cursor on the given stream
:param stream: The name of the stream to get the handle for
:return: None
:rtype: None
"""
handle = get_stream_handle(stream=stream)
if os.name == "nt":
from ctypes import windll
cursor_info = CONSOLE_CURSOR_INFO()
windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
cursor_info.visible = False
windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
else:
handle.write("\033[?25l")
handle.flush()
def show_cursor(stream=sys.stdout):
"""
Show the console cursor on the given stream
:param stream: The name of the stream to get the handle for
:return: None
:rtype: None
"""
handle = get_stream_handle(stream=stream)
if os.name == "nt":
from ctypes import windll
cursor_info = CONSOLE_CURSOR_INFO()
windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
cursor_info.visible = True
windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
else:
handle.write("\033[?25h")
handle.flush()
| [
"dan@danryan.co"
] | dan@danryan.co |
e74d44531671aadc3201722996969924f6623ae3 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_grating.py | 799b17e241893c3b44ad883736c6a1fff89d0a00 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py |
# class header
class _GRATING():
def __init__(self,):
self.name = "GRATING"
self.definitions = [u'A grating sound is unpleasant and annoying.']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
808454c879c3cf4cddf8607025629c89cfbd67e7 | 39ab815dfdbab9628ede8ec3b4aedb5da3fd456a | /aql/benchmark/lib_18/SConscript | 3f6a3ee8108f8af55b77427a21087d2b0d74442b | [
"MIT"
] | permissive | menify/sandbox | c03b1bf24c1527b47eb473f1acc433f17bfb1d4f | 32166c71044f0d5b414335b2b6559adc571f568c | refs/heads/master | 2016-09-05T21:46:53.369065 | 2015-04-20T06:35:27 | 2015-04-20T06:35:27 | 25,891,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | Import('env')
list = Split("""
class_0.cpp
class_1.cpp
class_2.cpp
class_3.cpp
class_4.cpp
class_5.cpp
class_6.cpp
class_7.cpp
class_8.cpp
class_9.cpp
""")
env.StaticLibrary("lib_18", list)
| [
"menify@a28edc5c-ec3e-0410-a3da-1b30b3a8704b"
] | menify@a28edc5c-ec3e-0410-a3da-1b30b3a8704b | |
10c5e92d9c8b905381d80777b881d54bf21ed936 | 803ff496aff9eef77f3186991878b6f16e54ba0a | /inital_setup.py | b42b48736c6f0b6eb008cde29203c230f49a41eb | [] | no_license | gopal1992/ssadmin | c54dae22dd69e48730affc1cdba0c0ee17b1e48c | 96370e8fc108843a70b326d5b136be94ae0b3084 | refs/heads/master | 2016-09-09T22:44:35.003945 | 2015-01-24T10:15:12 | 2015-01-24T10:15:12 | 29,772,729 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib.auth.models import Group
GROUP_NAMES = ["subscriber_admin",
"subscriber_user",
"shield_square_customer_support",
"shield_square_super_admin",
"demo_user"]
def create_groups():
    """Create every auth Group listed in GROUP_NAMES (idempotent)."""
    for group in GROUP_NAMES:
        Group.objects.get_or_create(name=group)
if __name__ == "__main__":
create_groups()
| [
"git@github.com"
] | git@github.com |
ca256811fee23d941f3d1b5ff262f9bbf44ab758 | a1c166a1ac4782f1f0792e0fd21741360373b376 | /backEnd/explorer/__init__.py | 2a36bacaa299ec747655ed024864550acf83062d | [] | no_license | xiaomapython/QlaskExplor | 3c7b75866b8276a5c2de3fbfddf779e1a66691d0 | c8b1757d08d06d350f7ca41897bbf4378fde3911 | refs/heads/master | 2020-06-23T08:08:45.169160 | 2019-05-15T02:05:17 | 2019-05-15T02:05:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,756 | py | # _*_ coding:utf-8 _*_
# company: RuiDa Futures
# author: zizle
import redis
import logging
from concurrent_log_handler import ConcurrentRotatingFileHandler
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_session import Session
from config import config
from explorer.modules.users import users_blu
db = SQLAlchemy()
redis_store = None
def setup_log(environment):
    """Configure logging for the given environment name."""
    # Set the global log level from the environment's configuration.
    logging.basicConfig(level=config[environment].LOG_LEVEL)  # e.g. DEBUG in development
    # Multiprocess-safe rotating file handler: log path, max bytes per file,
    # and the number of backup files to keep.
    file_log_handler = ConcurrentRotatingFileHandler("logs/log", maxBytes=1024 * 1024, backupCount=10)
    # Record format: time - filename[line:lineno] - level: message
    formatter = logging.Formatter('%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
    # Attach the format to the handler ...
    file_log_handler.setFormatter(formatter)
    # ... and the handler to the root logger.
    logging.getLogger().addHandler(file_log_handler)
def create_app(environment):
    """Build a Flask app initialised with the named environment's config."""
    setup_log(environment)
    app = Flask(__name__)  # instantiate the app
    app.register_blueprint(users_blu)  # register the users module blueprint
    app.config.from_object(config[environment])
    db.init_app(app)  # bind the database
    global redis_store
    redis_store = redis.StrictRedis(host=config[environment].REDIS_HOST, port=config[environment].REDIS_PORT)  # connect redis
    Session(app)  # configure where sessions are stored
    return app
| [
"zizle_lin@163.com"
] | zizle_lin@163.com |
503a626fb48d5dfd37006db6436d65939dd6e970 | 313bb88c43d74995e7426f9482c6c8e670fdb63c | /02-instructions/if_zadanie2.py | fefd981cfb8a89b55f6868644e4b98563a298b0f | [] | no_license | martakedzior/python-course | 8e93fcea3e9e1cb51920cb1fcf3ffbb310d1d654 | 3af2296c2092023d91ef5ff3b4ef9ea27ec2f227 | refs/heads/main | 2023-05-06T07:26:58.452520 | 2021-05-26T16:50:26 | 2021-05-26T16:50:26 | 339,822,876 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | # Pobierz dwie liczby całkowite od użytkownika i oblicz ich sumę.
# Task: read two integers and, if their sum is *strictly* greater than 100,
# print it; otherwise print "Koniec".
print('Podaj proszę dwie liczby całkowite.')
user_input1 = int(input('Podaj proszę pierwszą liczbę całkowitą: '))
user_input2 = int(input('Podaj proszę drugą liczbę całkowitą: '))
user_summ = user_input1 + user_input2
# Bug fix: the task says "większa niż 100" (strictly greater than 100),
# but the original condition used >=, which wrongly printed the sum when
# it was exactly 100.
if user_summ > 100:
    print(f"Suma wprowadzonych liczb to: {user_summ}")
else:
print("Koniec") | [
"marta.kedzior@wp.pl"
] | marta.kedzior@wp.pl |
5553da7294564691ff4a70f68d17f1e7aded2e74 | 930a868ae9bbf85df151b3f54d04df3a56bcb840 | /benchmark/union_find_decoder/XZZX_code/decoding_time_comparison_low_p/process_data_UF_multiple_p_max_half_weight_1.py | a95feb630ff0945ef8aa35c409f48053efbab981 | [
"MIT"
] | permissive | yuewuo/QEC-Playground | 1148f3c5f4035c069986d8b4103acf7f1e34f9d4 | 462208458cdf9dc8a33d4553a560f8a16c00e559 | refs/heads/main | 2023-08-10T13:05:36.617858 | 2023-07-22T23:48:49 | 2023-07-22T23:48:49 | 312,809,760 | 16 | 1 | MIT | 2023-07-22T23:48:51 | 2020-11-14T12:10:38 | Python | UTF-8 | Python | false | false | 3,135 | py | import sys, os, json, math
import scipy.stats
fixed_configuration = None
configurations = []
data_vec = []
with open("decoding_time_UF_multiple_p_max_half_weight_1.txt", "r", encoding="utf-8") as f:
lines = f.readlines()
for line in lines:
line = line.strip(" \r\n")
if line == "": # ignore empty line
continue
if line[:3] == "#f ":
fixed_configuration = json.loads(line[3:])
elif line[:2] == "# ":
configurations.append(json.loads(line[2:]))
data_vec.append([])
else:
data_vec[-1].append(json.loads(line))
print(fixed_configuration)
def average(lst):
    """Arithmetic mean of a non-empty sequence of numbers."""
    total = sum(lst)
    return total / len(lst)
p_vec = [0.01 * (10 ** (- i / 2)) for i in range(6)]
fitting_data_vec = [[] for di in range(len(p_vec))]
for i in range(0, len(configurations)):
config = configurations[i]
vec = data_vec[i]
idx = -1
for i in range(len(p_vec)):
p = p_vec[i]
ratio = config["p"] / p
if ratio > 0.99 and ratio < 1.01:
idx = i
assert idx >= 0, "must find similar p"
fitting_data = fitting_data_vec[idx]
error_count = 0
success_count = 0
# these only accounts successful cases
time_build_decoders_vec = []
time_run_to_stable_vec = []
time_build_decoders_run_to_stable_vec = []
for e in vec:
if e["error"]:
error_count += 1
else:
success_count += 1
time_build_decoders_vec.append(e["time_build_decoders"])
time_run_to_stable_vec.append(e["time_run_to_stable"])
time_build_decoders_run_to_stable_vec.append(e["time_build_decoders"] + e["time_run_to_stable"])
upper_idx = min(max(0, int(success_count - error_count * 0.1)), success_count - 1) # this will lead to error rate of 110% x original error rate
print(f"error: {error_count}, success_count: {success_count}, error_rate: {error_count/(error_count+success_count)}")
print(f"time_build_decoders: {average(time_build_decoders_vec)}, {sorted(time_build_decoders_vec)[upper_idx]}")
print(f"time_run_to_stable: {average(time_run_to_stable_vec)}, {sorted(time_run_to_stable_vec)[upper_idx]}")
print(f"time_build_decoders_run_to_stable: {average(time_build_decoders_run_to_stable_vec)}, {sorted(time_build_decoders_run_to_stable_vec)[upper_idx]}")
if config["di"] >= 4:
fitting_data.append((config["di"], average(time_run_to_stable_vec)))
for i in range(len(p_vec)):
p = p_vec[i]
fitting_data = fitting_data_vec[i]
X = [math.log(e[0]) for e in fitting_data]
Y = [math.log(e[1]) for e in fitting_data]
slope, intercept, r, _, _ = scipy.stats.linregress(X, Y)
print("\n\n")
print(f"p = {p}")
print(fitting_data)
print(f"slope = {slope}")
print(f"intercept = {intercept}")
print(f"r_square = {r**2}")
for i in range(len(p_vec)):
p = p_vec[i]
fitting_data = fitting_data_vec[i]
X = [math.log(e[0]) for e in fitting_data]
Y = [math.log(e[1]) for e in fitting_data]
slope, intercept, r, _, _ = scipy.stats.linregress(X, Y)
print(f"{p} {slope} {r**2}")
| [
"yue.wu@yale.edu"
] | yue.wu@yale.edu |
1b27b010f72b216dfebe00127d885f4b9ae7397e | 8771c94dce3c7e30c9e5b5f45cf8683ba9cac6fd | /leetcode/algorithms/p0239_sliding_window_maximum_1.py | 54942cedb6ca1bf7d876b84fd48fc9d653af8800 | [] | no_license | J14032016/LeetCode-Python | f2a80ecb7822cf12a8ae1600e07e4e6667204230 | 9a8f5329d7c48dd34de3105c88afb5e03c2aace4 | refs/heads/master | 2023-03-12T02:55:45.094180 | 2021-03-07T07:55:03 | 2021-03-07T07:55:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | from heapq import heapify, heappop, heappush
from collections import deque
from typing import List
class Solution:
    def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:
        """Return the maximum of every length-k window of ``nums``.

        Uses a monotonically decreasing deque of indices: each index is
        pushed and popped at most once, giving O(n) instead of the previous
        O(n log n) lazy-deletion heap, with identical output.
        """
        window = deque()  # indices into nums; their values strictly decrease
        result = []
        for i, value in enumerate(nums):
            # Drop indices whose values can never again be a window maximum.
            while window and nums[window[-1]] <= value:
                window.pop()
            window.append(i)
            # Drop the front index once it slides out of the current window.
            if window[0] <= i - k:
                window.popleft()
            # The first complete window ends at index k - 1.
            if i >= k - 1:
                result.append(nums[window[0]])
        return result
| [
"mao_xiaodan@hotmail.com"
] | mao_xiaodan@hotmail.com |
bb134be3f5aee7803f010ae126fb295143bb8e11 | 76dab6591cb9c7ee566b76a0adc7b0b0c4086592 | /main/tests/test_forms.py | 043e0dfb18a0e9d453a719f41d6dc5b2f4f54591 | [] | no_license | gray-adeyi/booktime | 87962321e380cfa779b24f2bd6fa8c434687d084 | fb54bc35739b28b5a71a5cf0c1067f38140559ba | refs/heads/main | 2023-04-05T02:44:01.992984 | 2021-05-03T01:37:01 | 2021-05-03T01:37:25 | 363,434,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,544 | py | from django.test import TestCase
from django.core import mail
from unittest.mock import patch
from django.urls import reverse
from django.contrib import auth
from main import forms
from main import models
class TestForm(TestCase):
    # Exercises main.forms: contact/signup form validation, the mail they
    # send (captured by Django's in-memory mail.outbox), and the signup
    # page wiring through the test client.
    def test_valid_contact_us_form_sends_email(self):
        form = forms.ContactForm({
            'name': 'Luke Skywalker',
            'message': 'Hi there'
        })
        self.assertTrue(form.is_valid())
        # send_mail() must both dispatch the message and log at INFO level.
        with self.assertLogs('main.forms', level='INFO') as cm:
            form.send_mail()
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'Site message')
        self.assertGreaterEqual(len(cm.output), 1)
    def test_invalid_contact_us_form(self):
        # 'name' is missing, so validation must fail.
        form = forms.ContactForm({
            'message': 'Hi there'
        })
        self.assertFalse(form.is_valid())
    def test_valid_signup_form_sends_email(self):
        form = forms.UserCreationForm(
            {
                "email": "user@domain.com",
                "password1": "abcabcabc",
                "password2": "abcabcabc",
            }
        )
        self.assertTrue(form.is_valid())
        # The welcome mail is sent and logged, mirroring the contact form.
        with self.assertLogs("main.forms", level="INFO") as cm:
            form.send_mail()
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject, "Welcome to BookTime"
        )
        self.assertGreaterEqual(len(cm.output), 1)
    def test_user_signup_page_loads_correctly(self):
        response = self.client.get(reverse("signup"))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "signup.html")
        self.assertContains(response, "BookTime")
        self.assertIsInstance(
            response.context["form"], forms.UserCreationForm
        )
    def test_user_signup_page_submission_works(self):
        post_data = {
            "email": "user@domain.com",
            "password1": "abcabcabc",
            "password2": "abcabcabc",
        }
        # Patch send_mail so no real mail logic runs during the POST.
        with patch.object(
            forms.UserCreationForm, "send_mail"
        ) as mock_send:
            response = self.client.post(
                reverse("signup"), post_data
            )
        # A successful signup redirects, creates the user, and logs them in.
        self.assertEqual(response.status_code, 302)
        self.assertTrue(
            models.User.objects.filter(
                email="user@domain.com"
            ).exists()
        )
        self.assertTrue(
            auth.get_user(self.client).is_authenticated
        )
        mock_send.assert_called_once()
| [
"adeyigbenga005@gmail.com"
] | adeyigbenga005@gmail.com |
b989b224d13ef5e656c848699d9ca920d77932bc | 5cc4c0048d5ef16b0dd14d903d99b399b02b99ed | /core_file/file-line-iterate.py | 6ce1a731fb8f438fb73e9b238fdfd8e28de21f82 | [
"MIT-0"
] | permissive | dmilos/python_tutorial | c09261c3a5a704030834d7814a6e47ddbfbe4402 | f2f901a68cbc696e19350455da9b7db312d1a9fa | refs/heads/master | 2021-01-10T13:02:36.763154 | 2018-02-22T20:14:20 | 2018-02-22T20:14:20 | 53,527,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | #!/usr/bin/env python
myfile = open( "./test.txt", "r" )
# Line include new line at the end
for line in myfile :
print( "linija %s", line ) | [
"dmilos@gmail.com"
] | dmilos@gmail.com |
1e146d845d313b11524d950a0d28443076052f4a | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/javaclass/format/attributes/StackMapTableAttribute.pyi | f6201fa5b74458d446d4a3944b3ecdfa9b7c6897 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 880 | pyi | import ghidra.javaclass.format.attributes
import ghidra.program.model.data
import java.lang
class StackMapTableAttribute(ghidra.javaclass.format.attributes.AbstractAttributeInfo):
def __init__(self, __a0: ghidra.app.util.bin.BinaryReader): ...
def equals(self, __a0: object) -> bool: ...
def getAttributeLength(self) -> int: ...
def getAttributeNameIndex(self) -> int: ...
def getClass(self) -> java.lang.Class: ...
def getOffset(self) -> long: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toDataType(self) -> ghidra.program.model.data.DataType: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
00234a17532502f91ddf425841d5d93e5ac7f756 | a2e638cd0c124254e67963bda62c21351881ee75 | /Python modules/collateral_cashflow_upload.py | 34c9fb5c9ecfddfb13e5e677400582a6070bce6b | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,643 | py | ''' Cashflow upload file processing module.
This processor handles a file used for cashflow insertion on Call
Accounts in FA.
Date: 2014-02-24
Requester: Alex Boshoff
Developer: Jan Sinkora
'''
import os
import acm
import codecs
from at_feed_processing import SimpleCSVFeedProcessor, notify_log
import at_addInfo
class CashflowCSVFeedProcessor(SimpleCSVFeedProcessor):
'''Processor used for cashflow insertion.'''
# This must be platform and locale independent.
# In this case it's simpler to put the list here than using complex
# tools like calendar or locale.
MONTHS = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec']
# Required columns.
COLUMN_ACCOUNT_NUMBER = 'Account_Number'
COLUMN_FIXED_AMOUNT = 'CM_Nominal'
COLUMN_PAY_DATE = 'Settlement_Date'
_required_columns = [COLUMN_PAY_DATE, COLUMN_ACCOUNT_NUMBER,
COLUMN_FIXED_AMOUNT]
def parse_date(self, raw_date):
'''Parses the input date.
The expected format is D-Mon-YYYY where Mon is a three-letter
abbreviation of the month name in english.
Returns an ACM time.
'''
(day, month, year) = raw_date.split('-')
return acm.Time.DateFromYMD(
int(year), self.MONTHS.index(month) + 1, int(day))
def parse_amount(self, raw_amount):
'''Converts the input amount to a float.
The expected input is str('A,BCD.EFGH')
'''
return float(raw_amount.replace(',', ''))
@staticmethod
def _prepare_csv_line(line):
'''The file is in a weird MS Excel csv format, each line needs to be
prepared first.
Input line format:
u'\n"column1,column2,""column,with,commas"",column3"\r'
Output line format:
u'column1,column2,"column,with,commas",column3'
'''
return line.strip()[1:-1].replace('""', '"')
def _generate_records(self):
'''Handles file decoding before the DictReader opens the file.'''
# This replaces self._data with a generator.
decoded_lines = codecs.iterdecode(self._data, 'utf-16')
self._data = (self._prepare_csv_line(line) for line in decoded_lines)
return super(CashflowCSVFeedProcessor, self)._generate_records()
def _process_record(self, record, dry_run):
'''Handles the individual cashflow inserting instructions.'''
index, cashflow_data = record
account_number = cashflow_data[self.COLUMN_ACCOUNT_NUMBER].strip()
# -9,955.95
raw_fixed_amount = cashflow_data[self.COLUMN_FIXED_AMOUNT]
fixed_amount = self.parse_amount(raw_fixed_amount)
# 1-Oct-13
raw_date = cashflow_data[self.COLUMN_PAY_DATE]
date = self.parse_date(raw_date)
date_today = acm.Time.DateToday()
if date < date_today:
message = 'Cashflow on line {0} is backdated, skipping.'
raise self.RecordProcessingException(message.format(index))
if date > date_today:
message = 'Cashflow on line {0} is dated in the future, skipping.'
raise self.RecordProcessingException(message.format(index))
# Look for the exact object ID of the instrument.
instrument = acm.FDeposit[account_number]
if not instrument:
self._log_line(index, 'Call account {0} not found'.format(
account_number))
# Try to remove the dashes (the old naming convention).
account_number = account_number.replace('-', '')
self._log_line(index, 'Looking for call account {0}'.format(
account_number))
instrument = acm.FDeposit[account_number]
if not instrument:
self._log_line(index,
'Call account {0} not found either, aborting.'.format(
account_number))
message = 'Line {0}: Call account {1} not found.'.format(
index, account_number)
raise self.RecordProcessingException(message)
self._log_line(index, 'Instrument found: ' + instrument.Name())
self._create_cashflow(instrument, fixed_amount, date, dry_run)
def _create_cashflow(self, instrument, fixed_amount, date, dry_run):
'''Creates the cashflow on the instrument if it doesn't exist yet.'''
statuses = ('BO-BO Confirmed', 'BO Confirmed', 'FO Confirmed')
selected_trade = None
trades = [trade for trade in instrument.Trades()
if trade.Status() != 'Void']
# Look for the selected trade according to the priority
# defined in the tuple statuses (higher priority statuses
# are in the beginning of the tuple).
for status in statuses:
if not selected_trade:
for trade in trades:
if trade.Status() == status:
selected_trade = trade
break
if not selected_trade:
msg = 'A confirmed trade was not found for {0}.'.format(
instrument.Name())
raise self.RecordProcessingException(msg)
instrument_trades = acm.FList()
instrument_trades.Add(selected_trade)
leg = instrument.Legs()[0]
for cashflow in leg.CashFlows():
if (cashflow.PayDate() == date
and cashflow.CashFlowType() == 'Fixed Amount'
and cashflow.FixedAmount() == fixed_amount):
msg = ('There is already a cashflow with the specified nominal '
'and date: {0} on {1}')
raise self.RecordProcessingException(msg.format(
date, instrument.Name()))
self._log('Adjusting deposit with Fixed Amount: '
'{0} and date: {1}'.format(fixed_amount, date))
if dry_run:
self._log('Dry run: No cashflows are being added.')
else:
# Adjust the deposit. The method requires trade quantity
# because the cashflow will get automatically adjusted so that
# cashflow_fixed_amount * trade_quantity = requested fixed amount
action_result = instrument.AdjustDeposit(fixed_amount, date,
selected_trade.Quantity())
if action_result:
acm.PollDbEvents()
cashflows = instrument.Legs()[0].CashFlows()
last_cashflow = max(cashflows,
key=lambda cashflow: cashflow.Oid())
# This addinfo is required for filtering in Settlement Manager.
at_addInfo.save_or_delete(last_cashflow, 'Settle_Type',
'Settle')
self._log('Successfully adjusted deposit.')
else:
message = ('Failed to adjust deposit {0}, most likely due to '
'balance limit breach. See log for detailed info.')
raise self.RecordProcessingException(message.format(
instrument.Name()))
ael_variables = CashflowCSVFeedProcessor.ael_variables(
file_dir='C:/_temp',
file_name='CollateralMovementsExtractFrontArenaAbsa.csv')
def ael_main(params):
'''Entry point for task execution.'''
file_dir = params['file_dir']
file_name = params['file_name']
file_path = os.path.join(file_dir, file_name)
dry_run = params['dry_run']
processor = CashflowCSVFeedProcessor(file_path)
processor.add_error_notifier(notify_log)
processor.process(dry_run)
if not processor.errors:
print("Completed successfully")
| [
"nencho.georogiev@absa.africa"
] | nencho.georogiev@absa.africa |
b64644642055cc1cb33c6efcb1e586cbf510f178 | 46404c77e04907225475e9d8be6e0fd33227c0b1 | /recur val.py | b7b3e808a31c1be5b2231896db312a9a7beaae49 | [] | no_license | govardhananprabhu/DS-task- | 84b46e275406fde2d56c301fd1b425b256b29064 | bf54f3d527f52f61fefc241f955072f5ed9a6558 | refs/heads/master | 2023-01-16T07:41:27.064836 | 2020-11-27T11:52:50 | 2020-11-27T11:52:50 | 272,928,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | """
Given a fraction, find a recurring sequence of digits if exists, otherwise, print -1.
H 5 T 1500
Tag math
In des
First line contains 2 space separated integers N, D, denotes numerator and denominator.
Ot des
Print the sequence else -1.
8 3
6
50 22
27
11 2
-1
23 4
-1
12 5
-1
Exp
8/3 = 2.66666666.......
Hint
Find the quotient value with decimal values and check any recurring occurs.
"""
def fractionToDecimal(numr, denr):
res = ""
mp = {}
rem = numr % denr
while ((rem != 0) and (rem not in mp)):
mp[rem] = len(res)
rem = rem * 10
res_part = rem // denr
res += str(res_part)
rem = rem % denr
if (rem == 0):
return ""
else:
return res[mp[rem]:]
numr, denr = map(int,input().split())
res = fractionToDecimal(numr, denr)
if (res == ""):
print("-1")
else:
print(res)
| [
"noreply@github.com"
] | govardhananprabhu.noreply@github.com |
84e0d8697dea8055b305290bddb9cddcbace2a64 | e5bbdd55b78285c6d3348f0212654ab8cad65ad7 | /src/di_replication/repl_select/repl_select.py | 494f35cb4a7ea6ffca0dd80360416741a72168e9 | [
"Apache-2.0"
] | permissive | jeremyyma/data-intelligence-replication | 49892c99f2f528e565a0d6704304c294fa5dfb3c | 0d657b47a5a67fbaf2ae717a206db309ac49f592 | refs/heads/main | 2023-02-02T20:43:11.040492 | 2020-12-14T11:22:42 | 2020-12-14T11:22:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,589 | py | import sdi_utils.gensolution as gs
import os
import subprocess
import logging
import io
try:
api
except NameError:
class api:
queue = list()
class Message:
def __init__(self, body=None, attributes=""):
self.body = body
self.attributes = attributes
def send(port, msg):
if port == outports[1]['name']:
api.queue.append(msg)
class config:
## Meta data
config_params = dict()
version = '0.0.1'
tags = {}
operator_name = 'repl_select'
operator_description = "Repl. Select"
operator_description_long = "Creates SELECT SQL-statement for replication."
add_readme = dict()
add_readme["References"] = ""
format = '%(asctime)s | %(levelname)s | %(name)s | %(message)s'
logging.basicConfig(level=logging.DEBUG, format=format, datefmt='%H:%M:%S')
logger = logging.getLogger(name=config.operator_name)
# catching logger messages for separate output
log_stream = io.StringIO()
sh = logging.StreamHandler(stream=log_stream)
sh.setFormatter(logging.Formatter('%(asctime)s | %(levelname)s | %(name)s | %(message)s', datefmt='%H:%M:%S'))
api.logger.addHandler(sh)
def process(msg):
att = dict(msg.attributes)
att['operator'] = 'repl_select'
api.logger.info("Process started")
api.logger.debug('Attributes: {} - {}'.format(str(msg.attributes),str(att)))
sql = 'SELECT * FROM {table} WHERE \"DIREPL_STATUS\" = \'B\' AND \"DIREPL_PID\" = \'{pid}\' '.\
format(table=att['replication_table'],pid= att['pid'])
att['sql'] = sql
msg = api.Message(attributes=att,body = sql)
api.logger.info('SELECT statement: {}'.format(sql))
api.send(outports[1]['name'], msg)
log = log_stream.getvalue()
if len(log) > 0 :
api.send(outports[0]['name'], log )
inports = [{'name': 'trigger', 'type': 'message.table', "description": "Input data"}]
outports = [{'name': 'log', 'type': 'string', "description": "Logging data"}, \
{'name': 'msg', 'type': 'message', "description": "message with sql statement"}]
#api.set_port_callback(inports[0]['name'], process)
def test_operator():
msg = api.Message(attributes={'pid': 123123213, 'replication_table':'REPL_TABLE','base_table':'REPL_TABLE','latency':30,'data_outcome':True},body='')
process(msg)
for m in api.queue:
print('Attributes: \n{}'.format(m.attributes))
print('Body: \n{}'.format(m.body))
if __name__ == '__main__':
test_operator()
if True:
basename = os.path.basename(__file__[:-3])
package_name = os.path.basename(os.path.dirname(os.path.dirname(__file__)))
project_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
solution_name = '{}_{}.zip'.format(basename, api.config.version)
package_name_ver = '{}_{}'.format(package_name, api.config.version)
solution_dir = os.path.join(project_dir, 'solution/operators', package_name_ver)
solution_file = os.path.join(project_dir, 'solution/operators', solution_name)
# rm solution directory
subprocess.run(["rm", '-r', solution_dir])
# create solution directory with generated operator files
gs.gensolution(os.path.realpath(__file__), api.config, inports, outports)
# Bundle solution directory with generated operator files
subprocess.run(["vctl", "solution", "bundle", solution_dir, "-t", solution_file])
| [
"53856509+thhapke@users.noreply.github.com"
] | 53856509+thhapke@users.noreply.github.com |
818923ccce4bf77935b2fddf1d893a41888b262d | d2845579ea6aa51a2e150f0ffe6ccfda85d035ce | /kernel/components/deeplearning/vertnn/strategy/comparision.py | 1d5e89515b51b6fc36998443c53c9efe2699fe86 | [
"Apache-2.0"
] | permissive | as23187/WeFe | d8de9ff626f9f3e5d98e0850b0b717a80fd73e72 | ba92871d4b1d2eef6c606c34795f4575e84703bd | refs/heads/main | 2023-08-22T12:01:06.718246 | 2021-10-28T01:54:05 | 2021-10-28T01:54:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,202 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 Tianmian Tech. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from sortedcontainers import SortedList
class Comparision(object):
def __init__(self, size):
self._histograms = collections.deque(maxlen=size)
self._sorted_hist = SortedList()
def add(self, value):
if len(self._histograms) == self._histograms.maxlen:
self._sorted_hist.remove(self._histograms[0])
self._histograms.append(value)
self._sorted_hist.add(value)
def _get_lt_count(self, value):
return self._sorted_hist.bisect_left(value=value)
def _get_le_count(self, value):
return self._sorted_hist.bisect_right(value=value)
def _get_size(self):
return len(self._histograms)
def get_rate(self, value):
return self._get_lt_count(value) / self._get_size()
def is_topk(self, value, k):
if self._get_size() <= k:
return True
return self._get_size() - self._get_le_count(value) < k
| [
"winter.zou@welab-inc.com"
] | winter.zou@welab-inc.com |
edb564ba939ea966c5b45a3dd7d01f63a02ba3d4 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/116/usersdata/246/26079/submittedfiles/al1.py | 935a3c4a0c1e0e425ef0a807d2b02ca8247cab6f | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | from __future__ import division
#INICIE AQUI SEU CODIGO
r = float(input'Digite um valor para raio de uma lata: ')
a = float(input'Digite um valor para altura de uma lata: ')
v = (1314159*r*r*a) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
055f804ec52dcf7c05f7497a79d0205ddbe13427 | da3df36ce182dcbe08c8addb1e979019b456be9f | /mysite/settings/local.py | 0a0733fe80e0a92a50795f6f9f373f2a14f9599e | [] | no_license | miyanda2/Soduku | 4cfc0a1913539a4828f85969d162368358909a5c | 6bf563f264a6585fb9382bebc3a706247eb703a2 | refs/heads/master | 2022-11-27T04:35:48.235690 | 2019-11-13T12:24:41 | 2019-11-13T12:24:41 | 221,199,889 | 0 | 2 | null | 2022-11-22T04:49:00 | 2019-11-12T11:17:38 | Python | UTF-8 | Python | false | false | 3,088 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h%33kmk8g+gk)mg#zd^j0zw(#)c5&hc494(&#tc7en0+#ta9n6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"gootech442@yahoo.com"
] | gootech442@yahoo.com |
4276a7df350ae4e7a94297215ce00af87d663830 | 31e32761e3572f8adeb690053ebfcc26390a87b5 | /leetcode/wiggle_sort.py | 7c59f03736ff4408f768d39e0d8396028ae29ab6 | [] | no_license | sanshitsharma/pySamples | 738b95c758d65e3360f3ee7221591d7b78c7ba1d | ce06f1e38e0c7a142af26e8883c81b7a5dfc7edc | refs/heads/master | 2021-06-05T19:01:40.279148 | 2021-02-02T21:52:31 | 2021-02-02T21:52:31 | 110,218,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,403 | py | #!/usr/bin/python
class Solution(object):
def wiggleSort(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
xL = True
for i in range(len(nums)-1):
if xL:
if nums[i] > nums[i+1]:
nums[i], nums[i+1] = nums[i+1], nums[i]
elif nums[i] == nums[i+1]:
# Find next bigger number and swap with i+1
j = i + 1
while j < len(nums) and nums[j] <= nums[i]:
j += 1
if j < len(nums):
nums[i+1], nums[j] = nums[j], nums[i+1]
elif not xL:
if nums[i] < nums[i+1]:
nums[i], nums[i+1] = nums[i+1], nums[i]
elif nums[i] == nums[i+1]:
# Find next smaller number and swap with i+1
j = i + 1
while j < len(nums) and nums[j] >= nums[i]:
j += 1
if j < len(nums):
nums[i+1], nums[j] = nums[j], nums[i+1]
xL = not xL
if __name__ == "__main__":
#s = [3, 5, 2, 1, 6, 4]
#s = [5, 4, 3, 7, 9]
#s = [1, 2, 1, 1, 3, 6]
s = [1,1,1,1,2,2,2]
print "Before:", s
Solution().wiggleSort(s)
print "After :", s | [
"sansshar@cisco.com"
] | sansshar@cisco.com |
10515301924f9f176672d3cbe62d8bf843c421a8 | 212d39dd0e12d42ce9b830de7e8738504dda2428 | /concurrency/example2_server.py | 30adec7d39b47f771181c70d78f25b4bfc47d703 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | waveform80/presentations | a0c7869f5acd699922f84ed1b510519c00472887 | 9e8d9f63d4e841e573d5b9b01c234128d49c29c5 | refs/heads/master | 2023-05-12T21:29:29.083191 | 2023-05-04T07:29:59 | 2023-05-04T07:29:59 | 21,940,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | import zmq
from random import random
from time import time, sleep
def get_random(lo=0, hi=1):
start = time()
sleep(lo + random() * (hi - lo))
return time() - start
ctx = zmq.Context.instance()
sock = ctx.socket(zmq.REP)
sock.bind('ipc:///tmp/random')
while True:
lo, hi = sock.recv_json()
sock.send_json(get_random(lo, hi))
| [
"dave@waveform.org.uk"
] | dave@waveform.org.uk |
f8d62ee48c69b5de4b90aeafd2ff92dae774784e | fef66ed221eecdcb2e14a4b69c83a031ff4c7ef8 | /pydantic/generics.py | ab3ea52811062ebc1c559a039bd2185d007dd626 | [
"MIT"
] | permissive | haizaar/pydantic | 5d60de8573ad847b39cde1b2dfc691be0a243388 | a0c48d62ad0fc614bb5810191cddbdbe11f697af | refs/heads/master | 2020-07-13T10:42:56.016274 | 2019-09-30T11:45:06 | 2019-09-30T11:45:06 | 205,067,533 | 0 | 0 | null | 2019-08-29T02:53:29 | 2019-08-29T02:53:29 | null | UTF-8 | Python | false | false | 3,723 | py | from typing import Any, ClassVar, Dict, Generic, Tuple, Type, TypeVar, Union, get_type_hints
from pydantic.class_validators import gather_validators
from pydantic.main import BaseModel, create_model
_generic_types_cache: Dict[Tuple[Type[Any], Union[Any, Tuple[Any, ...]]], Type[BaseModel]] = {}
GenericModelT = TypeVar('GenericModelT', bound='GenericModel')
class GenericModel(BaseModel):
__slots__ = ()
__concrete__: ClassVar[bool] = False
def __new__(cls, *args: Any, **kwargs: Any) -> Any:
if cls.__concrete__:
return super().__new__(cls)
else:
raise TypeError(f'Type {cls.__name__} cannot be used without generic parameters, e.g. {cls.__name__}[T]')
def __class_getitem__( # type: ignore
cls: Type[GenericModelT], params: Union[Type[Any], Tuple[Type[Any], ...]]
) -> Type[BaseModel]:
cached = _generic_types_cache.get((cls, params))
if cached is not None:
return cached
if cls.__concrete__:
raise TypeError('Cannot parameterize a concrete instantiation of a generic model')
if not isinstance(params, tuple):
params = (params,)
if any(isinstance(param, TypeVar) for param in params): # type: ignore
raise TypeError(f'Type parameters should be placed on typing.Generic, not GenericModel')
if Generic not in cls.__bases__:
raise TypeError(f'Type {cls.__name__} must inherit from typing.Generic before being parameterized')
check_parameters_count(cls, params)
typevars_map: Dict[Any, Any] = dict(zip(cls.__parameters__, params)) # type: ignore
type_hints = get_type_hints(cls).items()
instance_type_hints = {k: v for k, v in type_hints if getattr(v, '__origin__', None) is not ClassVar}
concrete_type_hints: Dict[str, Type[Any]] = {
k: resolve_type_hint(v, typevars_map) for k, v in instance_type_hints.items()
}
model_name = concrete_name(cls, params)
validators = gather_validators(cls)
fields: Dict[str, Tuple[Type[Any], Any]] = {
k: (v, cls.__fields__[k].default) for k, v in concrete_type_hints.items() if k in cls.__fields__
}
created_model = create_model(
model_name=model_name,
__module__=cls.__module__,
__base__=cls,
__config__=None,
__validators__=validators,
**fields,
)
created_model.Config = cls.Config
created_model.__concrete__ = True # type: ignore
_generic_types_cache[(cls, params)] = created_model
if len(params) == 1:
_generic_types_cache[(cls, params[0])] = created_model
return created_model
def concrete_name(cls: Type[Any], params: Tuple[Type[Any], ...]) -> str:
param_names = [param.__name__ if hasattr(param, '__name__') else str(param) for param in params]
params_component = ', '.join(param_names)
return f'{cls.__name__}[{params_component}]'
def resolve_type_hint(type_: Any, typevars_map: Dict[Any, Any]) -> Type[Any]:
if hasattr(type_, '__origin__') and getattr(type_, '__parameters__', None):
concrete_type_args = tuple([typevars_map[x] for x in type_.__parameters__])
return type_[concrete_type_args]
return typevars_map.get(type_, type_)
def check_parameters_count(cls: Type[GenericModel], parameters: Tuple[Any, ...]) -> None:
actual = len(parameters)
expected = len(cls.__parameters__) # type: ignore
if actual != expected:
description = 'many' if actual > expected else 'few'
raise TypeError(f'Too {description} parameters for {cls.__name__}; actual {actual}, expected {expected}')
| [
"s@muelcolvin.com"
] | s@muelcolvin.com |
d89386a8b3057ede1f292266eb6b418f4e670398 | 6323bd983f6304d95e62909bfc4883d2f9ef1a14 | /Random/Functions_class.py | dc04ad7ddbe0ea85a2697cee2d8df0d092d98f84 | [] | no_license | akshay-sahu-dev/PySolutions | 4c2d67d5f66fe83a6e302e1742a5bf17dafe2b99 | 83552962805768914034a284bf39197f52ca5017 | refs/heads/master | 2023-06-17T06:36:50.252943 | 2021-07-09T17:28:53 | 2021-07-09T17:28:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | def count_digit(Num):
# Num = int(input("Enter a number: ")) # 1234
Q = Num//10 # 123
D = 1
while Q != 0:
Q = Q // 10
D += 1
print("Total digits in given number is: ", D)
count_digit(7645344) | [
"akki5233@gmail.com"
] | akki5233@gmail.com |
27e8262bff5c1d44e27db03475b3794d498a8fbe | beebc5ff44407f3f3a4c1463cd09f0917dbe5391 | /pytype/tools/merge_pyi/test_data/imports_alias.pep484.py | a2ccb207ed16c5bc74d13d337de115c132217ce0 | [
"Apache-2.0",
"MIT"
] | permissive | mraarif/pytype | 4f190cb2591896133761295f3d84d80602dffb58 | 546e8b8114c9af54a409985a036398c4f6955677 | refs/heads/master | 2023-01-23T09:48:06.239353 | 2020-12-02T06:08:27 | 2020-12-02T06:08:27 | 303,069,915 | 1 | 0 | NOASSERTION | 2020-12-02T06:08:28 | 2020-10-11T07:53:55 | null | UTF-8 | Python | false | false | 227 | py | """Test import-as."""
from m1 import A_old as A
from m2 import B_old as B
from m3 import C_old as C
import m4_old as m4
import m5.D_old as D
import m5.something.E_old as E
def f(a: A, b: B, c: C, d: D, e: E) -> m4.D:
pass
| [
"rechen@google.com"
] | rechen@google.com |
987aaf69923d261f667c20c540bde052a6714ff7 | d8dd4ce3e943ea23586bba9df9c7bf978efa8a8b | /get_data.py | 37ad3f72ee4fb2ee78728251fd50a8aa8978cd9f | [] | no_license | mdekauwe/world_clim | 2785ff77f0b503dfa2b7e8e93dec80cf13ccc723 | 410dbf6756ee1c638134ec30dab45056531f1a09 | refs/heads/master | 2021-01-07T22:41:28.981349 | 2020-02-20T09:14:58 | 2020-02-20T09:14:58 | 241,840,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | #!/usr/bin/env python
"""
Get WorldClim version 2 climate data for various vars between 1970-2000.
"""
__author__ = "Martin De Kauwe"
__version__ = "1.0 (20.02.2020)"
__email__ = "mdekauwe@gmail.com"
from urllib.request import urlretrieve
from pathlib import Path
import os
res = "10m" #"5m" "2.5m", "30s"
#vars = ["tmin", "tmax", "tavg", "prec", "srad", "wind", "vapr"]
vars = ["tavg"]
base_address = 'http://biogeo.ucdavis.edu/data/worldclim/v2.0/tif/base/'
for var in vars:
print(var)
fname = "wc2.0_%s_%s.zip" % (res, var)
address = base_address + fname
output_dir = "data/%s" % (var)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_fname = os.path.join(output_dir, fname)
if not Path(fname).exists():
urlretrieve(address, output_fname)
| [
"mdekauwe@gmail.com"
] | mdekauwe@gmail.com |
901f8ba62645588778d2832e39b89af3a55ba4ed | 37f55335f6b078d5bf95db00c62efb7d78d2411a | /game/interactive_shocks/admin.py | 6efb5d638ceff94549cb89b877651f297202470e | [
"MIT"
] | permissive | adminq80/Interactive_estimation | 14df57e4ee9513528e7f49ae0239638c24d8e763 | d62de66189ee15c5c9f23938da148dfd5ed08573 | refs/heads/master | 2020-12-05T16:30:49.506392 | 2017-04-28T21:10:19 | 2017-04-28T21:10:19 | 67,538,528 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | from django.contrib import admin
from .forms import SettingsForm
from .models import Settings, Survey, InteractiveShocks, InteractiveShocksRound
@admin.register(Settings)
class SettingsAdmin(admin.ModelAdmin):
form = SettingsForm
readonly_fields = ('id',)
search_fields = ['id', ]
list_display = ('max_rounds', 'max_users', 'max_following')
@admin.register(InteractiveShocksRound)
class AdminSite(admin.ModelAdmin):
readonly_fields = ('id',)
list_display = ('user', 'guess', 'influenced_guess', 'outcome')
search_fields = ['user', 'guess', 'influenced_guess']
@admin.register(Survey)
class SurveyAdminSite(admin.ModelAdmin):
readonly_fields = ('id', 'username', 'game', 'age', 'gender', 'feedback', 'bugs', 'pay', 'education')
list_display = ('username', 'game', 'gender')
search_fields = ['id', 'user']
@admin.register(InteractiveShocks)
class GameAdminSite(admin.ModelAdmin):
readonly_fields = ('id',)
list_display = ('id', 'start_time', 'started', 'end_time', )
search_fields = ['id', ]
| [
"adminq80@gmail.com"
] | adminq80@gmail.com |
c5ce1b8002e32799a2450ab83bd3cecadf713ef1 | 46244bb6af145cb393846505f37bf576a8396aa0 | /leetcode/138.copy_list_with_random_pointer.py | 762548d65e258634741a2eb557eaef046abec80d | [] | no_license | aoeuidht/homework | c4fabfb5f45dbef0874e9732c7d026a7f00e13dc | 49fb2a2f8a78227589da3e5ec82ea7844b36e0e7 | refs/heads/master | 2022-10-28T06:42:04.343618 | 2022-10-15T15:52:06 | 2022-10-15T15:52:06 | 18,726,877 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | # Definition for singly-linked list with a random pointer.
# class RandomListNode:
# def __init__(self, x):
# self.label = x
# self.next = None
# self.random = None
class Solution:
# @param head, a RandomListNode
# @return a RandomListNode
def copyRandomList(self, head):
if not head:
return None
dh = RandomListNode(0)
oh = head
nh = dh
mp = {}
while oh:
n = RandomListNode(oh.label)
nh.next = n
nh = n
mp[id(oh)] = n
oh = oh.next
oh = head
nh = dh.next
while oh:
if oh.random:
nh.random = mp[id(oh.random)]
oh = oh.next
nh = nh.next
return dh.next
| [
"sockpuppet.lea@gmail.com"
] | sockpuppet.lea@gmail.com |
1978f4dde31af4260b41b0c1313a8a07b4e808ec | 466912406272829982f75854cf0104c6ce8c9814 | /data/spider2/parser/recruit/lagou/lagou_company_full_name_check.py | 27f5e56d02301c67526cc2ac7563834476c6b449 | [] | no_license | logonmy/Codes | 9631fa103fc499663361fa7eeccd7cedb9bb08e4 | 92723efdeccfc193f9ee5d0ab77203c254f34bc2 | refs/heads/master | 2021-09-21T18:07:22.985184 | 2018-08-30T05:53:26 | 2018-08-30T05:53:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,619 | py | # -*- coding: utf-8 -*-
import os, sys
import datetime
import json
from bson import json_util
from pyquery import PyQuery as pq
from bs4 import BeautifulSoup
import lxml.html
import time
import lagou_job_parser
reload(sys)
sys.setdefaultencoding("utf-8")
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../../../util'))
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../../support'))
import loghelper
import util, download, name_helper,url_helper
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../util'))
import parser_db_util
#logger
loghelper.init_logger("lagou_company_parser", stream=True)
logger = loghelper.get_logger("lagou_company_parser")
SOURCE = 13050 #Lgou
TYPE = 36001 #公司信息
aa =0
cnt = 0
download_crawler = download.DownloadCrawler(use_proxy=True)
def process():
global aa,cnt
logger.info("lagou_company_parser begin...")
while True:
items = parser_db_util.find_all_limit(SOURCE, TYPE, aa,1000)
#items = [parser_db_util.find_process_one(SOURCE, TYPE, 128040)]
aa += 1000
for item in items:
r = parse_company(item)
if r["status"] == "Sub_company":
#parser_db_util.update_active(SOURCE, item["key"], 'N')
#parser_db_util.update_processed(item["_id"])
logger.info("Fullname %s, %s", r["name"], item["url"])
cnt += 1
continue
#exit()
if len(items) == 0:
break
logger.info("total : %s", cnt)
#break
logger.info("lagou_company_parser end.")
def parse_company(item):
if item is None:
return None
#logger.info("*** base ***")
company_key = item["key"]
html = item["content"]
#logger.info(html)
d = pq(html)
logo = d('.top_info_wrap > img').attr('src')
if logo.startswith("http") or logo.startswith("https"):
pass
else:
logo = "http:"+logo
name = d('.company_main > h1 > a').text()
fullName = d('.company_main > h1 > a').attr('title')
fullName = name_helper.company_name_normalize(fullName)
if name is None or fullName is None:
return {
"status": "No_Name",
}
if len(name) > len(fullName):
name = fullName
if fullName.find("分公司") >= 0:
return {
"status": "Sub_company",
"name": fullName
}
return {
"status": "good"
}
if __name__ == "__main__":
while True:
process()
time.sleep(60*30) | [
"hush_guo@163.com"
] | hush_guo@163.com |
97022c7ccd61935cf15a4d7f498ef6eb96146876 | 6ec91b363b077bffd33f15300a0935124e9fb915 | /Cracking_the_Code_Interview/Leetcode/6.Binary_Tree/Recursive/236.Lowest_Common_Ancestor.py | 9ac1728927a25df30cb8e000485c15831b3d1727 | [] | no_license | lzxyzq/Cracking_the_Coding_Interview | 03232515ae8eb50394d46322d36b230d1a626fcf | 79dee7dab41830c4ff9e38858dad229815c719a0 | refs/heads/master | 2023-06-05T19:52:15.595289 | 2021-06-23T22:46:02 | 2021-06-23T22:46:02 | 238,068,000 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,042 | py | '''
@Author: your name
@Date: 2020-05-23 17:38:48
@LastEditTime: 2020-05-23 18:39:01
@LastEditors: Please set LastEditors
@Description: In User Settings Edit
@FilePath: /Cracking_the_Code_Interview/Leetcode/Binary_Tree/Recursive/236.Lowest_Common_Ancestor.py
'''
# Given a binary tree, find the lowest common ancestor (LCA) of two given nodes in the tree.
# According to the definition of LCA on Wikipedia: “The lowest common ancestor is defined between two nodes p and q as the lowest node in T that has both p and q as descendants (where we allow a node to be a descendant of itself).”
# Given the following binary tree: root = [3,5,1,6,2,0,8,null,null,7,4]
''' Example 1:
Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 1
Output: 3
Explanation: The LCA of nodes 5 and 1 is 3.
'''
''' Example 2:
Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 4
Output: 5
Explanation: The LCA of nodes 5 and 4 is 5, since a node can be a descendant of itself according to the LCA definition.
'''
class Solution:
def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
if not root:
return None
if root and ( root is p or root is q ):
return root
else:
# common ancestor of p, q exists in left sub-tree
left_ancestor = self.lowestCommonAncestor( root.left, p ,q)
# common ancestor of p, q exists in right sub-tree
right_ancestor = self.lowestCommonAncestor( root.right, p ,q)
if left_ancestor and right_ancestor:
# p, q reside in two sides, one in left sub-tree, the other in right sub-tree
return root
elif left_ancestor:
# both p, q reside in left sub-tree
return left_ancestor
elif right_ancestor:
# both p, q reside in right sub-tree
return right_ancestor
else:
# both p, q do not exist in current binary tree
return None | [
"lzxyzq@gmail.com"
] | lzxyzq@gmail.com |
3f44a42e4f9ed4912c78cf3ede0b26b75c9c1ca8 | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/577477_Select_some_nth_smallest_elements_quickselect/recipe-577477.py | 0f4ecc1b0f7ca5e97f9c49953f132ec863b022ab | [
"Python-2.0",
"MIT"
] | permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 1,358 | py | import random
def select(data, positions, start=0, end=None):
'''For every n in *positions* find nth rank ordered element in *data*
inplace select'''
if not end: end = len(data) - 1
if end < start:
return []
if end == start:
return [data[start]]
pivot_rand_i = random.randrange(start,end)
pivot_rand = data[pivot_rand_i] # get random pivot
data[end], data[pivot_rand_i] = data[pivot_rand_i], data[end]
pivot_i = start
for i in xrange(start, end): # partitioning about the pivot
if data[i] < pivot_rand:
data[pivot_i], data[i] = data[i], data[pivot_i]
pivot_i += 1
data[end], data[pivot_i] = data[pivot_i], data[end]
under_positions, over_positions, mid_positions = [],[],[]
for position in positions:
if position == pivot_i:
mid_positions.append(position)
elif position < pivot_i:
under_positions.append(position)
else:
over_positions.append(position)
result = []
if len(under_positions) > 0:
result.extend(select(data, under_positions, start, pivot_i-1))
if len(mid_positions) > 0:
result.extend([data[position] for position in mid_positions])
if len(over_positions) > 0:
result.extend(select(data, over_positions, pivot_i+1, end))
return result
| [
"betty@qburst.com"
] | betty@qburst.com |
f446ee466529f2d2f57c1633179f7e771628ff47 | 7cf0a3b4429e01b46bb8b8f4a742046953c32dfa | /Scraping attempt/scripts/web_scraper/csv_summary.py | c1a7288c1fe4dbb4943457e01e0895372440e235 | [] | no_license | kafitz/assessment_scraper | 3969d9e48c0c48d436ccc164f584bec44f59a02b | 9eb5d2660774d8cdf58993ccbe84f6e131bf22dd | refs/heads/master | 2020-06-09T05:39:15.052316 | 2014-05-20T01:37:08 | 2014-05-20T01:37:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,679 | py | #!/usr/bin/python
# coding=utf-8
# 2013 Kyle Fitzsimmons
import os
import csv
import xlwt
file_tuple = [x for x in os.walk('output/')][0]
files = [str(file_tuple[0] + filename) for filename in file_tuple[2]]
workbook = xlwt.Workbook(encoding='utf-8')
sheet = workbook.add_sheet('summary', cell_overwrite_ok=True)
match_found = False
number_of_2011_sales = 0
index_row = 0
header_list_1 = ['Input', '', 'Scraped results for',
'', 'Assessments', '', '', '', 'MLS Data']
header_list_2 = [
'Address searched', 'Input arrondissement', 'Returned address', 'Return arrondissement',
'First year', 'Price', 'Last year', 'Price', 'Date sold', 'Price sold']
for header in [header_list_1, header_list_2]:
for index_column, word in enumerate(header):
sheet.write(index_row, index_column, word)
index_row += 1
for filename in files:
with open(filename, 'rb') as csv_file:
reader = csv.reader(csv_file)
last_section_heading = ''
for row in reader:
if str(row[0]) != '' and len(row) == 1:
last_section_heading = str(row[0])
else:
if 'input_search' in str(row[0]):
input_search = str(row[1])
if 'geocoded_arrondissement' in str(row[0]):
arrondissement = str(row[1])
if 'address' in str(row[0]):
scraped_address = str(row[1])
if 'neighborhood' in str(row[0]):
scraped_arrondissement = str(row[1])
if 'ANTERIEUR' in last_section_heading:
if 'role_year' in str(row[0]):
first_assessment_year = str(row[1])
if 'total_property_value' in str(row[0]):
first_assessment_price = str(row[1])
if 'EN DATE DU' in last_section_heading:
last_assessment_year = str(last_section_heading.split()[3])
if 'total_property_value' in str(row[0]):
last_assessment_price = str(row[1])
if 'date_sold' in str(row[0]):
date_sold = str(row[1])
if 'price_sold' in str(row[0]):
price_sold = str(row[1])
output_row = [
input_search, arrondissement, scraped_address, scraped_arrondissement,
first_assessment_year, first_assessment_price, last_assessment_year,
last_assessment_price, date_sold, price_sold]
for index_column, word in enumerate(output_row):
sheet.write(index_row, index_column, word)
index_row += 1
workbook.save('summary.xls')
| [
"kafitz22@gmail.com"
] | kafitz22@gmail.com |
771eb99c5e6276951fa1be1be0bc7479d3f2cf98 | 2111dac9168ef9651296f859428a4d3461356107 | /tests/test_shellutils.py | 1ef5ea2eda2371fed2f83257d13ec9c12dc294cc | [] | no_license | BasementCat/gitastic | 366f3016ef1fab692b4401520b5a6013ebb2344f | 44f5e9e7d093c754797f934645b1b6f674393cb9 | refs/heads/master | 2020-12-24T13:44:06.372591 | 2014-05-14T03:33:42 | 2014-05-14T03:33:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,541 | py | import unittest
import sys
import os
import StringIO
# import subprocess
# import threading
# import socket
# import time
# import tempfile
# import shutil
# import copy
# import getpass
# import signal
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), "gitastic"))
from lib import shellutils
class MockSys(object):
stderr=StringIO.StringIO()
stdin=StringIO.StringIO("Test Standard Input")
stdout=StringIO.StringIO()
_exit_code=None
@classmethod
def exit(self, code=0):
self._exit_code=code
@classmethod
def reset(self):
self.stderr.close()
self.stdin.close()
self.stdout.close()
self.stderr=StringIO.StringIO()
self.stdin=StringIO.StringIO("Test Standard Input")
self.stdout=StringIO.StringIO()
self._exit_code=None
class MockRawInput_Base(object):
def __init__(self):
self.prompt=None
self.values=["Test Raw Input", "Test Raw Input 2"]
self.calls=0
def __call__(self, prompt=None):
self.prompt=prompt
if self.calls>=len(self.values):
self.calls=0
out=self.values[self.calls]
self.calls+=1
return out
def reset(self):
self.prompt=None
self.values=["Test Raw Input", "Test Raw Input 2"]
self.calls=0
MockRawInput=MockRawInput_Base()
class TestShellUtils(unittest.TestCase):
def setUp(self):
if shellutils.sys is not MockSys:
shellutils.sys=MockSys
MockSys.reset()
if not hasattr(shellutils, "raw_input") or shellutils.raw_input is not MockRawInput:
shellutils.raw_input=MockRawInput
MockRawInput.reset()
def tearDown(self):
MockSys.reset()
MockRawInput.reset()
def test_DieWithMessage(self):
shellutils.die("TestMessage")
self.assertEqual(MockSys.stderr.getvalue(), "TestMessage\n")
self.assertEqual(MockSys._exit_code, 1)
def test_DieWithMessage_interpolation(self):
shellutils.die("TestMessage %s #%d", "Hello world", 7)
self.assertEqual(MockSys.stderr.getvalue(), "TestMessage Hello world #7\n")
self.assertEqual(MockSys._exit_code, 1)
def test_DieWithMessage_code(self):
shellutils.die("TestMessage", code=25)
self.assertEqual(MockSys.stderr.getvalue(), "TestMessage\n")
self.assertEqual(MockSys._exit_code, 25)
def test_DieWithMessage_interpolation_code(self):
shellutils.die("TestMessage %s #%d", "Hello", 12, code=9)
self.assertEqual(MockSys.stderr.getvalue(), "TestMessage Hello #12\n")
self.assertEqual(MockSys._exit_code, 9)
# def get_input(prompt=None, default=None, require=False, restrict=None):
def test_input_noprompt(self):
var=shellutils.get_input()
self.assertEqual(MockRawInput.prompt, "")
self.assertEqual(var, "Test Raw Input")
def test_input_prompt(self):
var=shellutils.get_input("Test Prompt")
self.assertEqual(MockRawInput.prompt, "Test Prompt: ")
self.assertEqual(var, "Test Raw Input")
def test_input_default(self):
MockRawInput.values=[""]
var=shellutils.get_input("Test Prompt", default="Hello world")
self.assertEqual(MockRawInput.prompt, "Test Prompt [Hello world]: ")
self.assertEqual(var, "Hello world")
def test_input_require(self):
MockRawInput.values=["", "Test asdfasd"]
var=shellutils.get_input("Test Prompt", require=True)
self.assertEqual(MockRawInput.prompt, "Test Prompt: ")
self.assertEqual(var, "Test asdfasd")
self.assertEqual(MockSys.stderr.getvalue(), "An answer is required\n")
def test_input_restrict(self):
MockRawInput.values=["baz", "", "bar"]
var=shellutils.get_input("Test Prompt", require=True, restrict=["foo", "bar"])
self.assertEqual(MockRawInput.prompt, "Test Prompt (foo,bar): ")
self.assertEqual(var, "bar")
self.assertEqual(MockSys.stderr.getvalue(), "Answer must be one of foo, bar\nAn answer is required\n")
def test_input_restrict_default(self):
MockRawInput.values=["baz", "", "bar"]
var=shellutils.get_input("Test Prompt", require=True, restrict=["foo", "bar"], default="foo")
self.assertEqual(MockRawInput.prompt, "Test Prompt (foo,bar) [foo]: ")
self.assertEqual(var, "foo")
self.assertEqual(MockSys.stderr.getvalue(), "Answer must be one of foo, bar\n")
if __name__ == '__main__':
unittest.main() | [
"alec.elton@gmail.com"
] | alec.elton@gmail.com |
b68ced78051a5675e37a4be4514db25f58d205d4 | 07c5656f004b6a444e22ff7b4c3b6802d027f759 | /week_9/class_0420/common/context.py | 1903c5527c8b1a20ab201514e9e0300f06dbf62c | [] | no_license | EuniceHu/python15_api_test | de2a0f0bec8057edb27c8d1f82a438da3e9c105c | 1313e56ddfa67a2490e703a1a5ef4a6967565849 | refs/heads/master | 2020-05-20T13:30:41.686327 | 2019-05-14T11:00:52 | 2019-05-14T11:00:52 | 185,599,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | #-*- coding:utf-8 _*-
"""
@author:小胡
@file: context.py
@time: 2019/04/26
"""
import re
from week_9.class_0420.common.config import config
def replace(data):
p = "#(.*?)#"
while re.search(p, data): # 找到返回True
print('data是', data)
m = re.search(p, data)
# 从任意位置开始找,找第一个就放回Match object ,如果没有就返回None
g = m.group(1) # 拿到参数化的key
v = config.get('data', g) # 根据KEY取配置文件里面的值
print(v)
# 记得替换后的内容,继续用data接收
data = re.sub(p, v, data, count=1)
return data | [
"hongdh1122@163.com"
] | hongdh1122@163.com |
20d8efe21e7db069ec1875cf83867a99cdc802c1 | 565548ff49844ed69ae16d5104e500f01c973402 | /app/auth/decorators.py | e1fb953361c57a1bf98525d9bfc42b83f1712c06 | [] | no_license | jaisenbe58r/Pebrassos | 159ce5a8b372590fd9368d9b5b3c1b0513895bba | 7516a1f7bbba78547af86a9858ee381224964d28 | refs/heads/master | 2023-02-27T05:42:50.652697 | 2021-01-31T20:57:59 | 2021-01-31T20:57:59 | 299,698,630 | 3 | 1 | null | 2021-01-31T20:58:01 | 2020-09-29T18:04:36 | Jupyter Notebook | UTF-8 | Python | false | false | 608 | py | """Copyright (c) 2020 Jaime Sendra Berenguer & Carlos Mahiques Ballester
Pebrassos - Machine Learning Library Extensions
Author:Jaime Sendra Berenguer & Carlos Mahiques Ballester
<www.linkedin.com/in/jaisenbe>
License: MIT
FECHA DE CREACIÓN: 13/01/2020
"""
from functools import wraps
from flask import abort
from flask_login import current_user
def admin_required(f):
@wraps(f)
def decorated_function(*args, **kws):
is_admin = getattr(current_user, 'is_admin', False)
if not is_admin:
abort(401)
return f(*args, **kws)
return decorated_function
| [
"jsendra@autis.es"
] | jsendra@autis.es |
61b9ba02c484f8ecfdc2518c853008b583932a77 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5631989306621952_0/Python/mtbrown/last_word.py | d49456d039c3911948a4ad406bd8bb587ab53f5e | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | def main():
cases = int(input())
for case in range(1, cases + 1):
str = input()
print("Case #{0}: {1}".format(case, last_word(str)))
def last_word(str):
last = ""
for c in str:
if last == "":
last += c
else:
if c >= last[0]:
last = c + last
else:
last += c
return last
if __name__ == "__main__":
main() | [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
aae92cb8610f4c1884f46dbf03874c26b2016cf9 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02709/s610629838.py | 27cbe84220422c9db5900aecef0078cf7d118da8 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | n = int(input())
a = [(int(j), i) for i, j in enumerate(input().split())]
a.sort(reverse=1)
dp = [[0] * (n+1) for _ in range(n + 1)]
for i in range(1, n + 1):
for j in range(i + 1):
k = i - j
if j != 0:
dp[i][j] = dp[i - 1][j - 1] + a[i-1][0] * (abs(j - 1 - a[i-1][1]))
if k != 0:
dp[i][j] = max(dp[i][j],
dp[i - 1][j] + a[i-1][0] * (abs(n-k - a[i-1][1])))
print(max(dp[-1])) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
99c55d6a4179e07399e896535602bb4422a8420e | 90f02d834e45b5087313cecb0c6e6e24b078e35c | /students/views.py | dd5197e486e15ffe6f84243cb6d1105e24231929 | [] | no_license | Horlawhumy-dev/studentprofileapp | 1526034901c8f728455ec8ca1087e142579a961f | f1280a859593ad1760d636d97f08fa04c00108bf | refs/heads/master | 2023-07-04T11:31:01.642497 | 2021-08-06T10:52:10 | 2021-08-06T10:52:10 | 393,213,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | from django.shortcuts import render
from account.models import StudentAccount
# Create your views here.
def index(request):
students = StudentAccount.objects.all()
return render(request, 'students/index.html',{'students': students}) | [
"harof.dev@gmail.com"
] | harof.dev@gmail.com |
cdae01d444d0aa4322a2e8ba855b71ac3b53928c | 17079988dedef6f830633a7a54b181355231fe3e | /Practice/n.py | d3a11bc9c638ef479ab7af3d21409277b9fb51e3 | [] | no_license | sum008/python-backup | cdf6eaff60d882c36fe86b47ad311955d5869b02 | 729fbe2a5220941f9ba085c693c871592a529da8 | refs/heads/master | 2022-12-12T21:21:48.259680 | 2020-09-12T15:36:05 | 2020-09-12T15:36:05 | 285,461,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,132 | py |
t=int(input())
for i in range(0,t):
n=int(input())
a = [int(x) for x in input().split()]
x=1
pos=0
while True:
if (a[pos]+a[x])%2==0:
y=(a[pos]+a[x])
a.pop(x)
a.pop(pos)
a.insert(0, y)
x=1
pos=0
print(a)
elif x<len(a)-1:
x+=1
else:
pos+=1
x=pos+1
if pos==len(a)-1:
break
print(len(a))
# t=int(input())
# for i in range(0,t):
#
# a = input().split(" ")
# l = int(a[0])
# r = int(a[1])
# if l%2==0 and r%2==0:
# y=r-l
# if y%2==0:
# print("Even")
# else:
# print("Odd")
# elif (l%2==0 and r%2!=0) or (l%2!=0 and r%2==0) :
# y=((r-l)+1)//2
# if y%2==0:
# print("Even")
# else:
# print("Odd")
# else:
# y=(((r-l)+1)//2)+1
# if y%2==0:
# print("Even")
# else:
# print("Odd") | [
"noreply@github.com"
] | sum008.noreply@github.com |
7ebb0eb1be1374ea3347dcda29706127c9d8334f | 2ae0b8d95d439ccfd55ea7933ad4a2994ad0f6c5 | /tests/samples_tests/smoke_tests/test_hello_reshape_ssd.py | 7e89c01846a488bf21b14cffc2fa12251e2516bf | [
"Apache-2.0"
] | permissive | openvinotoolkit/openvino | 38ea745a247887a4e14580dbc9fc68005e2149f9 | e4bed7a31c9f00d8afbfcabee3f64f55496ae56a | refs/heads/master | 2023-08-18T03:47:44.572979 | 2023-08-17T21:24:59 | 2023-08-17T21:24:59 | 153,097,643 | 3,953 | 1,492 | Apache-2.0 | 2023-09-14T21:42:24 | 2018-10-15T10:54:40 | C++ | UTF-8 | Python | false | false | 2,287 | py | """
Copyright (C) 2018-2023 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import pytest
import logging as log
import sys
from common.samples_common_test_class import get_tests
from common.samples_common_test_class import SamplesCommonTestClass
from common.specific_samples_parsers import parse_hello_reshape_ssd
log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
test_data_fp32 = get_tests(cmd_params={'i': [os.path.join('500x500', 'cat.bmp')],
'm': [os.path.join('ssd512', 'FP32', 'ssd512.xml')],
'sample_type': ['C++','Python'],
'd': ['CPU']},
use_device=['d'], use_batch=False
)
class TestHelloShape(SamplesCommonTestClass):
@classmethod
def setup_class(cls):
cls.sample_name = 'hello_reshape_ssd'
super().setup_class()
@pytest.mark.parametrize("param", test_data_fp32)
def test_hello_reshape_ssd_fp32(self, param):
"""
Hello_reshape_ssd has functional testing.
This function get stdout from hello_reshape_ssd (already splitted by new line)
The test check not if resulted class of object is accurate with reference, but that demo detected class with its box
and so on and so forth.
"""
# Run _test function, that returns stdout or 0.
stdout = self._test(param, use_preffix=False, get_cmd_func=self.get_hello_shape_cmd_line)
if not stdout:
return 0
stdout = stdout.split('\n')
is_ok = parse_hello_reshape_ssd(stdout)
assert is_ok, "[ERROR] Check failed"
log.info('Functional test passed')
| [
"noreply@github.com"
] | openvinotoolkit.noreply@github.com |
635d6550345ddc391ea4c0098466411edfe54b7a | 0facb323be8a76bb4c168641309972fa77cbecf2 | /Configurations/HWWSemiLepHighMass/nanoAODv5/v6_production/2017/NJET_biined_WJets/SKIM10/OptimizeMEKD/Optimize_C.py | f8418aff17f796644d48eaf81cb933dde4ef8ed5 | [] | no_license | bhoh/SNuAnalytics | ef0a1ba9fa0d682834672a831739dfcfa1e7486b | 34d1fc062e212da152faa83be50561600819df0e | refs/heads/master | 2023-07-06T03:23:45.343449 | 2023-06-26T12:18:28 | 2023-06-26T12:18:28 | 242,880,298 | 0 | 1 | null | 2020-02-25T01:17:50 | 2020-02-25T01:17:49 | null | UTF-8 | Python | false | false | 1,617 | py | import ROOT
import math
if __name__ == '__main__':
#sig_MEKD_Bst_C_0.1_M1500;1
finput='RESULT_Boost/ele/ggf_signal_M1500/ROC_Obj_MEKD_Bst_C_0.1_M1500.root'
f=ROOT.TFile.Open(finput)
hsig=f.Get("sig_MEKD_Bst_C_0.1_M1500")
hbkg=f.Get("bkg_MEKD_Bst_C_0.1_M1500")
max_significance=-1
cut=-1
Nbins=hbkg.GetNbinsX()
print "Nbins",Nbins
for i in range(0,Nbins+1):
#weight=hbkg.GetBinContent(i)
bkgpass=hbkg.Integral(i,Nbins)
sigpass=hsig.Integral(i,Nbins)
score=hbkg.GetBinLowEdge(i)
significance=0
if bkgpass+sigpass>0:
significance=sigpass/math.sqrt(bkgpass+sigpass)
else:
significance=0
#print 'bkgpass,sigpass,score,significance=',bkgpass,sigpass,score,significance
if significance > max_significance:
max_significance = significance
cut=score
#if weight<0:weight=0
#weight=hsig.GetBinContent(i)
#score=hsig.GetBinCenter(i)
#if weight<0:weight=0
print 'cut,max_significance=',cut,max_significance
def my1st():
ginput=''
gname=''
Integral_bkg=100
Integral_sig=1
f=ROOT.TFile.Open(ginput)
gr=f.Get("MEKD_Bst_C_0.000003_M1500")
n=gr.GetN()
max_significance=-1
sig_max=-1
bkg_max=-1
for i in range(n):
bkg=(1-GetPointX(i))*Integral_bkg
sig=(GetPointY(i))*Integral_sig
significance=sig/math.sqrt(sig+bkg)
if significance>max_significance:
max_significance=significance
sig_max=sig
bkg_max=bkg
| [
"soarnsoar@gmail.com"
] | soarnsoar@gmail.com |
158efd76795e83067ca205acbffff90a53599dc9 | 77428d258556f1cae13c7435bcb5ee387d2f7ed9 | /src/program/python/flask/flask-app/surveyapi/application.py | eef9066cd334c752437184573f602856da4efbed | [] | no_license | imxood/imxood.github.io | d598d3d991f7e7d39787ecb2415ffe48489d9fd6 | a6fe8fe069b8af9d65b6afaabecfcfe99ed1ed21 | refs/heads/main | 2022-10-23T12:52:11.966389 | 2022-10-04T06:04:59 | 2022-10-04T06:04:59 | 47,911,256 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | from flask import Flask
from flask_cors import CORS
def create_app(app_name='SURVEY_API'):
app = Flask(app_name)
app.config.from_object('surveyapi.config.BaseConfig')
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
from surveyapi.api import api
app.register_blueprint(api, url_prefix='/api')
from surveyapi.models import db
db.init_app(app)
return app
| [
"imxood@gmail.com"
] | imxood@gmail.com |
241c62bc22d28a7f01b9079e2f3ba8b9d6beda4c | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/73/usersdata/172/38992/submittedfiles/triangulo.py | 7acdb5069d932d6ac512549186e254efc272e19a | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | # -*- coding: utf-8 -*-
import math
a=int(input('digite um valor:'))
b=int(input('digite um valor:'))
c=int(input('digite um valor:'))
a2=(a**2)
b2=(b**2)
c2=(c**2)
if a<b+c:
print('S')
if a2==b2+c2:
print('Re')
elif a2>b2+c2:
print('Ob')
else:
print('Ac')
elif a>b+c:
print('N')
if a==b==c:
print('Eq')
if b==c!=a:
print('Is')
if a!=b!=c:
print('Es') | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
58a73aa4e11916f117d9e74c3cb04b066dfb7ec5 | 97ab50a083a5b183593f41e89853e429bc642190 | /hoover/csv.py | 504bac06572e4def4f65b674d589744de1e4f32d | [
"MIT"
] | permissive | cmb-css/twitter-hoover | 1fc708d3e6c413498e49d830c1a9143e84681213 | ed22439881a7b5a1bdf8fd276920b0fab38231c8 | refs/heads/master | 2021-11-08T05:39:37.785441 | 2021-09-30T10:06:40 | 2021-09-30T10:06:40 | 183,235,719 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,595 | py | import csv
from os import listdir
from os.path import isfile, join
from hoover.simple import read_simple
FIELDS_TWEET = ('created_at',
'timestamp',
'id',
'text',
'retweet_count',
'favorite_count',
'lang')
FIELDS_USER = ('user_id', 'user_screen_name')
FIELDS_ALL = ('reply', 'retweet', 'quote')
FIELDS_REPLY = ('in_reply_to_status_id',
'in_reply_to_user_id',
'in_reply_to_screen_name')
FIELDS_PARENT_TWEET = ('quoted_text',)
FIELDS_RETWEET = ('retweeted_id',
'retweeted_user_id',
'retweeted_user_screen_name')
FIELDS_QUOTE = ('quoted_id',
'quoted_user_id',
'quoted_user_screen_name')
def _matches_filter(csv_type, tweet):
if csv_type in {'all', 'hashtags', 'mentions'}:
return True
elif csv_type == 'tweets':
return ((not tweet['reply']) and
(not tweet['retweet']) and
(not tweet['quote']))
elif csv_type == 'replies':
return tweet['reply']
elif csv_type == 'retweets':
return tweet['retweet']
elif csv_type == 'quotes':
return tweet['quote']
raise RuntimeError('Unknown csv type: {}.'.format(csv_type))
def tweets_to_csv(tweets, outfile, csv_type='all', user_data=True):
base_fields = FIELDS_TWEET
if user_data:
base_fields += FIELDS_USER
if csv_type == 'all':
fields = (base_fields + FIELDS_PARENT_TWEET + FIELDS_ALL +
FIELDS_REPLY + FIELDS_RETWEET + FIELDS_QUOTE)
elif csv_type == 'tweets':
fields = base_fields
elif csv_type == 'replies':
fields = base_fields + FIELDS_REPLY
elif csv_type == 'retweets':
fields = base_fields + FIELDS_PARENT_TWEET + FIELDS_RETWEET
elif csv_type == 'quotes':
fields = base_fields + FIELDS_PARENT_TWEET + FIELDS_QUOTE
else:
raise RuntimeError('Unknown csv type: {}'.format(csv_type))
if user_data:
fields += FIELDS_USER
with open(outfile, 'w') as outfile:
csvwriter = csv.writer(outfile)
csvwriter.writerow(fields)
for tweet in tweets:
csvwriter.writerow([tweet[field] for field in fields])
return 1
def hashtags(tweets, outfile, user_data):
counts = {}
for tweet in tweets:
user = tweet['user_screen_name']
if user not in counts:
counts[user] = {}
for occurrence in tweet['hashtags']:
if occurrence not in counts[user]:
counts[user][occurrence] = 0
counts[user][occurrence] += 1
if len(counts) == 0:
return 0
fields = ('hashtag', 'occurrences')
if user_data:
fields = ('user',) + fields
with open(outfile, 'w') as outfile:
csvwriter = csv.writer(outfile)
csvwriter.writerow(fields)
for user in counts:
for occurrence in counts[user]:
row = {'user': user,
'hashtag': occurrence,
'occurrences': counts[user][occurrence]}
csvwriter.writerow([row[field] for field in fields])
return 1
def mentions(tweets, outfile, user_data):
counts = {}
for tweet in tweets:
user = tweet['user_screen_name']
if user not in counts:
counts[user] = {}
for occurrence in tweet['mentions']:
if occurrence not in counts[user]:
counts[user][occurrence] = 0
counts[user][occurrence] += 1
if len(counts) == 0:
return 0
fields = ('mentioned_id', 'mentioned_screen_name', 'occurrences')
if user_data:
fields = ('user',) + fields
with open(outfile, 'w') as outfile:
csvwriter = csv.writer(outfile)
csvwriter.writerow(fields)
for user in counts:
for occurrence in counts[user]:
row = {'user': user,
'mentioned_id': occurrence[0],
'mentioned_screen_name': occurrence[1],
'occurrences': counts[user][occurrence]}
csvwriter.writerow([row[field] for field in fields])
return 1
def json_file_to_csv(infile, outfile, csv_type='all', user_data=True):
tweets = tuple(tweet for tweet in read_simple(infile)
if _matches_filter(csv_type, tweet))
if len(tweets) == 0:
return 0
if csv_type == 'hashtags':
return hashtags(tweets, outfile, user_data)
elif csv_type == 'mentions':
return mentions(tweets, outfile, user_data)
else:
return tweets_to_csv(tweets, outfile, csv_type, user_data)
def dir_to_csvs(indir, outdir, csv_type='all'):
files = [f for f in listdir(indir) if isfile(join(indir, f))]
n = 0
for file in files:
if file[-5:] == '.json':
infile = join(indir, file)
outfile = '{}-{}.csv'.format(file[:-5], csv_type)
outfile = join(outdir, outfile)
n += json_file_to_csv(
infile, outfile, csv_type, user_data=False)
return n
def to_csv(infile, outfile, indir, outdir, csv_type):
if csv_type:
filters = {csv_type}
else:
filters = ('all', 'tweets',
'replies', 'retweets', 'quotes',
'hashtags', 'mentions')
print('Using filters: {}.'.format(', '.join(filters)))
n = 0
if indir:
if infile:
raise RuntimeError(
'Only one of --infile or --indir should be provided.')
if outfile:
raise RuntimeError(
'Only one of --outfile or --indir should be provided.')
if not outdir:
raise RuntimeError('--outdir must be provided.')
for filt in filters:
print('Converting to csv type: {}'.format(filt))
n += dir_to_csvs(indir, outdir, filt)
elif infile:
if indir:
raise RuntimeError(
'Only one of --infile or --indir should be provided.')
if outdir:
raise RuntimeError(
'Only one of --infile or --outdir should be provided.')
if not outfile:
raise RuntimeError('--outfile must be provided.')
for filt in filters:
print('Converting to csv type: {}'.format(filt))
n += json_file_to_csv(infile, outfile, filt)
else:
raise RuntimeError('Either --infile or --indir must be provided.')
print('{} csv files created.'.format(str(n)))
| [
"telmo@telmomenezes.net"
] | telmo@telmomenezes.net |
68b2bc57d7156e26c1186631f1bca42f06f00ee0 | 592498a0e22897dcc460c165b4c330b94808b714 | /1000번/1978_소수 찾기.py | 7610212bd8ea4d8fe8dffaa19a1bc281c9a7c4d9 | [] | no_license | atom015/py_boj | abb3850469b39d0004f996e04aa7aa449b71b1d6 | 42b737c7c9d7ec59d8abedf2918e4ab4c86cb01d | refs/heads/master | 2022-12-18T08:14:51.277802 | 2020-09-24T15:44:52 | 2020-09-24T15:44:52 | 179,933,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | def prime_number(num):
if num != 1: # 만약에 입력이 1이아니면
for i in range(2,num): #2부터 num-1,ex)3까지 반복을 돈다
if num % i == 0: #만약에 입력을 i로 나눈 나머지가 0이면 False를 리턴한다.
return False
else: #만약에 입력이 1이면
return False #False를 리턴 한다.
return True #만약에 3개의 if문 하나라도 해당이안되면 True를 리턴한다.
cnt = 0
t = int(input())
n = list(map(int,input().split()))
for i in n:
if prime_number(i) == True: #만약에 소수면 cnt에 1을더해준다.
cnt += 1
print(cnt)
| [
"zeezlelove@gmail.com"
] | zeezlelove@gmail.com |
fe8bd58c3101253540c2c6332815c1187b7be4a3 | 35b2ad0c656ff08234eee4c3f62208fa2dc4b893 | /e.g._call_overrided_method_using_super/py2_old_style.py | 830b0042f681ecb55548994805486e91b18370e5 | [
"Unlicense"
] | permissive | thinkAmi-sandbox/python_misc_samples | 0d55b3d40c5983ca2870fdd34221264bf2f6822a | 7a33a803cd0bd13e68c87303ae3ebfbc5a573875 | refs/heads/master | 2021-01-18T16:02:19.890404 | 2018-05-30T23:22:17 | 2018-05-30T23:22:17 | 86,705,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,521 | py | class Parent:
def reply(self):
print '[parent - reply]{}'.format(type(self))
self.say()
def say(self):
print '[parent - say ]{}'.format(type(self))
print 'parent!'
class Child1(Parent):
def reply(self):
print '[child - reply]{}'.format(type(self))
Parent.reply(self)
def say(self):
print '[child - say ]{}'.format(type(self))
print 'child1!'
class Child2(Parent):
    # Same override, but delegates via super().  Because Parent is an
    # old-style class here, super() raises
    # "TypeError: super() argument 1 must be type, not classobj" at runtime
    # (see the expected output recorded at the bottom of this file).
    def reply(self):
        print '[child - reply]{}'.format(type(self))
        super(Child2, self).reply()
    def say(self):
        print '[child - say ]{}'.format(type(self))
        print 'child2!'
if __name__ == '__main__':
print '--- parent reply --->'
p = Parent()
p.reply()
print '--- child1 reply --->'
c1 = Child1()
c1.reply()
print '--- child2 reply --->'
c2 = Child2()
c2.reply()
# =>
# --- parent reply --->
# [parent - reply]<type 'instance'>
# [parent - say ]<type 'instance'>
# parent!
# --- child1 reply --->
# [child - reply]<type 'instance'>
# [parent - reply]<type 'instance'>
# [child - say ]<type 'instance'>
# child1!
# --- child2 reply --->
# [child - reply]<type 'instance'>
# Traceback (most recent call last):
# File "py2_old_style.py", line 40, in <module>
# c2.reply()
# File "py2_old_style.py", line 24, in reply
# super(Child2, self).reply()
# TypeError: super() argument 1 must be type, not classobj
| [
"dev.thinkami@gmail.com"
] | dev.thinkami@gmail.com |
0cc48f6a05685d468c82658acedeef5493f40c04 | 3176145632467710f2041f4f5dcfa66b4d874991 | /reinforcement-learning/approximation-methods/iterative_policy_evaluation.py | 6b787fc557c736183567b921d1a5f210af6afb3e | [] | no_license | WyckliffeAluga/potential-happiness | 804d3a83fc323ea306bdfeec2926031eb0686278 | 0737fb5ce64dd3683090aa70e7adf37769a54544 | refs/heads/master | 2022-11-20T11:53:03.879258 | 2020-07-29T02:03:48 | 2020-07-29T02:03:48 | 258,029,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,405 | py | # -*- coding: utf-8 -*-
"""
Created on Thu May 14 16:42:07 2020
@author: wyckliffe
"""
import numpy as np
from grid_world import standard_grid
import matplotlib.pyplot as plt
e = 10e-4 # threshold for convergence
def print_values(V, g):
    """Pretty-print the state-value table V for grid g.

    V maps (row, col) positions to floats; missing states print as 0.
    Negative values drop the leading pad space so the minus sign lines up.
    """
    for row in range(g.width):
        print("---------------------------")
        cells = []
        for col in range(g.height):
            value = V.get((row, col), 0)
            cells.append(" %.2f|" % value if value >= 0 else "%.2f|" % value)
        print("".join(cells), end="")
        print("")
def print_policy(P, g):
    """Pretty-print the policy table P for grid g.

    P maps (row, col) positions to single-character actions; states with
    no entry (terminal/blocked) print as a blank cell.
    """
    for row in range(g.width):
        print("---------------------------")
        line = "".join(" %s |" % P.get((row, col), ' ') for col in range(g.height))
        print(line, end="")
        print("")
if __name__ == "__main__" :
    # iterative policy evaluation:
    # given a policy, find V(s)
    # evaluated for both a uniformly random policy and a fixed policy
    # sources of randomness:
    # p(a|s) --> deciding what action to take given the state
    # p(s' , r | s, a) --> the next state and reward given action-state pair
    # here we model p(a|s) = uniform
    grid = standard_grid()
    # states will be positions (i,j)
    states = grid.all_states()
    # uniformly random actions
    # initialize V(s) = 0
    V = {}
    for s in states :
        V[s] = 0
    gamma = 1.0 # discount factor
    # repeat until convergence (largest single-state change below threshold e)
    while True :
        biggest_change = 0
        for s in states :
            old_v = V[s]
            # V(s) only has value if it is not a terminal state
            if s in grid.actions:
                new_v = 0 # we will accumulate the answer
                p_a = 1.0 / len(grid.actions[s]) # each action has equal probability
                for a in grid.actions[s] :
                    grid.set_state(s)
                    r = grid.move(a)
                    # Bellman expectation backup under the uniform policy
                    new_v += p_a * (r + gamma * V[grid.current_state()])
                V[s] = new_v
                biggest_change = max(biggest_change, np.abs(old_v - V[s]))
        if biggest_change < e :
            break
    print("Values for uniformly rabdom actions")
    print_values(V, grid)
    print("\n\n")
    # fixed policy: one deterministic action per non-terminal state
    policy = {
        (2, 0): 'U',
        (1, 0): 'U',
        (0, 0): 'R',
        (0, 1): 'R',
        (0, 2): 'R',
        (1, 2): 'R',
        (2, 1): 'R',
        (2, 2): 'R',
        (2, 3): 'U',
    }
    print_policy(policy, grid)
    # initialize V(s) = 0
    V = {}
    for s in states :
        V[s] = 0
    # how does V(s) change as we get further away from the reward
    gamma = 0.9 # discount factor
    # repeat until convergence
    while True :
        biggest_change = 0
        for s in states:
            old_v = V[s]
            # V(s) only has value if it is not a terminal state
            if s in policy :
                a = policy[s]
                grid.set_state(s)
                r = grid.move(a)
                # deterministic policy: single-sample Bellman backup
                V[s] = r + gamma * V[grid.current_state()]
                biggest_change = max(biggest_change , np.abs(old_v - V[s]))
        if biggest_change < e :
            break
    print("Values for fixed policy.")
    print_values(V, grid)
"51138208+WyckliffeAluga@users.noreply.github.com"
] | 51138208+WyckliffeAluga@users.noreply.github.com |
b37f56bd891cc9c216a019d7cc8cf4c89065115f | dfaf6f7ac83185c361c81e2e1efc09081bd9c891 | /k8sdeployment/k8sstat/python/kubernetes/client/models/v2beta1_horizontal_pod_autoscaler_list.py | b1e85c0e4ffff6429cf238d99e6d4e40127837b3 | [
"MIT",
"Apache-2.0"
] | permissive | JeffYFHuang/gpuaccounting | d754efac2dffe108b591ea8722c831d979b68cda | 2c63a63c571240561725847daf1a7f23f67e2088 | refs/heads/master | 2022-08-09T03:10:28.185083 | 2022-07-20T00:50:06 | 2022-07-20T00:50:06 | 245,053,008 | 0 | 0 | MIT | 2021-03-25T23:44:50 | 2020-03-05T02:44:15 | JavaScript | UTF-8 | Python | false | false | 6,832 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.6
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V2beta1HorizontalPodAutoscalerList(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> declared OpenAPI type (used by to_dict).
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V2beta1HorizontalPodAutoscaler]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }
    # Maps python attribute name -> JSON key used on the wire.
    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }
    def __init__(self, api_version=None, items=None, kind=None, metadata=None):  # noqa: E501
        """V2beta1HorizontalPodAutoscalerList - a model defined in OpenAPI"""  # noqa: E501
        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None
        # 'items' is required, so it is assigned unconditionally (the setter
        # rejects None); the other fields are optional.
        if api_version is not None:
            self.api_version = api_version
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
    @property
    def api_version(self):
        """Gets the api_version of this V2beta1HorizontalPodAutoscalerList.  # noqa: E501
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources  # noqa: E501
        :return: The api_version of this V2beta1HorizontalPodAutoscalerList.  # noqa: E501
        :rtype: str
        """
        return self._api_version
    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V2beta1HorizontalPodAutoscalerList.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources  # noqa: E501
        :param api_version: The api_version of this V2beta1HorizontalPodAutoscalerList.  # noqa: E501
        :type: str
        """
        self._api_version = api_version
    @property
    def items(self):
        """Gets the items of this V2beta1HorizontalPodAutoscalerList.  # noqa: E501
        items is the list of horizontal pod autoscaler objects.  # noqa: E501
        :return: The items of this V2beta1HorizontalPodAutoscalerList.  # noqa: E501
        :rtype: list[V2beta1HorizontalPodAutoscaler]
        """
        return self._items
    @items.setter
    def items(self, items):
        """Sets the items of this V2beta1HorizontalPodAutoscalerList.
        items is the list of horizontal pod autoscaler objects.  # noqa: E501
        :param items: The items of this V2beta1HorizontalPodAutoscalerList.  # noqa: E501
        :type: list[V2beta1HorizontalPodAutoscaler]
        """
        if items is None:
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501
        self._items = items
    @property
    def kind(self):
        """Gets the kind of this V2beta1HorizontalPodAutoscalerList.  # noqa: E501
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds  # noqa: E501
        :return: The kind of this V2beta1HorizontalPodAutoscalerList.  # noqa: E501
        :rtype: str
        """
        return self._kind
    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V2beta1HorizontalPodAutoscalerList.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds  # noqa: E501
        :param kind: The kind of this V2beta1HorizontalPodAutoscalerList.  # noqa: E501
        :type: str
        """
        self._kind = kind
    @property
    def metadata(self):
        """Gets the metadata of this V2beta1HorizontalPodAutoscalerList.  # noqa: E501
        :return: The metadata of this V2beta1HorizontalPodAutoscalerList.  # noqa: E501
        :rtype: V1ListMeta
        """
        return self._metadata
    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V2beta1HorizontalPodAutoscalerList.
        :param metadata: The metadata of this V2beta1HorizontalPodAutoscalerList.  # noqa: E501
        :type: V1ListMeta
        """
        self._metadata = metadata
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models (anything with to_dict),
        # including models inside lists and dict values.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V2beta1HorizontalPodAutoscalerList):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"JeffYFHuang@github.com"
] | JeffYFHuang@github.com |
0c824d6473b9658dfa17bbd735171214cf1c1148 | 8bf1c3691f1b9202569f600ef7e22f270998683b | /runtest.py | 2b7d203e691f80185fd8202af3388df20663da6e | [
"MIT"
] | permissive | gonchik/grab | 35c3b1e0605cfa850f92b4b15a68944fb8c7fc40 | d007afb7aeab63036d494f3b2704be96ea570810 | refs/heads/master | 2021-01-18T12:15:43.105005 | 2016-01-31T12:38:07 | 2016-01-31T12:38:07 | 50,773,598 | 0 | 0 | MIT | 2019-09-19T20:38:34 | 2016-01-31T12:35:29 | Python | UTF-8 | Python | false | false | 5,402 | py | #!/usr/bin/env python
# coding: utf-8
import unittest
import sys
from optparse import OptionParser
import logging
from copy import copy
from test.util import GLOBAL, start_server, stop_server
from weblib.watch import watch
# **********
# Grab Tests
# * pycurl transport
# * extensions
# **********
# TODO:
# * test redirect and response.url after redirect
GRAB_TEST_LIST = (
# Internal API
'test.grab_api',
'test.grab_transport',
'test.response_class',
'test.grab_debug', # TODO: fix tests excluded for urllib3
# Response processing
'test.grab_xml_processing',
'test.grab_response_body_processing',
'test.grab_charset',
'test.grab_redirect',
# Network
'test.grab_get_request',
'test.grab_post_request',
'test.grab_request', # TODO: fix tests excluded for urllib3
'test.grab_user_agent',
'test.grab_cookies', # TODO: fix tests excluded for urllib3
'test.grab_url_processing',
# Refactor
'test.grab_proxy',
'test.grab_upload_file',
'test.grab_limit_option',
'test.grab_charset_issue',
'test.grab_pickle', # TODO: fix tests excluded for urllib3
# *** Extension sub-system
# *** Extensions
'test.ext_text',
'test.ext_rex',
'test.ext_lxml',
'test.ext_form',
'test.ext_doc',
'test.ext_structured',
# *** Pycurl Test
'test.pycurl_cookie',
# *** util.module
'test.util_module',
'test.util_log',
# *** grab.export
'test.util_config',
'test.script_crawl',
#'test.script_start_project',
'test.grab_error',
'test.selector_deprecated',
'test.grab_deprecated',
'test.ext_pyquery',
'test.tools_deprecated',
)
# ************
# Spider Tests
# ************
SPIDER_TEST_LIST = (
'test.spider_task',
'test.spider',
'test.spider_proxy',
'test.spider_queue',
'test.spider_misc',
'test.spider_meta',
'test.spider_error',
'test.spider_cache',
'test.spider_data',
'test.spider_stat',
'test.spider_multiprocess',
)
def main():
    """Parse test-runner options, build a unittest suite, run it, and exit.

    Exit status 0 on success, 1 on any failure.  Mutates the module-level
    GLOBAL dict ('transport', 'backends', 'mp_mode') that the test modules
    read at import/run time.
    """
    logging.basicConfig(level=logging.DEBUG)
    parser = OptionParser()
    parser.add_option('-t', '--test', help='Run only specified tests')
    parser.add_option('--transport', default='pycurl')
    parser.add_option('--test-grab', action='store_true',
                      default=False, help='Run tests for Grab::Spider')
    parser.add_option('--test-spider', action='store_true',
                      default=False, help='Run tests for Grab')
    parser.add_option('--test-all', action='store_true',
                      default=False,
                      help='Run tests for both Grab and Grab::Spider')
    parser.add_option('--backend-mongo', action='store_true',
                      default=False,
                      help='Run extra tests that depends on mongodb')
    parser.add_option('--backend-redis', action='store_true',
                      default=False,
                      help='Run extra tests that depends on redis')
    parser.add_option('--backend-mysql', action='store_true',
                      default=False,
                      help='Run extra tests that depends on mysql')
    parser.add_option('--backend-postgresql', action='store_true',
                      default=False,
                      help='Run extra tests that depends on postgresql')
    parser.add_option('--mp-mode', action='store_true', default=False,
                      help='Enable multiprocess mode in spider tests')
    parser.add_option('--profile', action='store_true', default=False,
                      help='Do profiling')
    opts, args = parser.parse_args()
    GLOBAL['transport'] = opts.transport
    # Backend flags are additive: each one enables the tests tagged with
    # that backend (see the _backend filter below).
    if opts.backend_mongo:
        GLOBAL['backends'].append('mongo')
    if opts.backend_redis:
        GLOBAL['backends'].append('redis')
    if opts.backend_mysql:
        GLOBAL['backends'].append('mysql')
    if opts.backend_postgresql:
        GLOBAL['backends'].append('postgresql')
    # Selection flags are additive too; -t/--test appends a single module.
    test_list = []
    if opts.test_all:
        test_list += GRAB_TEST_LIST
        test_list += SPIDER_TEST_LIST
    if opts.test_grab:
        test_list += GRAB_TEST_LIST
    if opts.test_spider:
        test_list += SPIDER_TEST_LIST
    if opts.test:
        test_list += [opts.test]
    GLOBAL['mp_mode'] = opts.mp_mode
    # Check tests integrity
    # Ensure that all test modules are imported correctly
    for path in test_list:
        __import__(path, None, None, ['foo'])
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for path in test_list:
        mod_suite = loader.loadTestsFromName(path)
        for some_suite in mod_suite:
            for test in some_suite:
                # Skip tests tagged with a backend the user did not enable.
                if (not hasattr(test, '_backend') or
                        test._backend in GLOBAL['backends']):
                    suite.addTest(test)
    runner = unittest.TextTestRunner()
    #start_server()
    if opts.profile:
        # Profile the whole run and dump a kcachegrind-compatible tree.
        import cProfile
        import pyprof2calltree
        import pstats
        profile_tree_file = 'var/test.prof.out'
        prof = cProfile.Profile()
        result = prof.runcall(runner.run, suite)
        stats = pstats.Stats(prof)
        stats.strip_dirs()
        pyprof2calltree.convert(stats, profile_tree_file)
    else:
        result = runner.run(suite)
    if result.wasSuccessful():
        sys.exit(0)
    else:
        sys.exit(1)
if __name__ == '__main__':
main()
| [
"lorien@lorien.name"
] | lorien@lorien.name |
8294d076e880d517e835d02c3ff0c531a9974495 | f2ed1b993139c85767d2e6a1b1be74fdfad23822 | /jquery/insert_text1.py | 801f6404a588311a9a12d81bd92a18a83cc39609 | [] | no_license | bunkahle/Transcrypt-Examples | 5377674597eb4b6d6eb92d5ae71059b97f3e0d2e | 17d6460f3b532bb8258170a31875e4e26a977839 | refs/heads/master | 2022-06-22T17:40:33.195708 | 2022-05-31T15:36:37 | 2022-05-31T15:36:37 | 120,099,101 | 31 | 10 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__pragma__ ('alias', 'S', '$')
# instead of : document.getElementById('output').innerText = text
def action():
    """Write the greeting into the #output element via the jQuery alias S."""
    greeting = 'Hello, DOM!'
    S("#output").text(greeting)
S(document).ready(action)
| [
"noreply@github.com"
] | bunkahle.noreply@github.com |
bfc654f03e4abb805c4eea1db3b5b3cdb780fb9b | 5f86944bdf1b810a84c63adc6ed01bbb48d2c59a | /kubernetes/test/test_v1_container_state_running.py | 2eb68d73308f529449f28a08d7f15a0f0e8c4179 | [
"Apache-2.0"
] | permissive | m4ttshaw/client-python | 384c721ba57b7ccc824d5eca25834d0288b211e2 | 4eac56a8b65d56eb23d738ceb90d3afb6dbd96c1 | refs/heads/master | 2021-01-13T06:05:51.564765 | 2017-06-21T08:31:03 | 2017-06-21T08:31:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_container_state_running import V1ContainerStateRunning
class TestV1ContainerStateRunning(unittest.TestCase):
    """ V1ContainerStateRunning unit test stubs """
    # Auto-generated smoke test: only verifies that the model class can be
    # instantiated with no arguments.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testV1ContainerStateRunning(self):
        """
        Test V1ContainerStateRunning
        """
        model = kubernetes.client.models.v1_container_state_running.V1ContainerStateRunning()
if __name__ == '__main__':
unittest.main()
| [
"mehdy@google.com"
] | mehdy@google.com |
d341468a41a394a1836146fad9c9b7d85402a0ab | 5dd2dc445bc0c4af6d29bf1290969593689c6dfc | /actor critic/main.py | 3696a4c30cf275d400afaeeed63ad7d7ac493038 | [] | no_license | RobertSamoilescu/RL_bootcamp | 446ff988f0dd8cfdf1c91f7d14ea983092a08ce0 | d5f774bfebf5f6a5d7f0440a7c60f58d2706e7aa | refs/heads/master | 2022-01-09T07:13:25.862297 | 2019-06-20T22:49:03 | 2019-06-20T22:49:03 | 192,088,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,525 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import numpy as np
import gym
from itertools import count
from torch.distributions.categorical import Categorical
from tensorboardX import SummaryWriter
# define tensorboard summary
writer = SummaryWriter()
# create environment
env = gym.make("CartPole-v0")
# create actor model
actor = nn.Sequential(
nn.Linear(4, 128),
nn.ReLU(),
nn.Linear(128, env.action_space.n)
).cuda()
# create critic model
critic = nn.Sequential(
nn.Linear(4, 128),
nn.ReLU(),
nn.Linear(128, 1)
).cuda()
# define optimizers
actor_optimizer = torch.optim.RMSprop(actor.parameters(), lr=7e-4)
critic_optimizer = torch.optim.RMSprop(critic.parameters(), lr=7e-4)
# loss criterion
critic_criterion = nn.MSELoss()
def sample_trajectories(no_trajectories=256):
    """Roll out `no_trajectories` complete episodes with the current actor.

    Returns (trajectories, returns): each trajectory is a tuple of per-step
    lists (states, actions, rewards, next_states, masks) and `returns`
    holds the undiscounted episode returns.  Uses the module-level `env`
    and `actor`; state tensors are moved to the GPU.
    """
    trajectories = []
    returns = []
    for i in range(no_trajectories):
        state = env.reset()
        states = []; next_states = []
        actions = []; rewards = []
        masks = []
        rreturn = 0  # undiscounted return of the current episode
        for t in count():
            # batch of one observation; CartPole observations are 4-vectors
            state = torch.tensor(state).unsqueeze(0).float().cuda()
            with torch.no_grad():
                probs = F.softmax(actor(state), dim=1).squeeze(0)
            # sample action from the categorical policy distribution
            categorical = Categorical(probs)
            action = categorical.sample().item()
            # interact with env (old gym 4-tuple step API)
            next_state, reward, done, info = env.step(action)
            rreturn += reward
            # add sample to trajectory; mask is 1.0 on the terminal step
            states.append(state)
            actions.append(torch.tensor([action]))
            rewards.append(torch.tensor([reward]))
            next_states.append(torch.tensor(next_state).unsqueeze(0).float())
            masks.append(torch.tensor([done]).float())
            # update state
            state = next_state
            if done:
                trajectories.append((states, actions, rewards, next_states, masks))
                returns.append(rreturn)
                break
    return trajectories, returns
def optimize_critic(trajectories, gamma=0.99):
    """Fit the critic with one TD(0) regression step over a batch of trajectories.

    Args:
        trajectories: list of (states, actions, rewards, next_states, masks)
            tuples as produced by sample_trajectories(); each element is a
            list of per-step tensors.
        gamma: discount factor for the bootstrapped TD target.

    Side effects: performs one optimizer step on the module-level `critic`
    using `critic_optimizer` and `critic_criterion`.
    """
    loss = 0
    for states, actions, rewards, next_states, masks in trajectories:
        # (the per-step actions are unused by the state-value critic)
        states = torch.cat(states, dim=0)
        rewards = torch.cat(rewards).reshape(-1, 1).cuda()
        next_states = torch.cat(next_states, dim=0).cuda()
        masks = torch.cat(masks).reshape(-1, 1).cuda()
        y = critic(states)
        # TD(0) target: r + gamma * V(s') for non-terminal steps (mask == 1
        # on the terminal transition).  detach() keeps gradients from flowing
        # through the target, the standard semi-gradient TD update.
        y_target = (rewards + gamma * (1. - masks) * critic(next_states)).detach()
        loss += critic_criterion(y, y_target)
    loss = loss / len(trajectories)
    # optimize critic
    critic_optimizer.zero_grad()
    loss.backward()
    for param in critic.parameters():
        # BUG FIX: the original called clamp(), which is out-of-place and whose
        # result was discarded, so gradients were never actually clipped.
        # clamp_() clips in place, matching optimize_actor.
        param.grad.data.clamp_(-1, 1)
    critic_optimizer.step()
def optimize_actor(trajectories, gamma=0.99):
    """One policy-gradient step on the actor, using a one-step TD advantage.

    Side effects: performs one optimizer step on the module-level `actor`
    using `actor_optimizer`; also evaluates the module-level `critic`.
    """
    loss = 0
    for states, actions, rewards, next_states, masks in trajectories:
        states = torch.cat(states, dim=0)
        actions = torch.cat(actions).reshape(-1, 1).cuda()
        rewards = torch.cat(rewards).reshape(-1, 1).cuda()
        next_states = torch.cat(next_states, dim=0).cuda()
        masks = torch.cat(masks).reshape(-1, 1).cuda()
        # compute log probabilities of the actions actually taken
        log_pi = torch.log(F.softmax(actor(states), dim=1).gather(1, actions))
        # compute one-step advantage estimate: r + gamma*V(s') - V(s)
        # NOTE(review): adv is not detached, so this loss also back-propagates
        # into the critic's weights -- confirm whether that is intended.
        adv = rewards + gamma * (1. - masks) * critic(next_states) - critic(states)
        # compute loss (negated below: optimizer minimizes, we maximize)
        loss += torch.sum(log_pi * adv)
    loss = -loss / len(trajectories)
    actor_optimizer.zero_grad()
    loss.backward()
    for param in actor.parameters():
        param.grad.data.clamp_(-1, 1)
    actor_optimizer.step()
def actor_critic(no_updates):
    """Main training loop: alternate critic and actor updates for
    `no_updates` iterations, logging to tensorboardX and checkpointing
    the networks every 10 updates.
    """
    for update in range(1, no_updates + 1):
        # sample trajectories
        trajectories, returns = sample_trajectories()
        # compute gradient and optimize critic
        optimize_critic(trajectories)
        # compute gradient and optimize actor
        optimize_actor(trajectories)
        # tensorboardX logger: weight histograms for both networks
        for name, param in critic.named_parameters():
            writer.add_histogram("critic/" + name, param.clone().cpu().data.numpy(), update)
        for name, param in actor.named_parameters():
            writer.add_histogram("actor/" + name, param.clone().cpu().data.numpy(), update)
        # summary statistics of the undiscounted episode returns
        min_rr, mean_rr, max_rr, std = np.min(returns), np.mean(returns), \
            np.max(returns), np.std(returns)
        # logs
        writer.add_scalar("mean_return", mean_rr, update)
        writer.add_scalar("min_return", min_rr, update)
        writer.add_scalar("max_return", max_rr, update)
        print("Update: %d, Mean return: %.2f, Min return: %.2f, Max return: %.2f, Std: %.2f" %
              (update, mean_rr, min_rr, max_rr, std))
        # checkpoint every 10 updates
        if update % 10 == 0:
            torch.save(actor.state_dict(), "actor")
            torch.save(critic.state_dict(), "critic")
            print("Models saved")
    env.close()
    writer.close()
if __name__ == "__main__":
actor_critic(no_updates=1000000)
| [
"robert.samoilescu@gmail.com"
] | robert.samoilescu@gmail.com |
10ba8748e14f3798670a8a137684dea34a321f07 | a36fb46fc6416aa9e1a874a8f61bfe10535f511b | /Day20/solution.py | e16b0986b1e6ac69d24610e5ccd82c17e52c90e7 | [] | no_license | RaspiKidd/AoC2018 | 846995bd292d0103da69855d2965efb19d958f2a | 76f5e42de98d26344d44f0ed389bc681137ea6ea | refs/heads/master | 2020-04-09T02:21:40.253987 | 2018-12-22T11:19:33 | 2018-12-22T11:19:33 | 159,937,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,285 | py | import networkx
# Advent of Code 2018, day 20: rooms are complex-number coordinates and the
# input regex describes door movements; build the room graph, then answer
# part 1 from single-source shortest-path lengths.
maze = networkx.Graph()
paths = open('input.txt').read()[1:-1]  # strip the enclosing ^ and $
pos = {0} # the current positions that we're building on
stack = [] # a stack keeping track of (starts, ends) for groups
starts, ends = {0}, set() # current possible starting and ending positions
for c in paths:
    if c == '|':
        # an alternate: update possible ending points, and restart the group
        ends.update(pos)
        pos = starts
    elif c in 'NESW':
        # move in a given direction: add all edges and update our current positions
        # (positions are complex: N/S move by +-1, E/W by +-1j)
        direction = {'N': 1, 'E': 1j, 'S': -1, 'W': -1j}[c]
        maze.add_edges_from((p, p + direction) for p in pos)
        pos = {p + direction for p in pos}
    elif c == '(':
        # start of group: add current positions as start of a new group
        stack.append((starts, ends))
        starts, ends = pos, set()
    elif c == ')':
        # end of group: finish current group, add current positions as possible ends
        pos.update(ends)
        starts, ends = stack.pop()
# find the shortest path lengths from the starting room to all other rooms
lengths = networkx.algorithms.shortest_path_length(maze, 0)
print('part1:', max(lengths.values()))
print('part2:', sum(1 for length in lengths.values() if length >= 1000)) | [
"kerry@raspikidd.com"
] | kerry@raspikidd.com |
ce40ecd136466000b6cd713b9b978e417d2d76d2 | ff2d0e396c2e277751fced5869975faa8260e1d9 | /BASIC/SplitDataset.py | 2e72f8d4b143288ee97d5f5781819cf26a72aea2 | [] | no_license | kongruksiamza/MachineLearning | 53207055090deaea0a44789cebfef01f5d395188 | b6843a5fb97af9d21fe13aee6c0d45f36ff99131 | refs/heads/master | 2023-08-02T11:17:53.359201 | 2023-07-23T16:15:38 | 2023-07-23T16:15:38 | 251,460,002 | 54 | 41 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | from sklearn.datasets import load_iris
# Demonstrate an 80/20 train/test split of the 150-sample iris dataset.
from sklearn.model_selection import train_test_split
iris_dataset=load_iris()
# random_state=0 makes the shuffle reproducible across runs
x_train,x_test,y_train,y_test = train_test_split(iris_dataset["data"],iris_dataset["target"],test_size=0.2,random_state=0)
print(x_train.shape)
print(x_test.shape)
print(y_train.shape)
print(y_test.shape)
#150 samples total
#train 80% = 120
#test 20% = 30 | [
"noreply@github.com"
] | kongruksiamza.noreply@github.com |
3dd5f7ff3422dda82d6a47010f7030a4e120e353 | ba9cb3bbc46faeea1edc01ef7e18131ae2dbf923 | /problem-046.py | 13d019dd467ac87655a99dcce72627471b56b455 | [] | no_license | beautytiger/project-euler | fb9908d35a82cd4b912a541282842adca03b17e2 | a8de90a2e5b98660505169afd9c8c27b1b3af28e | refs/heads/master | 2021-06-19T00:40:17.331130 | 2017-05-31T11:04:05 | 2017-05-31T11:04:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 834 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from tools.runningTime import runTime
from tools.common import is_prime
def fit_conjecture(n_odd):
    """Return True if n_odd = prime + 2*k**2 for some k >= 1.

    This is the decomposition asserted by Goldbach's "other" conjecture
    (Project Euler problem 46); relies on the module-level is_prime().
    """
    k = 1
    while 2 * k * k < n_odd:
        if is_prime(n_odd - 2 * k * k):
            return True
        k += 1
    return False
@runTime
def bruteForce():
    # Scan odd numbers from 35 upward and print the first one that is
    # neither prime nor expressible as prime + 2*k**2, i.e. the smallest
    # counterexample to Goldbach's other conjecture (Project Euler 46).
    odd = 33
    while True:
        odd += 2
        if is_prime(odd) or fit_conjecture(odd):
            continue
        print "Result: {}".format(odd)
        break
@runTime
def newBruteForce():
    # Faster variant: grow the set of odd primes incrementally and test each
    # odd composite n with O(1) set membership on n - 2*i*i.
    n = 5
    primes = set([2, 3])
    while True:
        if all(n%p for p in primes):
            # n has no known prime divisor -> n itself is prime
            primes.add(n)
        elif not any((n-2*i*i) in primes for i in range(1, int(n**0.5)+1)):
            # odd composite with no prime + 2*square decomposition: answer found
            break
        n += 2
    print "Result: {}".format(n)
if __name__ == "__main__":
bruteForce()
newBruteForce()
| [
"konmyn@163.com"
] | konmyn@163.com |
36e803ee78844708170f2dc1daa1eff208b5f8b0 | a003200e29c4ea64e9ef65a23eea2d1d8c4ad03b | /client/web/client_control_port.py | 6b54ac526ac9c42588bfaf40df72078e55dcbd25 | [] | no_license | jason12360/crazy_coder | 10b77ef69994ff11e7fde49ad28d928851388be9 | 7ba161ef89ffd2da66f430c4af04a53c2de1667f | refs/heads/master | 2020-03-20T22:10:35.884104 | 2018-06-29T10:06:24 | 2018-06-29T10:06:24 | 137,783,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,653 | py | from socket import *
import sys
import os
import time
import signal
from threading import Thread
import client_data_port
#导入model相关模块
from file import *
from file_folder import *
from database_handler import *
import my_protocol
#导入视图的相关模块
from login_view import Login_Page
from login_handler import Login_handler
from main_page import MainPage
from main_handler import Main_handler
# 用户路径
file_path = '/home/tarena/ftp_web(2)/'
#错误代码
# CODE_NUM=0
def run(ctrl_socket,child_pid,recv_queue,send_queue):
    """Control-port entry point: build the FTP client and drive the GUI.

    Shows the login window first, then the main window.  `child_pid` and
    `send_queue`/`recv_queue` connect this process to the data-transfer
    child process.
    """
    # hand the control connection to the handlers for the login workflow
    # and create the client request object
    global _ctrl_socket
    _ctrl_socket = ctrl_socket
    c_ftp = MyFtp_Client(_ctrl_socket,recv_queue)
    # login UI wiring
    login_page = Login_Page()
    login_handler = Login_handler(login_page)
    login_handler.bind(comment_handler,c_ftp)
    login_page.register_handler(login_handler)
    login_handler.setup(child_pid,send_queue)
    login_page.run()
    main_page = MainPage()
    global main_handler
    main_handler =Main_handler(main_page)
    c_ftp.set_view_handler(main_handler)
    main_handler.bind(comment_handler,c_ftp)
    main_page.register_handler(main_handler)
    # have the parent process listen for the child's signal (real-time
    # signal 40) to refresh the chat display
    signal.signal(40,main_handler.display_chat)
    main_handler.setup(child_pid,send_queue)
    main_page.run()
# 协议结构:请求类别 + 属性 + 内容 + 结束符
# comment='list+'+str(client_add)+'+'+''+'+@end'
# 控制端--功能选择界面调用函数
#控制线程,这个线程中只做控制,具体传输反馈由父线程完成
def comment_handler(comment, c_ftp):
    """Dispatch a '+'-separated command string to the matching client request.

    Protocol layout: verb + attribute + content (at least three fields).
    Returns the client method's result, 0 for quit, or None for chat and
    unknown verbs.
    """
    fields = comment.split('+')
    print(fields)
    verb = fields[0]
    if verb == "list":
        # fields[2] names an optional folder; empty string means root listing
        if fields[2]:
            return c_ftp.list_request(fields[2])
        return c_ftp.list_request()
    if verb == "upld":
        # fields[1] = file attributes, fields[2] = filename
        return c_ftp.upload_request(fields[1], fields[2])
    if verb == "dwld":
        # fields[1] = destination path, fields[2] = filename
        return c_ftp.download_request(fields[1], fields[2])
    if verb == 'chat':
        # fields[2] carries the chat text; nothing to return
        c_ftp.chat_request(fields[2])
        return None
    if verb == "login":
        # fields[1] = account, fields[2] = password
        return c_ftp.login_request(fields[1], fields[2])
    if verb == "reg":
        # fields[1] = account, fields[2] = password
        return c_ftp.register_request(fields[1], fields[2])
    if verb == "quit":
        c_ftp.quit_request()
        return 0
    print("commond is not defined")
class MyFtp_Client():
    """Control-channel FTP client.

    Packs requests with my_protocol over the control socket, parses the
    server's replies, and spawns client_data_port threads for the actual
    file transfers.
    """
    def __init__(self, s,chat_queue):
        # s: connected control socket; chat_queue: queue of incoming chat text
        self.s = s
        self.chat_queue = chat_queue
    def list_request(self, foldername=''):
        """Ask the server for a listing; '' requests the root folder.

        Returns the listing text, or -1 on a malformed/truncated reply.
        """
        my_protocol.list_bale_TCP(self.s, foldername)
        # wait for the reply; messages are terminated by the '@end' marker
        data = ''
        while True:
            _ = self.s.recv(2048).decode()
            if _[-4:]=='@end':
                data += _
                break
            data+=_
        # protocol layout: verb + attribute + content + '@end'
        x = data.split('+')
        if x[0] in ['list','upld','dwld','chat','quit','login','reg']:
            if x[3]=='@end':
                return x[2]
            else:
                print('数据丢包')  # packet loss
                return -1
        elif x[0]=='':
            print("客户端意外退出")  # peer closed unexpectedly
            return -1
        else:
            print('数据丢包')  # packet loss
            return -1
    # bind the view handler so the worker thread can report progress to the UI
    def set_view_handler(self,handler):
        self.view_handler = handler
    # This request only tells the server the filename and file attributes to
    # send; the server first checks for a duplicate name.  If the name exists
    # it reports the clash, otherwise it records the file in its database.
    def upload_request(self, file_property,filename):
        """Negotiate an upload; returns status '1' (ok/started) or '3' (duplicate)."""
        # status code used to report the outcome
        CODE_NUM='1'
        # pack and send the request
        my_protocol.upld_bale_TCP(self.s,file_property,filename)
        # build a File object to hand to the transfer thread
        file = File()
        file.unpack(file_property)
        # wait for the server's answer
        data = my_protocol.unpake_TCP(self.s)
        if data != -1:
            if data[2] == '3':
                CODE_NUM='3'
            elif data[2]=='go':
                DATA_HOST = self.s.getsockname()[0]
                DATA_PORT = 0
                DATA_ADDR = (DATA_HOST, DATA_PORT)
                data_socket = socket()
                data_socket.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)
                # bind a client-side data socket (port 0 = OS-chosen)
                data_socket.bind(DATA_ADDR)
                # data_socket.listen(10)
                data_addr = data_socket.getsockname()
                # tell the server which port was chosen
                my_protocol.upld_bale_TCP(self.s,'',str(data_socket.getsockname()[1]))
                # NOTE(review): the socket is closed before the transfer thread
                # starts -- presumably client_data_port.run re-binds data_addr
                # (SO_REUSEADDR is set); confirm against that module.
                data_socket.close()
                # start a new thread to upload the file
                t = Thread(target=client_data_port.run, args=(
                    'u', data_addr,file,self.view_handler))
                t.setDaemon(True)
                t.start()
        return CODE_NUM
    def download_request(self,download_path,filename):
        """Negotiate a download into download_path; returns '1' (ok) or '2' (missing)."""
        CODE_NUM="1"
        my_protocol.dwld_bale_TCP(self.s,'',filename)
        # wait for the server's answer
        data = my_protocol.unpake_TCP(self.s)
        if data != -1:
            if data[2] == '2':
                # server reports the file does not exist
                CODE_NUM='2'
            elif data[2]=='go':
                file_path = download_path +'/'+filename
                DATA_HOST = self.s.getsockname()[0]
                DATA_PORT = 0
                DATA_ADDR = (DATA_HOST, DATA_PORT)
                data_socket = socket()
                data_socket.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)
                # bind a client-side data socket (port 0 = OS-chosen)
                data_socket.bind(DATA_ADDR)
                # data_socket.listen(10)
                data_addr = data_socket.getsockname()
                # tell the server which port was chosen
                my_protocol.upld_bale_TCP(self.s,'',str(data_socket.getsockname()[1]))
                # socket closed before the transfer thread starts (see upload)
                data_socket.close()
                t = Thread(target=client_data_port.run, args=(
                    'd', data_addr,file_path,self.view_handler))
                t.setDaemon(True)
                t.start()
                # if R==10:
                #     CODE_NUM=10
                # else:
                #     CODE_NUM=11
        return CODE_NUM
    def chat_request(self, message):
        # chat sending over the control channel is currently disabled
        # my_protocol.chat_bale_TCP(self.s,message)
        pass
    def login_request(self, admin, password):
        # send credentials to the server to check against its user table;
        # on success the user is logged in
        # note: admin and password must both be strings
        my_protocol.login_request(self.s, admin, password)
        response = self.s.recv(1024).decode()
        return response
    def register_request(self, admin, password):
        # note: admin and password must both be strings
        my_protocol.reg_request(self.s, admin, password)
        response = self.s.recv(1024).decode()
        return response
    def get_chat_word(self):
        # blocking read of the next chat message from the child process
        return self.chat_queue.get()
    def quit_request(self):
        # pack and send the quit notice, then close the control socket
        my_protocol.quit_bale_TCP(self.s)
        self.s.close()
        print("已退出")  # "logged out"
| [
"370828117@qq.com"
] | 370828117@qq.com |
59b868c98b2088899c75c3b49e981d14738f1ed6 | 7ec5aa43d8f2e732189944391447a8551d24abaa | /backend/home/migrations/0002_load_initial_data.py | b7766e8a267c6d8282dbfff6aac063a6b6502fa2 | [] | no_license | crowdbotics-apps/louis-vesovski-19158 | e852e8158074704226a77ee9c564c7f29f39413e | 61f4460d77d9077363392820f719febdd8a30434 | refs/heads/master | 2022-11-26T04:53:16.880047 | 2020-07-25T00:53:05 | 2020-07-25T00:53:05 | 282,343,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,310 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
    """Seed the home.CustomText table with the site title."""
    model = apps.get_model("home", "CustomText")
    model.objects.create(title="Louis Vesovski")
def create_homepage(apps, schema_editor):
    """Seed the home.HomePage table with the default landing-page markup."""
    model = apps.get_model("home", "HomePage")
    markup = """
<h1 class="display-4 text-center">Louis Vesovski</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
    model.objects.create(body=markup)
def create_site(apps, schema_editor):
    """Point the default django.contrib.sites record at the deployed domain."""
    site_model = apps.get_model("sites", "Site")
    domain = "louis-vesovski-19158.botics.co"
    defaults = {"name": "Louis Vesovski"}
    if domain:
        defaults["domain"] = domain
    # Update the pre-existing record with pk=1 (created by the sites app).
    site_model.objects.update_or_create(defaults=defaults, id=1)
class Migration(migrations.Migration):
    # Runs after the initial home models and the sites framework's
    # unique-domain migration are in place.
    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]
    # Seed CustomText, HomePage and the default Site record, in order.
    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
a541a023a5932b483d9c0793625a70dedb494f6b | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /G7m26EdX3AABCSQBv_16.py | 9a9f37cc926a9dcbbedbf54b6e1d21a544cebe38 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | """
Given a very long string of ASCII characters, split the string up into equal
sized groups of size `width`. To properly display the image, join up the
groups with the newline character `\n` and return the output string.
See the miniature examples below for clarity!
### Examples
format_ascii("0123456789", 2) ➞ "01\n23\n45\n67\n89"
format_ascii("................................", 8) ➞ "........\n........\n........\n........"
format_ascii("^^^^^^^^", 1) ➞ "^\n^\n^\n^\n^\n^\n^\n^"
### Notes
Enjoy the (somewhat oversized) art in the **Tests** tab.
"""
def format_ascii(txt, width):
    """Split *txt* into consecutive groups of *width* characters joined by newlines.

    Args:
        txt: The ASCII string to reflow.
        width: Number of characters per output row (must be > 0).

    Returns:
        The reflowed string; a trailing partial group is kept as-is.
        E.g. format_ascii("0123456789", 2) -> "01\\n23\\n45\\n67\\n89".
    """
    # Slice into width-sized chunks and join once: replaces the previous
    # char-by-char `result +=` loop (quadratic concatenation) and its
    # bitwise `&` on boolean comparisons, which only worked because both
    # operands were parenthesized.
    return "\n".join(txt[i:i + width] for i in range(0, len(txt), width))
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
59403b93978115731520c62a6f10b86aa0fa685f | 6915d6a20d82ecf2a2a3d3cd84ca22dab2491004 | /cbvproject3/testapp/admin.py | c1eeb35fe942b7f66b8b1bf1196933923b71ca4b | [] | no_license | iitian-gopu/django | bb4302d101f4434fb61ab374807e29699a432e42 | 31db982212bbb453cc4c56c7f5cfad9a00cd231d | refs/heads/master | 2023-05-14T07:22:35.176477 | 2021-06-04T04:43:26 | 2021-06-04T04:43:26 | 366,114,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | from django.contrib import admin
from testapp.models import Company
# Register your models here.
class CompanyAdmin(admin.ModelAdmin):
    """Admin configuration for the Company model."""

    # Columns shown on the admin changelist page.
    list_display = ['name', 'location', 'ceo']


admin.site.register(Company, CompanyAdmin)
"gopalkrishujaiswal2030@gmail.com"
] | gopalkrishujaiswal2030@gmail.com |
a91ee7ae0112a2ba01ad8bf8c33a7d499b5605e0 | 475e2fe71fecddfdc9e4610603b2d94005038e94 | /Coding/listComprehension.py | b45dd7656d0b549f761c910df9a9a7ed7b98f52d | [] | no_license | sidhumeher/PyPractice | 770473c699aab9e25ad1f8b7b7cd8ad05991d254 | 2938c14c2e285af8f02e2cfc7b400ee4f8d4bfe0 | refs/heads/master | 2021-06-28T20:44:50.328453 | 2020-12-15T00:51:39 | 2020-12-15T00:51:39 | 204,987,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | '''
Created on Dec 27, 2018
@author: siddardha.teegela
'''
if __name__ == '__main__':
    # List comprehension demo: [expression for item in iterable if condition]
    source_list = [1, 2, 3, 4, 5]
    doubled = [value * 2 for value in source_list if value > 1]
    print(doubled)
"sidhumeher@yahoo.co.in"
] | sidhumeher@yahoo.co.in |
70c89281004cff4894e4f6ebe0880822e29655e1 | 941babd1b7711c9e2935704db283568536f06306 | /app/members/backends.py | cf753ebab5f6ce66cf8c987be1950138f731b6b3 | [] | no_license | orca9s/eb-docker-deploy | 8006fc0bcc81f76137f92a11c417708bcc1acbd9 | f5f5ccc2ff8719160528f04c44f737acb39a9b00 | refs/heads/master | 2022-12-10T09:00:08.879297 | 2018-07-17T13:04:24 | 2018-07-17T13:04:24 | 140,647,708 | 0 | 0 | null | 2022-12-08T02:20:14 | 2018-07-12T02:00:52 | Python | UTF-8 | Python | false | false | 805 | py | from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.hashers import check_password
from members.management.commands.createsu import Command
User = get_user_model()
class SettingsBackend:
    """Authentication backend that checks credentials against Django settings.

    The expected username comes from settings.ADMIN_USERNAME and the expected
    password hash from settings.ADMIN_PASSWORD.
    """

    def authenticate(self, request, username=None, password=None):
        """Return a superuser for the settings-defined admin, else None."""
        username_matches = settings.ADMIN_USERNAME == username
        password_matches = check_password(password, settings.ADMIN_PASSWORD)
        if not (username_matches and password_matches):
            return None
        try:
            admin_user = User.objects.get(username=username)
        except User.DoesNotExist:
            admin_user = User(username=username)
        # The staff/superuser flags are (re)applied and persisted on every
        # successful authentication, including for already-existing users.
        admin_user.is_staff = True
        admin_user.is_superuser = True
        admin_user.save()
        return admin_user

    def get_user(self, user_id):
        """Return the User with primary key *user_id*, or None if absent."""
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None
"sang93423@gmail.com"
] | sang93423@gmail.com |
f351c542553963cd2645833531a8376989706d28 | 62d62fd3ee2f0717c7acbb0c2edfa1e53926f728 | /phoenix/wizard/views/catalogsearch.py | 6d0f2b3bbffa9f8ff7fc8771dcc7123800a75736 | [
"Apache-2.0"
] | permissive | rmoorman/pyramid-phoenix | 911a9ef8dcca48889834cf46109321056cdbb35b | ed3ede4dbb80f00bcd647a5e4ae2afbedab94e09 | refs/heads/master | 2021-01-15T14:29:20.450828 | 2016-05-04T16:27:04 | 2016-05-04T16:27:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,893 | py | from pyramid.view import view_config
from phoenix.wizard.views import Wizard
import logging
logger = logging.getLogger(__name__)
import colander
class CatalogSearchSchema(colander.MappingSchema):
    # Intentionally empty: this wizard step renders a custom search view
    # instead of a generated form.
    pass
class CatalogSearch(Wizard):
    """Wizard step that lets the user pick datasets from a CSW catalog.

    Records are filtered by the logged-in user's email and the first mime
    type of the current complex input; toggled record ids are stored in the
    wizard appstruct under the 'selection' key.
    """

    def __init__(self, request):
        super(CatalogSearch, self).__init__(
            request, name='wizard_csw', title="CSW Catalog Search")
        # Show the identifier of the complex input being filled.
        self.description = self.wizard_state.get('wizard_complex_inputs')['identifier']

    def schema(self):
        """Return the (empty) colander schema for this step."""
        return CatalogSearchSchema()

    def next_success(self, appstruct):
        #self.success(appstruct)
        return self.next('wizard_done')

    def search_csw(self, query=''):
        """Query the CSW catalog and return a list of record dicts.

        Whitespace-separated words in *query* become AnyText LIKE filters.
        Returns [] when the catalog request fails (the error is logged).
        """
        keywords = [k for k in map(str.strip, str(query).split(' ')) if len(k)>0]
        # TODO: search all formats
        # Renamed from `format` to avoid shadowing the builtin.
        mime_format = self.wizard_state.get('wizard_complex_inputs')['mime_types'][0]
        from string import Template
        cql_tmpl = Template("""\
        dc:creator='${email}'\
        and dc:format='${format}'
        """)
        cql = cql_tmpl.substitute({
            'email': self.get_user().get('email'),
            'format': mime_format})
        cql_keyword_tmpl = Template('and csw:AnyText like "%${keyword}%"')
        for keyword in keywords:
            cql += cql_keyword_tmpl.substitute({'keyword': keyword})
        results = []
        try:
            self.csw.getrecords(esn="full", cql=cql)
            logger.debug('csw results %s', self.csw.results)
            for rec in self.csw.records:
                myrec = self.csw.records[rec]
                results.append(dict(
                    source = myrec.source,
                    identifier = myrec.identifier,
                    title = myrec.title,
                    abstract = myrec.abstract,
                    subjects = myrec.subjects,
                    format = myrec.format,
                    creator = myrec.creator,
                    modified = myrec.modified,
                    bbox = myrec.bbox,
                    references = myrec.references,
                ))
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrow to Exception and keep logging.
        except Exception:
            logger.exception('could not get items for csw.')
        return results

    @view_config(route_name='wizard_csw_select', renderer='json')
    def select_record(self):
        """Toggle a record id in the wizard selection and persist it."""
        recordid = self.request.matchdict.get('recordid')
        # TODO: refactor this ... not efficient
        appstruct = self.appstruct()
        if recordid is not None:
            selection = appstruct.get('selection', [])
            if recordid in selection:
                selection.remove(recordid)
            else:
                selection.append(recordid)
            appstruct['selection'] = selection
            self.success(appstruct)
        return {}

    def custom_view(self):
        """Run the catalog search and build the results grid for the template."""
        query = self.request.params.get('query', None)
        # NOTE(review): `checkbox` is read but never used -- confirm the
        # template still sends it before removing.
        checkbox = self.request.params.get('checkbox', None)
        items = self.search_csw(query)
        for item in items:
            # TODO: refactor this
            if item['identifier'] in self.appstruct().get('selection', []):
                item['selected'] = True
            else:
                item['selected'] = False
        grid = CatalogSearchGrid(
            self.request,
            items,
            ['title', 'format', 'selected'],
        )
        return dict(grid=grid, items=items)

    @view_config(route_name='wizard_csw', renderer='../templates/wizard/csw.pt')
    def view(self):
        return super(CatalogSearch, self).view()
from phoenix.grid import MyGrid
class CatalogSearchGrid(MyGrid):
    """Grid for CSW search results with custom cells for the title, format,
    modification-time and selection-toggle columns."""

    def __init__(self, request, *args, **kwargs):
        super(CatalogSearchGrid, self).__init__(request, *args, **kwargs)
        # Register the custom cell renderers.
        for column, renderer in (('selected', self.selected_td),
                                 ('title', self.title_td),
                                 ('format', self.format_td),
                                 ('modified', self.modified_td)):
            self.column_formats[column] = renderer

    def title_td(self, col_num, i, item):
        """Render the title cell from title, abstract and subjects."""
        return self.render_title_td(
            item['title'], item['abstract'], item.get('subjects'))

    def format_td(self, col_num, i, item):
        """Render the format cell from format and source."""
        return self.render_format_td(item['format'], item['source'])

    def modified_td(self, col_num, i, item):
        """Render the last-modified timestamp cell."""
        return self.render_timestamp_td(timestamp=item.get('modified'))

    def selected_td(self, col_num, i, item):
        """Render a thumbs-up/down toggle link for the record selection."""
        from string import Template
        from webhelpers2.html.builder import HTML
        is_selected = item.get('selected') == True
        icon_class = ("glyphicon glyphicon-thumbs-up" if is_selected
                      else "glyphicon glyphicon-thumbs-down")
        link_tmpl = Template("""\
        <a class="select" data-value="${recordid}" href="#"><i class="${icon_class}"></i></a>
        """)
        markup = link_tmpl.substitute({'recordid': item['identifier'],
                                       'icon_class': icon_class})
        return HTML.td(HTML.literal(markup))
"ehbrecht@dkrz.de"
] | ehbrecht@dkrz.de |
232c7639ba3f954ee8f10d3f5f37d0a9a52dac8b | 381d5b981dbcff769297351467f4b3e994668a84 | /cmo_purchase_group/models/common.py | 6ff66c6dccda1e3a53ce1b8dc7782c690f1922c7 | [] | no_license | jutamatk/cmo_specific | f6c36da767f267b4c24a6933da54fa1536c4c309 | 14f5232dfde67f5d5dbeb4cf28538132954403cb | refs/heads/master | 2020-03-22T02:48:31.642768 | 2018-05-14T10:28:23 | 2018-05-14T10:28:23 | 139,394,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | # -*- coding: utf-8 -*-
from openerp import api
from lxml import etree
class Common(object):
    """Shared view helpers for the purchase-group module."""

    @api.model
    def set_right_readonly_group(self, res):
        """Make the view architecture in *res* read-only.

        Parses res['arch'], sets the create/edit/delete attributes on the
        root element to 'false', serializes the tree back and returns *res*.
        """
        root_element = etree.fromstring(res['arch'])
        for action in ('create', 'edit', 'delete'):
            root_element.set(action, 'false')
        res['arch'] = etree.tostring(root_element)
        return res
| [
"tharathip.chaweewongphan@gmail.com"
] | tharathip.chaweewongphan@gmail.com |
044fb07a14c80e7e229a65320964625ed26c6cab | 8f1d6f17d3bdad867518b7b0a164adfe6aeeed95 | /detection/retinaface/rcnn/PY_OP/rpn_fpn_ohem3.py | b8f7d462ec9aec245852338b392fb4d8afd3311c | [
"MIT",
"LicenseRef-scancode-proprietary-license"
] | permissive | xwyangjshb/insightface | 2c7f030a5d1f5a24b18967bd0d775ee33933d37f | ae233babaf7614ef4ef28dac0171205835d78d64 | refs/heads/master | 2022-09-29T07:49:22.944700 | 2022-09-22T11:36:12 | 2022-09-22T11:36:12 | 221,020,460 | 1 | 0 | MIT | 2019-11-11T16:16:56 | 2019-11-11T16:16:55 | null | UTF-8 | Python | false | false | 7,624 | py | from __future__ import print_function
import sys
import mxnet as mx
import numpy as np
from distutils.util import strtobool
from ..config import config, generate_config
# Debug-statistics accumulator: key 0 counts forward calls; __init__ adds
# one [total, fg, fg_correct] list per feature stride -- presumably only
# consumed by the commented-out logging in forward(); verify before removal.
STAT = {0: 0}
# Interval (in forward calls) between printouts in that disabled logging.
STEP = 28800
class RPNFPNOHEM3Operator(mx.operator.CustomOp):
    """Online hard example mining (OHEM) for RPN anchor classification.

    Per image, the raw anchor labels are subsampled to at most
    RPN_FG_FRACTION * RPN_BATCH_SIZE foreground anchors plus a budget of
    background anchors; all other anchors are set to -1 (ignored).
    Outputs the subsampled labels, a per-anchor foreground weight mask and
    the number of kept foreground anchors per image.
    """
    def __init__(self, stride=0, network='', dataset='', prefix=''):
        super(RPNFPNOHEM3Operator, self).__init__()
        # stride is only used by the (currently disabled) debug statistics.
        self.stride = int(stride)
        self.prefix = prefix
        generate_config(network, dataset)
        # 0 = random subsampling, 1 = hardest examples (default),
        # 2 = hardest examples with a capped fg:bg ratio.
        self.mode = config.TRAIN.OHEM_MODE #0 for random 10:245, 1 for 10:246, 2 for 10:30, mode 1 for default
        global STAT
        for k in config.RPN_FEAT_STRIDE:
            STAT[k] = [0, 0, 0]

    def forward(self, is_train, req, in_data, out_data, aux):
        """Subsample labels per image and emit (labels, anchor_weight, valid_count)."""
        global STAT
        cls_score = in_data[0].asnumpy() #BS, 2, ANCHORS
        labels_raw = in_data[1].asnumpy() # BS, ANCHORS
        A = config.NUM_ANCHORS
        anchor_weight = np.zeros((labels_raw.shape[0], labels_raw.shape[1], 1),
                                 dtype=np.float32)
        valid_count = np.zeros((labels_raw.shape[0], 1), dtype=np.float32)
        #print('anchor_weight', anchor_weight.shape)
        #assert labels.shape[0]==1
        #assert cls_score.shape[0]==1
        #assert bbox_weight.shape[0]==1
        #print('shape', cls_score.shape, labels.shape, file=sys.stderr)
        #print('bbox_weight 0', bbox_weight.shape, file=sys.stderr)
        #bbox_weight = np.zeros( (labels_raw.shape[0], labels_raw.shape[1], 4), dtype=np.float32)
        _stat = [0, 0, 0]
        for ibatch in range(labels_raw.shape[0]):
            _anchor_weight = np.zeros((labels_raw.shape[1], 1),
                                      dtype=np.float32)
            labels = labels_raw[ibatch]
            # Per-anchor "foreground-ness" margin (fg logit minus bg logit);
            # used to rank examples by difficulty below.
            fg_score = cls_score[ibatch, 1, :] - cls_score[ibatch, 0, :]

            fg_inds = np.where(labels > 0)[0]
            num_fg = int(config.TRAIN.RPN_FG_FRACTION *
                         config.TRAIN.RPN_BATCH_SIZE)
            origin_num_fg = len(fg_inds)
            #print(len(fg_inds), num_fg, file=sys.stderr)
            if len(fg_inds) > num_fg:
                if self.mode == 0:
                    # Mode 0: randomly disable the excess foreground anchors.
                    disable_inds = np.random.choice(fg_inds,
                                                    size=(len(fg_inds) -
                                                          num_fg),
                                                    replace=False)
                    labels[disable_inds] = -1
                else:
                    # Hard mining: keep the num_fg positives with the LOWEST
                    # fg score (ascending argsort), i.e. the hardest positives.
                    pos_ohem_scores = fg_score[fg_inds]
                    order_pos_ohem_scores = pos_ohem_scores.ravel().argsort()
                    sampled_inds = fg_inds[order_pos_ohem_scores[:num_fg]]
                    labels[fg_inds] = -1
                    labels[sampled_inds] = 1

            n_fg = np.sum(labels > 0)
            fg_inds = np.where(labels > 0)[0]
            # Background budget fills the rest of the batch quota.
            num_bg = config.TRAIN.RPN_BATCH_SIZE - n_fg
            if self.mode == 2:
                # Mode 2: cap the bg count at a fixed fg:bg ratio (min 48).
                num_bg = max(
                    48, n_fg * int(1.0 / config.TRAIN.RPN_FG_FRACTION - 1))

            bg_inds = np.where(labels == 0)[0]
            origin_num_bg = len(bg_inds)
            if num_bg == 0:
                labels[bg_inds] = -1
            elif len(bg_inds) > num_bg:
                # sort ohem scores
                if self.mode == 0:
                    disable_inds = np.random.choice(bg_inds,
                                                    size=(len(bg_inds) -
                                                          num_bg),
                                                    replace=False)
                    labels[disable_inds] = -1
                else:
                    # Hard mining: keep the num_bg negatives with the HIGHEST
                    # fg score (descending argsort), i.e. the hardest negatives.
                    neg_ohem_scores = fg_score[bg_inds]
                    order_neg_ohem_scores = neg_ohem_scores.ravel().argsort(
                    )[::-1]
                    sampled_inds = bg_inds[order_neg_ohem_scores[:num_bg]]
                    #print('sampled_inds_bg', sampled_inds, file=sys.stderr)
                    labels[bg_inds] = -1
                    labels[sampled_inds] = 0

            if n_fg > 0:
                # Reorder labels from anchor-major (A, H*W) to position-major
                # (H*W, A) before indexing the weight mask -- presumably to
                # match the layout of the bbox branch; TODO confirm.
                order0_labels = labels.reshape((1, A, -1)).transpose(
                    (0, 2, 1)).reshape((-1, ))
                bbox_fg_inds = np.where(order0_labels > 0)[0]
                #print('bbox_fg_inds, order0 ', bbox_fg_inds, file=sys.stderr)
                _anchor_weight[bbox_fg_inds, :] = 1.0
            anchor_weight[ibatch] = _anchor_weight
            valid_count[ibatch][0] = n_fg
            #if self.prefix=='face':
            #  #print('fg-bg', self.stride, n_fg, num_bg)
            #  STAT[0]+=1
            #  STAT[self.stride][0] += config.TRAIN.RPN_BATCH_SIZE
            #  STAT[self.stride][1] += n_fg
            #  STAT[self.stride][2] += np.sum(fg_score[fg_inds]>=0)
            #  #_stat[0] += config.TRAIN.RPN_BATCH_SIZE
            #  #_stat[1] += n_fg
            #  #_stat[2] += np.sum(fg_score[fg_inds]>=0)
            #  #print('stride num_fg', self.stride, n_fg, file=sys.stderr)
            #  #ACC[self.stride] += np.sum(fg_score[fg_inds]>=0)
            #  #x = float(labels_raw.shape[0]*len(config.RPN_FEAT_STRIDE))
            #  x = 1.0
            #  if STAT[0]%STEP==0:
            #    _str = ['STAT']
            #    STAT[0] = 0
            #    for k in config.RPN_FEAT_STRIDE:
            #      acc = float(STAT[k][2])/STAT[k][1]
            #      acc0 = float(STAT[k][1])/STAT[k][0]
            #      #_str.append("%d: all-fg(%d, %d, %.4f), fg-fgcorrect(%d, %d, %.4f)"%(k,STAT[k][0], STAT[k][1], acc0, STAT[k][1], STAT[k][2], acc))
            #      _str.append("%d: (%d, %d, %.4f)"%(k, STAT[k][1], STAT[k][2], acc))
            #      STAT[k] = [0,0,0]
            #    _str = ' | '.join(_str)
            #    print(_str, file=sys.stderr)
            #if self.stride==4 and num_fg>0:
            #  print('_stat_', self.stride, num_fg, num_bg, file=sys.stderr)

        #labels_ohem = mx.nd.array(labels_raw)
        #anchor_weight = mx.nd.array(anchor_weight)

        #print('valid_count', self.stride, np.sum(valid_count))
        #print('_stat', _stat, valid_count)
        # Write the three outputs (labels mutated in place above).
        for ind, val in enumerate([labels_raw, anchor_weight, valid_count]):
            val = mx.nd.array(val)
            self.assign(out_data[ind], req[ind], val)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        # No gradient flows through the mining step: zero all input grads.
        for i in range(len(in_grad)):
            self.assign(in_grad[i], req[i], 0)
@mx.operator.register('rpn_fpn_ohem3')
class RPNFPNOHEM3Prop(mx.operator.CustomOpProp):
    """Property class registering the rpn_fpn_ohem3 custom operator."""

    def __init__(self, stride=0, network='', dataset='', prefix=''):
        # No gradient is needed from the outputs of this op.
        super(RPNFPNOHEM3Prop, self).__init__(need_top_grad=False)
        self.stride = stride
        self.network = network
        self.dataset = dataset
        self.prefix = prefix

    def list_arguments(self):
        """Inputs: classification scores and raw anchor labels."""
        return ['cls_score', 'labels']

    def list_outputs(self):
        """Outputs: mined labels, per-anchor weights, per-image fg counts."""
        return ['labels_ohem', 'anchor_weight', 'valid_count']

    def infer_shape(self, in_shape):
        """Derive output shapes from the labels input (batch, anchors)."""
        labels_shape = in_shape[1]
        anchor_weight_shape = [labels_shape[0], labels_shape[1], 1]
        valid_count_shape = [labels_shape[0], 1]
        return in_shape, \
               [labels_shape, anchor_weight_shape, valid_count_shape]

    def create_operator(self, ctx, shapes, dtypes):
        return RPNFPNOHEM3Operator(self.stride, self.network, self.dataset,
                                   self.prefix)

    def declare_backward_dependency(self, out_grad, in_data, out_data):
        # Backward needs none of the forward tensors.
        return []
return []
| [
"guojia@gmail.com"
] | guojia@gmail.com |
16c92653ecd2a8eed34a9a224ebae4975ef7cc51 | 972c508bbd49cbb7800af2729328d5421fcbba8f | /flink-python/pyflink/table/tests/test_sort.py | 0d490c7120cbfaa869394ac96ddb3363bd2dd856 | [
"Apache-2.0",
"CC-BY-2.5",
"OFL-1.1",
"AGPL-3.0-only",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-jdom",
"GCC-exception-3.1",
"MIT-0",
"MPL-2.0-no-copyleft-exception",
"CDDL-1.1",
"CDDL-1.0",
"MIT",
"CC0-1.0",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-only",
"BSD-2-Clause-Views",
"EPL-1.0",
"Classpath-exception-2.0",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"MPL-2.0",
"CC-PDDC",
"GPL-2.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"ISC"
] | permissive | Xeli/flink | 4e380cc4aa1b0a79d6eb1a1715a1b5e99993f937 | d5e74d6d4f3f6be6b8ebc63c09c42b3bc8eed0d0 | refs/heads/master | 2021-06-02T00:07:47.239600 | 2019-07-04T17:05:41 | 2019-07-04T17:07:45 | 108,726,889 | 0 | 1 | Apache-2.0 | 2019-07-04T17:05:42 | 2017-10-29T11:06:09 | Java | UTF-8 | Python | false | false | 1,766 | py | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.testing.test_case_utils import PyFlinkBatchTableTestCase
class BatchTableSortTests(PyFlinkBatchTableTestCase):
    """Tests for order_by / offset / fetch on batch tables."""

    def test_order_by_offset_fetch(self):
        source = self.t_env.from_elements([(1, "Hello")], ["a", "b"])
        result = source.order_by("a.desc").offset(2).fetch(2)
        operation = result._j_table.getQueryOperation()
        self.assertEqual(2, operation.getOffset())
        self.assertEqual(2, operation.getFetch())
        self.assertEqual('[desc(a)]', operation.getOrder().toString())
if __name__ == '__main__':
    import unittest
    try:
        # Prefer XML test reports (for CI) when xmlrunner is installed.
        import xmlrunner
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
    except ImportError:
        # Fall back to unittest's default text runner.
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| [
"sunjincheng121@gmail.com"
] | sunjincheng121@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.