blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3c67ba89cafbbb153d4b3b5ee164d65a99332ec0 | 82f1b4c0bccd66933f93d02703a3948f08ebc1a9 | /tests/pytests/unit/states/test_bower.py | ed8cb9cfa178fe7804e3a3179833d7640555b57a | [
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] | permissive | waynew/salt | ddb71301944b64f5429e0dbfeccb0ea873cdb62d | ac9f139f795295de11be3fb1490ab8cec29611e5 | refs/heads/master | 2023-01-24T10:43:53.104284 | 2022-03-29T04:27:22 | 2022-03-29T13:45:09 | 163,890,509 | 1 | 0 | Apache-2.0 | 2019-01-02T21:17:12 | 2019-01-02T21:17:11 | null | UTF-8 | Python | false | false | 7,232 | py | """
:codeauthor: Alexander Pyatkin <asp@thexyz.net>
"""
import pytest
import salt.states.bower as bower
from salt.exceptions import CommandExecutionError
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
return {bower: {"__opts__": {"test": False}}}
def test_removed_not_installed():
"""
Test if it returns True when specified package is not installed
"""
mock = MagicMock(return_value={"underscore": {}})
with patch.dict(bower.__salt__, {"bower.list": mock}):
ret = bower.removed("jquery", "/path/to/project")
expected = {
"name": "jquery",
"result": True,
"comment": "Package 'jquery' is not installed",
"changes": {},
}
assert ret == expected
def test_removed_with_error():
"""
Test if returns False when list packages fails
"""
mock = MagicMock(side_effect=CommandExecutionError)
with patch.dict(bower.__salt__, {"bower.list": mock}):
ret = bower.removed("underscore", "/path/to/project")
expected = {
"name": "underscore",
"result": False,
"comment": "Error removing 'underscore': ",
"changes": {},
}
assert ret == expected
def test_removed_existing():
"""
Test if it returns True when specified package is installed and
uninstall succeeds
"""
mock_list = MagicMock(return_value={"underscore": {}})
mock_uninstall = MagicMock(return_value=True)
with patch.dict(
bower.__salt__, {"bower.list": mock_list, "bower.uninstall": mock_uninstall}
):
ret = bower.removed("underscore", "/path/to/project")
expected = {
"name": "underscore",
"result": True,
"comment": "Package 'underscore' was successfully removed",
"changes": {"underscore": "Removed"},
}
assert ret == expected
def test_removed_existing_with_error():
"""
Test if it returns False when specified package is installed and
uninstall fails
"""
mock_list = MagicMock(return_value={"underscore": {}})
mock_uninstall = MagicMock(side_effect=CommandExecutionError)
with patch.dict(
bower.__salt__, {"bower.list": mock_list, "bower.uninstall": mock_uninstall}
):
ret = bower.removed("underscore", "/path/to/project")
expected = {
"name": "underscore",
"result": False,
"comment": "Error removing 'underscore': ",
"changes": {},
}
assert ret == expected
def test_bootstrap_with_error():
"""
Test if it return False when install packages fails
"""
mock = MagicMock(side_effect=CommandExecutionError)
with patch.dict(bower.__salt__, {"bower.install": mock}):
ret = bower.bootstrap("/path/to/project")
expected = {
"name": "/path/to/project",
"result": False,
"comment": "Error bootstrapping '/path/to/project': ",
"changes": {},
}
assert ret == expected
def test_bootstrap_not_needed():
"""
Test if it returns True when there is nothing to install
"""
mock = MagicMock(return_value=False)
with patch.dict(bower.__salt__, {"bower.install": mock}):
ret = bower.bootstrap("/path/to/project")
expected = {
"name": "/path/to/project",
"result": True,
"comment": "Directory is already bootstrapped",
"changes": {},
}
assert ret == expected
def test_bootstrap_success():
"""
Test if it returns True when install packages succeeds
"""
mock = MagicMock(return_value=True)
with patch.dict(bower.__salt__, {"bower.install": mock}):
ret = bower.bootstrap("/path/to/project")
expected = {
"name": "/path/to/project",
"result": True,
"comment": "Directory was successfully bootstrapped",
"changes": {"/path/to/project": "Bootstrapped"},
}
assert ret == expected
def test_installed_with_error():
"""
Test if it returns False when list packages fails
"""
mock = MagicMock(side_effect=CommandExecutionError)
with patch.dict(bower.__salt__, {"bower.list": mock}):
ret = bower.installed("underscore", "/path/to/project")
expected = {
"name": "underscore",
"result": False,
"comment": "Error looking up 'underscore': ",
"changes": {},
}
assert ret == expected
def test_installed_not_needed():
"""
Test if it returns True when there is nothing to install
"""
mock = MagicMock(
return_value={
"underscore": {"pkgMeta": {"version": "1.7.0"}},
"jquery": {"pkgMeta": {"version": "2.0.0"}},
}
)
with patch.dict(bower.__salt__, {"bower.list": mock}):
ret = bower.installed(
"test", "/path/to/project", ["underscore", "jquery#2.0.0"]
)
expected = {
"name": "test",
"result": True,
"comment": (
"Package(s) 'underscore, jquery#2.0.0'"
" satisfied by underscore#1.7.0, jquery#2.0.0"
),
"changes": {},
}
assert ret == expected
def test_installed_new_with_exc():
"""
Test if it returns False when install packages fails (exception)
"""
mock_list = MagicMock(return_value={})
mock_install = MagicMock(side_effect=CommandExecutionError)
with patch.dict(
bower.__salt__, {"bower.list": mock_list, "bower.install": mock_install}
):
ret = bower.installed("underscore", "/path/to/project")
expected = {
"name": "underscore",
"result": False,
"comment": "Error installing 'underscore': ",
"changes": {},
}
assert ret == expected
def test_installed_new_with_error():
"""
Test if returns False when install packages fails (bower error)
"""
mock_list = MagicMock(return_value={})
mock_install = MagicMock(return_value=False)
with patch.dict(
bower.__salt__, {"bower.list": mock_list, "bower.install": mock_install}
):
ret = bower.installed("underscore", "/path/to/project")
expected = {
"name": "underscore",
"result": False,
"comment": "Could not install package(s) 'underscore'",
"changes": {},
}
assert ret == expected
def test_installed_success():
"""
Test if it returns True when install succeeds
"""
mock_list = MagicMock(return_value={})
mock_install = MagicMock(return_value=True)
with patch.dict(
bower.__salt__, {"bower.list": mock_list, "bower.install": mock_install}
):
ret = bower.installed("underscore", "/path/to/project")
expected = {
"name": "underscore",
"result": True,
"comment": "Package(s) 'underscore' successfully installed",
"changes": {"new": ["underscore"], "old": []},
}
assert ret == expected
| [
"megan.wilhite@gmail.com"
] | megan.wilhite@gmail.com |
31b9b6a3e0c762b25c62148a0f116ae13ab70605 | e7164d44058a06331c034cc17eefe1521d6c95a2 | /tools/tieba/full-posts/auto-train/gen-title-content.old.py | 193e2a457612b3d90743df8ac1988f0cb02b821d | [] | no_license | chenghuige/gezi | fbc1e655396fbc365fffacc10409d35d20e3952c | 4fc8f9a3c5837e8add720bf6954a4f52abfff8b5 | refs/heads/master | 2021-01-20T01:57:18.362413 | 2016-11-08T15:34:07 | 2016-11-08T15:34:07 | 101,304,774 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,440 | py | #!/usr/bin/env python
#coding=gbk
# ==============================================================================
# \file gen-title-content.py
# \author chenghuige
# \date 2014-05-21 17:31:30.584101
# \Description
# ==============================================================================
import sys,os
import urllib,json
import gezi
pids = {}
titles = {}
for line in open(sys.argv[1]):
pid, label = line.rstrip().split('\t')
if pid in pids:
continue
pids[pid] = 1
if os.path.exists('./history/%s.json'%pid):
try:
obj = gezi.jsonfile2obj('./history/%s.json'%pid)
title = obj.nowPostInfo.title.encode('gbk').replace('\n', ' ')
content = obj.nowPostInfo.content.encode('gbk').replace('\n', ' ')
print '\t'.join([pid, label, title, content])
continue
except Exception:
pass
url = "http://service.tieba.baidu.com/service/post?method=getPostInfo&post_ids=a:1:{i:0;i:%s;}&format=mcpackraw"%pid
content = urllib.urlopen(url).readline().decode('gbk')
m = {}
try:
m = json.loads(content)['output'][0]
except Exception:
print "wrong [%s]"%pid
print content
print line
continue
title = m['title'].encode('gbk').replace('\n', ' ')
content = m['content'].encode('gbk').replace('\n', ' ')
#title2 = title
#if len(title2) > 10:
# title2 = title2[6:-6]
#if not title2 in titles:
# titles[title2] = 1
print '\t'.join([pid, label, title, content])
| [
"chenghuige@fa64baa9-71d1-4fed-97ae-c15534abce97"
] | chenghuige@fa64baa9-71d1-4fed-97ae-c15534abce97 |
b0c5d00acebe8abe21fa37f6cac605126880291f | 3da991a057cd81de802c40da2edd640878685258 | /caffe2/contrib/fakelowp/test/test_batchnorm_nnpi_fp16.py | bf4c83960ff3eb5a474c942dbdc2df0f0f429444 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | sjx0451/pytorch | 9f5b1c0c7c874f9da72c0190dc131944ba828ab7 | 3544f60f7602081398ee62bc5d652a87f4743dab | refs/heads/master | 2022-12-01T22:30:29.888370 | 2020-08-13T23:45:58 | 2020-08-13T23:48:31 | 287,421,291 | 2 | 0 | NOASSERTION | 2020-08-14T02:06:11 | 2020-08-14T02:06:11 | null | UTF-8 | Python | false | false | 5,108 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import unittest
import caffe2.python.fakelowp.init_shared_libs # noqa
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
core.GlobalInit(["caffe2", "--glow_global_fp16=1",
"--glow_global_fused_scale_offset_fp16=1",
"--glow_global_force_sls_fp16_accum=1"])
GLOW_LOWERED_BATCHNORM = False
def reference_spatialbn_test16(X, scale, bias, mean, var, epsilon, order):
X = X.astype(np.float16)
scale = scale.astype(np.float16)
bias = bias.astype(np.float16)
mean = mean.astype(np.float16)
# var = var.astype(np.float16)
assert(order == "NCHW")
scale = scale[np.newaxis, :, np.newaxis, np.newaxis]
bias = bias[np.newaxis, :, np.newaxis, np.newaxis]
mean = mean[np.newaxis, :, np.newaxis, np.newaxis]
var = var[np.newaxis, :, np.newaxis, np.newaxis]
Y = ((X - mean) * (scale / np.sqrt(var + epsilon).astype(np.float16))) + bias
return Y.astype(np.float32)
# Test the lowered BN op
class BatchnormTest(unittest.TestCase):
# TODO: using hypothesis seed, sweep dimensions
@given(seed=st.integers(0, 65535),
size=st.integers(2, 30),
input_channels=st.integers(2, 40),
batch_size=st.integers(2, 20))
@settings(max_examples=100, deadline=None)
def test_bn(self, seed, size, input_channels, batch_size):
workspace.ResetWorkspace()
np.random.seed(seed)
order = "NCHW"
epsilon = 1e-3
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(["X", "scale", "bias", "mean", "var"])
pred_net.external_output.append("Y")
pred_net.op.add().CopyFrom(
core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "mean", "var"],
["Y"],
order=order,
is_test=True,
epsilon=epsilon
)
)
if GLOW_LOWERED_BATCHNORM:
refopname = "SpatialBNFakeLoweredFp16NNPI"
else:
refopname = "SpatialBNFakeFp16NNPI"
pred_net_ref = caffe2_pb2.NetDef()
pred_net_ref.name = "pred"
pred_net_ref.external_input.extend(["X", "scale", "bias", "mean", "var"])
pred_net_ref.external_output.append("X")
pred_net_ref.op.add().CopyFrom(
core.CreateOperator(
refopname,
["X", "scale", "bias", "mean", "var"],
["Y"],
order=order,
is_test=True,
epsilon=epsilon
)
)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
mean = np.random.randn(input_channels).astype(np.float32)
var = np.random.rand(input_channels).astype(np.float32) + 0.5
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
workspace.FeedBlob("scale", scale)
workspace.FeedBlob("bias", bias)
workspace.FeedBlob("mean", mean)
workspace.FeedBlob("var", var)
# Use for reference to debug
# Y_np = reference_spatialbn_test16(X, scale, bias, mean, var, epsilon, order)
pred_net_onnxified = onnxifi_caffe2_net(
pred_net,
{"X": [batch_size, input_channels, size, size],
"scale": [input_channels],
"bias": [input_channels],
"mean": [input_channels],
"var": [input_channels]},
debug=True,
adjust_batch=False,
use_onnx=False
)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.FeedBlob("X", X)
workspace.CreateNet(pred_net_onnxified)
workspace.CreateNet(pred_net_ref)
workspace.RunNet(pred_net_ref.name)
Y_c2 = workspace.FetchBlob("Y")
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob("Y")
if not np.allclose(Y_glow.astype(np.float16), Y_c2.astype(np.float16)):
diff = np.abs(Y_glow - Y_c2).astype(np.float16)
print_test_debug_info(
"bn",
{
"seed": seed,
"scale": scale,
"bias": bias,
"mean": mean,
"var": var,
"Y_np": Y_c2,
"Y_glow": Y_glow,
"diff": diff,
"rowwise_diff": np.max(np.abs(diff), -1)})
assert(0)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
db391bcdfb9e854d008093d70ead2c93ce64fcb7 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2374/60670/275031.py | 203cead93aeb306fc075e927ff4736ec0456a0ed | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | t=int(input())
for k in range(0,t):
n=int(input())
a=list(map(int,input().split()))
dic={}
for i in a:
if i in dic:
dic[i]+=1
else:
dic[i]=1
ss=dic.items()
print(ss) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
e8715164e3ce1aec3c5360de9ccb127390c598ab | 5ba3115523fb052d32db827e09443248ec5f6629 | /algorithm/시험대비문제풀이/0306/반나누기.py | 79a87a236295dcaff684f3b0ea5b681bc05b80f7 | [] | no_license | oliviaspark0825/TIL | 841095003ae794e14bd8c7e8c883826667c25f37 | 8bc66836f9a1eea5f42e9e1172f81f005abc042d | refs/heads/master | 2023-01-10T22:14:15.341489 | 2019-08-22T09:09:52 | 2019-08-22T09:09:52 | 162,099,057 | 0 | 0 | null | 2023-01-04T07:52:28 | 2018-12-17T08:32:43 | Jupyter Notebook | UTF-8 | Python | false | false | 1,562 | py | import sys
sys.stdin = open('반나누기.txt')
T = int(input())
for i in range(1):
N, k_min, k_max = [int(i) for i in input().split()]
test_score = list(map(int,input().split()))
test_score.sort()
print(test_score)
max = 0
min = 101
scorelist = [0]*(N+1) # 점수 갯수 분포테이블
score = sorted(list(set(test_score)))
print(score)
# 점수에 따라서 명수를 체크했음
for s in range(len(test_score)):
scorelist[test_score[s]]+=1
# 점수테이블에서 0이 아닌 점수가 3개 이상일경우, 그 사이를 기준으로 나누면 됨
# for t in range(len(scorelist)):
# if scorelist[t] != 0:
# score.append(t)
# score.sort()
# # print(score)
if len(score)>=3:
T1 = score[-1]
T2 = score[-2]
A = 0
B = 0
C = 0
classes = {'A':0, 'B':0, 'C':0}
max_c = 0
min_c = 101
# 학생을 점수에 따라 나누기
# for i in range(N):
# if test_score[i] >= T1:
# classes['A'] += 1
# elif T2<=test_score[i]<T1:
# classes['B'] += 1
# else:
# classes['C'] += 1
#
# for i in classes:
# if classes[i]> max_c:
# max_c = classes[i]
# if classes[i]< min_c:
# min_c = classes[i]
# # print(classes)
# # 반별 사람 수가 k_min 보다 적으면 탈락
# for i in classes:
# if classes[i] < k_min:
# print (-1)
# break
# else:
# print(max_c - min_c)
| [
"suhyunpark0825@gmail.com"
] | suhyunpark0825@gmail.com |
379f0d8f15a6af51620f3bf5bbfaf8f9c66c05bd | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /hyperbolic_discount/run_experiment_local.py | 6c1a91c1e8e3ae0b5a9c0ef818e5863247a6c99f | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 2,970 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run experiment."""
from absl import flags
from dopamine.discrete_domains import atari_lib
from dopamine.discrete_domains import run_experiment
import gin
from hyperbolic_discount import hyperbolic_dqn_agent
from hyperbolic_discount import hyperbolic_rainbow_agent
FLAGS = flags.FLAGS
def create_agent(sess, environment, summary_writer=None):
"""Creates a DQN agent.
Args:
sess: A `tf.Session`object for running associated ops.
environment: An Atari 2600 environment.
summary_writer: A Tensorflow summary writer to pass to the agent for
in-agent training statistics in Tensorboard.
Returns:
A DQN or SARSA agent.
"""
if FLAGS.agent_name == 'hyperbolic_dqn':
return hyperbolic_dqn_agent.HyperDQNAgent(
sess,
num_actions=environment.action_space.n,
summary_writer=summary_writer)
elif FLAGS.agent_name == 'hyperbolic_rainbow':
return hyperbolic_rainbow_agent.HyperRainbowAgent(
sess,
num_actions=environment.action_space.n,
summary_writer=summary_writer)
@gin.configurable()
def create_runner(base_dir,
create_agent_fn,
schedule='continuous_train_and_eval'):
"""Creates an experiment Runner.
TODO(b/): Figure out the right idiom to create a Runner. The current mechanism
of using a number of flags will not scale and is not elegant.
Args:
base_dir: Base directory for hosting all subdirectories.
create_agent_fn: A function that takes as args a Tensorflow session and a
Gym Atari 2600 environment, and returns an agent.
schedule: string, which type of Runner to use.
Returns:
runner: A `run_experiment.Runner` like object.
Raises:
ValueError: When an unknown schedule is encountered.
"""
assert base_dir is not None
# Continuously runs training and eval till max num_iterations is hit.
if schedule == 'continuous_train_and_eval':
return run_experiment.Runner(base_dir, create_agent_fn,
atari_lib.create_atari_environment)
# Continuously runs training till maximum num_iterations is hit.
elif schedule == 'continuous_train':
return run_experiment.TrainRunner(base_dir, create_agent_fn,
atari_lib.create_atari_environment)
else:
raise ValueError('Unknown schedule: {}'.format(schedule))
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
ec576283d8bf50229e5af272fd8e0dd5488c18bb | 51da71a26628a3c6d1814e6da38f5c48f3101d9b | /uri/1189.py | e0cff1e77c7a09e65f260f08c26d28319a3cc77f | [] | no_license | da-ferreira/uri-online-judge | 279156249a1b0be49a7b29e6dbce85a293a47df1 | 6ec97122df3cb453ea26e0c9f9206a2e470ba37d | refs/heads/main | 2023-03-30T11:47:05.120388 | 2021-04-02T19:45:25 | 2021-04-02T19:45:25 | 309,744,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py |
matriz = []
for i in range(12):
linha = [0] * 12
matriz.append(linha)
operacao = input()
for i in range(12):
for j in range(12):
matriz[i][j] = float(input())
soma = 0
comeco = 1
fim = 11
for i in range(0, 5):
for j in range(comeco, fim):
soma += matriz[j][i]
comeco += 1
fim -= 1
if operacao == 'S':
print('{:.1f}'.format(soma))
else:
print('{:.1f}'.format(soma / 30))
| [
"noreply@github.com"
] | da-ferreira.noreply@github.com |
0233325600dfb9a088aa16e8e37c0238ae901baa | 8f5f0c3ef83fdd482387973149738f6178477a42 | /medium/intervals/interval_list_intersections.py | b15b462795b0e1d71b17696e3e612ddd56c31022 | [] | no_license | nicokuzak/leetcode | 79a5771ad83786cc7dbfd790f8fffcf1ce58794e | 39b0235dc429a97a7cba0689d44641a6af6d7a32 | refs/heads/main | 2023-04-06T21:02:09.553185 | 2021-04-14T22:21:20 | 2021-04-14T22:21:20 | 336,847,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,763 | py | """You are given two lists of closed intervals, firstList and secondList, where firstList[i] = [starti, endi] and secondList[j] = [startj, endj]. Each list of intervals is pairwise disjoint and in sorted order.
Return the intersection of these two interval lists.
A closed interval [a, b] (with a < b) denotes the set of real numbers x with a <= x <= b.
The intersection of two closed intervals is a set of real numbers that are either empty or represented as a closed interval. For example, the intersection of [1, 3] and [2, 4] is [2, 3].
Example 1:
Input: firstList = [[0,2],[5,10],[13,23],[24,25]], secondList = [[1,5],[8,12],[15,24],[25,26]]
Output: [[1,2],[5,5],[8,10],[15,23],[24,24],[25,25]]
Example 2:
Input: firstList = [[1,3],[5,9]], secondList = []
Output: []
Example 3:
Input: firstList = [], secondList = [[4,8],[10,12]]
Output: []
Example 4:
Input: firstList = [[1,7]], secondList = [[3,10]]
Output: [[3,7]]
Constraints:
0 <= firstList.length, secondList.length <= 1000
firstList.length + secondList.length >= 1
0 <= starti < endi <= 109
endi < starti+1
0 <= startj < endj <= 109
endj < startj+1"""
from typing import List
class Solution:
def intervalIntersection(self, firstList: List[List[int]], secondList: List[List[int]]) -> List[List[int]]:
if firstList == [] or secondList == []:
return []
res = []
i = j = 0
while i < len(firstList) and j < len(secondList):
sf, ss, ef, es = firstList[i][0], secondList[j][0], firstList[i][1], secondList[j][1]
if sf <= es and ef >= ss:
res.append([max(sf, ss), min(ef, es)])
if ef <= es:
i += 1
if es <= ef:
j += 1
return res | [
"nicokuzak95@gmail.com"
] | nicokuzak95@gmail.com |
55d659d2380723315476b965fc470c2a4405d138 | 8af47d2e88cabda034c662cc4ead32f0def6432a | /setup.py | c07280af686cf2e3ddba19ff2d2751c064775757 | [] | no_license | akashp1997/asv_mq | fd8d8f840280ccf967f1e97b5976106d641821e7 | c9835cee664db5eab35c64d836ea3e6406b72189 | refs/heads/master | 2022-01-07T04:45:12.780140 | 2019-05-21T07:53:42 | 2019-05-21T07:53:42 | 146,132,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,001 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
if (sys.version_info.major!=3):
raise SystemError("Python version 2 is installed. Please use Python 3.")
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open(os.path.join(BASE_DIR,"README.md")).read()
requirements = open(os.path.join(BASE_DIR,"requirements.txt")).read().strip().split('\n')
import asvmq
version = asvmq.__version__
setup(
name="asvmq",
version=version,
description="ASV Messaging Queue API for message passing between processes.",
long_description=readme,
long_description_type="text/markdown",
author="Akash Purandare",
author_email="akash.p1997@gmail.com",
url="https://github.com/akashp1997/asv_mq",
packages=["asvmq"],
include_package_data=True,
install_requires=requirements,
license="BSD-3-Clause",
zip_safe=True,
keywords="asv_mq"
)
| [
"akash.p1997@gmail.com"
] | akash.p1997@gmail.com |
39095e1cece00399cfad8be38450b5aab546fe7c | 9ecd7568b6e4f0f55af7fc865451ac40038be3c4 | /tianlikai/SX/hanzhong_zhongbiao.py | 19c8cfb174fede2f6399d8672131e6c7f3dcea80 | [] | no_license | jasonTLK/scrapy | f5ac6e575e902c077a07dc0eb9d228506f1a173f | 2de8245fbc8731cfd868bbd91168e26271045300 | refs/heads/master | 2021-01-20T04:22:23.080864 | 2017-04-28T07:46:29 | 2017-04-28T07:46:29 | 89,681,374 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,751 | py | # -*- coding: utf-8 -*-
import scrapy
from scrapy import Request
from scrapy.selector import Selector
try:
from scrapy.spiders import Spider
except:
from scrapy.spiders import BaseSpider as Spider
import datetime
from items.biding import biding_gov
from utils.toDB import *
# 陕西汉中招投标网站
# 中标信息
class hz_gov_Spider(scrapy.Spider):
name = "hanzhong_zhongbiao.py"
allowed_domains = ["http://www.gzsggzyjyzx.cn"]
custom_settings = {
"DOWNLOADER_MIDDLEWARES": {
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
'middlewares.useragent_middleware.RandomUserAgent': 400,
# 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None,
# 'middlewares.proxy_middleware.ProxyMiddleware': 250,
# 'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
# 'middlewares.retry_middleware.RetryWithProxyMiddleware': 300,
# 'middlewares.timestamp_middleware.TimestampMiddleware': 120
}
}
def start_requests(self):
page =1
while page<=5:
url ="http://hzcg.hanzhong.gov.cn/cgjg/zbgg/index_"+str(page)+".html"
page+=1
# print url
yield Request(url=url,callback=self.parse)
# start_urls=[
# "http://hzcg.hanzhong.gov.cn/cgjg/zbgg/index_1.html"
# ]
def parse(self, response):
selector = Selector(response)
names = selector.xpath("//table[@width='670']//a/text()").extract()
urls = selector.xpath("//table[@width='670']//a/@href").extract()
for i in range(len(urls)):
str ="".join(names[i]) + "," + "".join(urls[i]).replace("./","http://hzcg.hanzhong.gov.cn/cgjg/zbgg/")
url = "".join(urls[i]).replace("./","http://hzcg.hanzhong.gov.cn/cgjg/zbgg/")
yield Request(url=url, callback=self.parse2, meta={"info": str})
def parse2(self, response):
infos = response.meta["info"]
items = biding_gov()
items["url"] = response.url
items["name"] = "".join(infos).split(",")[0]
items["info"] = ""
items["create_time"] = datetime.datetime.now()
items["update_time"] = datetime.datetime.now()
page_info = "".join(response.body)
items["info"] = "".join(page_info)
db = MongodbHandle("172.20.3.10 ", 27017, "spiderBiding")
db.get_insert(
"bid_SX_HanZhong",
{
"url": items["url"],
"name": items["name"],
"info": items["info"],
"create_time": items["create_time"],
"update_time": items["update_time"]
}
)
print infos
| [
"18723163167@163.com"
] | 18723163167@163.com |
b40a8d788ac3f01474354bd0045bc9d7531c86b0 | 34a70bf642b6f678dce2b22efc598656a1a7d90a | /Divide&ConQuer/power_function.py | 2357352c48b27f5f36964bbbef76c4e5dda0473e | [] | no_license | CodeForContribute/Algos-DataStructures | ce89a313b3e32de57f23b263966a84bb020e6a18 | d0ddc7c8f94270f9269a9a5233b3a07fe59c9a1f | refs/heads/master | 2022-03-28T03:08:17.934077 | 2019-11-23T21:58:03 | 2019-11-23T21:58:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | # Method1 : Time Complexity : O(n)
def power(x, y):
if y == 0:
return 1
elif int(y % 2) == 0:
return power(x, int(y / 2)) * power(x, int(y / 2))
else:
return x * power(x, int(y / 2)) * power(x, int(y / 2))
# Method2: Time Complexity : O(log(n))
def power_function(x, y):
if y == 0:
return 1
temp = power_function(x, int(y / 2))
if y % 2 == 0:
return temp * temp
else:
if y > 0:
return x * temp * temp
return (temp * temp) / x
if __name__ == '__main__':
x = 2
y = -3
print(power(x, y))
print('%.6f' % power_function(x, y))
| [
"RAUSHAN.KUMAR2@otis.COM"
] | RAUSHAN.KUMAR2@otis.COM |
28161b65df0415fb7d18df691fd621ba62f44931 | b8a3e758eff2922ff6abc77947d879e3f6d1afa3 | /ws_moveit/build/moveit_msgs/catkin_generated/pkg.installspace.context.pc.py | c5447721e45ed6e844d1ce5e5589e2682fa2417b | [] | no_license | rrowlands/ros-baxter-coffee | ab7a496186591e709f88ccfd3b9944428e652f3e | 32473c3012b7ec4f91194069303c85844cf1aae7 | refs/heads/master | 2016-09-05T20:58:20.428241 | 2013-12-02T23:10:44 | 2013-12-02T23:10:44 | 14,313,406 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/p/peth8881/robotics/ws_moveit/install/include".split(';') if "/home/p/peth8881/robotics/ws_moveit/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "std_msgs;actionlib_msgs;sensor_msgs;geometry_msgs;trajectory_msgs;shape_msgs;object_recognition_msgs;octomap_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "moveit_msgs"
PROJECT_SPACE_DIR = "/home/p/peth8881/robotics/ws_moveit/install"
PROJECT_VERSION = "0.5.2"
| [
"peth8881@csel-112-02.csel.loc"
] | peth8881@csel-112-02.csel.loc |
9edbd1f48165c22827863683209e238077040bcf | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02714/s605942361.py | f4180629c2b27da485494f42a20a4afeac754abc | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | N = int(input())
S = input()
Rr = 0
Gr = 0
Br = 0
Rl = 0
Gl = 0
Bl = 0
for i in range(1,N):
if S[i] == "R":
Rr += 1
elif S[i] == "G":
Gr += 1
else:
Br += 1
ans = 0
for i in range(N-1):
if S[i] == "R":
ans += Gl*Br
ans += Bl*Gr
Rl += 1
elif S[i] == "G":
ans += Rl*Br
ans += Bl*Rr
Gl += 1
else:
ans += Rl*Gr
ans += Gl*Rr
Bl += 1
if S[i+1] == "R":
Rr -= 1
elif S[i+1] == "G":
Gr -= 1
else:
Br -= 1
for i in range(1, N-1):
for k in range(1, min(i, N-1-i)+1):
if S[i] != S[i+k] and S[i] != S[i-k] and S[i+k] != S[i-k]:
ans -= 1
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a0c37510531af69b6631e296f87085445fa7a23e | bc441bb06b8948288f110af63feda4e798f30225 | /alert_service_sdk/model/metadata_center/stream_aggregate_states_pb2.pyi | 546f81ca45a8d88483572852856bec57a264fe15 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,420 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from alert_service_sdk.model.metadata_center.stream_aggregate_rule_pb2 import (
StreamAggregateRule as alert_service_sdk___model___metadata_center___stream_aggregate_rule_pb2___StreamAggregateRule,
)
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class StreamAggregateStates(google___protobuf___message___Message):
    # Auto-generated mypy stub for the StreamAggregateStates protobuf
    # message (see the "@generated ... Do not edit!" file header).
    # Prefer regenerating from the .proto over hand-editing this class.
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    org = ... # type: builtin___int
    command = ... # type: typing___Text
    @property
    def payload(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[alert_service_sdk___model___metadata_center___stream_aggregate_rule_pb2___StreamAggregateRule]: ...
    def __init__(self,
        *,
        org : typing___Optional[builtin___int] = None,
        command : typing___Optional[typing___Text] = None,
        payload : typing___Optional[typing___Iterable[alert_service_sdk___model___metadata_center___stream_aggregate_rule_pb2___StreamAggregateRule]] = None,
        ) -> None: ...
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> StreamAggregateStates: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> StreamAggregateStates: ...
    def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"command",b"command",u"org",b"org",u"payload",b"payload"]) -> None: ...
| [
"service@easyops.cn"
] | service@easyops.cn |
6debcfb4dd4e6eabe94ddd35f9f2a9eb878f9569 | 056adbbdfb968486ecc330f913f0de6f51deee33 | /223-rectangle-area/rectangle-area.py | 51a8aac8e9b5ef9b29a6e1361d9e359a96848a5e | [] | no_license | privateHmmmm/leetcode | b84453a1a951cdece2dd629c127da59a4715e078 | cb303e610949e953b689fbed499f5bb0b79c4aea | refs/heads/master | 2021-05-12T06:21:07.727332 | 2018-01-12T08:54:52 | 2018-01-12T08:54:52 | 117,215,642 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | # -*- coding:utf-8 -*-
# Find the total area covered by two rectilinear rectangles in a 2D plane.
# Each rectangle is defined by its bottom left corner and top right corner as shown in the figure.
#
#
#
#
# Assume that the total area is never beyond the maximum possible value of int.
#
#
# Credits:Special thanks to @mithmatt for adding this problem, creating the above image and all test cases.
class Solution(object):
    def computeArea(self, A, B, C, D, E, F, G, H):
        """Return the total area covered by two axis-aligned rectangles.

        Rectangle 1 spans bottom-left (A, B) to top-right (C, D);
        rectangle 2 spans (E, F) to (G, H).  Any shared region is
        counted only once.

        :type A: int
        :type B: int
        :type C: int
        :type D: int
        :type E: int
        :type F: int
        :type G: int
        :type H: int
        :rtype: int
        """
        area_first = (C - A) * (D - B)
        area_second = (G - E) * (H - F)
        # Intersection bounds: an overlap exists only when both the
        # horizontal and the vertical projections intersect.
        x_lo, x_hi = max(A, E), min(C, G)
        y_lo, y_hi = max(B, F), min(D, H)
        shared = 0
        if x_lo < x_hi and y_lo < y_hi:
            shared = (x_hi - x_lo) * (y_hi - y_lo)
        return area_first + area_second - shared
| [
"hyan90@ucsc.edu"
] | hyan90@ucsc.edu |
9519f539345630a854d6e0c72ce0572206e9fc1c | 61df7d015ebdee4d0b16338e040aa08d3c9b9c5f | /main/schema/components/leaf_type.py | 51c661aa5471229c1e498246a3389220ae6b04a6 | [
"MIT"
] | permissive | namphongpt/tator | 95524823a1e92feefcf7f44b65d5ca88029620e5 | 0ca5ca588b86303ba8c23ba0717bc23bb0e9f097 | refs/heads/master | 2023-03-21T22:00:02.915690 | 2021-03-08T22:44:21 | 2021-03-08T22:44:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,318 | py | leaf_type_properties = {
'name': {
'description': 'Name of the leaf type.',
'type': 'string',
},
'description': {
'description': 'Description of the leaf type.',
'type': 'string',
'default': '',
},
'attribute_types': {
'description': 'Attribute type definitions.',
'type': 'array',
'items': {'$ref': '#/components/schemas/AttributeType'},
},
}
# OpenAPI schema fragment used when creating a leaf type.
leaf_type_spec = {
    'type': 'object',
    'description': 'Leaf type spec.',
    'properties': leaf_type_properties,
}
# OpenAPI schema fragment for partial updates; only name and description
# may be changed, so only those two properties are exposed.
leaf_type_update = {
    'type': 'object',
    'description': 'Leaf type update.',
    'properties': {
        'description': leaf_type_properties['description'],
        'name': leaf_type_properties['name'],
    },
}
# OpenAPI schema fragment returned when reading a leaf type: the shared
# writable properties plus server-assigned identifiers.
leaf_type = {
    'type': 'object',
    'description': 'Leaf type.',
    'properties': {
        'id': {
            'type': 'integer',
            'description': 'Unique integer identifying a leaf type.',
        },
        'project': {
            'type': 'integer',
            'description': 'Unique integer identifying project for this leaf type.',
        },
        'dtype': {
            'type': 'string',
            'description': 'Name of this data type, value is always "leaf".',
        },
        **leaf_type_properties,
    },
}
| [
"jonathan.takahashi@cvisionai.com"
] | jonathan.takahashi@cvisionai.com |
c6223dc4f09480b95a64ae989caea9ab4d424757 | cd4b7913aa2941b82796cea4df9474b7ae5af93f | /filterpy/common/__init__.py | fbcf9fd5bd60d946c117c30f80eef1da78bc8978 | [
"MIT"
] | permissive | bakfoo/filterpy | 12023bdc1e2d7b006f991172d2d0f9ef414d9ffc | 780881756d449591f60927529ef5322b49243081 | refs/heads/master | 2020-12-03T08:14:05.603670 | 2015-07-05T04:06:20 | 2015-07-05T04:06:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | """Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
__all__=["helpers", "discretization", "stats"]
from .helpers import *
from .discretization import *
from .stats import *
| [
"rlabbejr@gmail.com"
] | rlabbejr@gmail.com |
85b4db89872aad8239bc1826bb6c901f96f8da14 | 58d0cb1ee51bc33d6cc793e89af251f8e4ce0e2e | /src/leetcode/medium/remove-nth-node-from-end-of-list/remove_nth_node_from_end_of_list.py | 5e319c6b786dc8cc4a2d92c5646ab2c33b51538f | [
"MIT"
] | permissive | nwthomas/code-challenges | 8998da07537cf5c02bf0d13fe4fc91416d5c2977 | 27ffb6b32d6d18d279c51cfa45bf305a409be5c2 | refs/heads/master | 2023-06-25T06:45:28.430194 | 2023-06-17T20:15:51 | 2023-06-17T20:15:51 | 166,868,294 | 2 | 8 | MIT | 2023-01-04T21:49:19 | 2019-01-21T19:27:31 | Python | UTF-8 | Python | false | false | 1,096 | py | """
https://leetcode.com/problems/remove-nth-node-from-end-of-list
Given the head of a linked list, remove the nth node from the end of the list and return its head.
Example 1:
Input: head = [1,2,3,4,5], n = 2
Output: [1,2,3,5]
Example 2:
Input: head = [1], n = 1
Output: []
Example 3:
Input: head = [1,2], n = 1
Output: [1]
Constraints:
The number of nodes in the list is sz.
1 <= sz <= 30
0 <= Node.val <= 100
1 <= n <= sz
Follow up: Could you do this in one pass?
"""
from typing import Optional
class ListNode:
    """A singly linked list node (LeetCode-style definition)."""
    def __init__(self, val=0, next=None):
        # NOTE: `next` shadows the builtin; kept to match the problem stub.
        self.val = val
        self.next = next
def remove_nth_from_end(head: Optional[ListNode], n: int) -> Optional[ListNode]:
    """Delete the n-th node counted from the tail and return the head.

    Single-pass two-pointer technique: a leading pointer is advanced n
    nodes ahead of a trailing pointer; when the leading pointer runs off
    the end of the list, the trailing pointer sits just before the node
    to remove.
    """
    # A sentinel in front of the head lets us delete the head uniformly.
    sentinel = ListNode(None, head)
    trailing = sentinel
    leading = head
    # Put the leading pointer n nodes ahead (stop early if the list is
    # shorter than n, mirroring the original guard).
    for _ in range(n):
        if not leading:
            break
        leading = leading.next
    # March both pointers in lockstep until the leading one falls off.
    while leading:
        trailing = trailing.next
        leading = leading.next
    # Unlink the victim node, clearing its next pointer on the way out.
    victim = trailing.next
    trailing.next = victim.next
    victim.next = None
    # Detach the sentinel and return the (possibly new) head.
    new_head = sentinel.next
    sentinel.next = None
    return new_head
"nwthomas@me.com"
] | nwthomas@me.com |
e4a4ea3880896a2c831d2bbc53ec1b723b5cf94a | 625daac7e73b98935f9fe93e647eb809b48b712e | /Challenges/cyclicString.py | 6c8a493a5f7e005714b1d373f0f2155ae258892d | [] | no_license | aleksaa01/codefights-codesignal | 19b2d70779cc60f62511b6f88ae5d049451eac82 | a57a5589ab2c9d9580ef44900ea986c826b23051 | refs/heads/master | 2022-03-15T04:46:40.356440 | 2019-12-08T15:41:37 | 2019-12-08T15:41:37 | 112,034,380 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | def cyclicString(s):
for i in range(len(s)):
for j in range(i + 1, len(s) + 1):
sub = s[i:j]
if s in sub * 15:
return len(sub)
"""
You're given a substring s of some cyclic string. What's the length of the smallest possible string that can be
concatenated to itself many times to obtain this cyclic string?
Example
For s = "cabca", the output should be
cyclicString(s) = 3.
"cabca" is a substring of a cycle string "abcabcabcabc..." that can be obtained by concatenating "abc" to itself.
Thus, the answer is 3.
""" | [
"some12curious@gmail.com"
] | some12curious@gmail.com |
2dbfa43b78aa836cbc808b9aa556848c15c60824 | 9267da344111bde2278d0054e899352314c254a3 | /src/server/apps/reduction/models/spectrometry/sns/hyspec/__init__.py | 0364eabc87e1b358fff7699379ab667ce174794e | [] | no_license | bidochon/WebReduction | 5badd7467b8c9d5cbd87c36f7dea1932e17f0950 | 507ff81617abf583edd4ef4858985daefc0afcbe | refs/heads/master | 2020-09-03T18:05:11.846179 | 2019-07-03T15:59:38 | 2019-07-03T15:59:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | from .models import ReductionHYSPEC as Reduction
from .models import RegionHYSPEC as Region
__all__ = [
'Reduction',
'Region',
]
| [
"ricleal@gmail.com"
] | ricleal@gmail.com |
98c4b1a10ed3d965b85cc88258a0accf2a6a6bfe | 6e68ef0a53ce48da79b4906d85fc9785deee4ca5 | /discussion/consumers.py | 7c63d239f205b3278e96653fa51b81dbd3415255 | [] | no_license | shubhamkharose/CODEDAEMON | e3ed8050b5c43ec146c6d253d06121fc37cdb2d4 | 6df7af35c51f5f54b2e2167e3d64d163c9a688f9 | refs/heads/master | 2021-04-06T00:58:01.515828 | 2018-03-15T11:04:31 | 2018-03-15T11:04:31 | 125,353,062 | 1 | 4 | null | 2019-10-28T04:03:58 | 2018-03-15T10:48:53 | JavaScript | UTF-8 | Python | false | false | 716 | py | import json
from channels import Group
from channels.auth import channel_session_user
@channel_session_user
def ws_connect(message):
    # Accept every incoming WebSocket handshake unconditionally.
    print 'connecting..'
    message.reply_channel.send({
        'accept': True
    })
@channel_session_user
def ws_receive(message):
    # Frames carry a JSON payload whose "contest" field (spaces replaced
    # by underscores) is used as the Channels group name, so all clients
    # watching the same contest share one broadcast group.
    data = json.loads(message.content.get('text'))
    con_name = str(data.get('contest'))
    con_name=con_name.replace(" ","_")
    print con_name,'added'
    Group(con_name).add(message.reply_channel)
    # Remember the group on the session so ws_disconnect can leave it.
    message.channel_session['con_group'] = con_name
@channel_session_user
def ws_disconnect(message):
    # Leave the contest group that ws_receive stored on the session.
    # NOTE(review): raises KeyError if the client disconnects before ever
    # sending a frame (no 'con_group' in the session) -- confirm intended.
    print 'disconnecting..'
    user_group = message.channel_session['con_group']
    Group(user_group).discard(message.reply_channel)
| [
"vvt5676@gmail.com"
] | vvt5676@gmail.com |
51916f0bcc4852581a1390ffda6023dc70c4f65a | c8a38e65e71de888fc5b22fbd027bbaa0f3f6ef1 | /Python/100.py | 3405d35a04e5175b2c89d7ae724e26c8128aee8b | [] | no_license | skywhat/leetcode | e451a10cdab0026d884b8ed2b03e305b92a3ff0f | 6aaf58b1e1170a994affd6330d90b89aaaf582d9 | refs/heads/master | 2023-03-30T15:54:27.062372 | 2023-03-30T06:51:20 | 2023-03-30T06:51:20 | 90,644,891 | 82 | 27 | null | null | null | null | UTF-8 | Python | false | false | 1,367 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
    def isSameTree(self, p, q):
        """
        Recursively check whether two binary trees are structurally
        identical with equal node values.

        :type p: TreeNode
        :type q: TreeNode
        :rtype: bool
        """
        # Fix: compare against None with `is`, not `==` -- identity is
        # the intent, and `==` would misbehave if nodes defined __eq__.
        if p is None and q is None:
            return True
        # Exactly one empty subtree can never match.
        if p is None or q is None:
            return False
        # Roots must agree, then both subtrees must match recursively.
        return (p.val == q.val
                and self.isSameTree(p.left, q.left)
                and self.isSameTree(p.right, q.right))
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
    def isSameTree(self, p, q):
        """
        Iteratively check whether two binary trees are structurally
        identical with equal node values, using an explicit stack of
        node pairs.

        :type p: TreeNode
        :type q: TreeNode
        :rtype: bool
        """
        # Fixes: `while s != list()` built a throwaway list every
        # iteration -- use plain truthiness; None checks now use `is`.
        stack = [(p, q)]
        while stack:
            a, b = stack.pop()
            # Two empty subtrees match; keep scanning.
            if a is None and b is None:
                continue
            # One-sided emptiness or a value mismatch means not same.
            if a is None or b is None or a.val != b.val:
                return False
            # Defer both child pairs for later comparison.
            stack.append((a.left, b.left))
            stack.append((a.right, b.right))
        return True
| [
"gangzh@uber.com"
] | gangzh@uber.com |
4792c46d0c600e9904c05498d2dfe7060a2dad72 | f3eba94061ae88d179c66fd0d2c12676ad251b8f | /automatas_celulares/particle.py | 6186a9ed3af503b6540c9bc6efa77bd31465abfd | [
"Unlicense"
] | permissive | binary-hideout/sistemas-adaptativos | 46151dd4f29b760eb6fa539112f310fec109c878 | b0d57c3b98a6f8c3da9ee33fd063b19b4af71b82 | refs/heads/master | 2020-07-05T07:28:40.594286 | 2019-11-03T19:12:47 | 2019-11-03T19:12:47 | 202,571,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,077 | py | import pygame, sys, random
from math import cos
from pygame.locals import *
# Frame rate and window geometry for the particle demo.
FPS = 30
WINDOWWIDTH = 640
WINDOWHEIGHT = 480
# Sprite surfaces loaded once at import time; the three image files must
# exist in the current working directory or pygame.image.load fails.
FIRE = pygame.image.load('fire.png')
PIXEL = pygame.image.load('white.png')
FIRE_YELLOW = pygame.image.load('fire_yellow.png')
def main():
    """Run the particle demo: particles drift, shrink, and respawn at
    the current mouse position with randomised appearance and motion."""
    global FPSCLOCK, DISPLAYSURF
    pygame.init()
    FPSCLOCK = pygame.time.Clock()
    DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
    pygame.mouse.set_visible(0)
    # particle_xysize Elements:
    # Its a List of Lists, where particle_xysize[element][0,1,2,3,4,5,6..]
    # [0] x
    # [1] y
    # [2] size
    # [3] y direction (+1/-1)
    # [4] x direction (+1/-1)
    # [5] sprite type (0 fire, 1 white pixel, 2 yellow fire)
    # [6] addition (age counter, 0..50)
    # [7] color (RGB tuple; only used by the commented-out draw calls)
    # [8] influence (per-axis speed modifier tuple)
    particles = 240
    particle_xysize = []
    while particles > 0:
        particle_xysize.append([0,0,0,0,0,0,0,(0,0,0),(0,0)])
        particles -= 1
    for element in range(len(particle_xysize)):
        particle_xysize[element][2] = 10
        particle_xysize[element][4] = random.randint(0,1)
    velocity = []
    for particle in particle_xysize:
        velocity.append(random.randint(1, 10))
    mouse_x = 0
    mouse_y = 0
    random_numbers = [1, -1]
    # Reset Values
    # NOTE(review): this loop has no effect -- `integer` is rebound each
    # iteration and the list elements are never modified; presumably it
    # was meant to flip the sign of each velocity[i].
    for integer in velocity:
        integer *= random.sample(random_numbers, 1)[0]
    for direction in range(len(particle_xysize)):
        particle_xysize[direction][3] = random.sample(random_numbers, 1)[0]
        particle_xysize[direction][4] = random.sample(random_numbers, 1)[0]
        particle_xysize[direction][8] = (random.randint(1,2), random.randint(1,2))
    # NOTE(review): `traegheit` (German for "inertia") is never used.
    traegheit = 0
    while True:
        # Get Events of Game Loop
        for event in pygame.event.get():
            if event.type == QUIT or (event.type == KEYUP and event.key == K_ESCAPE):
                pygame.quit()
                sys.exit()
            if event.type == MOUSEMOTION:
                mouse_x, mouse_y = event.pos
        # Fill the Display for new objects to be drawn
        DISPLAYSURF.fill((0, 0, 0))
        # Draw Elements
        for element in range(len(particle_xysize)):
            width = particle_xysize[element][2]
            height = particle_xysize[element][2]
            particle_x = particle_xysize[element][0]
            particle_y = particle_xysize[element][1]
            addition = particle_xysize[element][6]
            influence = particle_xysize[element][8]
            color = particle_xysize[element][7]
            # Advance position: base speed plus an age-scaled influence,
            # signed by the stored per-axis direction.
            particle_x += (velocity[element] + addition * influence[0] / 4) * particle_xysize[element][4]
            particle_y += (velocity[element] + addition * influence[1] / 4) * particle_xysize[element][3]
            # Render according to sprite type (scaled each frame).
            if particle_xysize[element][5] == 0:
                firesmall = pygame.transform.scale(FIRE, (int(width), int(height)))
                #pygame.draw.rect(DISPLAYSURF, color, (particle_x - width / 2, particle_y - height / 2, width, height))
                DISPLAYSURF.blit(firesmall,[particle_x - width / 2,particle_y - height / 2])
            elif particle_xysize[element][5] == 1:
                #pygame.draw.circle(DISPLAYSURF, color, (particle_x - int(width / 2), particle_y - int(height / 2)), int(width))
                white = pygame.transform.scale(PIXEL, (int(width / 4), int(height / 4)))
                DISPLAYSURF.blit(white,[particle_x - width / 2,particle_y - height / 2])
            elif particle_xysize[element][5] == 2:
                fire_yellow = pygame.transform.scale(FIRE_YELLOW, (int(width * 2), int(height * 2)))
                DISPLAYSURF.blit(fire_yellow,[particle_x - width,particle_y - height])
            # Shrink while growing faster until the size reaches zero.
            if particle_xysize[element][2] > 0:
                particle_xysize[element][2] -= 0.5
                velocity[element] += 1
            # Age the particle; after 50 frames respawn it at the mouse.
            if particle_xysize[element][6] < 50:
                particle_xysize[element][6] += 1
            else:
                # NOTE(review): this inner `while True` always breaks after
                # one pass, so it is effectively a single respawn step.
                while True:
                    particle_xysize[element][3] = random.sample(random_numbers, 1)[0]
                    particle_xysize[element][4] = random.sample(random_numbers, 1)[0]
                    particle_xysize[element][5] = random.randint(0,2)
                    particle_xysize[element][7] = (random.randint(1,255), random.randint(1,255), random.randint(1,255))
                    particle_xysize[element][6] = 0
                    particle_xysize[element][8] = (random.randint(1,20), random.randint(1,20))
                    # 60% chance of restarting with a visible size.
                    if random.randint(1, 10) > 4:
                        particle_xysize[element][2] = random.randint(1, 20)
                    velocity[element] = random.randint(1,5)
                    particle_xysize[element][0], particle_xysize[element][1] = mouse_x, mouse_y
                    break
        pygame.display.update()
        FPSCLOCK.tick(FPS)
# Run the demo only when this file is executed directly as a script.
if __name__ == '__main__':
    main()
| [
"neto.otz@hotmail.com"
] | neto.otz@hotmail.com |
bedd83d14ffdcad67d03805d9d49dfa71327b1be | e62bcf5cc7062bca85839f071be41153daf153ff | /doc/source/conf.py | 29f1c33084a537a7e9cf0a14e6addb14d15198ad | [
"Apache-2.0"
] | permissive | MountainWei/senlin | e1a9f51adf657ae9f261a929d0fe6d6a57bef2c4 | 8fe640df673f929147284781565faa48a89077ab | refs/heads/master | 2020-12-13T20:55:03.145516 | 2015-10-21T13:36:07 | 2015-10-21T13:36:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,521 | py | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo'
# 'sphinx.ext.intersphinx',
# 'oslo.sphinx'
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'senlin'
copyright = u'2015, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx_mapping = {'http://docs.python.org/': None}
[extensions]
todo_include_todos = True
| [
"tengqim@cn.ibm.com"
] | tengqim@cn.ibm.com |
58c8953c4b0e590e6da96b0f238378d213361523 | a139de988c9c79a368bd3cee13f0fbb3d1a1e905 | /2_Estrutura_Decisão (novo)/14estD.py | d255bec3b63ff87ed958221997ab4286894b2708 | [] | no_license | brunodantascg/listaExercicioPythonBR | 1e6942af06180fd05556de06daa7e745254f894d | 8309e877cb6f63e9195ae34e040860a0c24f50e8 | refs/heads/master | 2021-07-04T00:24:04.080851 | 2020-10-25T05:20:53 | 2020-10-25T05:20:53 | 192,741,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | # 14 - Faça um programa que lê as duas notas parciais obtidas por um aluno numa disciplina ao longo de um semestre, e calcule a sua média. A atribuição de conceitos obedece à tabela abaixo:
# Média de Aproveitamento Conceito
# Entre 9.0 e 10.0 A
# Entre 7.5 e 9.0 B
# Entre 6.0 e 7.5 C
# Entre 4.0 e 6.0 D
# Entre 4.0 e zero E
# O algoritmo deve mostrar na tela as notas, a média, o conceito correspondente e a mensagem “APROVADO” se o conceito for A, B ou C ou “REPROVADO” se o conceito for D ou E.
nota1 = float(input("Informe primeira nota: "))
nota2 = float(input("Informe segunda nota: "))
media = (nota1+nota2)/2
if((media >= 6)and(media <=10)):
print("APROVADO")
if((media >= 9) and (media<=10)):
print("Conceito A")
elif((media >= 7.5)and(media <9)):
print("Conceito B")
elif((media >= 6)and(media < 7.5)):
print("Conceito C")
else:
print("REPROVADO")
if((media >= 4)and(media < 6.0)):
print("Conceito D")
elif((media >= 0)and(media <= 4)):
print("Conceito E")
else:
print("Média errada")
| [
"brunodedantas@gmail.com"
] | brunodedantas@gmail.com |
e132618e5a93782d6a9306b36d52db07c2de1dff | 775822912186176ced195cc224659f2b4f5994d3 | /whoville/cloudbreak/models/structured_event_container.py | f61cf1d81b101951a80868fa62d9c5df1a219d44 | [
"Apache-2.0"
] | permissive | Chaffelson/whoville | 7885d7d0994659dd1d1c02b4bcd87d48b161c5d0 | f71fda629c9fd50d0a482120165ea5abcc754522 | refs/heads/master | 2021-06-04T06:17:29.603774 | 2021-02-11T13:35:28 | 2021-02-11T13:35:28 | 93,987,343 | 31 | 32 | Apache-2.0 | 2019-10-01T04:59:13 | 2017-06-11T06:57:39 | Python | UTF-8 | Python | false | false | 5,463 | py | # coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class StructuredEventContainer(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'flow': 'list[StructuredFlowEvent]',
'rest': 'list[StructuredRestCallEvent]',
'notification': 'list[StructuredNotificationEvent]'
}
attribute_map = {
'flow': 'flow',
'rest': 'rest',
'notification': 'notification'
}
def __init__(self, flow=None, rest=None, notification=None):
"""
StructuredEventContainer - a model defined in Swagger
"""
self._flow = None
self._rest = None
self._notification = None
if flow is not None:
self.flow = flow
if rest is not None:
self.rest = rest
if notification is not None:
self.notification = notification
@property
def flow(self):
"""
Gets the flow of this StructuredEventContainer.
:return: The flow of this StructuredEventContainer.
:rtype: list[StructuredFlowEvent]
"""
return self._flow
@flow.setter
def flow(self, flow):
"""
Sets the flow of this StructuredEventContainer.
:param flow: The flow of this StructuredEventContainer.
:type: list[StructuredFlowEvent]
"""
self._flow = flow
@property
def rest(self):
"""
Gets the rest of this StructuredEventContainer.
:return: The rest of this StructuredEventContainer.
:rtype: list[StructuredRestCallEvent]
"""
return self._rest
@rest.setter
def rest(self, rest):
"""
Sets the rest of this StructuredEventContainer.
:param rest: The rest of this StructuredEventContainer.
:type: list[StructuredRestCallEvent]
"""
self._rest = rest
@property
def notification(self):
"""
Gets the notification of this StructuredEventContainer.
:return: The notification of this StructuredEventContainer.
:rtype: list[StructuredNotificationEvent]
"""
return self._notification
@notification.setter
def notification(self, notification):
"""
Sets the notification of this StructuredEventContainer.
:param notification: The notification of this StructuredEventContainer.
:type: list[StructuredNotificationEvent]
"""
self._notification = notification
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, StructuredEventContainer):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"chaffelson@gmail.com"
] | chaffelson@gmail.com |
c5477488fb4bac77644b149baa3eab68e031e998 | 2391005dee2848fcf36c0499bef15c726bcf218e | /sphinxpro/settings.py | de6aa1d2b2ca47f9ec2c59ce2aeb8f0cedc760e2 | [] | no_license | GanapathiAmbore/Django_Sphinxsearch_Pro | 05caf64f50941da89ea0e9c6d97fadfe75f00f6b | a43f1cea0842f128e8e4d077a42a0c6f4cb16c36 | refs/heads/master | 2020-07-21T00:09:33.241108 | 2019-09-06T06:34:53 | 2019-09-06T06:34:53 | 206,729,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,171 | py | """
Django settings for sphinxpro project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control -- move it to an
# environment variable or secret store and rotate it before deploying.
SECRET_KEY = 'x3+r)3jpywqinxa_+*q1lo6+j5o400*bser*wvr6gtm(=4_qv0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): must list served hostnames before running with DEBUG = False.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'sphinxapp',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sphinxpro.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR,'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'sphinxpro.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# NOTE(review): root credentials are hard-coded in source; use a dedicated
# database user and load the password from the environment.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'sphinx',
        'USER':'root',
        'PASSWORD':'root',
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"ganapathiambore@gmail.com"
] | ganapathiambore@gmail.com |
44885eb6d3883986024c59c8b1dd8e6bc291fab5 | bef93432b7745ba5492f11e709e47a5a372590f0 | /modules/xia2/Wrappers/CCP4/Truncate.py | 8e23b271fbcc9f7965ee3ae0964bb6c8c9bae947 | [
"BSD-3-Clause"
] | permissive | BlenderCN-Org/dials-dev20190819 | 939378744d546692e3de33d106a1b5218a584c2a | 1b719b88a1642c13a5a8d488addbb215d0fa290c | refs/heads/master | 2020-07-19T17:00:06.944870 | 2019-08-19T21:36:25 | 2019-08-19T21:36:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,398 | py | #!/usr/bin/env python
# Truncate.py
# Copyright (C) 2006 CCLRC, Graeme Winter
#
# This code is distributed under the BSD license, a copy of which is
# included in the root directory of this package.
#
# 26th October 2006
#
# A wrapper for the CCP4 program Truncate, which calculates F's from
# I's and gives a few useful statistics about the data set.
from __future__ import absolute_import, division, print_function
import os
import sys
from xia2.Decorators.DecoratorFactory import DecoratorFactory
from xia2.Driver.DriverFactory import DriverFactory
from xia2.Handlers.Phil import PhilIndex
from xia2.Handlers.Streams import Chatter, Debug
from xia2.lib.bits import transpose_loggraph
from xia2.Wrappers.CCP4.Ctruncate import Ctruncate
def Truncate(DriverType=None):
"""A factory for TruncateWrapper classes."""
DriverInstance = DriverFactory.Driver(DriverType)
CCP4DriverInstance = DecoratorFactory.Decorate(DriverInstance, "ccp4")
if PhilIndex.params.ccp4.truncate.program == "ctruncate":
return Ctruncate(DriverType)
elif PhilIndex.params.ccp4.truncate.program == "cctbx":
from xia2.Wrappers.XIA.FrenchWilson import FrenchWilson
return FrenchWilson(DriverType)
class TruncateWrapper(CCP4DriverInstance.__class__):
"""A wrapper for Truncate, using the CCP4-ified Driver."""
        def __init__(self):
            """Initialise the Driver machinery and Truncate bookkeeping."""
            # generic things
            CCP4DriverInstance.__class__.__init__(self)
            self.set_executable(os.path.join(os.environ.get("CBIN", ""), "truncate"))
            # Whether 'anomalous yes' is passed to truncate (see truncate()).
            self._anomalous = False
            # Residue count; forwarded as "nres N" when non-zero.
            self._nres = 0
            # should we do wilson scaling?
            self._wilson = True
            # Statistics scraped from the truncate log after a run.
            self._b_factor = 0.0
            self._moments = None
            self._wilson_fit_grad = 0.0
            self._wilson_fit_grad_sd = 0.0
            self._wilson_fit_m = 0.0
            self._wilson_fit_m_sd = 0.0
            self._wilson_fit_range = None
            # numbers of reflections in and out, and number of absences
            # counted
            self._nref_in = 0
            self._nref_out = 0
            self._nabsent = 0
            self._xmlout = None
        def set_anomalous(self, anomalous):
            """Choose whether 'anomalous yes' or 'anomalous no' is sent
            to the truncate program (see truncate())."""
            self._anomalous = anomalous
        def set_wilson(self, wilson):
            """Set the use of Wilson scaling - if you set this to False
            Wilson scaling will be switched off..."""
            # When False, "scale 1" is sent to truncate instead of the
            # default Wilson-scaled output (see truncate()).
            self._wilson = wilson
        def get_xmlout(self):
            """Return the XML output path, if one was recorded.

            Initialised to None in __init__; not assigned anywhere in
            the code visible here, so it may be None for this wrapper.
            """
            return self._xmlout
def truncate(self):
"""Actually perform the truncation procedure."""
self.check_hklin()
self.check_hklout()
self.start()
if self._anomalous:
self.input("anomalous yes")
else:
self.input("anomalous no")
if self._nres:
self.input("nres %d" % self._nres)
if not self._wilson:
self.input("scale 1")
self.close_wait()
try:
self.check_for_errors()
self.check_ccp4_errors()
except RuntimeError:
try:
os.remove(self.get_hklout())
except Exception:
pass
raise RuntimeError("truncate failure")
# parse the output for interesting things, including the
# numbers of reflections in and out (isn't that a standard CCP4
# report?) and the number of absent reflections.
self._nref_in, self._nref_out = self.read_nref_hklin_hklout(
self.get_all_output()
)
# FIXME guess I should be reading this properly...
self._nabsent = self._nref_in - self._nref_out
for line in self.get_all_output():
if "Least squares straight line gives" in line:
list = line.replace("=", " ").split()
if not "***" in list[6]:
self._b_factor = float(list[6])
else:
Debug.write("no B factor available")
if "LSQ Line Gradient" in line:
self._wilson_fit_grad = float(line.split()[-1])
resol_width = max(self._wilson_fit_range) - min(
self._wilson_fit_range
)
if self._wilson_fit_grad > 0 and resol_width > 1.0 and False:
raise RuntimeError(
"wilson plot gradient positive: %.2f"
% self._wilson_fit_grad
)
elif self._wilson_fit_grad > 0:
Debug.write("Positive gradient but not much wilson plot")
if "Uncertainty in Gradient" in line:
self._wilson_fit_grad_sd = float(line.split()[-1])
if "X Intercept" in line:
self._wilson_fit_m = float(line.split()[-1])
if "Uncertainty in Intercept" in line:
self._wilson_fit_m_sd = float(line.split()[-1])
if "Resolution range" in line:
self._wilson_fit_range = map(float, line.split()[-2:])
results = self.parse_ccp4_loggraph()
moments = transpose_loggraph(
results["Acentric Moments of E for k = 1,3,4,6,8"]
)
# keys we want in this are "Resln_Range" "1/resol^2" and
# MomentZ2. The last of these should be around two, but is
# likely to be a little different to this.
self._moments = moments
def get_b_factor(self):
return self._b_factor
def get_wilson_fit(self):
return (
self._wilson_fit_grad,
self._wilson_fit_grad_sd,
self._wilson_fit_m,
self._wilson_fit_m_sd,
)
def get_wilson_fit_range(self):
return self._wilson_fit_range
def get_moments(self):
return self._moments
def get_nref_in(self):
return self._nref_in
def get_nref_out(self):
return self._nref_out
def get_nabsent(self):
return self._nabsent
def read_nref_hklin_hklout(self, records):
"""Look to see how many reflections came in through HKLIN, and
how many went out again in HKLOUT."""
nref_in = 0
nref_out = 0
current_logical = None
for record in records:
if "Logical Name" in record:
current_logical = record.split()[2]
assert current_logical in ["HKLIN", "HKLOUT", "SYMINFO"]
if "Number of Reflections" in record:
if current_logical == "HKLIN":
nref_in = int(record.split()[-1])
elif current_logical == "HKLOUT":
nref_out = int(record.split()[-1])
return nref_in, nref_out
return TruncateWrapper()
if __name__ == "__main__":
    # Minimal CLI: truncate HKLIN (argv[1]) into HKLOUT (argv[2]) and report
    # the reflection counts in/out plus the number of absences.
    wrapper = Truncate()
    wrapper.set_hklin(sys.argv[1])
    wrapper.set_hklout(sys.argv[2])
    wrapper.truncate()
    print(wrapper.get_nref_in(), wrapper.get_nref_out(), wrapper.get_nabsent())
| [
"jorge7soccer@gmail.com"
] | jorge7soccer@gmail.com |
e8db6cddb755df8eb65e4ad727c5c16988917856 | b1c3326df1b72de39be0e96da9c01c842ca25a64 | /srhapp/migrations/0003_auto_20190712_1433.py | 1a9dc3a5c1e640214cdc48f904ff90bc905893f8 | [] | no_license | harakzf/oscar_search_test | b03ee53504fa35ce0a4fd076361d3b8f8d82622d | 7cef383ba606a3956ff0e0efad15476ced8064d5 | refs/heads/master | 2023-04-30T02:04:14.994803 | 2019-07-30T02:51:57 | 2019-07-30T02:51:57 | 199,562,532 | 0 | 0 | null | 2023-04-21T20:36:16 | 2019-07-30T02:50:10 | Python | UTF-8 | Python | false | false | 407 | py | # Generated by Django 2.1 on 2019-07-12 05:33
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; applies on top of 0002.
    dependencies = [
        ('srhapp', '0002_auto_20190620_1843'),
    ]

    operations = [
        # Redefine Good.title as a 255-char CharField (the verbose name is
        # Japanese for "product name").
        migrations.AlterField(
            model_name='good',
            name='title',
            field=models.CharField(max_length=255, verbose_name='商品名'),
        ),
    ]
| [
"you@example.com"
] | you@example.com |
5a62dafd21dc9a38cb45ca335e83bf0012cf9556 | 6331a82d741ac3c8d27c983c0a9d932ea15c6e1e | /uttu/uttu_1/migrations/0001_initial.py | 8929990e4e5ba2324ab97579d3466e3b82361a91 | [] | no_license | samirpatil2000/django_pratice | 27fd40c4adfb201d6420d239e4f9182668a72227 | 8c324cbe245f97ee01f2041f3d5b594dfbff5b04 | refs/heads/master | 2022-12-28T22:42:04.135127 | 2020-09-21T10:59:44 | 2020-09-21T10:59:44 | 292,592,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | # Generated by Django 3.0.4 on 2020-03-11 18:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the uttu_1 app.

    # First migration of this app: no predecessors.
    initial = True

    dependencies = [
    ]

    operations = [
        # Manufacture: auto id + free-text name.
        # NOTE(review): max_length on a TextField is not enforced at the
        # database level - confirm a CharField was not intended.
        migrations.CreateModel(
            name='Manufacture',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField(max_length=100)),
            ],
        ),
        # Car: auto id + required FK to Manufacture (rows cascade-delete
        # with their manufacturer).
        migrations.CreateModel(
            name='Car',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('manufacture', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uttu_1.Manufacture')),
            ],
        ),
    ]
| [
"samirspatil742099@gmail.com"
] | samirspatil742099@gmail.com |
c82075a78e141b03c99399ba5faa066f8d3e0a7e | b76615ff745c6d66803506251c3d4109faf50802 | /pyobjc-framework-AVFoundation/PyObjCTest/test_avaudiounitgenerator.py | e0dc76137a8f3acfa21c033038262e2f3a19c554 | [
"MIT"
] | permissive | danchr/pyobjc-git | 6ef17e472f54251e283a0801ce29e9eff9c20ac0 | 62b787fddeb381184043c7ff136f1c480755ab69 | refs/heads/master | 2021-01-04T12:24:31.581750 | 2020-02-02T20:43:02 | 2020-02-02T20:43:02 | 240,537,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | from PyObjCTools.TestSupport import *
import AVFoundation
class TestAVAudioUnitEffect(TestCase):
    """Bridge-metadata checks for AVAudioUnitGenerator's BOOL accessors."""

    # Only run on macOS 10.10+, where AVAudioUnitGenerator was introduced.
    @min_os_level("10.10")
    def testMethods10_10(self):
        # `bypass` is declared to return an Objective-C BOOL, and the first
        # argument of setBypass_ is a BOOL as well.
        self.assertResultIsBOOL(AVFoundation.AVAudioUnitGenerator.bypass)
        self.assertArgIsBOOL(AVFoundation.AVAudioUnitGenerator.setBypass_, 0)
if __name__ == "__main__":
    # `main` is presumably re-exported by the PyObjCTools.TestSupport star
    # import above - confirm before relying on direct execution.
    main()
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
371a57e00abc94448295159ad0cbcf70df1ad3c2 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/AoiUser.py | f1464928dd071c90be78ba78f78eb6aa5b80e7fa | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,310 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AoiUser(object):
    """Alipay API model carrying a user's id and display name."""

    # The model's serialisable fields, in wire order.
    _FIELDS = ('user_id', 'user_name')

    def __init__(self):
        self._user_id = None
        self._user_name = None

    @property
    def user_id(self):
        return self._user_id

    @user_id.setter
    def user_id(self, value):
        self._user_id = value

    @property
    def user_name(self):
        return self._user_name

    @user_name.setter
    def user_name(self, value):
        self._user_name = value

    def to_alipay_dict(self):
        """Serialise the set (truthy) fields into a plain dict."""
        params = dict()
        for key in self._FIELDS:
            value = getattr(self, key)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[key] = value.to_alipay_dict()
            else:
                params[key] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an AoiUser from a response dict; returns None for empty input."""
        if not d:
            return None
        o = AoiUser()
        for key in AoiUser._FIELDS:
            if key in d:
                setattr(o, key, d[key])
        return o
| [
"jishupei.jsp@alibaba-inc.com"
] | jishupei.jsp@alibaba-inc.com |
14150f11112f3c92d38915a38307a0c3dab72088 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D99A/PRODEXD99AUN.py | fb38595d0e085e2d11abc5c44ae99cd86a0e83ee | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 968 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD99AUN import recorddefs
# EDIFACT message grammar for PRODEX (UN D99A directory) as consumed by the
# bots translator. Each dict describes one segment: ID is the segment tag,
# MIN/MAX its occurrence bounds, and LEVEL a nested segment group.
structure = [
    {ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
        {ID: 'BGM', MIN: 1, MAX: 1},
        {ID: 'DTM', MIN: 1, MAX: 2},
        {ID: 'MEA', MIN: 1, MAX: 1},
        {ID: 'NAD', MIN: 1, MAX: 2},
        {ID: 'RFF', MIN: 1, MAX: 5, LEVEL: [
            {ID: 'DTM', MIN: 0, MAX: 1},
        ]},
        # Item description group: up to 99 IMD, each with optional lines.
        {ID: 'IMD', MIN: 1, MAX: 99, LEVEL: [
            {ID: 'QTY', MIN: 0, MAX: 10},
            {ID: 'LIN', MIN: 0, MAX: 9999, LEVEL: [
                {ID: 'GIS', MIN: 0, MAX: 2},
                {ID: 'LOC', MIN: 0, MAX: 2},
                {ID: 'DTM', MIN: 0, MAX: 1},
                {ID: 'MEA', MIN: 0, MAX: 5},
                {ID: 'QTY', MIN: 0, MAX: 5},
                {ID: 'TDT', MIN: 0, MAX: 5},
                {ID: 'RFF', MIN: 0, MAX: 5, LEVEL: [
                    {ID: 'DTM', MIN: 0, MAX: 1},
                ]},
            ]},
        ]},
        {ID: 'UNT', MIN: 1, MAX: 1},
    ]},
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
d1b8388bee2cc425552126091de228ba16dd8e75 | 5b9f9b4ea1494943e6f7f842df55909599ed1304 | /python/onshape_client/oas/models/transform_group.py | 48feaba53bcc7527b3f055fc9bc8659766a0df5b | [] | no_license | jenniferyoung02/onshape-clients | f50534f033428027515b7fc0b801b1caab4d0aec | 8ee31a17d7af32f105b851e45f69fd4a3006e1ba | refs/heads/master | 2020-09-07T06:44:37.682545 | 2019-10-08T18:52:06 | 2019-10-08T18:52:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,839 | py | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.104
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class TransformGroup(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'transform': 'list[float]',
        'instances': 'list[BTAssemblyInstanceDefinitionParams]'
    }

    attribute_map = {
        'transform': 'transform',
        'instances': 'instances'
    }

    def __init__(self, transform=None, instances=None):  # noqa: E501
        """TransformGroup - a model defined in OpenAPI"""  # noqa: E501
        self._transform = None
        self._instances = None
        self.discriminator = None

        if transform is not None:
            self.transform = transform
        if instances is not None:
            self.instances = instances

    @property
    def transform(self):
        """Gets the transform of this TransformGroup.  # noqa: E501


        :return: The transform of this TransformGroup.  # noqa: E501
        :rtype: list[float]
        """
        return self._transform

    @transform.setter
    def transform(self, transform):
        """Sets the transform of this TransformGroup.


        :param transform: The transform of this TransformGroup.  # noqa: E501
        :type: list[float]
        """
        self._transform = transform

    @property
    def instances(self):
        """Gets the instances of this TransformGroup.  # noqa: E501


        :return: The instances of this TransformGroup.  # noqa: E501
        :rtype: list[BTAssemblyInstanceDefinitionParams]
        """
        return self._instances

    @instances.setter
    def instances(self, instances):
        """Sets the instances of this TransformGroup.


        :param instances: The instances of this TransformGroup.  # noqa: E501
        :type: list[BTAssemblyInstanceDefinitionParams]
        """
        self._instances = instances

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # `six.iteritems` was replaced with the built-in dict.items(); the
        # iteration is identical and it drops the py2 compatibility shim.
        for attr, _ in self.openapi_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, TransformGroup):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"ekeller@onshape.com"
] | ekeller@onshape.com |
1b62042c0d2c3745194a27db18e6434a2bb26b36 | a8db8ac98b1df5e3020e15bd4b191582b983f655 | /PKUTreeMaker/test/CrabJobsSrc/SingleEle/crab3_analysis_seleE.py | ce860a09ae24c787c31cf05199208745fb169742 | [] | no_license | JINGFFF/fullRun2VBSWG | 9ced38082fbe6794207acb06dd3efaf75df4f7d1 | 8d26c28e93c1562ce38e7b3dabd07902b68420ab | refs/heads/master | 2023-01-12T08:22:52.450034 | 2020-11-08T07:26:22 | 2020-11-08T07:26:22 | 258,943,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,352 | py | from WMCore.Configuration import Configuration
# CRAB3 job configuration: 2017 SingleElectron (Run E, 31Mar2018 re-reco)
# analysis with the PKU tree maker.
config = Configuration()

# --- General: request bookkeeping ---
config.section_("General")
config.General.requestName = 'fullrun2_2017_seleE_v1'
config.General.transferLogs = True

# --- JobType: CMSSW analysis job; ships the Fall17 DE jet-energy
# correction text files alongside the pset ---
config.section_("JobType")
config.JobType.maxMemoryMB = 3000
config.JobType.pluginName = 'Analysis'
config.JobType.inputFiles =['Fall17_17Nov2017DE_V32_DATA_L1FastJet_AK4PFchs.txt','Fall17_17Nov2017DE_V32_DATA_L1FastJet_AK4PFPuppi.txt','Fall17_17Nov2017DE_V32_DATA_L2L3Residual_AK4PFchs.txt','Fall17_17Nov2017DE_V32_DATA_L2L3Residual_AK4PFPuppi.txt','Fall17_17Nov2017DE_V32_DATA_L2Relative_AK4PFchs.txt','Fall17_17Nov2017DE_V32_DATA_L2Relative_AK4PFPuppi.txt','Fall17_17Nov2017DE_V32_DATA_L3Absolute_AK4PFchs.txt','Fall17_17Nov2017DE_V32_DATA_L3Absolute_AK4PFPuppi.txt']
config.JobType.psetName = 'analysis_data_DE.py'
config.JobType.allowUndistributedCMSSW = True

# --- Data: input dataset, lumi-based splitting with the golden JSON ---
config.section_("Data")
#config.Data.outputPrimaryDataset = 'VBS_WGAMMA_94X'
config.Data.inputDataset = '/SingleElectron/Run2017E-31Mar2018-v1/MINIAOD'
config.Data.inputDBS = 'global'
config.Data.splitting = 'LumiBased'
config.Data.unitsPerJob = 40
config.Data.lumiMask = 'Cert_294927-306462_13TeV_EOY2017ReReco_Collisions17_JSON.txt'
config.Data.publication = False
config.Data.outputDatasetTag = 'fullrun2_2017_seleE_v1'

# --- Site: stage output to the Beijing T2 ---
config.section_("Site")
config.Site.storageSite = 'T2_CN_Beijing'
| [
"15827238926@163.com"
] | 15827238926@163.com |
bc453a69f69ddb02b7d3f8d5ffb9e3d84809d5d2 | 3ea104409b5ab5f1d1928af7d31b4a58b11d220a | /venv/Lib/site-packages/networkx/algorithms/centrality/tests/test_second_order_centrality.py | 903bbe9a82d1e432b28be0f91110184af616264d | [
"Apache-2.0"
] | permissive | farhananwari07/flask-image-processing | 0103ab0600995a760e27ffc644ffb313de4eaade | a4a4ad717ffd074afbe31cbf8803060764034375 | refs/heads/main | 2023-09-02T01:21:27.328049 | 2021-11-10T07:58:17 | 2021-11-10T07:58:17 | 425,517,466 | 0 | 0 | Apache-2.0 | 2021-11-07T13:55:56 | 2021-11-07T13:55:56 | null | UTF-8 | Python | false | false | 1,921 | py | """
Tests for second order centrality.
"""
import pytest
# Skip this whole test module when the scientific stack is unavailable:
# second_order_centrality needs numpy and scipy at run time.
pytest.importorskip("numpy")
pytest.importorskip("scipy")
import networkx as nx
class TestSecondOrderCentrality:
    """Tests for nx.second_order_centrality.

    The expected values in the non-error tests are quoted to three decimal
    places from the second-order-centrality paper (per the original
    docstrings) and compared with an absolute tolerance of 1e-2.
    """

    def test_empty(self):
        # An empty graph has no nodes to rank and is rejected.
        with pytest.raises(nx.NetworkXException):
            G = nx.empty_graph()
            nx.second_order_centrality(G)

    def test_non_connected(self):
        # Two isolated nodes: the measure requires a connected graph.
        with pytest.raises(nx.NetworkXException):
            G = nx.Graph()
            G.add_node(0)
            G.add_node(1)
            nx.second_order_centrality(G)

    def test_non_negative_edge_weights(self):
        # Negative edge weights are rejected.
        with pytest.raises(nx.NetworkXException):
            G = nx.path_graph(2)
            G.add_edge(0, 1, weight=-1)
            nx.second_order_centrality(G)

    def test_one_node_graph(self):
        """Second order centrality: single node"""
        G = nx.Graph()
        G.add_node(0)
        G.add_edge(0, 0)
        assert nx.second_order_centrality(G)[0] == 0

    def test_P3(self):
        """Second order centrality: line graph, as defined in paper"""
        G = nx.path_graph(3)
        b_answer = {0: 3.741, 1: 1.414, 2: 3.741}

        b = nx.second_order_centrality(G)

        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-2)

    def test_K3(self):
        """Second order centrality: complete graph, as defined in paper"""
        G = nx.complete_graph(3)
        b_answer = {0: 1.414, 1: 1.414, 2: 1.414}

        b = nx.second_order_centrality(G)

        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-2)

    def test_ring_graph(self):
        """Second order centrality: ring graph, as defined in paper"""
        G = nx.cycle_graph(5)
        b_answer = {0: 4.472, 1: 4.472, 2: 4.472, 3: 4.472, 4: 4.472}

        b = nx.second_order_centrality(G)

        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-2)
| [
"agoes.minarno@gmail.com"
] | agoes.minarno@gmail.com |
a9731b75609fd3be10e9dc65cbf587c87c60e72f | d384e15a0b40660d9026608995b34e410e923de7 | /core/mixins.py | f8628a0f65bc9865c90ed1da35a42a0a659fcdd3 | [] | no_license | Naveendata-ux/QuickAd-Django-Classified-App | 93deb6283228adce03ff2cd448791317df4148e3 | dc65b657d7b47d5c0321bd33640c3ca677e30ba6 | refs/heads/master | 2022-07-04T18:52:02.125969 | 2020-05-15T19:20:15 | 2020-05-15T19:20:15 | 265,176,361 | 1 | 0 | null | 2020-05-19T07:27:39 | 2020-05-19T07:27:38 | null | UTF-8 | Python | false | false | 476 | py | from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
class CustomLoginRequiredMixin(LoginRequiredMixin):
    """Verify that the current user is authenticated."""

    # Override Django's default LOGIN_URL: anonymous users are redirected
    # to this project's accounts login view instead.
    login_url = reverse_lazy('accounts:login')

    # Earlier hand-rolled equivalent of LoginRequiredMixin.dispatch, kept
    # here for reference:
    # def dispatch(self, request, *args, **kwargs):
    #     if not request.user.is_authenticated:
    #         return redirect(reverse_lazy('accounts:login'))
    #     return super().dispatch(request, *args, **kwargs)
| [
"rumimanzurulhoque@gmail.com"
] | rumimanzurulhoque@gmail.com |
561f0e1ec3949085391efe913aeb375796f89186 | e4c6741c0d264e7c274c14fadababd08505627aa | /sdepy/core.py | cf72142dda99889a83cebe054900f278a741c1ec | [] | no_license | JonasRSV/mc-sde | 860d1168496795fa7d40665437915eb226942815 | a002b3c550177e717aa345a206f7f9c8142a8c77 | refs/heads/master | 2021-04-10T20:35:18.533947 | 2020-04-27T09:42:47 | 2020-04-27T09:42:47 | 248,963,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,642 | py | from abc import ABC, abstractmethod
from typing import Tuple, List
import numpy as np
class SDE(ABC):
    """Abstract interface for a stochastic differential equation simulation."""

    @abstractmethod
    def __init__(self, *args, **kwargs):
        raise NotImplementedError()

    @abstractmethod
    def step(self) -> Tuple[np.float64, np.ndarray]:
        """Advance one step; return the new time and the particle states."""
        raise NotImplementedError()

    @abstractmethod
    def stop(self) -> bool:
        """Return True once the simulation should terminate."""
        raise NotImplementedError()

    @abstractmethod
    def preprocess(self):
        """One-off setup hook (run per process; see Job.init_on_process)."""
        raise NotImplementedError()

    @property
    @abstractmethod
    def steps(self) -> int: # returns the total number of steps left
        raise NotImplementedError()
class PDF(ABC):
    """Abstract probability-density estimate built from simulated particles."""

    @abstractmethod
    def merge(self, pdf: "PDF"):
        """Fold another PDF estimate into this one."""
        raise NotImplementedError()

    @abstractmethod
    def __call__(self, particles: np.ndarray):
        """Evaluate the estimate for *particles* (semantics defined by
        implementations - confirm against concrete subclasses)."""
        raise NotImplementedError()

    @abstractmethod
    def fit(self, particles: np.ndarray):
        """Fit / update the density estimate from particle samples."""
        raise NotImplementedError()

    @abstractmethod
    def preprocess(self):
        """One-off setup hook (run per process; see Job.init_on_process)."""
        raise NotImplementedError()

    @property
    @abstractmethod
    def mean(self) -> float: # mean of distribution
        raise NotImplementedError()
class Job:
    """A unit of work: an SDE to simulate, a PDF to fit, a mode and settings."""

    # Result modes understood by the runner.
    RAW = 0
    Video = 1  # NOTE(review): casing inconsistent with RAW; renaming would break callers.

    def __init__(self, sde: SDE, pdf: PDF, mode: int, settings: dict):
        self.sde = sde
        self.pdf = pdf
        self.mode = mode
        self.settings = settings

    def init_on_process(self):
        """Run the SDE's and PDF's per-process setup hooks."""
        self.sde.preprocess()
        self.pdf.preprocess()
class Result:
    """Aggregate of the PDF estimates produced by one or more jobs."""

    def __init__(self, distributions):
        # One PDF per finished job / reporting step.
        self.distributions: List[PDF] = distributions

    def extend(self, result):
        """Append all distributions held by another Result to this one."""
        self.distributions.extend(result.distributions)
| [
"jonas@valfridsson.net"
] | jonas@valfridsson.net |
9982d44f64826f7373273a38161d571be7f2b7e4 | 45870a80cbe343efe95eb9e8d0bd47c8c88353d1 | /机器学习竞赛/NLP-master/DataDetail.py | 14b794ee6683469b741b4c6f00b737b618ecb9f6 | [] | no_license | pippichi/IntelliJ_PYTHON | 3af7fbb2c8a3c2ff4c44e66736bbfb7aed51fe88 | 0bc6ded6fb5b5d9450920e4ed5e90a2b82eae7ca | refs/heads/master | 2021-07-10T09:53:01.264372 | 2020-07-09T13:19:41 | 2020-07-09T13:19:41 | 159,319,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,564 | py | import jieba
from ReadFile import ReadCsv, cut2wd, get_feature_words, Count
import codecs
import pandas as pd
import numpy as np
def loan_csv(filename):
    """Read *filename* with ReadCsv and return the non-empty entries of
    column 1 (the review texts)."""
    table = ReadCsv(filename).data_np()
    return [cell for cell in table[:, 1] if cell != ' ' and cell != '']
# Load the raw training and validation sets as numpy arrays.
# Column 1 appears to hold the review text (see loan_csv above); columns 2+
# presumably hold the aspect labels - confirm against the CSV schema.
Motion_result = ReadCsv("C:\\Users\\HASEE\\Desktop\\ai_challenger_sentiment_analysis_trainingset_20180816\\sentiment_analysis_trainingset1.csv").data_np()
Motion_validation = ReadCsv("C:\\Users\\HASEE\\Desktop\\ai_challenger_sentiment_analysis_validationset_20180816\\sentiment_analysis_validationset1.csv").data_np()
# pd.DataFrame(Motion_result[0:1000]).to_csv("C:\\Users\\HASEE\\Desktop\\ai_challenger_sentiment_analysis_trainingset_20180816\\sentiment_analysis_trainingset.csv",encoding="utf_8_sig")
# pd.DataFrame(Motion_validation[0:100]).to_csv("C:\\Users\\HASEE\\Desktop\\ai_challenger_sentiment_analysis_validationset_20180816\\sentiment_analysis_validationset.csv",encoding="utf_8_sig")
# df = pd.DataFrame(Motion_result)
# df.to_csv("C:\\Users\\HASEE\\Desktop\\ai_challenger_sentiment_analysis_trainingset_20180816\\sentiment_analysis_trainingset.csv")
# result = df.append(Motion_result).to_csv("C:\\Users\\HASEE\\Desktop\\ai_challenger_sentiment_analysis_trainingset_20180816\\sentiment_analysis_trainingset.csv")
# result2 = df2.append(Motion_validation).to_csv("C:\\Users\\HASEE\\Desktop\\ai_challenger_sentiment_analysis_trainingset_20180816\\sentiment_analysis_trainingset.csv")
# print(Motion_result.shape)
# print(Motion_validation.shape)
# lists = []
# for each in Motion_result[:, 1]:
# if each!='':
# lists.append(each.strip('\n'))
#
# y_train = Motion_result[:,2:]
# text = Motion_result[:,1]
# with codecs.open('corpus\\停用词表.txt', 'r', 'utf-8') as f:
# list2 = []
# for each in f.readlines():
# if each != ' ' and each != '':
# list2.append(each.strip('\n'))
# print(len(list2))
# print(text[0])
# list3 = jieba.cut(text[0],False)
# print(type(list3))
# for temp in list3:
# print(type(temp))
# #存入text
# for t in text:
# save_txt('txtfile.txt',t)
# print(readfile('txtfile.txt')[2])
# a = cut2wd(text)
#print(text[2])
# get_feature_words(text[3])
# b = Count(a)
| [
"874496049@qq.com"
] | 874496049@qq.com |
139414f0e8b9b21c35efbb0689213127dfe4b510 | 71e43068e82c91acbb3849169d1723f1375ac27f | /test/test_customer_profile_audience_request_item.py | 1fe2fd857c07d66bb7cacc4e09f20aaa9f622080 | [
"MIT"
] | permissive | talon-one/talon_one.py | aa08a1dbddd8ea324846ae022e43d441c57028f6 | 917dffb010e3d3e2f841be9cccba5bba1ea6c5c3 | refs/heads/master | 2023-05-11T18:50:00.041890 | 2023-05-03T20:17:39 | 2023-05-03T20:17:39 | 79,575,913 | 1 | 7 | MIT | 2023-05-03T15:10:14 | 2017-01-20T16:29:46 | Python | UTF-8 | Python | false | false | 2,433 | py | # coding: utf-8
"""
Talon.One API
Use the Talon.One API to integrate with your application and to manage applications and campaigns: - Use the operations in the [Integration API section](#integration-api) are used to integrate with our platform - Use the operation in the [Management API section](#management-api) to manage applications and campaigns. ## Determining the base URL of the endpoints The API is available at the same hostname as your Campaign Manager deployment. For example, if you access the Campaign Manager at `https://yourbaseurl.talon.one/`, the URL for the [updateCustomerSessionV2](https://docs.talon.one/integration-api#operation/updateCustomerSessionV2) endpoint is `https://yourbaseurl.talon.one/v2/customer_sessions/{Id}` # noqa: E501
The version of the OpenAPI document:
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import talon_one
from talon_one.models.customer_profile_audience_request_item import CustomerProfileAudienceRequestItem # noqa: E501
from talon_one.rest import ApiException
class TestCustomerProfileAudienceRequestItem(unittest.TestCase):
    """CustomerProfileAudienceRequestItem unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Test CustomerProfileAudienceRequestItem
            include_option is a boolean, when False only required
            params are included, when True both required and
            optional params are included """
        # model = talon_one.models.customer_profile_audience_request_item.CustomerProfileAudienceRequestItem()  # noqa: E501
        # NOTE(review): both branches currently build identical instances -
        # every field passed here appears to be required for this model.
        if include_optional :
            return CustomerProfileAudienceRequestItem(
                action = 'add', 
                profile_integration_id = 'R195412', 
                audience_id = 748
            )
        else :
            return CustomerProfileAudienceRequestItem(
                action = 'add',
                profile_integration_id = 'R195412',
                audience_id = 748,
        )

    def testCustomerProfileAudienceRequestItem(self):
        """Test CustomerProfileAudienceRequestItem"""
        # Smoke test: construction must succeed in both modes.
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
    # Allow running this generated test module directly.
    unittest.main()
| [
"noreply@github.com"
] | talon-one.noreply@github.com |
50d23df1eb5403f0531c6bb52eeeebebeb32a255 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/eliben_deep-learning-samples/deep-learning-samples-master/cs231n/linear_svm.py | 13629f57fb24c4ac3ac5f3ed95a8e21653dad6e9 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 4,590 | py | # Linear SVM classifier.
# See http://cs231n.github.io/classification/ for background.
# And http://cs231n.github.io/optimization-1/ for the gradient parts.
import numpy as np
import random
import linear_classifier
def svm_loss_naive(W, X, y, reg):
"""Structured SVM loss function, naive implementation (with loops).
Important dimensions: K is number of classes we classify samples to. D is
the dimensionality of data (for example, 32x32x3 images have D=3072). Note
that bias is often folded into the sample as "1", so the actual
dimensionality may be +1 (or 3073 for those images).
N is simply the number of samples we're working with.
This function uses a delta value of 1.
Inputs:
- W: K x D array of weights.
- X: D x N array of data. Each datum is a (D-dimensional) column.
- y: 1-dimensional array of length N with labels 0...K-1, for K classes.
y[i] is the correct classification of sample i.
- reg: (float) regularization strength
Returns a tuple of:
- loss as single float
- gradient with respect to weights W; an array of same shape as W
"""
delta = 1
dW = np.zeros(W.shape) # initialize the gradient as zero
# compute the loss and the gradient
K = W.shape[0]
N = X.shape[1]
loss = 0.0
for i in xrange(N):
# Compute the loss for this sample.
# The equation is:
#
# Li = Sum_{j!=yi} max(0, wj*xi - wyi*xi + delta)
#
# We use W * Xi to find both wj*xi and wyi*xi, so we just index into
# the result to find these distinct parts.
#
# X[:, i] is the ith column of X. scores now has the shape K x 1
scores = W.dot(X[:, i])
# wyi*xi is not changing in the sigma (internal loop), so precompute it.
correct_class_score = scores[y[i]]
# This computes the sigma.
for j in xrange(K):
margin = scores[j] - correct_class_score + delta
if j == y[i]:
continue
if margin > 0:
loss += margin
# The gradient is only updated when margin > 0.
dW[j, :] += X[:, i]
dW[y[i], :] -= X[:, i]
# Average the loss over N samples and add regularization.
loss = (loss / N) + 0.5 * reg * np.sum(W * W)
# Same for gradient.
dW = (dW / N) + reg * W
return loss, dW
def svm_loss_vectorized(W, X, y, reg):
"""Structured SVM loss function, vectorized implementation.
Inputs and outputs are the same as svm_loss_naive.
"""
N = X.shape[1]
delta = 1
# scores's shape is (K, N): contains scores for all N samples, in columns.
scores = W.dot(X)
# We want to select the score of the correct class for every sample. Samples
# are in columns. y[i] gives, for sample i, the correct class. Therefore
# we need to index into every column at the appropriate y[i].
# The result is a (N,) vector.
correct_class_scores = scores[y, np.arange(N)]
# Vectorized sum for all samples. This computes the sigma for all Li.
# scores is (K,N), correct_class_scores is (N,) so it's broadcast over each
# row of scores.
# The shape remains (K,N) since it contains the score per class for each
# sample.
si = scores - correct_class_scores + delta
# Sum all class scores for each sample into a total "loss per sample".
# clip performs the max(0,...) operation.
s = si.clip(min=0).sum(axis=0)
# The sum was supposed to ignore the category with the correct score. But
# for j=yi, the summed element is just max(0, delta), so we subtract delta
# from the sums.
s -= delta
# Finally compute the average loss with regularization.
loss = np.mean(s) + 0.5 * reg * np.sum(W * W)
# To compute the vectorized gradient, create a (K,N) array of indicators
# where each cell is the gradient contribution to the row's class from the
# column's sample.
indicators = np.zeros(scores.shape)
# This is for all dW_j
indicators[si > 0] = 1
# For dW_yi, subtract the number of positive indicators
num_positive = np.sum(si > 0, axis=0)
indicators[y, np.arange(N)] -= num_positive
# Finally, indicators * X.T will give use the result
dW = indicators.dot(X.T) / N + reg * W
return loss, dW
class LinearSVM(linear_classifier.LinearClassifier):
""" A subclass that uses the Multiclass SVM loss function """
def loss(self, X_batch, y_batch, reg):
return svm_loss_vectorized(self.W, X_batch, y_batch, reg)
| [
"659338505@qq.com"
] | 659338505@qq.com |
67d6bd91027cc51bd4be86eb9ac25c2d6f6ded2f | ad25a84cac4eb9c0bfaf81c9172d4ed3864b3f7a | /fintech/fintech/pipelines.py | 0228db3d8d199e6f9a2959e0ee1cf30a3062ab1a | [] | no_license | kishanpython/Let-Scrape-It | 69b3ea756e95dc0d81074d671ff77197f072dbe3 | f4c98a4ba9a5865831f228547a43a2787a5709bd | refs/heads/main | 2023-04-30T18:22:41.872076 | 2021-05-24T15:21:03 | 2021-05-24T15:21:03 | 367,649,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from scrapy.pipelines.images import ImagesPipeline
class CustomImagePipeline(ImagesPipeline):
    """Image pipeline that stores each downloaded image inside a directory
    named after the file's stem.

    e.g. https://australianfintech.com.au/wp-content/uploads/sites/7/2020/08/Adatree-2021.png
         -> 'Adatree-2021/Adatree-2021.png'
    """

    def file_path(self, request, response=None, info=None, *, item=None):
        # The last URL path segment is the file name; its stem (text before
        # the first dot) names the per-image storage directory.
        image_name = request.url.split('/')[-1]
        stem = image_name.split('.')[0]
        return f'{stem}/{image_name}'
"kishanyadav3223@gmail.com"
] | kishanyadav3223@gmail.com |
39798be77a8da68cbe120c29e855228e768f59a8 | a430ecc74572bf6c7ff0ece3eef6a737a7b615a3 | /Code/wanfang分布式 2/build/lib/wanfang/pipelines.py | 409d033998c5fb8aa2fc125a2de2cc764d198687 | [] | no_license | wangxuyongkang/chengxuyuanhh | 084e3ab086499a7fc18a477f0ef6234fa1eea152 | 15e1322dcc36de4f1d1e467525761746cadb58fa | refs/heads/master | 2022-12-26T00:46:10.693454 | 2019-12-25T12:16:40 | 2019-12-25T12:16:40 | 230,065,182 | 1 | 0 | null | 2022-12-11T18:23:10 | 2019-12-25T07:54:47 | HTML | UTF-8 | Python | false | false | 1,322 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
import pymysql
class WanfangPipeline(object):
def __init__(self):
# self.mysql_client = pymysql.Connect(
# '127.0.0.1','root','ljh1314',
# 'wanwang',charset='utf8',
# )
self.mysql_client = pymysql.Connect(
'rm-2zet9m2x33kh23506o.mysql.rds.aliyuncs.com','bylwdata',
're(r02fdkW+ny9dsOD^','paper_data', charset='utf8',
)
self.cursor = self.mysql_client.cursor()
def process_item(self, item, spider):
print('1111111111111111111111111111111111')
insertSql,insertData = item.get_sql_data(dict(item))
try:
self.cursor.execute(insertSql,insertData)
self.mysql_client.commit()
except Exception as err:
print(err)
self.mysql_client.rollback()
return item
# data = json.dumps(dict(item),ensure_ascii=False)+'\n'
# with open('tech.json','a+') as filehandler:
# filehandler.write(data)
# return item
def close_spider(self,spider0):
self.mysql_client.close()
self.cursor.close()
| [
"2238366634@qq.com"
] | 2238366634@qq.com |
9bef8c1994469795e37ace2d29a3aea6bf4b167e | bbec348efb79c6588a4cb6bb565c813fe3fe86ad | /pyVpx/sample/vmPrincipalUser.py | 81641a033a522bad50da1693ac19d07f31bcbede | [] | no_license | free-Zen/pvc | 2be60fdc0fd0345039219c802223f987fce3b113 | 8428a84481be319ae739dfbb87715f31810138d9 | refs/heads/master | 2022-02-24T12:13:31.599398 | 2019-10-14T07:49:13 | 2019-10-14T07:49:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,691 | py | #!/usr/bin/python
"""
Simple command-line program for getting/setting the vmPrincipalUser
"""
from pyVim.connect import Connect
from pyVim import host
from optparse import OptionParser
def GetOptionsAndArgs():
"""
Supports the command-line arguments listed below.
"""
parser = OptionParser()
parser.add_option("--host",
default="localhost",
help="remote host to connect to")
parser.add_option("-u", "--user",
default="root",
help="User name to use when connecting to hostd")
parser.add_option("-p", "--password", "--pwd",
default="ca$hc0w",
help="Password to use when connecting to hostd")
(options, args) = parser.parse_args()
return (options, args)
def main():
"""
Simple command-line program for getting/setting the vmPrincipalUser
"""
(options, args) = GetOptionsAndArgs()
serviceInstance = Connect(host=options.host,
user=options.user,
pwd=options.password)
hostSystem = host.GetHostSystem(serviceInstance)
hostConfig = hostSystem.GetConfig()
hostConfigManager = hostSystem.GetConfigManager()
datastoreSystem = hostConfigManager.GetDatastoreSystem()
vmPrincipalUser = hostConfig.datastorePrincipal
print "vmPrincipalUser is \"%s\"" % vmPrincipalUser
if len(args) == 1:
newVmPrincipalUser = args[0]
print "Changing vmPrincipalUser to \"%s\"" % newVmPrincipalUser
datastoreSystem.ConfigureDatastorePrincipal(userName=newVmPrincipalUser)
# Start program
if __name__ == "__main__":
main()
| [
"liuzhen@vmware.com"
] | liuzhen@vmware.com |
9fe9f1c5f135c8b98d7329f1d2ba8497aa831cc7 | 8b5fe2b82b2323a1329d22a131b50ff3f6be7ef3 | /xos/tosca/resources/service.py | 5a57418f585d79fd951e9a6a3a5f36be66b16c13 | [
"Apache-2.0"
] | permissive | nareshblu/xos | 822c33d8cf5054bd7eb062f97565f1b56b1d539d | e39ec4850f3dc066c5b02b9700a10f211ef4dc2a | refs/heads/master | 2020-12-25T05:02:41.270079 | 2016-06-27T09:22:20 | 2016-06-27T09:22:20 | 62,042,102 | 1 | 0 | null | 2016-06-27T09:13:11 | 2016-06-27T09:13:10 | null | UTF-8 | Python | false | false | 1,702 | py | import os
import pdb
import sys
import tempfile
sys.path.append("/opt/tosca")
from translator.toscalib.tosca_template import ToscaTemplate
import pdb
from core.models import Service,User,CoarseTenant,AddressPool
from xosresource import XOSResource
class XOSService(XOSResource):
provides = "tosca.nodes.Service"
xos_model = Service
copyin_props = ["view_url", "icon_url", "kind", "enabled", "published", "public_key", "private_key_fn", "versionNumber"]
def postprocess(self, obj):
for provider_service_name in self.get_requirements("tosca.relationships.TenantOfService"):
provider_service = self.get_xos_object(Service, name=provider_service_name)
existing_tenancy = CoarseTenant.get_tenant_objects().filter(provider_service = provider_service, subscriber_service = obj)
if existing_tenancy:
self.info("Tenancy relationship from %s to %s already exists" % (str(obj), str(provider_service)))
else:
tenancy = CoarseTenant(provider_service = provider_service,
subscriber_service = obj)
tenancy.save()
self.info("Created Tenancy relationship from %s to %s" % (str(obj), str(provider_service)))
for ap_name in self.get_requirements("tosca.relationships.ProvidesAddresses"):
ap = self.get_xos_object(AddressPool, name=ap_name)
ap.service = obj
ap.save()
def can_delete(self, obj):
if obj.slices.exists():
self.info("Service %s has active slices; skipping delete" % obj.name)
return False
return super(XOSService, self).can_delete(obj)
| [
"smbaker@gmail.com"
] | smbaker@gmail.com |
bf197e793718ee1c57d11ae875b3a0a74cd80441 | d9a490dc36da08051b2685489a8e6af3d29fa903 | /example7_4.py | 2e02840bb73f5f5f1be770939048cbd8cbb76e35 | [] | no_license | freephys/numeric-for-engineer | 403679c3f055164bf8b7097c360ad8bfc2cb9978 | a98d318e8cdff679cc02a575d32840fa87a4717d | refs/heads/master | 2020-04-16T01:33:43.530839 | 2009-11-28T18:42:12 | 2009-11-28T18:42:12 | 388,559 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | ## example7_4
from numarray import array,zeros,Float64
from printSoln import *
from run_kut4 import *
def F(x,y):
F = zeros((2),type=Float64)
F[0] = y[1]
F[1] = -0.1*y[1] - x
return F
x = 0.0 # Start of integration
xStop = 2.0 # End of integration
y = array([0.0, 1.0]) # Initial values of {y}
h = 0.25 # Step size
freq = 1 # Printout frequency
X,Y = integrate(F,x,y,xStop,h)
printSoln(X,Y,freq)
raw_input("Press return to exit")
| [
"freephys@gmail.com"
] | freephys@gmail.com |
68bb03315877a635672ed4e0a62821abdfd375b6 | a6d2727d76b0fec48848cab8f4965ecd594cc8f7 | /Learning_Python_part1/lettersinstring.py | 7ef73273e04daefbfee5a250226edaa6fe3e4492 | [] | no_license | ccsreenidhin/Learning_Python_Part1 | b31adbcd4c27f97f90ec40c8c5eacf58b9e12a7c | 3d92400ada3bb0c07a1da1e6ef4a5573d3b6ce08 | refs/heads/master | 2022-10-15T13:31:58.396233 | 2017-04-17T09:35:00 | 2020-06-11T11:46:07 | 271,532,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | str=raw_input("enter the line: ")
tmem=len(str)
i=tmem-1
print(tmem)
while i!=0:
if str[i] == ' ':
tmem-=1
i-=1
print("no of letters: %d", tmem)
| [
"ccsreenidhin@gmail.com"
] | ccsreenidhin@gmail.com |
997850d4abeb537cbb1abef68c48e6fb6a3fd59c | 517d461257edd1d6b239200b931c6c001b99f6da | /Circuit_Playground/CircuitPython/libraries/adafruit-circuitpython-bundle-6.x-mpy-20211013/examples/lis331_low_pass_filter.py | e2d0c1fba73d83b2ef9aa8cd0158519d5be8c8a5 | [] | no_license | cmontalvo251/Microcontrollers | 7911e173badff93fc29e52fbdce287aab1314608 | 09ff976f2ee042b9182fb5a732978225561d151a | refs/heads/master | 2023-06-23T16:35:51.940859 | 2023-06-16T19:29:30 | 2023-06-16T19:29:30 | 229,314,291 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
import time
import board
from adafruit_lis331 import LIS331HH, Rate, Frequency
i2c = board.I2C() # uses board.SCL and board.SDA
# un-comment the sensor you are using
# lis = H3LIS331(i2c)
lis = LIS331HH(i2c)
# `data_rate` must be a `LOWPOWER` rate to use the low-pass filter
lis.data_rate = Rate.RATE_LOWPOWER_10_HZ
# next set the cutoff frequency. Anything changing faster than
# the specified frequency will be filtered out
lis.lpf_cutoff = Frequency.FREQ_74_HZ
# Once you've seen the filter do its thing, you can comment out the
# lines above to use the default data rate without the low pass filter
# and see the difference it makes
while True:
print(lis.acceleration) # plotter friendly printing
time.sleep(0.002)
| [
"cmontalvo@southalabama.edu"
] | cmontalvo@southalabama.edu |
8011b0c3f8cec9359b3077eb0f429e5acbdcbc2e | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p2DJ/New/program/qiskit/noisy/startQiskit_noisy275.py | 17927721fcc43ec618fafeb52df84b209728b72f | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,304 | py | # qubit number=2
# total number=17
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.z(input_qubit[1]) # number=16
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
#for i in range(n):
# prog.measure(input_qubit[i], classicals[i])
prog.y(input_qubit[1]) # number=2
prog.y(input_qubit[1]) # number=4
prog.y(input_qubit[1]) # number=3
prog.h(input_qubit[0]) # number=13
prog.cz(input_qubit[1],input_qubit[0]) # number=14
prog.h(input_qubit[0]) # number=15
prog.x(input_qubit[0]) # number=8
prog.cx(input_qubit[1],input_qubit[0]) # number=9
prog.cx(input_qubit[1],input_qubit[0]) # number=10
prog.x(input_qubit[0]) # number=11
prog.cx(input_qubit[1],input_qubit[0]) # number=12
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
sample_shot =2800
backend = FakeVigo()
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
circuit1.measure_all()
prog = circuit1
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
writefile = open("../data/startQiskit_noisy275.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
45ecd3e371af713635f538969fcab20f1985be98 | 9699577e6ac81ead34b809e2551d3bf1a25d198c | /merge_test/demo.py | 28cada470095ea3136aa9692c327688bc1379f9c | [] | no_license | zolcsika71/CodeInGames_PY | b8efbb7206e9872636ac419e3ac56449a473a573 | d127e736451326176973d24b1261fc1d2ab6e5ae | refs/heads/main | 2023-06-13T02:06:59.464269 | 2021-07-06T14:44:34 | 2021-07-06T14:44:34 | 335,744,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # Normal library imports
# NOTE: imports declared in imported functions need to be
# manually added here for now...
import sys
import math
import time
# etc...
# IMPORT
from lib.functions.Util_ import *
from lib.functions.Second_ import *
from lib.functions.First_ import *
# END_IMPORT
def run():
init_t = time.time()
first_func()
debug_time(f"Time:", init_t, time.time())
run()
| [
"bushdoctor2008@gmail.com"
] | bushdoctor2008@gmail.com |
7a61ac1527f4a58cfe8d094d86da082483f7743f | d01055a7e08b158b65d1fb39d92e529692956537 | /python/moneysocket/message/__init__.py | 4e4c8cd3e7a037aa3510ec03163b13363e11b11f | [
"MIT"
] | permissive | moneysocket/prototype | 8da14038ce026ab03c5f9723882bcfbf0830f064 | c0c3cbf7f3176c0e9ff9d77a6941b53a9a5edae1 | refs/heads/master | 2023-02-16T16:46:26.253371 | 2021-01-18T18:37:18 | 2021-01-18T18:37:18 | 285,721,497 | 5 | 3 | MIT | 2021-01-18T18:37:19 | 2020-08-07T02:48:59 | JavaScript | UTF-8 | Python | false | false | 369 | py | # Copyright (c) 2020 Jarret Dyrbye
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php
# import to register subclasses with superclass
from moneysocket.message.request.request import MoneysocketRequest
from moneysocket.message.notification.notification import MoneysocketNotification
| [
"jarret.dyrbye@gmail.com"
] | jarret.dyrbye@gmail.com |
0ab5347140a40a59b7d085d88915dcb29f170f9b | 91d7987874dcfa0d8dbbd9a3a3831ed9b67691f8 | /IPProxyPool/IPProxy.py | 27e2b4afa3c1e65f191e47ab59be4b1694f1769a | [] | no_license | lsz1995/amazon | c9388cc78f8465804b53e8759940ebc9625cbdd6 | e648ff21f642632e30925ffab1d3a4608eb201ca | refs/heads/master | 2020-03-19T05:55:23.726649 | 2018-06-04T05:31:22 | 2018-06-04T05:31:22 | 135,974,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | # coding:utf-8
from validator.Validator import validator, getMyIP
from multiprocessing import Value, Queue, Process
from api.apiServer import start_api_server
from db.DataStore import store_data
from spider.ProxyCrawl import startProxyCrawl
from config import TASK_QUEUE_SIZE
if __name__ == "__main__":
myip = getMyIP()
DB_PROXY_NUM = Value('i', 0)
q1 = Queue(maxsize=TASK_QUEUE_SIZE)
q2 = Queue()
p0 = Process(target=start_api_server)
p1 = Process(target=startProxyCrawl, args=(q1, DB_PROXY_NUM,myip))
p2 = Process(target=validator, args=(q1, q2, myip))
p3 = Process(target=store_data, args=(q2, DB_PROXY_NUM))
p0.start()
p1.start()
p2.start()
p3.start()
p0.join()
p1.join()
p2.join()
p3.join()
| [
"qqlsz87@126.com"
] | qqlsz87@126.com |
91230769810c7749e457b9e839b5203295de47ec | 20e3010608e40a6ec5ea56f69d122a62182e4bdb | /arrays/list comprehension.py | d0f2f8a8aa771399df5c56918500ac6b0b22d8fd | [] | no_license | LarisaOvchinnikova/Python | ee65eac221cd03563d60110118175692564c5b2d | 9cc86a260828662995dec59a6d69528f96d37e79 | refs/heads/master | 2021-08-22T21:41:02.351589 | 2021-05-25T18:37:09 | 2021-05-25T18:37:09 | 253,842,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | arr = ['asff', 'fdlkdld', 's']
list_length = [len(word) for word in arr]
print(list_length)
arr = [1,3,4,5]
newarr = [str(el) for el in arr]
print(newarr)
even = [el for el in arr if el % 2 == 0]
print(even)
arr = ['sfsfj', 'xd', 'kfhfhjhfsh']
arr = [word.rjust(10) for word in arr] # like arr.map()
print(arr)
fib = [0, 1, 1, 2, 3, 5, 8]
fib = ", ".join([str(num) for num in fib])
print(fib)
x = [number for number in range(10) if number % 3 == 0]
print(x) # create array [0,3,6,9]
x = ["Even" if n % 2 == 0 else "Odd" for n in range(10)]
print(x)
# [-1, 2, -3, 4, -5...]
x = [number if number % 2 == 0 else -number for number in range(10)]
print(x)
x = ["hello", "world", "sky", "blue"]
x = [word.capitalize() for word in x]
print(x)
q = ['a', 'abc', 'aa', 'qwerty']
z = [len(word) for word in q]
print(z)
| [
"larisaplantation@gmail.com"
] | larisaplantation@gmail.com |
e9267182bff3276c1af4ca9613ccd0c11a75be5c | dbe7e1d9fe2457c26f83095d941e4392e7d30f8c | /django_dashboard/migrations/0022_auto_20180208_1621.py | d8135ce56ed3463b01f3f4b94d069029fc7a64ee | [
"MIT"
] | permissive | keepexploring/smartbiogas | 51e124735ec04bc6b87a8ac75c66c83de6865001 | ca663435b05666113e3c0cb55e6f087c61497208 | refs/heads/master | 2022-12-12T10:42:37.412038 | 2018-07-18T15:29:04 | 2018-07-18T15:29:04 | 111,402,799 | 0 | 0 | MIT | 2022-12-08T00:56:54 | 2017-11-20T11:39:05 | JavaScript | UTF-8 | Python | false | false | 551 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-02-08 16:21
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('django_dashboard', '0021_auto_20180208_1609'),
]
operations = [
migrations.RemoveField(
model_name='userdetail',
name='admin_role_in_companies',
),
migrations.RemoveField(
model_name='userdetail',
name='technican_role_in_companies',
),
]
| [
"joel.c@scene.community"
] | joel.c@scene.community |
a7af016bb84a1a55a8a81a27abaf108fe93f7e29 | 97afa34a118378827e579de9ab3ceced71488dfa | /dedupsqlfs/db/mysql/table/hash_sizes.py | 655ebc3f11315758306984e1a0fcc569b5f89868 | [
"MIT"
] | permissive | himadrisd/dedupsqlfs | 4d07dbd4f8f0294ce5352777525085cf80fc7c99 | 9a0e5ee3e2bfd1bc25f2e604d4f1fb584c43bffc | refs/heads/master | 2020-04-24T14:52:30.804688 | 2018-06-09T06:38:37 | 2018-06-09T06:38:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,929 | py | # -*- coding: utf8 -*-
__author__ = 'sergey'
from dedupsqlfs.db.mysql.table import Table
class TableHashSizes( Table ):
_table_name = "hash_sizes"
def create( self ):
cur = self.getCursor()
# Create table
cur.execute(
"CREATE TABLE IF NOT EXISTS `%s` (" % self.getName()+
"`hash_id` BIGINT UNSIGNED PRIMARY KEY, "+
"`writed_size` INT UNSIGNED NOT NULL, "+
"`compressed_size` INT UNSIGNED NOT NULL "+
")"+
self._getCreationAppendString()
)
return
def insert( self, hash_id, writed_size, compressed_size):
"""
:return: int
"""
self.startTimer()
cur = self.getCursor()
cur.execute(
"INSERT INTO `%s` " % self.getName()+
" (`hash_id`, `writed_size`, `compressed_size`) VALUES (%(id)s, %(ws)s, %(cs)s)",
{
"id": hash_id,
"ws": writed_size,
"cs": compressed_size
}
)
item = cur.lastrowid
self.stopTimer('insert')
return item
def update( self, hash_id, writed_size, compressed_size):
"""
:return: int
"""
self.startTimer()
cur = self.getCursor()
cur.execute(
"UPDATE `%s` " % self.getName() +
" SET `compressed_size`=%(cs)s, `writed_size`=%(ws)s WHERE `hash_id`=%(id)s",
{
"cs": compressed_size,
"ws": writed_size,
"id": hash_id
}
)
count = cur.rowcount
self.stopTimer('update')
return count
def get( self, hash_id):
"""
:param hash_id: int
:return: Row
"""
self.startTimer()
cur = self.getCursor()
cur.execute(
"SELECT * FROM `%s` " % self.getName()+
" WHERE `hash_id`=%(id)s",
{
"id": hash_id
}
)
item = cur.fetchone()
self.stopTimer('get')
return item
def remove_by_ids(self, id_str):
self.startTimer()
count = 0
if id_str:
cur = self.getCursor()
cur.execute("DELETE FROM `%s` " % self.getName()+
" WHERE `hash_id` IN (%s)" % (id_str,))
count = cur.rowcount
self.stopTimer('remove_by_ids')
return count
def get_sizes_by_hash_ids(self, id_str):
self.startTimer()
items = {}
if id_str:
cur = self.getCursor()
cur.execute("SELECT * FROM `%s` " % self.getName()+
" WHERE `hash_id` IN (%s)" % (id_str,))
for _i in cur:
items[ _i["hash_id"] ] = (_i["writed_size"], _i["compressed_size"],)
self.stopTimer('get_sizes_by_hash_ids')
return items
pass
| [
"sergey.dryabzhinsky@gmail.com"
] | sergey.dryabzhinsky@gmail.com |
17eeb3e8b9ed2045f7a91decd487c5618bbc831f | 7c9919126b96122c1a8c6353769e209d850e4564 | /bnk_asset/models/bnk_asset.py | d6f1eeee892c4bddeeb702d98e3e4a42e574a06d | [] | no_license | Duongnv-dev/hr | 8ee34c904d481a4d0f4182c3c6bfd6c28ef25ffe | 962e0edab5b824304f4a2b2dff23458135f94c3c | refs/heads/master | 2023-06-19T06:54:00.337453 | 2021-07-13T01:53:34 | 2021-07-13T01:53:34 | 385,439,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,223 | py | from odoo import models, fields, api, tools
from odoo.exceptions import ValidationError
import datetime
class Asset(models.Model):
_inherit = 'account.asset.asset'
user_id = fields.Many2one('res.users', 'User')
employee_id = fields.Many2one('hr.employee', 'Employee', readonly=True, track_visibility='onchange')
location_id = fields.Many2one('bnk.location', string="Asset Location", track_visibility='onchange')
asset_request_id = fields.Many2one('asset.request', string="Asset Request")
value = fields.Float(string='Gross Value', required=False, readonly=True, digits=0,
states={'draft': [('readonly', False)]}, oldname='purchase_value')
date = fields.Date(string='Date', required=False, readonly=True, states={'draft': [('readonly', False)]},
default=fields.Date.context_today, oldname="purchase_date")
date_first_depreciation = fields.Selection([
('last_day_period', 'Based on Last Day of Purchase Period'),
('manual', 'Manual')],
string='Depreciation Dates', default='manual',
readonly=True, states={'draft': [('readonly', False)]}, required=False,
help='The way to compute the date of the first depreciation.\n'
' * Based on last day of purchase period: The depreciation dates will be based on the last day of the purchase month or the purchase year (depending on the periodicity of the depreciations).\n'
' * Based on purchase date: The depreciation dates will be based on the purchase date.\n')
first_depreciation_manual_date = fields.Date(
string='First Depreciation Date', required=False,
readonly=True, states={'draft': [('readonly', False)]},
help='Note that this date does not alter the computation of the first journal entry in case of prorata temporis assets. It simply changes its accounting date'
)
maintenance_request_id = fields.One2many('maintenance.request','asset_id')
start_date = fields.Date(string='Warranty Date')
end_date = fields.Date(string='Warranty expires')
owner = fields.Many2one('res.partner', string="Asset Owner")
borrowed_asset = fields.Boolean(default = False, string="3rd party Asset")
id_asset = fields.Char('Asset Code', compute='cp_asset_id', store=True)
id_barcode = fields.Binary('Barcode', compute='compute_barcode', store=True)
@api.depends('category_id.code')
def cp_asset_id(self):
for s in self:
if not s.category_id:
continue
if s.id_asset:
code = s.category_id.code + '{:08}'.format(s.id)
if s.id_asset == code:
continue
s.id_asset = code
elif type(s.id) is int:
code = s.category_id.code + '{:08}'.format(s.id)
s.id_asset = code
@api.depends('id_asset')
def compute_barcode(self):
for s in self:
if not s.id_asset:
continue
barcode = s.env['tool.barcodez'].barcode(s.id_asset, False)
s.id_barcode = barcode
class ReportBarcodeXlsx(models.AbstractModel):
_name = "report.bnk_asset.export_barcode"
_inherit = 'report.report_xlsx.abstract'
def generate_xlsx_report(self, workbook, data, form):
data_ = []
for f in form:
data_.append([f.name, f.id_asset, f.id_barcode])
if not data_:
return
ws = workbook.add_worksheet('Barcode')
ws.set_column(0, 0, 25)
ws.set_column(1, 1, 15)
ws.set_column(1, 1, 50)
content_left = workbook.add_format({
'valign': 'vcenter',
'align': 'left',
'border': 1,
'font_name': 'Arial',
'font_size': 12,
'text_wrap': True,
'bold': True,
})
content_center = workbook.add_format({
'valign': 'vcenter',
'align': 'content',
'border': 1,
'font_name': 'Arial',
'font_size': 12,
'text_wrap': True,
'bold': True,
})
content_header = workbook.add_format({
'valign': 'vcenter',
'align': 'center',
'border': 0,
'font_name': 'Arial',
'font_size': 18,
'text_wrap': True,
'bold': True,
})
row = 2
ws.merge_range('B{}:C{}'.format(row, row+1), 'Barcode Asset', content_header)
row = row + 3
for d in data_:
if d[2]:
ws.merge_range('A{}:A{}'.format(row, row + 8), d[0], content_left)
ws.merge_range('B{}:B{}'.format(row, row + 8), d[1], content_left)
image = d[2]
read_file_obj = self.env['create.tempfile']
time = '{}'.format(datetime.datetime.now())
for t in time:
if t in ['-', ' ', ':', '.']:
time = time.replace(t, '_')
logo_data = read_file_obj.create_tempfile(image, '/barcode_{}_{}'.format(self._uid, time))
ws.merge_range('C{}:G{}'.format(row, row+8), '', content_center)
ws.insert_image('C{}:G{}'.format(row, row+8), logo_data, {'x_scale': 0.70, 'y_scale': 0.60})
row += 9
else:
ws.write('A{}'.format(row), d[0], content_left)
ws.write('B{}'.format(row), d[1], content_left)
ws.write('C{}'.format(row), '', content_center)
row += 1
class Location(models.Model):
_name = 'bnk.location'
_description = "BnK Asset Location"
_inherit = ['mail.thread', 'mail.activity.mixin']
name = fields.Char(string="Location", required="True", track_visibility='onchange')
asset_ids = fields.One2many('account.asset.asset', 'location_id', string="Asset Number",
track_visibility='onchange')
class AssetCategory(models.Model):
_inherit = 'account.asset.category'
code = fields.Char(required=True, track_visibility='onchange')
asset_ids = fields.One2many('account.asset.asset','category_id', string='Asset', track_visibility='onchange') | [
"duong.nguyen@bnksolution.com"
] | duong.nguyen@bnksolution.com |
c38434dd7cee1767e7a3f3f468998f69f33b280b | 82b495a208ebdeb71314961021fbfe767de57820 | /chapter-04/sample001.py | fc5521ad3b1c7679d112c56cf067ed031d795687 | [
"MIT"
] | permissive | krastin/pp-cs3.0 | 7c860794332e598aa74278972d5daa16853094f6 | 502be9aac2d84215db176864e443c219e5e26591 | refs/heads/master | 2020-05-28T02:23:58.131428 | 2019-11-13T13:06:08 | 2019-11-13T13:06:08 | 188,853,205 | 0 | 0 | MIT | 2019-11-13T13:06:09 | 2019-05-27T13:56:41 | Python | UTF-8 | Python | false | false | 780 | py | 'Aristotle'
'Isaac Newton'
''
print(len('Albert Einstein'))
print(len('123!'))
print(len(''))
print('Albert' + ' Einstein')
print("Alan Turing" + '')
print("" + 'Grace Hopper')
#print('NH' + 3) #error
#print(9 + ' planets') #error
print('Four score and ' + str(7) + ' years ago')
print(int('0'))
print(int("11"))
print(int('-324'))
print(float('-324'))
print(float("56.34"))
#print(int('a')) #error
#print(float('b')) #error
print('AT' * 5)
print(4 * '-')
print('GC' * 0)
print('TATATATA' * -3)
sequence = 'ATTGTCCCCC'
print(len(sequence))
new_sequence = sequence + 'GGCCTCCTGC'
print(new_sequence)
print(new_sequence * 2)
print("that's better")
print('She said, "That is better."')
print('She said, "That' + "'" + 's hard to read."')
print(len('\''))
print(len('it\'s'))
| [
"krastin@hashicorp.com"
] | krastin@hashicorp.com |
5376b409dc4406212c78d338e49ccd504a8234f1 | 546ef5cf37c46bb378b310004e812d24bda6cd9a | /code/load_radius.py | 680d7f9bc69b7dd2e7245d8bd699e64e7988a6ab | [
"MIT"
] | permissive | annayqho/SN2018gep | edf888ce0f7928f357d5093e558fdab2164845c9 | 93cd64a1aab326771199f9093339df5bc4eb8002 | refs/heads/master | 2021-07-11T19:45:24.867867 | 2020-06-19T02:40:47 | 2020-06-19T02:40:47 | 156,262,433 | 2 | 1 | null | 2019-10-15T00:42:08 | 2018-11-05T18:17:37 | Python | UTF-8 | Python | false | false | 458 | py | import numpy as np
def load_radius():
DATA_DIR = "/Users/annaho/Dropbox/Projects/Research/ZTF18abukavn/data"
dat = np.loadtxt("%s/physevol.dat" %DATA_DIR, dtype=str)
mjd = dat[:,0].astype(float)
mjd0 = 58370.1473
dt = mjd-mjd0
Rsun = 6.955E10
AU = 1.496e+13
rad = dat[:,2].astype(float) * AU # original units AU
lrad = np.abs(dat[:,3].astype(float)*AU)
urad = dat[:,4].astype(float)*AU
return dt, rad, lrad, urad
| [
"annayqho@gmail.com"
] | annayqho@gmail.com |
2def0ac500f2c316f8ce4f30ba725b89f691f571 | 9b1f196163b2489f5292712d82b0afc538ac8c49 | /setup.py | 2179d83b5c6923c3978b58fb9ff465d6d7c274b0 | [
"MIT"
] | permissive | furious-luke/django-main | 18728249e03730e5163cd806a56ba365910b7fc8 | 0d76ff93b0bfe5afb7615ee9fbc33e0de25ebc58 | refs/heads/master | 2021-01-19T22:10:58.651765 | 2017-04-23T02:29:32 | 2017-04-23T02:29:32 | 88,768,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | import os
from setuptools import setup, find_packages
setup(
name='django-main',
version='0.1',
author='Luke Hodkinson',
author_email='furious.luke@gmail.com',
maintainer='Luke Hodkinson',
maintainer_email='furious.luke@gmail.com',
url='https://github.com/furious-luke/django-main',
description='',
long_description=open(os.path.join(os.path.dirname(__file__), 'README.md')).read(),
classifiers = [
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
license='BSD',
packages=find_packages(),
include_package_data=True,
package_data={'': ['*.txt', '*.js', '*.html', '*.*']},
install_requires=[
'setuptools',
'django-jsdata'
],
zip_safe=False,
)
| [
"furious.luke@gmail.com"
] | furious.luke@gmail.com |
4436f6cf9699760cecaa788c5ab445d04c11ade2 | c741f04141784a2571d2d27d95e0d994e4584ab1 | /learning/py3/Python3 运算符/算术运算符.py | 815b51e6916feb9e81b4eea8f6d6cb77008b2dac | [] | no_license | haodonghui/python | bbdece136620bc6f787b4942d6e1760ed808afd4 | 365062ba54297c81093b7f378742e76d438658b7 | refs/heads/master | 2022-02-03T23:52:37.288503 | 2022-01-27T05:23:25 | 2022-01-27T05:23:25 | 191,729,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,348 | py | """
以下假设变量a为10,变量b为21:
运算符 描述 实例
+ 加 - 两个对象相加 a + b 输出结果 31
- 减 - 得到负数或是一个数减去另一个数 a - b 输出结果 -11
* 乘 - 两个数相乘或是返回一个被重复若干次的字符串 a * b 输出结果 210
/ 除 - x 除以 y b / a 输出结果 2.1
% 取模 - 返回除法的余数 b % a 输出结果 1
** 幂 - 返回x的y次幂 a**b 为10的21次方
// 取整除 - 向下取接近商的整数 9//2 输出4;-9//2 输出-5
"""
# !/usr/bin/python3
a = 21
b = 10
c = 0
c = a + b
print("1 - c 的值为:", c)
c = a - b
print("2 - c 的值为:", c)
c = a * b
print("3 - c 的值为:", c)
c = a / b
print("4 - c 的值为:", c)
c = a % b
print("5 - c 的值为:", c)
# 修改变量 a 、b 、c
a = 2
b = 3
c = a ** b
print("6 - c 的值为:", c)
a = 10
b = 5
c = a // b
print("7 - c 的值为:", c)
| [
"haodonghui@yestae.com"
] | haodonghui@yestae.com |
3c6bff3848f851830ac2218aa707f785c103dc38 | b7d642b29ec27f2b1402dd3e1428b534c2508f3f | /Python/Worksheets/Worksheet5/3.py | 2e0daf044f1f86b7203c68f945f670a635d1fade | [] | no_license | liuya2360/H2-Computing | 57b604d3807381d2777dd913491d23b4300caadd | 2472c22b09ce27d7be745851e27354cb1ba0f134 | refs/heads/master | 2021-07-12T01:54:07.470996 | 2020-08-13T11:21:00 | 2020-08-13T11:21:00 | 177,277,298 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,156 | py | class Trapezium():
def __init__(self, top, bottom, side1, side2 ,height):
self.top = top
self.bottom = bottom
self.height = height
self.side1 = side1
self.side2 = side2
def area(self):
return 0.5*(self.top+self.bottom)*height
def perimeter(self):
return self.top+self.bottom+self.side1+self.side2
class Parallelgram():
def __init__(self, side1, side2, height):
self.side1 = side1
self.side2 = side2
self.height = height
def area(self):
return self.side1*self.height
def perimeter(self):
return 2*(self.side1+self.side2)
class Kite():
def __init__(self, diagonal1, diagonal2):
self.diagonal1 = diagonal1
self.diagonal2 = diagonal2
def area(self):
return 0.5*(self.diagonal1*self.diagonal2)
def perimeter(self):
return 2*(self.diagonal1**2+self.diagonal2**2)**0.5
class Rhombus(Kite):
def __init__(self, diagonal1, diagonal2):
super().__init__(diagonal1,diagonal2)
class Rectangle(Parallelgram):
def __init__(self, length, breadth):
super().__init__(length, length, breadth)
class Square(Rectangle):
def __init__(self, length):
super().__init__(length, length)
x = Rectangle(1,1)
print(x.area())
| [
"liuya2360@gmail.com"
] | liuya2360@gmail.com |
26df3d7b2323591c5c7c1f4daea61c7bd7a1522f | 038af1bfd275530413a7b4e28bf0e40eddf632c6 | /parsifal/utils/test.py | 226950eb05d782901ec424349ec63921ab902b76 | [
"MIT"
] | permissive | vitorfs/parsifal | 5c5345ff75b48c5596977c8e0a9c4c537ed4726c | 68c3ce3623a210a9c649a27f9d21ae6130541ea9 | refs/heads/dev | 2023-05-24T16:34:31.899776 | 2022-08-14T16:30:06 | 2022-08-14T16:30:06 | 11,648,402 | 410 | 223 | MIT | 2023-05-22T10:47:20 | 2013-07-25T00:27:21 | Python | UTF-8 | Python | false | false | 508 | py | from django.conf import settings
from django.shortcuts import resolve_url
from django.utils.http import urlencode
def login_redirect_url(url):
    """Build the URL the login view redirects to, for use as the
    "expected_url" param of the test case assertRedirects.

    :param url: Model instance, url pattern, url
    :return: String in the format "/login/?next=%2Fabout%2F"
    """
    query = urlencode({"next": resolve_url(url)})
    return "{}?{}".format(resolve_url(settings.LOGIN_URL), query)
| [
"vitorfs@gmail.com"
] | vitorfs@gmail.com |
a7c85eac0f99c02323cd077c689485f336824451 | fb5d2c4c76b311871b23c1d7266f074d4a709ef6 | /plotting/plot_w0omegak.py | 23a1a7798ee11bc52f9e102ca6f4e4e46a21d864 | [
"AFL-3.0"
] | permissive | philbull/RadioFisher | 50be8d49d7bdde2712bd35682a359c43f22e3a28 | fe25f969de9a700c5697168ba9e0d2645c55ed81 | refs/heads/master | 2023-01-20T01:27:39.982180 | 2020-11-24T07:44:51 | 2020-11-24T07:44:51 | 315,553,003 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,637 | py | #!/usr/bin/python
"""
Plot 2D constraints on (w0, wa).
"""
import numpy as np
import pylab as P
from rfwrapper import rf
import matplotlib.patches
import matplotlib.cm
import matplotlib.ticker
from units import *
from mpi4py import MPI
import os
import euclid
# Experiment selection and fiducial cosmology for the w0-Omega_K plot.
cosmo = rf.experiments.cosmo

names = ["cNEWexptL", "cNEW2exptL"] #"cexptL", "iexptM"] #, "exptS"]
#colours = ['#CC0000', '#ED5F21', '#FAE300', '#5B9C0A', '#1619A1', '#56129F', '#990A9C', 'y']
#colours = ['#CC0000', '#5B9C0A', '#1619A1', '#990A9C', '#FAE300']
#labels = ['Behemoth', 'Mature', 'Snapshot']
labels = ['SKAMREF2COMP', 'SKAMREF2']

# FIXME
# NOTE(review): the two assignments below override the names/labels above.
names = ["SKA1SUR", "cSKA1MID",] # "SKA1MID"]
labels = ["SKA1-SUR (Dish)", "SKA1-MID (Combined)"] #, "SKA1-MID (Dish)"]
"""
names = ['SKA1MID190', 'SKA1MID250', 'SKA1MID350',
'SKA1MID190oHI9', 'SKA1MID250oHI9', 'SKA1MID350oHI9',
'SKA1MID350oHI9-numax1150', 'SKA1MID350oHI9-numax1150-dnu800']
labels = ['SKA1MID190', 'SKA1MID250', 'SKA1MID350',
'SKA1MID190oHI9', 'SKA1MID250oHI9', 'SKA1MID350oHI9',
'SKA1MID350oHI9-numax1150', 'SKA1MID350oHI9-numax1150-dnu800']
"""
# One [edge colour, fill colour] pair per experiment.
colours = [ ['#CC0000', '#F09B9B'],
            ['#1619A1', '#B1C9FD'],
            ['#5B9C0A', '#BAE484'],
            ['#5B9C0A', '#BAE484'],
            ['#5B9C0A', '#BAE484'],
            ['#5B9C0A', '#BAE484'],
            ['#5B9C0A', '#BAE484'],
            ['#5B9C0A', '#BAE484'] ]

# Precompute background cosmology functions; names suggest H(z), r(z),
# D(z), f(z) -- TODO confirm against rf.precompute_for_fisher.
cosmo_fns, cosmo = rf.precompute_for_fisher(rf.experiments.cosmo, "camb/rf_matterpower.dat")
H, r, D, f = cosmo_fns
# Fiducial value and plotting
# For each experiment: load its Fisher matrices, add the Planck prior and
# draw the (Omega_K, w0) confidence ellipses.
# NOTE(review): Python 2 script (print statements); indentation reconstructed.
fig = P.figure()
ax = fig.add_subplot(111)

_k = range(len(names))[::-1]  # reversed so the first experiment is drawn last (on top)
for k in _k:
    root = "output/" + names[k]

    # Load cosmo fns.
    dat = np.atleast_2d( np.genfromtxt(root+"-cosmofns-zc.dat") ).T
    zc, Hc, dAc, Dc, fc = dat
    zs, Hs, dAs, Ds, fs = np.genfromtxt(root+"-cosmofns-smooth.dat").T
    kc = np.genfromtxt(root+"-fisher-kc.dat").T

    # Load Fisher matrices as fn. of z
    Nbins = zc.size
    F_list = [np.genfromtxt(root+"-fisher-full-%d.dat" % i) for i in range(Nbins)]

    # EOS FISHER MATRIX
    # Actually, (aperp, apar) are (D_A, H)
    pnames = ['A', 'b_HI', 'Tb', 'sigma_NL', 'sigma8', 'n_s', 'f', 'aperp', 'apar',
              'omegak', 'omegaDE', 'w0', 'wa', 'h', 'gamma', 'Mnu']
    pnames += ["pk%d" % i for i in range(kc.size)]
    zfns = [1,]
    excl = [2,4,5, 6,7,8, 12, 14,15] # wa fixed
    excl += [i for i in range(len(pnames)) if "pk" in pnames[i]]
    F, lbls = rf.combined_fisher_matrix( F_list,
                                         expand=zfns, names=pnames,
                                         exclude=excl )
    # Add Planck prior
    Fpl = euclid.add_planck_prior(F, lbls, info=False)

    print "-"*50
    print names[k]
    print "-"*50

    # Invert matrices
    pw0 = rf.indexes_for_sampled_fns(5, zc.size, zfns) #4, for omegak=fixed
    pok = rf.indexes_for_sampled_fns(3, zc.size, zfns) #5
    cov_pl = np.linalg.inv(Fpl)
    print lbls[pw0], lbls[pok]

    fom = rf.figure_of_merit(pw0, pok, None, cov=cov_pl)
    print "%s: FOM = %3.2f" % (names[k], fom)

    # Fiducial point: (Omega_K, w0) = (0, fiducial w0).
    y = rf.experiments.cosmo['w0']
    x = 0.

    # Plot contours for w0, omega_k; wa fixed
    w, h, ang, alpha = rf.ellipse_for_fisher_params(pok, pw0, None, Finv=cov_pl)
    # kk in [1,0] draws the fill colour first, then the (smaller) edge-colour
    # ellipse on top of it.
    ellipses = [matplotlib.patches.Ellipse(xy=(x, y), width=alpha[kk]*w,
                height=alpha[kk]*h, angle=ang, fc=colours[k][kk],
                ec=colours[k][0], lw=1.5, alpha=1.) for kk in [1,0]]
    for e in ellipses: ax.add_patch(e)

    # Centroid
    ax.plot(x, y, 'kx')

# Legend
labels = [labels[k] + " + Planck" for k in range(len(labels))]
lines = [ matplotlib.lines.Line2D([0.,], [0.,], lw=8.5, color=colours[k][0], alpha=0.65) for k in range(len(labels))]
P.gcf().legend((l for l in lines), (name for name in labels), loc='upper right', prop={'size':'x-large'})
# Axis tick/label styling, axis limits, and figure output.
fontsize = 20
for tick in ax.xaxis.get_major_ticks():
    tick.label1.set_fontsize(fontsize)
for tick in ax.yaxis.get_major_ticks():
    tick.label1.set_fontsize(fontsize)

xminorLocator = matplotlib.ticker.MultipleLocator(0.1)
yminorLocator = matplotlib.ticker.MultipleLocator(0.5)
ax.xaxis.set_minor_locator(xminorLocator)
ax.yaxis.set_minor_locator(yminorLocator)

ax.set_ylabel(r"$w_0$", fontdict={'fontsize':'20'})
ax.set_xlabel(r"$\Omega_K$", fontdict={'fontsize':'20'})
ax.set_ylim((-1.25, -0.75))
ax.set_xlim((-0.015, 0.015))
P.tight_layout()

# Set size and save
P.gcf().set_size_inches(16.5,10.5)
#P.savefig('mario-pub-w0wa-okfixed.png', dpi=150) #100
P.savefig('mario-pub-w0ok.png', dpi=100) #100
P.show()
| [
"philbull@gmail.com"
] | philbull@gmail.com |
e08860a1e1569a8e0fdbbedd335f8be4123f0957 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/linetools_linetools/linetools-master/linetools/lists/tests/test_use_linelist.py | d48184f46bb64b6636db97a598702da9b38bbb38 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 4,700 | py | # Module to run tests on Generating a LineList
# Also tests some simple functionality
from __future__ import (print_function, absolute_import, division,
unicode_literals)
# TEST_UNICODE_LITERALS
import os, pdb
import pytest
import astropy.io.ascii as ascii
from astropy import units as u
from astropy.table import QTable
import numpy as np
from linetools.lists.linelist import LineList
#import pdb
#pdb.set_trace()
# ISM LineList
def test_lines_from_ion():
ism = LineList('ISM')
#
lines = ism[(6,2)]
assert (1334.5323*u.AA in lines['wrest'])
def test_subset():
ism = LineList('ISM')
subset = np.array([1215.6700, 1608.4511])*u.AA
#pytest.set_trace()
ism = ism.subset_lines(subset)
assert len(ism._data) == 2
np.testing.assert_allclose(ism['FeII 1608']['wrest'], 1608.4511*u.AA, rtol=1e-7)
# Now with names
ism = LineList('ISM')
subset = ['HI 1215', 'HI 1025', 'CIV 1548']
ism = ism.subset_lines(subset)
np.testing.assert_allclose(ism['HI 1215']['wrest'], 1215.6700*u.AA, rtol=1e-7)
def test_closest():
ism = LineList('ISM')
ism.closest=True
#
line = ism[1250.584*u.AA]
np.testing.assert_allclose(line['wrest'], 1250.578*u.AA, rtol=1e-7)
def test_all_transitions():
error_msg = 'Something is wrong in all_transitions()'
ism = LineList('ISM')
#check simple case
line = 'OVI'
ovi_transitions = ism.all_transitions(line)
assert len(ovi_transitions) == 2, error_msg
#print(ovi_transitions['name'])
#check unknown
line = 'unknown'
out = ism.all_transitions(line)
assert type(out) == dict, error_msg
#check case of single transition ion
line = 'CIII'
out = ism.all_transitions(line)
assert type(out) == dict, error_msg
#check case of transitions from excited levels
line='FeII*'
out = ism.all_transitions(line)
assert len(out) == 27, "wrong line counts"
print(out)
# wrest
out = ism.all_transitions(1215.6700*u.AA)
assert len(out) == 30,"wrong line counts" # 30 Lyman series transitions
#print('test_all_transitions() passed')
h2 = LineList('H2')
line = 'B19-0P(1)'
out = h2.all_transitions(line)
assert len(out) == 7
def test_strongest_transitions():
error_msg = 'Something is wrong in strongest_transitions()'
ism = LineList('ISM')
wvlims = (1200,1800)*u.AA
z = 0.5
transitions = ism.strongest_transitions('HI',wvlims/(1+z),n_max=5)
assert len(transitions) == 5, error_msg
assert transitions[0]['name'] == 'HI 1025' , error_msg
assert isinstance(transitions,QTable), error_msg
wvlims = (1500,1700)*u.AA
z = 0.5
transitions = ism.strongest_transitions('HI',wvlims/(1+z),n_max=5)
assert isinstance(transitions,dict), error_msg #only Lyb should be available, so dict is expected
assert transitions['name'] == 'HI 1025'
wvlims = (1100,1200)*u.AA
z = 0.0
transitions = ism.strongest_transitions('HI',wvlims/(1+z),n_max=5)
assert transitions is None, error_msg
def test_available_transitions():
error_msg = 'Something is wrong in available_transitions()'
ism = LineList('ISM')
wvlims = (900,1800)*u.AA
z = 0.1
transitions = ism.available_transitions(wvlims/(1+z),n_max_tuple=5)
assert transitions[2]['name'] == 'HI 972' , error_msg
assert isinstance(transitions,QTable), error_msg
transitions = ism.available_transitions(wvlims/(1+z),n_max_tuple=2)
assert transitions[2]['name'] == 'CIII 977' , error_msg
wvlims = (1200,1800)*u.AA
z = 0.5
transitions = ism.available_transitions(wvlims/(1+z), n_max_tuple=2)
assert transitions[0]['name'] == 'HI 1025', error_msg
assert 'OVI 1031' in transitions['name'], error_msg
assert 'CIII 977' in transitions['name'], error_msg
wvlims = (1000,3000)*u.AA
z = 1.5
transitions = ism.available_transitions(wvlims/(1+z),n_max_tuple=2)
assert 'NeVIII 770' in transitions['name'], error_msg
assert 'MgX 609' in transitions['name'], error_msg
assert 'HI 1215' not in transitions['name'], error_msg
wvlims = (1215.6,1217)*u.AA
z = 0
transitions = ism.available_transitions(wvlims/(1+z),n_max_tuple=2)
assert isinstance(transitions,dict), error_msg
def test_sortdata():
error_msg = 'Something is wrong in sortdata()'
ism = LineList('ISM', sort_by='name')
assert ism.name[0] == 'AlII 1670', error_msg
ism.sortdata('name', reverse=True)
assert ism.name[0] == 'ZrIII 1798', error_msg
ism.sortdata(['abundance', 'rel_strength'], reverse=True)
assert ism.name[0] == 'HI 1215', error_msg
ism.sortdata(['rel_strength'])
assert ism.name[0] == 'CI** 1123b', error_msg
| [
"659338505@qq.com"
] | 659338505@qq.com |
641a62d84d5eeef5e1ab0a631f99c3f697d0f655 | d9aece850fb295c43c42cb907f200285c33e0087 | /aizu/lectures/algorithm_and_data_structure/reqursion_divide_and_conquer/the_number_of_inversions.py | 782fbda52c715aed907638705d9e1243fc15346f | [] | no_license | memicq/ProgrammingContestAnswers | 02a8f9ba76819cb04fb9934c6945a5de0735e6d4 | b679894cadcbab1b84037db6fb8fd496079cf63a | refs/heads/master | 2020-03-30T23:12:10.570349 | 2019-04-18T06:33:53 | 2019-04-18T06:33:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py | #! python3
# the_number_of_inversions.py
def merge(A, left, mid, right):
    """Merge the sorted runs A[left:mid] and A[mid:right] in place and
    return the number of inversions between the two runs."""
    inversions = 0
    # Copy each run and append a sentinel larger than any allowed element,
    # so neither index can run past the end of its run.
    left_run = A[left:mid] + [10e9 + 1]
    right_run = A[mid:right] + [10e9 + 1]
    li = ri = 0
    for pos in range(left, right):
        if left_run[li] <= right_run[ri]:
            A[pos] = left_run[li]
            li += 1
        else:
            # Every element still waiting in the left run (there are
            # len(left_run)-1-li of them, excluding the sentinel) is greater
            # than right_run[ri], so each forms one inversion with it.
            A[pos] = right_run[ri]
            ri += 1
            inversions += (len(left_run) - 1) - li
    return inversions
def merge_sort(A, left, right):
    """Sort A[left:right] in place and return its number of inversions."""
    if left + 1 < right:
        # Fix: use floor division instead of int((left + right) / 2).
        # True division goes through a float, which silently loses precision
        # for very large indices; // stays exact for integers.
        mid = (left + right) // 2
        v1 = merge_sort(A, left, mid)
        v2 = merge_sort(A, mid, right)
        v3 = merge(A, left, mid, right)
        return v1 + v2 + v3
    else:
        # A run of length 0 or 1 contains no inversions.
        return 0
# Driver: read n and the n space-separated integers, then print the
# inversion count (merge_sort also leaves A sorted as a side effect).
n = int(input())
A = list(map(int, input().split(' ')))
ans = merge_sort(A, 0, n)
print(ans)
| [
"jgr9ey2ug8@gmail.com"
] | jgr9ey2ug8@gmail.com |
60586dfcac559f5f73517d083e70bd20263b4f43 | f338eb32c45d8d5d002a84798a7df7bb0403b3c4 | /DQM/TrigXMonitorClient/python/test/l1t_scalerclient_cfg.py | bde19bf5e95e018995364d9df90c4801813ad974 | [] | permissive | wouf/cmssw | 0a8a8016e6bebc611f1277379e12bef130464afb | 60da16aec83a0fc016cca9e2a5ed0768ba3b161c | refs/heads/CMSSW_7_3_X | 2022-06-30T04:35:45.380754 | 2015-05-08T17:40:17 | 2015-05-08T17:40:17 | 463,028,972 | 0 | 0 | Apache-2.0 | 2022-02-24T06:05:30 | 2022-02-24T06:05:26 | null | UTF-8 | Python | false | false | 5,929 | py | import FWCore.ParameterSet.Config as cms
# CMSSW configuration: build the DQM process and attach its input source.
process = cms.Process("DQM")
process.load("DQMServices.Core.DQM_cfg")

### L1TGT unpacking
from DQM.L1TMonitor.L1TGT_unpack_cff import *
l1tgtpath = cms.Path(l1GtUnpack*l1GtEvmUnpack*cms.SequencePlaceholder("l1tgt"))

#-----------------------------
# DQM SOURCES
#-----------------------------
process.load("CondCore.DBCommon.CondDBSetup_cfi")
process.load("Configuration.StandardSequences.Geometry_cff")
process.load("Geometry.MuonCommonData.muonIdealGeometryXML_cfi")
process.load("DQM.TrigXMonitor.L1Scalers_cfi")
process.load("DQM.TrigXMonitorClient.L1TScalersClient_cfi")
process.load("DQMServices.Core.DQM_cfg")
process.load("DQMServices.Components.DQMEnvironment_cfi")

process.source = cms.Source("PoolSource",
#process.source = cms.Source("NewEventStreamFileReader",
    debugVerbosity = cms.untracked.uint32(1),
    # NOTE(review): "debugVebosity" looks like a typo of "debugVerbosity",
    # but it is a parameter name the framework reads -- left untouched.
    debugVebosity = cms.untracked.bool(True),
    fileNames = cms.untracked.vstring()
)
# from cff file
# Local /tmp copies are active; the original /store paths are kept commented.
process.PoolSource.fileNames = cms.untracked.vstring(
    'file:/tmp/wittich/0ECC6BE3-5F6F-DD11-A328-0019DB29C614.root',
    'file:/tmp/wittich/26FD1D12-5E6F-DD11-8C6B-000423D6CA42.root',
    'file:/tmp/wittich/3E414179-5E6F-DD11-B6DA-001617DBD472.root'
#    '/store/data/Commissioning08/HLTDebug/RAW/CRUZET4_v1/000/058/042/0ECC6BE3-5F6F-DD11-A328-0019DB29C614.root',
#    '/store/data/Commissioning08/HLTDebug/RAW/CRUZET4_v1/000/058/042/26FD1D12-5E6F-DD11-8C6B-000423D6CA42.root',
#    '/store/data/Commissioning08/HLTDebug/RAW/CRUZET4_v1/000/058/042/3E414179-5E6F-DD11-B6DA-001617DBD472.root',
#    '/store/data/Commissioning08/HLTDebug/RAW/CRUZET4_v1/000/058/042/4E825423-5F6F-DD11-A891-001617C3B778.root',
#    '/store/data/Commissioning08/HLTDebug/RAW/CRUZET4_v1/000/058/042/6C5C8A15-5D6F-DD11-9D27-000423D985E4.root',
#    '/store/data/Commissioning08/HLTDebug/RAW/CRUZET4_v1/000/058/042/7A04421E-5F6F-DD11-8681-000423D992A4.root',
#    '/store/data/Commissioning08/HLTDebug/RAW/CRUZET4_v1/000/058/042/7C3DC3B1-5F6F-DD11-8D31-001617C3B70E.root',
#    '/store/data/Commissioning08/HLTDebug/RAW/CRUZET4_v1/000/058/042/8684AADF-5D6F-DD11-9082-001617DBD332.root',
#    '/store/data/Commissioning08/HLTDebug/RAW/CRUZET4_v1/000/058/042/98F7037F-5D6F-DD11-88FE-000423D98DB4.root',
#    '/store/data/Commissioning08/HLTDebug/RAW/CRUZET4_v1/000/058/042/A8C09AA5-5D6F-DD11-9DC8-000423D6B358.root',
#    '/store/data/Commissioning08/HLTDebug/RAW/CRUZET4_v1/000/058/042/AC2B1871-5F6F-DD11-9949-001617C3B654.root',
#    '/store/data/Commissioning08/HLTDebug/RAW/CRUZET4_v1/000/058/042/B499AEAB-5E6F-DD11-A84B-001617C3B6C6.root',
#    '/store/data/Commissioning08/HLTDebug/RAW/CRUZET4_v1/000/058/042/CA095D4A-5E6F-DD11-A6DD-000423D6B48C.root',
#    '/store/data/Commissioning08/HLTDebug/RAW/CRUZET4_v1/000/058/042/E884D9E4-5D6F-DD11-88AD-000423D6C8E6.root',
#    '/store/data/Commissioning08/HLTDebug/RAW/CRUZET4_v1/000/058/042/EC646647-626F-DD11-A9AA-000423D98804.root'
)
process.maxEvents = cms.untracked.PSet(
#    input = cms.untracked.int32(-1)
    input = cms.untracked.int32(200)
)
process.ModuleWebRegistry = cms.Service("ModuleWebRegistry")
# Message logging, scheduling paths, DQM environment and HLT report.
process.MessageLogger = cms.Service("MessageLogger",
    # debugModules = cms.untracked.vstring('l1tsClient', 'l1ts', 'main_input'),
    debugModules = cms.untracked.vstring('*'),
    categories = cms.untracked.vstring('Status', 'Parameter'),
    noLineBreaks = cms.untracked.bool(True),
    destinations = cms.untracked.vstring('detailedInfo',
                                         'critical',
                                         'cout'),
    detailedInfo = cms.untracked.PSet(threshold = cms.untracked.string('DEBUG'),
                                      DEBUG=cms.untracked.PSet(limit=cms.untracked.int32(0)),
                                      Parameter=cms.untracked.PSet(limit=cms.untracked.int32(-1)),
                                      Status=cms.untracked.PSet(limit=cms.untracked.int32(-1)),
                                      Product=cms.untracked.PSet(limit=cms.untracked.int32(100)),
                                      FwkReport = cms.untracked.PSet(reportEvery = cms.untracked.int32(1000),
                                                                     limit = cms.untracked.int32(10000000)),
                                      ),
    critical = cms.untracked.PSet(threshold = cms.untracked.string('ERROR')),
    cout = cms.untracked.PSet(threshold = cms.untracked.string('WARNING'),
                              WARNING = cms.untracked.PSet(limit = cms.untracked.int32(0))),
    )
process.options = cms.untracked.PSet(
    wantSummary = cms.untracked.bool(True)
)
# Run the L1 scalers source and its client at end of event processing.
process.p3 = cms.EndPath(process.l1s+process.l1tsClient)
#process.p3 = cms.EndPath(process.l1s)
process.dqmWork = cms.Path(process.dqmEnv+process.dqmSaver)
process.l1s.l1GtData = cms.InputTag("hltGtDigis","","HLT")
###########################
### DQM Environment ###
###########################
process.DQMStore.verbose = 0
process.DQM.collectorHost = 'srv-c2d05-12'
process.DQM.collectorPort = 9190
process.dqmSaver.convention = 'Online'
process.dqmSaver.dirName = '.'
process.dqmEnv.subSystemFolder = 'L1T'
process.dqmSaver.producer = 'DQM'
process.dqmSaver.saveAtJobEnd = False
## HLT REPORT
process.hltrep = cms.EDAnalyzer("HLTrigReport")
process.hltrep.HLTriggerResults = cms.InputTag("TriggerResults","","HLT")
process.hltsum = cms.Path(process.hltrep)
| [
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
734eedcef3f7b42297bb0ec03bf4e1945ce02be8 | f698c042812612f94691ee2fafd8347e7dc9ed39 | /dashboard/models.py | 9317f757ebe7868bc4177bd5e18026bdd07b25b6 | [] | no_license | boyuan12/CustomEmail | b589454718b875d227d4e1f65d6e419a1d5b1c82 | a7a388c94b0fd812d89784ecd83a93f0875f17f6 | refs/heads/master | 2023-03-08T03:49:10.143793 | 2021-02-24T07:07:54 | 2021-02-24T07:07:54 | 341,792,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | from django.db import models
# Create your models here.
class Email(models.Model):
    """A stored email message with sender and recipient addresses."""
    # NOTE(review): stored as a string of up to 15 chars -- presumably a
    # stringified epoch timestamp; confirm against the code that writes it.
    timestamp = models.CharField(max_length=15)
    subject = models.CharField(max_length=255)
    body = models.TextField()
    from_email = models.EmailField()
    to_email = models.EmailField()
"boyuanliu6@yahoo.com"
] | boyuanliu6@yahoo.com |
76f7c48e61a150bca12e600c03b16bf1156a99cd | e916a80eba284b399f9bff3a1f4c676502946059 | /p1-text-calls/Task0.py | 684e444967d0f2c3853f78348fcf9bdb57df6aa0 | [] | no_license | valmsmith39a/u-data-structures-algorithms | 109e7d9345bbf19bfd5896bb72afb0020f67c39f | 26c2ce76f46fe43f8ea40314b69b41784c461c40 | refs/heads/master | 2020-12-12T20:25:18.490231 | 2020-05-24T23:22:50 | 2020-05-24T23:22:50 | 234,222,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,328 | py | """
Read file into texts and calls.
It's ok if you don't understand how to read files.
"""
import csv
# Load the text and call records; each row is a list of CSV fields.
with open('texts.csv', 'r') as f:
    texts = list(csv.reader(f))

with open('calls.csv', 'r') as f:
    calls = list(csv.reader(f))

"""
TASK 0:
What is the first record of texts and what is the last record of calls?
Print messages:
"First record of texts, <incoming number> texts <answering number> at time <time>"
"Last record of calls, <incoming number> calls <answering number> at time <time>, lasting <during> seconds"
"""
# Bind each record once, then pull out its fields.  calls[-1] is the
# idiomatic way to address the last row (the original recomputed
# calls[len(calls) - 1] four times).
first_text = texts[0]
first_text_in_num = first_text[0]
first_text_ans_num = first_text[1]
first_text_time = first_text[2]

last_call = calls[-1]
last_call_in_num = last_call[0]
last_call_ans_num = last_call[1]
last_call_time = last_call[2]
last_call_duration = last_call[3]

print('First record of texts, {} texts {} at time {}'.format(first_text_in_num, first_text_ans_num, first_text_time))
print('Last record of calls, {} calls {} at time {}, lasting {} seconds'.format(last_call_in_num, last_call_ans_num, last_call_time, last_call_duration))

"""
TASK 0 Runtime Analysis
This set of operations will take O(1) time (constant time).
The runtime is the same for small or large n because we are always
getting data from the first and last elements.
"""
| [
"valmsmith39a@gmail.com"
] | valmsmith39a@gmail.com |
358a5f616c3b2a110eb84fc04e64deb019b6fb0a | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/VBF_HToTauTau_M-145_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0_1377467467/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_20/run_cfg.py | b5626623dbc9edc9319c93f758292157d4169fc8 | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,483 | py | import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/VBF_HToTauTau_M-145_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0_1377467467/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/group/cmgtools/CMG/VBF_HToTauTau_M-145_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_95_1_hyl.root',
'/store/cmst3/group/cmgtools/CMG/VBF_HToTauTau_M-145_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_96_2_V5x.root',
'/store/cmst3/group/cmgtools/CMG/VBF_HToTauTau_M-145_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_97_1_YpL.root',
'/store/cmst3/group/cmgtools/CMG/VBF_HToTauTau_M-145_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_98_1_t9p.root',
'/store/cmst3/group/cmgtools/CMG/VBF_HToTauTau_M-145_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_99_1_cR4.root')
)
| [
"riccardo.manzoni@cern.ch"
] | riccardo.manzoni@cern.ch |
98a98b97d50faf8e95c58e69b97c07808c039099 | 57dccf7b8da26753b66a9eecb9eb6cd1ae5584b5 | /solov2/r50_fpn_1x.py | 06bc648493aad8d49a515aba75ccab0709914694 | [] | no_license | vbvg2008/benchmarks | 4b743d6b19a4d0b41fa78b8db2a3f3a3f4e86018 | 29e2e445e6701529e048e8ffa283b5b071295566 | refs/heads/master | 2022-12-12T21:50:51.082085 | 2022-12-06T22:09:26 | 2022-12-06T22:09:26 | 187,144,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,969 | py | # model settings
# SOLOv2 instance-segmentation model: ResNet-50 backbone + FPN neck +
# SOLOv2 head + mask feature head (mmdetection config dict).
model = dict(
    type='SOLOv2',
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3), # C2, C3, C4, C5
        frozen_stages=1,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=0,
        num_outs=5),
    bbox_head=dict(
        type='SOLOv2Head',
        num_classes=81,
        in_channels=256,
        stacked_convs=4,
        seg_feat_channels=512,
        strides=[8, 8, 16, 32, 32],
        scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)),
        sigma=0.2,
        num_grids=[40, 36, 24, 16, 12],
        ins_out_channels=256,
        loss_ins=dict(
            type='DiceLoss',
            use_sigmoid=True,
            loss_weight=3.0),
        loss_cate=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0)),
    mask_feat_head=dict(
        type='MaskFeatHead',
        in_channels=256,
        out_channels=128,
        start_level=0,
        end_level=3,
        num_classes=256,
        norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)),
    )
# training and testing settings
train_cfg = dict()
test_cfg = dict(
    nms_pre=500,
    score_thr=0.1,
    mask_thr=0.5,
    update_thr=0.05,
    kernel='gaussian', # gaussian/linear
    sigma=2.0,
    max_per_img=100)
# dataset settings
# COCO 2017 train/val with standard ImageNet mean/std normalisation.
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    imgs_per_gpu=2,
    workers_per_gpu=2,
    train=dict(type=dataset_type,
               ann_file=data_root + 'annotations/instances_train2017.json',
               img_prefix=data_root + 'train2017/',
               pipeline=train_pipeline),
    val=dict(type=dataset_type,
             ann_file=data_root + 'annotations/instances_val2017.json',
             img_prefix=data_root + 'val2017/',
             pipeline=test_pipeline),
    test=dict(type=dataset_type,
              ann_file=data_root + 'annotations/instances_val2017.json',
              img_prefix=data_root + 'val2017/',
              pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
# Step decay at epochs 9 and 11 ("1x" = 12-epoch schedule), with a short
# linear warmup.
lr_config = dict(policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.01, step=[9, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 12
device_ids = range(8)
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/solov2_release_r50_fpn_8gpu_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
"shawnmengdong@gmail.com"
] | shawnmengdong@gmail.com |
a6098d7bb89d051c31f79eda75fc3f2d4af00911 | cae9ca1dda110cd6f65d5021c5891fdee76ec6fe | /day1/strings/3.py | 1c3f4fa0268caa9f6b56734e23fcd26ddb9ab709 | [] | no_license | shobhit-nigam/yagami | fb33d6de76a698a160f9e8df9d7d9f5b836797d8 | 283e2a464f74ac07c21ae7095b9a45fa632aa38a | refs/heads/main | 2023-07-04T09:46:51.057558 | 2021-08-10T05:13:27 | 2021-08-10T05:13:27 | 391,846,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | place = "india"
country = 'india'
stra = 'i love "sienfeld"'
print(stra)
strb = "she does'nt love me"
print(strb)
# error
# area = "hind'
| [
"noreply@github.com"
] | shobhit-nigam.noreply@github.com |
6bb1a1cf8a55d6d8626ed43abfd39b9a66de6c5d | c34c8479b4534d98c967311861e2d33f7738df6d | /data/tools.py | 7a5df54a58767251e89130a37ef96aa06a9932a0 | [
"MIT"
] | permissive | PFCM/slack-ml | 94e30c088867da1f0f5d6828702420a77b602e35 | ed3fe68a13668bfc5df9343fa78a0f7cd1e4eeed | refs/heads/master | 2021-04-26T14:32:47.225414 | 2016-01-15T22:50:23 | 2016-01-15T22:50:23 | 48,972,024 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | """Tools for other people to use the data biz.
"""
import datetime
import models
def store_msg(msg):
    """Store a parsed message in the datastore.

    Converts the message's ``timestamp`` field from a numeric string into a
    ``datetime`` (in place -- the caller's dict is mutated), wraps the dict
    in a ``models.Message`` entity and persists it.

    NOTE(review): the original docstring claimed this also checks whether
    enough messages have accumulated to start a training batch; that logic
    was never implemented (see the stub comment below).

    Args:
        msg (dict) - the parsed JSON representing the message

    Returns:
        the ndb key for the added item
    """
    # fromtimestamp() interprets the value in the local timezone --
    # presumably acceptable here; TODO confirm whether UTC was intended.
    msg['timestamp'] = datetime.datetime.fromtimestamp(float(msg['timestamp']))
    msg_model = models.Message(**msg)
    key = msg_model.put()
    # how many are there?  (intended training-batch check; not implemented)
    return key
| [
"pfcmathews@gmail.com"
] | pfcmathews@gmail.com |
a7467deab0675894a34a28bcb0025792b569ca30 | ee79e734486c0ca550bb8238ef54c78c7727384a | /Composite Profiler/Composite.py | 2cf00d629eac460f289b23ab3b635b141de7d7c2 | [] | no_license | neilrobertson/BICRCode | 212636e5395f0c0e4dfb3ac3c133f01eb07273ee | 7b3f4da9cdefd7680f07b707339aee59faece1d2 | refs/heads/master | 2020-03-30T16:07:22.637571 | 2018-10-03T10:12:57 | 2018-10-03T10:12:57 | 151,394,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,395 | py | '''
Created on 25 Jul 2012
@author: mcbryan
'''
import collections
from itertools import izip
import csv
import math
# composite gene class
class CompositeGene():
    """Accumulates per-slice signal values over many genomic features
    (genes) to build a composite profile, records each feature's full row,
    and can emit the table plus an R command string to render it as a
    heatmap.

    NOTE(review): Python 2 code (uses itertools.izip and print-less R
    string building); indentation was reconstructed from the original
    comments when documenting this block.
    """
    keys = None # keys (slice ids, shared across all instances; set lazily)
    def __init__(self, name, regionBehaviour,dataBehaviour,avgFunction,validRow):
        """Store collaborators and create empty accumulators.

        name -- label for this composite (used as the output table header)
        regionBehaviour -- supplies getChr() and slicer() for each feature
        dataBehaviour -- supplies getValues()/valuesBehaviour() raw data access
        avgFunction -- callable that averages a list of values
        validRow -- predicate deciding whether a feature's row is kept
        """
        # Stores parameters
        # name = name of file
        # regionBehaviour = object for gene names, ids, positions etc
        # dataBehaviour = the data tree
        self.name = name # name of composite gene
        self.regionBehaviour = regionBehaviour
        self.dataBehaviour = dataBehaviour
        self.avgFunction = avgFunction
        self.validRow = validRow
        #Creates, dictionaries to store the slices, index is slice, list entry is each feature.
        #The second dictionary stores the lengths of each feature at each slice (as they differ by FEATURE. Used for normalising)
        self.composite = collections.defaultdict(list) # dictionary of lists, key = index in slices
        self.slicewidths = collections.defaultdict(list) # as above
        self.fulloutput = [] # this is where the entire composite gene details are stored
        self.fulloutputsaved = None # filename of the saved table, set by writeFulloutput()

    # for updating our composite structures with new values
    # Takes a feature (e.g. a gene) and slices, then updates dictionaries
    def addToComposite(self, obj):
        """Slice one feature, compute a value per slice, and fold the
        results into the composite accumulators (if validRow accepts it)."""
        # gets the object containing gene names, ids, etc from Tonys parsed list, for the correct chromosome
        chrm = self.regionBehaviour.getChr(obj)
        # slices the gene:
        # Calls slicer (in regions), ultimately in Baselib - gene slicer.
        # Returns parallel lists of keys and slices, each entry is a string. REALLY EACH IS IN A SLICES OBJECT.
        keys,slices = self.regionBehaviour.slicer(obj)
        # only updates the keys if they look more complete
        ## Gets the keys list from the constructor, ~checks that all of the keys in the slice keys are the same as those in the constructor slice keys (initially these are empty)
        if CompositeGene.keys == None or len(CompositeGene.keys)<len(keys):
            CompositeGene.keys = keys
        # note that this is done in key order
        assert len(keys)==len(slices), "Make sure the list of keys is the same length as list of slices"
        rowvalues = {}
        # start full output with the id of the row (gene)
        ##This is simple a string of the feature (e.g. gene name)
        row = [str(obj)]
        #Gets the parallel key and slice information using izip (for each slice)
        for key,slice in izip(keys,slices):
            assert key == slice.sliceid, "Make sure the keys and sliceids are in the same order"
            # get raw data in the range slice.start -> slice.end
            ## Uses the data tree to pull raw data based on the slice start and end.
            values = self.dataBehaviour.getValues(chrm, slice.start, slice.end)
            # calculate value(s) for the slice
            slicevalue = self.dataBehaviour.valuesBehaviour(chrm, values, slice.start, slice.end)
            rowvalues[slice.sliceid]=(slicevalue,slice.end-slice.start)
            # add a row for full output
            ## Gets the average... using the average function object, from right at the start.
            row.append(self.avgFunction(slicevalue))
        assert len(rowvalues)==len(slices),str(keys) + str(slices)
        if self.validRow(row):
            # Appends the slice VALUES (i.e. the final data, for each feature) to the full output.
            # This is in the format: feature name, slice 1 value, slice 2 value, slice 3 value, etc
            self.fulloutput.append(row)
            for sliceid in rowvalues:
                # add to composite gene
                ## Updates: for the current slice, gets the list from composite, then appends the values to the list (i.e. a new gene)
                ## Does the same for the lengths....
                slicevalue,slicelength = rowvalues[sliceid]
                self.composite[sliceid].extend(slicevalue)
                self.slicewidths[sliceid].append(slicelength)
        # this should always be the case after we have added the slices
        assert len(slices) <= len(self.composite),"Object: "+str(obj)+", Len(slices) ["+str(len(slices))+"] != len(self.composite) [" + str(len(self.composite))+"]"

    #Iterates for each vailidated feature (e.g. genes, exons etc)
    def buildComposite(self, regionsToUse):
        """Fold every region in regionsToUse into the composite."""
        for obj in regionsToUse:
            # add the new values to the composite for those slices
            #Passes the feature, to be sliced and added to the dictionaries of lists
            self.addToComposite(obj)

    def writeFulloutput(self,filename):
        """Write the per-feature table as tab-separated values and remember
        the filename (heatmapCommand() reads it back in R)."""
        with open(filename,"w") as f:
            csvfile = csv.writer(f,delimiter="\t")
            header = [self.name]
            header.extend(self.keys)
            csvfile.writerow(header)
            csvfile.writerows(self.fulloutput)
        self.fulloutputsaved = filename

    def heatmapCommand(self, dataSource):
        """Return an R script (as one newline-joined string) that renders
        the saved full-output table as a heatmap PDF.  Requires
        writeFulloutput() to have been called first."""
        slicewidths = [0.0] # prepopulate with 0 as breaks will be number of widths + 1
        for k in CompositeGene.keys:
            slicewidths.append(self.avgFunction(self.slicewidths[k]))
        # Scale the cumulative widths so column breaks span the slice count.
        scalingfactor = (len(slicewidths) - 1) / math.fsum(slicewidths)
        breaks = []
        runningtotal = 0
        for width in slicewidths:
            runningtotal += width
            breaks.append(runningtotal * scalingfactor)
        commands = []
        commands.append("set.seed(0)")
        commands.append("x <- read.table (file='"+self.fulloutputsaved+"', header=TRUE, sep='\\t', quote='\\'', dec='.', fill=FALSE, comment.char='#', row.names=1, na.strings = 'NA', nrows = -1, skip = 0, check.names = TRUE, strip.white = FALSE, blank.lines.skip = TRUE)")
        commands.append("library(gplots)")
        commands.append("pdf(file='"+self.fulloutputsaved+".pdf',width=4.5,height=9)")
        #Tests for the appropriate R colour scale
        if dataSource.heatmapHasNegativeValues():
            a,b = dataSource.heatmapUpperLowerBounds()
            commands.append("heatmap.2(data.matrix(x),key=FALSE, Colv=FALSE, Rowv=FALSE, dendrogram='none',cexCol = 0.25, col=colorpanel(n=40,low='blue',mid = 'white', high='red'), breaks=seq(" + str(a) + "," + str(b) + ", length = 41), trace='none', labRow=' ', sepwidth=c(0,0), colWidths = c("+",".join(str(i) for i in breaks)+"))")
        else:
            commands.append("heatmap.2(data.matrix(x),key=FALSE, Colv=FALSE, Rowv=FALSE, dendrogram='none',cexCol = 0.25, col=colorpanel(n=40,low='white',high='red'), breaks=sort(kmeans(Filter(function(x){x>=0}, c(data.matrix(x))),iter.max=50,centers=41)$centers), trace='none', labRow=' ', sepwidth=c(0,0), colWidths = c("+",".join(str(i) for i in breaks)+"))")
        commands.append("dev.off()")
        return "\n".join(commands)
| [
"neil.alistair.robertson@hotmail.co.uk"
] | neil.alistair.robertson@hotmail.co.uk |
d829730efc735cbc6ac9369fac3ae3dffe0c67d0 | f4c0172e70ca5ffbe01695245e82a28291f88d04 | /v0.3/Controller/Algorithm.py | 3e0095e81b4864bf3d50235addea2d2ce3801968 | [] | no_license | huangxinkid/DeepLearning_Wavelet-LSTM | a84e667d5f2db477ac5a9993d8ae329ec9fd115f | b726f99a8631fc48e6943655ace222b0f6b0290b | refs/heads/master | 2020-03-24T07:11:52.832149 | 2018-05-30T18:43:38 | 2018-05-30T18:43:38 | 142,556,218 | 0 | 1 | null | 2018-07-27T09:21:18 | 2018-07-27T09:21:18 | null | UTF-8 | Python | false | false | 1,624 | py | # -*- coding: utf-8 -*-
''' ***** 计算包 ***** '''
import numpy as np
import pywt #小波分析包
from scipy import signal
def MyPywtCWT(data, wavename='gaus1', totalscal=256):
    """Continuous wavelet transform of ``data`` via PyWavelets.

    Parameters
    ----------
    data : array_like
        1-D signal to transform.  (Original source: sampled at
        Fs = 500000 Hz, i.e. a 2 us sampling period.)
    wavename : str, optional
        Mother wavelet name (default ``'gaus1'``), kept as a keyword so
        existing single-argument callers are unaffected.
    totalscal : int, optional
        Length of the scale sequence (default 256).

    Returns
    -------
    coef : ndarray
        Absolute CWT coefficient magnitudes, shape (totalscal, len(data)).
    freqs : ndarray
        Pseudo-frequency corresponding to each scale.
    """
    # Wavelet centre frequency; C = 2 * Fc * totalscal maps the scale index
    # onto a scale sequence spanning (2*Fc, inf).
    Fc = pywt.central_frequency(wavename)
    C = 2 * Fc * totalscal
    scal = C / np.arange(1, totalscal + 1)
    coef, freqs = pywt.cwt(data, scal, wavename)
    # Keep magnitude only -- phase information is discarded.
    coef = np.abs(coef)
    return coef, freqs
def MyScipyCwt(data, MyWidths):
    """Continuous wavelet transform using a Ricker ("Mexican hat") wavelet.

    Parameters
    ----------
    data : sequence of numbers
        1-D input signal; converted to float64 before transforming.
    MyWidths : int
        Number of wavelet widths to evaluate; widths 1..MyWidths are used.

    Returns
    -------
    ndarray of shape (MyWidths, len(data)) with the CWT coefficients
    (not absolute values -- matches the original behaviour).
    """
    # Vectorised conversion replaces the original element-by-element loop.
    sig = np.asarray(data, dtype=float)
    widths = np.arange(1, MyWidths + 1)
    # signal.ricker is the Ricker / "Mexican hat" mother wavelet.
    return signal.cwt(sig, signal.ricker, widths)
| [
"hello.sea@qq.com"
] | hello.sea@qq.com |
1d705786d7fa0d53d73234849ae53cb166fa2a17 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part005588.py | 305693770a044e0eb4eb602d15ef79ef2212cc9c | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,559 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher32182(CommutativeMatcher):
    # Auto-generated many-to-one commutative matcher (matchpy).  The
    # "State NNNNN" comments refer to nodes of the generated matching
    # automaton; do not hand-edit the matching logic.
    _instance = None
    patterns = {
        0: (0, Multiset({0: 1, 1: 1}), [
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 0
    anonymous_patterns = set()
    def __init__(self):
        self.add_subject(None)
    @staticmethod
    def get():
        # Lazily constructed process-wide singleton.
        if CommutativeMatcher32182._instance is None:
            CommutativeMatcher32182._instance = CommutativeMatcher32182()
        return CommutativeMatcher32182._instance
    @staticmethod
    def get_match_iter(subject):
        # Yields (pattern_index, substitution) pairs for every way the
        # subject matches one of the two generated patterns.
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 32181
        if len(subjects) >= 1 and isinstance(subjects[0], Pow):
            tmp1 = subjects.popleft()
            subjects2 = deque(tmp1._args)
            # State 32183
            if len(subjects2) >= 1:
                tmp3 = subjects2.popleft()
                subst1 = Substitution(subst0)
                try:
                    subst1.try_add_variable('i2.2.1.2.2.2.1', tmp3)
                except ValueError:
                    pass
                else:
                    pass
                    # State 32184
                    if len(subjects2) >= 1:
                        tmp5 = subjects2.popleft()
                        subst2 = Substitution(subst1)
                        try:
                            subst2.try_add_variable('i2.2.1.2.2.2.2', tmp5)
                        except ValueError:
                            pass
                        else:
                            pass
                            # State 32185
                            if len(subjects2) == 0:
                                pass
                                # State 32186
                                if len(subjects) == 0:
                                    pass
                                    # 0: x**m
                                    yield 0, subst2
                        subjects2.appendleft(tmp5)
                subjects2.appendleft(tmp3)
            subjects.appendleft(tmp1)
        if len(subjects) >= 1 and isinstance(subjects[0], Add):
            tmp7 = subjects.popleft()
            associative1 = tmp7
            associative_type1 = type(tmp7)
            subjects8 = deque(tmp7._args)
            # Delegates the Add-arguments matching to a nested generated matcher.
            matcher = CommutativeMatcher32188.get()
            tmp9 = subjects8
            subjects8 = []
            for s in tmp9:
                matcher.add_subject(s)
            for pattern_index, subst1 in matcher.match(tmp9, subst0):
                pass
                if pattern_index == 0:
                    pass
                    # State 32205
                    if len(subjects) == 0:
                        pass
                        # 1: f + e*x**r
                        yield 1, subst1
            subjects.appendleft(tmp7)
        return
        yield
from matchpy.matching.many_to_one import CommutativeMatcher
from collections import deque
from matchpy.utils import VariableWithCount
from multiset import Multiset
from .generated_part005589 import * | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
193b2dcbd810673d2f4bf5c9e7f66da963b0ddbb | 9875dfce571669d8ce3f6523651dc8e1ebf661d4 | /apps/users/middleware.py | 70db7af8c9c826af40f487afae7f587af677f090 | [
"MIT"
] | permissive | AppforallHQ/users | a80c0a08e3d660aed21af744383c3d42288c58c5 | bb5f393ef3966dbde019a1a07f244b71a342a7ac | refs/heads/master | 2020-06-30T20:17:37.641107 | 2016-11-21T10:53:52 | 2016-11-21T12:57:38 | 74,354,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | class DisableCSRF(object):
def process_request(self, request):
# because i use Ajax call and form submit from another system i need it
setattr(request, '_dont_enforce_csrf_checks', True)
| [
"hi@appforall.ir"
] | hi@appforall.ir |
d06300467f74a49f3dcee22bb6accd8a5ecbd128 | 3b3b78da400b23ea480e437d4ab620da648f6525 | /script.py | dad8b85d8238898a3a71bbbfd46c9e23be6475c0 | [] | no_license | gaybro8777/Apokalypsis | d1c419fa24807636e211ee54def4399b1baa7e56 | eefa59f8e0ff4ff72d411bcf4521e3cdd9ac86f6 | refs/heads/main | 2023-01-20T10:11:22.803144 | 2020-11-30T02:17:16 | 2020-11-30T02:17:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,338 | py | import numpy as np
import PIL
import pyllusion as ill
# Test
# ill.pareidolia(
# pattern="patterns/logo.png",
# n=[20, 300, 4000],
# sd=[4, 2, 1],
# weight=[3, 2, 1],
# alpha=80,
# blur=0.2)
# Logo
# Render the project logo pattern as layered blob pareidolia and save it.
logo = ill.pareidolia(
    pattern="patterns/logo.png",
    n=[20, 300, 4000],
    sd=[4, 2, 1],
    weight=[3, 2, 1],
    alpha=90,
    blur=0.2)
logo.save("logo.png")
# Demo
def make_demo(file="patterns/png/snake_7.png", name="snake", alpha=[60, 70, 80], blur=[1, 2, 4]):
    """Render a grid of pareidolia stimuli over all (alpha, blur) combos,
    label each cell, and save the montage as demo_<name>.png.

    NOTE(review): the list defaults are mutable but never mutated here, so
    they are safe.  The grid layout assumes len(alpha) * len(blur) is a
    perfect square -- extra images are silently dropped otherwise.
    """
    pattern = PIL.Image.open(file)
    imgs = []
    for a in alpha:
        for b in blur:
            stim = ill.pareidolia(pattern=pattern,
                                  n=[20, 300, 4000],
                                  sd=[4, 2, 1],
                                  weight=[3, 2, 1],
                                  alpha=a,
                                  blur=b)
            # Stamp the parameter values in red in the top-left corner.
            PIL.ImageDraw.Draw(stim).text((0, 0),
                                          f"alpha = {a}%, blur = {b}%",
                                          (255, 0, 0),
                                          font = PIL.ImageFont.truetype("arial.ttf", 60))
            imgs.append(stim)
    # Square grid: rows == cols == sqrt(number of images).
    nrows, ncols = int(np.sqrt(len(imgs))), int(np.sqrt(len(imgs)))
    new = PIL.Image.new('RGB', (imgs[0].width * nrows, imgs[0].height * ncols))
    i = 0
    for row in range(nrows):
        for col in range(ncols):
            new.paste(imgs[i], (imgs[i].width * row, imgs[i].height * col))
            i += 1
    # NOTE(review): bare expression below has no effect outside a notebook.
    new
    new.save("demo_" + name + ".png")
# make_demo(file="patterns/png/snake_7.png", name="snake", alpha=[60, 70, 80], blur=[1, 2, 4])
# make_demo(file="patterns/faces/full_168_m_f_f_a.png", name="face", alpha=[20, 30, 40], blur=[1, 2, 3])
# Only the skull demo is rendered by default; the calls above are kept for reference.
make_demo(file="patterns/png/skull_5.png", name="skull", alpha=[50, 65, 80], blur=[1, 2, 3])
# # Profile
# pattern = PIL.Image.open("patterns/final/snake_7.png")
# call = """
# ill.pareidolia(
# pattern=pattern,
# n=[20, 300, 4000],
# sd=[4, 2, 1],
# weight=[3, 2, 1],
# alpha=80,
# blur=0.5)
# """
# call2 = """
# ill.image_blobs(
# n=[20, 300, 4000],
# sd=[4, 2, 1],
# weight=[3, 2, 1])
# """
# import cProfile
# import pstats
# cProfile.run(call2, "profile.stats")
# stats = pstats.Stats("profile.stats").strip_dirs()
# stats.sort_stats('tottime').print_stats()
| [
"dom.mak19@gmail.com"
] | dom.mak19@gmail.com |
bfe25251929173fa052d679fdd5236e028ac0634 | b2041d50df277d3bb6b1b6512811f3dfed88df32 | /jc/parsers/w.py | 65682166606e265b80e33cf2dfbb9b170fe667fe | [
"MIT"
] | permissive | kasulani/jc | 67b4bc076f3646070da4f8c51b339cbdb19bc01f | 30cff5f28140a5b20a617c8145c13e43b7b6685b | refs/heads/master | 2023-08-17T07:57:08.143292 | 2021-09-27T17:35:08 | 2021-09-27T17:35:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,778 | py | """jc - JSON CLI output utility `w` command output parser
Usage (cli):
$ w | jc --w
or
$ jc w
Usage (module):
import jc.parsers.w
result = jc.parsers.w.parse(w_command_output)
Schema:
[
{
"user": string, # '-' = null
"tty": string, # '-' = null
"from": string, # '-' = null
"login_at": string, # '-' = null
"idle": string, # '-' = null
"jcpu": string,
"pcpu": string,
"what": string # '-' = null
}
]
Examples:
$ w | jc --w -p
[
{
"user": "root",
"tty": "tty1",
"from": null,
"login_at": "07:49",
"idle": "1:15m",
"jcpu": "0.00s",
"pcpu": "0.00s",
"what": "-bash"
},
{
"user": "root",
"tty": "ttyS0",
"from": null,
"login_at": "06:24",
"idle": "0.00s",
"jcpu": "0.43s",
"pcpu": "0.00s",
"what": "w"
},
{
"user": "root",
"tty": "pts/0",
"from": "192.168.71.1",
"login_at": "06:29",
"idle": "2:35m",
"jcpu": "0.00s",
"pcpu": "0.00s",
"what": "-bash"
}
]
$ w | jc --w -p -r
[
{
"user": "kbrazil",
"tty": "tty1",
"from": "-",
"login_at": "07:49",
"idle": "1:16m",
"jcpu": "0.00s",
"pcpu": "0.00s",
"what": "-bash"
},
{
"user": "kbrazil",
"tty": "ttyS0",
"from": "-",
"login_at": "06:24",
"idle": "2.00s",
"jcpu": "0.46s",
"pcpu": "0.00s",
"what": "w"
},
{
"user": "kbrazil",
"tty": "pts/0",
"from": "192.168.71.1",
"login_at": "06:29",
"idle": "2:36m",
"jcpu": "0.00s",
"pcpu": "0.00s",
"what": "-bash"
}
]
"""
import string
import jc.utils
class info():
    """Provides parser metadata (version, author, etc.)"""
    version = '1.4'
    description = '`w` command parser'
    author = 'Kelly Brazil'
    author_email = 'kellyjonbrazil@gmail.com'
    # compatible options: linux, darwin, cygwin, win32, aix, freebsd
    compatible = ['linux', 'darwin', 'cygwin', 'aix', 'freebsd']
    # commands whose output `jc` routes to this parser automatically
    magic_commands = ['w']
__version__ = info.version
def _process(proc_data):
"""
Final processing to conform to the schema.
Parameters:
proc_data: (List of Dictionaries) raw structured data to process
Returns:
List of Dictionaries. Structured data to conform to the schema.
"""
for entry in proc_data:
null_list = ['user', 'tty', 'from', 'login_at', 'idle', 'what']
for key in entry:
if key in null_list:
if entry[key] == '-':
entry[key] = None
return proc_data
def parse(data, raw=False, quiet=False):
    """
    Main text parsing function

    Parameters:

        data:        (string)  text data to parse
        raw:         (boolean) output preprocessed JSON if True
        quiet:       (boolean) suppress warning messages if True

    Returns:

        List of Dictionaries. Raw or processed structured data.
    """
    if not quiet:
        jc.utils.compatibility(__name__, info.compatible)

    # first line of `w` output is the uptime/load summary -- skip it
    cleandata = data.splitlines()[1:]
    raw_output = []

    if jc.utils.has_data(data):
        header_text = cleandata[0].lower()

        # fixup for 'from' column that can be blank: remember its column
        # offset before rewriting the header
        from_col = header_text.find('from')

        # clean up 'login@' header
        # even though @ in a key is valid json, it can make things difficult
        header_text = header_text.replace('login@', 'login_at')
        # str.split() with no argument already collapses whitespace runs and
        # never yields empty strings, so no join/filter dance is needed
        headers = header_text.split()

        # parse lines
        for entry in cleandata[1:]:
            # split into at most len(headers) fields so the trailing
            # 'what' column may contain spaces
            temp_line = entry.split(maxsplit=len(headers) - 1)

            # fix 'from' column, always at column 2: if the character under
            # the header is whitespace the field is blank -- insert '-'
            # NOTE: assumes each data line is at least from_col + 1 chars wide
            if 'from' in headers:
                if entry[from_col] in string.whitespace:
                    temp_line.insert(2, '-')

            raw_output.append(dict(zip(headers, temp_line)))

        # strip whitespace from beginning and end of all string values
        for row in raw_output:
            for item in row:
                if isinstance(row[item], str):
                    row[item] = row[item].strip()

    if raw:
        return raw_output
    return _process(raw_output)
| [
"kellyjonbrazil@gmail.com"
] | kellyjonbrazil@gmail.com |
a8060407becaf0a010d49e498661497187ac4776 | d80ef8c716bcc5ea54e87540dbf0463f15bf44ce | /Proxy/test/pathod/test_pathoc.py | 4b50e2a76e18587ad486817ba5b5a914e0711afc | [
"MIT"
] | permissive | YagiGo/YPTN | 5043d22eb131c7164d3fa575f0c4e3d8a963dbf4 | d7692a68ee1bf578536b4c09c566272210fc8b69 | refs/heads/master | 2018-10-16T03:44:18.024169 | 2018-07-24T08:53:57 | 2018-07-24T08:53:57 | 107,633,669 | 4 | 1 | MIT | 2018-06-08T09:04:29 | 2017-10-20T04:55:22 | JavaScript | UTF-8 | Python | false | false | 7,770 | py | import io
from unittest.mock import Mock
import pytest
from mitmproxy.net import http
from mitmproxy.net.http import http1
from mitmproxy import exceptions
from pathod import pathoc, language
from pathod.protocols.http2 import HTTP2StateProtocol
from mitmproxy.test import tutils
from . import tservers
def test_response():
    # Smoke test: a minimal Response object must have a usable repr.
    r = http.Response(b"HTTP/1.1", 200, b"Message", {}, None, None)
    assert repr(r)
class PathocTestDaemon(tservers.DaemonTests):
    # Shared helper base for the pathoc daemon tests below.
    def tval(self, requests, timeout=None, showssl=False, **kwargs):
        """Run each pathoc request spec against the test daemon and return
        everything pathoc wrote to its output stream."""
        s = io.StringIO()
        c = pathoc.Pathoc(
            ("127.0.0.1", self.d.port),
            ssl=self.ssl,
            fp=s,
            **kwargs
        )
        with c.connect(showssl=showssl, fp=s):
            if timeout:
                c.settimeout(timeout)
            for i in requests:
                r = next(language.parse_pathoc(i))
                if kwargs.get("explain"):
                    r = r.freeze(language.Settings())
                try:
                    c.request(r)
                except exceptions.NetlibException:
                    # Network-level failures are expected for some specs;
                    # the interesting output is already in the stream/log.
                    pass
        self.d.wait_for_silence()
        return s.getvalue()
class TestDaemonSSL(PathocTestDaemon):
    # TLS-enabled daemon: requests client certs, two SANs, negotiates h2.
    ssl = True
    ssloptions = dict(
        request_client_cert=True,
        sans=[b"test1.com", b"test2.com"],
        alpn_select=b'h2',
    )
    def test_sni(self):
        # The SNI sent by pathoc must show up in the daemon's request log.
        self.tval(
            ["get:/p/200"],
            sni="foobar.com"
        )
        log = self.d.log()
        assert log[0]["request"]["sni"] == "foobar.com"
    def test_showssl(self):
        assert "certificate chain" in self.tval(["get:/p/200"], showssl=True)
    def test_clientcert(self):
        # Presenting a client cert should be recorded with its key info.
        self.tval(
            ["get:/p/200"],
            clientcert=tutils.test_data.path("pathod/data/clientcert/client.pem"),
        )
        log = self.d.log()
        assert log[0]["request"]["clientcert"]["keyinfo"]
    def test_http2_without_ssl(self):
        # HTTP/2 over cleartext is unsupported and must fail fast.
        fp = io.StringIO()
        c = pathoc.Pathoc(
            ("127.0.0.1", self.d.port),
            use_http2=True,
            ssl=False,
            fp=fp
        )
        with pytest.raises(NotImplementedError):
            c.connect()
class TestDaemon(PathocTestDaemon):
    # Plaintext (non-TLS) daemon tests.
    ssl = False
    def test_ssl_error(self):
        # Speaking TLS to a plaintext port must raise an SSL-flavoured error.
        c = pathoc.Pathoc(("127.0.0.1", self.d.port), ssl=True, fp=None)
        try:
            with c.connect():
                pass
        except Exception as e:
            assert "SSL" in str(e)
        else:
            raise AssertionError("No exception raised.")
    def test_showssl(self):
        # No TLS in play, so no certificate chain should be printed.
        assert "certificate chain" not in self.tval(
            ["get:/p/200"],
            showssl=True)
    def test_ignorecodes(self):
        # Responses whose status code is in `ignorecodes` are suppressed.
        assert "200" in self.tval(["get:'/p/200:b@1'"])
        assert "200" in self.tval(["get:'/p/200:b@1'"])
        assert "200" in self.tval(["get:'/p/200:b@1'"])
        assert "200" not in self.tval(["get:'/p/200:b@1'"], ignorecodes=[200])
        assert "200" not in self.tval(
            ["get:'/p/200:b@1'"],
            ignorecodes=[
                200,
                201])
        assert "202" in self.tval(["get:'/p/202:b@1'"], ignorecodes=[200, 201])
    def _test_timeout(self):
        # NOTE(review): leading underscore disables this test (likely flaky
        # due to timing); kept for reference.
        assert "Timeout" in self.tval(["get:'/p/200:p0,100'"], timeout=0.01)
        assert "HTTP" in self.tval(
            ["get:'/p/200:p5,100'"],
            showresp=True,
            timeout=1
        )
        assert "HTTP" not in self.tval(
            ["get:'/p/200:p3,100'"],
            showresp=True,
            timeout=1,
            ignoretimeout=True
        )
    def test_showresp(self):
        reqs = ["get:/p/200:da", "get:/p/200:da"]
        assert self.tval(reqs).count("200 OK") == 2
        assert self.tval(reqs, showresp=True).count("HTTP/1.1 200 OK") == 2
        assert self.tval(
            reqs, showresp=True, hexdump=True
        ).count("0000000000") == 2
    def test_showresp_httperr(self):
        # A deliberately truncated response should still be shown, plus an error.
        v = self.tval(["get:'/p/200:d20'"], showresp=True, showsummary=True)
        assert "Invalid header" in v
        assert "HTTP/" in v
    def test_explain(self):
        # With explain=True the spec is frozen, so raw spec tokens disappear.
        reqs = ["get:/p/200:b@100"]
        assert "b@100" not in self.tval(reqs, explain=True)
    def test_showreq(self):
        reqs = ["get:/p/200:da", "get:/p/200:da"]
        assert self.tval(reqs, showreq=True).count("GET /p/200") == 2
        assert self.tval(
            reqs, showreq=True, hexdump=True
        ).count("0000000000") == 2
    def test_conn_err(self):
        assert "Invalid server response" in self.tval(["get:'/p/200:d2'"])
    def test_websocket_shutdown(self):
        self.tval(["ws:/"])
    def test_wait_finish(self):
        c = pathoc.Pathoc(
            ("127.0.0.1", self.d.port),
            fp=None,
            ws_read_limit=1
        )
        with c.connect():
            c.request("ws:/")
            c.request("wf:f'wf'")
            # This should read a frame and close the websocket reader
            assert len([i for i in c.wait(timeout=5, finish=False)]) == 1
            assert not [i for i in c.wait(timeout=0)]
    def test_connect_fail(self):
        # Exercise the HTTP CONNECT handshake against canned server replies.
        to = ("foobar", 80)
        c = pathoc.Pathoc(("127.0.0.1", self.d.port), fp=None)
        c.rfile, c.wfile = io.BytesIO(), io.BytesIO()
        with pytest.raises(Exception, match="CONNECT failed"):
            c.http_connect(to)
        c.rfile = io.BytesIO(
            b"HTTP/1.1 500 OK\r\n"
        )
        with pytest.raises(Exception, match="CONNECT failed"):
            c.http_connect(to)
        c.rfile = io.BytesIO(
            b"HTTP/1.1 200 OK\r\n"
        )
        c.http_connect(to)
    def test_socks_connect(self):
        # Exercise the SOCKS5 handshake against canned byte sequences.
        to = ("foobar", 80)
        c = pathoc.Pathoc(("127.0.0.1", self.d.port), fp=None)
        c.rfile, c.wfile = tutils.treader(b""), io.BytesIO()
        with pytest.raises(pathoc.PathocError):
            c.socks_connect(to)
        c.rfile = tutils.treader(
            b"\x05\xEE"
        )
        with pytest.raises(Exception, match="SOCKS without authentication"):
            c.socks_connect(("example.com", 0xDEAD))
        c.rfile = tutils.treader(
            b"\x05\x00" +
            b"\x05\xEE\x00\x03\x0bexample.com\xDE\xAD"
        )
        with pytest.raises(Exception, match="SOCKS server error"):
            c.socks_connect(("example.com", 0xDEAD))
        c.rfile = tutils.treader(
            b"\x05\x00" +
            b"\x05\x00\x00\x03\x0bexample.com\xDE\xAD"
        )
        c.socks_connect(("example.com", 0xDEAD))
class TestDaemonHTTP2(PathocTestDaemon):
    # HTTP/2 over TLS daemon tests.
    ssl = True
    explain = False
    def test_http2(self):
        # use_http2=True selects the HTTP/2 protocol object...
        c = pathoc.Pathoc(
            ("127.0.0.1", self.d.port),
            fp=None,
            ssl=True,
            use_http2=True,
        )
        assert isinstance(c.protocol, HTTP2StateProtocol)
        # ...while the default client stays on HTTP/1.
        c = pathoc.Pathoc(
            ("127.0.0.1", self.d.port),
        )
        assert c.protocol == http1
    def test_http2_alpn(self):
        c = pathoc.Pathoc(
            ("127.0.0.1", self.d.port),
            fp=None,
            ssl=True,
            use_http2=True,
            http2_skip_connection_preface=True,
        )
        # Wrap convert_to_ssl so we can inspect the ALPN list it was
        # called with while still performing the real handshake.
        tmp_convert_to_ssl = c.convert_to_ssl
        c.convert_to_ssl = Mock()
        c.convert_to_ssl.side_effect = tmp_convert_to_ssl
        with c.connect():
            _, kwargs = c.convert_to_ssl.call_args
            assert set(kwargs['alpn_protos']) == set([b'http/1.1', b'h2'])
    def test_request(self):
        c = pathoc.Pathoc(
            ("127.0.0.1", self.d.port),
            fp=None,
            ssl=True,
            use_http2=True,
        )
        with c.connect():
            resp = c.request("get:/p/200")
        assert resp.status_code == 200
| [
"jeremywu1995@gmail.com"
] | jeremywu1995@gmail.com |
cc18cdf5ec88d2e1c641ebc8d35c0b33e3954614 | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/common/Lib/json/tests/test_decode.py | 034213fe0a12220df7d326ee605216939dcc8117 | [] | no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,850 | py | # 2017.05.04 15:32:48 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/json/tests/test_decode.py
import decimal
from StringIO import StringIO
from collections import OrderedDict
from json.tests import PyTest, CTest
class TestDecode(object):
    # Python 2 json decoder tests; mixed into PyTest/CTest below so each
    # case runs against both the pure-Python and the C implementation
    # (self.loads / self.json are supplied by the mixin bases).
    def test_decimal(self):
        rval = self.loads('1.1', parse_float=decimal.Decimal)
        self.assertTrue(isinstance(rval, decimal.Decimal))
        self.assertEqual(rval, decimal.Decimal('1.1'))
    def test_float(self):
        rval = self.loads('1', parse_int=float)
        self.assertTrue(isinstance(rval, float))
        self.assertEqual(rval, 1.0)
    def test_decoder_optimizations(self):
        # Several optimizations are made that skip over calls to
        # the whitespace regex, so this tests that obvious cases work.
        rval = self.loads('{ "key" : "value" , "k":"v" }')
        self.assertEqual(rval, {'key': 'value',
         'k': 'v'})
    def test_empty_objects(self):
        self.assertEqual(self.loads('{}'), {})
        self.assertEqual(self.loads('[]'), [])
        self.assertEqual(self.loads('""'), u'')
        self.assertIsInstance(self.loads('""'), unicode)
    def test_object_pairs_hook(self):
        # object_pairs_hook must receive members in document order and
        # take precedence over object_hook.
        s = '{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}'
        p = [('xkd', 1),
         ('kcw', 2),
         ('art', 3),
         ('hxm', 4),
         ('qrt', 5),
         ('pad', 6),
         ('hoy', 7)]
        self.assertEqual(self.loads(s), eval(s))
        self.assertEqual(self.loads(s, object_pairs_hook=lambda x: x), p)
        self.assertEqual(self.json.load(StringIO(s), object_pairs_hook=lambda x: x), p)
        od = self.loads(s, object_pairs_hook=OrderedDict)
        self.assertEqual(od, OrderedDict(p))
        self.assertEqual(type(od), OrderedDict)
        self.assertEqual(self.loads(s, object_pairs_hook=OrderedDict, object_hook=lambda x: None), OrderedDict(p))
        self.assertEqual(self.loads('{}', object_pairs_hook=OrderedDict), OrderedDict())
        self.assertEqual(self.loads('{"empty": {}}', object_pairs_hook=OrderedDict), OrderedDict([('empty', OrderedDict())]))
    def test_extra_data(self):
        s = '[1, 2, 3]5'
        msg = 'Extra data'
        self.assertRaisesRegexp(ValueError, msg, self.loads, s)
    def test_invalid_escape(self):
        s = '["abc\\y"]'
        msg = 'escape'
        self.assertRaisesRegexp(ValueError, msg, self.loads, s)
    def test_negative_index(self):
        # Negative offsets into raw_decode must be rejected, not wrap.
        d = self.json.JSONDecoder()
        self.assertRaises(ValueError, d.raw_decode, 'a' * 42, -50000)
        self.assertRaises(ValueError, d.raw_decode, u'a' * 42, -50000)
# Run the shared cases against the pure-Python decoder.
class TestPyDecode(TestDecode, PyTest):
    pass
# Run the shared cases against the C-accelerated decoder.
class TestCDecode(TestDecode, CTest):
    pass
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\common\Lib\json\tests\test_decode.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:32:48 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
41cfcc72d802cd54cd3984cb735f5ab70df06312 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p4VQE/R4/benchmark/startPyquil125.py | 8f452637b1307ee94e925a1a28f92d86a6a67300 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,184 | py | # qubit number=4
# total number=12
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
    # Build the fixed 4-qubit benchmark circuit (12 gates; the "number=N"
    # comments are generator bookkeeping and do not affect execution).
    prog = Program() # circuit begin
    prog += H(0) # number=1
    prog += H(1) # number=2
    prog += H(2) # number=3
    prog += X(2) # number=6
    prog += H(3) # number=4
    prog += Y(3) # number=5
    prog += H(0) # number=9
    prog += CZ(1,0) # number=10
    prog += H(0) # number=11
    prog += CNOT(1,0) # number=8
    # circuit end
    return prog
def summrise_results(bitstrings) -> dict:
    """Count occurrences of each measured bitstring.

    Parameters:
        bitstrings: iterable of hashable measurement outcomes
            (here: strings such as '0101').

    Returns:
        dict mapping each distinct bitstring to its count.

    Note: the original (misspelled) name is kept for backward
    compatibility with existing callers.
    """
    counts = {}
    for bits in bitstrings:
        # dict.get with a default collapses the original if/else branch
        counts[bits] = counts.get(bits, 0) + 1
    return counts
if __name__ == '__main__':
    prog = make_circuit()
    # 4-qubit QVM simulator, 1024 shots.
    qvm = get_qc('4q-qvm')
    results = qvm.run_and_measure(prog,1024)
    # rows = shots, columns = qubits -> join the bits into strings like '0101'
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    # NOTE(review): a with-block would guarantee the file closes on error.
    writefile = open("../data/startPyquil125.csv","w")
    print(summrise_results(bitstrings),file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
2e9dddf344054ea39d08047c8f848aabd8ec720b | 8534f1109cbd6bdae8e5110e2438331ded6f1134 | /test_be.py | 5079e98a2e08731fe7f7b26f5816aa1a6d8b8243 | [] | no_license | laike9m/TestPython | dc802053fd0eee329aca8517ccd6f2e97846f221 | 0587c33764b8f88975d1156b73400926e77014c6 | refs/heads/master | 2022-12-21T12:29:45.570030 | 2022-01-05T06:13:32 | 2022-01-05T06:13:32 | 195,931,990 | 0 | 0 | null | 2022-12-13T23:41:15 | 2019-07-09T04:21:59 | Python | UTF-8 | Python | false | false | 209 | py | from birdseye import eye, server
class A:
def __init__(self, x, y):
self.x = x
self.y = y
@eye
def f():
a = A(1, 2)
b = A(3, a)
f()
server.app.run(host="0.0.0.0", port=7777)
| [
"laike9m@gmail.com"
] | laike9m@gmail.com |
fbe77b70ea1246c6d8bbf2a4286df9a36dd20496 | c0c1103a4e8b526fc417010ac9a7d5413acbf62c | /server/app.py | bbf77b5a926c813e56ed3b0e0be5a0f0b676747e | [] | no_license | zedoul/url-shorten | 0d7c95a49f3543d1e64bcd0bed24ab9f3e534aca | 176b304a456fa0b7302f051537bc1a375790323d | refs/heads/master | 2021-01-10T00:57:25.256718 | 2015-10-14T02:52:13 | 2015-10-14T02:52:13 | 44,220,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | # -*- coding: utf-8 -*-
import os
import config
from flask import Flask
from server.api import api
from server.word import Word
def create_app(settings_overrides=None):
    """Application factory: build and configure the Flask app.

    settings_overrides, if given, is a mapping applied on top of the
    defaults (useful for tests).
    """
    app = Flask(__name__)
    app.config.from_object('config')
    configure_settings(app, settings_overrides)
    configure_blueprints(app)
    # Shared word store used by the API blueprint.
    app.word = Word()
    return app
def configure_settings(app, settings_override):
    """Apply the default settings, then any override mapping on top."""
    # Data directory lives next to the package: <package>/../data
    data_path = os.path.join(os.path.dirname(__file__), '..', 'data')
    defaults = {
        'DEBUG': True,
        'TESTING': False,
        'DATA_PATH': data_path,
    }
    app.config.update(defaults)
    if settings_override:
        app.config.update(settings_override)
def configure_blueprints(app):
    # Register the API blueprint (routes defined in server.api).
    app.register_blueprint(api)
| [
"shyeon.kim@scipi.net"
] | shyeon.kim@scipi.net |
3c087284efaeefb27e84aa2f5941cb9a64946470 | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /little_number/woman_or_same_government/do_time_after_own_company/man/big_eye.py | ac1641e6b110d4de26f550d9cbe4f340c25978d5 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py |
#! /usr/bin/env python
def group(str_arg):
    # Print the argument via the helper, then a fixed marker string.
    new_work_and_other_world(str_arg)
    print('year_or_thing')
def new_work_and_other_world(str_arg):
    # Thin wrapper around print (generated placeholder code).
    print(str_arg)
if __name__ == '__main__':
    # Demo invocation with a fixed placeholder string.
    group('long_fact_and_thing')
| [
"jingkaitang@gmail.com"
] | jingkaitang@gmail.com |
efd9b9d1affe8cfe53ccb336becdd3c96d4232d6 | d28501318adf2b95690a1d44c0ddc5c71b89cbc9 | /nircam_calib/pipeline_testing/saturation/modify_sat_map.py | f6d7f4bf0edc34cba5e8339609ded98cc5672d35 | [] | no_license | spacetelescope/nircam_calib | 67b9df6c32f0ec1da6d369c6979b584ec6dac29c | 2471a70785884f70e1bf5f546ba9f971b15d77fe | refs/heads/master | 2023-02-26T08:38:35.436644 | 2022-06-20T17:22:23 | 2022-06-20T17:22:23 | 74,314,514 | 2 | 17 | null | 2023-02-11T00:31:12 | 2016-11-21T01:20:19 | Jupyter Notebook | UTF-8 | Python | false | false | 965 | py | #! /usr/bin/env python
'''
Modify an existing saturation map reference file in order
to more completely test the pipeline step
Flag a couple pixels with no_sat_check, one with the saturation
value set to 0 (as it usually is in the reference file), and
the other with some non-zero value, just to make sure that
it is being ignored.
'''
from astropy.io import fits
file = '../reffiles/NRCA1_17004_WellDepthADU_2016-03-10_ssbsaturation_wfact_DMSorient.fits'
outfile = 'nrca1_modified_saturation_reffile.fits'
#pixel to set saturation value to 0 and flag with no_sat_check
x0 = 100
y0 = 100
#pixel to set non-zero sat value and flag with no_sat_check
x1 = 101
y1 = 101
sat1 = 100.
h = fits.open(file)
#set pixel to no_sat_check and
#set saturation value to zero
h[1].data[y0,x0] = 0.
h[2].data[y0,x0] = 2
#set pixel to no_sat_check and
#set saturation value to non-zero
h[1].data[y1,x1] = sat1
h[2].data[y1,x1] = 2
#save
h.writeto(outfile,overwrite=True)
| [
"noreply@github.com"
] | spacetelescope.noreply@github.com |
055e6e256f7b4e98c925cdd583c303c700180173 | d1742451b25705fc128acc245524659628ab3e7d | /Codeforces with Python/B. Bad Prices.py | cceda8a63f653e6f267356a7b1334164a2e898ef | [] | no_license | Shovon588/Programming | ebab793a3c97aedddfcad5ea06e7e22f5c54a86e | e4922c9138998358eed09a1be7598f9b060c685f | refs/heads/master | 2022-12-23T18:29:10.141117 | 2020-10-04T17:29:32 | 2020-10-04T17:29:32 | 256,915,133 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | for _ in range(int(input())):
n=int(input())
a=list(map(int,input().split()))
a=a[::-1]
bad=0
check=a[0]
for i in a:
if check<i:
bad+=1
check=min(check,i)
print(bad)
| [
"mainulislam588@gmail.com"
] | mainulislam588@gmail.com |
ba6dde35036cc09f75099493ddd7bce259bb7981 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/network/latest/get_ddos_protection_plan.py | a9f464342a673d2fa7a57ce6fa5ee6df98c55f77 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 5,799 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetDdosProtectionPlanResult',
'AwaitableGetDdosProtectionPlanResult',
'get_ddos_protection_plan',
]
@pulumi.output_type
class GetDdosProtectionPlanResult:
"""
A DDoS protection plan in a resource group.
"""
def __init__(__self__, etag=None, location=None, name=None, provisioning_state=None, resource_guid=None, tags=None, type=None, virtual_networks=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_networks and not isinstance(virtual_networks, list):
raise TypeError("Expected argument 'virtual_networks' to be a list")
pulumi.set(__self__, "virtual_networks", virtual_networks)
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the DDoS protection plan resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> str:
"""
The resource GUID property of the DDoS protection plan resource. It uniquely identifies the resource, even if the user changes its name or migrate the resource across subscriptions or resource groups.
"""
return pulumi.get(self, "resource_guid")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        # May be None; the constructor stores tags unconditionally.
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        # Shadows the builtin `type` by design to mirror the API field name.
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="virtualNetworks")
    def virtual_networks(self) -> Sequence['outputs.SubResourceResponse']:
        """
        The list of virtual networks associated with the DDoS protection plan resource. This list is read-only.
        """
        # Mapped from the API field "virtualNetworks" via the getter name above.
        return pulumi.get(self, "virtual_networks")
class AwaitableGetDdosProtectionPlanResult(GetDdosProtectionPlanResult):
    # Awaitable wrapper so the same result type works in both the synchronous
    # and the ``await``-based invoke code paths.
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` makes this method a generator, which is what
        # makes instances awaitable; awaiting resolves immediately to a plain
        # GetDdosProtectionPlanResult copied from this instance's fields.
        if False:
            yield self
        return GetDdosProtectionPlanResult(
            etag=self.etag,
            location=self.location,
            name=self.name,
            provisioning_state=self.provisioning_state,
            resource_guid=self.resource_guid,
            tags=self.tags,
            type=self.type,
            virtual_networks=self.virtual_networks)
def get_ddos_protection_plan(ddos_protection_plan_name: Optional[str] = None,
                             resource_group_name: Optional[str] = None,
                             opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDdosProtectionPlanResult:
    """
    Use this data source to access information about an existing resource.


    :param str ddos_protection_plan_name: The name of the DDoS protection plan.
    :param str resource_group_name: The name of the resource group.
    :param pulumi.InvokeOptions opts: Options controlling how the invoke is performed.
    """
    # Argument keys use the camelCase wire names expected by the provider.
    __args__ = dict()
    __args__['ddosProtectionPlanName'] = ddos_protection_plan_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:network/latest:getDdosProtectionPlan', __args__, opts=opts, typ=GetDdosProtectionPlanResult).value

    # Re-wrap the plain result in the awaitable subclass for async callers.
    return AwaitableGetDdosProtectionPlanResult(
        etag=__ret__.etag,
        location=__ret__.location,
        name=__ret__.name,
        provisioning_state=__ret__.provisioning_state,
        resource_guid=__ret__.resource_guid,
        tags=__ret__.tags,
        type=__ret__.type,
        virtual_networks=__ret__.virtual_networks)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
de2dd4fb6cb12e29ac93e2b19c0685cea3da3579 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/model_control/detailed/transf_RelativeDifference/model_control_one_enabled_RelativeDifference_Lag1Trend_Seasonal_MonthOfYear_AR.py | ae4615b7c8fb265b5454bb1ba6bbe5a82f93c1df | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 176 | py | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['RelativeDifference'] , ['Lag1Trend'] , ['Seasonal_MonthOfYear'] , ['AR'] ); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
baead4af65aac01182a9b5f529ca9fa2f0e73076 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_None/trend_LinearTrend/cycle_12/ar_/test_artificial_32_None_LinearTrend_12__20.py | a0c4fd563b742db20a231fa41f78c7c3452b63b4 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 266 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 12, transform = "None", sigma = 0.0, exog_count = 20, ar_order = 0); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
328ab5856b3a3f826d156704fe27061cea5e53f2 | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/servicebus/azure-mgmt-servicebus/azure/mgmt/servicebus/v2022_01_01_preview/models/__init__.py | 21d9474c1ea607f8bcfd75f5d21e4c6745d83586 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 5,565 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._models_py3 import AccessKeys
from ._models_py3 import Action
from ._models_py3 import ArmDisasterRecovery
from ._models_py3 import ArmDisasterRecoveryListResult
from ._models_py3 import CheckNameAvailability
from ._models_py3 import CheckNameAvailabilityResult
from ._models_py3 import ConnectionState
from ._models_py3 import CorrelationFilter
from ._models_py3 import Encryption
from ._models_py3 import ErrorAdditionalInfo
from ._models_py3 import ErrorResponse
from ._models_py3 import ErrorResponseError
from ._models_py3 import FailoverProperties
from ._models_py3 import Identity
from ._models_py3 import KeyVaultProperties
from ._models_py3 import MessageCountDetails
from ._models_py3 import MigrationConfigListResult
from ._models_py3 import MigrationConfigProperties
from ._models_py3 import NWRuleSetIpRules
from ._models_py3 import NWRuleSetVirtualNetworkRules
from ._models_py3 import NetworkRuleSet
from ._models_py3 import NetworkRuleSetListResult
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationListResult
from ._models_py3 import PrivateEndpoint
from ._models_py3 import PrivateEndpointConnection
from ._models_py3 import PrivateEndpointConnectionListResult
from ._models_py3 import PrivateLinkResource
from ._models_py3 import PrivateLinkResourcesListResult
from ._models_py3 import ProxyResource
from ._models_py3 import RegenerateAccessKeyParameters
from ._models_py3 import Resource
from ._models_py3 import ResourceNamespacePatch
from ._models_py3 import Rule
from ._models_py3 import RuleListResult
from ._models_py3 import SBAuthorizationRule
from ._models_py3 import SBAuthorizationRuleListResult
from ._models_py3 import SBClientAffineProperties
from ._models_py3 import SBNamespace
from ._models_py3 import SBNamespaceListResult
from ._models_py3 import SBNamespaceUpdateParameters
from ._models_py3 import SBQueue
from ._models_py3 import SBQueueListResult
from ._models_py3 import SBSku
from ._models_py3 import SBSubscription
from ._models_py3 import SBSubscriptionListResult
from ._models_py3 import SBTopic
from ._models_py3 import SBTopicListResult
from ._models_py3 import SqlFilter
from ._models_py3 import SqlRuleAction
from ._models_py3 import Subnet
from ._models_py3 import SystemData
from ._models_py3 import TrackedResource
from ._models_py3 import UserAssignedIdentity
from ._models_py3 import UserAssignedIdentityProperties
from ._service_bus_management_client_enums import (
AccessRights,
CreatedByType,
DefaultAction,
EndPointProvisioningState,
EntityStatus,
FilterType,
KeySource,
KeyType,
ManagedServiceIdentityType,
MigrationConfigurationName,
NetworkRuleIPAction,
PrivateLinkConnectionStatus,
ProvisioningStateDR,
PublicNetworkAccess,
PublicNetworkAccessFlag,
RoleDisasterRecovery,
SkuName,
SkuTier,
TlsVersion,
UnavailableReason,
)
from ._patch import __all__ as _patch_all
from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
'AccessKeys',
'Action',
'ArmDisasterRecovery',
'ArmDisasterRecoveryListResult',
'CheckNameAvailability',
'CheckNameAvailabilityResult',
'ConnectionState',
'CorrelationFilter',
'Encryption',
'ErrorAdditionalInfo',
'ErrorResponse',
'ErrorResponseError',
'FailoverProperties',
'Identity',
'KeyVaultProperties',
'MessageCountDetails',
'MigrationConfigListResult',
'MigrationConfigProperties',
'NWRuleSetIpRules',
'NWRuleSetVirtualNetworkRules',
'NetworkRuleSet',
'NetworkRuleSetListResult',
'Operation',
'OperationDisplay',
'OperationListResult',
'PrivateEndpoint',
'PrivateEndpointConnection',
'PrivateEndpointConnectionListResult',
'PrivateLinkResource',
'PrivateLinkResourcesListResult',
'ProxyResource',
'RegenerateAccessKeyParameters',
'Resource',
'ResourceNamespacePatch',
'Rule',
'RuleListResult',
'SBAuthorizationRule',
'SBAuthorizationRuleListResult',
'SBClientAffineProperties',
'SBNamespace',
'SBNamespaceListResult',
'SBNamespaceUpdateParameters',
'SBQueue',
'SBQueueListResult',
'SBSku',
'SBSubscription',
'SBSubscriptionListResult',
'SBTopic',
'SBTopicListResult',
'SqlFilter',
'SqlRuleAction',
'Subnet',
'SystemData',
'TrackedResource',
'UserAssignedIdentity',
'UserAssignedIdentityProperties',
'AccessRights',
'CreatedByType',
'DefaultAction',
'EndPointProvisioningState',
'EntityStatus',
'FilterType',
'KeySource',
'KeyType',
'ManagedServiceIdentityType',
'MigrationConfigurationName',
'NetworkRuleIPAction',
'PrivateLinkConnectionStatus',
'ProvisioningStateDR',
'PublicNetworkAccess',
'PublicNetworkAccessFlag',
'RoleDisasterRecovery',
'SkuName',
'SkuTier',
'TlsVersion',
'UnavailableReason',
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk() | [
"noreply@github.com"
] | test-repo-billy.noreply@github.com |
9cba653d2bb957f09c90d01b76976d7b4fbbbf2c | eb8d4e39a14e2a9c6a982c7b0fa467175371854b | /kids/utils.py | 36fcae70df6025faedb7e58326b54faacd0a50fd | [] | no_license | fatelei/kidspy | 5df2312238733af661c3c238c722a608ff49df61 | ac9630b1b55cab476d39c5997a2ce7b0cf8978ef | refs/heads/master | 2021-01-21T14:01:52.358052 | 2016-06-06T08:27:24 | 2016-06-06T08:27:24 | 47,865,336 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | # -*- coding: utf8 -*-
"""
kids.utils
~~~~~~~~~~
Kids utilities.
"""
import logging
from .log_handler import KidsHandler
def setup_kids(level=logging.INFO,
               port=3388,
               host="localhost",
               topic=None):
    """Attach a :class:`KidsHandler` to the root logger.

    :param int level: Log level applied to both the root logger and the handler
    :param int port: Kids server port
    :param str host: Kids server hostname
    :param str topic: Kids topic
    """
    root_logger = logging.getLogger()
    if not root_logger.handlers:
        # basicConfig installs a default StreamHandler when none exist yet.
        logging.basicConfig(level=level)
    kids_handler = KidsHandler(port=port,
                               host=host,
                               topic=topic)
    kids_handler.setLevel(level)
    root_logger.addHandler(kids_handler)
| [
"fatelei@gmail.com"
] | fatelei@gmail.com |
4ab4e493acd6943bfa5c5663513522e6742342a3 | 6444622ad4a150993955a0c8fe260bae1af7f8ce | /djangoenv/lib/python2.7/site-packages/django/conf/locale/zh_Hant/formats.py | 77f25d0322d7582dadbce9305b2f69394c2da5a6 | [] | no_license | jeremyrich/Lesson_RestAPI_jeremy | ca965ef017c53f919c0bf97a4a23841818e246f9 | a44263e45b1cc1ba812059f6984c0f5be25cd234 | refs/heads/master | 2020-04-25T23:13:47.237188 | 2019-03-22T09:26:58 | 2019-03-22T09:26:58 | 173,138,073 | 0 | 0 | null | 2019-03-22T09:26:59 | 2019-02-28T15:34:19 | Python | UTF-8 | Python | false | false | 1,663 | py | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals

# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "Y年n月j日"  # 2016年9月5日
TIME_FORMAT = "H:i"  # 20:45
DATETIME_FORMAT = "Y年n月j日 H:i"  # 2016年9月5日 20:45
YEAR_MONTH_FORMAT = "Y年n月"  # 2016年9月
MONTH_DAY_FORMAT = "m月j日"  # 9月5日
SHORT_DATE_FORMAT = "Y年n月j日"  # 2016年9月5日
SHORT_DATETIME_FORMAT = "Y年n月j日 H:i"  # 2016年9月5日 20:45
FIRST_DAY_OF_WEEK = 1  # 星期一 (Monday)

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    "%Y/%m/%d",  # '2016/09/05'
    "%Y-%m-%d",  # '2016-09-05'
    # Bug fix: was "%Y年%n月%j日" — %n is not a valid strptime directive and
    # %j means day-of-year, so the pattern could never parse the documented
    # example. %m/%d match the intent shown in the comment.
    "%Y年%m月%d日",  # '2016年9月5日'
]
TIME_INPUT_FORMATS = [
    "%H:%M",  # '20:45'
    "%H:%M:%S",  # '20:45:29'
    "%H:%M:%S.%f",  # '20:45:29.000200'
]
DATETIME_INPUT_FORMATS = [
    "%Y/%m/%d %H:%M",  # '2016/09/05 20:45'
    "%Y-%m-%d %H:%M",  # '2016-09-05 20:45'
    "%Y年%m月%d日 %H:%M",  # '2016年9月5日 14:45'
    "%Y/%m/%d %H:%M:%S",  # '2016/09/05 20:45:29'
    "%Y-%m-%d %H:%M:%S",  # '2016-09-05 20:45:29'
    "%Y年%m月%d日 %H:%M:%S",  # '2016年9月5日 20:45:29'
    "%Y/%m/%d %H:%M:%S.%f",  # '2016/09/05 20:45:29.000200'
    "%Y-%m-%d %H:%M:%S.%f",  # '2016-09-05 20:45:29.000200'
    # Bug fix: was "%Y年%n月%j日 %H:%n:%S.%f" — invalid %n directives and a
    # %j/%d mixup; corrected to %m/%d and %H:%M:%S.
    "%Y年%m月%d日 %H:%M:%S.%f",  # '2016年9月5日 20:45:29.000200'
]
DECIMAL_SEPARATOR = "."
THOUSAND_SEPARATOR = ""
NUMBER_GROUPING = 4
| [
"jeremyrich@free.fr"
] | jeremyrich@free.fr |
c53ee2e0f93e980c58699ae131b8906d7b73e36c | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/303/113340/submittedfiles/testes.py | 59df6d6091bc1c7081466969047a53d3b612690c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | # -*- coding: utf-8 -*#notas=[]
#for i in range(0,50,1):
# notas.append(float(input('DIGITE A NOTA%d[%d]:' %(i+1 , i)))
# Reads a username and one password from stdin, then checks consecutive
# collected entries for an immediate repeat. Only one entry is collected
# here, so the loop body never runs until more entries are appended.
nome = input('Digite o nome de usuário:')
verifica = []
n = str(input('Digite a senha:'))
verifica.append(n)
# Bug fix: range() requires integer bounds; the original passed the list
# itself (`range(1, verifica, 1)`), which raises TypeError. Iterate over the
# indices of the collected entries instead.
for i in range(1, len(verifica)):
    if verifica[i - 1] == verifica[i]:
        print('SENHA INVÁLIDA')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
91898b151a5f4686627a0d145e22e70e9c49065a | 1a9013a86b5c3866b8d3133a0e73b40474e2381c | /config.py | 0c3a75bcbd85d0cd94f9b1de6d337fe32b217271 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | Stanford-PERTS/qdirector | 9591797308e941d8d247d04a4c3c8e6c4f384080 | 20bd9feead2e7517cd42d07b093c9fb12a573f9d | refs/heads/master | 2022-10-29T09:39:35.798678 | 2020-06-14T17:37:25 | 2020-06-14T17:37:25 | 272,250,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,846 | py | """Hard coded configuration parameters."""
# Lowercase letters, numbers, and underscore.
cohort_code_regex = r'^[a-z0-9_]{4,}$'
anonymous_link_regex = r'^https?://.*'
unit_test_directory = 'unit_testing'
prm_admins = []
boolean_url_arguments = [
]
integer_url_arguments = [
]
# UTC timezone, in ISO date format: YYYY-MM-DD
date_url_arguments = [
'scheduled_date', # used in sending emails
]
# UTC timezone, in an ISO-like format (missing the 'T' character between
# date and time): YYYY-MM-DD HH:mm:SS
datetime_url_arguments = [
]
# Converted to JSON with json.dumps().
json_url_arugments = [
]
# JSON only allows strings as dictionary keys. But for these values, we want
# to interpret the keys as numbers (ints).
json_url_arugments_with_numeric_keys = [
]
# These arguments are meta-data and are never applicable to specific entities
# or api actions. They appear in url_handlers.BaseHandler.get().
ignored_url_arguments = [
]
# also, any normal url argument suffixed with _json will be interpreted as json
# Converted by util.get_request_dictionary()
# Problem: we want to be able to set null values to the server, but
# angular drops these from request strings. E.g. given {a: 1, b: null}
# angular creates the request string '?a=1'
# Solution: translate javascript nulls to a special string, which
# the server will again translate to python None. We use '__null__'
# because is more client-side-ish, given that js and JSON have a null
# value.
# javascript | request string | server
# -----------|----------------|----------
# p = null; | ?p=__null__ | p = None
url_values = {
'__null__': None,
}
# In URL query strings, only the string 'true' ever counts as boolean True.
true_strings = ['true']
# Email settings
#
# Platform generated emails can only be sent from email addresses that have
# viewer permissions or greater on app engine. So if you are going to change
# this please add the sender as an application viewer on
# https://appengine.google.com/permissions?app_id=s~pegasusplatform
#
# There are other email options if this doesn't suit your needs check the
# google docs.
# https://developers.google.com/appengine/docs/python/mail/sendingmail
from_server_email_address = ""
# This address should forward to the development team
# Ben says: I could not use directly because of
# limited permissions, so I created this gmail account which forwards all its
# mail there.
to_dev_team_email_address = ""
# * spam prevention *
# time between emails
# if we exceed this for a give to address, an error will be logged
suggested_delay_between_emails = 1 # 1 day
# whitelist
# some addessess we spam, like our own
# * we allow us to spam anyone at a *@perts.net domain so
# this is the best address for an admin
addresses_we_can_spam = [
to_dev_team_email_address,
from_server_email_address,
]
| [
"chris@perts.net"
] | chris@perts.net |
2521fde1fac369839bb3a45bce890927e631e4ff | d96787f92bd86c8d8bcf01a4e7ec8f7feec24194 | /kattis/veci/solution.py | 82a4b65521b91e8e01a7f71943ef00cdc8c94e9d | [] | no_license | iandioch/solutions | 133cbc3af58fadcde0b2e981fb0e7d05801070a7 | 8b3e458b3c01179ddf776bfbb897f263f22f3693 | refs/heads/master | 2023-04-09T03:39:16.952817 | 2023-03-15T20:00:53 | 2023-03-15T20:00:53 | 47,693,495 | 48 | 40 | null | 2019-10-22T14:52:59 | 2015-12-09T13:36:55 | Python | UTF-8 | Python | false | false | 247 | py | def get_digits(n):
return sorted(str(n))
# Read an integer and search upward for the next larger number made of
# exactly the same multiset of digits (Kattis "veci").
n = int(input())
digs = get_digits(n)
dig_n = len(digs)
i = n + 1
while True:
    new_digs = get_digits(i)
    if new_digs == digs:
        # Same digits in a different order: this is the answer.
        print (i)
        break
    elif len(new_digs) > dig_n:
        # Crossed into a longer number, so no permutation exists; report 0.
        print (0)
        break
    i += 1
| [
"iandioch11@gmail.com"
] | iandioch11@gmail.com |
ad1e9727722dbe9f4ae6e592dbc09757c166744e | 55defa28b5bd395e7ead2f9ca848f378ee2c8b13 | /python/tvm/meta_schedule/utils.py | ceb5f72106041750285f662a26ecbaa43e60f73b | [
"Apache-2.0",
"BSD-3-Clause",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] | permissive | neo-ai/tvm | 456d48c8d80bd7190c91b488b8f9d6cf22918706 | da529bf421fcfddd914b41bbe9bf9d5863671266 | refs/heads/dev | 2023-03-06T03:28:18.303189 | 2022-05-09T04:25:16 | 2022-05-09T04:25:16 | 167,632,700 | 101 | 43 | Apache-2.0 | 2023-02-17T20:49:09 | 2019-01-26T00:35:54 | Python | UTF-8 | Python | false | false | 8,140 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utilities for meta schedule"""
import ctypes
import json
import os
import shutil
from typing import Any, Callable, List, Optional, Union
import psutil # type: ignore
import tvm
from tvm._ffi import get_global_func, register_func
from tvm.error import TVMError
from tvm.ir import Array, IRModule, Map
from tvm.rpc import RPCSession
from tvm.runtime import PackedFunc, String
from tvm.tir import FloatImm, IntImm
@register_func("meta_schedule.cpu_count")
def _cpu_count_impl(logical: bool = True) -> int:
return psutil.cpu_count(logical=logical) or 1
def cpu_count(logical: bool = True) -> int:
    """Return the number of logical or physical CPUs in the system

    Parameters
    ----------
    logical : bool = True
        If True, return the number of logical CPUs, otherwise return the number of physical CPUs

    Returns
    -------
    cpu_count : int
        The number of logical or physical CPUs in the system

    Note
    ----
    The meta schedule search infra intentionally does not adopt the following convention in TVM:
    - C++ API `tvm::runtime::threading::MaxConcurrency()`
    - Environment variable `TVM_NUM_THREADS` or
    - Environment variable `OMP_NUM_THREADS`

    This is because these variables are dedicated to controlling
    the runtime behavior of generated kernels, instead of the host-side search.
    Setting these variables may interfere the host-side search with profiling of generated kernels
    when measuring locally.
    """
    # Delegates to _cpu_count_impl, which is also registered as the
    # "meta_schedule.cpu_count" packed func above.
    return _cpu_count_impl(logical)
def get_global_func_with_default_on_worker(
    name: Union[None, str, Callable],
    default: Callable,
) -> Callable:
    """Get the registered global function on the worker process.

    Parameters
    ----------
    name : Union[None, str, Callable]
        If given a string, retrieve the function in TVM's global registry;
        If given a python function, return it as it is;
        Otherwise, return `default`.

    default : Callable
        The function to be returned if `name` is None.

    Returns
    -------
    result : Callable
        The retrieved global function or `default` if `name` is None

    Raises
    ------
    ValueError
        If `name` is a string but not found in the worker's global registry.
    """
    if name is None:
        return default
    if callable(name):
        return name
    try:
        return get_global_func(name)
    except TVMError as error:
        # Bug fix: this message was a plain string, so "{name}" was emitted
        # literally; it is now an f-string and interpolates the function name.
        raise ValueError(
            f"Function '{name}' is not registered on the worker process. "
            "The build function and export function should be registered in the worker process. "
            "Note that the worker process is only aware of functions registered in TVM package, "
            "if there are extra functions to be registered, "
            "please send the registration logic via initializer."
        ) from error
def get_global_func_on_rpc_session(
    session: RPCSession,
    name: str,
    extra_error_msg: Optional[str] = None,
) -> PackedFunc:
    """Look up a PackedFunc in the global registry of an RPC session.

    Parameters
    ----------
    session : RPCSession
        The RPCSession to be retrieved from
    name : str
        The name of the PackedFunc
    extra_error_msg : Optional[str]
        Extra information to provide in the error message

    Returns
    -------
    result : PackedFunc
        The result

    Raises
    ------
    AttributeError
        If the function is not present on the remote server.
    """
    try:
        func = session.get_function(name)
    except AttributeError as error:
        message = f'Unable to find function "{name}" on the remote RPC server.'
        if extra_error_msg:
            message = f"{message} {extra_error_msg}"
        raise AttributeError(message) from error
    return func
@register_func("meta_schedule.remove_build_dir")
def remove_build_dir(artifact_path: str) -> None:
"""Clean up the build directory"""
shutil.rmtree(os.path.dirname(artifact_path))
def _json_de_tvm(obj: Any) -> Any:
"""Unpack a TVM nested container to a JSON object in python.
Parameters
----------
obj : Any
The TVM nested container to be unpacked.
Returns
-------
result : Any
The unpacked json object.
"""
if obj is None:
return None
if isinstance(obj, (int, float)):
return obj
if isinstance(obj, (IntImm, FloatImm)):
return obj.value
if isinstance(obj, (str, String)):
return str(obj)
if isinstance(obj, Array):
return [_json_de_tvm(i) for i in obj]
if isinstance(obj, Map):
return {_json_de_tvm(k): _json_de_tvm(v) for k, v in obj.items()}
raise TypeError("Not supported type: " + str(type(obj)))
@register_func("meta_schedule.json_obj2str")
def json_obj2str(json_obj: Any) -> str:
json_obj = _json_de_tvm(json_obj)
return json.dumps(json_obj)
@register_func("meta_schedule.batch_json_str2obj")
def batch_json_str2obj(json_strs: List[str]) -> List[Any]:
"""Covert a list of JSON strings to a list of json objects.
Parameters
----------
json_strs : List[str]
The list of JSON strings
Returns
-------
result : List[Any]
The list of json objects
"""
return [
json.loads(json_str)
for json_str in map(str.strip, json_strs)
if json_str and (not json_str.startswith("#")) and (not json_str.startswith("//"))
]
def structural_hash(mod: IRModule) -> str:
    """Get the structural hash of a module as a decimal string.

    Parameters
    ----------
    mod : IRModule
        The module to be hashed.

    Returns
    -------
    result : str
        The structural hash of the module.
    """
    raw_hash = tvm.ir.structural_hash(mod)
    # The C++ side returns a size_t, but the FFI can hand it back as a
    # negative signed integer; map it into the unsigned 64-bit range.
    if raw_hash < 0:
        raw_hash += 1 << 64
    return str(raw_hash)
def check_override(
    derived_class: Any, base_class: Any, required: bool = True, func_name: str = None
) -> Callable:
    """Build a decorator that verifies a method override on *derived_class*.

    Parameters
    ----------
    derived_class : Any
        The derived class.
    base_class : Any
        The base class of derived class.
    required : bool
        If the method override is required.
    func_name : str
        Name of the method. Default value None, which would be set to substring
        of the decorated function, e.g. `f_generate`->`generate`.

    Returns
    -------
    func : Callable
        Raise NotImplementedError if the function is required and not
        overridden. If the function is not overridden return None, otherwise
        return the decorated function unchanged.
    """

    def decorator(func: Callable):
        # Derive the method name from the decorated function ("f_xxx" -> "xxx")
        # unless an explicit name was supplied.
        target = func_name if func_name is not None else func.__name__[2:]
        overridden = getattr(derived_class, target) is not getattr(base_class, target)
        if overridden:
            return func
        if required:
            raise NotImplementedError(f"{derived_class}'s {target} method is not implemented!")
        return None

    return decorator
def _get_hex_address(handle: ctypes.c_void_p) -> str:
"""Get the hexadecimal address of a handle.
Parameters
----------
handle : ctypes.c_void_p
The handle to be converted.
Returns
-------
result : str
The hexadecimal address of the handle.
"""
return hex(ctypes.cast(handle, ctypes.c_void_p).value)
| [
"ylchobe@gmail.com"
] | ylchobe@gmail.com |
5f7d8d8cbcdf942bf9c24e1cc9b618edee85dfb0 | 90e2c40819b50a512c38f7ea8b8c53c8a11b1103 | /requests_test.py | 2afe3a177f1b57760b3e3109109a647033d00878 | [] | no_license | z6833/EnvProtect | 6b93d8413aa3c42a453f17ff09758f54c8722ce6 | 09619e63264398d8f5b2ac4397dfd1bf7a26f43a | refs/heads/master | 2020-04-18T01:32:46.022389 | 2019-01-23T05:32:28 | 2019-01-23T05:32:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,861 | py | import time
import math
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from openpyxl import Workbook
from lxml import etree
def save_data(item_dicts):
    """Write the scraped air-quality rows to ./EnvProtext.xls."""
    wb = Workbook()
    sheet = wb.active
    # Header row: date, station, SO2, NO2, PM10, CO, O3, PM2.5, AQI,
    # primary pollutant, AQI level, AQI category.
    sheet.append(['日期', '检测点位', '二氧化硫',
                  '二氧化氮', '可吸入颗粒物', '一氧化碳',
                  '臭氧', '细颗粒物', '空气质量指数',
                  '首要污染物', 'AQI指数级别', 'AQI指数类别'])
    for record in item_dicts:
        sheet.append([
            record['date'], record['loca'], record['SO_2'],
            record['NO_2'], record['PMIO'], record['CO_1'],
            record['O3_d'], record['PM25'], record['AQIe'],
            record['prmy'], record['AQIl'], record['AQIt']
        ])
    wb.save('./EnvProtext.xls')
def parse_html(html):
    """Parse the air-quality result table out of a page source.

    Parameters
    ----------
    html : str
        Page source containing the result table.

    Returns
    -------
    list[dict]
        One dict per table row with date/station/pollutant readings.
    """
    item_list = []
    e = etree.HTML(html)
    # [1:] skips the header row of the table.
    node_list = e.xpath('//*[@id="tableForm"]/div/div[3]/table/tbody/tr')[1:]
    for node in node_list:
        # Bug fix: the original built a single dict once before the loop and
        # never appended anything, so the function always returned [].
        # Build a fresh dict per row and collect it.
        item = dict()
        item['date'] = node.xpath("./td[1]/text()")[0]
        item['loca'] = node.xpath("./td[2]/text()")[0]
        item['SO_2'] = node.xpath("./td[3]/text()")[0]
        item['NO_2'] = node.xpath("./td[4]/text()")[0]
        item['PMIO'] = node.xpath("./td[5]/text()")[0]
        item['CO_1'] = node.xpath("./td[6]/text()")[0]
        item['O3_d'] = node.xpath("./td[7]/text()")[0]
        item['PM25'] = node.xpath("./td[8]/text()")[0]
        item['AQIe'] = node.xpath("./td[9]/text()")[0]
        item['prmy'] = node.xpath("./td[10]/text()")[0]
        item['AQIl'] = node.xpath("./td[11]/text()")[0]
        item['AQIt'] = node.xpath("./td[12]/text()")[0]
        item_list.append(item)
    return item_list
def main():
    """Scrape Wuhan air-quality data with Selenium and save it to Excel."""
    workbook = Workbook()
    booksheet = workbook.active
    # Header row: date, station, SO2, NO2, PM10, CO, O3, PM2.5, AQI,
    # primary pollutant, AQI level, AQI category.
    booksheet.append(['日期', '检测点位', '二氧化硫',
                      '二氧化氮', '可吸入颗粒物', '一氧化碳',
                      '臭氧', '细颗粒物', '空气质量指数',
                      '首要污染物', 'AQI指数级别', 'AQI指数类别'])

    # Target url
    url = 'http://hbj.wuhan.gov.cn/viewAirDarlyForestWaterInfo.jspx'
    driver = webdriver.Chrome()
    driver.get(url)  # Open the target url
    wait = WebDriverWait(driver, 30)  # Wait for the page to load
    try:
        # Wait for the results iframe and switch into it
        wait.until(EC.frame_to_be_available_and_switch_to_it((By.ID, "iframepage")))
        # Select the monitoring-station type
        driver.find_element_by_xpath("//select[@id='typedictionary']/option[2]").click()
        # Fill in the start and end dates of the query
        driver.find_element_by_id('cdateBeginDic').send_keys('2019-01-01')
        driver.find_element_by_id('cdateEndDic').send_keys('2019-01-20')
        # Click the "query" button
        driver.find_element_by_xpath("//a[@href='#' and @onclick='toQuery(2);']").click()
        time.sleep(5)
    except:
        print("Error!!")
        driver.close()
        quit()
    # Total number of result rows, e.g. "431 条"; 22 rows per page
    pages_num = driver.find_element_by_xpath("//div[@class='serviceitempage fr']/span[@class='fl']").text
    pages_num = math.ceil(int(pages_num.split(' ')[0]) / 22)

    page=1; item_lists=[]
    while page <= pages_num:
        driver.find_element_by_id('goPag').send_keys(str(page))
        driver.find_element_by_id('_goPag').click()  # Jump to the next page
        html = driver.page_source  # Grab the page source
        # Parse the table rows
        e = etree.HTML(html)
        node_list = e.xpath('//*[@id="tableForm"]/div/div[3]/table/tbody/tr')[1:]
        for node in node_list:
            item=dict()
            item['date'] = node.xpath("./td[1]/text()")[0]
            item['loca'] = node.xpath("./td[2]/text()")[0]
            item['SO_2'] = node.xpath("./td[3]/text()")[0]
            item['NO_2'] = node.xpath("./td[4]/text()")[0]
            item['PMIO'] = node.xpath("./td[5]/text()")[0]
            item['CO_1'] = node.xpath("./td[6]/text()")[0]
            item['O3_d'] = node.xpath("./td[7]/text()")[0]
            item['PM25'] = node.xpath("./td[8]/text()")[0]
            item['AQIe'] = node.xpath("./td[9]/text()")[0]
            item['prmy'] = node.xpath("./td[10]/text()")[0]
            item['AQIl'] = node.xpath("./td[11]/text()")[0]
            item['AQIt'] = node.xpath("./td[12]/text()")[0]
            # Save the row
            # NOTE(review): the workbook is re-saved once per row, which is
            # slow — presumably to keep partial results on a crash; confirm.
            DATA = [
                item['date'], item['loca'], item['SO_2'],
                item['NO_2'], item['PMIO'], item['CO_1'],
                item['O3_d'], item['PM25'], item['AQIe'],
                item['prmy'], item['AQIl'], item['AQIt']
            ]
            booksheet.append(DATA)
            workbook.save('./EnvProtext.xls')
        page += 1
    driver.quit()

if __name__ == '__main__':
    main()
| [
"184419810@qq.com"
] | 184419810@qq.com |
af2a5758dbcd89c3ca38d3f816a417fd564b1d03 | 76be7830882f7840949239a3a4c3760fde085464 | /backend/home/migrations/0002_load_initial_data.py | ed1005248defdd4c566fa00c2707b1f0e6781a27 | [] | no_license | crowdbotics-apps/templatecheck4-dev-6979 | d433393a79ab46b4c07dcb76e37896842cf756aa | 4f4852535215b50745817c9213c98543f13a464f | refs/heads/master | 2022-11-12T00:09:05.630079 | 2020-07-02T20:15:51 | 2020-07-02T20:15:51 | 276,733,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,313 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
    """Seed the CustomText table with the application title."""
    custom_text_model = apps.get_model("home", "CustomText")
    custom_text_model.objects.create(title="TemplateCheck4")
def create_homepage(apps, schema_editor):
    """Seed the HomePage table with the default landing-page body."""
    home_page_model = apps.get_model("home", "HomePage")
    homepage_body = """
<h1 class="display-4 text-center">TemplateCheck4</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
    home_page_model.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
    """Ensure Site id=1 carries the app name and (if set) the custom domain."""
    site_model = apps.get_model("sites", "Site")
    custom_domain = "templatecheck4-dev-6979.botics.co"
    site_params = {"name": "TemplateCheck4"}
    if custom_domain:
        site_params["domain"] = custom_domain
    site_model.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
    # Data migration: seeds CustomText, HomePage, and the Site record.
    # Runs after home.0001_initial (defines the models) and
    # sites.0002_alter_domain_unique (makes Site.domain unique).

    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]

    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
624cb514c92074f37db1e935ea5c74b38f393a7c | 6a683f5a74863e55b60fdcbb4d0e61008744e364 | /Problem Solving/Algorithms/Birthday Cake Candles/solution.py | c378945273c9caed20d253b1632ebef60e4c9dd0 | [] | no_license | hercules261188/HackerRank | a9b7ddb16ea4e0c8a127c563b3f41aafbced47af | fc5db02b286ccb4ba0ec53d4716971f2de7d2ee5 | refs/heads/master | 2022-01-09T16:10:55.009660 | 2018-04-16T22:20:08 | 2018-04-16T22:20:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | #!/bin/python3
import sys
def birthdayCakeCandles(n, ar):
    """Return how many candles share the maximum height.

    Parameters
    ----------
    n : int
        Declared number of candles (unused; kept for the call-site contract).
    ar : list[int]
        Candle heights; must be non-empty.
    """
    # Idiom fix: max() + list.count() replace the hand-rolled counting loop,
    # which also shadowed the builtin `sum`.
    return ar.count(max(ar))
# Read the candle count, then the space-separated heights, and report
# how many candles are tallest.
n = int(input().strip())
ar = list(map(int, input().strip().split(' ')))
result = birthdayCakeCandles(n, ar)
print(result)
| [
"noreply@github.com"
] | hercules261188.noreply@github.com |
e06182272cd94022fe05c43ac446d317d0b13a88 | b0649339b23354610f63e0dc25795cc4d7c7ae06 | /django_interface_project/interface_main/migrations/0004_task_taskinterfacerelation.py | 0bebd64ca132b1b8acaf840a174a890edb021147 | [] | no_license | harter123/test-dev2 | 65afb6e4f40533629b91c76b3e6cb6ebd4245c27 | fa859efae69351ec79b3d8a56e9948797ab76b31 | refs/heads/master | 2021-07-11T03:43:39.673795 | 2019-11-24T03:30:10 | 2019-11-24T03:30:10 | 196,722,309 | 7 | 5 | null | 2020-09-10T04:34:55 | 2019-07-13T13:02:55 | Python | UTF-8 | Python | false | false | 1,192 | py | # Generated by Django 2.1.7 on 2019-11-03 02:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds the Task model plus the Task<->Interface join table, on top of the
    # Mock model introduced in 0003. FKs use SET_NULL so deleting either side
    # leaves the relation row with a NULL reference instead of cascading.

    dependencies = [
        ('interface_main', '0003_mock'),
    ]

    operations = [
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='', max_length=100, verbose_name='mock的名称')),
                ('description', models.CharField(default='', max_length=2000, verbose_name='mock的描述')),
            ],
        ),
        migrations.CreateModel(
            name='TaskInterfaceRelation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('interface', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='interface_main.Interface')),
                ('task', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='interface_main.Task')),
            ],
        ),
    ]
| [
"harter@126.com"
] | harter@126.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.